Compare commits

azureml-sd ... azureml-sd

63 Commits

| SHA1 |
|---|
| ae7b234ba0 |
| 9788d1965f |
| 387e43a423 |
| 25f407fc81 |
| dcb2c4638f |
| 7fb5dd3ef9 |
| 6a38f4bec3 |
| aed078aeab |
| f999f41ed3 |
| 07e43ee7e4 |
| aac706c3f0 |
| 4ccb278051 |
| 64a733480b |
| dd0976f678 |
| 15a3ca649d |
| 3c4770cfe5 |
| 8d7de05908 |
| 863faae57f |
| 8d3f5adcdb |
| cd3394e129 |
| ee5d0239a3 |
| 388111cedc |
| b86191ed7f |
| 22753486de |
| cf1d1dbf01 |
| 2e45d9800d |
| a9a8de02ec |
| dd8339e650 |
| 1594ee64a1 |
| 83ed8222d2 |
| b0aa91acce |
| 5928ba83bb |
| ffa3a43979 |
| 7ce79a43f1 |
| edcc50ab0c |
| 4a391522d0 |
| 1903f78285 |
| a4dfcc4693 |
| faffb3fef7 |
| 6c6227c403 |
| e3be364e7a |
| 90e20a60e9 |
| 33a4eacf1d |
| e30b53fddc |
| 95b0392ed2 |
| 796798cb49 |
| 08b0ba7854 |
| ceaf82acc6 |
| dadc93cfe5 |
| c7076bf95c |
| ebdffd5626 |
| d123880562 |
| 4864e8ea60 |
| c86db0d7fd |
| ccfbbb3b14 |
| c42ba64b15 |
| 6d8bf32243 |
| 9094da4085 |
| ebf9d2855c |
| 1bbd78eb33 |
| 77f5a69e04 |
| ce82af2ab0 |
| 2a2d2efa17 |
```diff
@@ -103,7 +103,7 @@
 "source": [
 "import azureml.core\n",
 "\n",
-"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.44.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
```
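The 1.37.0 → 1.44.0 bump above repeats across every notebook in this compare. As a reviewer's aside, here is a minimal sketch (not part of the diff) for flagging an SDK mismatch up front, assuming `azureml-core` and `packaging` are installed:

```python
import azureml.core
from packaging.version import Version

EXPECTED = "1.44.0"  # the version these notebooks are being upgraded to

# Warn early instead of failing deep inside a cell.
if Version(azureml.core.VERSION) < Version(EXPECTED):
    print(f"SDK {azureml.core.VERSION} is older than {EXPECTED}; some cells may not work.")
```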
```diff
@@ -188,13 +188,6 @@
 "### Script to process data and train model"
 ]
 },
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"The _process_data.py_ script used in the step below is a slightly modified implementation of [RAPIDS Mortgage E2E example](https://github.com/rapidsai/notebooks-contrib/blob/master/intermediate_notebooks/E2E/mortgage/mortgage_e2e.ipynb)."
-]
-},
 {
 "cell_type": "code",
 "execution_count": null,
@@ -373,7 +366,7 @@
 "run_config.target = gpu_cluster_name\n",
 "run_config.environment.docker.enabled = True\n",
 "run_config.environment.docker.gpu_support = True\n",
-"run_config.environment.docker.base_image = \"mcr.microsoft.com/azureml/base-gpu:intelmpi2018.3-cuda10.0-cudnn7-ubuntu16.04\"\n",
+"run_config.environment.docker.base_image = \"mcr.microsoft.com/azureml/openmpi4.1.0-cuda11.1-cudnn8-ubuntu20.04\"\n",
 "run_config.environment.spark.precache_packages = False\n",
 "run_config.data_references={'data':data_ref.to_config()}"
 ]
```
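The second hunk retires the CUDA 10.0 / Ubuntu 16.04 base image in favor of the CUDA 11.1 / Ubuntu 20.04 one. The diff keeps the older per-field `docker.*` toggles; a sketch of the equivalent setup with the `Environment`-based idiom (the environment name here is illustrative, not from the PR):

```python
from azureml.core import Environment
from azureml.core.runconfig import RunConfiguration

env = Environment(name="rapids-gpu")  # illustrative name
env.docker.base_image = (
    "mcr.microsoft.com/azureml/openmpi4.1.0-cuda11.1-cudnn8-ubuntu20.04"
)

run_config = RunConfiguration()
run_config.environment = env  # replaces the per-field docker.enabled/gpu_support flags
```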
```diff
@@ -49,7 +49,7 @@
 "* `fairlearn>=0.6.2` (pre-v0.5.0 will work with minor modifications)\n",
 "* `joblib`\n",
 "* `liac-arff`\n",
-"* `raiwidgets~=0.7.0`\n",
+"* `raiwidgets`\n",
 "\n",
 "Fairlearn relies on features introduced in v0.22.1 of `scikit-learn`. If you have an older version already installed, please uncomment and run the following cell:"
 ]
```
```diff
@@ -6,4 +6,7 @@ dependencies:
 - fairlearn>=0.6.2
 - joblib
 - liac-arff
-- raiwidgets~=0.15.0
+- raiwidgets~=0.19.0
+- itsdangerous==2.0.1
+- markupsafe<2.1.0
+- protobuf==3.20.0
```
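The three new pins are most likely compatibility guards for transitive dependencies: `itsdangerous` 2.1 and `markupsafe` 2.1 removed APIs that older Flask/Jinja2 releases import, and `protobuf` 4.x broke code generated against 3.x. A small sketch for confirming the resolved versions at runtime (standard library only):

```python
from importlib.metadata import version

# Sanity-check that the environment actually resolved to the pinned releases.
for pkg in ("itsdangerous", "markupsafe", "protobuf", "raiwidgets"):
    print(pkg, version(pkg))
```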
```diff
@@ -51,7 +51,7 @@
 "* `fairlearn>=0.6.2` (also works for pre-v0.5.0 with slight modifications)\n",
 "* `joblib`\n",
 "* `liac-arff`\n",
-"* `raiwidgets~=0.7.0`\n",
+"* `raiwidgets`\n",
 "\n",
 "Fairlearn relies on features introduced in v0.22.1 of `scikit-learn`. If you have an older version already installed, please uncomment and run the following cell:"
 ]
```
```diff
@@ -6,4 +6,7 @@ dependencies:
 - fairlearn>=0.6.2
 - joblib
 - liac-arff
-- raiwidgets~=0.15.0
+- raiwidgets~=0.19.0
+- itsdangerous==2.0.1
+- markupsafe<2.1.0
+- protobuf==3.20.0
```
```diff
@@ -1,29 +1,33 @@
 name: azure_automl
+channels:
+- conda-forge
+- pytorch
+- main
 dependencies:
 # The python interpreter version.
-# Currently Azure ML only supports 3.5.2 and later.
+# Currently Azure ML only supports 3.6.0 and later.
-- pip==21.1.2
+- pip==20.2.4
-- python>=3.5.2,<3.8
+- python>=3.6,<3.9
-- boto3==1.15.18
+- matplotlib==3.2.1
-- matplotlib==2.1.0
+- py-xgboost==1.3.3
-- numpy==1.18.5
-- cython
-- urllib3<1.24
-- scipy>=1.4.1,<=1.5.2
-- scikit-learn==0.22.1
-- pandas==0.25.1
-- py-xgboost<=0.90
-- conda-forge::fbprophet==0.5
-- holidays==0.9.11
 - pytorch::pytorch=1.4.0
+- conda-forge::fbprophet==0.7.1
 - cudatoolkit=10.1.243
-- tornado==6.1.0
+- scipy==1.5.3
+- notebook
+- pywin32==227
+- PySocks==1.7.1
+- conda-forge::pyqt==5.12.3
+- jsonschema==4.9.1
+- Pygments==2.12.0
 
 - pip:
 # Required packages for AzureML execution, history, and data preparation.
-- azureml-widgets~=1.37.0
+- azureml-widgets~=1.44.0
 - pytorch-transformers==1.0.0
-- spacy==2.1.8
+- spacy==2.2.4
+- pystan==2.19.1.1
 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.37.0/validated_win32_requirements.txt [--no-deps]
+- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.44.0/validated_win32_requirements.txt [--no-deps]
 - arch==4.14
+- wasabi==0.9.1
```
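A hedged sketch for turning an updated conda file like this into a registered AzureML environment; the file name `automl_env.yml` is a placeholder for whichever platform file applies:

```python
from azureml.core import Environment, Workspace

ws = Workspace.from_config()

# "automl_env.yml" is a placeholder; point at the win32/linux/darwin file you use.
env = Environment.from_conda_specification(
    name="azure_automl", file_path="automl_env.yml"
)
env.register(workspace=ws)
```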
```diff
@@ -1,30 +1,33 @@
 name: azure_automl
+channels:
+- conda-forge
+- pytorch
+- main
 dependencies:
 # The python interpreter version.
-# Currently Azure ML only supports 3.5.2 and later.
+# Currently Azure ML only supports 3.6.0 and later.
-- pip==21.1.2
+- pip==20.2.4
-- python>=3.5.2,<3.8
+- python>=3.6,<3.9
-- nb_conda
+- boto3==1.20.19
-- boto3==1.15.18
+- botocore<=1.23.19
-- matplotlib==2.1.0
+- matplotlib==3.2.1
-- numpy==1.18.5
+- numpy>=1.21.6,<=1.22.3
-- cython
+- cython==0.29.14
-- urllib3<1.24
+- urllib3==1.26.7
-- scipy>=1.4.1,<=1.5.2
+- scipy>=1.4.1,<=1.5.3
 - scikit-learn==0.22.1
-- pandas==0.25.1
+- py-xgboost<=1.3.3
-- py-xgboost<=0.90
+- holidays==0.10.3
-- conda-forge::fbprophet==0.5
+- conda-forge::fbprophet==0.7.1
-- holidays==0.9.11
 - pytorch::pytorch=1.4.0
 - cudatoolkit=10.1.243
-- tornado==6.1.0
 
 - pip:
 # Required packages for AzureML execution, history, and data preparation.
-- azureml-widgets~=1.37.0
+- azureml-widgets~=1.44.0
 - pytorch-transformers==1.0.0
-- spacy==2.1.8
+- spacy==2.2.4
+- pystan==2.19.1.1
 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.37.0/validated_linux_requirements.txt [--no-deps]
+- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.44.0/validated_linux_requirements.txt [--no-deps]
 - arch==4.14
```
```diff
@@ -1,31 +1,34 @@
 name: azure_automl
+channels:
+- conda-forge
+- pytorch
+- main
 dependencies:
 # The python interpreter version.
-# Currently Azure ML only supports 3.5.2 and later.
+# Currently Azure ML only supports 3.6.0 and later.
-- pip==21.1.2
+- pip==20.2.4
 - nomkl
-- python>=3.5.2,<3.8
+- python>=3.6,<3.9
-- nb_conda
+- boto3==1.20.19
-- boto3==1.15.18
+- botocore<=1.23.19
-- matplotlib==2.1.0
+- matplotlib==3.2.1
-- numpy==1.18.5
+- numpy>=1.21.6,<=1.22.3
-- cython
+- cython==0.29.14
-- urllib3<1.24
+- urllib3==1.26.7
-- scipy>=1.4.1,<=1.5.2
+- scipy>=1.4.1,<=1.5.3
 - scikit-learn==0.22.1
-- pandas==0.25.1
+- py-xgboost<=1.3.3
-- py-xgboost<=0.90
+- holidays==0.10.3
-- conda-forge::fbprophet==0.5
+- conda-forge::fbprophet==0.7.1
-- holidays==0.9.11
 - pytorch::pytorch=1.4.0
 - cudatoolkit=9.0
-- tornado==6.1.0
 
 - pip:
 # Required packages for AzureML execution, history, and data preparation.
-- azureml-widgets~=1.37.0
+- azureml-widgets~=1.44.0
 - pytorch-transformers==1.0.0
-- spacy==2.1.8
+- spacy==2.2.4
+- pystan==2.19.1.1
 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.37.0/validated_darwin_requirements.txt [--no-deps]
+- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.44.0/validated_darwin_requirements.txt [--no-deps]
 - arch==4.14
```
```diff
@@ -1,21 +1,5 @@
 {
 "cells": [
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"Copyright (c) Microsoft Corporation. All rights reserved.\n",
-"\n",
-"Licensed under the MIT License."
-]
-},
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-""
-]
-},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -30,6 +14,7 @@
 "1. [Results](#Results)\n",
 "1. [Deploy](#Deploy)\n",
 "1. [Test](#Test)\n",
+"1. [Use auto-generated code for retraining](#Using-the-auto-generated-model-training-code-for-retraining-on-new-data)\n",
 "1. [Acknowledgements](#Acknowledgements)"
 ]
 },
@@ -55,6 +40,7 @@
 "7. Create a container image.\n",
 "8. Create an Azure Container Instance (ACI) service.\n",
 "9. Test the ACI service.\n",
+"10. Leverage the auto generated training code and use it for retraining on an updated dataset\n",
 "\n",
 "In addition this notebook showcases the following features\n",
 "- **Blocking** certain pipelines\n",
@@ -74,7 +60,9 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"name": "automl-import"
+},
 "outputs": [],
 "source": [
 "import json\n",
@@ -99,16 +87,6 @@
 "This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
 ]
 },
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
-"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
-]
-},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -138,24 +116,27 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"name": "ws-setup"
+},
 "outputs": [],
 "source": [
 "ws = Workspace.from_config()\n",
 "\n",
 "# choose a name for experiment\n",
-"experiment_name = 'automl-classification-bmarketing-all'\n",
+"experiment_name = \"automl-classification-bmarketing-all\"\n",
 "\n",
-"experiment=Experiment(ws, experiment_name)\n",
+"experiment = Experiment(ws, experiment_name)\n",
 "\n",
 "output = {}\n",
-"output['Subscription ID'] = ws.subscription_id\n",
+"output[\"Subscription ID\"] = ws.subscription_id\n",
-"output['Workspace'] = ws.name\n",
+"output[\"Workspace\"] = ws.name\n",
-"output['Resource Group'] = ws.resource_group\n",
+"output[\"Resource Group\"] = ws.resource_group\n",
-"output['Location'] = ws.location\n",
+"output[\"Location\"] = ws.location\n",
-"output['Experiment Name'] = experiment.name\n",
+"output[\"Experiment Name\"] = experiment.name\n",
-"pd.set_option('display.max_colwidth', -1)\n",
+"output[\"SDK Version\"] = azureml.core.VERSION\n",
-"outputDf = pd.DataFrame(data = output, index = [''])\n",
+"pd.set_option(\"display.max_colwidth\", None)\n",
+"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
 "outputDf.T"
 ]
 },
```
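Two substantive tweaks hide in the reformat above: the output table now records the SDK version, and `pd.set_option('display.max_colwidth', -1)` becomes `None`. pandas deprecated `-1` as the unlimited-width sentinel, so the supported spelling is:

```python
import pandas as pd

# None is the supported "no truncation" value; -1 raises a deprecation warning.
pd.set_option("display.max_colwidth", None)
```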
```diff
@@ -176,7 +157,9 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"tags": []
+},
 "outputs": [],
 "source": [
 "from azureml.core.compute import ComputeTarget, AmlCompute\n",
@@ -188,12 +171,12 @@
 "# Verify that cluster does not exist already\n",
 "try:\n",
 " compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
-" print('Found existing cluster, use it.')\n",
+" print(\"Found existing cluster, use it.\")\n",
 "except ComputeTargetException:\n",
-" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
+" compute_config = AmlCompute.provisioning_configuration(\n",
-" max_nodes=6)\n",
+" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
+" )\n",
 " compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
-"\n",
 "compute_target.wait_for_completion(show_output=True)"
 ]
 },
```
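After `wait_for_completion` returns, the cluster's provisioning state and node counts can be inspected; a short sketch assuming the `compute_target` created above:

```python
# Assumes `compute_target` from the provisioning cell above.
status = compute_target.get_status()
print(status.serialize())  # provisioning state, node counts, errors if any
```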
```diff
@@ -226,7 +209,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"data = pd.read_csv(\"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv\")\n",
+"data = pd.read_csv(\n",
+" \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv\"\n",
+")\n",
 "data.head()"
 ]
 },
@@ -241,7 +226,12 @@
 "\n",
 "missing_rate = 0.75\n",
 "n_missing_samples = int(np.floor(data.shape[0] * missing_rate))\n",
-"missing_samples = np.hstack((np.zeros(data.shape[0] - n_missing_samples, dtype=np.bool), np.ones(n_missing_samples, dtype=np.bool)))\n",
+"missing_samples = np.hstack(\n",
+" (\n",
+" np.zeros(data.shape[0] - n_missing_samples, dtype=bool),\n",
+" np.ones(n_missing_samples, dtype=bool),\n",
+" )\n",
+")\n",
 "rng = np.random.RandomState(0)\n",
 "rng.shuffle(missing_samples)\n",
 "missing_features = rng.randint(0, data.shape[1], n_missing_samples)\n",
@@ -254,19 +244,21 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"if not os.path.isdir('data'):\n",
+"if not os.path.isdir(\"data\"):\n",
-" os.mkdir('data')\n",
+" os.mkdir(\"data\")\n",
-" \n",
 "# Save the train data to a csv to be uploaded to the datastore\n",
 "pd.DataFrame(data).to_csv(\"data/train_data.csv\", index=False)\n",
 "\n",
 "ds = ws.get_default_datastore()\n",
-"ds.upload(src_dir='./data', target_path='bankmarketing', overwrite=True, show_progress=True)\n",
+"ds.upload(\n",
+" src_dir=\"./data\", target_path=\"bankmarketing\", overwrite=True, show_progress=True\n",
+")\n",
 "\n",
-" \n",
 "\n",
 "# Upload the training data as a tabular dataset for access during training on remote compute\n",
-"train_data = Dataset.Tabular.from_delimited_files(path=ds.path('bankmarketing/train_data.csv'))\n",
+"train_data = Dataset.Tabular.from_delimited_files(\n",
+" path=ds.path(\"bankmarketing/train_data.csv\")\n",
+")\n",
 "label = \"y\""
 ]
 },
@@ -326,6 +318,7 @@
 "|**n_cross_validations**|Number of cross validation splits.|\n",
 "|**training_data**|Input dataset, containing both features and label column.|\n",
 "|**label_column_name**|The name of the label column.|\n",
+"|**enable_code_generation**|Flag to enable generation of training code for each of the models that AutoML is creating.\n",
 "\n",
 "**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
 ]
@@ -337,33 +330,37 @@
 "outputs": [],
 "source": [
 "automl_settings = {\n",
-" \"experiment_timeout_hours\" : 0.3,\n",
+" \"experiment_timeout_hours\": 0.3,\n",
-" \"enable_early_stopping\" : True,\n",
+" \"enable_early_stopping\": True,\n",
 " \"iteration_timeout_minutes\": 5,\n",
 " \"max_concurrent_iterations\": 4,\n",
 " \"max_cores_per_iteration\": -1,\n",
-" #\"n_cross_validations\": 2,\n",
+" # \"n_cross_validations\": 2,\n",
-" \"primary_metric\": 'AUC_weighted',\n",
+" \"primary_metric\": \"AUC_weighted\",\n",
-" \"featurization\": 'auto',\n",
+" \"featurization\": \"auto\",\n",
 " \"verbosity\": logging.INFO,\n",
+" \"enable_code_generation\": True,\n",
 "}\n",
 "\n",
-"automl_config = AutoMLConfig(task = 'classification',\n",
+"automl_config = AutoMLConfig(\n",
-" debug_log = 'automl_errors.log',\n",
+" task=\"classification\",\n",
-" compute_target=compute_target,\n",
+" debug_log=\"automl_errors.log\",\n",
-" experiment_exit_score = 0.9984,\n",
+" compute_target=compute_target,\n",
-" blocked_models = ['KNN','LinearSVM'],\n",
+" experiment_exit_score=0.9984,\n",
-" enable_onnx_compatible_models=True,\n",
+" blocked_models=[\"KNN\", \"LinearSVM\"],\n",
-" training_data = train_data,\n",
+" enable_onnx_compatible_models=True,\n",
-" label_column_name = label,\n",
+" training_data=train_data,\n",
-" validation_data = validation_dataset,\n",
+" label_column_name=label,\n",
-" **automl_settings\n",
+" validation_data=validation_dataset,\n",
-" )"
+" **automl_settings,\n",
+")"
 ]
 },
 {
 "cell_type": "markdown",
-"metadata": {},
+"metadata": {
+"tags": []
+},
 "source": [
 "Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
 ]
```
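The new `enable_code_generation` flag is what the retraining section added later in this compare relies on: each trained model's run gets a standalone training script in its outputs. A sketch of fetching it once the run finishes (paths taken from the retraining cells below):

```python
# Assumes `remote_run` from the submit cell above.
best_run = remote_run.get_best_child()
best_run.download_file(
    "outputs/generated_code/script.py", "generated_code/training_script.py"
)
```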
```diff
@@ -371,15 +368,19 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"name": "experiment-submit"
+},
 "outputs": [],
 "source": [
-"remote_run = experiment.submit(automl_config, show_output = False)"
+"remote_run = experiment.submit(automl_config, show_output=False)"
 ]
 },
 {
 "cell_type": "markdown",
-"metadata": {},
+"metadata": {
+"tags": []
+},
 "source": [
 "Run the following cell to access previous runs. Uncomment the cell below and update the run_id."
 ]
@@ -390,9 +391,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"#from azureml.train.automl.run import AutoMLRun\n",
+"# from azureml.train.automl.run import AutoMLRun\n",
-"#remote_run = AutoMLRun(experiment=experiment, run_id='<run_ID_goes_here')\n",
+"# remote_run = AutoMLRun(experiment=experiment, run_id='<run_ID_goes_here')\n",
-"#remote_run"
+"# remote_run"
 ]
 },
 {
@@ -430,8 +431,10 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"# Download the featuurization summary JSON file locally\n",
+"# Download the featurization summary JSON file locally\n",
-"best_run.download_file(\"outputs/featurization_summary.json\", \"featurization_summary.json\")\n",
+"best_run.download_file(\n",
+" \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
+")\n",
 "\n",
 "# Render the JSON as a pandas DataFrame\n",
 "with open(\"featurization_summary.json\", \"r\") as f:\n",
@@ -450,11 +453,14 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"name": "run-details"
+},
 "outputs": [],
 "source": [
 "from azureml.widgets import RunDetails\n",
-"RunDetails(remote_run).show() "
+"\n",
+"RunDetails(remote_run).show()"
 ]
 },
 {
@@ -473,9 +479,12 @@
 "source": [
 "# Wait for the best model explanation run to complete\n",
 "from azureml.core.run import Run\n",
+"\n",
 "model_explainability_run_id = remote_run.id + \"_\" + \"ModelExplain\"\n",
 "print(model_explainability_run_id)\n",
-"model_explainability_run = Run(experiment=experiment, run_id=model_explainability_run_id)\n",
+"model_explainability_run = Run(\n",
+" experiment=experiment, run_id=model_explainability_run_id\n",
+")\n",
 "model_explainability_run.wait_for_completion()\n",
 "\n",
 "# Get the best run object\n",
```
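The `remote_run.id + "_ModelExplain"` convention locates the child run that computes explanations. Once it completes, the explanation itself can be pulled with the `azureml-interpret` client; a sketch assuming `best_run` from the notebook:

```python
from azureml.interpret import ExplanationClient

client = ExplanationClient.from_run(best_run)
explanation = client.download_model_explanation()
print(explanation.get_feature_importance_dict())
```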
```diff
@@ -556,6 +565,7 @@
 "outputs": [],
 "source": [
 "from azureml.automl.runtime.onnx_convert import OnnxConverter\n",
+"\n",
 "onnx_fl_path = \"./best_model.onnx\"\n",
 "OnnxConverter.save_onnx_model(onnx_mdl, onnx_fl_path)"
 ]
@@ -580,13 +590,17 @@
 "\n",
 "from azureml.automl.runtime.onnx_convert import OnnxInferenceHelper\n",
 "\n",
+"\n",
 "def get_onnx_res(run):\n",
-" res_path = 'onnx_resource.json'\n",
+" res_path = \"onnx_resource.json\"\n",
-" run.download_file(name=constants.MODEL_RESOURCE_PATH_ONNX, output_file_path=res_path)\n",
+" run.download_file(\n",
+" name=constants.MODEL_RESOURCE_PATH_ONNX, output_file_path=res_path\n",
+" )\n",
 " with open(res_path) as f:\n",
 " result = json.load(f)\n",
 " return result\n",
 "\n",
+"\n",
 "if sys.version_info < OnnxConvertConstants.OnnxIncompatiblePythonVersion:\n",
 " test_df = test_dataset.to_pandas_dataframe()\n",
 " mdl_bytes = onnx_mdl.SerializeToString()\n",
```
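Outside the SDK's `OnnxInferenceHelper`, the saved `best_model.onnx` can also be loaded directly with `onnxruntime`; a minimal sketch that only inspects the model's expected inputs (feeding data depends on the exported schema):

```python
import onnxruntime as ort

sess = ort.InferenceSession("best_model.onnx")
for inp in sess.get_inputs():
    print(inp.name, inp.shape, inp.type)  # names/shapes vary per exported model
```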
```diff
@@ -598,7 +612,7 @@
 " print(pred_onnx)\n",
 " print(pred_prob_onnx)\n",
 "else:\n",
-" print('Please use Python version 3.6 or 3.7 to run the inference helper.')"
+" print(\"Please use Python version 3.6 or 3.7 to run the inference helper.\")"
 ]
 },
 {
@@ -609,7 +623,7 @@
 "\n",
 "### Retrieve the Best Model\n",
 "\n",
-"Below we select the best pipeline from our iterations. The `get_best_child` method returns the Run object for the best model based on the default primary metric. There are additional flags that can be passed to the method if we want to retrieve the best Run based on any of the other supported metrics, or if we are just interested in the best run among the ONNX compatible runs. As always, you can execute `remote_run.get_best_child??` in a new cell to view the source or docs for the function."
+"Below we select the best pipeline from our iterations. The `get_best_child` method returns the Run object for the best model based on the default primary metric. There are additional flags that can be passed to the method if we want to retrieve the best Run based on any of the other supported metrics, or if we are just interested in the best run among the ONNX compatible runs. As always, you can execute `??remote_run.get_best_child` in a new cell to view the source or docs for the function."
 ]
 },
 {
@@ -618,7 +632,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"remote_run.get_best_child??"
+"??remote_run.get_best_child"
 ]
 },
 {
@@ -647,11 +661,11 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"model_name = best_run.properties['model_name']\n",
+"model_name = best_run.properties[\"model_name\"]\n",
 "\n",
-"script_file_name = 'inference/score.py'\n",
+"script_file_name = \"inference/score.py\"\n",
 "\n",
-"best_run.download_file('outputs/scoring_file_v_1_0_0.py', 'inference/score.py')"
+"best_run.download_file(\"outputs/scoring_file_v_1_0_0.py\", \"inference/score.py\")"
 ]
 },
 {
@@ -668,11 +682,15 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"description = 'AutoML Model trained on bank marketing data to predict if a client will subscribe to a term deposit'\n",
+"description = \"AutoML Model trained on bank marketing data to predict if a client will subscribe to a term deposit\"\n",
 "tags = None\n",
-"model = remote_run.register_model(model_name = model_name, description = description, tags = tags)\n",
+"model = remote_run.register_model(\n",
+" model_name=model_name, description=description, tags=tags\n",
+")\n",
 "\n",
-"print(remote_run.model_id) # This will be written to the script file later in the notebook."
+"print(\n",
+" remote_run.model_id\n",
+") # This will be written to the script file later in the notebook."
 ]
 },
 {
@@ -690,16 +708,20 @@
 "source": [
 "from azureml.core.model import InferenceConfig\n",
 "from azureml.core.webservice import AciWebservice\n",
+"from azureml.core.webservice import Webservice\n",
 "from azureml.core.model import Model\n",
+"from azureml.core.environment import Environment\n",
 "\n",
-"inference_config = InferenceConfig(environment = best_run.get_environment(), entry_script=script_file_name)\n",
+"inference_config = InferenceConfig(entry_script=script_file_name)\n",
 "\n",
-"aciconfig = AciWebservice.deploy_configuration(cpu_cores = 2, \n",
+"aciconfig = AciWebservice.deploy_configuration(\n",
-" memory_gb = 2, \n",
+" cpu_cores=2,\n",
-" tags = {'area': \"bmData\", 'type': \"automl_classification\"}, \n",
+" memory_gb=2,\n",
-" description = 'sample service for Automl Classification')\n",
+" tags={\"area\": \"bmData\", \"type\": \"automl_classification\"},\n",
+" description=\"sample service for Automl Classification\",\n",
+")\n",
 "\n",
-"aci_service_name = 'automl-sample-bankmarketing-all'\n",
+"aci_service_name = model_name.lower()\n",
 "print(aci_service_name)\n",
 "aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)\n",
 "aci_service.wait_for_deployment(True)\n",
@@ -721,7 +743,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"#aci_service.get_logs()"
+"# aci_service.get_logs()"
 ]
 },
 {
@@ -751,8 +773,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"X_test = test_dataset.drop_columns(columns=['y'])\n",
+"X_test = test_dataset.drop_columns(columns=[\"y\"])\n",
-"y_test = test_dataset.keep_columns(columns=['y'], validate=True)\n",
+"y_test = test_dataset.keep_columns(columns=[\"y\"], validate=True)\n",
 "test_dataset.take(5).to_pandas_dataframe()"
 ]
 },
@@ -774,13 +796,13 @@
 "source": [
 "import requests\n",
 "\n",
-"X_test_json = X_test.to_json(orient='records')\n",
+"X_test_json = X_test.to_json(orient=\"records\")\n",
-"data = \"{\\\"data\\\": \" + X_test_json +\"}\"\n",
+"data = '{\"data\": ' + X_test_json + \"}\"\n",
-"headers = {'Content-Type': 'application/json'}\n",
+"headers = {\"Content-Type\": \"application/json\"}\n",
 "\n",
 "resp = requests.post(aci_service.scoring_uri, data, headers=headers)\n",
 "\n",
-"y_pred = json.loads(json.loads(resp.text))['result']"
+"y_pred = json.loads(json.loads(resp.text))[\"result\"]"
 ]
 },
 {
```
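The raw `requests.post` cell can also be expressed through the SDK, which reads the scoring URI and auth keys off the service object; a sketch assuming `aci_service` and `X_test_json` from the cells above:

```python
# Same JSON payload as the requests-based cell.
payload = '{"data": ' + X_test_json + "}"
result = aci_service.run(input_data=payload)
print(result)
```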
```diff
@@ -790,7 +812,7 @@
 "outputs": [],
 "source": [
 "actual = array(y_test)\n",
-"actual = actual[:,0]\n",
+"actual = actual[:, 0]\n",
 "print(len(y_pred), \" \", len(actual))"
 ]
 },
@@ -806,27 +828,35 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"scrolled": true
+},
 "outputs": [],
 "source": [
 "%matplotlib notebook\n",
 "from sklearn.metrics import confusion_matrix\n",
 "import itertools\n",
 "\n",
-"cf =confusion_matrix(actual,y_pred)\n",
+"cf = confusion_matrix(actual, y_pred)\n",
-"plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest')\n",
+"plt.imshow(cf, cmap=plt.cm.Blues, interpolation=\"nearest\")\n",
 "plt.colorbar()\n",
-"plt.title('Confusion Matrix')\n",
+"plt.title(\"Confusion Matrix\")\n",
-"plt.xlabel('Predicted')\n",
+"plt.xlabel(\"Predicted\")\n",
-"plt.ylabel('Actual')\n",
+"plt.ylabel(\"Actual\")\n",
-"class_labels = ['no','yes']\n",
+"class_labels = [\"no\", \"yes\"]\n",
 "tick_marks = np.arange(len(class_labels))\n",
-"plt.xticks(tick_marks,class_labels)\n",
+"plt.xticks(tick_marks, class_labels)\n",
-"plt.yticks([-0.5,0,1,1.5],['','no','yes',''])\n",
+"plt.yticks([-0.5, 0, 1, 1.5], [\"\", \"no\", \"yes\", \"\"])\n",
 "# plotting text value inside cells\n",
-"thresh = cf.max() / 2.\n",
+"thresh = cf.max() / 2.0\n",
-"for i,j in itertools.product(range(cf.shape[0]),range(cf.shape[1])):\n",
+"for i, j in itertools.product(range(cf.shape[0]), range(cf.shape[1])):\n",
-" plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black')\n",
+" plt.text(\n",
+" j,\n",
+" i,\n",
+" format(cf[i, j], \"d\"),\n",
+" horizontalalignment=\"center\",\n",
+" color=\"white\" if cf[i, j] > thresh else \"black\",\n",
+" )\n",
 "plt.show()"
 ]
 },
@@ -848,6 +878,142 @@
 "aci_service.delete()"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Using the auto generated model training code for retraining on new data\n",
+"\n",
+"Because we enabled code generation when the original experiment was created, we now have access to the code that was used to generate any of the AutoML tried models. Below we'll be using the generated training script of the best model to retrain on a new dataset.\n",
+"\n",
+"For this demo, we'll begin by creating new retraining dataset by combining the Train & Validation datasets that were used in the original experiment."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"original_train_data = pd.read_csv(\n",
+" \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv\"\n",
+")\n",
+"\n",
+"valid_data = pd.read_csv(\n",
+" \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_validate.csv\"\n",
+")\n",
+"\n",
+"# we'll emulate an updated dataset for retraining by combining the Train & Validation datasets into a new one\n",
+"retrain_pd = pd.concat([original_train_data, valid_data])\n",
+"retrain_pd.to_csv(\"data/retrain_data.csv\", index=False)\n",
+"ds.upload_files(\n",
+" files=[\"data/retrain_data.csv\"],\n",
+" target_path=\"bankmarketing/\",\n",
+" overwrite=True,\n",
+" show_progress=True,\n",
+")\n",
+"retrain_dataset = Dataset.Tabular.from_delimited_files(\n",
+" path=ds.path(\"bankmarketing/retrain_data.csv\")\n",
+")\n",
+"\n",
+"# after creating and uploading the retraining dataset, let's register it with the workspace for reuse\n",
+"retrain_dataset = retrain_dataset.register(\n",
+" workspace=ws,\n",
+" name=\"Bankmarketing_retrain\",\n",
+" description=\"Updated training dataset, includes validation data\",\n",
+" create_new_version=True,\n",
+")"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"Next, we'll download the generated script for the best run and use it for retraining. For more advanced scenarios, you can customize the training script as you need: change the featurization pipeline, change the learner algorithm or its hyperparameters, etc. \n",
+"\n",
+"For this exercise, we'll leave the script as it was generated."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"# download the autogenerated training script into the generated_code folder\n",
+"best_run.download_file(\n",
+" \"outputs/generated_code/script.py\", \"generated_code/training_script.py\"\n",
+")\n",
+"\n",
+"# view the contents of the autogenerated training script\n",
+"! cat generated_code/training_script.py"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"import uuid\n",
+"from azureml.core import ScriptRunConfig\n",
+"from azureml._restclient.models import RunTypeV2\n",
+"from azureml._restclient.models.create_run_dto import CreateRunDto\n",
+"from azureml._restclient.run_client import RunClient\n",
+"\n",
+"codegen_runid = str(uuid.uuid4())\n",
+"client = RunClient(\n",
+" experiment.workspace.service_context,\n",
+" experiment.name,\n",
+" codegen_runid,\n",
+" experiment_id=experiment.id,\n",
+")\n",
+"\n",
+"# override the training_dataset_id to point to our new retraining dataset we just registered above\n",
+"dataset_arguments = [\"--training_dataset_id\", retrain_dataset.id]\n",
+"\n",
+"# create the retraining run as a child of the AutoML generated training run\n",
+"create_run_dto = CreateRunDto(\n",
+" run_id=codegen_runid,\n",
+" parent_run_id=best_run.id,\n",
+" description=\"AutoML Codegen Script Run using an updated training dataset\",\n",
+" target=cpu_cluster_name,\n",
+" run_type_v2=RunTypeV2(orchestrator=\"Execution\", traits=[\"automl-codegen\"]),\n",
+")\n",
+"\n",
+"# the script for retraining run is pointing to the AutoML generated script\n",
+"src = ScriptRunConfig(\n",
+" source_directory=\"generated_code\",\n",
+" script=\"training_script.py\",\n",
+" arguments=dataset_arguments,\n",
+" compute_target=cpu_cluster_name,\n",
+" environment=best_run.get_environment(),\n",
+")\n",
+"run_dto = client.create_run(run_id=codegen_runid, create_run_dto=create_run_dto)\n",
+"\n",
+"# submit the experiment\n",
+"retraining_run = experiment.submit(config=src, run_id=codegen_runid)\n",
+"retraining_run"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"After the run completes, we can get download/test/deploy to the model it has built."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"retraining_run.wait_for_completion()\n",
+"\n",
+"retraining_run.download_file(\"outputs/model.pkl\", \"generated_code/model.pkl\")"
+]
+},
 {
 "cell_type": "markdown",
 "metadata": {},
```
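One caution on the big retraining addition: it drives `azureml._restclient` (`RunClient`, `CreateRunDto`), a private API, purely to parent the retraining run under the original AutoML run. If that lineage isn't needed, a plain `ScriptRunConfig` submit is enough; a sketch assuming the variables from the cells above:

```python
from azureml.core import ScriptRunConfig

# Same script and arguments, without the private RunClient plumbing; the run
# simply won't be a child of the original AutoML run.
src = ScriptRunConfig(
    source_directory="generated_code",
    script="training_script.py",
    arguments=["--training_dataset_id", retrain_dataset.id],
    compute_target=cpu_cluster_name,
    environment=best_run.get_environment(),
)
retraining_run = experiment.submit(config=src)
```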
```diff
@@ -890,6 +1056,9 @@
 ],
 "friendly_name": "Automated ML run with basic edition features.",
 "index_order": 5,
+"kernel_info": {
+"name": "python3-azureml"
+},
 "kernelspec": {
 "display_name": "Python 3.6",
 "language": "python",
@@ -905,7 +1074,10 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.6.7"
+"version": "3.8.12"
+},
+"nteract": {
+"version": "nteract-front-end@1.0.0"
 },
 "tags": [
 "featurization",
@@ -916,5 +1088,5 @@
 "task": "Classification"
 },
 "nbformat": 4,
-"nbformat_minor": 2
+"nbformat_minor": 1
 }
```
```diff
@@ -1,21 +1,5 @@
 {
 "cells": [
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"Copyright (c) Microsoft Corporation. All rights reserved.\n",
-"\n",
-"Licensed under the MIT License."
-]
-},
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-""
-]
-},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -87,16 +71,6 @@
 "This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
 ]
 },
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
-"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
-]
-},
 {
 "cell_type": "code",
 "execution_count": null,
@@ -106,18 +80,19 @@
 "ws = Workspace.from_config()\n",
 "\n",
 "# choose a name for experiment\n",
-"experiment_name = 'automl-classification-ccard-remote'\n",
+"experiment_name = \"automl-classification-ccard-remote\"\n",
 "\n",
-"experiment=Experiment(ws, experiment_name)\n",
+"experiment = Experiment(ws, experiment_name)\n",
 "\n",
 "output = {}\n",
-"output['Subscription ID'] = ws.subscription_id\n",
+"output[\"Subscription ID\"] = ws.subscription_id\n",
-"output['Workspace'] = ws.name\n",
+"output[\"Workspace\"] = ws.name\n",
-"output['Resource Group'] = ws.resource_group\n",
+"output[\"Resource Group\"] = ws.resource_group\n",
-"output['Location'] = ws.location\n",
+"output[\"Location\"] = ws.location\n",
-"output['Experiment Name'] = experiment.name\n",
+"output[\"Experiment Name\"] = experiment.name\n",
-"pd.set_option('display.max_colwidth', -1)\n",
+"output[\"SDK Version\"] = azureml.core.VERSION\n",
-"outputDf = pd.DataFrame(data = output, index = [''])\n",
+"pd.set_option(\"display.max_colwidth\", None)\n",
+"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
 "outputDf.T"
 ]
 },
@@ -150,12 +125,12 @@
 "# Verify that cluster does not exist already\n",
 "try:\n",
 " compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
-" print('Found existing cluster, use it.')\n",
+" print(\"Found existing cluster, use it.\")\n",
 "except ComputeTargetException:\n",
-" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
+" compute_config = AmlCompute.provisioning_configuration(\n",
-" max_nodes=6)\n",
+" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
+" )\n",
 " compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
-"\n",
 "compute_target.wait_for_completion(show_output=True)"
 ]
 },
@@ -178,13 +153,15 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"name": "load-data"
+},
 "outputs": [],
 "source": [
 "data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv\"\n",
 "dataset = Dataset.Tabular.from_delimited_files(data)\n",
 "training_data, validation_data = dataset.random_split(percentage=0.8, seed=223)\n",
-"label_column_name = 'Class'"
+"label_column_name = \"Class\""
 ]
 },
 {
@@ -210,25 +187,28 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"name": "automl-config"
+},
 "outputs": [],
 "source": [
 "automl_settings = {\n",
 " \"n_cross_validations\": 3,\n",
-" \"primary_metric\": 'AUC_weighted',\n",
+" \"primary_metric\": \"average_precision_score_weighted\",\n",
 " \"enable_early_stopping\": True,\n",
 " \"max_concurrent_iterations\": 2, # This is a limit for testing purpose, please increase it as per cluster size\n",
 " \"experiment_timeout_hours\": 0.25, # This is a time limit for testing purposes, remove it for real use cases, this will drastically limit ablity to find the best model possible\n",
 " \"verbosity\": logging.INFO,\n",
 "}\n",
 "\n",
-"automl_config = AutoMLConfig(task = 'classification',\n",
+"automl_config = AutoMLConfig(\n",
-" debug_log = 'automl_errors.log',\n",
+" task=\"classification\",\n",
-" compute_target = compute_target,\n",
+" debug_log=\"automl_errors.log\",\n",
-" training_data = training_data,\n",
+" compute_target=compute_target,\n",
-" label_column_name = label_column_name,\n",
+" training_data=training_data,\n",
-" **automl_settings\n",
+" label_column_name=label_column_name,\n",
-" )"
+" **automl_settings,\n",
+")"
 ]
 },
 {
```
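Note the metric swap from `AUC_weighted` to `average_precision_score_weighted`: the credit-card dataset is heavily imbalanced, and average precision tracks minority-class performance more faithfully than ROC AUC. For intuition, a standalone sklearn sketch with illustrative labels and scores:

```python
from sklearn.metrics import average_precision_score, roc_auc_score

# Illustrative values only; in the notebook these come from the fitted model.
y_true = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
y_score = [0.1, 0.2, 0.15, 0.05, 0.3, 0.2, 0.4, 0.1, 0.9, 0.35]
print("average precision:", average_precision_score(y_true, y_score))
print("ROC AUC:", roc_auc_score(y_true, y_score))
```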
```diff
@@ -244,7 +224,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"remote_run = experiment.submit(automl_config, show_output = False)"
+"remote_run = experiment.submit(automl_config, show_output=False)"
 ]
 },
 {
@@ -254,8 +234,8 @@
 "outputs": [],
 "source": [
 "# If you need to retrieve a run that already started, use the following code\n",
-"#from azureml.train.automl.run import AutoMLRun\n",
+"# from azureml.train.automl.run import AutoMLRun\n",
-"#remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
+"# remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
 ]
 },
 {
@@ -287,6 +267,7 @@
 "outputs": [],
 "source": [
 "from azureml.widgets import RunDetails\n",
+"\n",
 "RunDetails(remote_run).show()"
 ]
 },
@@ -353,8 +334,12 @@
 "outputs": [],
 "source": [
 "# convert the test data to dataframe\n",
-"X_test_df = validation_data.drop_columns(columns=[label_column_name]).to_pandas_dataframe()\n",
+"X_test_df = validation_data.drop_columns(\n",
-"y_test_df = validation_data.keep_columns(columns=[label_column_name], validate=True).to_pandas_dataframe()"
+" columns=[label_column_name]\n",
+").to_pandas_dataframe()\n",
+"y_test_df = validation_data.keep_columns(\n",
+" columns=[label_column_name], validate=True\n",
+").to_pandas_dataframe()"
 ]
 },
 {
@@ -388,20 +373,26 @@
 "import numpy as np\n",
 "import itertools\n",
 "\n",
-"cf =confusion_matrix(y_test_df.values,y_pred)\n",
+"cf = confusion_matrix(y_test_df.values, y_pred)\n",
-"plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest')\n",
+"plt.imshow(cf, cmap=plt.cm.Blues, interpolation=\"nearest\")\n",
 "plt.colorbar()\n",
-"plt.title('Confusion Matrix')\n",
+"plt.title(\"Confusion Matrix\")\n",
-"plt.xlabel('Predicted')\n",
+"plt.xlabel(\"Predicted\")\n",
-"plt.ylabel('Actual')\n",
+"plt.ylabel(\"Actual\")\n",
-"class_labels = ['False','True']\n",
+"class_labels = [\"False\", \"True\"]\n",
 "tick_marks = np.arange(len(class_labels))\n",
-"plt.xticks(tick_marks,class_labels)\n",
+"plt.xticks(tick_marks, class_labels)\n",
-"plt.yticks([-0.5,0,1,1.5],['','False','True',''])\n",
+"plt.yticks([-0.5, 0, 1, 1.5], [\"\", \"False\", \"True\", \"\"])\n",
 "# plotting text value inside cells\n",
-"thresh = cf.max() / 2.\n",
+"thresh = cf.max() / 2.0\n",
-"for i,j in itertools.product(range(cf.shape[0]),range(cf.shape[1])):\n",
+"for i, j in itertools.product(range(cf.shape[0]), range(cf.shape[1])):\n",
-" plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black')\n",
+" plt.text(\n",
+" j,\n",
+" i,\n",
+" format(cf[i, j], \"d\"),\n",
+" horizontalalignment=\"center\",\n",
+" color=\"white\" if cf[i, j] > thresh else \"black\",\n",
+" )\n",
 "plt.show()"
 ]
 },
```
@@ -1,21 +1,5 @@
|
|||||||
{
|
{
|
||||||
"cells": [
|
"cells": [
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
|
||||||
"\n",
|
|
||||||
"Licensed under the MIT License."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
""
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
@@ -78,7 +62,7 @@
|
|||||||
"from azureml.core.compute import ComputeTarget\n",
|
"from azureml.core.compute import ComputeTarget\n",
|
||||||
"from azureml.core.run import Run\n",
|
"from azureml.core.run import Run\n",
|
||||||
"from azureml.widgets import RunDetails\n",
|
"from azureml.widgets import RunDetails\n",
|
||||||
"from azureml.core.model import Model \n",
|
"from azureml.core.model import Model\n",
|
||||||
"from helper import run_inference, get_result_df\n",
|
"from helper import run_inference, get_result_df\n",
|
||||||
"from azureml.train.automl import AutoMLConfig\n",
|
"from azureml.train.automl import AutoMLConfig\n",
|
||||||
"from sklearn.datasets import fetch_20newsgroups"
|
"from sklearn.datasets import fetch_20newsgroups"
|
||||||
@@ -91,16 +75,6 @@
|
|||||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
|
|
||||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
@@ -117,18 +91,19 @@
|
|||||||
"ws = Workspace.from_config()\n",
|
"ws = Workspace.from_config()\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Choose an experiment name.\n",
|
"# Choose an experiment name.\n",
|
||||||
"experiment_name = 'automl-classification-text-dnn'\n",
|
"experiment_name = \"automl-classification-text-dnn\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"experiment = Experiment(ws, experiment_name)\n",
|
"experiment = Experiment(ws, experiment_name)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"output = {}\n",
|
"output = {}\n",
|
||||||
"output['Subscription ID'] = ws.subscription_id\n",
|
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||||
"output['Workspace Name'] = ws.name\n",
|
"output[\"Workspace Name\"] = ws.name\n",
|
||||||
"output['Resource Group'] = ws.resource_group\n",
|
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||||
"output['Location'] = ws.location\n",
|
"output[\"Location\"] = ws.location\n",
|
||||||
"output['Experiment Name'] = experiment.name\n",
|
"output[\"Experiment Name\"] = experiment.name\n",
|
||||||
"pd.set_option('display.max_colwidth', -1)\n",
|
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||||
|
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||||
"outputDf.T"
|
"outputDf.T"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -161,13 +136,16 @@
|
|||||||
"# Verify that cluster does not exist already\n",
|
"# Verify that cluster does not exist already\n",
|
||||||
"try:\n",
|
"try:\n",
|
||||||
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
||||||
" print('Found existing cluster, use it.')\n",
|
" print(\"Found existing cluster, use it.\")\n",
|
||||||
"except ComputeTargetException:\n",
|
"except ComputeTargetException:\n",
|
||||||
" compute_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_NC6\", # CPU for BiLSTM, such as \"STANDARD_DS12_V2\" \n",
|
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||||
" # To use BERT (this is recommended for best performance), select a GPU such as \"STANDARD_NC6\" \n",
|
" vm_size=\"STANDARD_NC6\", # CPU for BiLSTM, such as \"STANDARD_D2_V2\"\n",
|
||||||
" # or similar GPU option\n",
|
" # To use BERT (this is recommended for best performance), select a GPU such as \"STANDARD_NC6\"\n",
|
||||||
" # available in your workspace\n",
|
" # or similar GPU option\n",
|
||||||
" max_nodes = num_nodes)\n",
|
" # available in your workspace\n",
|
||||||
|
" idle_seconds_before_scaledown=60,\n",
|
||||||
|
" max_nodes=num_nodes,\n",
|
||||||
|
" )\n",
|
||||||
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"compute_target.wait_for_completion(show_output=True)"
|
"compute_target.wait_for_completion(show_output=True)"
|
||||||
@@ -187,41 +165,55 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"data_dir = \"text-dnn-data\" # Local directory to store data\n",
|
"data_dir = \"text-dnn-data\" # Local directory to store data\n",
|
||||||
"blobstore_datadir = data_dir # Blob store directory to store data in\n",
|
"blobstore_datadir = data_dir # Blob store directory to store data in\n",
|
||||||
"target_column_name = 'y'\n",
|
"target_column_name = \"y\"\n",
|
||||||
"feature_column_name = 'X'\n",
|
"feature_column_name = \"X\"\n",
|
||||||
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"def get_20newsgroups_data():\n",
|
"def get_20newsgroups_data():\n",
|
||||||
" '''Fetches 20 Newsgroups data from scikit-learn\n",
|
" \"\"\"Fetches 20 Newsgroups data from scikit-learn\n",
|
||||||
" Returns them in form of pandas dataframes\n",
|
" Returns them in form of pandas dataframes\n",
|
||||||
" '''\n",
|
" \"\"\"\n",
|
||||||
" remove = ('headers', 'footers', 'quotes')\n",
|
" remove = (\"headers\", \"footers\", \"quotes\")\n",
|
||||||
" categories = [\n",
|
" categories = [\n",
|
||||||
" 'rec.sport.baseball',\n",
|
" \"rec.sport.baseball\",\n",
|
||||||
" 'rec.sport.hockey',\n",
|
" \"rec.sport.hockey\",\n",
|
||||||
" 'comp.graphics',\n",
|
" \"comp.graphics\",\n",
|
||||||
" 'sci.space',\n",
|
" \"sci.space\",\n",
|
||||||
" ]\n",
|
" ]\n",
|
||||||
"\n",
|
"\n",
|
||||||
" data = fetch_20newsgroups(subset = 'train', categories = categories,\n",
|
" data = fetch_20newsgroups(\n",
|
||||||
" shuffle = True, random_state = 42,\n",
|
" subset=\"train\",\n",
|
||||||
" remove = remove)\n",
|
" categories=categories,\n",
|
||||||
" data = pd.DataFrame({feature_column_name: data.data, target_column_name: data.target})\n",
|
" shuffle=True,\n",
|
||||||
|
" random_state=42,\n",
|
||||||
|
" remove=remove,\n",
|
||||||
|
" )\n",
|
||||||
|
" data = pd.DataFrame(\n",
|
||||||
|
" {feature_column_name: data.data, target_column_name: data.target}\n",
|
||||||
|
" )\n",
|
||||||
"\n",
|
"\n",
|
||||||
" data_train = data[:200]\n",
|
" data_train = data[:200]\n",
|
||||||
" data_test = data[200:300] \n",
|
" data_test = data[200:300]\n",
|
||||||
"\n",
|
"\n",
|
||||||
" data_train = remove_blanks_20news(data_train, feature_column_name, target_column_name)\n",
|
" data_train = remove_blanks_20news(\n",
|
||||||
|
" data_train, feature_column_name, target_column_name\n",
|
||||||
|
" )\n",
|
||||||
" data_test = remove_blanks_20news(data_test, feature_column_name, target_column_name)\n",
|
" data_test = remove_blanks_20news(data_test, feature_column_name, target_column_name)\n",
|
||||||
" \n",
|
"\n",
|
||||||
" return data_train, data_test\n",
|
" return data_train, data_test\n",
|
||||||
" \n",
|
"\n",
|
||||||
|
"\n",
|
||||||
"def remove_blanks_20news(data, feature_column_name, target_column_name):\n",
|
"def remove_blanks_20news(data, feature_column_name, target_column_name):\n",
|
||||||
" \n",
|
"\n",
|
||||||
" data[feature_column_name] = data[feature_column_name].replace(r'\\n', ' ', regex=True).apply(lambda x: x.strip())\n",
|
" for index, row in data.iterrows():\n",
|
||||||
" data = data[data[feature_column_name] != '']\n",
|
" data.at[index, feature_column_name] = (\n",
|
||||||
" \n",
|
" row[feature_column_name].replace(\"\\n\", \" \").strip()\n",
|
||||||
|
" )\n",
|
||||||
|
"\n",
|
||||||
|
" data = data[data[feature_column_name] != \"\"]\n",
|
||||||
|
"\n",
|
||||||
" return data"
|
" return data"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -242,16 +234,15 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"if not os.path.isdir(data_dir):\n",
|
"if not os.path.isdir(data_dir):\n",
|
||||||
" os.mkdir(data_dir)\n",
|
" os.mkdir(data_dir)\n",
|
||||||
" \n",
|
"\n",
|
||||||
"train_data_fname = data_dir + '/train_data.csv'\n",
|
"train_data_fname = data_dir + \"/train_data.csv\"\n",
|
||||||
"test_data_fname = data_dir + '/test_data.csv'\n",
|
"test_data_fname = data_dir + \"/test_data.csv\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"data_train.to_csv(train_data_fname, index=False)\n",
|
"data_train.to_csv(train_data_fname, index=False)\n",
|
||||||
"data_test.to_csv(test_data_fname, index=False)\n",
|
"data_test.to_csv(test_data_fname, index=False)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"datastore = ws.get_default_datastore()\n",
|
"datastore = ws.get_default_datastore()\n",
|
||||||
"datastore.upload(src_dir=data_dir, target_path=blobstore_datadir,\n",
|
"datastore.upload(src_dir=data_dir, target_path=blobstore_datadir, overwrite=True)"
|
||||||
" overwrite=True)"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -260,7 +251,9 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"train_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, blobstore_datadir + '/train_data.csv')])"
|
"train_dataset = Dataset.Tabular.from_delimited_files(\n",
|
||||||
|
" path=[(datastore, blobstore_datadir + \"/train_data.csv\")]\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -285,8 +278,8 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"automl_settings = {\n",
|
"automl_settings = {\n",
|
||||||
" \"experiment_timeout_minutes\": 30,\n",
|
" \"experiment_timeout_minutes\": 30,\n",
|
||||||
" \"primary_metric\": 'AUC_weighted',\n",
|
" \"primary_metric\": \"accuracy\",\n",
|
||||||
" \"max_concurrent_iterations\": num_nodes, \n",
|
" \"max_concurrent_iterations\": num_nodes,\n",
|
||||||
" \"max_cores_per_iteration\": -1,\n",
|
" \"max_cores_per_iteration\": -1,\n",
|
||||||
" \"enable_dnn\": True,\n",
|
" \"enable_dnn\": True,\n",
|
||||||
" \"enable_early_stopping\": True,\n",
|
" \"enable_early_stopping\": True,\n",
|
||||||
@@ -296,14 +289,15 @@
|
|||||||
" \"enable_stack_ensemble\": False,\n",
|
" \"enable_stack_ensemble\": False,\n",
|
||||||
"}\n",
|
"}\n",
|
||||||
"\n",
|
"\n",
|
||||||
"automl_config = AutoMLConfig(task = 'classification',\n",
|
"automl_config = AutoMLConfig(\n",
|
||||||
" debug_log = 'automl_errors.log',\n",
|
" task=\"classification\",\n",
|
||||||
" compute_target=compute_target,\n",
|
" debug_log=\"automl_errors.log\",\n",
|
||||||
" training_data=train_dataset,\n",
|
" compute_target=compute_target,\n",
|
||||||
" label_column_name=target_column_name,\n",
|
" training_data=train_dataset,\n",
|
||||||
" blocked_models = ['LightGBM', 'XGBoostClassifier'],\n",
|
" label_column_name=target_column_name,\n",
|
||||||
" **automl_settings\n",
|
" blocked_models=[\"LightGBM\", \"XGBoostClassifier\"],\n",
|
||||||
" )"
|
" **automl_settings,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -342,8 +336,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"For local inferencing, you can load the model locally via. the method `remote_run.get_output()`. For more information on the arguments expected by this method, you can run `remote_run.get_output??`.\n",
|
"For local inferencing, you can load the model locally via. the method `remote_run.get_output()`. For more information on the arguments expected by this method, you can run `remote_run.get_output??`.\n",
|
||||||
"Note that when the model contains BERT, this step will require pytorch and pytorch-transformers installed in your local environment. The exact versions of these packages can be found in the **automl_env.yml** file located in the local copy of your MachineLearningNotebooks folder here:\n",
|
"Note that when the model contains BERT, this step will require pytorch and pytorch-transformers installed in your local environment. The exact versions of these packages can be found in the **automl_env.yml** file located in the local copy of your azureml-examples folder here: \"azureml-examples/python-sdk/tutorials/automl-with-azureml\""
|
||||||
"MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/automl_env.yml\n"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
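For concreteness, here is a minimal local-inference sketch of the retrieval described above; it assumes `remote_run` has completed and the local environment matches **automl_env.yml** (the `X_local` frame is a hypothetical stand-in for your own test data):

```python
# A sketch, not part of the notebook: load the best model locally and score new data.
best_run, fitted_model = remote_run.get_output()  # best child run + fitted sklearn pipeline

# X_local is a hypothetical pandas DataFrame with the same feature column ("X") as training.
y_local_pred = fitted_model.predict(X_local)
print(y_local_pred[:10])
```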
{
|
{
|
||||||
@@ -369,15 +362,17 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Download the featuurization summary JSON file locally\n",
|
"# Download the featurization summary JSON file locally\n",
|
||||||
"best_run.download_file(\"outputs/featurization_summary.json\", \"featurization_summary.json\")\n",
|
"best_run.download_file(\n",
|
||||||
|
" \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
|
||||||
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Render the JSON as a pandas DataFrame\n",
|
"# Render the JSON as a pandas DataFrame\n",
|
||||||
"with open(\"featurization_summary.json\", \"r\") as f:\n",
|
"with open(\"featurization_summary.json\", \"r\") as f:\n",
|
||||||
" records = json.load(f)\n",
|
" records = json.load(f)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"featurization_summary = pd.DataFrame.from_records(records)\n",
|
"featurization_summary = pd.DataFrame.from_records(records)\n",
|
||||||
"featurization_summary['Transformations'].tolist()"
|
"featurization_summary[\"Transformations\"].tolist()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -402,7 +397,7 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"summary_df = get_result_df(automl_run)\n",
|
"summary_df = get_result_df(automl_run)\n",
|
||||||
"best_dnn_run_id = summary_df['run_id'].iloc[0]\n",
|
"best_dnn_run_id = summary_df[\"run_id\"].iloc[0]\n",
|
||||||
"best_dnn_run = Run(experiment, best_dnn_run_id)"
|
"best_dnn_run = Run(experiment, best_dnn_run_id)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -412,11 +407,11 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"model_dir = 'Model' # Local folder where the model will be stored temporarily\n",
|
"model_dir = \"Model\" # Local folder where the model will be stored temporarily\n",
|
||||||
"if not os.path.isdir(model_dir):\n",
|
"if not os.path.isdir(model_dir):\n",
|
||||||
" os.mkdir(model_dir)\n",
|
" os.mkdir(model_dir)\n",
|
||||||
" \n",
|
"\n",
|
||||||
"best_dnn_run.download_file('outputs/model.pkl', model_dir + '/model.pkl')"
|
"best_dnn_run.download_file(\"outputs/model.pkl\", model_dir + \"/model.pkl\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -433,11 +428,10 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Register the model\n",
|
"# Register the model\n",
|
||||||
"model_name = 'textDNN-20News'\n",
|
"model_name = \"textDNN-20News\"\n",
|
||||||
"model = Model.register(model_path = model_dir + '/model.pkl',\n",
|
"model = Model.register(\n",
|
||||||
" model_name = model_name,\n",
|
" model_path=model_dir + \"/model.pkl\", model_name=model_name, tags=None, workspace=ws\n",
|
||||||
" tags=None,\n",
|
")"
|
||||||
" workspace=ws)"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -462,7 +456,9 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, blobstore_datadir + '/test_data.csv')])\n",
|
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
|
||||||
|
" path=[(datastore, blobstore_datadir + \"/test_data.csv\")]\n",
|
||||||
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# preview the first 3 rows of the dataset\n",
|
"# preview the first 3 rows of the dataset\n",
|
||||||
"test_dataset.take(3).to_pandas_dataframe()"
|
"test_dataset.take(3).to_pandas_dataframe()"
|
||||||
@@ -483,9 +479,9 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"script_folder = os.path.join(os.getcwd(), 'inference')\n",
|
"script_folder = os.path.join(os.getcwd(), \"inference\")\n",
|
||||||
"os.makedirs(script_folder, exist_ok=True)\n",
|
"os.makedirs(script_folder, exist_ok=True)\n",
|
||||||
"shutil.copy('infer.py', script_folder)"
|
"shutil.copy(\"infer.py\", script_folder)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -494,8 +490,15 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"test_run = run_inference(test_experiment, compute_target, script_folder, best_dnn_run,\n",
|
"test_run = run_inference(\n",
|
||||||
" test_dataset, target_column_name, model_name)"
|
" test_experiment,\n",
|
||||||
|
" compute_target,\n",
|
||||||
|
" script_folder,\n",
|
||||||
|
" best_dnn_run,\n",
|
||||||
|
" test_dataset,\n",
|
||||||
|
" target_column_name,\n",
|
||||||
|
" model_name,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -1,55 +1,70 @@
|
|||||||
import pandas as pd
|
import pandas as pd
|
||||||
from azureml.core import Environment
|
from azureml.core import Environment, ScriptRunConfig
|
||||||
from azureml.train.estimator import Estimator
|
|
||||||
from azureml.core.run import Run
|
from azureml.core.run import Run
|
||||||
|
|
||||||
|
|
||||||
def run_inference(test_experiment, compute_target, script_folder, train_run,
|
def run_inference(
|
||||||
test_dataset, target_column_name, model_name):
|
test_experiment,
|
||||||
|
compute_target,
|
||||||
|
script_folder,
|
||||||
|
train_run,
|
||||||
|
test_dataset,
|
||||||
|
target_column_name,
|
||||||
|
model_name,
|
||||||
|
):
|
||||||
|
|
||||||
inference_env = train_run.get_environment()
|
inference_env = train_run.get_environment()
|
||||||
|
|
||||||
est = Estimator(source_directory=script_folder,
|
est = ScriptRunConfig(
|
||||||
entry_script='infer.py',
|
source_directory=script_folder,
|
||||||
script_params={
|
script="infer.py",
|
||||||
'--target_column_name': target_column_name,
|
arguments=[
|
||||||
'--model_name': model_name
|
"--target_column_name",
|
||||||
},
|
target_column_name,
|
||||||
inputs=[
|
"--model_name",
|
||||||
test_dataset.as_named_input('test_data')
|
model_name,
|
||||||
],
|
"--input-data",
|
||||||
compute_target=compute_target,
|
test_dataset.as_named_input("data"),
|
||||||
environment_definition=inference_env)
|
],
|
||||||
|
compute_target=compute_target,
|
||||||
|
environment=inference_env,
|
||||||
|
)
|
||||||
|
|
||||||
run = test_experiment.submit(
|
run = test_experiment.submit(
|
||||||
est, tags={
|
est,
|
||||||
'training_run_id': train_run.id,
|
tags={
|
||||||
'run_algorithm': train_run.properties['run_algorithm'],
|
"training_run_id": train_run.id,
|
||||||
'valid_score': train_run.properties['score'],
|
"run_algorithm": train_run.properties["run_algorithm"],
|
||||||
'primary_metric': train_run.properties['primary_metric']
|
"valid_score": train_run.properties["score"],
|
||||||
})
|
"primary_metric": train_run.properties["primary_metric"],
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
run.log("run_algorithm", run.tags['run_algorithm'])
|
run.log("run_algorithm", run.tags["run_algorithm"])
|
||||||
return run
|
return run
|
||||||
|
|
||||||
|
|
||||||
def get_result_df(remote_run):
|
def get_result_df(remote_run):
|
||||||
|
|
||||||
children = list(remote_run.get_children(recursive=True))
|
children = list(remote_run.get_children(recursive=True))
|
||||||
summary_df = pd.DataFrame(index=['run_id', 'run_algorithm',
|
summary_df = pd.DataFrame(
|
||||||
'primary_metric', 'Score'])
|
index=["run_id", "run_algorithm", "primary_metric", "Score"]
|
||||||
|
)
|
||||||
goal_minimize = False
|
goal_minimize = False
|
||||||
for run in children:
|
for run in children:
|
||||||
if('run_algorithm' in run.properties and 'score' in run.properties):
|
if "run_algorithm" in run.properties and "score" in run.properties:
|
||||||
summary_df[run.id] = [run.id, run.properties['run_algorithm'],
|
summary_df[run.id] = [
|
||||||
run.properties['primary_metric'],
|
run.id,
|
||||||
float(run.properties['score'])]
|
run.properties["run_algorithm"],
|
||||||
if('goal' in run.properties):
|
run.properties["primary_metric"],
|
||||||
goal_minimize = run.properties['goal'].split('_')[-1] == 'min'
|
float(run.properties["score"]),
|
||||||
|
]
|
||||||
|
if "goal" in run.properties:
|
||||||
|
goal_minimize = run.properties["goal"].split("_")[-1] == "min"
|
||||||
|
|
||||||
summary_df = summary_df.T.sort_values(
|
summary_df = summary_df.T.sort_values(
|
||||||
'Score',
|
"Score", ascending=goal_minimize
|
||||||
ascending=goal_minimize).drop_duplicates(['run_algorithm'])
|
).drop_duplicates(["run_algorithm"])
|
||||||
summary_df = summary_df.set_index('run_algorithm')
|
summary_df = summary_df.set_index("run_algorithm")
|
||||||
|
|
||||||
return summary_df
|
return summary_df
|
||||||
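The `run_inference` change above is the standard `Estimator` to `ScriptRunConfig` migration. A standalone sketch of the same pattern, with illustrative names (the environment and compute names below are placeholders, not from the source):

```python
from azureml.core import Environment, Experiment, ScriptRunConfig, Workspace

ws = Workspace.from_config()
env = Environment.get(ws, name="AzureML-Minimal")  # placeholder registered environment

# ScriptRunConfig replaces Estimator: script_params becomes a flat arguments list,
# and environment_definition becomes environment.
src = ScriptRunConfig(
    source_directory="inference",
    script="infer.py",
    arguments=["--model_name", "textDNN-20News"],
    compute_target="cpu-cluster",  # placeholder compute name
    environment=env,
)
run = Experiment(ws, "scriptrunconfig-demo").submit(src)
```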
|
|||||||
@@ -6,39 +6,47 @@ import numpy as np
|
|||||||
from sklearn.externals import joblib
|
from sklearn.externals import joblib
|
||||||
|
|
||||||
from azureml.automl.runtime.shared.score import scoring, constants
|
from azureml.automl.runtime.shared.score import scoring, constants
|
||||||
from azureml.core import Run
|
from azureml.core import Run, Dataset
|
||||||
from azureml.core.model import Model
|
from azureml.core.model import Model
|
||||||
|
|
||||||
|
|
||||||
parser = argparse.ArgumentParser()
|
parser = argparse.ArgumentParser()
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--target_column_name', type=str, dest='target_column_name',
|
"--target_column_name",
|
||||||
help='Target Column Name')
|
type=str,
|
||||||
|
dest="target_column_name",
|
||||||
|
help="Target Column Name",
|
||||||
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--model_name', type=str, dest='model_name',
|
"--model_name", type=str, dest="model_name", help="Name of registered model"
|
||||||
help='Name of registered model')
|
)
|
||||||
|
|
||||||
|
parser.add_argument("--input-data", type=str, dest="input_data", help="Dataset")
|
||||||
|
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
target_column_name = args.target_column_name
|
target_column_name = args.target_column_name
|
||||||
model_name = args.model_name
|
model_name = args.model_name
|
||||||
|
|
||||||
print('args passed are: ')
|
print("args passed are: ")
|
||||||
print('Target column name: ', target_column_name)
|
print("Target column name: ", target_column_name)
|
||||||
print('Name of registered model: ', model_name)
|
print("Name of registered model: ", model_name)
|
||||||
|
|
||||||
model_path = Model.get_model_path(model_name)
|
model_path = Model.get_model_path(model_name)
|
||||||
# deserialize the model file back into a sklearn model
|
# deserialize the model file back into a sklearn model
|
||||||
model = joblib.load(model_path)
|
model = joblib.load(model_path)
|
||||||
|
|
||||||
run = Run.get_context()
|
run = Run.get_context()
|
||||||
# get input dataset by name
|
|
||||||
test_dataset = run.input_datasets['test_data']
|
|
||||||
|
|
||||||
X_test_df = test_dataset.drop_columns(columns=[target_column_name]) \
|
test_dataset = Dataset.get_by_id(run.experiment.workspace, id=args.input_data)
|
||||||
.to_pandas_dataframe()
|
|
||||||
y_test_df = test_dataset.with_timestamp_columns(None) \
|
X_test_df = test_dataset.drop_columns(
|
||||||
.keep_columns(columns=[target_column_name]) \
|
columns=[target_column_name]
|
||||||
.to_pandas_dataframe()
|
).to_pandas_dataframe()
|
||||||
|
y_test_df = (
|
||||||
|
test_dataset.with_timestamp_columns(None)
|
||||||
|
.keep_columns(columns=[target_column_name])
|
||||||
|
.to_pandas_dataframe()
|
||||||
|
)
|
||||||
|
|
||||||
predicted = model.predict_proba(X_test_df)
|
predicted = model.predict_proba(X_test_df)
|
||||||
|
|
||||||
@@ -47,11 +55,13 @@ if isinstance(predicted, pd.DataFrame):
|
|||||||
|
|
||||||
# Use the AutoML scoring module
|
# Use the AutoML scoring module
|
||||||
train_labels = model.classes_
|
train_labels = model.classes_
|
||||||
class_labels = np.unique(np.concatenate((y_test_df.values, np.reshape(train_labels, (-1, 1)))))
|
class_labels = np.unique(
|
||||||
|
np.concatenate((y_test_df.values, np.reshape(train_labels, (-1, 1))))
|
||||||
|
)
|
||||||
classification_metrics = list(constants.CLASSIFICATION_SCALAR_SET)
|
classification_metrics = list(constants.CLASSIFICATION_SCALAR_SET)
|
||||||
scores = scoring.score_classification(y_test_df.values, predicted,
|
scores = scoring.score_classification(
|
||||||
classification_metrics,
|
y_test_df.values, predicted, classification_metrics, class_labels, train_labels
|
||||||
class_labels, train_labels)
|
)
|
||||||
|
|
||||||
print("scores:")
|
print("scores:")
|
||||||
print(scores)
|
print(scores)
|
||||||
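Note how the two hunks above pair up: the submitting side now passes the dataset through `arguments` instead of `inputs`, and `infer.py` rehydrates it by id rather than reading `run.input_datasets`. A condensed sketch of that hand-off (assuming an argparse `args` object with an `input_data` field, as in the script):

```python
# Submitting side: a DatasetConsumptionConfig placed in `arguments` resolves to the dataset id.
arguments = ["--input-data", test_dataset.as_named_input("data")]

# Script side: rebuild the TabularDataset from the id passed on the command line.
from azureml.core import Dataset, Run

run = Run.get_context()
test_dataset = Dataset.get_by_id(run.experiment.workspace, id=args.input_data)
df = test_dataset.to_pandas_dataframe()
```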
|
|||||||
@@ -1,20 +1,5 @@
|
|||||||
{
|
{
|
||||||
"cells": [
|
"cells": [
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
|
|
||||||
"Licensed under the MIT License."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
""
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
@@ -75,16 +60,6 @@
|
|||||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
|
|
||||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
@@ -118,17 +93,18 @@
|
|||||||
"dstor = ws.get_default_datastore()\n",
|
"dstor = ws.get_default_datastore()\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Choose a name for the run history container in the workspace.\n",
|
"# Choose a name for the run history container in the workspace.\n",
|
||||||
"experiment_name = 'retrain-noaaweather'\n",
|
"experiment_name = \"retrain-noaaweather\"\n",
|
||||||
"experiment = Experiment(ws, experiment_name)\n",
|
"experiment = Experiment(ws, experiment_name)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"output = {}\n",
|
"output = {}\n",
|
||||||
"output['Subscription ID'] = ws.subscription_id\n",
|
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||||
"output['Workspace'] = ws.name\n",
|
"output[\"Workspace\"] = ws.name\n",
|
||||||
"output['Resource Group'] = ws.resource_group\n",
|
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||||
"output['Location'] = ws.location\n",
|
"output[\"Location\"] = ws.location\n",
|
||||||
"output['Run History Name'] = experiment_name\n",
|
"output[\"Run History Name\"] = experiment_name\n",
|
||||||
"pd.set_option('display.max_colwidth', -1)\n",
|
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||||
|
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||||
"outputDf.T"
|
"outputDf.T"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -164,12 +140,12 @@
|
|||||||
"# Verify that cluster does not exist already\n",
|
"# Verify that cluster does not exist already\n",
|
||||||
"try:\n",
|
"try:\n",
|
||||||
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
||||||
" print('Found existing cluster, use it.')\n",
|
" print(\"Found existing cluster, use it.\")\n",
|
||||||
"except ComputeTargetException:\n",
|
"except ComputeTargetException:\n",
|
||||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
|
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||||
" max_nodes=4)\n",
|
" vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
|
||||||
|
" )\n",
|
||||||
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
||||||
"\n",
|
|
||||||
"compute_target.wait_for_completion(show_output=True)"
|
"compute_target.wait_for_completion(show_output=True)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -196,12 +172,19 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"conda_run_config.environment.docker.enabled = True\n",
|
"conda_run_config.environment.docker.enabled = True\n",
|
||||||
"\n",
|
"\n",
|
||||||
"cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]', 'applicationinsights', 'azureml-opendatasets', 'azureml-defaults'], \n",
|
"cd = CondaDependencies.create(\n",
|
||||||
" conda_packages=['numpy==1.16.2'], \n",
|
" pip_packages=[\n",
|
||||||
" pin_sdk_version=False)\n",
|
" \"azureml-sdk[automl]\",\n",
|
||||||
|
" \"applicationinsights\",\n",
|
||||||
|
" \"azureml-opendatasets\",\n",
|
||||||
|
" \"azureml-defaults\",\n",
|
||||||
|
" ],\n",
|
||||||
|
" conda_packages=[\"numpy==1.19.5\"],\n",
|
||||||
|
" pin_sdk_version=False,\n",
|
||||||
|
")\n",
|
||||||
"conda_run_config.environment.python.conda_dependencies = cd\n",
|
"conda_run_config.environment.python.conda_dependencies = cd\n",
|
||||||
"\n",
|
"\n",
|
||||||
"print('run config is ready')"
|
"print(\"run config is ready\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -218,7 +201,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# The name and target column of the Dataset to create \n",
|
"# The name and target column of the Dataset to create\n",
|
||||||
"dataset = \"NOAA-Weather-DS4\"\n",
|
"dataset = \"NOAA-Weather-DS4\"\n",
|
||||||
"target_column_name = \"temperature\""
|
"target_column_name = \"temperature\""
|
||||||
]
|
]
|
||||||
@@ -242,12 +225,14 @@
|
|||||||
"from azureml.pipeline.steps import PythonScriptStep\n",
|
"from azureml.pipeline.steps import PythonScriptStep\n",
|
||||||
"\n",
|
"\n",
|
||||||
"ds_name = PipelineParameter(name=\"ds_name\", default_value=dataset)\n",
|
"ds_name = PipelineParameter(name=\"ds_name\", default_value=dataset)\n",
|
||||||
"upload_data_step = PythonScriptStep(script_name=\"upload_weather_data.py\", \n",
|
"upload_data_step = PythonScriptStep(\n",
|
||||||
" allow_reuse=False,\n",
|
" script_name=\"upload_weather_data.py\",\n",
|
||||||
" name=\"upload_weather_data\",\n",
|
" allow_reuse=False,\n",
|
||||||
" arguments=[\"--ds_name\", ds_name],\n",
|
" name=\"upload_weather_data\",\n",
|
||||||
" compute_target=compute_target, \n",
|
" arguments=[\"--ds_name\", ds_name],\n",
|
||||||
" runconfig=conda_run_config)"
|
" compute_target=compute_target,\n",
|
||||||
|
" runconfig=conda_run_config,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -264,10 +249,11 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"data_pipeline = Pipeline(\n",
|
"data_pipeline = Pipeline(\n",
|
||||||
" description=\"pipeline_with_uploaddata\",\n",
|
" description=\"pipeline_with_uploaddata\", workspace=ws, steps=[upload_data_step]\n",
|
||||||
" workspace=ws, \n",
|
")\n",
|
||||||
" steps=[upload_data_step])\n",
|
"data_pipeline_run = experiment.submit(\n",
|
||||||
"data_pipeline_run = experiment.submit(data_pipeline, pipeline_parameters={\"ds_name\":dataset})"
|
" data_pipeline, pipeline_parameters={\"ds_name\": dataset}\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -307,13 +293,14 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"data_prep_step = PythonScriptStep(script_name=\"check_data.py\", \n",
|
"data_prep_step = PythonScriptStep(\n",
|
||||||
" allow_reuse=False,\n",
|
" script_name=\"check_data.py\",\n",
|
||||||
" name=\"check_data\",\n",
|
" allow_reuse=False,\n",
|
||||||
" arguments=[\"--ds_name\", ds_name,\n",
|
" name=\"check_data\",\n",
|
||||||
" \"--model_name\", model_name],\n",
|
" arguments=[\"--ds_name\", ds_name, \"--model_name\", model_name],\n",
|
||||||
" compute_target=compute_target, \n",
|
" compute_target=compute_target,\n",
|
||||||
" runconfig=conda_run_config)"
|
" runconfig=conda_run_config,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -323,6 +310,7 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from azureml.core import Dataset\n",
|
"from azureml.core import Dataset\n",
|
||||||
|
"\n",
|
||||||
"train_ds = Dataset.get_by_name(ws, dataset)\n",
|
"train_ds = Dataset.get_by_name(ws, dataset)\n",
|
||||||
"train_ds = train_ds.drop_columns([\"partition_date\"])"
|
"train_ds = train_ds.drop_columns([\"partition_date\"])"
|
||||||
]
|
]
|
||||||
@@ -348,21 +336,22 @@
|
|||||||
" \"iteration_timeout_minutes\": 10,\n",
|
" \"iteration_timeout_minutes\": 10,\n",
|
||||||
" \"experiment_timeout_hours\": 0.25,\n",
|
" \"experiment_timeout_hours\": 0.25,\n",
|
||||||
" \"n_cross_validations\": 3,\n",
|
" \"n_cross_validations\": 3,\n",
|
||||||
" \"primary_metric\": 'normalized_root_mean_squared_error',\n",
|
" \"primary_metric\": \"r2_score\",\n",
|
||||||
" \"max_concurrent_iterations\": 3,\n",
|
" \"max_concurrent_iterations\": 3,\n",
|
||||||
" \"max_cores_per_iteration\": -1,\n",
|
" \"max_cores_per_iteration\": -1,\n",
|
||||||
" \"verbosity\": logging.INFO,\n",
|
" \"verbosity\": logging.INFO,\n",
|
||||||
" \"enable_early_stopping\": True\n",
|
" \"enable_early_stopping\": True,\n",
|
||||||
"}\n",
|
"}\n",
|
||||||
"\n",
|
"\n",
|
||||||
"automl_config = AutoMLConfig(task = 'regression',\n",
|
"automl_config = AutoMLConfig(\n",
|
||||||
" debug_log = 'automl_errors.log',\n",
|
" task=\"regression\",\n",
|
||||||
" path = \".\",\n",
|
" debug_log=\"automl_errors.log\",\n",
|
||||||
" compute_target=compute_target,\n",
|
" path=\".\",\n",
|
||||||
" training_data = train_ds,\n",
|
" compute_target=compute_target,\n",
|
||||||
" label_column_name = target_column_name,\n",
|
" training_data=train_ds,\n",
|
||||||
" **automl_settings\n",
|
" label_column_name=target_column_name,\n",
|
||||||
" )"
|
" **automl_settings,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -373,17 +362,21 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"from azureml.pipeline.core import PipelineData, TrainingOutput\n",
|
"from azureml.pipeline.core import PipelineData, TrainingOutput\n",
|
||||||
"\n",
|
"\n",
|
||||||
"metrics_output_name = 'metrics_output'\n",
|
"metrics_output_name = \"metrics_output\"\n",
|
||||||
"best_model_output_name = 'best_model_output'\n",
|
"best_model_output_name = \"best_model_output\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"metrics_data = PipelineData(name='metrics_data',\n",
|
"metrics_data = PipelineData(\n",
|
||||||
" datastore=dstor,\n",
|
" name=\"metrics_data\",\n",
|
||||||
" pipeline_output_name=metrics_output_name,\n",
|
" datastore=dstor,\n",
|
||||||
" training_output=TrainingOutput(type='Metrics'))\n",
|
" pipeline_output_name=metrics_output_name,\n",
|
||||||
"model_data = PipelineData(name='model_data',\n",
|
" training_output=TrainingOutput(type=\"Metrics\"),\n",
|
||||||
" datastore=dstor,\n",
|
")\n",
|
||||||
" pipeline_output_name=best_model_output_name,\n",
|
"model_data = PipelineData(\n",
|
||||||
" training_output=TrainingOutput(type='Model'))"
|
" name=\"model_data\",\n",
|
||||||
|
" datastore=dstor,\n",
|
||||||
|
" pipeline_output_name=best_model_output_name,\n",
|
||||||
|
" training_output=TrainingOutput(type=\"Model\"),\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -393,10 +386,11 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"automl_step = AutoMLStep(\n",
|
"automl_step = AutoMLStep(\n",
|
||||||
" name='automl_module',\n",
|
" name=\"automl_module\",\n",
|
||||||
" automl_config=automl_config,\n",
|
" automl_config=automl_config,\n",
|
||||||
" outputs=[metrics_data, model_data],\n",
|
" outputs=[metrics_data, model_data],\n",
|
||||||
" allow_reuse=False)"
|
" allow_reuse=False,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -413,13 +407,22 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"register_model_step = PythonScriptStep(script_name=\"register_model.py\",\n",
|
"register_model_step = PythonScriptStep(\n",
|
||||||
" name=\"register_model\",\n",
|
" script_name=\"register_model.py\",\n",
|
||||||
" allow_reuse=False,\n",
|
" name=\"register_model\",\n",
|
||||||
" arguments=[\"--model_name\", model_name, \"--model_path\", model_data, \"--ds_name\", ds_name],\n",
|
" allow_reuse=False,\n",
|
||||||
" inputs=[model_data],\n",
|
" arguments=[\n",
|
||||||
" compute_target=compute_target,\n",
|
" \"--model_name\",\n",
|
||||||
" runconfig=conda_run_config)"
|
" model_name,\n",
|
||||||
|
" \"--model_path\",\n",
|
||||||
|
" model_data,\n",
|
||||||
|
" \"--ds_name\",\n",
|
||||||
|
" ds_name,\n",
|
||||||
|
" ],\n",
|
||||||
|
" inputs=[model_data],\n",
|
||||||
|
" compute_target=compute_target,\n",
|
||||||
|
" runconfig=conda_run_config,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -437,8 +440,9 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"training_pipeline = Pipeline(\n",
|
"training_pipeline = Pipeline(\n",
|
||||||
" description=\"training_pipeline\",\n",
|
" description=\"training_pipeline\",\n",
|
||||||
" workspace=ws, \n",
|
" workspace=ws,\n",
|
||||||
" steps=[data_prep_step, automl_step, register_model_step])"
|
" steps=[data_prep_step, automl_step, register_model_step],\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -447,8 +451,10 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"training_pipeline_run = experiment.submit(training_pipeline, pipeline_parameters={\n",
|
"training_pipeline_run = experiment.submit(\n",
|
||||||
" \"ds_name\": dataset, \"model_name\": \"noaaweatherds\"})"
|
" training_pipeline,\n",
|
||||||
|
" pipeline_parameters={\"ds_name\": dataset, \"model_name\": \"noaaweatherds\"},\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -477,8 +483,8 @@
|
|||||||
"pipeline_name = \"Retraining-Pipeline-NOAAWeather\"\n",
|
"pipeline_name = \"Retraining-Pipeline-NOAAWeather\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"published_pipeline = training_pipeline.publish(\n",
|
"published_pipeline = training_pipeline.publish(\n",
|
||||||
" name=pipeline_name, \n",
|
" name=pipeline_name, description=\"Pipeline that retrains AutoML model\"\n",
|
||||||
" description=\"Pipeline that retrains AutoML model\")\n",
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"published_pipeline"
|
"published_pipeline"
|
||||||
]
|
]
|
||||||
@@ -490,13 +496,17 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from azureml.pipeline.core import Schedule\n",
|
"from azureml.pipeline.core import Schedule\n",
|
||||||
"schedule = Schedule.create(workspace=ws, name=\"RetrainingSchedule\",\n",
|
"\n",
|
||||||
" pipeline_parameters={\"ds_name\": dataset, \"model_name\": \"noaaweatherds\"},\n",
|
"schedule = Schedule.create(\n",
|
||||||
" pipeline_id=published_pipeline.id, \n",
|
" workspace=ws,\n",
|
||||||
" experiment_name=experiment_name, \n",
|
" name=\"RetrainingSchedule\",\n",
|
||||||
" datastore=dstor,\n",
|
" pipeline_parameters={\"ds_name\": dataset, \"model_name\": \"noaaweatherds\"},\n",
|
||||||
" wait_for_provisioning=True,\n",
|
" pipeline_id=published_pipeline.id,\n",
|
||||||
" polling_interval=1440)"
|
" experiment_name=experiment_name,\n",
|
||||||
|
" datastore=dstor,\n",
|
||||||
|
" wait_for_provisioning=True,\n",
|
||||||
|
" polling_interval=1440,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -520,8 +530,8 @@
|
|||||||
"pipeline_name = \"DataIngestion-Pipeline-NOAAWeather\"\n",
|
"pipeline_name = \"DataIngestion-Pipeline-NOAAWeather\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"published_pipeline = training_pipeline.publish(\n",
|
"published_pipeline = training_pipeline.publish(\n",
|
||||||
" name=pipeline_name, \n",
|
" name=pipeline_name, description=\"Pipeline that updates NOAAWeather Dataset\"\n",
|
||||||
" description=\"Pipeline that updates NOAAWeather Dataset\")\n",
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"published_pipeline"
|
"published_pipeline"
|
||||||
]
|
]
|
||||||
@@ -533,13 +543,17 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from azureml.pipeline.core import Schedule\n",
|
"from azureml.pipeline.core import Schedule\n",
|
||||||
"schedule = Schedule.create(workspace=ws, name=\"RetrainingSchedule-DataIngestion\",\n",
|
"\n",
|
||||||
" pipeline_parameters={\"ds_name\":dataset},\n",
|
"schedule = Schedule.create(\n",
|
||||||
" pipeline_id=published_pipeline.id, \n",
|
" workspace=ws,\n",
|
||||||
" experiment_name=experiment_name, \n",
|
" name=\"RetrainingSchedule-DataIngestion\",\n",
|
||||||
" datastore=dstor,\n",
|
" pipeline_parameters={\"ds_name\": dataset},\n",
|
||||||
" wait_for_provisioning=True,\n",
|
" pipeline_id=published_pipeline.id,\n",
|
||||||
" polling_interval=1440)"
|
" experiment_name=experiment_name,\n",
|
||||||
|
" datastore=dstor,\n",
|
||||||
|
" wait_for_provisioning=True,\n",
|
||||||
|
" polling_interval=1440,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
|
|||||||
@@ -31,7 +31,7 @@ try:
|
|||||||
model = Model(ws, args.model_name)
|
model = Model(ws, args.model_name)
|
||||||
last_train_time = model.created_time
|
last_train_time = model.created_time
|
||||||
print("Model was last trained on {0}.".format(last_train_time))
|
print("Model was last trained on {0}.".format(last_train_time))
|
||||||
except Exception:
|
except Exception as e:
|
||||||
print("Could not get last model train time.")
|
print("Could not get last model train time.")
|
||||||
last_train_time = datetime.min.replace(tzinfo=pytz.UTC)
|
last_train_time = datetime.min.replace(tzinfo=pytz.UTC)
|
||||||
|
|
||||||
|
|||||||
@@ -25,9 +25,11 @@ datasets = [(Dataset.Scenario.TRAINING, train_ds)]
|
|||||||
|
|
||||||
# Register model with training dataset
|
# Register model with training dataset
|
||||||
|
|
||||||
model = Model.register(workspace=ws,
|
model = Model.register(
|
||||||
model_path=args.model_path,
|
workspace=ws,
|
||||||
model_name=args.model_name,
|
model_path=args.model_path,
|
||||||
datasets=datasets)
|
model_name=args.model_name,
|
||||||
|
datasets=datasets,
|
||||||
|
)
|
||||||
|
|
||||||
print("Registered version {0} of model {1}".format(model.version, model.name))
|
print("Registered version {0} of model {1}".format(model.version, model.name))
|
||||||
|
|||||||
@@ -16,26 +16,82 @@ if type(run) == _OfflineRun:
|
|||||||
else:
|
else:
|
||||||
ws = run.experiment.workspace
|
ws = run.experiment.workspace
|
||||||
|
|
||||||
usaf_list = ['725724', '722149', '723090', '722159', '723910', '720279',
|
usaf_list = [
|
||||||
'725513', '725254', '726430', '720381', '723074', '726682',
|
"725724",
|
||||||
'725486', '727883', '723177', '722075', '723086', '724053',
|
"722149",
|
||||||
'725070', '722073', '726060', '725224', '725260', '724520',
|
"723090",
|
||||||
'720305', '724020', '726510', '725126', '722523', '703333',
|
"722159",
|
||||||
'722249', '722728', '725483', '722972', '724975', '742079',
|
"723910",
|
||||||
'727468', '722193', '725624', '722030', '726380', '720309',
|
"720279",
|
||||||
'722071', '720326', '725415', '724504', '725665', '725424',
|
"725513",
|
||||||
'725066']
|
"725254",
|
||||||
|
"726430",
|
||||||
|
"720381",
|
||||||
|
"723074",
|
||||||
|
"726682",
|
||||||
|
"725486",
|
||||||
|
"727883",
|
||||||
|
"723177",
|
||||||
|
"722075",
|
||||||
|
"723086",
|
||||||
|
"724053",
|
||||||
|
"725070",
|
||||||
|
"722073",
|
||||||
|
"726060",
|
||||||
|
"725224",
|
||||||
|
"725260",
|
||||||
|
"724520",
|
||||||
|
"720305",
|
||||||
|
"724020",
|
||||||
|
"726510",
|
||||||
|
"725126",
|
||||||
|
"722523",
|
||||||
|
"703333",
|
||||||
|
"722249",
|
||||||
|
"722728",
|
||||||
|
"725483",
|
||||||
|
"722972",
|
||||||
|
"724975",
|
||||||
|
"742079",
|
||||||
|
"727468",
|
||||||
|
"722193",
|
||||||
|
"725624",
|
||||||
|
"722030",
|
||||||
|
"726380",
|
||||||
|
"720309",
|
||||||
|
"722071",
|
||||||
|
"720326",
|
||||||
|
"725415",
|
||||||
|
"724504",
|
||||||
|
"725665",
|
||||||
|
"725424",
|
||||||
|
"725066",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
def get_noaa_data(start_time, end_time):
|
def get_noaa_data(start_time, end_time):
|
||||||
columns = ['usaf', 'wban', 'datetime', 'latitude', 'longitude', 'elevation',
|
columns = [
|
||||||
'windAngle', 'windSpeed', 'temperature', 'stationName', 'p_k']
|
"usaf",
|
||||||
|
"wban",
|
||||||
|
"datetime",
|
||||||
|
"latitude",
|
||||||
|
"longitude",
|
||||||
|
"elevation",
|
||||||
|
"windAngle",
|
||||||
|
"windSpeed",
|
||||||
|
"temperature",
|
||||||
|
"stationName",
|
||||||
|
"p_k",
|
||||||
|
]
|
||||||
isd = NoaaIsdWeather(start_time, end_time, cols=columns)
|
isd = NoaaIsdWeather(start_time, end_time, cols=columns)
|
||||||
noaa_df = isd.to_pandas_dataframe()
|
noaa_df = isd.to_pandas_dataframe()
|
||||||
df_filtered = noaa_df[noaa_df["usaf"].isin(usaf_list)]
|
df_filtered = noaa_df[noaa_df["usaf"].isin(usaf_list)]
|
||||||
df_filtered.reset_index(drop=True)
|
df_filtered.reset_index(drop=True)
|
||||||
print("Received {0} rows of training data between {1} and {2}".format(
|
print(
|
||||||
df_filtered.shape[0], start_time, end_time))
|
"Received {0} rows of training data between {1} and {2}".format(
|
||||||
|
df_filtered.shape[0], start_time, end_time
|
||||||
|
)
|
||||||
|
)
|
||||||
return df_filtered
|
return df_filtered
|
||||||
|
|
||||||
|
|
||||||
@@ -54,38 +110,52 @@ end_time = datetime.utcnow()
|
|||||||
try:
|
try:
|
||||||
ds = Dataset.get_by_name(ws, args.ds_name)
|
ds = Dataset.get_by_name(ws, args.ds_name)
|
||||||
end_time_last_slice = ds.data_changed_time.replace(tzinfo=None)
|
end_time_last_slice = ds.data_changed_time.replace(tzinfo=None)
|
||||||
print("Dataset {0} last updated on {1}".format(args.ds_name,
|
print("Dataset {0} last updated on {1}".format(args.ds_name, end_time_last_slice))
|
||||||
end_time_last_slice))
|
|
||||||
except Exception:
|
except Exception:
|
||||||
print(traceback.format_exc())
|
print(traceback.format_exc())
|
||||||
print("Dataset with name {0} not found, registering new dataset.".format(args.ds_name))
|
print(
|
||||||
|
"Dataset with name {0} not found, registering new dataset.".format(args.ds_name)
|
||||||
|
)
|
||||||
register_dataset = True
|
register_dataset = True
|
||||||
end_time = datetime(2021, 5, 1, 0, 0)
|
end_time = datetime(2021, 5, 1, 0, 0)
|
||||||
end_time_last_slice = end_time - relativedelta(weeks=2)
|
end_time_last_slice = end_time - relativedelta(weeks=2)
|
||||||
|
|
||||||
train_df = get_noaa_data(end_time_last_slice, end_time)
|
try:
|
||||||
|
train_df = get_noaa_data(end_time_last_slice, end_time)
|
||||||
|
except Exception as ex:
|
||||||
|
print("get_noaa_data failed:", ex)
|
||||||
|
train_df = None
|
||||||
|
|
||||||
if train_df.size > 0:
|
if train_df is not None and train_df.size > 0:
|
||||||
print("Received {0} rows of new data after {1}.".format(
|
print(
|
||||||
train_df.shape[0], end_time_last_slice))
|
"Received {0} rows of new data after {1}.".format(
|
||||||
folder_name = "{}/{:04d}/{:02d}/{:02d}/{:02d}/{:02d}/{:02d}".format(args.ds_name, end_time.year,
|
train_df.shape[0], end_time_last_slice
|
||||||
end_time.month, end_time.day,
|
)
|
||||||
end_time.hour, end_time.minute,
|
)
|
||||||
end_time.second)
|
folder_name = "{}/{:04d}/{:02d}/{:02d}/{:02d}/{:02d}/{:02d}".format(
|
||||||
|
args.ds_name,
|
||||||
|
end_time.year,
|
||||||
|
end_time.month,
|
||||||
|
end_time.day,
|
||||||
|
end_time.hour,
|
||||||
|
end_time.minute,
|
||||||
|
end_time.second,
|
||||||
|
)
|
||||||
file_path = "{0}/data.csv".format(folder_name)
|
file_path = "{0}/data.csv".format(folder_name)
|
||||||
|
|
||||||
# Add a new partition to the registered dataset
|
# Add a new partition to the registered dataset
|
||||||
os.makedirs(folder_name, exist_ok=True)
|
os.makedirs(folder_name, exist_ok=True)
|
||||||
train_df.to_csv(file_path, index=False)
|
train_df.to_csv(file_path, index=False)
|
||||||
|
|
||||||
dstor.upload_files(files=[file_path],
|
dstor.upload_files(
|
||||||
target_path=folder_name,
|
files=[file_path], target_path=folder_name, overwrite=True, show_progress=True
|
||||||
overwrite=True,
|
)
|
||||||
show_progress=True)
|
|
||||||
else:
|
else:
|
||||||
print("No new data since {0}.".format(end_time_last_slice))
|
print("No new data since {0}.".format(end_time_last_slice))
|
||||||
|
|
||||||
if register_dataset:
|
if register_dataset:
|
||||||
ds = Dataset.Tabular.from_delimited_files(dstor.path("{}/**/*.csv".format(
|
ds = Dataset.Tabular.from_delimited_files(
|
||||||
args.ds_name)), partition_format='/{partition_date:yyyy/MM/dd/HH/mm/ss}/data.csv')
|
dstor.path("{}/**/*.csv".format(args.ds_name)),
|
||||||
|
partition_format="/{partition_date:yyyy/MM/dd/HH/mm/ss}/data.csv",
|
||||||
|
)
|
||||||
ds.register(ws, name=args.ds_name)
|
ds.register(ws, name=args.ds_name)
|
||||||
|
|||||||
@@ -0,0 +1,346 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||||
|
"\n",
|
||||||
|
"Licensed under the MIT License."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Automated Machine Learning - Codegen for AutoFeaturization \n",
|
||||||
|
"_**Autofeaturization of credit card fraudulent transactions dataset on remote compute and codegen functionality**_\n",
|
||||||
|
"\n",
|
||||||
|
"## Contents\n",
|
||||||
|
"1. [Introduction](#Introduction)\n",
|
||||||
|
"1. [Setup](#Setup)\n",
|
||||||
|
"1. [Data](#Data)\n",
|
||||||
|
"1. [Autofeaturization](#Autofeaturization)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Introduction'></a>\n",
|
||||||
|
"## Introduction"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"**Autofeaturization** lets you run an AutoML experiment to only featurize the datasets. These datasets along with the transformer are stored in AML Storage and linked to the run which can later be retrieved and used to train models. \n",
|
||||||
|
"\n",
|
||||||
|
"**To run Autofeaturization, set the number of iterations to zero and featurization as auto.**\n",
|
||||||
|
"\n",
|
||||||
|
"Please refer to [Autofeaturization and custom model training](../autofeaturization-custom-model-training/custom-model-training-from-autofeaturization-run.ipynb) for more details on the same.\n",
|
||||||
|
"\n",
|
||||||
|
"[Codegen](https://github.com/Azure/automl-codegen-preview) is a feature, which when enabled, provides a user with the script of the underlying functionality and a notebook to tweak inputs or code and rerun the same.\n",
|
||||||
|
"\n",
|
||||||
|
"In this example we use the credit card fraudulent transactions dataset to showcase how you can use AutoML for autofeaturization and further how you can enable the `Codegen` feature.\n",
|
||||||
|
"\n",
|
||||||
|
"This notebook is using remote compute to complete the featurization.\n",
|
||||||
|
"\n",
|
||||||
|
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../configuration.ipynb) notebook first if you haven't already, to establish your connection to the AzureML Workspace. \n",
|
||||||
|
"\n",
|
||||||
|
"Here you will learn how to create an autofeaturization experiment using an existing workspace with codegen feature enabled."
|
||||||
|
]
|
||||||
|
},
|
||||||
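A minimal sketch of such a featurize-only configuration, following the description above; the task, dataset, label, and compute names are placeholders, while `iterations=0` and `featurization="auto"` come straight from the text:

```python
from azureml.train.automl import AutoMLConfig

# Featurize-only AutoML run: zero model-training iterations, automatic featurization.
autofeaturization_config = AutoMLConfig(
    task="classification",         # placeholder task
    training_data=train_dataset,   # placeholder TabularDataset
    label_column_name="Class",     # placeholder label column
    compute_target=compute_target, # placeholder AmlCompute target
    iterations=0,
    featurization="auto",
)
```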
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Setup'></a>\n",
|
||||||
|
"## Setup\n",
|
||||||
|
"\n",
|
||||||
|
"As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import logging\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
|
"import azureml.core\n",
|
||||||
|
"from azureml.core.experiment import Experiment\n",
|
||||||
|
"from azureml.core.workspace import Workspace\n",
|
||||||
|
"from azureml.core.dataset import Dataset\n",
|
||||||
|
"from azureml.train.automl import AutoMLConfig"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"print(\"This notebook was created using version 1.44.0 of the Azure ML SDK\")\n",
|
||||||
|
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"ws = Workspace.from_config()\n",
|
||||||
|
"\n",
|
||||||
|
"# choose a name for experiment\n",
|
||||||
|
"experiment_name = 'automl-autofeaturization-ccard-codegen-remote'\n",
|
||||||
|
"\n",
|
||||||
|
"experiment=Experiment(ws, experiment_name)\n",
|
||||||
|
"\n",
|
||||||
|
"output = {}\n",
|
||||||
|
"output['Subscription ID'] = ws.subscription_id\n",
|
||||||
|
"output['Workspace'] = ws.name\n",
|
||||||
|
"output['Resource Group'] = ws.resource_group\n",
|
||||||
|
"output['Location'] = ws.location\n",
|
||||||
|
"output['Experiment Name'] = experiment.name\n",
|
||||||
|
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||||
|
"outputDf.T"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Create or Attach existing AmlCompute\n",
|
||||||
|
"A compute target is required to execute the Automated ML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||||
|
"\n",
|
||||||
|
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||||
|
"\n",
|
||||||
|
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
||||||
|
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||||
|
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||||
|
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||||
|
"\n",
|
||||||
|
"# Choose a name for your CPU cluster\n",
|
||||||
|
"cpu_cluster_name = \"cpu-cluster\"\n",
|
||||||
|
"\n",
|
||||||
|
"# Verify that cluster does not exist already\n",
|
||||||
|
"try:\n",
|
||||||
|
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
||||||
|
" print('Found existing cluster, use it.')\n",
|
||||||
|
"except ComputeTargetException:\n",
|
||||||
|
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
|
||||||
|
" max_nodes=6)\n",
|
||||||
|
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
||||||
|
"\n",
|
||||||
|
"compute_target.wait_for_completion(show_output=True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Data'></a>\n",
|
||||||
|
"## Data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Load Data\n",
|
||||||
|
"\n",
|
||||||
|
"Load the credit card fraudulent transactions dataset from a CSV file, containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. \n",
|
||||||
|
"\n",
|
||||||
|
"Here the autofeaturization run will featurize the training data passed in."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"##### Training Dataset"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"training_data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard_train.csv\"\n",
|
||||||
|
"training_dataset = Dataset.Tabular.from_delimited_files(training_data) # Tabular dataset\n",
|
||||||
|
"\n",
|
||||||
|
"label_column_name = 'Class' # output label"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Autofeaturization'></a>\n",
|
||||||
|
"## AutoFeaturization\n",
|
||||||
|
"\n",
|
||||||
|
"Instantiate an AutoMLConfig object. This defines the settings and data used to run the autofeaturization experiment.\n",
|
||||||
|
"\n",
|
||||||
|
"|Property|Description|\n",
|
||||||
|
"|-|-|\n",
|
||||||
|
"|**task**|classification or regression or forecasting|\n",
|
||||||
|
"|**training_data**|Input training dataset, containing both features and label column.|\n",
|
||||||
|
"|**iterations**|For an autofeaturization run, iterations will be 0.|\n",
|
||||||
|
"|**featurization**|For an autofeaturization run, featurization can be 'auto' or 'custom'.|\n",
|
||||||
|
"|**label_column_name**|The name of the label column.|\n",
|
||||||
|
"|**enable_code_generation**|For enabling codegen for the run, value would be True|\n",
|
||||||
|
"\n",
|
||||||
|
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"automl_config = AutoMLConfig(task = 'classification',\n",
|
||||||
|
" debug_log = 'automl_errors.log',\n",
|
||||||
|
" iterations = 0, # autofeaturization run can be triggered by setting iterations to 0\n",
|
||||||
|
" compute_target = compute_target,\n",
|
||||||
|
" training_data = training_dataset,\n",
|
||||||
|
" label_column_name = label_column_name,\n",
|
||||||
|
" featurization = 'auto',\n",
|
||||||
|
" verbosity = logging.INFO,\n",
|
||||||
|
" enable_code_generation = True # enable codegen\n",
|
||||||
|
" )"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Call the `submit` method on the experiment object and pass the run configuration. Depending on the data this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"remote_run = experiment.submit(automl_config, show_output = False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Results"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Widget for Monitoring Runs\n",
|
||||||
|
"\n",
|
||||||
|
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
|
||||||
|
"\n",
|
||||||
|
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.widgets import RunDetails\n",
|
||||||
|
"RunDetails(remote_run).show()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"remote_run.wait_for_completion(show_output=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Codegen Script and Notebook"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Codegen script and notebook can be found under the `Outputs + logs` section from the details page of the remote run. Please check for the `autofeaturization_notebook.ipynb` under `/outputs/generated_code`. To modify the featurization code, open `script.py` and make changes. The codegen notebook can be run with the same environment configuration as the above AutoML run."
|
||||||
|
]
|
||||||
|
},
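{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you prefer to inspect the generated code locally, you can also download it from the run artifacts. The cell below is a minimal sketch; it assumes the artifacts live under `outputs/generated_code` as described above, which may differ between SDK versions."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: download the generated script and notebook from the run artifacts.\n",
"# Assumption: the generated code is stored under outputs/generated_code (see above).\n",
"remote_run.download_files(prefix='outputs/generated_code', output_directory='./generated_code')"
]
},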
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Experiment Complete!"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"authors": [
|
||||||
|
{
|
||||||
|
"name": "bhavanatumma"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"interpreter": {
|
||||||
|
"hash": "adb464b67752e4577e3dc163235ced27038d19b7d88def00d75d1975bde5d9ab"
|
||||||
|
},
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3.6",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python36"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.6.9"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 2
|
||||||
|
}
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
name: codegen-for-autofeaturization
|
||||||
|
dependencies:
|
||||||
|
- pip:
|
||||||
|
- azureml-sdk
|
||||||
@@ -0,0 +1,735 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||||
|
"\n",
|
||||||
|
"Licensed under the MIT License."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Automated Machine Learning - AutoFeaturization (Part 1)\n",
|
||||||
|
"_**Autofeaturization of credit card fraudulent transactions dataset on remote compute**_\n",
|
||||||
|
"\n",
|
||||||
|
"## Contents\n",
|
||||||
|
"1. [Introduction](#Introduction)\n",
|
||||||
|
"1. [Setup](#Setup)\n",
|
||||||
|
"1. [Data](#Data)\n",
|
||||||
|
"1. [Autofeaturization](#Autofeaturization)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Introduction'></a>\n",
|
||||||
|
"## Introduction"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Autofeaturization is a new feature to let you as the user run an AutoML experiment to only featurize the datasets. These datasets along with the transformer will be stored in the experiment which can later be retrieved and used to train models, either via AutoML or custom training. \n",
|
||||||
|
"\n",
|
||||||
|
"**To run Autofeaturization, pass in zero iterations and featurization as auto. This will featurize the datasets and terminate the experiment. Training will not occur.**\n",
|
||||||
|
"\n",
|
||||||
|
"*Limitations - Sparse data cannot be supported at the moment. Any dataset that has extensive categorical data might be featurized into sparse data which will not be allowed as input to AutoML. Efforts are underway to support sparse data and will be updated soon.* \n",
|
||||||
|
"\n",
|
||||||
|
"In this example we use the credit card fraudulent transactions dataset to showcase how you can use AutoML for autofeaturization. The goal is to clean and featurize the training dataset.\n",
|
||||||
|
"\n",
|
||||||
|
"This notebook is using remote compute to complete the featurization.\n",
|
||||||
|
"\n",
|
||||||
|
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../configuration.ipynb) notebook first if you haven't already, to establish your connection to the AzureML Workspace. \n",
|
||||||
|
"\n",
|
||||||
|
"In the below steps, you will learn how to:\n",
|
||||||
|
"1. Create an autofeaturization experiment using an existing workspace.\n",
|
||||||
|
"2. View the featurized datasets and transformer"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Setup'></a>\n",
|
||||||
|
"## Setup\n",
|
||||||
|
"\n",
|
||||||
|
"As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import logging\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
|
"import azureml.core\n",
|
||||||
|
"from azureml.core.experiment import Experiment\n",
|
||||||
|
"from azureml.core.workspace import Workspace\n",
|
||||||
|
"from azureml.core.dataset import Dataset\n",
|
||||||
|
"from azureml.train.automl import AutoMLConfig"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"print(\"This notebook was created using version 1.44.0 of the Azure ML SDK\")\n",
|
||||||
|
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"ws = Workspace.from_config()\n",
|
||||||
|
"\n",
|
||||||
|
"# choose a name for experiment\n",
|
||||||
|
"experiment_name = 'automl-autofeaturization-ccard-remote'\n",
|
||||||
|
"\n",
|
||||||
|
"experiment=Experiment(ws, experiment_name)\n",
|
||||||
|
"\n",
|
||||||
|
"output = {}\n",
|
||||||
|
"output['Subscription ID'] = ws.subscription_id\n",
|
||||||
|
"output['Workspace'] = ws.name\n",
|
||||||
|
"output['Resource Group'] = ws.resource_group\n",
|
||||||
|
"output['Location'] = ws.location\n",
|
||||||
|
"output['Experiment Name'] = experiment.name\n",
|
||||||
|
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||||
|
"outputDf.T"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Create or Attach existing AmlCompute\n",
|
||||||
|
"A compute target is required to execute the Automated ML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||||
|
"\n",
|
||||||
|
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||||
|
"\n",
|
||||||
|
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
||||||
|
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||||
|
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||||
|
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||||
|
"\n",
|
||||||
|
"# Choose a name for your CPU cluster\n",
|
||||||
|
"cpu_cluster_name = \"cpu-cluster\"\n",
|
||||||
|
"\n",
|
||||||
|
"# Verify that cluster does not exist already\n",
|
||||||
|
"try:\n",
|
||||||
|
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
||||||
|
" print('Found existing cluster, use it.')\n",
|
||||||
|
"except ComputeTargetException:\n",
|
||||||
|
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
|
||||||
|
" max_nodes=6)\n",
|
||||||
|
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
||||||
|
"\n",
|
||||||
|
"compute_target.wait_for_completion(show_output=True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Data'></a>\n",
|
||||||
|
"## Data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Load Data\n",
|
||||||
|
"\n",
|
||||||
|
"Load the credit card fraudulent transactions dataset from a CSV file, containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. \n",
|
||||||
|
"\n",
|
||||||
|
"Here the autofeaturization run will featurize the training data passed in."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"##### Training Dataset"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"training_data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard_train.csv\"\n",
|
||||||
|
"training_dataset = Dataset.Tabular.from_delimited_files(training_data) # Tabular dataset\n",
|
||||||
|
"\n",
|
||||||
|
"label_column_name = 'Class' # output label"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Autofeaturization'></a>\n",
|
||||||
|
"## AutoFeaturization\n",
|
||||||
|
"\n",
|
||||||
|
"Instantiate an AutoMLConfig object. This defines the settings and data used to run the autofeaturization experiment.\n",
|
||||||
|
"\n",
|
||||||
|
"|Property|Description|\n",
|
||||||
|
"|-|-|\n",
|
||||||
|
"|**task**|classification or regression|\n",
|
||||||
|
"|**training_data**|Input training dataset, containing both features and label column.|\n",
|
||||||
|
"|**iterations**|For an autofeaturization run, iterations will be 0.|\n",
|
||||||
|
"|**featurization**|For an autofeaturization run, featurization will be 'auto'.|\n",
|
||||||
|
"|**label_column_name**|The name of the label column.|\n",
|
||||||
|
"\n",
|
||||||
|
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"automl_config = AutoMLConfig(task = 'classification',\n",
|
||||||
|
" debug_log = 'automl_errors.log',\n",
|
||||||
|
" iterations = 0, # autofeaturization run can be triggered by setting iterations to 0\n",
|
||||||
|
" compute_target = compute_target,\n",
|
||||||
|
" training_data = training_dataset,\n",
|
||||||
|
" label_column_name = label_column_name,\n",
|
||||||
|
" featurization = 'auto',\n",
|
||||||
|
" verbosity = logging.INFO\n",
|
||||||
|
" )"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Call the `submit` method on the experiment object and pass the run configuration. Depending on the data this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"remote_run = experiment.submit(automl_config, show_output = False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Transformer and Featurized Datasets\n",
|
||||||
|
"The given datasets have been featurized and stored under `Outputs + logs` from the details page of the remote run. The structure is shown below. The featurized dataset is stored under `/outputs/featurization/data` and the transformer is saved under `/outputs/featurization/pipeline` \n",
|
||||||
|
"\n",
|
||||||
|
"Below you will learn how to refer to the data saved in your run and retrieve the same."
|
||||||
|
]
|
||||||
|
},
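{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick check, you can list the run's artifacts to verify the saved structure. The cell below is a minimal sketch using the standard `Run.get_file_names()` API; note that artifact names are typically returned without a leading slash."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: list the featurization artifacts saved on the run.\n",
"featurization_files = [f for f in remote_run.get_file_names() if f.startswith('outputs/featurization')]\n",
"print('\\n'.join(featurization_files))"
]
},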
|
||||||
|
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Results"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Widget for Monitoring Runs\n",
|
||||||
|
"\n",
|
||||||
|
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
|
||||||
|
"\n",
|
||||||
|
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.widgets import RunDetails\n",
|
||||||
|
"RunDetails(remote_run).show()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"remote_run.wait_for_completion(show_output=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Automated Machine Learning - AutoFeaturization (Part 2)\n",
|
||||||
|
"_**Training using a custom model with the featurized data from Autofeaturization run of credit card fraudulent transactions dataset**_\n",
|
||||||
|
"\n",
|
||||||
|
"## Contents\n",
|
||||||
|
"1. [Introduction](#Introduction)\n",
|
||||||
|
"1. [Data Setup](#DataSetup)\n",
|
||||||
|
"1. [Autofeaturization Data](#AutofeaturizationData)\n",
|
||||||
|
"1. [Train](#Train)\n",
|
||||||
|
"1. [Results](#Results)\n",
|
||||||
|
"1. [Test](#Test)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Introduction'></a>\n",
|
||||||
|
"## Introduction\n",
|
||||||
|
"\n",
|
||||||
|
"Here we use the featurized dataset saved in the above run to showcase how you can perform custom training by using the transformer from an autofeaturization run to transform validation / test datasets. \n",
|
||||||
|
"\n",
|
||||||
|
"The goal is to use autofeaturized run data and transformer to transform and run a custom training experiment independently\n",
|
||||||
|
"\n",
|
||||||
|
"In the below steps, you will learn how to:\n",
|
||||||
|
"1. Read transformer from a completed autofeaturization run and transform data\n",
|
||||||
|
"2. Pull featurized data from a completed autofeaturization run\n",
|
||||||
|
"3. Run a custom training experiment with the above data\n",
|
||||||
|
"4. Check results"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='DataSetup'></a>\n",
|
||||||
|
"## Data Setup"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"We will load the featurized training data and also load the transformer from the above autofeaturized run. This transformer can then be used to transform the test data to check the accuracy of the custom model after training."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Load Test Data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"load test dataset from CSV and split into X and y columns to featurize with the transformer going forward."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"test_data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard_test.csv\"\n",
|
||||||
|
"\n",
|
||||||
|
"test_dataset = pd.read_csv(test_data)\n",
|
||||||
|
"label_column_name = 'Class'\n",
|
||||||
|
"\n",
|
||||||
|
"X_test_data = test_dataset[test_dataset.columns.difference([label_column_name])]\n",
|
||||||
|
"y_test_data = test_dataset[label_column_name].values\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Load data_transformer from the above remote run artifact"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### (Method 1)\n",
|
||||||
|
"\n",
|
||||||
|
"Method 1 allows you to read the transformer from the remote storage."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import mlflow\n",
|
||||||
|
"mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())\n",
|
||||||
|
"\n",
|
||||||
|
"# Set uri to fetch data transformer from remote parent run.\n",
|
||||||
|
"artifact_path = \"/outputs/featurization/pipeline/\"\n",
|
||||||
|
"uri = \"runs:/\" + remote_run.id + artifact_path\n",
|
||||||
|
"\n",
|
||||||
|
"print(uri)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### (Method 2)\n",
|
||||||
|
"\n",
|
||||||
|
"Method 2 downloads the transformer to the local directory and then can be used to transform the data. Uncomment to use."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"''' import pathlib\n",
|
||||||
|
"\n",
|
||||||
|
"# Download the transformer to the local directory\n",
|
||||||
|
"transformers_file_path = \"/outputs/featurization/pipeline/\"\n",
|
||||||
|
"local_path = \"./transformer\"\n",
|
||||||
|
"remote_run.download_files(prefix=transformers_file_path, output_directory=local_path, batch_size=500)\n",
|
||||||
|
"\n",
|
||||||
|
"path = pathlib.Path(\"transformer\") \n",
|
||||||
|
"path = str(path.absolute()) + transformers_file_path\n",
|
||||||
|
"str_uri = \"file:///\" + path\n",
|
||||||
|
"\n",
|
||||||
|
"print(str_uri) '''"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Transform Data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"**Note:** Not all datasets produce a y_transformer. The dataset used in the current notebook requires a transformer as the y column data is categorical."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.automl.core.shared.constants import Transformers\n",
|
||||||
|
"\n",
|
||||||
|
"transformers = mlflow.sklearn.load_model(uri) # Using method 1\n",
|
||||||
|
"data_transformers = transformers.get_transformers()\n",
|
||||||
|
"x_transformer = data_transformers[Transformers.X_TRANSFORMER]\n",
|
||||||
|
"y_transformer = data_transformers[Transformers.Y_TRANSFORMER]\n",
|
||||||
|
"\n",
|
||||||
|
"X_test = x_transformer.transform(X_test_data)\n",
|
||||||
|
"y_test = y_transformer.transform(y_test_data)"
|
||||||
|
]
|
||||||
|
},
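{
"cell_type": "markdown",
"metadata": {},
"source": [
"As the note above says, not every dataset produces a y_transformer. The cell below is a minimal defensive sketch; it assumes `get_transformers()` returns a dict keyed by the `Transformers` constants, so a missing transformer can be looked up with `.get`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch (assumption: get_transformers() returns a dict keyed by Transformers constants).\n",
"# If no y_transformer was produced, use the raw labels as-is.\n",
"y_transformer = data_transformers.get(Transformers.Y_TRANSFORMER)\n",
"y_test = y_transformer.transform(y_test_data) if y_transformer is not None else y_test_data"
]
},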
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Run the following cell to see the featurization summary of X and y transformers. "
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"X_data_summary = x_transformer.get_featurization_summary(is_user_friendly=False)\n",
|
||||||
|
"\n",
|
||||||
|
"summary_df = pd.DataFrame.from_records(X_data_summary)\n",
|
||||||
|
"summary_df"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Load Datastore\n",
|
||||||
|
"\n",
|
||||||
|
"The below data store holds the featurized datasets, hence we load and access the data. Check the path and file names according to the saved structure in your experiment `Outputs + logs` as seen in <i>Autofeaturization Part 1</i>"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.datastore import Datastore\n",
|
||||||
|
"\n",
|
||||||
|
"ds = Datastore.get(ws, \"workspaceartifactstore\")\n",
|
||||||
|
"experiment_loc = \"ExperimentRun/dcid.\" + remote_run.id\n",
|
||||||
|
"\n",
|
||||||
|
"remote_data_path = \"/outputs/featurization/data/\""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='AutofeaturizationData'></a>\n",
|
||||||
|
"## Autofeaturization Data\n",
|
||||||
|
"\n",
|
||||||
|
"We will load the training data from the previously completed Autofeaturization experiment. The resulting featurized dataframe can be passed into the custom model for training. Here we are saving the file to local from the experiment storage and reading the data."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"train_data_file_path = \"full_training_dataset.df.parquet\"\n",
|
||||||
|
"local_data_path = \"./data/\" + train_data_file_path\n",
|
||||||
|
"\n",
|
||||||
|
"remote_run.download_file(remote_data_path + train_data_file_path, local_data_path)\n",
|
||||||
|
"\n",
|
||||||
|
"full_training_data = pd.read_parquet(local_data_path)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Another way to load the data is to go to the above autofeaturization experiment and check for the featurized dataset ids under `Output datasets`. Uncomment and replace them accordingly below to use."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# train_data = Dataset.get_by_id(ws, 'cb4418ee-bac4-45ac-b055-600653bdf83a') # replace the featurized full_training_dataset id\n",
|
||||||
|
"# full_training_data = train_data.to_pandas_dataframe()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Training Data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"We are dropping the y column and weights column from the featurized training dataset."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"Y_COLUMN = \"automl_y\"\n",
|
||||||
|
"SW_COLUMN = \"automl_weights\"\n",
|
||||||
|
"\n",
|
||||||
|
"X_train = full_training_data[full_training_data.columns.difference([Y_COLUMN, SW_COLUMN])]\n",
|
||||||
|
"y_train = full_training_data[Y_COLUMN].values\n",
|
||||||
|
"sample_weight = full_training_data[SW_COLUMN].values"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Train'></a>\n",
|
||||||
|
"## Train"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Here we are passing our training data to the lightgbm classifier, any custom model can be used with your data."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import lightgbm as lgb\n",
|
||||||
|
"\n",
|
||||||
|
"model = lgb.LGBMClassifier(learning_rate=0.08,max_depth=-5,random_state=42)\n",
|
||||||
|
"model.fit(X_train, y_train, sample_weight=sample_weight, eval_set=[(X_test, y_test),(X_train, y_train)],\n",
|
||||||
|
" verbose=20,eval_metric='logloss')\n",
|
||||||
|
"\n",
|
||||||
|
"print('Training accuracy {:.4f}'.format(model.score(X_train, y_train)))\n",
|
||||||
|
"print('Testing accuracy {:.4f}'.format(model.score(X_test, y_test)))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Results'></a>\n",
|
||||||
|
"## Analyze results\n",
|
||||||
|
"\n",
|
||||||
|
"### Retrieve the Model"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"model"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"<a id='Test'></a>\n",
|
||||||
|
"## Test the fitted model\n",
|
||||||
|
"\n",
|
||||||
|
"Now that the model is trained, split the data in the same way the data was split for training (The difference here is the data is being split locally) and then run the test data through the trained model to get the predicted values."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"y_pred = model.predict(X_test)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Calculate metrics for the prediction\n",
|
||||||
|
"\n",
|
||||||
|
"Now visualize the data on a scatter plot to show what our truth (actual) values are compared to the predicted values \n",
|
||||||
|
"from the trained model that was returned."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from sklearn.metrics import confusion_matrix\n",
|
||||||
|
"from matplotlib import pyplot as plt\n",
|
||||||
|
"import numpy as np\n",
|
||||||
|
"import itertools\n",
|
||||||
|
"\n",
|
||||||
|
"cf =confusion_matrix(y_test,y_pred)\n",
|
||||||
|
"plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest')\n",
|
||||||
|
"plt.colorbar()\n",
|
||||||
|
"plt.title('Confusion Matrix')\n",
|
||||||
|
"plt.xlabel('Predicted')\n",
|
||||||
|
"plt.ylabel('Actual')\n",
|
||||||
|
"class_labels = ['False','True']\n",
|
||||||
|
"tick_marks = np.arange(len(class_labels))\n",
|
||||||
|
"plt.xticks(tick_marks,class_labels)\n",
|
||||||
|
"plt.yticks([-0.5,0,1,1.5],['','False','True',''])\n",
|
||||||
|
"# plotting text value inside cells\n",
|
||||||
|
"thresh = cf.max() / 2.\n",
|
||||||
|
"for i,j in itertools.product(range(cf.shape[0]),range(cf.shape[1])):\n",
|
||||||
|
" plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black')\n",
|
||||||
|
"plt.show()"
|
||||||
|
]
|
||||||
|
},
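{
"cell_type": "markdown",
"metadata": {},
"source": [
"Since this section is about calculating metrics, an optional short sketch using scikit-learn's `classification_report` can complement the confusion matrix with per-class precision, recall, and F1. It reuses `y_test`, `y_pred`, and `class_labels` from the cells above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: per-class precision/recall/F1 for the same predictions.\n",
"from sklearn.metrics import classification_report\n",
"\n",
"print(classification_report(y_test, y_pred, target_names=class_labels))"
]
},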
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Experiment Complete!"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"authors": [
|
||||||
|
{
|
||||||
|
"name": "bhavanatumma"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"interpreter": {
|
||||||
|
"hash": "adb464b67752e4577e3dc163235ced27038d19b7d88def00d75d1975bde5d9ab"
|
||||||
|
},
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3.6",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python36"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.6.9"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 2
|
||||||
|
}
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
name: custom-model-training-from-autofeaturization-run
|
||||||
|
dependencies:
|
||||||
|
- pip:
|
||||||
|
- azureml-sdk
|
||||||
@@ -1,17 +1,20 @@
|
|||||||
name: azure_automl_experimental
|
name: azure_automl_experimental
|
||||||
dependencies:
|
dependencies:
|
||||||
# The python interpreter version.
|
# The python interpreter version.
|
||||||
# Currently Azure ML only supports 3.5.2 and later.
|
# Currently Azure ML only supports 3.6.0 and later.
|
||||||
- pip<=19.3.1
|
- pip<=20.2.4
|
||||||
- python>=3.5.2,<3.8
|
- python>=3.6.0,<3.9
|
||||||
- nb_conda
|
- cython==0.29.14
|
||||||
- cython
|
- urllib3==1.26.7
|
||||||
- urllib3<1.24
|
|
||||||
- PyJWT < 2.0.0
|
- PyJWT < 2.0.0
|
||||||
- numpy==1.18.5
|
- numpy==1.21.6
|
||||||
|
- pywin32==227
|
||||||
|
- cryptography<37.0.0
|
||||||
|
|
||||||
- pip:
|
- pip:
|
||||||
# Required packages for AzureML execution, history, and data preparation.
|
# Required packages for AzureML execution, history, and data preparation.
|
||||||
|
- azure-core==1.24.1
|
||||||
|
- azure-identity==1.7.0
|
||||||
- azureml-defaults
|
- azureml-defaults
|
||||||
- azureml-sdk
|
- azureml-sdk
|
||||||
- azureml-widgets
|
- azureml-widgets
|
||||||
|
|||||||
@@ -1,18 +1,22 @@
|
|||||||
name: azure_automl_experimental
|
name: azure_automl_experimental
|
||||||
|
channels:
|
||||||
|
- conda-forge
|
||||||
|
- main
|
||||||
dependencies:
|
dependencies:
|
||||||
# The python interpreter version.
|
# The python interpreter version.
|
||||||
# Currently Azure ML only supports 3.5.2 and later.
|
# Currently Azure ML only supports 3.6.0 and later.
|
||||||
- pip<=19.3.1
|
- pip<=20.2.4
|
||||||
- nomkl
|
- nomkl
|
||||||
- python>=3.5.2,<3.8
|
- python>=3.6.0,<3.9
|
||||||
- nb_conda
|
- urllib3==1.26.7
|
||||||
- cython
|
|
||||||
- urllib3<1.24
|
|
||||||
- PyJWT < 2.0.0
|
- PyJWT < 2.0.0
|
||||||
- numpy==1.18.5
|
- numpy>=1.21.6,<=1.22.3
|
||||||
|
- cryptography<37.0.0
|
||||||
|
|
||||||
- pip:
|
- pip:
|
||||||
# Required packages for AzureML execution, history, and data preparation.
|
# Required packages for AzureML execution, history, and data preparation.
|
||||||
|
- azure-core==1.24.1
|
||||||
|
- azure-identity==1.7.0
|
||||||
- azureml-defaults
|
- azureml-defaults
|
||||||
- azureml-sdk
|
- azureml-sdk
|
||||||
- azureml-widgets
|
- azureml-widgets
|
||||||
|
|||||||
@@ -92,7 +92,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
|
"print(\"This notebook was created using version 1.44.0 of the Azure ML SDK\")\n",
|
||||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -115,7 +115,7 @@
|
|||||||
"output['Resource Group'] = ws.resource_group\n",
|
"output['Resource Group'] = ws.resource_group\n",
|
||||||
"output['Location'] = ws.location\n",
|
"output['Location'] = ws.location\n",
|
||||||
"output['Experiment Name'] = experiment.name\n",
|
"output['Experiment Name'] = experiment.name\n",
|
||||||
"pd.set_option('display.max_colwidth', -1)\n",
|
"pd.set_option('display.max_colwidth', None)\n",
|
||||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||||
"outputDf.T"
|
"outputDf.T"
|
||||||
]
|
]
|
||||||
|
|||||||
@@ -91,7 +91,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
|
"print(\"This notebook was created using version 1.44.0 of the Azure ML SDK\")\n",
|
||||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -180,6 +180,29 @@
|
|||||||
"label = \"ERP\"\n"
|
"label = \"ERP\"\n"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"The split data will be used in the remote compute by ModelProxy and locally to compare results.\n",
|
||||||
|
"So, we need to persist the split data to avoid descrepencies from different package versions in the local and remote."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"ds = ws.get_default_datastore()\n",
|
||||||
|
"\n",
|
||||||
|
"train_data = Dataset.Tabular.register_pandas_dataframe(\n",
|
||||||
|
" train_data.to_pandas_dataframe(), target=(ds, \"machineTrainData\"), name=\"train_data\")\n",
|
||||||
|
"\n",
|
||||||
|
"test_data = Dataset.Tabular.register_pandas_dataframe(\n",
|
||||||
|
" test_data.to_pandas_dataframe(), target=(ds, \"machineTestData\"), name=\"test_data\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
@@ -304,7 +327,8 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"#### Show hyperparameters\n",
|
"#### Show hyperparameters\n",
|
||||||
"Show the model pipeline used for the best run with its hyperparameters."
|
"Show the model pipeline used for the best run with its hyperparameters.\n",
|
||||||
|
"For ensemble pipelines it shows the iterations and algorithms that are ensembled."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -313,8 +337,19 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"run_properties = json.loads(best_run.get_details()['properties']['pipeline_script'])\n",
|
"run_properties = best_run.get_details()['properties']\n",
|
||||||
"print(json.dumps(run_properties, indent = 1)) "
|
"pipeline_script = json.loads(run_properties['pipeline_script'])\n",
|
||||||
|
"print(json.dumps(pipeline_script, indent = 1)) \n",
|
||||||
|
"\n",
|
||||||
|
"if 'ensembled_iterations' in run_properties:\n",
|
||||||
|
" print(\"\")\n",
|
||||||
|
" print(\"Ensembled Iterations\")\n",
|
||||||
|
" print(run_properties['ensembled_iterations'])\n",
|
||||||
|
" \n",
|
||||||
|
"if 'ensembled_algorithms' in run_properties:\n",
|
||||||
|
" print(\"\")\n",
|
||||||
|
" print(\"Ensembled Algorithms\")\n",
|
||||||
|
" print(run_properties['ensembled_algorithms'])"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ import json
|
|||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
import pandas as pd
|
import pandas as pd
|
||||||
|
|
||||||
from matplotlib import pyplot as plt
|
from matplotlib import pyplot as plt
|
||||||
@@ -121,7 +122,7 @@ def calculate_scores_and_build_plots(
|
|||||||
input_dir: str, output_dir: str, automl_settings: Dict[str, Any]
|
input_dir: str, output_dir: str, automl_settings: Dict[str, Any]
|
||||||
):
|
):
|
||||||
os.makedirs(output_dir, exist_ok=True)
|
os.makedirs(output_dir, exist_ok=True)
|
||||||
grains = automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
|
grains = automl_settings.get(constants.TimeSeries.TIME_SERIES_ID_COLUMN_NAMES)
|
||||||
time_column_name = automl_settings.get(constants.TimeSeries.TIME_COLUMN_NAME)
|
time_column_name = automl_settings.get(constants.TimeSeries.TIME_COLUMN_NAME)
|
||||||
if grains is None:
|
if grains is None:
|
||||||
grains = []
|
grains = []
|
||||||
@@ -146,6 +147,9 @@ def calculate_scores_and_build_plots(
|
|||||||
_draw_one_plot(one_forecast, time_column_name, grains, pdf)
|
_draw_one_plot(one_forecast, time_column_name, grains, pdf)
|
||||||
pdf.close()
|
pdf.close()
|
||||||
forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False)
|
forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False)
|
||||||
|
# Remove np.NaN and np.inf from the prediction and actuals data.
|
||||||
|
forecast_df.replace([np.inf, -np.inf], np.nan, inplace=True)
|
||||||
|
forecast_df.dropna(subset=[ACTUALS, PREDICTIONS], inplace=True)
|
||||||
metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER])
|
metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER])
|
||||||
metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False)
|
metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False)
|
||||||
|
|
||||||
|
|||||||
@@ -86,7 +86,8 @@
|
|||||||
"output[\"Resource Group\"] = ws.resource_group\n",
|
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||||
"output[\"Location\"] = ws.location\n",
|
"output[\"Location\"] = ws.location\n",
|
||||||
"output[\"Default datastore name\"] = dstore.name\n",
|
"output[\"Default datastore name\"] = dstore.name\n",
|
||||||
"pd.set_option(\"display.max_colwidth\", -1)\n",
|
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||||
|
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||||
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||||
"outputDf.T"
|
"outputDf.T"
|
||||||
]
|
]
|
||||||
@@ -322,10 +323,10 @@
|
|||||||
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
|
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||||
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
|
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||||
"| **label_column_name** | The name of the label column. |\n",
|
"| **label_column_name** | The name of the label column. |\n",
|
||||||
"| **max_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
|
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
|
||||||
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
|
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
|
||||||
"| **time_column_name** | The name of your time column. |\n",
|
"| **time_column_name** | The name of your time column. |\n",
|
||||||
"| **grain_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
|
"| **time_series_id_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
|
||||||
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
|
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
|
||||||
"| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
|
"| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
|
||||||
]
|
]
|
||||||
@@ -354,8 +355,8 @@
|
|||||||
" \"label_column_name\": TARGET_COLNAME,\n",
|
" \"label_column_name\": TARGET_COLNAME,\n",
|
||||||
" \"n_cross_validations\": 3,\n",
|
" \"n_cross_validations\": 3,\n",
|
||||||
" \"time_column_name\": TIME_COLNAME,\n",
|
" \"time_column_name\": TIME_COLNAME,\n",
|
||||||
" \"max_horizon\": 6,\n",
|
" \"forecast_horizon\": 6,\n",
|
||||||
" \"grain_column_names\": partition_column_names,\n",
|
" \"time_series_id_column_names\": partition_column_names,\n",
|
||||||
" \"track_child_runs\": False,\n",
|
" \"track_child_runs\": False,\n",
|
||||||
"}\n",
|
"}\n",
|
||||||
"\n",
|
"\n",
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ import json
|
|||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
import pandas as pd
|
import pandas as pd
|
||||||
|
|
||||||
from matplotlib import pyplot as plt
|
from matplotlib import pyplot as plt
|
||||||
@@ -146,6 +147,9 @@ def calculate_scores_and_build_plots(
|
|||||||
_draw_one_plot(one_forecast, time_column_name, grains, pdf)
|
_draw_one_plot(one_forecast, time_column_name, grains, pdf)
|
||||||
pdf.close()
|
pdf.close()
|
||||||
forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False)
|
forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False)
|
||||||
|
# Remove np.NaN and np.inf from the prediction and actuals data.
|
||||||
|
forecast_df.replace([np.inf, -np.inf], np.nan, inplace=True)
|
||||||
|
forecast_df.dropna(subset=[ACTUALS, PREDICTIONS], inplace=True)
|
||||||
metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER])
|
metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER])
|
||||||
metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False)
|
metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False)
|
||||||
|
|
||||||
|
|||||||
@@ -100,7 +100,8 @@
|
|||||||
"output[\"SKU\"] = ws.sku\n",
|
"output[\"SKU\"] = ws.sku\n",
|
||||||
"output[\"Resource Group\"] = ws.resource_group\n",
|
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||||
"output[\"Location\"] = ws.location\n",
|
"output[\"Location\"] = ws.location\n",
|
||||||
"pd.set_option(\"display.max_colwidth\", -1)\n",
|
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||||
|
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||||
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||||
"outputDf.T"
|
"outputDf.T"
|
||||||
]
|
]
|
||||||
@@ -523,7 +524,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"model_list = Model.list(ws, tags={\"experiment\": \"automl-backtesting\"})\n",
|
"model_list = Model.list(ws, tags=[[\"experiment\", \"automl-backtesting\"]])\n",
|
||||||
"model_data = {\"name\": [], \"last_training_date\": []}\n",
|
"model_data = {\"name\": [], \"last_training_date\": []}\n",
|
||||||
"for model in model_list:\n",
|
"for model in model_list:\n",
|
||||||
" if (\n",
|
" if (\n",
|
||||||
|
|||||||
@@ -72,6 +72,8 @@ def get_backtest_pipeline(
|
|||||||
run_config.docker.use_docker = True
|
run_config.docker.use_docker = True
|
||||||
run_config.environment = env
|
run_config.environment = env
|
||||||
|
|
||||||
|
utilities.set_environment_variables_for_run(run_config)
|
||||||
|
|
||||||
split_data = PipelineData(name="split_data_output", datastore=None).as_dataset()
|
split_data = PipelineData(name="split_data_output", datastore=None).as_dataset()
|
||||||
split_step = PythonScriptStep(
|
split_step = PythonScriptStep(
|
||||||
name="split_data_for_backtest",
|
name="split_data_for_backtest",
|
||||||
@@ -114,6 +116,7 @@ def get_backtest_pipeline(
|
|||||||
run_invocation_timeout=3600,
|
run_invocation_timeout=3600,
|
||||||
node_count=node_count,
|
node_count=node_count,
|
||||||
)
|
)
|
||||||
|
utilities.set_environment_variables_for_run(back_test_config)
|
||||||
forecasts = PipelineData(name="forecasts", datastore=None)
|
forecasts = PipelineData(name="forecasts", datastore=None)
|
||||||
if model_name:
|
if model_name:
|
||||||
parallel_step_name = "{}-backtest".format(model_name.replace("_", "-"))
|
parallel_step_name = "{}-backtest".format(model_name.replace("_", "-"))
|
||||||
@@ -149,12 +152,7 @@ def get_backtest_pipeline(
|
|||||||
inputs=[forecasts.as_mount()],
|
inputs=[forecasts.as_mount()],
|
||||||
outputs=[data_results],
|
outputs=[data_results],
|
||||||
source_directory=PROJECT_FOLDER,
|
source_directory=PROJECT_FOLDER,
|
||||||
arguments=[
|
arguments=["--forecasts", forecasts, "--output-dir", data_results],
|
||||||
"--forecasts",
|
|
||||||
forecasts,
|
|
||||||
"--output-dir",
|
|
||||||
data_results,
|
|
||||||
],
|
|
||||||
runconfig=run_config,
|
runconfig=run_config,
|
||||||
compute_target=compute_target,
|
compute_target=compute_target,
|
||||||
allow_reuse=False,
|
allow_reuse=False,
|
||||||
|
|||||||
@@ -1,20 +0,0 @@
-DATE,grain,BeerProduction
-2017-01-01,grain,9049
-2017-02-01,grain,10458
-2017-03-01,grain,12489
-2017-04-01,grain,11499
-2017-05-01,grain,13553
-2017-06-01,grain,14740
-2017-07-01,grain,11424
-2017-08-01,grain,13412
-2017-09-01,grain,11917
-2017-10-01,grain,12721
-2017-11-01,grain,13272
-2017-12-01,grain,14278
-2018-01-01,grain,9572
-2018-02-01,grain,10423
-2018-03-01,grain,12667
-2018-04-01,grain,11904
-2018-05-01,grain,14120
-2018-06-01,grain,14565
-2018-07-01,grain,12622
@@ -1,301 +0,0 @@
-DATE,grain,BeerProduction
[300 deleted rows of monthly data, 1992-01-01 through 2016-12-01, omitted here for brevity]
@@ -1,4 +0,0 @@
-name: auto-ml-forecasting-beer-remote
-dependencies:
-- pip:
-  - azureml-sdk
@@ -64,22 +64,23 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"import azureml.core\n",
-"import pandas as pd\n",
-"import numpy as np\n",
+"import json\n",
 "import logging\n",
-"\n",
-"from azureml.core import Workspace, Experiment, Dataset\n",
-"from azureml.train.automl import AutoMLConfig\n",
 "from datetime import datetime\n",
-"from azureml.automl.core.featurization import FeaturizationConfig"
+"\n",
+"import azureml.core\n",
+"import numpy as np\n",
+"import pandas as pd\n",
+"from azureml.automl.core.featurization import FeaturizationConfig\n",
+"from azureml.core import Dataset, Experiment, Workspace\n",
+"from azureml.train.automl import AutoMLConfig"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
+"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
 ]
 },
 {
@@ -88,7 +89,6 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -119,7 +119,8 @@
 "output[\"Resource Group\"] = ws.resource_group\n",
 "output[\"Location\"] = ws.location\n",
 "output[\"Run History Name\"] = experiment_name\n",
-"pd.set_option(\"display.max_colwidth\", -1)\n",
+"output[\"SDK Version\"] = azureml.core.VERSION\n",
+"pd.set_option(\"display.max_colwidth\", None)\n",
 "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
 "outputDf.T"
 ]
@@ -398,8 +399,8 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"### Retrieve the Best Model\n",
-"Below we select the best model from all the training iterations using get_output method."
+"### Retrieve the Best Run details\n",
+"Below we retrieve the best Run object from among all the runs in the experiment."
 ]
 },
 {
@@ -408,8 +409,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"best_run, fitted_model = remote_run.get_output()\n",
-"fitted_model.steps"
+"best_run = remote_run.get_best_child()\n",
+"best_run"
 ]
 },
 {
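A hedged sketch of the new retrieval pattern: get_best_child returns the best child Run without rehydrating the fitted model in the notebook's environment, and any artifacts can then be downloaded from that run; `remote_run` is the submitted AutoML run from earlier in the notebook:

best_run = remote_run.get_best_child()
print(best_run.id, best_run.get_metrics().get("normalized_root_mean_squared_error"))

# model files and featurization JSONs live under the run's outputs/ folder
for name in best_run.get_file_names():
    if name.startswith("outputs/"):
        print(name)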
@@ -418,7 +419,7 @@
 "source": [
 "## Featurization\n",
 "\n",
-"You can access the engineered feature names generated in time-series featurization. Note that a number of named holiday periods are represented. We recommend that you have at least one year of data when using this feature to ensure that all yearly holidays are captured in the training featurization."
+"We can look at the engineered feature names generated in time-series featurization via the JSON file named 'engineered_feature_names.json' under the run outputs. Note that a number of named holiday periods are represented. We recommend that you have at least one year of data when using this feature to ensure that all yearly holidays are captured in the training featurization."
 ]
 },
 {
@@ -427,7 +428,14 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"fitted_model.named_steps[\"timeseriestransformer\"].get_engineered_feature_names()"
+"# Download the JSON file locally\n",
+"best_run.download_file(\n",
+"    \"outputs/engineered_feature_names.json\", \"engineered_feature_names.json\"\n",
+")\n",
+"with open(\"engineered_feature_names.json\", \"r\") as f:\n",
+"    records = json.load(f)\n",
+"\n",
+"records"
 ]
 },
 {
@@ -451,12 +459,26 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"# Get the featurization summary as a list of JSON\n",
-"featurization_summary = fitted_model.named_steps[\n",
-"    \"timeseriestransformer\"\n",
-"].get_featurization_summary()\n",
-"# View the featurization summary as a pandas dataframe\n",
-"pd.DataFrame.from_records(featurization_summary)"
+"# Download the featurization summary JSON file locally\n",
+"best_run.download_file(\n",
+"    \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
+")\n",
+"\n",
+"# Render the JSON as a pandas DataFrame\n",
+"with open(\"featurization_summary.json\", \"r\") as f:\n",
+"    records = json.load(f)\n",
+"fs = pd.DataFrame.from_records(records)\n",
+"\n",
+"# View a summary of the featurization\n",
+"fs[\n",
+"    [\n",
+"        \"RawFeatureName\",\n",
+"        \"TypeDetected\",\n",
+"        \"Dropped\",\n",
+"        \"EngineeredFeatureCount\",\n",
+"        \"Transformations\",\n",
+"    ]\n",
+"]"
 ]
 },
 {
@@ -68,6 +68,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
+"import json\n",
 "import logging\n",
 "\n",
 "from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score\n",
@@ -90,7 +91,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
+"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
 ]
 },
 {
@@ -99,7 +100,6 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -132,7 +132,8 @@
 "output[\"Resource Group\"] = ws.resource_group\n",
 "output[\"Location\"] = ws.location\n",
 "output[\"Run History Name\"] = experiment_name\n",
-"pd.set_option(\"display.max_colwidth\", -1)\n",
+"output[\"SDK Version\"] = azureml.core.VERSION\n",
+"pd.set_option(\"display.max_colwidth\", None)\n",
 "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
 "outputDf.T"
 ]
@@ -398,8 +399,8 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"## Retrieve the Best Model\n",
-"Below we select the best model from all the training iterations using get_output method."
+"## Retrieve the Best Run details\n",
+"Below we retrieve the best Run object from among all the runs in the experiment."
 ]
 },
 {
@@ -408,8 +409,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"best_run, fitted_model = remote_run.get_output()\n",
-"fitted_model.steps"
+"best_run = remote_run.get_best_child()\n",
+"best_run"
 ]
 },
 {
@@ -417,7 +418,7 @@
 "metadata": {},
 "source": [
 "## Featurization\n",
-"You can access the engineered feature names generated in time-series featurization."
+"We can look at the engineered feature names generated in time-series featurization via the JSON file named 'engineered_feature_names.json' under the run outputs."
 ]
 },
 {
@@ -426,7 +427,14 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"fitted_model.named_steps[\"timeseriestransformer\"].get_engineered_feature_names()"
+"# Download the JSON file locally\n",
+"best_run.download_file(\n",
+"    \"outputs/engineered_feature_names.json\", \"engineered_feature_names.json\"\n",
+")\n",
+"with open(\"engineered_feature_names.json\", \"r\") as f:\n",
+"    records = json.load(f)\n",
+"\n",
+"records"
 ]
 },
 {
@@ -449,12 +457,26 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"# Get the featurization summary as a list of JSON\n",
-"featurization_summary = fitted_model.named_steps[\n",
-"    \"timeseriestransformer\"\n",
-"].get_featurization_summary()\n",
-"# View the featurization summary as a pandas dataframe\n",
-"pd.DataFrame.from_records(featurization_summary)"
+"# Download the featurization summary JSON file locally\n",
+"best_run.download_file(\n",
+"    \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
+")\n",
+"\n",
+"# Render the JSON as a pandas DataFrame\n",
+"with open(\"featurization_summary.json\", \"r\") as f:\n",
+"    records = json.load(f)\n",
+"fs = pd.DataFrame.from_records(records)\n",
+"\n",
+"# View a summary of the featurization\n",
+"fs[\n",
+"    [\n",
+"        \"RawFeatureName\",\n",
+"        \"TypeDetected\",\n",
+"        \"Dropped\",\n",
+"        \"EngineeredFeatureCount\",\n",
+"        \"Transformations\",\n",
+"    ]\n",
+"]"
 ]
 },
 {
@@ -481,7 +503,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"### Retreiving forecasts from the model\n",
+"### Retrieving forecasts from the model\n",
 "We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and executed on the remote compute."
 ]
 },
@@ -641,7 +663,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"### Retrieve the Best Model"
+"### Retrieve the Best Run details"
 ]
 },
 {
@@ -650,7 +672,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"best_run_lags, fitted_model_lags = advanced_remote_run.get_output()"
+"best_run_lags = advanced_remote_run.get_best_child()\n",
+"best_run_lags"
 ]
 },
 {
@@ -85,7 +85,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
+"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
 ]
 },
 {
@@ -94,7 +94,6 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -122,7 +121,8 @@
 "output[\"Resource Group\"] = ws.resource_group\n",
 "output[\"Location\"] = ws.location\n",
 "output[\"Run History Name\"] = experiment_name\n",
-"pd.set_option(\"display.max_colwidth\", -1)\n",
+"output[\"SDK Version\"] = azureml.core.VERSION\n",
+"pd.set_option(\"display.max_colwidth\", None)\n",
 "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
 "outputDf.T"
 ]
@@ -647,13 +647,11 @@
 "    & (fulldata[time_column_name] <= forecast_origin + horizon)\n",
 "]\n",
 "\n",
-"y_past = X_past.pop(target_column_name).values.astype(np.float)\n",
-"y_future = X_future.pop(target_column_name).values.astype(np.float)\n",
+"y_past = X_past.pop(target_column_name).values.astype(float)\n",
+"y_future = X_future.pop(target_column_name).values.astype(float)\n",
 "\n",
 "# Now take y_future and turn it into question marks\n",
-"y_query = y_future.copy().astype(\n",
-"    np.float\n",
-") # because sometimes life hands you an int\n",
+"y_query = y_future.copy().astype(float) # because sometimes life hands you an int\n",
 "y_query.fill(np.NaN)\n",
 "\n",
 "print(\"X_past is \" + str(X_past.shape) + \" - shaped\")\n",
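The astype changes above track NumPy's deprecation of the np.float alias (deprecated in 1.20, removed in 1.24); the builtin float maps to the same float64 dtype, as this small check illustrates:

import numpy as np

y = np.array([1, 2, 3]).astype(float)  # same result np.float used to give
assert y.dtype == np.dtype("float64")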
@@ -30,7 +30,7 @@
 },
 "source": [
 "# Automated Machine Learning\n",
-"**Beer Production Forecasting**\n",
+"**Github DAU Forecasting**\n",
 "\n",
 "## Contents\n",
 "1. [Introduction](#Introduction)\n",
@@ -48,7 +48,7 @@
 },
 "source": [
 "## Introduction\n",
-"This notebook demonstrates demand forecasting for Beer Production Dataset using AutoML.\n",
+"This notebook demonstrates demand forecasting for Github Daily Active Users Dataset using AutoML.\n",
 "\n",
 "AutoML highlights here include using Deep Learning forecasts, Arima, Prophet, Remote Execution and Remote Inferencing, and working with the `forecast` function. Please also look at the additional forecasting notebooks, which document lagging, rolling windows, forecast quantiles, other ways to use the forecast function, and forecaster deployment.\n",
 "\n",
@@ -57,7 +57,7 @@
 "Notebook synopsis:\n",
 "\n",
 "1. Creating an Experiment in an existing Workspace\n",
-"2. Configuration and remote run of AutoML for a time-series model exploring Regression learners, Arima, Prophet and DNNs\n",
+"2. Configuration and remote run of AutoML for a time-series model exploring DNNs\n",
 "4. Evaluating the fitted model using a rolling test "
 ]
 },
@@ -92,8 +92,7 @@
 "# Squash warning messages for cleaner output in the notebook\n",
 "warnings.showwarning = lambda *args, **kwargs: None\n",
 "\n",
-"from azureml.core.workspace import Workspace\n",
-"from azureml.core.experiment import Experiment\n",
+"from azureml.core import Workspace, Experiment, Dataset\n",
 "from azureml.train.automl import AutoMLConfig\n",
 "from matplotlib import pyplot as plt\n",
 "from sklearn.metrics import mean_absolute_error, mean_squared_error\n",
@@ -104,7 +103,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
+"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
 ]
 },
 {
@@ -113,7 +112,6 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -139,7 +137,7 @@
 "ws = Workspace.from_config()\n",
 "\n",
 "# choose a name for the run history container in the workspace\n",
-"experiment_name = \"beer-remote-cpu\"\n",
+"experiment_name = \"github-remote-cpu\"\n",
 "\n",
 "experiment = Experiment(ws, experiment_name)\n",
 "\n",
@@ -149,7 +147,8 @@
 "output[\"Resource Group\"] = ws.resource_group\n",
 "output[\"Location\"] = ws.location\n",
 "output[\"Run History Name\"] = experiment_name\n",
-"pd.set_option(\"display.max_colwidth\", -1)\n",
+"output[\"SDK Version\"] = azureml.core.VERSION\n",
+"pd.set_option(\"display.max_colwidth\", None)\n",
 "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
 "outputDf.T"
 ]
@@ -180,7 +179,7 @@
 "from azureml.core.compute_target import ComputeTargetException\n",
 "\n",
 "# Choose a name for your CPU cluster\n",
-"cpu_cluster_name = \"beer-cluster\"\n",
+"cpu_cluster_name = \"github-cluster\"\n",
 "\n",
 "# Verify that cluster does not exist already\n",
 "try:\n",
@@ -203,7 +202,7 @@
 },
 "source": [
 "## Data\n",
-"Read Beer demand data from file, and preview data."
+"Read Github DAU data from file, and preview data."
 ]
 },
 {
@@ -246,21 +245,19 @@
 "plt.tight_layout()\n",
 "\n",
 "plt.subplot(2, 1, 1)\n",
-"plt.title(\"Beer Production By Year\")\n",
-"df = pd.read_csv(\n",
-"    \"Beer_no_valid_split_train.csv\", parse_dates=True, index_col=\"DATE\"\n",
-").drop(columns=\"grain\")\n",
+"plt.title(\"Github Daily Active User By Year\")\n",
+"df = pd.read_csv(\"github_dau_2011-2018_train.csv\", parse_dates=True, index_col=\"date\")\n",
 "test_df = pd.read_csv(\n",
-"    \"Beer_no_valid_split_test.csv\", parse_dates=True, index_col=\"DATE\"\n",
-").drop(columns=\"grain\")\n",
+"    \"github_dau_2011-2018_test.csv\", parse_dates=True, index_col=\"date\"\n",
+")\n",
 "plt.plot(df)\n",
 "\n",
 "plt.subplot(2, 1, 2)\n",
-"plt.title(\"Beer Production By Month\")\n",
+"plt.title(\"Github Daily Active User By Month\")\n",
 "groups = df.groupby(df.index.month)\n",
 "months = concat([DataFrame(x[1].values) for x in groups], axis=1)\n",
 "months = DataFrame(months)\n",
-"months.columns = range(1, 13)\n",
+"months.columns = range(1, 49)\n",
 "months.boxplot()\n",
 "\n",
 "plt.show()"
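One note on the 12 to 48 column change above: each monthly group now carries the four value columns of the GitHub frame (count, day_of_week, month_of_year, holiday), so the concat yields 12 x 4 = 48 columns. A small sketch of that shape arithmetic on a toy frame, illustrative only:

import numpy as np
import pandas as pd

idx = pd.to_datetime(
    ["2017-06-01", "2017-06-02", "2017-06-03", "2017-07-01", "2017-07-02", "2017-07-03"]
)
df = pd.DataFrame(
    np.arange(24).reshape(6, 4),
    columns=["count", "day_of_week", "month_of_year", "holiday"],
    index=idx,
)
groups = df.groupby(df.index.month)
months = pd.concat([pd.DataFrame(g.values) for _, g in groups], axis=1)
print(months.shape[1])  # 8 = 2 months x 4 value columns; the notebook gets 12 x 4 = 48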
@@ -275,10 +272,10 @@
 },
 "outputs": [],
 "source": [
-"target_column_name = \"BeerProduction\"\n",
-"time_column_name = \"DATE\"\n",
+"target_column_name = \"count\"\n",
+"time_column_name = \"date\"\n",
 "time_series_id_column_names = []\n",
-"freq = \"M\" # Monthly data"
+"freq = \"D\" # Daily data"
 ]
 },
 {
@@ -301,40 +298,21 @@
 "from helper import split_full_for_forecasting\n",
 "\n",
 "train, valid = split_full_for_forecasting(df, time_column_name)\n",
-"train.to_csv(\"train.csv\")\n",
-"valid.to_csv(\"valid.csv\")\n",
-"test_df.to_csv(\"test.csv\")\n",
+"\n",
+"# Reset index to create a Tabular Dataset.\n",
+"train.reset_index(inplace=True)\n",
+"valid.reset_index(inplace=True)\n",
+"test_df.reset_index(inplace=True)\n",
 "\n",
 "datastore = ws.get_default_datastore()\n",
-"datastore.upload_files(\n",
-"    files=[\"./train.csv\"],\n",
-"    target_path=\"beer-dataset/tabular/\",\n",
-"    overwrite=True,\n",
-"    show_progress=True,\n",
+"train_dataset = Dataset.Tabular.register_pandas_dataframe(\n",
+"    train, target=(datastore, \"dataset/\"), name=\"Github_DAU_train\"\n",
 ")\n",
-"datastore.upload_files(\n",
-"    files=[\"./valid.csv\"],\n",
-"    target_path=\"beer-dataset/tabular/\",\n",
-"    overwrite=True,\n",
-"    show_progress=True,\n",
+"valid_dataset = Dataset.Tabular.register_pandas_dataframe(\n",
+"    valid, target=(datastore, \"dataset/\"), name=\"Github_DAU_valid\"\n",
 ")\n",
-"datastore.upload_files(\n",
-"    files=[\"./test.csv\"],\n",
-"    target_path=\"beer-dataset/tabular/\",\n",
-"    overwrite=True,\n",
-"    show_progress=True,\n",
-")\n",
-"\n",
-"from azureml.core import Dataset\n",
-"\n",
-"train_dataset = Dataset.Tabular.from_delimited_files(\n",
-"    path=[(datastore, \"beer-dataset/tabular/train.csv\")]\n",
-")\n",
-"valid_dataset = Dataset.Tabular.from_delimited_files(\n",
-"    path=[(datastore, \"beer-dataset/tabular/valid.csv\")]\n",
-")\n",
-"test_dataset = Dataset.Tabular.from_delimited_files(\n",
-"    path=[(datastore, \"beer-dataset/tabular/test.csv\")]\n",
+"test_dataset = Dataset.Tabular.register_pandas_dataframe(\n",
+"    test_df, target=(datastore, \"dataset/\"), name=\"Github_DAU_test\"\n",
 ")"
 ]
 },
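A hedged sketch of the one-call registration path the hunk above adopts; it assumes a workspace `ws` and a frame whose time column has been moved out of the index with reset_index, since register_pandas_dataframe persists only regular columns:

from azureml.core import Dataset, Workspace

ws = Workspace.from_config()
datastore = ws.get_default_datastore()

# Writes the frame to the datastore and registers a TabularDataset in one call,
# replacing the earlier to_csv / upload_files / from_delimited_files sequence.
train_dataset = Dataset.Tabular.register_pandas_dataframe(
    train, target=(datastore, "dataset/"), name="Github_DAU_train"
)
print(train_dataset.name, train_dataset.version)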
@@ -397,10 +375,10 @@
 "forecasting_parameters = ForecastingParameters(\n",
 "    time_column_name=time_column_name,\n",
 "    forecast_horizon=forecast_horizon,\n",
-"    freq=\"MS\", # Set the forecast frequency to be monthly (start of the month)\n",
+"    freq=\"D\", # Set the forecast frequency to be daily\n",
 ")\n",
 "\n",
-"# We will disable the enable_early_stopping flag to ensure the DNN model is recommended for demonstration purpose.\n",
+"# Set allowed_models to TCNForecaster so that only the DNN forecaster is trained.\n",
 "automl_config = AutoMLConfig(\n",
 "    task=\"forecasting\",\n",
 "    primary_metric=\"normalized_root_mean_squared_error\",\n",
@@ -413,7 +391,7 @@
 "    max_concurrent_iterations=4,\n",
 "    max_cores_per_iteration=-1,\n",
 "    enable_dnn=True,\n",
-"    enable_early_stopping=False,\n",
+"    allowed_models=[\"TCNForecaster\"],\n",
 "    forecasting_parameters=forecasting_parameters,\n",
 ")"
 ]
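A hedged sketch of the configuration these two hunks arrive at; `forecast_horizon`, `train_dataset`, and `compute_target` are placeholders defined in earlier cells, not values confirmed here:

from azureml.automl.core.forecasting_parameters import ForecastingParameters
from azureml.train.automl import AutoMLConfig

forecasting_parameters = ForecastingParameters(
    time_column_name="date",
    forecast_horizon=forecast_horizon,  # defined earlier in the notebook
    freq="D",  # daily observations
)

# Restricting allowed_models to TCNForecaster guarantees the DNN is trained,
# which is more direct than disabling early stopping and hoping it wins.
automl_config = AutoMLConfig(
    task="forecasting",
    primary_metric="normalized_root_mean_squared_error",
    training_data=train_dataset,
    label_column_name="count",
    compute_target=compute_target,
    enable_dnn=True,
    allowed_models=["TCNForecaster"],
    forecasting_parameters=forecasting_parameters,
)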
@@ -506,7 +484,9 @@
 "if not forecast_model in summary_df[\"run_id\"]:\n",
 "    forecast_model = \"ForecastTCN\"\n",
 "\n",
-"best_dnn_run_id = summary_df[\"run_id\"][forecast_model]\n",
+"best_dnn_run_id = summary_df[summary_df[\"Score\"] == summary_df[\"Score\"].min()][\n",
+"    \"run_id\"\n",
+"][forecast_model]\n",
 "best_dnn_run = Run(experiment, best_dnn_run_id)"
 ]
 },
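The selection above keeps only the row whose Score equals the minimum before indexing by algorithm name; a sketch of the same idea on a toy frame (hypothetical data, assuming lower scores are better):

import pandas as pd

summary_df = pd.DataFrame(
    {"run_id": ["run_1", "run_2"], "Score": [0.12, 0.08]},
    index=["TCNForecaster", "ForecastTCN"],
)
best = summary_df[summary_df["Score"] == summary_df["Score"].min()]["run_id"]
print(best)  # run_2, the lowest-scoring (best) run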
@@ -567,11 +547,6 @@
 },
 "outputs": [],
 "source": [
-"from azureml.core import Dataset\n",
-"\n",
-"test_dataset = Dataset.Tabular.from_delimited_files(\n",
-"    path=[(datastore, \"beer-dataset/tabular/test.csv\")]\n",
-")\n",
 "# preview the first 5 rows of the dataset\n",
 "test_dataset.take(5).to_pandas_dataframe()"
 ]
@@ -582,7 +557,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"compute_target = ws.compute_targets[\"beer-cluster\"]\n",
+"compute_target = ws.compute_targets[\"github-cluster\"]\n",
 "test_experiment = Experiment(ws, experiment_name + \"_test\")"
 ]
 },
@@ -0,0 +1,4 @@
+name: auto-ml-forecasting-github-dau
+dependencies:
+- pip:
+  - azureml-sdk
@@ -0,0 +1,455 @@
+date,count,day_of_week,month_of_year,holiday
[454 added rows of daily data, 2017-06-04 through 2018-08-31, omitted here for brevity]
@@ -79,9 +79,7 @@ def get_result_df(remote_run):
     if "goal" in run.properties:
         goal_minimize = run.properties["goal"].split("_")[-1] == "min"

-    summary_df = summary_df.T.sort_values(
-        "Score", ascending=goal_minimize
-    ).drop_duplicates(["run_algorithm"])
+    summary_df = summary_df.T.sort_values("Score", ascending=goal_minimize)
     summary_df = summary_df.set_index("run_algorithm")
     return summary_df
@@ -105,13 +103,8 @@ def run_inference(
     train_run.download_file(
         "outputs/{}".format(model_base_name), "inference/{}".format(model_base_name)
     )
-    train_run.download_file("outputs/conda_env_v_1_0_0.yml", "inference/condafile.yml")

-    inference_env = Environment("myenv")
-    inference_env.docker.enabled = True
-    inference_env.python.conda_dependencies = CondaDependencies(
-        conda_dependencies_file_path="inference/condafile.yml"
-    )
+    inference_env = train_run.get_environment()

     est = Estimator(
         source_directory=script_folder,
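The hunk above swaps a hand-assembled inference environment for the environment captured from the training run, so inference dependencies cannot drift from the training ones (the dropped `docker.enabled` flag was also deprecated in later v1 SDK releases in favor of `DockerConfiguration`). A minimal sketch of the two patterns, assuming an azureml-core `Run` object named `train_run`:

```python
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies

# Old pattern: rebuild an environment from a conda file exported by the run.
env_from_file = Environment("myenv")
env_from_file.python.conda_dependencies = CondaDependencies(
    conda_dependencies_file_path="inference/condafile.yml"
)

# New pattern: reuse the exact environment the training run executed in.
inference_env = train_run.get_environment()
```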
@@ -95,7 +95,7 @@ def do_rolling_forecast_with_lookback(
     # Extract test data from an expanding window up-to the horizon
     expand_wind = X[time_column_name] < horizon_time
     X_test_expand = X[expand_wind]
-    y_query_expand = np.zeros(len(X_test_expand)).astype(np.float)
+    y_query_expand = np.zeros(len(X_test_expand)).astype(float)
     y_query_expand.fill(np.NaN)

     if origin_time != X[time_column_name].min():
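A note on the `.astype` changes in this hunk and the next one: `np.float` was deprecated in NumPy 1.20 and removed in NumPy 1.24, so the builtin `float` (which NumPy maps to the `float64` dtype) is the drop-in replacement. A minimal sketch of the fixed pattern:

```python
import numpy as np

# np.zeros(...).astype(np.float) raises AttributeError on NumPy >= 1.24;
# the builtin float resolves to the same float64 dtype.
y_query = np.zeros(5).astype(float)
y_query.fill(np.nan)   # np.NaN is an alias of np.nan
print(y_query.dtype)   # float64
```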
@@ -176,7 +176,7 @@ def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq="D"):
     # Extract test data from an expanding window up-to the horizon
     expand_wind = X_test[time_column_name] < horizon_time
     X_test_expand = X_test[expand_wind]
-    y_query_expand = np.zeros(len(X_test_expand)).astype(np.float)
+    y_query_expand = np.zeros(len(X_test_expand)).astype(float)
     y_query_expand.fill(np.NaN)

     if origin_time != X_test[time_column_name].min():
@@ -78,7 +78,8 @@
 "output[\"Resource Group\"] = ws.resource_group\n",
 "output[\"Location\"] = ws.location\n",
 "output[\"Default datastore name\"] = dstore.name\n",
-"pd.set_option(\"display.max_colwidth\", -1)\n",
+"output[\"SDK Version\"] = azureml.core.VERSION\n",
+"pd.set_option(\"display.max_colwidth\", None)\n",
 "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
 "outputDf.T"
 ]
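The `max_colwidth` change here (and in the identical hunks later) tracks a pandas API change: passing `-1` to mean "no limit" was deprecated in pandas 1.0, and `None` is the supported sentinel. A minimal sketch:

```python
import pandas as pd

# pandas >= 1.0: use None (not -1) to disable column-width truncation.
pd.set_option("display.max_colwidth", None)
```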
@@ -381,7 +382,7 @@
 "metadata": {},
 "source": [
 "### Submit the pipeline to run\n",
-"Next we submit our pipeline to run. The whole training pipeline takes about 1h 11m using a Standard_D12_V2 VM with our current ParallelRunConfig setting."
+"Next we submit our pipeline to run. The whole training pipeline takes about 1h using a Standard_D16_V3 VM with our current ParallelRunConfig setting."
 ]
 },
 {
@@ -571,7 +572,7 @@
 "source": [
 "## Retrieve results\n",
 "\n",
-"Forecast results can be retrieved through the following code. The prediction results summary and the actual predictions are downloaded the \"forecast_results\" folder"
+"Forecast results can be retrieved through the following code. The prediction results summary and the actual predictions are downloaded into the \"forecast_results\" folder."
 ]
 },
 {
@@ -0,0 +1,122 @@
---
page_type: sample
languages:
- python
products:
- azure-machine-learning
description: Tutorial showing how to solve complex machine learning time series forecasting problems at scale by using Azure Automated ML and the Many Models solution accelerator.
---



# Many Models Solution Accelerator

<!--
Guidelines on README format: https://review.docs.microsoft.com/help/onboard/admin/samples/concepts/readme-template?branch=master

Guidance on onboarding samples to docs.microsoft.com/samples: https://review.docs.microsoft.com/help/onboard/admin/samples/process/onboarding?branch=master

Taxonomies for products and languages: https://review.docs.microsoft.com/new-hope/information-architecture/metadata/taxonomies?branch=master
-->

In the real world, many problems can be too complex to be solved by a single machine learning model. Whether that is predicting sales for each individual store, building a predictive maintenance model for hundreds of oil wells, or tailoring an experience to individual users, building a model for each instance can lead to improved results on many machine learning problems.

This pattern is very common across a wide variety of industries and applicable to many real-world use cases. Below are some examples we have seen where this pattern is being used.

- Energy and utility companies building predictive maintenance models for thousands of oil wells, hundreds of wind turbines or hundreds of smart meters

- Retail organizations building workforce optimization models for thousands of stores, campaign promotion propensity models, and price optimization models for the hundreds of thousands of products they sell

- Restaurant chains building demand forecasting models across thousands of restaurants

- Banks and financial institutions building cash-replenishment models for ATMs and personalized models for individual customers

- Enterprises building revenue forecasting models at each division level

- Document management companies building text analytics and legal document search models per each state

Azure Machine Learning (AML) makes it easy to train, operate, and manage hundreds or even thousands of models. This repo will walk you through the end-to-end process of creating a many models solution, from training to scoring to monitoring.

## Prerequisites

To use this solution accelerator, all you need is access to an [Azure subscription](https://azure.microsoft.com/free/) and an [Azure Machine Learning Workspace](https://docs.microsoft.com/azure/machine-learning/how-to-manage-workspace) that you'll create below.

While it's not required, a basic understanding of Azure Machine Learning will be helpful for understanding the solution. The following resources can help introduce you to AML:

1. [Azure Machine Learning Overview](https://azure.microsoft.com/services/machine-learning/)
2. [Azure Machine Learning Tutorials](https://docs.microsoft.com/azure/machine-learning/tutorial-1st-experiment-sdk-setup)
3. [Azure Machine Learning Sample Notebooks on Github](https://github.com/Azure/azureml-examples)

## Getting started

### 1. Deploy Resources

Start by deploying the resources to Azure. The button below will deploy Azure Machine Learning and its related resources:

<a href="https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2Fmicrosoft%2Fsolution-accelerator-many-models%2Fmaster%2Fazuredeploy.json" target="_blank">
    <img src="http://azuredeploy.net/deploybutton.png"/>
</a>

### 2. Configure Development Environment

Next you'll need to configure your [development environment](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment) for Azure Machine Learning. We recommend using a [Compute Instance](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment#compute-instance) as it's the fastest way to get up and running.

### 3. Run Notebooks

Once your development environment is set up, run through the Jupyter Notebooks sequentially, following the steps outlined. By the end, you'll know how to train, score, and make predictions using the many models pattern on Azure Machine Learning.



## Contents

In this repo, you'll train and score a forecasting model for each orange juice brand and for each store at a (simulated) grocery chain. By the end, you'll have forecasted sales by using up to 11,973 models to predict sales for the next few weeks.

The data used in this sample is simulated based on the [Dominick's Orange Juice Dataset](http://www.cs.unitn.it/~taufer/QMMA/L10-OJ-Data.html#(1)), sales data from a Chicago area grocery store.

<img src="images/Flow_map.png" width="1000">

### Using Automated ML to train the models:

The [`auto-ml-forecasting-many-models.ipynb`](./auto-ml-forecasting-many-models.ipynb) notebook is a guided solution accelerator that demonstrates the steps from data preparation through model training and forecasting with the trained models, as well as operationalizing the solution.

## How-to-videos

Watch these how-to videos for a step-by-step walk-through of the many models solution accelerator to learn how to set up your models using Automated ML.

### Automated ML

[![Many Models](./images/thumbnail_mmsa.png)](https://channel9.msdn.com/Shows/Docs-AI/Building-Large-Scale-Machine-Learning-Forecasting-Models-using-Azure-Machine-Learnings-Automated-ML)

## Key concepts

### ParallelRunStep

[ParallelRunStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.parallel_run_step.parallelrunstep?view=azure-ml-py) enables the parallel training of models and is commonly used for batch inferencing. This [document](https://docs.microsoft.com/azure/machine-learning/how-to-use-parallel-run-step) walks through some of the key concepts around ParallelRunStep.

### Pipelines

[Pipelines](https://docs.microsoft.com/azure/machine-learning/concept-ml-pipelines) allow you to create workflows in your machine learning projects. These workflows have a number of benefits including speed, simplicity, repeatability, and modularity.

### Automated Machine Learning

[Automated Machine Learning](https://docs.microsoft.com/azure/machine-learning/concept-automated-ml), also referred to as automated ML or AutoML, is the process of automating the time-consuming, iterative tasks of machine learning model development. It allows data scientists, analysts, and developers to build ML models at high scale, efficiency, and productivity, all while sustaining model quality.

### Other Concepts

In addition to ParallelRunStep, Pipelines and Automated Machine Learning, you'll also be working with the following concepts: [workspace](https://docs.microsoft.com/azure/machine-learning/concept-workspace), [datasets](https://docs.microsoft.com/azure/machine-learning/concept-data#datasets), [compute targets](https://docs.microsoft.com/azure/machine-learning/concept-compute-target#train), [python script steps](https://docs.microsoft.com/python/api/azureml-pipeline-steps/azureml.pipeline.steps.python_script_step.pythonscriptstep?view=azure-ml-py), and [Azure Open Datasets](https://azure.microsoft.com/services/open-datasets/).

## Contributing

This project welcomes contributions and suggestions. To learn more, visit the [contributing](../../../CONTRIBUTING.md) section.

Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.

When you submit a pull request, a CLA bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA.

This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
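To make the ParallelRunStep concept from the README above concrete, here is a minimal, hedged sketch of how a many-models training step is typically wired up with the v1 SDK. The cluster name `cpu-cluster`, the dataset name `oj_sales_partitioned`, the `scripts/train.py` entry script, and the curated environment are illustrative placeholders, not files or resources from this repo:

```python
from azureml.core import Environment, Workspace
from azureml.core.dataset import Dataset
from azureml.data import OutputFileDatasetConfig
from azureml.pipeline.steps import ParallelRunConfig, ParallelRunStep

ws = Workspace.from_config()
compute_target = ws.compute_targets["cpu-cluster"]                      # placeholder cluster
train_env = Environment.get(ws, "AzureML-sklearn-0.24-ubuntu18.04-py37-cpu")
partitioned_dataset = Dataset.get_by_name(ws, "oj_sales_partitioned")   # placeholder dataset

parallel_run_config = ParallelRunConfig(
    source_directory="scripts",       # placeholder folder containing train.py
    entry_script="train.py",          # invoked once per mini-batch / data partition
    mini_batch_size="1",
    error_threshold=-1,               # -1: don't fail the step on per-item errors
    output_action="append_row",
    environment=train_env,
    compute_target=compute_target,
    node_count=2,
    process_count_per_node=8,
)

train_step = ParallelRunStep(
    name="many-models-training",
    parallel_run_config=parallel_run_config,
    inputs=[partitioned_dataset.as_named_input("train_data")],
    output=OutputFileDatasetConfig(name="training_output"),
    allow_reuse=False,
)
```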
@@ -30,7 +30,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"For this notebook we are using a synthetic dataset portraying sales data to predict the quantity of a vartiety of product SKUs across several states, stores, and product categories.\n",
+"For this notebook we are using a synthetic dataset portraying sales data to predict the quantity of a variety of product SKUs across several states, stores, and product categories.\n",
 "\n",
 "**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend setting the parallelism to a maximum of 320 runs per experiment per workspace. If users increase this limit to get more parallelism, they might encounter Too Many Requests errors (HTTP 429).**"
 ]
@@ -78,7 +78,8 @@
 "output[\"Resource Group\"] = ws.resource_group\n",
 "output[\"Location\"] = ws.location\n",
 "output[\"Default datastore name\"] = dstore.name\n",
-"pd.set_option(\"display.max_colwidth\", -1)\n",
+"output[\"SDK Version\"] = azureml.core.VERSION\n",
+"pd.set_option(\"display.max_colwidth\", None)\n",
 "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
 "outputDf.T"
 ]
@@ -241,6 +242,34 @@
 ")"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"#### 2.4 Configure data with ``OutputFileDatasetConfig`` objects\n",
+"This step shows how to configure output data from a pipeline step. One use case for this step is when you want to do some preprocessing before feeding the data to the training step. Intermediate data (or the output of a step) is represented by an ``OutputFileDatasetConfig`` object. ``output_data`` is produced as the output of a step. Optionally, this data can be registered as a dataset by calling the ``register_on_complete`` method. If you create an ``OutputFileDatasetConfig`` in one step and use it as an input to another step, that data dependency between steps creates an implicit execution order in the pipeline.\n",
+"\n",
+"``OutputFileDatasetConfig`` objects return a directory, and by default write output to the default datastore of the workspace.\n",
+"\n",
+"Since direct instance creation of the ``OutputTabularDatasetConfig`` class is not allowed, we first create an ``OutputFileDatasetConfig`` instance. Then we use the ``read_parquet_files`` method to read the parquet files into an ``OutputTabularDatasetConfig``."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"from azureml.data.output_dataset_config import OutputFileDatasetConfig\n",
+"\n",
+"output_data = OutputFileDatasetConfig(\n",
+"    name=\"processed_data\", destination=(dstore, \"outputdataset/{run-id}/{output-name}\")\n",
+").as_upload()\n",
+"# output_data_dataset = output_data.register_on_complete(\n",
+"#     name='processed_data', description='files from prev step')\n",
+"output_data = output_data.read_parquet_files()"
+]
+},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -302,13 +331,55 @@
 "    print(compute_target.status.serialize())"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Configure the training run's environment\n",
+"The next step is making sure that the remote training run has all the dependencies needed by the training steps. Dependencies and the runtime context are set by creating and configuring a RunConfiguration object.\n",
+"\n",
+"The code below shows two options for handling dependencies. As presented, with ``USE_CURATED_ENV = True``, the configuration is based on a [curated environment](https://docs.microsoft.com/en-us/azure/machine-learning/resource-curated-environments). Curated environments have prebuilt Docker images in the [Microsoft Container Registry](https://hub.docker.com/publishers/microsoftowner). For more information, see [Azure Machine Learning curated environments](https://docs.microsoft.com/en-us/azure/machine-learning/resource-curated-environments).\n",
+"\n",
+"The path taken if you change ``USE_CURATED_ENV`` to False shows the pattern for explicitly setting your dependencies. In that scenario, a new custom Docker image will be created and registered in an Azure Container Registry within your resource group (see [Introduction to private Docker container registries in Azure](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-intro)). Building and registering this image can take quite a few minutes."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"from azureml.core.runconfig import RunConfiguration\n",
+"from azureml.core.conda_dependencies import CondaDependencies\n",
+"from azureml.core import Environment\n",
+"\n",
+"aml_run_config = RunConfiguration()\n",
+"aml_run_config.target = compute_target\n",
+"\n",
+"USE_CURATED_ENV = True\n",
+"if USE_CURATED_ENV:\n",
+"    curated_environment = Environment.get(\n",
+"        workspace=ws, name=\"AzureML-sklearn-0.24-ubuntu18.04-py37-cpu\"\n",
+"    )\n",
+"    aml_run_config.environment = curated_environment\n",
+"else:\n",
+"    aml_run_config.environment.python.user_managed_dependencies = False\n",
+"\n",
+"    # Add some packages relied on by data prep step\n",
+"    aml_run_config.environment.python.conda_dependencies = CondaDependencies.create(\n",
+"        conda_packages=[\"pandas\", \"scikit-learn\"],\n",
+"        pip_packages=[\"azureml-sdk\", \"azureml-dataset-runtime[fuse,pandas]\"],\n",
+"        pin_sdk_version=False,\n",
+"    )"
+]
+},
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "### Set up training parameters\n",
 "\n",
 "This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition.\n",
 "\n",
 "| Property | Description|\n",
 "| :--------------- | :------------------- |\n",
@@ -324,7 +395,7 @@
 "| **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. |\n",
 "| **time_column_name** | The name of your time column. |\n",
 "| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if the enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
-"| **time_series_id_column_name** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
+"| **time_series_id_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
 "| **track_child_runs** | Flag to disable tracking of child runs. Only the best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
 "| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training; this helps reduce throttling when training at large scale. |\n",
 "| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
@@ -355,8 +426,8 @@
 " \"n_cross_validations\": 3,\n",
 " \"time_column_name\": \"WeekStarting\",\n",
 " \"drop_column_names\": \"Revenue\",\n",
-" \"max_horizon\": 6,\n",
-" \"grain_column_names\": partition_column_names,\n",
+" \"forecast_horizon\": 6,\n",
+" \"time_series_id_column_names\": partition_column_names,\n",
 " \"track_child_runs\": False,\n",
 "}\n",
 "\n",
@@ -365,6 +436,46 @@
 ")"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Construct your pipeline steps\n",
+"Once you have the compute resource and environment created, you're ready to define your pipeline's steps. There are many built-in steps available via the Azure Machine Learning SDK, as you can see in the [reference documentation for the azureml.pipeline.steps package](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps?view=azure-ml-py). The most flexible class is [PythonScriptStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.python_script_step.pythonscriptstep?view=azure-ml-py), which runs a Python script.\n",
+"\n",
+"Your data preparation code is in a subdirectory (in this example, \"data_preprocessing_tabular.py\" in the directory \"./scripts\"). As part of the pipeline creation process, this directory is zipped and uploaded to the compute_target, and the step runs the script specified as the value for ``script_name``.\n",
+"\n",
+"The ``arguments`` values specify the inputs and outputs of the step. In the example below, the baseline data is the ``input_ds_small`` dataset. The script data_preprocessing_tabular.py does whatever data-transformation tasks are appropriate to the task at hand and outputs the data to ``output_data``, of type ``OutputFileDatasetConfig``. For more information, see [Moving data into and between ML pipeline steps (Python)](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-move-data-in-out-of-pipelines). The step will run on the machine defined by ``compute_target``, using the configuration ``aml_run_config``.\n",
+"\n",
+"Reuse of previous results (``allow_reuse``) is key when using pipelines in a collaborative environment, since eliminating unnecessary reruns offers agility. Reuse is the default behavior when the ``script_name``, ``inputs``, and the parameters of a step remain the same. When reuse is allowed, results from the previous run are immediately sent to the next step. If ``allow_reuse`` is set to False, a new run will always be generated for this step during pipeline execution.\n",
+"\n",
+"> Note that only partitioned FileDatasets and non-partitioned TabularDatasets are supported when using such output as input."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"from azureml.pipeline.steps import PythonScriptStep\n",
+"\n",
+"dataprep_source_dir = \"./scripts\"\n",
+"entry_point = \"data_preprocessing_tabular.py\"\n",
+"ds_input = input_ds_small.as_named_input(\"train_10_models\")\n",
+"\n",
+"data_prep_step = PythonScriptStep(\n",
+"    script_name=entry_point,\n",
+"    source_directory=dataprep_source_dir,\n",
+"    arguments=[\"--input\", ds_input, \"--output\", output_data],\n",
+"    compute_target=compute_target,\n",
+"    runconfig=aml_run_config,\n",
+"    allow_reuse=False,\n",
+")\n",
+"\n",
+"input_ds_small = output_data"
+]
+},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -554,12 +665,12 @@
 "| :--------------- | :------------------- |\n",
 "| **experiment** | The experiment used for the inference run. |\n",
 "| **inference_data** | The data to use for inferencing. It should have the same schema as used for training. |\n",
 "| **compute_target** | The compute target that runs the inference pipeline. |\n",
 "| **node_count** | The number of compute nodes to be used for running the user script. We recommend starting with the number of cores per node (varies by compute SKU). |\n",
 "| **process_count_per_node** | The number of processes per node. |\n",
-"| **train_run_id** | \[Optional\] The run id of the hierarchy training; by default it is the latest successful training many models run in the experiment. |\n",
-"| **train_experiment_name** | \[Optional\] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiment as the inference pipeline. |\n",
-"| **process_count_per_node** | \[Optional\] The number of processes per node; by default it's 4. |"
+"| **train_run_id** | \[Optional] The run id of the hierarchy training; by default it is the latest successful training many models run in the experiment. |\n",
+"| **train_experiment_name** | \[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiment as the inference pipeline. |\n",
+"| **process_count_per_node** | \[Optional] The number of processes per node; by default it's 4. |"
 ]
 },
 {

 After Width: | Height: | Size: 32 KiB
 After Width: | Height: | Size: 306 KiB
 After Width: | Height: | Size: 2.6 MiB
 After Width: | Height: | Size: 106 KiB
 After Width: | Height: | Size: 158 KiB
 After Width: | Height: | Size: 80 KiB
 After Width: | Height: | Size: 68 KiB
 After Width: | Height: | Size: 631 KiB
@@ -0,0 +1,39 @@
from pathlib import Path
from azureml.core import Run

import argparse
import os


def main(args):
    output = Path(args.output)
    output.mkdir(parents=True, exist_ok=True)

    run_context = Run.get_context()
    input_path = run_context.input_datasets["train_10_models"]

    for file_name in os.listdir(input_path):
        input_file = os.path.join(input_path, file_name)
        with open(input_file, "r") as f:
            content = f.read()

        # Apply any data pre-processing techniques here

        output_file = os.path.join(output, file_name)
        with open(output_file, "w") as f:
            f.write(content)


def my_parse_args():
    parser = argparse.ArgumentParser("Test")

    parser.add_argument("--input", type=str)
    parser.add_argument("--output", type=str)

    args = parser.parse_args()
    return args


if __name__ == "__main__":
    args = my_parse_args()
    main(args)
@@ -0,0 +1,31 @@
from pathlib import Path
from azureml.core import Run
import argparse


def main(args):
    output = Path(args.output)
    output.mkdir(parents=True, exist_ok=True)

    run_context = Run.get_context()
    dataset = run_context.input_datasets["train_10_models"]
    df = dataset.to_pandas_dataframe()

    # Apply any data pre-processing techniques here

    df.to_parquet(output / "data_prepared_result.parquet", compression=None)


def my_parse_args():
    parser = argparse.ArgumentParser("Test")

    parser.add_argument("--input", type=str)
    parser.add_argument("--output", type=str)

    args = parser.parse_args()
    return args


if __name__ == "__main__":
    args = my_parse_args()
    main(args)
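A note on how the two preprocessing scripts above receive data: the `--input` argument is declared but unused; the dataset is resolved through the run context, keyed by the name passed to `as_named_input(...)` on the pipeline side (see the PythonScriptStep hunk earlier). A minimal sketch of that wiring:

```python
from azureml.core import Run

# Pipeline side (from the earlier hunk):
#   ds_input = input_ds_small.as_named_input("train_10_models")
#   arguments = ["--input", ds_input, "--output", output_data]

# Script side: the same key resolves to the materialized dataset at runtime.
run_context = Run.get_context()
dataset = run_context.input_datasets["train_10_models"]
df = dataset.to_pandas_dataframe()  # tabular variant; the file variant receives a mount path
```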
@@ -0,0 +1,3 @@
dependencies:
- pip:
  - azureml-contrib-automl-pipeline-steps
@@ -58,21 +58,22 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"import azureml.core\n",
-"import pandas as pd\n",
+"import json\n",
 "import logging\n",
 "\n",
-"from azureml.core.workspace import Workspace\n",
+"import azureml.core\n",
+"import pandas as pd\n",
+"from azureml.automl.core.featurization import FeaturizationConfig\n",
 "from azureml.core.experiment import Experiment\n",
-"from azureml.train.automl import AutoMLConfig\n",
-"from azureml.automl.core.featurization import FeaturizationConfig"
+"from azureml.core.workspace import Workspace\n",
+"from azureml.train.automl import AutoMLConfig"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
+"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
 ]
 },
 {
@@ -81,7 +82,6 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -112,7 +112,8 @@
 "output[\"Resource Group\"] = ws.resource_group\n",
 "output[\"Location\"] = ws.location\n",
 "output[\"Run History Name\"] = experiment_name\n",
-"pd.set_option(\"display.max_colwidth\", -1)\n",
+"output[\"SDK Version\"] = azureml.core.VERSION\n",
+"pd.set_option(\"display.max_colwidth\", None)\n",
 "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
 "outputDf.T"
 ]
@@ -472,8 +473,8 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"### Retrieve the Best Model\n",
-"Each run within an Experiment stores serialized (i.e. pickled) pipelines from the AutoML iterations. We can now retrieve the pipeline with the best performance on the validation dataset:"
+"### Retrieve the Best Run details\n",
+"Below we retrieve the best Run object from among all the runs in the experiment."
 ]
 },
 {
@@ -482,9 +483,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"best_run, fitted_model = remote_run.get_output()\n",
-"print(fitted_model.steps)\n",
-"model_name = best_run.properties[\"model_name\"]"
+"best_run = remote_run.get_best_child()\n",
+"model_name = best_run.properties[\"model_name\"]\n",
+"best_run"
 ]
 },
 {
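For context on the hunk above: `remote_run.get_output()` downloads and deserializes the fitted model locally, which requires the local Python environment to match the training environment, whereas `get_best_child()` only returns the best child `Run` and its metadata. A minimal sketch, where the `outputs/model.pkl` path is an assumption for illustration:

```python
# Fast: fetch only the best child run's metadata (no model download).
best_run = remote_run.get_best_child()
print(best_run.properties["model_name"])

# The serialized model can still be pulled explicitly when needed:
# best_run.download_file("outputs/model.pkl", "model.pkl")  # hypothetical path
```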
@@ -502,16 +503,26 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"custom_featurizer = fitted_model.named_steps[\"timeseriestransformer\"]"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"custom_featurizer.get_featurization_summary()"
+"# Download the featurization summary JSON file locally\n",
+"best_run.download_file(\n",
+"    \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
+")\n",
+"\n",
+"# Render the JSON as a pandas DataFrame\n",
+"with open(\"featurization_summary.json\", \"r\") as f:\n",
+"    records = json.load(f)\n",
+"fs = pd.DataFrame.from_records(records)\n",
+"\n",
+"# View a summary of the featurization\n",
+"fs[\n",
+"    [\n",
+"        \"RawFeatureName\",\n",
+"        \"TypeDetected\",\n",
+"        \"Dropped\",\n",
+"        \"EngineeredFeatureCount\",\n",
+"        \"Transformations\",\n",
+"    ]\n",
+"]"
 ]
 },
 {
@@ -538,7 +549,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"### Retreiving forecasts from the model\n",
+"### Retrieving forecasts from the model\n",
 "We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and executed on the remote compute."
 ]
 },
@@ -0,0 +1,823 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Training and Inferencing AutoML Forecasting Model Using Pipelines"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Introduction\n",
|
||||||
|
"\n",
|
||||||
|
"In this notebook, we demonstrate how to use piplines to train and inference on AutoML Forecasting model. Two pipelines will be created: one for training AutoML model, and the other is for inference on AutoML model. We'll also demonstrate how to schedule the inference pipeline so you can get inference results periodically (with refreshed test dataset). Make sure you have executed the configuration notebook before running this notebook. In this notebook you will learn how to:\n",
|
||||||
|
"\n",
|
||||||
|
"- Configure AutoML using AutoMLConfig for forecasting tasks using pipeline AutoMLSteps.\n",
|
||||||
|
"- Create and register an AutoML model using AzureML pipeline.\n",
|
||||||
|
"- Inference and schdelue the pipeline using registered model."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Setup\n",
|
||||||
|
"\n",
|
||||||
|
"As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import json\n",
|
||||||
|
"import logging\n",
|
||||||
|
"import os\n",
|
||||||
|
"\n",
|
||||||
|
"from matplotlib import pyplot as plt\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
|
"\n",
|
||||||
|
"import azureml.core\n",
|
||||||
|
"from azureml.core.experiment import Experiment\n",
|
||||||
|
"from azureml.core.workspace import Workspace\n",
|
||||||
|
"from azureml.train.automl import AutoMLConfig"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
|
||||||
|
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Accessing the Azure ML workspace requires authentication with Azure.\n",
|
||||||
|
"\n",
|
||||||
|
"The default authentication is interactive authentication using the default tenant. Executing the ws = Workspace.from_config() line in the cell below will prompt for authentication the first time that it is run.\n",
|
||||||
|
"\n",
|
||||||
|
"If you have multiple Azure tenants, you can specify the tenant by replacing the ws = Workspace.from_config() line in the cell below with the following:\n",
|
||||||
|
"```\n",
|
||||||
|
"from azureml.core.authentication import InteractiveLoginAuthentication\n",
|
||||||
|
"auth = InteractiveLoginAuthentication(tenant_id = 'mytenantid')\n",
|
||||||
|
"ws = Workspace.from_config(auth = auth)\n",
|
||||||
|
"```\n",
|
||||||
|
"If you need to run in an environment where interactive login is not possible, you can use Service Principal authentication by replacing the ws = Workspace.from_config() line in the cell below with the following:\n",
|
||||||
|
"```\n",
|
||||||
|
"from azureml.core.authentication import ServicePrincipalAuthentication\n",
|
||||||
|
"auth = ServicePrincipalAuthentication('mytenantid', 'myappid', 'mypassword')\n",
|
||||||
|
"ws = Workspace.from_config(auth = auth)\n",
|
||||||
|
"```\n",
|
||||||
|
"For more details, see aka.ms/aml-notebook-auth"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"ws = Workspace.from_config()\n",
|
||||||
|
"dstor = ws.get_default_datastore()\n",
|
||||||
|
"\n",
|
||||||
|
"# Choose a name for the run history container in the workspace.\n",
|
||||||
|
"experiment_name = \"forecasting-pipeline\"\n",
|
||||||
|
"experiment = Experiment(ws, experiment_name)\n",
|
||||||
|
"\n",
|
||||||
|
"output = {}\n",
|
||||||
|
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||||
|
"output[\"Workspace\"] = ws.name\n",
|
||||||
|
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||||
|
"output[\"Location\"] = ws.location\n",
|
||||||
|
"output[\"Run History Name\"] = experiment_name\n",
|
||||||
|
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||||
|
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||||
|
"outputDf.T"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Compute"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Compute \n",
|
||||||
|
"\n",
|
||||||
|
"#### Create or Attach existing AmlCompute\n",
|
||||||
|
"\n",
|
||||||
|
"You will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||||
|
"\n",
|
||||||
|
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||||
|
"\n",
|
||||||
|
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
||||||
|
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||||
|
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||||
|
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||||
|
"\n",
|
||||||
|
"# Choose a name for your CPU cluster\n",
|
||||||
|
"amlcompute_cluster_name = \"forecast-step-cluster\"\n",
|
||||||
|
"\n",
|
||||||
|
"# Verify that cluster does not exist already\n",
|
||||||
|
"try:\n",
|
||||||
|
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
||||||
|
" print(\"Found existing cluster, use it.\")\n",
|
||||||
|
"except ComputeTargetException:\n",
|
||||||
|
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||||
|
" vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
|
||||||
|
" )\n",
|
||||||
|
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
||||||
|
"compute_target.wait_for_completion(show_output=True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Data\n",
|
||||||
|
"You are now ready to load the historical orange juice sales data. For demonstration purposes, we extract sales time-series for just a few of the stores. We will load the CSV file into a plain pandas DataFrame; the time column in the CSV is called _WeekStarting_, so it will be specially parsed into the datetime type."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"time_column_name = \"WeekStarting\"\n",
|
||||||
|
"train = pd.read_csv(\"oj-train.csv\", parse_dates=[time_column_name])\n",
|
||||||
|
"\n",
|
||||||
|
"train.head()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Each row in the DataFrame holds a quantity of weekly sales for an OJ brand at a single store. The data also includes the sales price, a flag indicating if the OJ brand was advertised in the store that week, and some customer demographic information based on the store location. For historical reasons, the data also include the logarithm of the sales quantity. The Dominick's grocery data is commonly used to illustrate econometric modeling techniques where logarithms of quantities are generally preferred. \n",
|
||||||
|
"\n",
|
||||||
|
"The task is now to build a time-series model for the _Quantity_ column. It is important to note that this dataset is comprised of many individual time-series - one for each unique combination of _Store_ and _Brand_. To distinguish the individual time-series, we define the **time_series_id_column_names** - the columns whose values determine the boundaries between time-series: "
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"time_series_id_column_names = [\"Store\", \"Brand\"]\n",
|
||||||
|
"nseries = train.groupby(time_series_id_column_names).ngroups\n",
|
||||||
|
"print(\"Data contains {0} individual time-series.\".format(nseries))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Test Splitting\n",
|
||||||
|
"We now split the data into a training and a testing set for later forecast prediction. The test set will contain the final 4 weeks of observed sales for each time-series. The splits should be stratified by series, so we use a group-by statement on the time series identifier columns."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"n_test_periods = 4\n",
|
||||||
|
"\n",
|
||||||
|
"test = pd.read_csv(\"oj-test.csv\", parse_dates=[time_column_name])"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Upload data to datastore\n",
|
||||||
|
"The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace), is paired with the storage account, which contains the default data store. We will use it to upload the train and test data and create [tabular datasets](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training and testing. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
|
||||||
|
"\n",
|
||||||
|
"datastore = ws.get_default_datastore()\n",
|
||||||
|
"train_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||||
|
" train, target=(datastore, \"dataset/\"), name=\"dominicks_OJ_train_pipeline\"\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"test_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||||
|
" test, target=(datastore, \"dataset/\"), name=\"dominicks_OJ_test_pipeline\"\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Training"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Modeling\n",
|
||||||
|
"\n",
|
||||||
|
"For forecasting tasks, AutoML uses pre-processing and estimation steps that are specific to time-series. AutoML will undertake the following pre-processing steps:\n",
|
||||||
|
"* Detect time-series sample frequency (e.g. hourly, daily, weekly) and create new records for absent time points to make the series regular. A regular time series has a well-defined frequency and has a value at every sample point in a contiguous time span \n",
|
||||||
|
"* Impute missing values in the target (via forward-fill) and feature columns (using median column values) \n",
|
||||||
|
"* Create features based on time series identifiers to enable fixed effects across different series\n",
|
||||||
|
"* Create time-based features to assist in learning seasonal patterns\n",
|
||||||
|
"* Encode categorical variables to numeric quantities\n",
|
||||||
|
"\n",
|
||||||
|
"In this notebook, AutoML will train a single, regression-type model across **all** time-series in a given training set. This allows the model to generalize across related series. If you're looking for training multiple models for different time-series, please see the many-models notebook.\n",
|
||||||
|
"\n",
|
||||||
|
"You are almost ready to start an AutoML training job. First, we need to define the target column."
|
||||||
|
]
|
||||||
|
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"target_column_name = \"Quantity\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Forecasting Parameters\n",
"To define forecasting parameters for your experiment training, you can leverage the `ForecastingParameters` class. The table below details the forecasting parameters we will be passing into our experiment.\n",
"\n",
"\n",
"|Property|Description|\n",
"|-|-|\n",
"|**time_column_name**|The name of your time column.|\n",
"|**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).|\n",
"|**time_series_id_column_names**|The column names used to uniquely identify the time series in data that has multiple rows with the same timestamp. If the time series identifiers are not defined, the data set is assumed to be one time series.|\n",
"|**freq**|Forecast frequency. This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information.|"
]
},
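{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick illustration of the `freq` setting (not part of the original flow), the pandas offset alias `W-THU` denotes a weekly frequency anchored on Thursdays:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"\n",
"# \"W-THU\" is a pandas offset alias: weekly, anchored on Thursdays.\n",
"print(pd.date_range(\"1990-06-14\", periods=4, freq=\"W-THU\"))"
]
},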
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n",
"forecasting_parameters = ForecastingParameters(\n",
"    time_column_name=time_column_name,\n",
"    forecast_horizon=n_test_periods,\n",
"    time_series_id_column_names=time_series_id_column_names,\n",
"    freq=\"W-THU\",  # Set the forecast frequency to be weekly (start on each Thursday)\n",
")\n",
"\n",
"automl_config = AutoMLConfig(\n",
"    task=\"forecasting\",\n",
"    debug_log=\"automl_oj_sales_errors.log\",\n",
"    primary_metric=\"normalized_mean_absolute_error\",\n",
"    experiment_timeout_hours=0.25,\n",
"    training_data=train_dataset,\n",
"    label_column_name=target_column_name,\n",
"    compute_target=compute_target,\n",
"    enable_early_stopping=True,\n",
"    n_cross_validations=5,\n",
"    verbosity=logging.INFO,\n",
"    max_cores_per_iteration=-1,\n",
"    forecasting_parameters=forecasting_parameters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import PipelineData, TrainingOutput\n",
"from azureml.pipeline.steps import AutoMLStep\n",
"from azureml.pipeline.core import Pipeline, PipelineParameter\n",
"from azureml.pipeline.steps import PythonScriptStep\n",
"\n",
"metrics_output_name = \"metrics_output\"\n",
"best_model_output_name = \"best_model_output\"\n",
"model_file_name = \"model_file\"\n",
"metrics_data_name = \"metrics_data\"\n",
"\n",
"metrics_data = PipelineData(\n",
"    name=metrics_data_name,\n",
"    datastore=datastore,\n",
"    pipeline_output_name=metrics_output_name,\n",
"    training_output=TrainingOutput(type=\"Metrics\"),\n",
")\n",
"model_data = PipelineData(\n",
"    name=model_file_name,\n",
"    datastore=datastore,\n",
"    pipeline_output_name=best_model_output_name,\n",
"    training_output=TrainingOutput(type=\"Model\"),\n",
")\n",
"\n",
"automl_step = AutoMLStep(\n",
"    name=\"automl_module\",\n",
"    automl_config=automl_config,\n",
"    outputs=[metrics_data, model_data],\n",
"    allow_reuse=False,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Register Model Step"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Run Configuration and Environment\n",
"To run a pipeline step, we first need an environment in which to run the jobs. The environment can be built using the following code."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.runconfig import CondaDependencies, RunConfiguration\n",
"\n",
"# create a new RunConfig object\n",
"conda_run_config = RunConfiguration(framework=\"python\")\n",
"\n",
"# Set compute target to AmlCompute\n",
"conda_run_config.target = compute_target\n",
"\n",
"conda_run_config.docker.use_docker = True\n",
"\n",
"cd = CondaDependencies.create(\n",
"    pip_packages=[\n",
"        \"azureml-sdk[automl]\",\n",
"        \"applicationinsights\",\n",
"        \"azureml-opendatasets\",\n",
"        \"azureml-defaults\",\n",
"    ],\n",
"    conda_packages=[\"numpy==1.19.5\"],\n",
"    pin_sdk_version=False,\n",
")\n",
"conda_run_config.environment.python.conda_dependencies = cd\n",
"\n",
"print(\"run config is ready\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Step to register the model\n",
"The following code creates a step that registers the model produced by the previous step to the workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import PipelineData\n",
"\n",
"# The model name with which to register the trained model in the workspace.\n",
"model_name_str = \"ojmodel\"\n",
"model_name = PipelineParameter(\"model_name\", default_value=model_name_str)\n",
"\n",
"\n",
"register_model_step = PythonScriptStep(\n",
"    script_name=\"register_model.py\",\n",
"    name=\"register_model\",\n",
"    source_directory=\"scripts\",\n",
"    allow_reuse=False,\n",
"    arguments=[\n",
"        \"--model_name\",\n",
"        model_name,\n",
"        \"--model_path\",\n",
"        model_data,\n",
"        \"--ds_name\",\n",
"        \"dominicks_OJ_train\",\n",
"    ],\n",
"    inputs=[model_data],\n",
"    compute_target=compute_target,\n",
"    runconfig=conda_run_config,\n",
")"
]
},
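{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `register_model.py` script lives under `scripts/` and is not shown in this notebook. For orientation only, a minimal version of such a script could look like the hedged sketch below, inferred from the arguments passed to the step above (the actual file may differ):\n",
"\n",
"```python\n",
"# Hypothetical sketch of scripts/register_model.py -- inferred from the\n",
"# arguments of the PythonScriptStep above, not the actual script.\n",
"import argparse\n",
"\n",
"from azureml.core import Dataset, Run\n",
"from azureml.core.model import Model\n",
"\n",
"parser = argparse.ArgumentParser()\n",
"parser.add_argument(\"--model_name\", type=str)\n",
"parser.add_argument(\"--model_path\", type=str)\n",
"parser.add_argument(\"--ds_name\", type=str)\n",
"args = parser.parse_args()\n",
"\n",
"run = Run.get_context()\n",
"ws = run.experiment.workspace\n",
"\n",
"# Register the model file produced by the AutoML step and link the training dataset.\n",
"train_ds = Dataset.get_by_name(ws, args.ds_name)\n",
"model = Model.register(\n",
"    workspace=ws,\n",
"    model_path=args.model_path,\n",
"    model_name=args.model_name,\n",
"    datasets=[(\"training data\", train_ds)],\n",
")\n",
"print(\"Registered model {} (version {})\".format(model.name, model.version))\n",
"```"
]
},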
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Build the Pipeline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_pipeline = Pipeline(\n",
"    description=\"training_pipeline\",\n",
"    workspace=ws,\n",
"    steps=[automl_step, register_model_step],\n",
")"
]
},
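{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optionally, the pipeline graph can be checked for issues (for example disconnected inputs) before submission via `Pipeline.validate()`. A small sketch:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: validate the pipeline graph before submitting.\n",
"errors = training_pipeline.validate()\n",
"print(\"Validation errors:\", errors)"
]
},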
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit Pipeline Run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_pipeline_run = experiment.submit(training_pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_pipeline_run.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Get metrics for each run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output_dir = \"train_output\"\n",
"pipeline_output = training_pipeline_run.get_pipeline_output(\"metrics_output\")\n",
"pipeline_output.download(output_dir)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"file_path = os.path.join(output_dir, pipeline_output.path_on_datastore)\n",
"with open(file_path) as f:\n",
"    metrics = json.load(f)\n",
"for run_id, run_metrics in metrics.items():\n",
"    print(\"{}: {}\".format(run_id, run_metrics[\"normalized_root_mean_squared_error\"][0]))"
]
},
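{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optionally, assuming the downloaded metrics keep the structure used above (a dict mapping run ids to metric-name/value-list pairs), they can be flattened into a pandas DataFrame for easier side-by-side comparison:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"\n",
"# Optional sketch: flatten per-run metrics into a DataFrame for comparison.\n",
"rows = {\n",
"    run_id: {name: values[0] for name, values in run_metrics.items()}\n",
"    for run_id, run_metrics in metrics.items()\n",
"}\n",
"metrics_df = pd.DataFrame.from_dict(rows, orient=\"index\")\n",
"metrics_df.sort_values(\"normalized_root_mean_squared_error\").head()"
]
},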
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Inference"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"There are several ways to run inference; here we will demonstrate how to use the registered model together with a pipeline (see [how to register a model](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.model.model?view=azure-ml-py))."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Get Inference Pipeline Environment\n",
"To trigger an inference pipeline run, we first need an environment that contains all the packages required to unpickle the model. This environment can either be retrieved from the training run or built from the `yml` file that comes with the model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Model\n",
"\n",
"model = Model(ws, model_name_str)\n",
"download_path = model.download(model_name_str, exist_ok=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"After all the files are downloaded, we can generate the run configuration for inference runs."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Environment, RunConfiguration\n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"\n",
"env_file = os.path.join(download_path, \"conda_env_v_1_0_0.yml\")\n",
"inference_env = Environment(\"oj-inference-env\")\n",
"inference_env.python.conda_dependencies = CondaDependencies(\n",
"    conda_dependencies_file_path=env_file\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"[Optional] The environment can also be retrieved from the training run using the `get_environment()` API."
]
},
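{
"cell_type": "markdown",
"metadata": {},
"source": [
"For example (a sketch, assuming the AutoML step run can be located by the step name `automl_module` used above):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: retrieve the environment from the training run instead of the yml file.\n",
"automl_step_run = training_pipeline_run.find_step_run(\"automl_module\")[0]\n",
"inference_env = automl_step_run.get_environment()"
]
},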
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Once we have the environment for inference, we can build a run configuration based on it."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"run_config = RunConfiguration()\n",
"run_config.environment = inference_env"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Build and submit the inference pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The inference pipeline will create two different output formats: 1) a tabular dataset that contains the predictions and 2) an `OutputFileDatasetConfig` that can be consumed by subsequent pipeline steps."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data import OutputFileDatasetConfig\n",
"\n",
"output_data = OutputFileDatasetConfig(name=\"prediction_result\")\n",
"\n",
"output_ds_name = \"oj-output\"\n",
"\n",
"inference_step = PythonScriptStep(\n",
"    name=\"infer-results\",\n",
"    source_directory=\"scripts\",\n",
"    script_name=\"infer.py\",\n",
"    arguments=[\n",
"        \"--model_name\",\n",
"        model_name_str,\n",
"        \"--ouput_dataset_name\",  # note: spelling must match the argument parser in scripts/infer.py\n",
"        output_ds_name,\n",
"        \"--test_dataset_name\",\n",
"        test_dataset.name,\n",
"        \"--target_column_name\",\n",
"        target_column_name,\n",
"        \"--output_path\",\n",
"        output_data,\n",
"    ],\n",
"    compute_target=compute_target,\n",
"    allow_reuse=False,\n",
"    runconfig=run_config,\n",
")"
]
},
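{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `scripts/infer.py` script is not shown here either. As a rough, hypothetical sketch of what it might do, based only on the arguments above: load the registered model, forecast over the test dataset, write the predictions to the output path, and register them as a tabular dataset under the given name. The actual script may differ in details such as the model file name:\n",
"\n",
"```python\n",
"# Hypothetical sketch of scripts/infer.py -- inferred from the step arguments above.\n",
"import argparse\n",
"import os\n",
"\n",
"import joblib\n",
"\n",
"from azureml.core import Dataset, Run\n",
"from azureml.core.model import Model\n",
"\n",
"parser = argparse.ArgumentParser()\n",
"parser.add_argument(\"--model_name\", type=str)\n",
"parser.add_argument(\"--ouput_dataset_name\", type=str)  # spelling matches the step arguments\n",
"parser.add_argument(\"--test_dataset_name\", type=str)\n",
"parser.add_argument(\"--target_column_name\", type=str)\n",
"parser.add_argument(\"--output_path\", type=str)\n",
"args = parser.parse_args()\n",
"\n",
"run = Run.get_context()\n",
"ws = run.experiment.workspace\n",
"\n",
"# Load the registered model (assumes the AutoML model file is named model.pkl).\n",
"download_path = Model(ws, args.model_name).download(exist_ok=True)\n",
"fitted_model = joblib.load(os.path.join(download_path, \"model.pkl\"))\n",
"\n",
"# Forecast over the test dataset.\n",
"test_df = Dataset.get_by_name(ws, args.test_dataset_name).to_pandas_dataframe()\n",
"X_test = test_df.drop(columns=[args.target_column_name], errors=\"ignore\")\n",
"y_pred, _ = fitted_model.forecast(X_test, ignore_data_errors=True)\n",
"X_test[\"predicted\"] = y_pred\n",
"\n",
"# Persist the predictions for downstream steps and register them by name.\n",
"os.makedirs(args.output_path, exist_ok=True)\n",
"X_test.to_csv(os.path.join(args.output_path, \"predictions.csv\"), index=False)\n",
"datastore = ws.get_default_datastore()\n",
"Dataset.Tabular.register_pandas_dataframe(\n",
"    X_test, target=(datastore, \"oj-output/\"), name=args.ouput_dataset_name\n",
")\n",
"```"
]
},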
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_pipeline = Pipeline(ws, [inference_step])\n",
"inference_run = experiment.submit(inference_pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_run.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Get the predicted data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"\n",
"inference_ds = Dataset.get_by_name(ws, output_ds_name)\n",
"inference_df = inference_ds.to_pandas_dataframe()\n",
"inference_df.tail(5)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Schedule Pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This section shows how to schedule a pipeline for periodic predictions. For more info about pipeline schedules and pipeline endpoints, please follow this [notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-schedule-for-a-published-pipeline.ipynb)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_published_pipeline = inference_pipeline.publish(\n",
"    name=\"OJ Inference Test\", description=\"OJ Inference Test\"\n",
")\n",
"print(\"Newly published pipeline id: {}\".format(inference_published_pipeline.id))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If `test_dataset` is refreshed every 4 weeks before Friday 16:00 and we want to predict every 4 weeks (the forecast horizon), we can schedule our pipeline to run every 4 weeks at 16:00 to get up-to-date inference results. You can refresh your test dataset (a newer version will be created) periodically when new data is available (i.e., the target column in the test dataset would have values at the beginning, as context data, followed by NaNs to be predicted). The inference pipeline will pick up the context to further improve forecast accuracy."
]
},
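{
"cell_type": "markdown",
"metadata": {},
"source": [
"To illustrate the expected shape of such a refreshed test set, the hypothetical sketch below builds a small frame in which the first weeks of `Quantity` carry observed context values and the remaining rows are NaN, to be filled in by the forecast (the numbers are made up):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import pandas as pd\n",
"\n",
"# Illustrative sketch: observed context followed by NaNs to be predicted.\n",
"weeks = pd.date_range(\"1992-09-10\", periods=6, freq=\"W-THU\")\n",
"refreshed = pd.DataFrame(\n",
"    {\n",
"        \"WeekStarting\": weeks,\n",
"        \"Store\": 2,\n",
"        \"Brand\": \"dominicks\",\n",
"        \"Quantity\": [10560.0, 9984.0, np.nan, np.nan, np.nan, np.nan],\n",
"    }\n",
")\n",
"print(refreshed)"
]
},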
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# schedule\n",
"\n",
"from azureml.pipeline.core.schedule import ScheduleRecurrence, Schedule\n",
"\n",
"recurrence = ScheduleRecurrence(\n",
"    frequency=\"Week\", interval=4, week_days=[\"Friday\"], hours=[16], minutes=[0]\n",
")\n",
"\n",
"schedule = Schedule.create(\n",
"    workspace=ws,\n",
"    name=\"OJ_Inference_schedule\",\n",
"    pipeline_id=inference_published_pipeline.id,\n",
"    experiment_name=\"Schedule-run-OJ\",\n",
"    recurrence=recurrence,\n",
"    wait_for_provisioning=True,\n",
"    description=\"Schedule Run\",\n",
")\n",
"\n",
"# You may want to make sure that the schedule is provisioned properly\n",
"# before making any further changes to the schedule\n",
"\n",
"print(\"Created schedule with id: {}\".format(schedule.id))"
]
},
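{
"cell_type": "markdown",
"metadata": {},
"source": [
"To confirm that provisioning finished, you can inspect the schedule's status or list the schedules in the workspace (a small optional sketch):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: confirm the schedule was provisioned and is active.\n",
"print(\"Schedule status:\", schedule.status)\n",
"for s in Schedule.list(ws):\n",
"    print(s.id, s.name, s.status)"
]
},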
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### [Optional] Disable schedule"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"schedule.disable()"
]
}
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"category": "tutorial",
"celltoolbar": "Raw Cell Format",
"compute": [
"Remote"
],
"datasets": [
"Orange Juice Sales"
],
"deployment": [
"Azure Container Instance"
],
"exclude_from_index": false,
"framework": [
"Azure ML AutoML"
],
"friendly_name": "Forecasting orange juice sales with deployment",
"index_order": 1,
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
},
"tags": [
"None"
],
"task": "Forecasting"
},
"nbformat": 4,
"nbformat_minor": 4
}
@@ -0,0 +1,4 @@
name: auto-ml-forecasting-pipelines
dependencies:
- pip:
  - azureml-sdk
@@ -0,0 +1,37 @@
WeekStarting,Store,Brand,Advert,Price,Age60,COLLEGE,INCOME,Hincome150,Large HH,Minorities,WorkingWoman,SSTRDIST,SSTRVOL,CPDIST5,CPWVOL5
1992-09-10,2,dominicks,0,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-09-10,2,minute.maid,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-09-10,2,tropicana,0,2.64,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-09-10,5,dominicks,0,1.85,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-09-10,5,minute.maid,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-09-10,5,tropicana,0,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-09-10,8,dominicks,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-09-10,8,minute.maid,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-09-10,8,tropicana,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-09-17,2,dominicks,0,1.77,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-09-17,2,minute.maid,0,2.83,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-09-17,2,tropicana,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-09-17,5,dominicks,0,1.85,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-09-17,5,minute.maid,0,2.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-09-17,5,tropicana,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-09-17,8,dominicks,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-09-17,8,minute.maid,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-09-17,8,tropicana,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-09-24,2,dominicks,0,1.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-09-24,2,minute.maid,0,2.67,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-09-24,2,tropicana,1,2.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-09-24,5,dominicks,0,1.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-09-24,5,minute.maid,0,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-09-24,5,tropicana,1,2.78,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-09-24,8,dominicks,0,1.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-09-24,8,minute.maid,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-09-24,8,tropicana,1,2.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-10-01,2,dominicks,0,1.82,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-10-01,2,minute.maid,1,2.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-10-01,2,tropicana,0,2.97,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-10-01,5,dominicks,0,1.85,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-10-01,5,minute.maid,1,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-10-01,5,tropicana,0,2.78,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-10-01,8,dominicks,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-10-01,8,minute.maid,1,2.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-10-01,8,tropicana,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
@@ -0,0 +1,997 @@
WeekStarting,Store,Brand,Quantity,Advert,Price,Age60,COLLEGE,INCOME,Hincome150,Large HH,Minorities,WorkingWoman,SSTRDIST,SSTRVOL,CPDIST5,CPWVOL5
1990-06-14,2,dominicks,10560,1,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-06-14,2,minute.maid,4480,0,3.17,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-06-14,2,tropicana,8256,0,3.87,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-06-14,5,dominicks,1792,1,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-06-14,5,minute.maid,4224,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-06-14,5,tropicana,5888,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-06-14,8,dominicks,14336,1,1.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-06-14,8,minute.maid,6080,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-06-14,8,tropicana,8896,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-06-21,8,dominicks,6400,0,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-06-21,8,minute.maid,51968,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-06-21,8,tropicana,7296,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-06-28,5,dominicks,2496,0,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-06-28,5,minute.maid,4352,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-06-28,5,tropicana,6976,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-06-28,8,dominicks,3968,0,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-06-28,8,minute.maid,4928,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-06-28,8,tropicana,10368,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-05,5,dominicks,2944,0,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-07-05,5,minute.maid,4928,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-07-05,5,tropicana,6528,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-07-05,8,dominicks,4352,0,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-05,8,minute.maid,5312,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-05,8,tropicana,6976,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-12,5,dominicks,1024,0,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-07-12,5,minute.maid,31168,1,2.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-07-12,5,tropicana,4928,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-07-12,8,dominicks,3520,0,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-12,8,minute.maid,39424,1,2.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-12,8,tropicana,6464,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-19,8,dominicks,6464,0,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-19,8,minute.maid,5568,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-19,8,tropicana,8192,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-26,2,dominicks,8000,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-07-26,2,minute.maid,4672,0,3.17,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-07-26,2,tropicana,6144,0,3.87,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-07-26,5,dominicks,4224,0,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-07-26,5,minute.maid,10048,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-07-26,5,tropicana,5312,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-07-26,8,dominicks,5952,0,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-26,8,minute.maid,14592,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-26,8,tropicana,7936,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-02,2,dominicks,6848,1,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-02,2,minute.maid,20160,1,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-02,2,tropicana,3840,0,3.87,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-02,5,dominicks,4544,1,2.09,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-02,5,minute.maid,21760,1,2.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-02,5,tropicana,5120,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-02,8,dominicks,8832,1,2.09,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-02,8,minute.maid,22208,1,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-02,8,tropicana,6656,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-09,2,dominicks,2880,0,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-09,2,minute.maid,2688,0,3.17,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-09,2,tropicana,8000,0,3.87,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-09,5,dominicks,1728,0,2.09,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-09,5,minute.maid,4544,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-09,5,tropicana,7936,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-09,8,dominicks,7232,0,2.09,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-09,8,minute.maid,5760,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-09,8,tropicana,8256,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-16,5,dominicks,1216,0,2.09,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-16,5,minute.maid,52224,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-16,5,tropicana,6080,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-16,8,dominicks,5504,0,2.09,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-16,8,minute.maid,54016,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-16,8,tropicana,5568,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-23,2,dominicks,1600,0,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-23,2,minute.maid,3008,0,3.17,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-23,2,tropicana,8896,0,3.87,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-23,5,dominicks,1152,0,2.09,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-23,5,minute.maid,3584,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-23,5,tropicana,4160,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-23,8,dominicks,4800,0,2.09,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-23,8,minute.maid,5824,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-23,8,tropicana,7488,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-30,2,dominicks,25344,1,1.89,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-30,2,minute.maid,4672,0,3.17,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-30,2,tropicana,7168,0,3.87,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-30,5,dominicks,30144,1,1.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-30,5,minute.maid,5120,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-30,5,tropicana,5888,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-30,8,dominicks,52672,1,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-30,8,minute.maid,6528,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-30,8,tropicana,6144,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-06,2,dominicks,10752,0,1.89,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-09-06,2,minute.maid,2752,0,3.17,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-09-06,2,tropicana,10880,0,3.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-09-06,5,dominicks,8960,0,1.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-06,5,minute.maid,4416,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-06,5,tropicana,9536,0,3.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-06,8,dominicks,16448,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-06,8,minute.maid,5440,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-06,8,tropicana,11008,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-13,2,dominicks,6656,0,1.89,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-09-13,2,minute.maid,26176,1,2.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-09-13,2,tropicana,7744,0,3.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-09-13,5,dominicks,8192,0,1.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-13,5,minute.maid,30208,1,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-13,5,tropicana,8320,0,3.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-13,8,dominicks,19072,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-13,8,minute.maid,36544,1,2.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-13,8,tropicana,5760,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-20,2,dominicks,6592,0,1.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-09-20,2,minute.maid,3712,0,3.17,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-09-20,2,tropicana,8512,0,3.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-09-20,5,dominicks,6528,0,1.79,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-20,5,minute.maid,4160,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-20,5,tropicana,8000,0,3.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-20,8,dominicks,13376,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-20,8,minute.maid,3776,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-20,8,tropicana,10112,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-27,5,dominicks,34688,1,1.79,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-27,5,minute.maid,4992,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-27,5,tropicana,5824,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-27,8,dominicks,61440,1,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-27,8,minute.maid,5504,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-27,8,tropicana,8448,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-04,5,dominicks,4672,0,1.79,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-04,5,minute.maid,13952,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-04,5,tropicana,10624,1,3.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-04,8,dominicks,13760,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-04,8,minute.maid,12416,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-04,8,tropicana,8448,1,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-11,2,dominicks,1728,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-10-11,2,minute.maid,30656,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-10-11,2,tropicana,5504,0,3.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-10-11,5,dominicks,1088,0,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-11,5,minute.maid,47680,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-11,5,tropicana,6656,0,3.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-11,8,dominicks,3136,0,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-11,8,minute.maid,53696,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-11,8,tropicana,7424,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-18,2,dominicks,33792,1,1.24,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-10-18,2,minute.maid,3840,0,2.98,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-10-18,2,tropicana,5888,0,3.56,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-10-18,5,dominicks,69440,1,1.24,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-18,5,minute.maid,7616,0,2.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-18,5,tropicana,5184,0,3.51,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-18,8,dominicks,186176,1,1.14,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-18,8,minute.maid,5696,0,2.51,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-18,8,tropicana,5824,0,3.04,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-25,2,dominicks,1920,0,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-10-25,2,minute.maid,2816,0,3.17,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-10-25,2,tropicana,8384,0,3.56,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-10-25,5,dominicks,1280,0,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-25,5,minute.maid,8896,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-25,5,tropicana,4928,0,3.51,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-25,8,dominicks,3712,0,1.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-25,8,minute.maid,4864,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-25,8,tropicana,6656,0,3.04,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-01,2,dominicks,8960,1,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-01,2,minute.maid,23104,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-01,2,tropicana,5952,0,3.56,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-01,5,dominicks,35456,1,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-01,5,minute.maid,28544,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-01,5,tropicana,5888,0,3.51,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-01,8,dominicks,35776,1,1.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-01,8,minute.maid,37184,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-01,8,tropicana,6272,0,3.04,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-08,2,dominicks,11392,0,1.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-08,2,minute.maid,3392,0,3.17,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-08,2,tropicana,6848,0,3.56,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-08,5,dominicks,13824,0,1.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-08,5,minute.maid,5440,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-08,5,tropicana,5312,0,3.51,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-08,8,dominicks,26880,0,1.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-08,8,minute.maid,5504,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-08,8,tropicana,6912,0,3.04,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-15,2,dominicks,28416,0,0.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-15,2,minute.maid,26304,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-15,2,tropicana,9216,0,3.87,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-15,5,dominicks,14208,0,0.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-15,5,minute.maid,52416,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-15,5,tropicana,9984,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-15,8,dominicks,71680,0,0.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-15,8,minute.maid,51008,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-15,8,tropicana,10496,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-22,2,dominicks,17152,1,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-22,2,minute.maid,6336,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-22,2,tropicana,12160,0,2.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-22,5,dominicks,29312,1,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-22,5,minute.maid,11712,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-22,5,tropicana,8448,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-22,8,dominicks,25088,1,1.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-22,8,minute.maid,11072,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-22,8,tropicana,11840,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-29,2,dominicks,26560,1,2.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-29,2,minute.maid,9920,0,3.17,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-29,2,tropicana,12672,0,2.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-29,5,dominicks,52992,1,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-29,5,minute.maid,13952,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-29,5,tropicana,10880,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-29,8,dominicks,91456,1,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-29,8,minute.maid,12160,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-29,8,tropicana,9664,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-06,2,dominicks,6336,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-06,2,minute.maid,25280,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-06,2,tropicana,6528,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-06,5,dominicks,15680,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-06,5,minute.maid,36160,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-06,5,tropicana,5696,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-06,8,dominicks,23808,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-06,8,minute.maid,30528,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-06,8,tropicana,6272,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-13,2,dominicks,26368,1,1.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-13,2,minute.maid,14848,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-13,2,tropicana,6144,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-13,5,dominicks,43520,1,1.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-13,5,minute.maid,12864,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-13,5,tropicana,5696,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-13,8,dominicks,89856,1,1.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-13,8,minute.maid,12096,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-13,8,tropicana,7168,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-20,2,dominicks,896,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-20,2,minute.maid,12288,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-20,2,tropicana,21120,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-20,5,dominicks,3904,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-20,5,minute.maid,22208,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-20,5,tropicana,32384,0,2.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-20,8,dominicks,12224,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-20,8,minute.maid,16448,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-20,8,tropicana,29504,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-27,2,dominicks,1472,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-27,2,minute.maid,6272,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-27,2,tropicana,12416,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-27,5,dominicks,896,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-27,5,minute.maid,9984,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-27,5,tropicana,10752,0,2.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-27,8,dominicks,3776,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-27,8,minute.maid,9344,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-27,8,tropicana,8704,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-03,2,dominicks,1344,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-03,2,minute.maid,9152,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-03,2,tropicana,9472,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-03,5,dominicks,2240,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-03,5,minute.maid,14016,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-03,5,tropicana,6912,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-03,8,dominicks,13824,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-03,8,minute.maid,16128,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-03,8,tropicana,9280,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-10,2,dominicks,111680,1,0.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-10,2,minute.maid,4160,0,2.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-10,2,tropicana,17920,0,2.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-10,5,dominicks,125760,1,0.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-10,5,minute.maid,6080,0,2.46,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-10,5,tropicana,13440,0,2.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-10,8,dominicks,251072,1,0.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-10,8,minute.maid,5376,0,2.17,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-10,8,tropicana,12224,0,2.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-17,2,dominicks,1856,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-17,2,minute.maid,10176,0,2.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-17,2,tropicana,9408,0,2.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-17,5,dominicks,1408,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-17,5,minute.maid,7808,0,2.46,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-17,5,tropicana,7808,0,2.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-17,8,dominicks,4864,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-17,8,minute.maid,6656,0,2.17,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-17,8,tropicana,10368,0,2.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-24,2,dominicks,5568,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-24,2,minute.maid,29056,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-24,2,tropicana,6272,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-24,5,dominicks,7232,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-24,5,minute.maid,40896,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-24,5,tropicana,5248,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-24,8,dominicks,10176,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-24,8,minute.maid,59712,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-24,8,tropicana,8128,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-31,2,dominicks,32064,1,1.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-31,2,minute.maid,7104,0,2.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-31,2,tropicana,6912,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-31,5,dominicks,41216,1,1.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-31,5,minute.maid,6272,0,2.46,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-31,5,tropicana,6208,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-31,8,dominicks,105344,1,1.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-31,8,minute.maid,9856,0,2.17,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-31,8,tropicana,5952,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-07,2,dominicks,4352,0,1.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-07,2,minute.maid,7488,0,2.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-07,2,tropicana,16768,0,2.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-07,5,dominicks,9024,0,1.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-07,5,minute.maid,7872,0,2.41,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-07,5,tropicana,21440,0,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-07,8,dominicks,33600,0,1.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-07,8,minute.maid,6720,0,2.12,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-07,8,tropicana,21696,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-14,2,dominicks,704,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-14,2,minute.maid,4224,0,2.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-14,2,tropicana,6272,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-14,5,dominicks,1600,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-14,5,minute.maid,6144,0,2.41,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-14,5,tropicana,7360,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-14,8,dominicks,4736,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-14,8,minute.maid,4224,0,2.12,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-14,8,tropicana,7808,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-21,2,dominicks,13760,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-21,2,minute.maid,8960,0,2.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-21,2,tropicana,7936,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-21,5,dominicks,2496,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-21,5,minute.maid,8448,0,2.41,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-21,5,tropicana,6720,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-21,8,dominicks,10304,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-21,8,minute.maid,9728,0,2.12,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-21,8,tropicana,8128,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-28,2,dominicks,43328,1,1.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-28,2,minute.maid,22464,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-28,2,tropicana,6144,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-28,5,dominicks,6336,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-28,5,minute.maid,18688,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-28,5,tropicana,6656,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-28,8,dominicks,5056,1,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-28,8,minute.maid,40320,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-28,8,tropicana,7424,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-07,2,dominicks,57600,1,1.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-07,2,minute.maid,3840,0,2.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-07,2,tropicana,7936,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-07,5,dominicks,56384,1,1.09,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-07,5,minute.maid,6272,0,2.46,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-07,5,tropicana,6016,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-07,8,dominicks,179968,1,0.94,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-07,8,minute.maid,5120,0,2.17,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-07,8,tropicana,5952,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-14,2,dominicks,704,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-14,2,minute.maid,12992,0,2.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-14,2,tropicana,7808,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-14,5,dominicks,1600,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-14,5,minute.maid,12096,0,2.46,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-14,5,tropicana,6144,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-14,8,dominicks,4992,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-14,8,minute.maid,19264,0,2.17,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-14,8,tropicana,7616,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-21,2,dominicks,6016,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-21,2,minute.maid,70144,1,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-21,2,tropicana,6080,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-21,5,dominicks,2944,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-21,5,minute.maid,73216,1,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-21,5,tropicana,4928,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-21,8,dominicks,6400,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-21,8,minute.maid,170432,1,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-21,8,tropicana,5312,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-28,2,dominicks,10368,1,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-28,2,minute.maid,21248,0,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-28,2,tropicana,42176,1,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-28,5,dominicks,13504,1,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-28,5,minute.maid,18944,0,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-28,5,tropicana,67712,1,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-28,8,dominicks,14912,1,1.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-28,8,minute.maid,39680,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-28,8,tropicana,161792,1,1.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-04,2,dominicks,12608,0,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-04,2,minute.maid,5696,1,2.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-04,2,tropicana,4928,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-04,5,dominicks,5376,0,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-04,5,minute.maid,6400,1,2.46,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-04,5,tropicana,8640,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-04,8,dominicks,34624,0,1.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-04,8,minute.maid,8128,1,2.17,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-04,8,tropicana,17280,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-11,2,dominicks,6336,0,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-11,2,minute.maid,7680,0,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-11,2,tropicana,29504,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-11,5,dominicks,6656,0,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-11,5,minute.maid,8640,0,2.09,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-11,5,tropicana,35520,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-11,8,dominicks,10368,0,1.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-11,8,minute.maid,9088,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-11,8,tropicana,47040,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-18,2,dominicks,140736,1,0.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-18,2,minute.maid,6336,0,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-18,2,tropicana,9984,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-18,5,dominicks,95680,1,0.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-18,5,minute.maid,7296,0,2.09,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-18,5,tropicana,9664,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-18,8,dominicks,194880,1,0.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-18,8,minute.maid,6720,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-18,8,tropicana,14464,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-25,2,dominicks,960,1,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-25,2,minute.maid,8576,0,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-25,2,tropicana,35200,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-25,5,dominicks,896,1,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-25,5,minute.maid,12480,0,2.09,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-25,5,tropicana,49088,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-25,8,dominicks,5696,1,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-25,8,minute.maid,7552,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-25,8,tropicana,52928,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-05-02,2,dominicks,1216,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-02,2,minute.maid,15104,0,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-02,2,tropicana,23936,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-02,5,dominicks,1728,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-02,5,minute.maid,14144,0,2.09,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-02,5,tropicana,14912,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-02,8,dominicks,7168,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-05-02,8,minute.maid,24768,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-05-02,8,tropicana,21184,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-05-09,2,dominicks,1664,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-09,2,minute.maid,76480,1,1.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-09,2,tropicana,7104,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-09,5,dominicks,1280,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-09,5,minute.maid,88256,1,1.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-09,5,tropicana,6464,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-09,8,dominicks,2880,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-05-09,8,minute.maid,183296,1,1.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-05-09,8,tropicana,7360,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-05-16,2,dominicks,4992,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-16,2,minute.maid,5056,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-16,2,tropicana,24512,1,2.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-16,5,dominicks,5696,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-16,5,minute.maid,6848,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-16,5,tropicana,25024,1,2.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-16,8,dominicks,12288,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-05-16,8,minute.maid,8896,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-05-16,8,tropicana,15744,1,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-05-23,2,dominicks,27968,1,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-23,2,minute.maid,4736,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-23,2,tropicana,6336,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-23,5,dominicks,28288,1,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-23,5,minute.maid,7808,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-23,5,tropicana,6272,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-30,2,dominicks,12160,0,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-30,2,minute.maid,4480,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-30,2,tropicana,6080,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-30,5,dominicks,4864,0,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-30,5,minute.maid,6272,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-30,5,tropicana,5056,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-06,2,dominicks,2240,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-06,2,minute.maid,4032,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-06,2,tropicana,33536,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-06,5,dominicks,2880,0,2.09,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-06,5,minute.maid,6144,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-06,5,tropicana,47616,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-06,8,dominicks,9280,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-06,8,minute.maid,6656,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-06,8,tropicana,46912,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-13,2,dominicks,5504,1,1.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-13,2,minute.maid,14784,1,1.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-13,2,tropicana,13248,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-13,5,dominicks,5760,1,1.41,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-13,5,minute.maid,27776,1,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-13,5,tropicana,13888,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-13,8,dominicks,25856,1,1.26,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-13,8,minute.maid,35456,1,1.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-13,8,tropicana,18240,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-20,2,dominicks,8832,0,1.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-20,2,minute.maid,12096,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-20,2,tropicana,6208,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-20,5,dominicks,15040,0,1.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-20,5,minute.maid,20800,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-20,5,tropicana,6144,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-20,8,dominicks,19264,0,1.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-20,8,minute.maid,17408,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-20,8,tropicana,6464,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-27,2,dominicks,2624,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-27,2,minute.maid,41792,1,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-27,2,tropicana,10624,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-27,5,dominicks,5120,0,1.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-27,5,minute.maid,45696,1,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-27,5,tropicana,9344,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-27,8,dominicks,6848,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-27,8,minute.maid,75520,1,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-27,8,tropicana,8512,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-04,2,dominicks,10432,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-07-04,2,minute.maid,10560,0,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-07-04,2,tropicana,44672,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-07-04,5,dominicks,3264,0,1.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-04,5,minute.maid,14336,0,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-04,5,tropicana,32896,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-04,8,dominicks,12928,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-04,8,minute.maid,21632,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-04,8,tropicana,28416,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-11,5,dominicks,9536,1,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-11,5,minute.maid,4928,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-11,5,tropicana,21056,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-11,8,dominicks,44032,1,1.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-11,8,minute.maid,8384,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-11,8,tropicana,16960,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-18,2,dominicks,8320,0,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-07-18,2,minute.maid,4224,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-07-18,2,tropicana,20096,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-07-18,5,dominicks,6208,0,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-18,5,minute.maid,4608,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-18,5,tropicana,15360,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-18,8,dominicks,25408,0,1.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-18,8,minute.maid,9920,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-18,8,tropicana,8320,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-25,2,dominicks,6784,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-07-25,2,minute.maid,2880,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-07-25,2,tropicana,9152,1,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-07-25,5,dominicks,6592,0,1.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-25,5,minute.maid,5248,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-25,5,tropicana,8000,1,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-25,8,dominicks,38336,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-25,8,minute.maid,6592,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-25,8,tropicana,11136,1,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-01,2,dominicks,60544,1,0.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-08-01,2,minute.maid,3968,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-08-01,2,tropicana,21952,0,2.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-08-01,5,dominicks,63552,1,0.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-01,5,minute.maid,4224,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-01,5,tropicana,21120,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-01,8,dominicks,152384,1,0.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-01,8,minute.maid,7168,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-01,8,tropicana,27712,0,2.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-08,2,dominicks,20608,0,0.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-08-08,2,minute.maid,3712,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-08-08,2,tropicana,13568,0,2.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-08-08,5,dominicks,27968,0,0.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-08,5,minute.maid,4288,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-08,5,tropicana,11904,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-08,8,dominicks,54464,0,0.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-08,8,minute.maid,6208,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-08,8,tropicana,7744,0,2.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-15,5,dominicks,21760,1,1.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-15,5,minute.maid,16896,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-15,5,tropicana,5056,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-15,8,dominicks,47680,1,1.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-15,8,minute.maid,30528,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-15,8,tropicana,5184,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-22,5,dominicks,2688,0,1.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-22,5,minute.maid,77184,1,1.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-22,5,tropicana,4608,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-22,8,dominicks,14720,0,1.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-22,8,minute.maid,155840,1,1.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-22,8,tropicana,6272,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-29,2,dominicks,16064,0,1.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-08-29,2,minute.maid,2816,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-08-29,2,tropicana,4160,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-08-29,5,dominicks,10432,0,1.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-29,5,minute.maid,5184,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-29,5,tropicana,6016,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-29,8,dominicks,53248,0,1.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-29,8,minute.maid,10752,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-29,8,tropicana,7744,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-05,2,dominicks,12480,0,1.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-05,2,minute.maid,4288,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-05,2,tropicana,39424,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-05,5,dominicks,9792,0,1.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-09-05,5,minute.maid,5248,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-09-05,5,tropicana,50752,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-09-05,8,dominicks,40576,0,1.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-05,8,minute.maid,6976,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-05,8,tropicana,53184,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-12,2,dominicks,17024,0,1.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-12,2,minute.maid,18240,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-12,2,tropicana,5632,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-12,5,dominicks,8448,0,1.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-09-12,5,minute.maid,20672,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-09-12,5,tropicana,5632,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-09-12,8,dominicks,25856,0,1.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-12,8,minute.maid,31872,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-12,8,tropicana,6784,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-19,2,dominicks,13440,1,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-19,2,minute.maid,7360,0,1.95,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-19,2,tropicana,9024,1,2.68,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-19,8,dominicks,24064,1,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-19,8,minute.maid,5312,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-19,8,tropicana,8000,1,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-26,2,dominicks,10112,0,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-26,2,minute.maid,7808,0,1.83,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-26,2,tropicana,6016,0,3.44,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-26,5,dominicks,6912,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-09-26,5,minute.maid,12352,0,1.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-09-26,5,tropicana,6400,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-09-26,8,dominicks,15680,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-26,8,minute.maid,33344,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-26,8,tropicana,6592,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-03,2,dominicks,9088,0,1.56,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-03,2,minute.maid,13504,0,1.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-03,2,tropicana,7744,0,3.14,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-03,5,dominicks,8256,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-03,5,minute.maid,12032,0,1.79,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-03,5,tropicana,5440,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-03,8,dominicks,16576,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-03,8,minute.maid,13504,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-03,8,tropicana,5248,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-10,2,dominicks,22848,1,1.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-10,2,minute.maid,10048,0,1.91,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-10,2,tropicana,6784,0,3.07,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-10,5,dominicks,28672,1,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-10,5,minute.maid,13440,0,1.79,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-10,5,tropicana,8128,0,2.94,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-10,8,dominicks,49664,1,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-10,8,minute.maid,13504,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-10,8,tropicana,6592,0,2.94,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-17,2,dominicks,6976,0,1.65,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-17,2,minute.maid,135936,1,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-17,2,tropicana,6784,0,3.07,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-17,8,dominicks,10752,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-17,8,minute.maid,335808,1,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-17,8,tropicana,5888,0,2.94,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-24,2,dominicks,4160,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-24,2,minute.maid,5056,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-24,2,tropicana,6272,0,3.07,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-24,5,dominicks,4416,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-24,5,minute.maid,5824,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-24,5,tropicana,7232,0,2.94,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-24,8,dominicks,9792,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-24,8,minute.maid,13120,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-24,8,tropicana,6336,0,2.94,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-31,2,dominicks,3328,0,1.83,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-31,2,minute.maid,27968,0,1.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-31,2,tropicana,5312,0,3.07,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-31,5,dominicks,1856,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-31,5,minute.maid,50112,0,1.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-31,5,tropicana,7168,0,2.94,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-31,8,dominicks,7104,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-31,8,minute.maid,49664,0,1.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-31,8,tropicana,5888,0,2.94,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-07,2,dominicks,12096,1,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-07,2,minute.maid,4736,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-07,2,tropicana,9216,0,3.11,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-07,5,dominicks,6528,1,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-07,5,minute.maid,5184,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-07,5,tropicana,7872,0,2.94,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-07,8,dominicks,9216,1,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-07,8,minute.maid,10880,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-07,8,tropicana,6080,0,2.94,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-14,2,dominicks,6208,0,1.76,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-14,2,minute.maid,7808,0,2.14,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-14,2,tropicana,7296,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-14,5,dominicks,6080,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-14,5,minute.maid,8384,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-14,5,tropicana,7552,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-14,8,dominicks,12608,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-14,8,minute.maid,9984,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-14,8,tropicana,6848,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-21,2,dominicks,3008,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-21,2,minute.maid,12480,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-21,2,tropicana,34240,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-21,5,dominicks,3456,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-21,5,minute.maid,10112,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-21,5,tropicana,69504,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-21,8,dominicks,16448,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-21,8,minute.maid,9216,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-21,8,tropicana,54016,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-28,2,dominicks,19456,1,1.5,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-28,2,minute.maid,9664,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-28,2,tropicana,7168,0,2.64,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-28,5,dominicks,25856,1,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-28,5,minute.maid,8384,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-28,5,tropicana,8960,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-28,8,dominicks,27968,1,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-28,8,minute.maid,7680,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-28,8,tropicana,10368,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-05,2,dominicks,16768,0,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-05,2,minute.maid,7168,0,2.06,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-05,2,tropicana,6080,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-05,5,dominicks,25728,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-05,5,minute.maid,11456,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-05,5,tropicana,6912,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-05,8,dominicks,37824,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-05,8,minute.maid,7296,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-05,8,tropicana,5568,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-12,2,dominicks,13568,1,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-12,2,minute.maid,4480,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-12,2,tropicana,5120,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-12,5,dominicks,23552,1,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-12,5,minute.maid,5952,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-12,5,tropicana,6656,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-12,8,dominicks,33664,1,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-12,8,minute.maid,8192,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-12,8,tropicana,4864,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-19,2,dominicks,6080,0,1.61,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-19,2,minute.maid,5952,0,2.22,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-19,2,tropicana,8320,0,2.74,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-19,5,dominicks,2944,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-19,5,minute.maid,8512,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-19,5,tropicana,8192,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-19,8,dominicks,17728,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-19,8,minute.maid,6080,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-19,8,tropicana,7232,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-26,2,dominicks,10432,1,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-26,2,minute.maid,21696,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-26,2,tropicana,17728,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-26,5,dominicks,5888,1,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-26,5,minute.maid,27968,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-26,5,tropicana,13440,0,2.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-26,8,dominicks,25088,1,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-26,8,minute.maid,15040,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-26,8,tropicana,15232,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-02,2,dominicks,11712,0,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-02,2,minute.maid,12032,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-02,2,tropicana,13120,0,2.35,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-02,5,dominicks,6848,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-02,5,minute.maid,24000,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-02,5,tropicana,12160,0,2.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-02,8,dominicks,13184,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-02,8,minute.maid,9472,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-02,8,tropicana,47040,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-09,2,dominicks,4032,0,1.76,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-09,2,minute.maid,7040,0,2.12,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-09,2,tropicana,13120,0,2.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-09,5,dominicks,1792,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-09,5,minute.maid,6848,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-09,5,tropicana,11840,0,2.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-09,8,dominicks,3136,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-09,8,minute.maid,5888,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-09,8,tropicana,9280,0,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-16,2,dominicks,6336,0,1.82,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-16,2,minute.maid,10240,1,2.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-16,2,tropicana,9792,0,2.43,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-16,5,dominicks,5248,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-16,5,minute.maid,15104,1,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-16,5,tropicana,8640,0,2.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-16,8,dominicks,5696,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-16,8,minute.maid,14336,1,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-16,8,tropicana,6720,0,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-23,2,dominicks,13632,0,1.47,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-23,2,minute.maid,6848,1,2.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-23,2,tropicana,3520,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-23,5,dominicks,16768,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-23,5,minute.maid,11392,1,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-23,5,tropicana,5888,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-23,8,dominicks,19008,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-23,8,minute.maid,11712,1,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-23,8,tropicana,5056,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-30,2,dominicks,45120,0,1.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-30,2,minute.maid,3968,0,2.61,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-30,2,tropicana,5504,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-30,5,dominicks,52160,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-30,5,minute.maid,5824,0,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-30,5,tropicana,7424,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-30,8,dominicks,121664,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-30,8,minute.maid,7936,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-30,8,tropicana,6080,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-06,2,dominicks,9984,0,1.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-06,2,minute.maid,5888,0,2.26,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-06,2,tropicana,6720,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-06,5,dominicks,16640,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-06,5,minute.maid,7488,0,2.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-06,5,tropicana,5632,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-06,8,dominicks,38848,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-06,8,minute.maid,5184,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-06,8,tropicana,10496,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-13,2,dominicks,4800,0,1.82,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-13,2,minute.maid,6208,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-13,2,tropicana,20224,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-13,5,dominicks,1344,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-13,5,minute.maid,8320,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-13,5,tropicana,33600,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-13,8,dominicks,6144,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-13,8,minute.maid,7168,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-13,8,tropicana,39040,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-20,2,dominicks,11776,0,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-20,2,minute.maid,72256,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-20,2,tropicana,5056,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-20,5,dominicks,4608,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-20,5,minute.maid,99904,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-20,5,tropicana,5376,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-20,8,dominicks,13632,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-20,8,minute.maid,216064,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-20,8,tropicana,4480,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-27,2,dominicks,11584,0,1.54,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-27,2,minute.maid,11520,0,2.11,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-27,2,tropicana,43584,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-27,5,dominicks,12672,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-27,5,minute.maid,6976,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-27,5,tropicana,54272,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-27,8,dominicks,9792,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-27,8,minute.maid,15040,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-27,8,tropicana,61760,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-05,2,dominicks,51264,1,1.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-05,2,minute.maid,5824,0,2.35,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-05,2,tropicana,25728,0,1.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-05,5,dominicks,48640,1,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-05,5,minute.maid,9984,0,2.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-05,5,tropicana,33600,0,1.79,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-05,8,dominicks,86912,1,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-05,8,minute.maid,11840,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-05,8,tropicana,15360,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-12,2,dominicks,14976,0,1.44,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-12,2,minute.maid,19392,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-12,2,tropicana,31808,0,1.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-12,5,dominicks,13248,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-12,5,minute.maid,32832,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-12,5,tropicana,24448,0,1.79,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-12,8,dominicks,24512,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-12,8,minute.maid,25472,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-12,8,tropicana,54976,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-19,2,dominicks,30784,0,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-19,2,minute.maid,9536,0,2.1,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-19,2,tropicana,20736,0,1.91,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-19,5,dominicks,29248,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-19,5,minute.maid,8128,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-19,5,tropicana,22784,0,1.79,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-19,8,dominicks,58048,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-19,8,minute.maid,16384,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-19,8,tropicana,34368,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-26,2,dominicks,12480,0,1.6,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-26,2,minute.maid,5312,0,2.28,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-26,2,tropicana,15168,0,2.81,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-26,5,dominicks,4608,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-26,5,minute.maid,6464,0,2.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-26,5,tropicana,19008,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-26,8,dominicks,13952,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-26,8,minute.maid,20480,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-26,8,tropicana,10752,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-04-02,2,dominicks,3264,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-04-02,2,minute.maid,14528,1,1.9,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-04-02,2,tropicana,28096,1,2.5,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-04-02,5,dominicks,3136,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-04-02,5,minute.maid,36800,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-04-02,5,tropicana,15808,1,2.5,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-04-02,8,dominicks,15168,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-04-02,8,minute.maid,34688,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-04-02,8,tropicana,20096,1,2.5,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-04-09,2,dominicks,8768,0,1.48,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-04-09,2,minute.maid,12416,0,2.12,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-04-09,2,tropicana,12416,0,2.58,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-04-09,5,dominicks,13184,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-04-09,5,minute.maid,12928,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-04-09,5,tropicana,14144,0,2.5,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-04-09,8,dominicks,14592,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-04-09,8,minute.maid,22400,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-04-09,8,tropicana,16192,0,2.5,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-04-16,2,dominicks,70848,1,1.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-04-16,2,minute.maid,5376,0,2.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-04-16,2,tropicana,5376,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-04-16,5,dominicks,67712,1,1.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-04-16,5,minute.maid,7424,0,2.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-04-16,5,tropicana,9600,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-04-16,8,dominicks,145088,1,1.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-04-16,8,minute.maid,7808,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-04-16,8,tropicana,6528,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-04-23,2,dominicks,18560,0,1.42,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-04-23,2,minute.maid,19008,1,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-04-23,2,tropicana,9792,0,2.67,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-04-23,5,dominicks,18880,0,1.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-04-23,5,minute.maid,34176,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-04-23,5,tropicana,10112,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-04-23,8,dominicks,43712,0,1.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-04-23,8,minute.maid,48064,1,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-04-23,8,tropicana,8320,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-04-30,2,dominicks,9152,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-04-30,2,minute.maid,3904,0,2.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-04-30,2,tropicana,16960,1,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-04-30,5,dominicks,6208,0,1.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-04-30,5,minute.maid,4160,0,2.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-04-30,5,tropicana,31872,1,2.24,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-04-30,8,dominicks,20608,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-04-30,8,minute.maid,7360,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-04-30,8,tropicana,30784,1,2.16,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-05-07,2,dominicks,9600,0,2.0,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-05-07,2,minute.maid,6336,0,2.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-05-07,2,tropicana,8320,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-05-07,5,dominicks,5952,0,1.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-05-07,5,minute.maid,5952,0,2.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-05-07,5,tropicana,9280,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-05-07,8,dominicks,18752,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-05-07,8,minute.maid,6272,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-05-07,8,tropicana,18048,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-05-14,2,dominicks,4800,0,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-05-14,2,minute.maid,5440,0,2.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-05-14,2,tropicana,6912,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-05-14,5,dominicks,4160,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-05-14,5,minute.maid,6528,0,2.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-05-14,5,tropicana,7680,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-05-14,8,dominicks,20160,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-05-14,8,minute.maid,6400,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-05-14,8,tropicana,12864,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-05-21,2,dominicks,9664,0,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-05-21,2,minute.maid,22400,1,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-05-21,2,tropicana,6976,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-05-21,5,dominicks,23488,0,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-05-21,5,minute.maid,30656,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-05-21,5,tropicana,8704,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-05-21,8,dominicks,18688,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-05-21,8,minute.maid,54592,1,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-05-21,8,tropicana,7168,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-05-28,2,dominicks,45568,0,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-05-28,2,minute.maid,3968,0,2.84,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-05-28,2,tropicana,7232,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-05-28,5,dominicks,60480,0,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-05-28,5,minute.maid,6656,0,2.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-05-28,5,tropicana,9920,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-05-28,8,dominicks,133824,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-05-28,8,minute.maid,8128,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-05-28,8,tropicana,9024,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-06-04,2,dominicks,20992,0,1.74,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-06-04,2,minute.maid,3264,0,2.89,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-06-04,2,tropicana,51520,1,2.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-06-04,5,dominicks,20416,0,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-06-04,5,minute.maid,4416,0,2.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-06-04,5,tropicana,91968,1,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-06-04,8,dominicks,63488,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-06-04,8,minute.maid,4928,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-06-04,8,tropicana,84992,1,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-06-11,2,dominicks,6592,0,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-06-11,2,minute.maid,4352,0,2.89,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-06-11,2,tropicana,22272,0,2.21,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-06-11,5,dominicks,6336,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-06-11,5,minute.maid,5696,0,2.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-06-11,5,tropicana,44096,0,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-06-11,8,dominicks,71040,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-06-11,8,minute.maid,5440,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-06-11,8,tropicana,14144,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-06-18,2,dominicks,4992,0,2.05,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-06-18,2,minute.maid,4480,0,2.89,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-06-18,2,tropicana,46144,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-06-25,2,dominicks,8064,0,1.24,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-06-25,2,minute.maid,3840,0,2.52,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-06-25,2,tropicana,4352,1,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-06-25,5,dominicks,1408,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-06-25,5,minute.maid,5696,0,2.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-06-25,5,tropicana,7296,1,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-06-25,8,dominicks,15360,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-06-25,8,minute.maid,5888,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-06-25,8,tropicana,7488,1,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-07-02,2,dominicks,7360,0,1.61,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-07-02,2,minute.maid,13312,1,2.0,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-07-02,2,tropicana,17280,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-07-02,5,dominicks,4672,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-07-02,5,minute.maid,39680,1,2.01,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-07-02,5,tropicana,12928,0,2.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-07-02,8,dominicks,17728,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-07-02,8,minute.maid,23872,1,2.02,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-07-02,8,tropicana,12352,0,2.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-07-09,2,dominicks,10048,0,1.4,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-07-09,2,minute.maid,3776,1,2.33,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-07-09,2,tropicana,5696,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-07-09,5,dominicks,19520,0,1.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-07-09,5,minute.maid,6208,1,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-07-09,5,tropicana,6848,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-07-09,8,dominicks,24256,0,1.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-07-09,8,minute.maid,6848,1,2.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-07-09,8,tropicana,5696,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-07-16,2,dominicks,10112,0,1.91,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-07-16,2,minute.maid,4800,0,2.89,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-07-16,2,tropicana,6848,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-07-16,5,dominicks,7872,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-07-16,5,minute.maid,7872,0,2.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-07-16,5,tropicana,8064,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-07-16,8,dominicks,19968,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-07-16,8,minute.maid,8192,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-07-16,8,tropicana,7680,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-07-23,2,dominicks,9152,0,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-07-23,2,minute.maid,24960,1,2.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-07-23,2,tropicana,4416,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-07-23,5,dominicks,5184,0,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-07-23,5,minute.maid,54528,1,2.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-07-23,5,tropicana,4992,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-07-23,8,dominicks,15936,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-07-23,8,minute.maid,55040,1,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-07-23,8,tropicana,5440,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-07-30,2,dominicks,36288,1,1.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-07-30,2,minute.maid,4544,0,2.86,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-07-30,2,tropicana,4672,0,3.16,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-07-30,5,dominicks,42240,1,1.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-07-30,5,minute.maid,6400,0,2.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-07-30,5,tropicana,7360,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-07-30,8,dominicks,76352,1,1.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-07-30,8,minute.maid,6528,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-07-30,8,tropicana,5632,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-08-06,2,dominicks,3776,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-08-06,2,minute.maid,3968,1,2.81,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-08-06,2,tropicana,7168,1,3.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-08-06,5,dominicks,6592,1,1.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-08-06,5,minute.maid,5888,1,2.65,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-08-06,5,tropicana,8384,1,2.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-08-06,8,dominicks,17408,1,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-08-06,8,minute.maid,6208,1,2.45,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-08-06,8,tropicana,8960,1,2.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-08-13,2,dominicks,3328,0,1.97,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-08-13,2,minute.maid,49600,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-08-13,2,tropicana,5056,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-08-13,5,dominicks,2112,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-08-13,5,minute.maid,56384,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-08-13,5,tropicana,8832,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-08-13,8,dominicks,17536,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-08-13,8,minute.maid,94720,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-08-13,8,tropicana,6080,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-08-20,2,dominicks,13824,0,1.36,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-08-20,2,minute.maid,23488,1,1.94,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-08-20,2,tropicana,13376,1,2.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-08-20,5,dominicks,21248,0,1.79,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-08-20,5,minute.maid,27072,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-08-20,5,tropicana,17728,1,2.79,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-08-20,8,dominicks,31232,0,1.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-08-20,8,minute.maid,55552,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-08-20,8,tropicana,8576,1,2.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-08-27,2,dominicks,9024,0,1.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-08-27,2,minute.maid,19008,0,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-08-27,2,tropicana,8128,0,2.75,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-08-27,5,dominicks,1856,0,1.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-08-27,5,minute.maid,3840,0,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-08-27,5,tropicana,9600,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-08-27,8,dominicks,19200,0,1.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-08-27,8,minute.maid,18688,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-08-27,8,tropicana,8000,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-09-03,2,dominicks,2048,0,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-09-03,2,minute.maid,11584,0,1.81,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-09-03,2,tropicana,19456,1,2.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||||
|
1992-09-03,5,dominicks,3712,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-09-03,5,minute.maid,6144,0,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-09-03,5,tropicana,25664,1,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||||
|
1992-09-03,8,dominicks,12800,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-09-03,8,minute.maid,14656,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
1992-09-03,8,tropicana,21760,1,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||||
|
@@ -0,0 +1,155 @@
import argparse
from datetime import datetime
import os
import uuid

import numpy as np
import pandas as pd

from pandas.tseries.frequencies import to_offset
from sklearn.externals import joblib
from sklearn.metrics import mean_absolute_error, mean_squared_error

from azureml.data.dataset_factory import TabularDatasetFactory
from azureml.automl.runtime.shared.score import scoring, constants as metrics_constants
import azureml.automl.core.shared.constants as constants
from azureml.core import Run, Dataset, Model

try:
    import torch

    _torch_present = True
except ImportError:
    _torch_present = False


def infer_forecasting_dataset_tcn(
    X_test, y_test, model, output_path, output_dataset_name="results"
):
    y_pred, df_all = model.forecast(X_test, y_test)

    run = Run.get_context()

    registered_train = TabularDatasetFactory.register_pandas_dataframe(
        df_all,
        target=(
            run.experiment.workspace.get_default_datastore(),
            datetime.now().strftime("%Y-%m-%d-") + str(uuid.uuid4())[:6],
        ),
        name=output_dataset_name,
    )
    df_all.to_csv(os.path.join(output_path, output_dataset_name + ".csv"), index=False)


def map_location_cuda(storage, loc):
    return storage.cuda()


def get_model(model_path, model_file_name):
    model_full_path = os.path.join(model_path, model_file_name)
    print(model_full_path)
    if model_file_name.endswith("pt"):
        # Load the forecasting TCN torch model.
        assert _torch_present, "Loading DNN models requires torch to be present."
        if torch.cuda.is_available():
            map_location = map_location_cuda
        else:
            map_location = "cpu"
        with open(model_full_path, "rb") as fh:
            fitted_model = torch.load(fh, map_location=map_location)
    else:
        # Load the sklearn pipeline.
        fitted_model = joblib.load(model_full_path)
    return fitted_model


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name", type=str, dest="model_name", help="Model to be loaded"
    )
    parser.add_argument(
        "--output_dataset_name",
        type=str,
        dest="output_dataset_name",
        default="results",
        help="Dataset name of the final output",
    )
    parser.add_argument(
        "--target_column_name",
        type=str,
        dest="target_column_name",
        help="The target column name.",
    )
    parser.add_argument(
        "--test_dataset_name",
        type=str,
        dest="test_dataset_name",
        default="results",
        help="Dataset name of the test dataset",
    )
    parser.add_argument(
        "--output_path",
        type=str,
        dest="output_path",
        default="results",
        help="The output path",
    )
    args = parser.parse_args()
    return args


def get_data(run, fitted_model, target_column_name, test_dataset_name):
    # Get the input dataset by name.
    test_dataset = Dataset.get_by_name(run.experiment.workspace, test_dataset_name)
    test_df = test_dataset.to_pandas_dataframe()
    if target_column_name in test_df:
        y_test = test_df.pop(target_column_name).values
    else:
        y_test = np.full(test_df.shape[0], np.nan)

    return test_df, y_test


def get_model_filename(run, model_name, model_path):
    model = Model(run.experiment.workspace, model_name)
    if "model_file_name" in model.tags:
        return model.tags["model_file_name"]
    is_pkl = True
    if model.tags.get("algorithm") == "TCNForecaster" or os.path.exists(
        os.path.join(model_path, "model.pt")
    ):
        is_pkl = False
    return "model.pkl" if is_pkl else "model.pt"


if __name__ == "__main__":
    run = Run.get_context()

    args = get_args()
    model_name = args.model_name
    output_dataset_name = args.output_dataset_name
    test_dataset_name = args.test_dataset_name
    target_column_name = args.target_column_name
    print("args passed are:")

    print(model_name)
    print(test_dataset_name)
    print(output_dataset_name)
    print(target_column_name)

    model_path = Model.get_model_path(model_name)
    model_file_name = get_model_filename(run, model_name, model_path)
    print(model_file_name)
    fitted_model = get_model(model_path, model_file_name)

    X_test_df, y_test = get_data(
        run, fitted_model, target_column_name, test_dataset_name
    )

    infer_forecasting_dataset_tcn(
        X_test_df, y_test, fitted_model, args.output_path, output_dataset_name
    )
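For orientation, the inference script above is written to run as a pipeline step. The sketch below is illustrative only and not part of these commits: the step name, the script filename (assumed here to be infer.py), the model name "forecast-model", the dataset name "oj-test-set", and the compute target "cpu-cluster" are all placeholder assumptions.

from azureml.core import Experiment, Workspace
from azureml.data import OutputFileDatasetConfig
from azureml.pipeline.core import Pipeline
from azureml.pipeline.steps import PythonScriptStep

ws = Workspace.from_config()
# The step writes <output_dataset_name>.csv under this output folder.
output_dir = OutputFileDatasetConfig(name="results")

infer_step = PythonScriptStep(
    name="infer-forecast",
    script_name="infer.py",  # assumed filename of the script above
    source_directory=".",
    compute_target="cpu-cluster",  # placeholder: an existing compute target
    arguments=[
        "--model_name", "forecast-model",  # placeholder registered model name
        "--test_dataset_name", "oj-test-set",  # placeholder registered dataset
        "--target_column_name", "Quantity",
        "--output_path", output_dir,
    ],
    allow_reuse=False,
)

pipeline = Pipeline(ws, steps=[infer_step])
run = Experiment(ws, "forecast-inference").submit(pipeline)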
@@ -0,0 +1,64 @@
import argparse
import os
import uuid
import shutil

from azureml.core import Dataset, Workspace
from azureml.core.model import Model
from azureml.core.run import Run, _OfflineRun
import azureml.automl.core.shared.constants as constants
from azureml.train.automl.run import AutoMLRun


def get_best_automl_run(pipeline_run):
    all_children = [c for c in pipeline_run.get_children()]
    automl_step = [
        c for c in all_children if c.properties.get("runTemplate") == "AutoML"
    ]
    for c in all_children:
        print(c, c.properties)
    automlrun = AutoMLRun(pipeline_run.experiment, automl_step[0].id)
    best = automlrun.get_best_child()
    return best


def get_model_path(model_artifact_path):
    return model_artifact_path.split("/")[1]


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name")
    parser.add_argument("--model_path")
    parser.add_argument("--ds_name")
    args = parser.parse_args()

    print("Argument 1(model_name): %s" % args.model_name)
    print("Argument 2(model_path): %s" % args.model_path)
    print("Argument 3(ds_name): %s" % args.ds_name)

    run = Run.get_context()
    ws = None
    if type(run) == _OfflineRun:
        ws = Workspace.from_config()
    else:
        ws = run.experiment.workspace

    train_ds = Dataset.get_by_name(ws, args.ds_name)
    datasets = [(Dataset.Scenario.TRAINING, train_ds)]
    new_dir = str(uuid.uuid4())
    os.makedirs(new_dir)

    # Register model with training dataset
    best_run = get_best_automl_run(run.parent)
    model_artifact_path = best_run.properties[constants.PROPERTY_KEY_OF_MODEL_PATH]
    algo = best_run.properties.get("run_algorithm")
    model_artifact_dir = model_artifact_path.split("/")[0]
    model_file_name = model_artifact_path.split("/")[1]
    model = best_run.register_model(
        args.model_name,
        model_path=model_artifact_dir,
        datasets=datasets,
        tags={"algorithm": algo, "model_file_name": model_file_name},
    )

    print("Registered version {0} of model {1}".format(model.version, model.name))
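The tags written at registration ("algorithm" and "model_file_name") are what the inference script's get_model_filename helper reads to decide between model.pt and model.pkl. A minimal sketch of fetching the registered model outside a run, using a placeholder model name:

from azureml.core import Workspace
from azureml.core.model import Model

ws = Workspace.from_config()
model = Model(ws, name="forecast-model")  # placeholder name for illustration
print(model.name, model.version)
print(model.tags.get("algorithm"), model.tags.get("model_file_name"))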
@@ -229,7 +229,7 @@
 "output[\"Resource Group\"] = ws.resource_group\n",
 "output[\"Location\"] = ws.location\n",
 "output[\"Run History Name\"] = experiment_name\n",
-"pd.set_option(\"display.max_colwidth\", -1)\n",
+"pd.set_option(\"display.max_colwidth\", None)\n",
 "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
 "print(outputDf.T)"
 ]
@@ -387,8 +387,8 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"### Retrieve the best model\n",
-"Below we select the best model from all the training iterations using get_output method."
+"### Retrieve the Best Run details\n",
+"Below we retrieve the best Run object from among all the runs in the experiment."
 ]
 },
 {
@@ -397,8 +397,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"best_run, fitted_model = remote_run.get_output()\n",
-"fitted_model.steps"
+"best_run = remote_run.get_best_child()\n",
+"best_run"
 ]
 },
 {
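Note that, unlike get_output(), get_best_child() returns only the Run object; if the fitted model is still needed, it can be downloaded from the run's artifacts. A minimal sketch, assuming the usual AutoML artifact path outputs/model.pkl for non-DNN models:

import joblib

best_run = remote_run.get_best_child()
# Assumption: non-DNN AutoML runs store the fitted pipeline at outputs/model.pkl.
best_run.download_file("outputs/model.pkl", output_file_path="model.pkl")
fitted_model = joblib.load("model.pkl")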
@@ -46,11 +46,11 @@ def kpss_test(series, **kw):
     """
     if kw["store"]:
         statistic, p_value, critical_values, rstore = stattools.kpss(
-            series, regression=kw["reg_type"], lags=kw["lags"], store=kw["store"]
+            series, regression=kw["reg_type"], nlags=kw["lags"], store=kw["store"]
         )
     else:
         statistic, p_value, lags, critical_values = stattools.kpss(
-            series, regression=kw["reg_type"], lags=kw["lags"]
+            series, regression=kw["reg_type"], nlags=kw["lags"]
         )
     output = {
         "statistic": statistic,
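The lags-to-nlags change above tracks a statsmodels API rename: newer statsmodels versions renamed the kpss keyword from lags to nlags. A minimal standalone call for reference, assuming statsmodels >= 0.12:

import numpy as np
from statsmodels.tsa import stattools

series = np.random.randn(200).cumsum()  # toy random-walk series for illustration
statistic, p_value, lags, critical_values = stattools.kpss(
    series, regression="c", nlags="auto"
)
print(statistic, p_value, critical_values)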
@@ -1,21 +1,5 @@
 {
 "cells": [
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"Copyright (c) Microsoft Corporation. All rights reserved.\n",
-"\n",
-"Licensed under the MIT License."
-]
-},
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-""
-]
-},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -90,16 +74,6 @@
 "This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
 ]
 },
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
-"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
-]
-},
 {
 "cell_type": "code",
 "execution_count": null,
@@ -109,18 +83,19 @@
 "ws = Workspace.from_config()\n",
 "\n",
 "# choose a name for experiment\n",
-"experiment_name = 'automl-classification-ccard-local'\n",
+"experiment_name = \"automl-classification-ccard-local\"\n",
 "\n",
-"experiment=Experiment(ws, experiment_name)\n",
+"experiment = Experiment(ws, experiment_name)\n",
 "\n",
 "output = {}\n",
-"output['Subscription ID'] = ws.subscription_id\n",
-"output['Workspace'] = ws.name\n",
-"output['Resource Group'] = ws.resource_group\n",
-"output['Location'] = ws.location\n",
-"output['Experiment Name'] = experiment.name\n",
-"pd.set_option('display.max_colwidth', -1)\n",
-"outputDf = pd.DataFrame(data = output, index = [''])\n",
+"output[\"Subscription ID\"] = ws.subscription_id\n",
+"output[\"Workspace\"] = ws.name\n",
+"output[\"Resource Group\"] = ws.resource_group\n",
+"output[\"Location\"] = ws.location\n",
+"output[\"Experiment Name\"] = experiment.name\n",
+"output[\"SDK Version\"] = azureml.core.VERSION\n",
+"pd.set_option(\"display.max_colwidth\", None)\n",
+"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
 "outputDf.T"
 ]
 },
@@ -142,7 +117,7 @@
 "data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv\"\n",
 "dataset = Dataset.Tabular.from_delimited_files(data)\n",
 "training_data, validation_data = dataset.random_split(percentage=0.8, seed=223)\n",
-"label_column_name = 'Class'"
+"label_column_name = \"Class\""
 ]
 },
 {
@@ -168,23 +143,26 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"name": "enable-ensemble"
+},
 "outputs": [],
 "source": [
 "automl_settings = {\n",
 "    \"n_cross_validations\": 3,\n",
-"    \"primary_metric\": 'AUC_weighted',\n",
+"    \"primary_metric\": \"average_precision_score_weighted\",\n",
 "    \"experiment_timeout_hours\": 0.25,  # This is a time limit for testing purposes, remove it for real use cases, this will drastically limit ability to find the best model possible\n",
 "    \"verbosity\": logging.INFO,\n",
-"    \"enable_stack_ensemble\": False\n",
+"    \"enable_stack_ensemble\": False,\n",
 "}\n",
 "\n",
-"automl_config = AutoMLConfig(task = 'classification',\n",
-"                             debug_log = 'automl_errors.log',\n",
-"                             training_data = training_data,\n",
-"                             label_column_name = label_column_name,\n",
-"                             **automl_settings\n",
-"                             )"
+"automl_config = AutoMLConfig(\n",
+"    task=\"classification\",\n",
+"    debug_log=\"automl_errors.log\",\n",
+"    training_data=training_data,\n",
+"    label_column_name=label_column_name,\n",
+"    **automl_settings,\n",
+")"
 ]
 },
 {
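An aside on the metric change above: average_precision_score_weighted is generally a better fit than AUC_weighted for a heavily imbalanced dataset like this credit-card one, since average precision concentrates on the minority (fraud) class. As a reference point, the primary metrics AutoML supports for a task can be listed; a small sketch, assuming azureml-train-automl is installed:

from azureml.train.automl.utilities import get_primary_metrics

# Lists the primary metrics AutoML accepts for classification tasks.
print(get_primary_metrics("classification"))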
@@ -201,7 +179,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"local_run = experiment.submit(automl_config, show_output = True)"
+"local_run = experiment.submit(automl_config, show_output=True)"
 ]
 },
 {
@@ -211,8 +189,8 @@
 "outputs": [],
 "source": [
 "# If you need to retrieve a run that already started, use the following code\n",
-"#from azureml.train.automl.run import AutoMLRun\n",
-"#local_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
+"# from azureml.train.automl.run import AutoMLRun\n",
+"# local_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
 ]
 },
 {
@@ -240,6 +218,7 @@
 "outputs": [],
 "source": [
 "from azureml.widgets import RunDetails\n",
+"\n",
 "RunDetails(local_run).show()"
 ]
 },
@@ -288,8 +267,12 @@
 "outputs": [],
 "source": [
 "# convert the test data to dataframe\n",
-"X_test_df = validation_data.drop_columns(columns=[label_column_name]).to_pandas_dataframe()\n",
-"y_test_df = validation_data.keep_columns(columns=[label_column_name], validate=True).to_pandas_dataframe()"
+"X_test_df = validation_data.drop_columns(\n",
+"    columns=[label_column_name]\n",
+").to_pandas_dataframe()\n",
+"y_test_df = validation_data.keep_columns(\n",
+"    columns=[label_column_name], validate=True\n",
+").to_pandas_dataframe()"
 ]
 },
 {
@@ -323,20 +306,26 @@
 "import numpy as np\n",
 "import itertools\n",
 "\n",
-"cf =confusion_matrix(y_test_df.values,y_pred)\n",
-"plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest')\n",
+"cf = confusion_matrix(y_test_df.values, y_pred)\n",
+"plt.imshow(cf, cmap=plt.cm.Blues, interpolation=\"nearest\")\n",
 "plt.colorbar()\n",
-"plt.title('Confusion Matrix')\n",
-"plt.xlabel('Predicted')\n",
-"plt.ylabel('Actual')\n",
-"class_labels = ['False','True']\n",
+"plt.title(\"Confusion Matrix\")\n",
+"plt.xlabel(\"Predicted\")\n",
+"plt.ylabel(\"Actual\")\n",
+"class_labels = [\"False\", \"True\"]\n",
 "tick_marks = np.arange(len(class_labels))\n",
-"plt.xticks(tick_marks,class_labels)\n",
-"plt.yticks([-0.5,0,1,1.5],['','False','True',''])\n",
+"plt.xticks(tick_marks, class_labels)\n",
+"plt.yticks([-0.5, 0, 1, 1.5], [\"\", \"False\", \"True\", \"\"])\n",
 "# plotting text value inside cells\n",
-"thresh = cf.max() / 2.\n",
-"for i,j in itertools.product(range(cf.shape[0]),range(cf.shape[1])):\n",
-"    plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black')\n",
+"thresh = cf.max() / 2.0\n",
+"for i, j in itertools.product(range(cf.shape[0]), range(cf.shape[1])):\n",
+"    plt.text(\n",
+"        j,\n",
+"        i,\n",
+"        format(cf[i, j], \"d\"),\n",
+"        horizontalalignment=\"center\",\n",
+"        color=\"white\" if cf[i, j] > thresh else \"black\",\n",
+"    )\n",
 "plt.show()"
 ]
 },
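The reformatted cell above keeps the manual matplotlib plot. As an aside (not part of these commits), recent scikit-learn versions offer a one-line equivalent, assuming scikit-learn >= 1.0:

from sklearn.metrics import ConfusionMatrixDisplay
import matplotlib.pyplot as plt

# Draws the same confusion matrix directly from labels and predictions.
ConfusionMatrixDisplay.from_predictions(y_test_df.values, y_pred, cmap=plt.cm.Blues)
plt.show()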
@@ -363,7 +352,10 @@
 "client = ExplanationClient.from_run(best_run)\n",
 "engineered_explanations = client.download_model_explanation(raw=False)\n",
 "print(engineered_explanations.get_feature_importance_dict())\n",
-"print(\"You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + best_run.get_portal_url())"
+"print(\n",
+"    \"You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\"\n",
+"    + best_run.get_portal_url()\n",
+")"
 ]
 },
 {
@@ -382,7 +374,10 @@
 "source": [
 "raw_explanations = client.download_model_explanation(raw=True)\n",
 "print(raw_explanations.get_feature_importance_dict())\n",
-"print(\"You can visualize the raw explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + best_run.get_portal_url())"
+"print(\n",
+"    \"You can visualize the raw explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\"\n",
+"    + best_run.get_portal_url()\n",
+")"
 ]
 },
 {
@@ -398,7 +393,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"automl_run, fitted_model = local_run.get_output(metric='accuracy')"
+"automl_run, fitted_model = local_run.get_output(metric=\"accuracy\")"
 ]
 },
 {
@@ -432,12 +427,18 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations\n",
+"from azureml.train.automl.runtime.automl_explain_utilities import (\n",
+"    automl_setup_model_explanations,\n",
+")\n",
 "\n",
-"automl_explainer_setup_obj = automl_setup_model_explanations(fitted_model, X=X_train,\n",
-"                                                             X_test=X_test, y=y_train,\n",
-"                                                             task='classification',\n",
-"                                                             automl_run=automl_run)"
+"automl_explainer_setup_obj = automl_setup_model_explanations(\n",
+"    fitted_model,\n",
+"    X=X_train,\n",
+"    X_test=X_test,\n",
+"    y=y_train,\n",
+"    task=\"classification\",\n",
+"    automl_run=automl_run,\n",
+")"
 ]
 },
 {
@@ -455,13 +456,18 @@
 "outputs": [],
 "source": [
 "from azureml.interpret.mimic_wrapper import MimicWrapper\n",
-"explainer = MimicWrapper(ws, automl_explainer_setup_obj.automl_estimator,\n",
-"                         explainable_model=automl_explainer_setup_obj.surrogate_model,\n",
-"                         init_dataset=automl_explainer_setup_obj.X_transform, run=automl_explainer_setup_obj.automl_run,\n",
-"                         features=automl_explainer_setup_obj.engineered_feature_names,\n",
-"                         feature_maps=[automl_explainer_setup_obj.feature_map],\n",
-"                         classes=automl_explainer_setup_obj.classes,\n",
-"                         explainer_kwargs=automl_explainer_setup_obj.surrogate_model_params)"
+"\n",
+"explainer = MimicWrapper(\n",
+"    ws,\n",
+"    automl_explainer_setup_obj.automl_estimator,\n",
+"    explainable_model=automl_explainer_setup_obj.surrogate_model,\n",
+"    init_dataset=automl_explainer_setup_obj.X_transform,\n",
+"    run=automl_explainer_setup_obj.automl_run,\n",
+"    features=automl_explainer_setup_obj.engineered_feature_names,\n",
+"    feature_maps=[automl_explainer_setup_obj.feature_map],\n",
+"    classes=automl_explainer_setup_obj.classes,\n",
+"    explainer_kwargs=automl_explainer_setup_obj.surrogate_model_params,\n",
+")"
 ]
 },
 {
@@ -479,9 +485,14 @@
 "outputs": [],
 "source": [
 "# Compute the engineered explanations\n",
-"engineered_explanations = explainer.explain(['local', 'global'], eval_dataset=automl_explainer_setup_obj.X_test_transform)\n",
+"engineered_explanations = explainer.explain(\n",
+"    [\"local\", \"global\"], eval_dataset=automl_explainer_setup_obj.X_test_transform\n",
+")\n",
 "print(engineered_explanations.get_feature_importance_dict())\n",
-"print(\"You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + automl_run.get_portal_url())"
+"print(\n",
+"    \"You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\"\n",
+"    + automl_run.get_portal_url()\n",
+")"
 ]
 },
 {
@@ -499,12 +510,18 @@
 "outputs": [],
 "source": [
 "# Compute the raw explanations\n",
-"raw_explanations = explainer.explain(['local', 'global'], get_raw=True,\n",
-"                                     raw_feature_names=automl_explainer_setup_obj.raw_feature_names,\n",
-"                                     eval_dataset=automl_explainer_setup_obj.X_test_transform,\n",
-"                                     raw_eval_dataset=automl_explainer_setup_obj.X_test_raw)\n",
+"raw_explanations = explainer.explain(\n",
+"    [\"local\", \"global\"],\n",
+"    get_raw=True,\n",
+"    raw_feature_names=automl_explainer_setup_obj.raw_feature_names,\n",
+"    eval_dataset=automl_explainer_setup_obj.X_test_transform,\n",
+"    raw_eval_dataset=automl_explainer_setup_obj.X_test_raw,\n",
+")\n",
 "print(raw_explanations.get_feature_importance_dict())\n",
-"print(\"You can visualize the raw explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + automl_run.get_portal_url())"
+"print(\n",
+"    \"You can visualize the raw explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\"\n",
+"    + automl_run.get_portal_url()\n",
+")"
 ]
 },
 {
@@ -524,15 +541,17 @@
 "import joblib\n",
 "\n",
 "# Initialize the ScoringExplainer\n",
-"scoring_explainer = TreeScoringExplainer(explainer.explainer, feature_maps=[automl_explainer_setup_obj.feature_map])\n",
+"scoring_explainer = TreeScoringExplainer(\n",
+"    explainer.explainer, feature_maps=[automl_explainer_setup_obj.feature_map]\n",
+")\n",
 "\n",
 "# Pickle scoring explainer locally to './scoring_explainer.pkl'\n",
-"scoring_explainer_file_name = 'scoring_explainer.pkl'\n",
-"with open(scoring_explainer_file_name, 'wb') as stream:\n",
+"scoring_explainer_file_name = \"scoring_explainer.pkl\"\n",
+"with open(scoring_explainer_file_name, \"wb\") as stream:\n",
 "    joblib.dump(scoring_explainer, stream)\n",
 "\n",
 "# Upload the scoring explainer to the automl run\n",
-"automl_run.upload_file('outputs/scoring_explainer.pkl', scoring_explainer_file_name)"
+"automl_run.upload_file(\"outputs/scoring_explainer.pkl\", scoring_explainer_file_name)"
 ]
 },
 {
@@ -551,10 +570,12 @@
 "outputs": [],
 "source": [
 "# Register trained automl model present in the 'outputs' folder in the artifacts\n",
-"original_model = automl_run.register_model(model_name='automl_model', \n",
-"                                           model_path='outputs/model.pkl')\n",
-"scoring_explainer_model = automl_run.register_model(model_name='scoring_explainer',\n",
-"                                                    model_path='outputs/scoring_explainer.pkl')"
+"original_model = automl_run.register_model(\n",
+"    model_name=\"automl_model\", model_path=\"outputs/model.pkl\"\n",
+")\n",
+"scoring_explainer_model = automl_run.register_model(\n",
+"    model_name=\"scoring_explainer\", model_path=\"outputs/scoring_explainer.pkl\"\n",
+")"
 ]
 },
 {
@@ -575,7 +596,7 @@
 "from azureml.automl.core.shared import constants\n",
 "from azureml.core.environment import Environment\n",
 "\n",
-"automl_run.download_file(constants.CONDA_ENV_FILE_PATH, 'myenv.yml')\n",
+"automl_run.download_file(constants.CONDA_ENV_FILE_PATH, \"myenv.yml\")\n",
 "myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\n",
 "myenv"
 ]
@@ -598,7 +619,9 @@
 "import joblib\n",
 "import pandas as pd\n",
 "from azureml.core.model import Model\n",
-"from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations\n",
+"from azureml.train.automl.runtime.automl_explain_utilities import (\n",
+"    automl_setup_model_explanations,\n",
+")\n",
 "\n",
 "\n",
 "def init():\n",
@@ -607,28 +630,35 @@
 "\n",
 "    # Retrieve the path to the model file using the model name\n",
 "    # Assume original model is named original_prediction_model\n",
-"    automl_model_path = Model.get_model_path('automl_model')\n",
-"    scoring_explainer_path = Model.get_model_path('scoring_explainer')\n",
+"    automl_model_path = Model.get_model_path(\"automl_model\")\n",
+"    scoring_explainer_path = Model.get_model_path(\"scoring_explainer\")\n",
 "\n",
 "    automl_model = joblib.load(automl_model_path)\n",
 "    scoring_explainer = joblib.load(scoring_explainer_path)\n",
 "\n",
 "\n",
 "def run(raw_data):\n",
-"    data = pd.read_json(raw_data, orient='records') \n",
+"    data = pd.read_json(raw_data, orient=\"records\")\n",
 "    # Make prediction\n",
 "    predictions = automl_model.predict(data)\n",
 "    # Setup for inferencing explanations\n",
-"    automl_explainer_setup_obj = automl_setup_model_explanations(automl_model,\n",
-"                                                                 X_test=data, task='classification')\n",
+"    automl_explainer_setup_obj = automl_setup_model_explanations(\n",
+"        automl_model, X_test=data, task=\"classification\"\n",
+"    )\n",
 "    # Retrieve model explanations for engineered explanations\n",
-"    engineered_local_importance_values = scoring_explainer.explain(automl_explainer_setup_obj.X_test_transform)\n",
+"    engineered_local_importance_values = scoring_explainer.explain(\n",
+"        automl_explainer_setup_obj.X_test_transform\n",
+"    )\n",
 "    # Retrieve model explanations for raw explanations\n",
-"    raw_local_importance_values = scoring_explainer.explain(automl_explainer_setup_obj.X_test_transform, get_raw=True)\n",
+"    raw_local_importance_values = scoring_explainer.explain(\n",
+"        automl_explainer_setup_obj.X_test_transform, get_raw=True\n",
+"    )\n",
 "    # You can return any data type as long as it is JSON-serializable\n",
-"    return {'predictions': predictions.tolist(),\n",
-"            'engineered_local_importance_values': engineered_local_importance_values,\n",
-"            'raw_local_importance_values': raw_local_importance_values}\n"
+"    return {\n",
+"        \"predictions\": predictions.tolist(),\n",
+"        \"engineered_local_importance_values\": engineered_local_importance_values,\n",
+"        \"raw_local_importance_values\": raw_local_importance_values,\n",
+"    }"
 ]
 },
 {
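One subtlety in this scoring script: scoring_explainer.explain() can return numpy arrays or sparse matrices, which the JSON layer cannot serialize directly. The regression variant of this script later in this change set adds a convert_matrix helper for exactly that reason. A minimal sketch of the same idea, with a hypothetical helper name:

import numpy as np
import scipy.sparse as sparse

def to_jsonable(values):
    # Densify sparse importances, then turn numpy containers into plain
    # lists so the return value survives JSON serialization. (Sketch only.)
    if sparse.issparse(values):
        values = values.todense()
    if isinstance(values, (np.ndarray, np.matrix)):
        values = values.tolist()
    return values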
@@ -647,7 +677,7 @@
 "source": [
 "from azureml.core.model import InferenceConfig\n",
 "\n",
-"inf_config = InferenceConfig(entry_script='score.py', environment=myenv)"
+"inf_config = InferenceConfig(entry_script=\"score.py\", environment=myenv)"
 ]
 },
 {
@@ -668,17 +698,17 @@
 "from azureml.core.compute_target import ComputeTargetException\n",
 "\n",
 "# Choose a name for your cluster.\n",
-"aks_name = 'scoring-explain'\n",
+"aks_name = \"scoring-explain\"\n",
 "\n",
 "# Verify that cluster does not exist already\n",
 "try:\n",
 "    aks_target = ComputeTarget(workspace=ws, name=aks_name)\n",
-"    print('Found existing cluster, use it.')\n",
+"    print(\"Found existing cluster, use it.\")\n",
 "except ComputeTargetException:\n",
-"    prov_config = AksCompute.provisioning_configuration(vm_size='STANDARD_D3_V2')\n",
-"    aks_target = ComputeTarget.create(workspace=ws, \n",
-"                                      name=aks_name,\n",
-"                                      provisioning_configuration=prov_config)\n",
+"    prov_config = AksCompute.provisioning_configuration(vm_size=\"STANDARD_D3_V2\")\n",
+"    aks_target = ComputeTarget.create(\n",
+"        workspace=ws, name=aks_name, provisioning_configuration=prov_config\n",
+"    )\n",
 "aks_target.wait_for_completion(show_output=True)"
 ]
 },
@@ -708,16 +738,18 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"aks_service_name ='model-scoring-local-aks'\n",
+"aks_service_name = \"model-scoring-local-aks\"\n",
 "\n",
-"aks_service = Model.deploy(workspace=ws,\n",
-"                           name=aks_service_name,\n",
-"                           models=[scoring_explainer_model, original_model],\n",
-"                           inference_config=inf_config,\n",
-"                           deployment_config=aks_config,\n",
-"                           deployment_target=aks_target)\n",
+"aks_service = Model.deploy(\n",
+"    workspace=ws,\n",
+"    name=aks_service_name,\n",
+"    models=[scoring_explainer_model, original_model],\n",
+"    inference_config=inf_config,\n",
+"    deployment_config=aks_config,\n",
+"    deployment_target=aks_target,\n",
+")\n",
 "\n",
-"aks_service.wait_for_deployment(show_output = True)\n",
+"aks_service.wait_for_deployment(show_output=True)\n",
 "print(aks_service.state)"
 ]
 },
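If wait_for_deployment finishes with the service in a failed state, the scoring container's logs are the first place to look. The standard Webservice API exposes them directly:

# Pull the container logs when the deployment does not come up Healthy.
if aks_service.state != "Healthy":
    print(aks_service.get_logs())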
@@ -752,18 +784,24 @@
 "outputs": [],
 "source": [
 "# Serialize the first row of the test data into json\n",
-"X_test_json = X_test_df[:1].to_json(orient='records')\n",
+"X_test_json = X_test_df[:1].to_json(orient=\"records\")\n",
 "print(X_test_json)\n",
 "\n",
 "# Call the service to get the predictions and the engineered and raw explanations\n",
 "output = aks_service.run(X_test_json)\n",
 "\n",
 "# Print the predicted value\n",
-"print('predictions:\\n{}\\n'.format(output['predictions']))\n",
+"print(\"predictions:\\n{}\\n\".format(output[\"predictions\"]))\n",
 "# Print the engineered feature importances for the predicted value\n",
-"print('engineered_local_importance_values:\\n{}\\n'.format(output['engineered_local_importance_values']))\n",
+"print(\n",
+"    \"engineered_local_importance_values:\\n{}\\n\".format(\n",
+"        output[\"engineered_local_importance_values\"]\n",
+"    )\n",
+")\n",
 "# Print the raw feature importances for the predicted value\n",
-"print('raw_local_importance_values:\\n{}\\n'.format(output['raw_local_importance_values']))\n"
+"print(\n",
+"    \"raw_local_importance_values:\\n{}\\n\".format(output[\"raw_local_importance_values\"])\n",
+")"
 ]
 },
 {
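aks_service.run goes through the SDK client; the same request can be made over plain HTTP. A sketch, assuming key authentication is enabled on the AKS web service:

import requests

key, _ = aks_service.get_keys()  # primary and secondary auth keys
headers = {
    "Content-Type": "application/json",
    "Authorization": "Bearer " + key,
}
response = requests.post(aks_service.scoring_uri, data=X_test_json, headers=headers)
print(response.json())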

@@ -1,21 +1,5 @@
 {
 "cells": [
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"Copyright (c) Microsoft Corporation. All rights reserved.\n",
-"\n",
-"Licensed under the MIT License."
-]
-},
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-""
-]
-},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -78,6 +62,7 @@
 "import azureml.core\n",
 "from azureml.core.experiment import Experiment\n",
 "from azureml.core.workspace import Workspace\n",
+"\n",
 "from azureml.automl.core.featurization import FeaturizationConfig\n",
 "from azureml.train.automl import AutoMLConfig\n",
 "from azureml.core.dataset import Dataset"
@@ -90,16 +75,6 @@
 "This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
 ]
 },
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
-"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
-]
-},
 {
 "cell_type": "code",
 "execution_count": null,
@@ -109,17 +84,18 @@
 "ws = Workspace.from_config()\n",
 "\n",
 "# Choose a name for the experiment.\n",
-"experiment_name = 'automl-regression-hardware-explain'\n",
+"experiment_name = \"automl-regression-hardware-explain\"\n",
 "experiment = Experiment(ws, experiment_name)\n",
 "\n",
 "output = {}\n",
-"output['Subscription ID'] = ws.subscription_id\n",
-"output['Workspace Name'] = ws.name\n",
-"output['Resource Group'] = ws.resource_group\n",
-"output['Location'] = ws.location\n",
-"output['Experiment Name'] = experiment.name\n",
-"pd.set_option('display.max_colwidth', -1)\n",
-"outputDf = pd.DataFrame(data = output, index = [''])\n",
+"output[\"Subscription ID\"] = ws.subscription_id\n",
+"output[\"Workspace Name\"] = ws.name\n",
+"output[\"Resource Group\"] = ws.resource_group\n",
+"output[\"Location\"] = ws.location\n",
+"output[\"Experiment Name\"] = experiment.name\n",
+"output[\"SDK Version\"] = azureml.core.VERSION\n",
+"pd.set_option(\"display.max_colwidth\", None)\n",
+"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
 "outputDf.T"
 ]
 },
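The display.max_colwidth change above is not just cosmetic: recent pandas releases deprecate and then reject -1 as a value for this option, and None is the supported spelling for "no limit". A quick check of the behaviour being relied on:

import pandas as pd

# pandas >= 1.0 deprecates negative values here (later versions remove them);
# None means "no truncation" on all versions this notebook targets.
pd.set_option("display.max_colwidth", None)
print(pd.get_option("display.max_colwidth"))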
@@ -152,12 +128,12 @@
 "# Verify that cluster does not exist already\n",
 "try:\n",
 "    compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
-"    print('Found existing cluster, use it.')\n",
+"    print(\"Found existing cluster, use it.\")\n",
 "except ComputeTargetException:\n",
-"    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
-"                                                           max_nodes=4)\n",
+"    compute_config = AmlCompute.provisioning_configuration(\n",
+"        vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
+"    )\n",
 "    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
-"\n",
 "compute_target.wait_for_completion(show_output=True)"
 ]
 },
@@ -176,7 +152,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"data = 'https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/machineData.csv'\n",
+"data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/machineData.csv\"\n",
 "\n",
 "dataset = Dataset.Tabular.from_delimited_files(data)\n",
 "\n",
@@ -185,14 +161,22 @@
 "\n",
 "\n",
 "# Register the train dataset with your workspace\n",
-"train_data.register(workspace = ws, name = 'machineData_train_dataset',\n",
-"                    description = 'hardware performance training data',\n",
-"                    create_new_version=True)\n",
+"train_data.register(\n",
+"    workspace=ws,\n",
+"    name=\"machineData_train_dataset\",\n",
+"    description=\"hardware performance training data\",\n",
+"    create_new_version=True,\n",
+")\n",
 "\n",
 "# Register the test dataset with your workspace\n",
-"test_data.register(workspace = ws, name = 'machineData_test_dataset', description = 'hardware performance test data', create_new_version=True)\n",
+"test_data.register(\n",
+"    workspace=ws,\n",
+"    name=\"machineData_test_dataset\",\n",
+"    description=\"hardware performance test data\",\n",
+"    create_new_version=True,\n",
+")\n",
 "\n",
-"label =\"ERP\"\n",
+"label = \"ERP\"\n",
 "\n",
 "train_data.to_pandas_dataframe().head()"
 ]
@@ -249,15 +233,19 @@
 "outputs": [],
 "source": [
 "featurization_config = FeaturizationConfig()\n",
-"featurization_config.blocked_transformers = ['LabelEncoder']\n",
-"#featurization_config.drop_columns = ['MMIN']\n",
-"featurization_config.add_column_purpose('MYCT', 'Numeric')\n",
-"featurization_config.add_column_purpose('VendorName', 'CategoricalHash')\n",
-"#default strategy mean, add transformer param for for 3 columns\n",
-"featurization_config.add_transformer_params('Imputer', ['CACH'], {\"strategy\": \"median\"})\n",
-"featurization_config.add_transformer_params('Imputer', ['CHMIN'], {\"strategy\": \"median\"})\n",
-"featurization_config.add_transformer_params('Imputer', ['PRP'], {\"strategy\": \"most_frequent\"})\n",
-"#featurization_config.add_transformer_params('HashOneHotEncoder', [], {\"number_of_bits\": 3})"
+"featurization_config.blocked_transformers = [\"LabelEncoder\"]\n",
+"# featurization_config.drop_columns = ['MMIN']\n",
+"featurization_config.add_column_purpose(\"MYCT\", \"Numeric\")\n",
+"featurization_config.add_column_purpose(\"VendorName\", \"CategoricalHash\")\n",
+"# default strategy mean, add transformer param for 3 columns\n",
+"featurization_config.add_transformer_params(\"Imputer\", [\"CACH\"], {\"strategy\": \"median\"})\n",
+"featurization_config.add_transformer_params(\n",
+"    \"Imputer\", [\"CHMIN\"], {\"strategy\": \"median\"}\n",
+")\n",
+"featurization_config.add_transformer_params(\n",
+"    \"Imputer\", [\"PRP\"], {\"strategy\": \"most_frequent\"}\n",
+")\n",
+"# featurization_config.add_transformer_params('HashOneHotEncoder', [], {\"number_of_bits\": 3})"
 ]
 },
 {
@@ -271,23 +259,24 @@
 "outputs": [],
 "source": [
 "automl_settings = {\n",
-"    \"enable_early_stopping\": True, \n",
-"    \"experiment_timeout_hours\" : 0.25,\n",
+"    \"enable_early_stopping\": True,\n",
+"    \"experiment_timeout_hours\": 0.25,\n",
 "    \"max_concurrent_iterations\": 4,\n",
 "    \"max_cores_per_iteration\": -1,\n",
 "    \"n_cross_validations\": 5,\n",
-"    \"primary_metric\": 'normalized_root_mean_squared_error',\n",
-"    \"verbosity\": logging.INFO\n",
+"    \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
+"    \"verbosity\": logging.INFO,\n",
 "}\n",
 "\n",
-"automl_config = AutoMLConfig(task = 'regression',\n",
-"                             debug_log = 'automl_errors.log',\n",
-"                             compute_target=compute_target,\n",
-"                             featurization=featurization_config,\n",
-"                             training_data = train_data,\n",
-"                             label_column_name = label,\n",
-"                             **automl_settings\n",
-"                             )"
+"automl_config = AutoMLConfig(\n",
+"    task=\"regression\",\n",
+"    debug_log=\"automl_errors.log\",\n",
+"    compute_target=compute_target,\n",
+"    featurization=featurization_config,\n",
+"    training_data=train_data,\n",
+"    label_column_name=label,\n",
+"    **automl_settings,\n",
+")"
 ]
 },
 {
@@ -304,7 +293,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"remote_run = experiment.submit(automl_config, show_output = False)"
+"remote_run = experiment.submit(automl_config, show_output=False)"
 ]
 },
 {
@@ -320,9 +309,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"#from azureml.train.automl.run import AutoMLRun\n",
-"#remote_run = AutoMLRun(experiment=experiment, run_id='<run_ID_goes_here')\n",
-"#remote_run"
+"# from azureml.train.automl.run import AutoMLRun\n",
+"# remote_run = AutoMLRun(experiment=experiment, run_id='<run_ID_goes_here')\n",
+"# remote_run"
 ]
 },
 {
@@ -359,8 +348,10 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"# Download the featuurization summary JSON file locally\n",
-"best_run.download_file(\"outputs/featurization_summary.json\", \"featurization_summary.json\")\n",
+"# Download the featurization summary JSON file locally\n",
+"best_run.download_file(\n",
+"    \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
+")\n",
 "\n",
 "# Render the JSON as a pandas DataFrame\n",
 "with open(\"featurization_summary.json\", \"r\") as f:\n",
@@ -394,7 +385,8 @@
 "outputs": [],
 "source": [
 "from azureml.widgets import RunDetails\n",
-"RunDetails(remote_run).show() "
+"\n",
+"RunDetails(remote_run).show()"
 ]
 },
 {
@@ -415,7 +407,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"#automl_run, fitted_model = remote_run.get_output(metric='r2_score')\n",
+"# automl_run, fitted_model = remote_run.get_output(metric='r2_score')\n",
 "automl_run, fitted_model = remote_run.get_output(iteration=2)"
 ]
 },
@@ -441,7 +433,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"with open('train_explainer.py', 'r') as cefr:\n",
+"with open(\"train_explainer.py\", \"r\") as cefr:\n",
 "    print(cefr.read())"
 ]
 },
@@ -463,32 +455,36 @@
 "import os\n",
 "\n",
 "# create script folder\n",
-"script_folder = './sample_projects/automl-regression-hardware'\n",
+"script_folder = \"./sample_projects/automl-regression-hardware\"\n",
 "if not os.path.exists(script_folder):\n",
 "    os.makedirs(script_folder)\n",
 "\n",
 "# Copy the sample script to script folder.\n",
-"shutil.copy('train_explainer.py', script_folder)\n",
+"shutil.copy(\"train_explainer.py\", script_folder)\n",
 "\n",
 "# Create the explainer script that will run on the remote compute.\n",
-"script_file_name = script_folder + '/train_explainer.py'\n",
+"script_file_name = script_folder + \"/train_explainer.py\"\n",
 "\n",
 "# Open the sample script for modification\n",
-"with open(script_file_name, 'r') as cefr:\n",
+"with open(script_file_name, \"r\") as cefr:\n",
 "    content = cefr.read()\n",
 "\n",
 "# Replace the values in train_explainer.py file with the appropriate values\n",
-"content = content.replace('<<experiment_name>>', automl_run.experiment.name) # your experiment name.\n",
-"content = content.replace('<<run_id>>', automl_run.id) # Run-id of the AutoML run for which you want to explain the model.\n",
-"content = content.replace('<<target_column_name>>', 'ERP') # Your target column name\n",
-"content = content.replace('<<task>>', 'regression') # Training task type\n",
+"content = content.replace(\n",
+"    \"<<experiment_name>>\", automl_run.experiment.name\n",
+")  # your experiment name.\n",
+"content = content.replace(\n",
+"    \"<<run_id>>\", automl_run.id\n",
+")  # Run-id of the AutoML run for which you want to explain the model.\n",
+"content = content.replace(\"<<target_column_name>>\", \"ERP\")  # Your target column name\n",
+"content = content.replace(\"<<task>>\", \"regression\")  # Training task type\n",
 "# Name of your training dataset registered with your workspace\n",
-"content = content.replace('<<train_dataset_name>>', 'machineData_train_dataset') \n",
+"content = content.replace(\"<<train_dataset_name>>\", \"machineData_train_dataset\")\n",
 "# Name of your test dataset registered with your workspace\n",
-"content = content.replace('<<test_dataset_name>>', 'machineData_test_dataset')\n",
+"content = content.replace(\"<<test_dataset_name>>\", \"machineData_test_dataset\")\n",
 "\n",
 "# Write sample file into your script folder.\n",
-"with open(script_file_name, 'w') as cefw:\n",
+"with open(script_file_name, \"w\") as cefw:\n",
 "    cefw.write(content)"
 ]
 },
@@ -506,6 +502,8 @@
 "outputs": [],
 "source": [
 "from azureml.core.runconfig import RunConfiguration\n",
+"from azureml.core.conda_dependencies import CondaDependencies\n",
+"import pkg_resources\n",
 "\n",
 "# create a new RunConfig object\n",
 "conda_run_config = RunConfiguration(framework=\"python\")\n",
@@ -515,7 +513,7 @@
 "conda_run_config.environment.docker.enabled = True\n",
 "\n",
 "# specify CondaDependencies obj\n",
-"conda_run_config.environment.python.conda_dependencies = automl_run.get_environment().python.conda_dependencies"
+"conda_run_config.environment = automl_run.get_environment()"
 ]
 },
 {
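The replacement line above reuses the full training environment of the AutoML run (base image plus conda and pip packages) instead of copying only its conda dependencies, which keeps the remote explanation run aligned with what the model was trained against. The same pattern on a fresh run configuration, for reference:

from azureml.core.runconfig import RunConfiguration

run_config = RunConfiguration()
# Inherit the exact environment the AutoML child run trained with, so the
# explainer sees the same package versions as the fitted model.
run_config.environment = automl_run.get_environment()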
@@ -535,9 +533,11 @@
 "# Now submit a run on AmlCompute for model explanations\n",
 "from azureml.core.script_run_config import ScriptRunConfig\n",
 "\n",
-"script_run_config = ScriptRunConfig(source_directory=script_folder,\n",
-"                                    script='train_explainer.py',\n",
-"                                    run_config=conda_run_config)\n",
+"script_run_config = ScriptRunConfig(\n",
+"    source_directory=script_folder,\n",
+"    script=\"train_explainer.py\",\n",
+"    run_config=conda_run_config,\n",
+")\n",
 "\n",
 "run = experiment.submit(script_run_config)\n",
 "\n",
@@ -579,10 +579,16 @@
 "outputs": [],
 "source": [
 "from azureml.interpret import ExplanationClient\n",
+"\n",
 "client = ExplanationClient.from_run(automl_run)\n",
-"engineered_explanations = client.download_model_explanation(raw=False, comment='engineered explanations')\n",
+"engineered_explanations = client.download_model_explanation(\n",
+"    raw=False, comment=\"engineered explanations\"\n",
+")\n",
 "print(engineered_explanations.get_feature_importance_dict())\n",
-"print(\"You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + automl_run.get_portal_url())"
+"print(\n",
+"    \"You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\"\n",
+"    + automl_run.get_portal_url()\n",
+")"
 ]
 },
 {
@@ -599,9 +605,14 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"raw_explanations = client.download_model_explanation(raw=True, comment='raw explanations')\n",
+"raw_explanations = client.download_model_explanation(\n",
+"    raw=True, comment=\"raw explanations\"\n",
+")\n",
 "print(raw_explanations.get_feature_importance_dict())\n",
-"print(\"You can visualize the raw explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + automl_run.get_portal_url())"
+"print(\n",
+"    \"You can visualize the raw explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\"\n",
+"    + automl_run.get_portal_url()\n",
+")"
 ]
 },
 {
@@ -623,33 +634,12 @@
 "outputs": [],
 "source": [
 "# Register trained automl model present in the 'outputs' folder in the artifacts\n",
-"original_model = automl_run.register_model(model_name='automl_model', \n",
-"                                           model_path='outputs/model.pkl')\n",
-"scoring_explainer_model = automl_run.register_model(model_name='scoring_explainer',\n",
-"                                                    model_path='outputs/scoring_explainer.pkl')"
-]
-},
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"### Create the conda dependencies for setting up the service\n",
-"We need to create the conda dependencies comprising of the *azureml* packages using the training environment from the *automl_run*."
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"conda_dep = automl_run.get_environment().python.conda_dependencies\n",
-"\n",
-"with open(\"myenv.yml\",\"w\") as f:\n",
-"    f.write(conda_dep.serialize_to_string())\n",
-"\n",
-"with open(\"myenv.yml\",\"r\") as f:\n",
-"    print(f.read())"
+"original_model = automl_run.register_model(\n",
+"    model_name=\"automl_model\", model_path=\"outputs/model.pkl\"\n",
+")\n",
+"scoring_explainer_model = automl_run.register_model(\n",
+"    model_name=\"scoring_explainer\", model_path=\"outputs/scoring_explainer.pkl\"\n",
+")"
 ]
 },
 {
@@ -665,7 +655,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"with open(\"score_explain.py\",\"r\") as f:\n",
+"with open(\"score_explain.py\", \"r\") as f:\n",
 "    print(f.read())"
 ]
 },
@@ -674,7 +664,7 @@
 "metadata": {},
 "source": [
 "### Deploy the service\n",
-"In the cell below, we deploy the service using the conda file and the scoring file from the previous steps. "
+"In the cell below, we deploy the service using the AutoML training environment and the scoring file from the previous steps."
 ]
 },
 {
@@ -683,22 +673,30 @@
 "metadata": {},
 "outputs": [],
 "source": [
+"from azureml.core.webservice import Webservice\n",
 "from azureml.core.model import InferenceConfig\n",
 "from azureml.core.webservice import AciWebservice\n",
 "from azureml.core.model import Model\n",
 "from azureml.core.environment import Environment\n",
 "\n",
-"aciconfig = AciWebservice.deploy_configuration(cpu_cores=2, \n",
-"                                               memory_gb=2, \n",
-"                                               tags={\"data\": \"Machine Data\", \n",
-"                                                     \"method\" : \"local_explanation\"}, \n",
-"                                               description='Get local explanations for Machine test data')\n",
+"aciconfig = AciWebservice.deploy_configuration(\n",
+"    cpu_cores=2,\n",
+"    memory_gb=2,\n",
+"    tags={\"data\": \"Machine Data\", \"method\": \"local_explanation\"},\n",
+"    description=\"Get local explanations for Machine test data\",\n",
+")\n",
 "\n",
-"myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\n",
+"myenv = automl_run.get_environment()\n",
 "inference_config = InferenceConfig(entry_script=\"score_explain.py\", environment=myenv)\n",
 "\n",
 "# Use configs and models generated above\n",
-"service = Model.deploy(ws, 'model-scoring', [scoring_explainer_model, original_model], inference_config, aciconfig)\n",
+"service = Model.deploy(\n",
+"    ws,\n",
+"    \"model-scoring\",\n",
+"    [scoring_explainer_model, original_model],\n",
+"    inference_config,\n",
+"    aciconfig,\n",
+")\n",
 "service.wait_for_deployment(show_output=True)"
 ]
 },
@@ -732,19 +730,19 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"if service.state == 'Healthy':\n",
+"if service.state == \"Healthy\":\n",
 "    X_test = test_data.drop_columns([label]).to_pandas_dataframe()\n",
 "    # Serialize the first row of the test data into json\n",
-"    X_test_json = X_test[:1].to_json(orient='records')\n",
+"    X_test_json = X_test[:1].to_json(orient=\"records\")\n",
 "    print(X_test_json)\n",
 "    # Call the service to get the predictions and the engineered and raw explanations\n",
 "    output = service.run(X_test_json)\n",
 "    # Print the predicted value\n",
-"    print(output['predictions'])\n",
+"    print(output[\"predictions\"])\n",
 "    # Print the engineered feature importances for the predicted value\n",
-"    print(output['engineered_local_importance_values'])\n",
+"    print(output[\"engineered_local_importance_values\"])\n",
 "    # Print the raw feature importances for the predicted value\n",
-"    print(output['raw_local_importance_values'])"
+"    print(output[\"raw_local_importance_values\"])"
 ]
 },
 {
@@ -780,14 +778,14 @@
 "# preview the first 3 rows of the dataset\n",
 "\n",
 "test_data = test_data.to_pandas_dataframe()\n",
-"y_test = test_data['ERP'].fillna(0)\n",
-"test_data = test_data.drop('ERP', 1)\n",
+"y_test = test_data[\"ERP\"].fillna(0)\n",
+"test_data = test_data.drop(\"ERP\", 1)\n",
 "test_data = test_data.fillna(0)\n",
 "\n",
 "\n",
 "train_data = train_data.to_pandas_dataframe()\n",
-"y_train = train_data['ERP'].fillna(0)\n",
-"train_data = train_data.drop('ERP', 1)\n",
+"y_train = train_data[\"ERP\"].fillna(0)\n",
+"train_data = train_data.drop(\"ERP\", 1)\n",
 "train_data = train_data.fillna(0)"
 ]
 },
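A side note on the unchanged drop("ERP", 1) calls above: passing axis positionally is deprecated in recent pandas and removed in pandas 2.0. The keyword forms below are equivalent and would be the safer spelling if this cell is revisited:

# Equivalent, future-proof spellings of test_data.drop("ERP", 1):
test_data = test_data.drop("ERP", axis=1)
# or, clearer still:
test_data = test_data.drop(columns=["ERP"])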
@@ -814,27 +812,41 @@
 "from sklearn.metrics import mean_squared_error, r2_score\n",
 "\n",
 "# Set up a multi-plot chart.\n",
-"f, (a0, a1) = plt.subplots(1, 2, gridspec_kw = {'width_ratios':[1, 1], 'wspace':0, 'hspace': 0})\n",
-"f.suptitle('Regression Residual Values', fontsize = 18)\n",
+"f, (a0, a1) = plt.subplots(\n",
+"    1, 2, gridspec_kw={\"width_ratios\": [1, 1], \"wspace\": 0, \"hspace\": 0}\n",
+")\n",
+"f.suptitle(\"Regression Residual Values\", fontsize=18)\n",
 "f.set_figheight(6)\n",
 "f.set_figwidth(16)\n",
 "\n",
 "# Plot residual values of training set.\n",
 "a0.axis([0, 360, -100, 100])\n",
-"a0.plot(y_residual_train, 'bo', alpha = 0.5)\n",
-"a0.plot([-10,360],[0,0], 'r-', lw = 3)\n",
-"a0.text(16,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_train, y_pred_train))), fontsize = 12)\n",
-"a0.text(16,140,'R2 score = {0:.2f}'.format(r2_score(y_train, y_pred_train)),fontsize = 12)\n",
-"a0.set_xlabel('Training samples', fontsize = 12)\n",
-"a0.set_ylabel('Residual Values', fontsize = 12)\n",
+"a0.plot(y_residual_train, \"bo\", alpha=0.5)\n",
+"a0.plot([-10, 360], [0, 0], \"r-\", lw=3)\n",
+"a0.text(\n",
+"    16,\n",
+"    170,\n",
+"    \"RMSE = {0:.2f}\".format(np.sqrt(mean_squared_error(y_train, y_pred_train))),\n",
+"    fontsize=12,\n",
+")\n",
+"a0.text(\n",
+"    16, 140, \"R2 score = {0:.2f}\".format(r2_score(y_train, y_pred_train)), fontsize=12\n",
+")\n",
+"a0.set_xlabel(\"Training samples\", fontsize=12)\n",
+"a0.set_ylabel(\"Residual Values\", fontsize=12)\n",
 "\n",
 "# Plot residual values of test set.\n",
 "a1.axis([0, 90, -100, 100])\n",
-"a1.plot(y_residual_test, 'bo', alpha = 0.5)\n",
-"a1.plot([-10,360],[0,0], 'r-', lw = 3)\n",
-"a1.text(5,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_test, y_pred_test))), fontsize = 12)\n",
-"a1.text(5,140,'R2 score = {0:.2f}'.format(r2_score(y_test, y_pred_test)),fontsize = 12)\n",
-"a1.set_xlabel('Test samples', fontsize = 12)\n",
+"a1.plot(y_residual_test, \"bo\", alpha=0.5)\n",
+"a1.plot([-10, 360], [0, 0], \"r-\", lw=3)\n",
+"a1.text(\n",
+"    5,\n",
+"    170,\n",
+"    \"RMSE = {0:.2f}\".format(np.sqrt(mean_squared_error(y_test, y_pred_test))),\n",
+"    fontsize=12,\n",
+")\n",
+"a1.text(5, 140, \"R2 score = {0:.2f}\".format(r2_score(y_test, y_pred_test)), fontsize=12)\n",
+"a1.set_xlabel(\"Test samples\", fontsize=12)\n",
 "a1.set_yticklabels([])\n",
 "\n",
 "plt.show()"
@@ -847,9 +859,11 @@
 "outputs": [],
 "source": [
 "%matplotlib inline\n",
-"test_pred = plt.scatter(y_test, y_pred_test, color='')\n",
-"test_test = plt.scatter(y_test, y_test, color='g')\n",
-"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
+"test_pred = plt.scatter(y_test, y_pred_test, color=\"\")\n",
+"test_test = plt.scatter(y_test, y_test, color=\"g\")\n",
+"plt.legend(\n",
+"    (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
+")\n",
 "plt.show()"
 ]
 }

@@ -1,7 +1,10 @@
 import pandas as pd
 import joblib
 from azureml.core.model import Model
-from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations
+from azureml.train.automl.runtime.automl_explain_utilities import (
+    automl_setup_model_explanations,
+)
+import scipy as sp
 
 
 def init():
@@ -11,26 +14,55 @@ def init():
 
     # Retrieve the path to the model file using the model name
     # Assume original model is named original_prediction_model
-    automl_model_path = Model.get_model_path('automl_model')
-    scoring_explainer_path = Model.get_model_path('scoring_explainer')
+    automl_model_path = Model.get_model_path("automl_model")
+    scoring_explainer_path = Model.get_model_path("scoring_explainer")
 
     automl_model = joblib.load(automl_model_path)
     scoring_explainer = joblib.load(scoring_explainer_path)
 
 
+def is_multi_dimensional(matrix):
+    if hasattr(matrix, "ndim") and matrix.ndim > 1:
+        return True
+    if hasattr(matrix, "shape") and matrix.shape[1]:
+        return True
+    return False
+
+
+def convert_matrix(matrix):
+    if sp.sparse.issparse(matrix):
+        matrix = matrix.todense()
+    if is_multi_dimensional(matrix):
+        matrix = matrix.tolist()
+    return matrix
+
+
 def run(raw_data):
     # Get predictions and explanations for each data point
-    data = pd.read_json(raw_data, orient='records')
+    data = pd.read_json(raw_data, orient="records")
     # Make prediction
     predictions = automl_model.predict(data)
     # Setup for inferencing explanations
-    automl_explainer_setup_obj = automl_setup_model_explanations(automl_model,
-                                                                 X_test=data, task='regression')
+    automl_explainer_setup_obj = automl_setup_model_explanations(
+        automl_model, X_test=data, task="regression"
+    )
     # Retrieve model explanations for engineered explanations
-    engineered_local_importance_values = scoring_explainer.explain(automl_explainer_setup_obj.X_test_transform)
+    engineered_local_importance_values = scoring_explainer.explain(
+        automl_explainer_setup_obj.X_test_transform
+    )
+    engineered_local_importance_values = convert_matrix(
+        engineered_local_importance_values
+    )
+
     # Retrieve model explanations for raw explanations
-    raw_local_importance_values = scoring_explainer.explain(automl_explainer_setup_obj.X_test_transform, get_raw=True)
+    raw_local_importance_values = scoring_explainer.explain(
+        automl_explainer_setup_obj.X_test_transform, get_raw=True
+    )
+    raw_local_importance_values = convert_matrix(raw_local_importance_values)
+
     # You can return any data type as long as it is JSON-serializable
-    return {'predictions': predictions.tolist(),
-            'engineered_local_importance_values': engineered_local_importance_values,
-            'raw_local_importance_values': raw_local_importance_values}
+    return {
+        "predictions": predictions.tolist(),
+        "engineered_local_importance_values": engineered_local_importance_values,
+        "raw_local_importance_values": raw_local_importance_values,
+    }
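One review note on the new helper: matrix.shape[1] raises IndexError when the input is 1-D, since a plain numpy array or pandas Series has shape == (n,), so the second branch of is_multi_dimensional only works for inputs that are already 2-D. A safer check would guard on the length of shape; a sketch, not part of this change set:

def is_multi_dimensional(matrix):
    # ndim covers numpy arrays and matrices; fall back to shape for objects
    # that expose shape but not ndim, guarding against 1-D shapes.
    if getattr(matrix, "ndim", 1) > 1:
        return True
    shape = getattr(matrix, "shape", None)
    return shape is not None and len(shape) > 1 and shape[1] > 0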

@@ -10,11 +10,13 @@ from azureml.core.dataset import Dataset
 from azureml.core.run import Run
 from azureml.interpret.mimic_wrapper import MimicWrapper
 from azureml.interpret.scoring.scoring_explainer import TreeScoringExplainer
-from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations, \
-    automl_check_model_if_explainable
+from azureml.train.automl.runtime.automl_explain_utilities import (
+    automl_setup_model_explanations,
+    automl_check_model_if_explainable,
+)
 
 
-OUTPUT_DIR = './outputs/'
+OUTPUT_DIR = "./outputs/"
 os.makedirs(OUTPUT_DIR, exist_ok=True)
 
 # Get workspace from the run context
@@ -22,63 +24,77 @@ run = Run.get_context()
 ws = run.experiment.workspace
 
 # Get the AutoML run object from the experiment name and the workspace
-experiment = Experiment(ws, '<<experiment_name>>')
-automl_run = Run(experiment=experiment, run_id='<<run_id>>')
+experiment = Experiment(ws, "<<experiment_name>>")
+automl_run = Run(experiment=experiment, run_id="<<run_id>>")
 
 # Check if this AutoML model is explainable
 if not automl_check_model_if_explainable(automl_run):
-    raise Exception("Model explanations are currently not supported for " + automl_run.get_properties().get(
-        'run_algorithm'))
+    raise Exception(
+        "Model explanations are currently not supported for "
+        + automl_run.get_properties().get("run_algorithm")
+    )
 
 # Download the best model from the artifact store
-automl_run.download_file(name=MODEL_PATH, output_file_path='model.pkl')
+automl_run.download_file(name=MODEL_PATH, output_file_path="model.pkl")
 
 # Load the AutoML model into memory
-fitted_model = joblib.load('model.pkl')
+fitted_model = joblib.load("model.pkl")
 
 # Get the train dataset from the workspace
-train_dataset = Dataset.get_by_name(workspace=ws, name='<<train_dataset_name>>')
+train_dataset = Dataset.get_by_name(workspace=ws, name="<<train_dataset_name>>")
 # Drop the labeled column to get the training set.
-X_train = train_dataset.drop_columns(columns=['<<target_column_name>>'])
-y_train = train_dataset.keep_columns(columns=['<<target_column_name>>'], validate=True)
+X_train = train_dataset.drop_columns(columns=["<<target_column_name>>"])
+y_train = train_dataset.keep_columns(columns=["<<target_column_name>>"], validate=True)
 
 # Get the test dataset from the workspace
-test_dataset = Dataset.get_by_name(workspace=ws, name='<<test_dataset_name>>')
+test_dataset = Dataset.get_by_name(workspace=ws, name="<<test_dataset_name>>")
 # Drop the labeled column to get the testing set.
-X_test = test_dataset.drop_columns(columns=['<<target_column_name>>'])
+X_test = test_dataset.drop_columns(columns=["<<target_column_name>>"])
 
 # Setup the class for explaining the AutoML models
-automl_explainer_setup_obj = automl_setup_model_explanations(fitted_model, '<<task>>',
-                                                             X=X_train, X_test=X_test,
-                                                             y=y_train,
-                                                             automl_run=automl_run)
+automl_explainer_setup_obj = automl_setup_model_explanations(
+    fitted_model, "<<task>>", X=X_train, X_test=X_test, y=y_train, automl_run=automl_run
+)
 
 # Initialize the Mimic Explainer
-explainer = MimicWrapper(ws, automl_explainer_setup_obj.automl_estimator, LGBMExplainableModel,
-                         init_dataset=automl_explainer_setup_obj.X_transform,
-                         run=automl_explainer_setup_obj.automl_run,
-                         features=automl_explainer_setup_obj.engineered_feature_names,
-                         feature_maps=[automl_explainer_setup_obj.feature_map],
-                         classes=automl_explainer_setup_obj.classes)
+explainer = MimicWrapper(
+    ws,
+    automl_explainer_setup_obj.automl_estimator,
+    LGBMExplainableModel,
+    init_dataset=automl_explainer_setup_obj.X_transform,
+    run=automl_explainer_setup_obj.automl_run,
+    features=automl_explainer_setup_obj.engineered_feature_names,
+    feature_maps=[automl_explainer_setup_obj.feature_map],
+    classes=automl_explainer_setup_obj.classes,
+)
 
 # Compute the engineered explanations
-engineered_explanations = explainer.explain(['local', 'global'], tag='engineered explanations',
-                                            eval_dataset=automl_explainer_setup_obj.X_test_transform)
+engineered_explanations = explainer.explain(
+    ["local", "global"],
+    tag="engineered explanations",
+    eval_dataset=automl_explainer_setup_obj.X_test_transform,
+)
 
 # Compute the raw explanations
-raw_explanations = explainer.explain(['local', 'global'], get_raw=True, tag='raw explanations',
-                                     raw_feature_names=automl_explainer_setup_obj.raw_feature_names,
-                                     eval_dataset=automl_explainer_setup_obj.X_test_transform,
-                                     raw_eval_dataset=automl_explainer_setup_obj.X_test_raw)
+raw_explanations = explainer.explain(
+    ["local", "global"],
+    get_raw=True,
+    tag="raw explanations",
+    raw_feature_names=automl_explainer_setup_obj.raw_feature_names,
+    eval_dataset=automl_explainer_setup_obj.X_test_transform,
+    raw_eval_dataset=automl_explainer_setup_obj.X_test_raw,
+)
 
 print("Engineered and raw explanations computed successfully")
 
 # Initialize the ScoringExplainer
-scoring_explainer = TreeScoringExplainer(explainer.explainer, feature_maps=[automl_explainer_setup_obj.feature_map])
+scoring_explainer = TreeScoringExplainer(
+    explainer.explainer, feature_maps=[automl_explainer_setup_obj.feature_map]
+)
 
 # Pickle scoring explainer locally
-with open('scoring_explainer.pkl', 'wb') as stream:
+with open("scoring_explainer.pkl", "wb") as stream:
     joblib.dump(scoring_explainer, stream)
 
 # Upload the scoring explainer to the automl run
-automl_run.upload_file('outputs/scoring_explainer.pkl', 'scoring_explainer.pkl')
+automl_run.upload_file("outputs/scoring_explainer.pkl", "scoring_explainer.pkl")
|
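The hunk above ends by pickling the `TreeScoringExplainer` and uploading it to the AutoML run. As a rough sketch of how that artifact is consumed later (it relies only on the same `joblib` serialization used above; `X_new` is a hypothetical placeholder for rows shaped like the training features):

```python
import joblib

# Load the scoring explainer that was pickled above.
with open("scoring_explainer.pkl", "rb") as stream:
    scoring_explainer = joblib.load(stream)

# explain() returns per-row feature importance values for new data.
local_importance_values = scoring_explainer.explain(X_new)
```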
@@ -1,21 +1,5 @@
 {
  "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Copyright (c) Microsoft Corporation. All rights reserved.\n",
-    "\n",
-    "Licensed under the MIT License."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    ""
-   ]
-  },
   {
    "cell_type": "markdown",
    "metadata": {},

@@ -70,7 +54,7 @@
    "from matplotlib import pyplot as plt\n",
    "import numpy as np\n",
    "import pandas as pd\n",
-    " \n",
+    "\n",
    "\n",
    "import azureml.core\n",
    "from azureml.core.experiment import Experiment\n",

@@ -86,16 +70,6 @@
    "This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
   ]
  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
-    "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
-   ]
-  },
  {
   "cell_type": "code",
   "execution_count": null,

@@ -105,18 +79,19 @@
    "ws = Workspace.from_config()\n",
    "\n",
    "# Choose a name for the experiment.\n",
-    "experiment_name = 'automl-regression'\n",
+    "experiment_name = \"automl-regression\"\n",
    "\n",
    "experiment = Experiment(ws, experiment_name)\n",
    "\n",
    "output = {}\n",
-    "output['Subscription ID'] = ws.subscription_id\n",
-    "output['Workspace'] = ws.name\n",
-    "output['Resource Group'] = ws.resource_group\n",
-    "output['Location'] = ws.location\n",
-    "output['Run History Name'] = experiment_name\n",
-    "pd.set_option('display.max_colwidth', -1)\n",
-    "outputDf = pd.DataFrame(data = output, index = [''])\n",
+    "output[\"Subscription ID\"] = ws.subscription_id\n",
+    "output[\"Workspace\"] = ws.name\n",
+    "output[\"Resource Group\"] = ws.resource_group\n",
+    "output[\"Location\"] = ws.location\n",
+    "output[\"Run History Name\"] = experiment_name\n",
+    "output[\"SDK Version\"] = azureml.core.VERSION\n",
+    "pd.set_option(\"display.max_colwidth\", None)\n",
+    "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
    "outputDf.T"
   ]
  },

@@ -143,10 +118,11 @@
    "# Verify that cluster does not exist already\n",
    "try:\n",
    "    compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
-    "    print('Found existing cluster, use it.')\n",
+    "    print(\"Found existing cluster, use it.\")\n",
    "except ComputeTargetException:\n",
-    "    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
-    "                                                           max_nodes=4)\n",
+    "    compute_config = AmlCompute.provisioning_configuration(\n",
+    "        vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
+    "    )\n",
    "    compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
    "\n",
    "compute_target.wait_for_completion(show_output=True)"

@@ -179,7 +155,7 @@
    "# Split the dataset into train and test datasets\n",
    "train_data, test_data = dataset.random_split(percentage=0.8, seed=223)\n",
    "\n",
-    "label = \"ERP\"\n"
+    "label = \"ERP\""
   ]
  },
  {

@@ -213,20 +189,21 @@
   "source": [
    "automl_settings = {\n",
    "    \"n_cross_validations\": 3,\n",
-    "    \"primary_metric\": 'normalized_root_mean_squared_error',\n",
-    "    \"enable_early_stopping\": True, \n",
-    "    \"experiment_timeout_hours\": 0.3, #for real scenarios we reccommend a timeout of at least one hour \n",
+    "    \"primary_metric\": \"r2_score\",\n",
+    "    \"enable_early_stopping\": True,\n",
+    "    \"experiment_timeout_hours\": 0.3,  # for real scenarios we reccommend a timeout of at least one hour\n",
    "    \"max_concurrent_iterations\": 4,\n",
    "    \"max_cores_per_iteration\": -1,\n",
    "    \"verbosity\": logging.INFO,\n",
    "}\n",
    "\n",
-    "automl_config = AutoMLConfig(task = 'regression',\n",
-    "                             compute_target = compute_target,\n",
-    "                             training_data = train_data,\n",
-    "                             label_column_name = label,\n",
-    "                             **automl_settings\n",
-    "                            )"
+    "automl_config = AutoMLConfig(\n",
+    "    task=\"regression\",\n",
+    "    compute_target=compute_target,\n",
+    "    training_data=train_data,\n",
+    "    label_column_name=label,\n",
+    "    **automl_settings,\n",
+    ")"
   ]
  },
  {

@@ -242,7 +219,7 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "remote_run = experiment.submit(automl_config, show_output = False)"
+    "remote_run = experiment.submit(automl_config, show_output=False)"
   ]
  },
  {

@@ -252,8 +229,8 @@
   "outputs": [],
   "source": [
    "# If you need to retrieve a run that already started, use the following code\n",
-    "#from azureml.train.automl.run import AutoMLRun\n",
-    "#remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
+    "# from azureml.train.automl.run import AutoMLRun\n",
+    "# remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
   ]
  },
  {

@@ -281,7 +258,8 @@
   "outputs": [],
   "source": [
    "from azureml.widgets import RunDetails\n",
-    "RunDetails(remote_run).show() "
+    "\n",
+    "RunDetails(remote_run).show()"
   ]
  },
  {

@@ -328,7 +306,7 @@
   "outputs": [],
   "source": [
    "lookup_metric = \"root_mean_squared_error\"\n",
-    "best_run, fitted_model = remote_run.get_output(metric = lookup_metric)\n",
+    "best_run, fitted_model = remote_run.get_output(metric=lookup_metric)\n",
    "print(best_run)\n",
    "print(fitted_model)"
   ]

@@ -348,7 +326,7 @@
   "outputs": [],
   "source": [
    "iteration = 3\n",
-    "third_run, third_model = remote_run.get_output(iteration = iteration)\n",
+    "third_run, third_model = remote_run.get_output(iteration=iteration)\n",
    "print(third_run)\n",
    "print(third_model)"
   ]

@@ -366,12 +344,12 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "y_test = test_data.keep_columns('ERP').to_pandas_dataframe()\n",
-    "test_data = test_data.drop_columns('ERP').to_pandas_dataframe()\n",
+    "y_test = test_data.keep_columns(\"ERP\").to_pandas_dataframe()\n",
+    "test_data = test_data.drop_columns(\"ERP\").to_pandas_dataframe()\n",
    "\n",
    "\n",
-    "y_train = train_data.keep_columns('ERP').to_pandas_dataframe()\n",
-    "train_data = train_data.drop_columns('ERP').to_pandas_dataframe()\n"
+    "y_train = train_data.keep_columns(\"ERP\").to_pandas_dataframe()\n",
+    "train_data = train_data.drop_columns(\"ERP\").to_pandas_dataframe()"
   ]
  },
  {

@@ -397,27 +375,41 @@
    "from sklearn.metrics import mean_squared_error, r2_score\n",
    "\n",
    "# Set up a multi-plot chart.\n",
-    "f, (a0, a1) = plt.subplots(1, 2, gridspec_kw = {'width_ratios':[1, 1], 'wspace':0, 'hspace': 0})\n",
-    "f.suptitle('Regression Residual Values', fontsize = 18)\n",
+    "f, (a0, a1) = plt.subplots(\n",
+    "    1, 2, gridspec_kw={\"width_ratios\": [1, 1], \"wspace\": 0, \"hspace\": 0}\n",
+    ")\n",
+    "f.suptitle(\"Regression Residual Values\", fontsize=18)\n",
    "f.set_figheight(6)\n",
    "f.set_figwidth(16)\n",
    "\n",
    "# Plot residual values of training set.\n",
    "a0.axis([0, 360, -100, 100])\n",
-    "a0.plot(y_residual_train, 'bo', alpha = 0.5)\n",
-    "a0.plot([-10,360],[0,0], 'r-', lw = 3)\n",
-    "a0.text(16,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_train, y_pred_train))), fontsize = 12)\n",
-    "a0.text(16,140,'R2 score = {0:.2f}'.format(r2_score(y_train, y_pred_train)),fontsize = 12)\n",
-    "a0.set_xlabel('Training samples', fontsize = 12)\n",
-    "a0.set_ylabel('Residual Values', fontsize = 12)\n",
+    "a0.plot(y_residual_train, \"bo\", alpha=0.5)\n",
+    "a0.plot([-10, 360], [0, 0], \"r-\", lw=3)\n",
+    "a0.text(\n",
+    "    16,\n",
+    "    170,\n",
+    "    \"RMSE = {0:.2f}\".format(np.sqrt(mean_squared_error(y_train, y_pred_train))),\n",
+    "    fontsize=12,\n",
+    ")\n",
+    "a0.text(\n",
+    "    16, 140, \"R2 score = {0:.2f}\".format(r2_score(y_train, y_pred_train)), fontsize=12\n",
+    ")\n",
+    "a0.set_xlabel(\"Training samples\", fontsize=12)\n",
+    "a0.set_ylabel(\"Residual Values\", fontsize=12)\n",
    "\n",
    "# Plot residual values of test set.\n",
    "a1.axis([0, 90, -100, 100])\n",
-    "a1.plot(y_residual_test, 'bo', alpha = 0.5)\n",
-    "a1.plot([-10,360],[0,0], 'r-', lw = 3)\n",
-    "a1.text(5,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_test, y_pred_test))), fontsize = 12)\n",
-    "a1.text(5,140,'R2 score = {0:.2f}'.format(r2_score(y_test, y_pred_test)),fontsize = 12)\n",
-    "a1.set_xlabel('Test samples', fontsize = 12)\n",
+    "a1.plot(y_residual_test, \"bo\", alpha=0.5)\n",
+    "a1.plot([-10, 360], [0, 0], \"r-\", lw=3)\n",
+    "a1.text(\n",
+    "    5,\n",
+    "    170,\n",
+    "    \"RMSE = {0:.2f}\".format(np.sqrt(mean_squared_error(y_test, y_pred_test))),\n",
+    "    fontsize=12,\n",
+    ")\n",
+    "a1.text(5, 140, \"R2 score = {0:.2f}\".format(r2_score(y_test, y_pred_test)), fontsize=12)\n",
+    "a1.set_xlabel(\"Test samples\", fontsize=12)\n",
    "a1.set_yticklabels([])\n",
    "\n",
    "plt.show()"

@@ -430,9 +422,11 @@
   "outputs": [],
   "source": [
    "%matplotlib inline\n",
-    "test_pred = plt.scatter(y_test, y_pred_test, color='')\n",
-    "test_test = plt.scatter(y_test, y_test, color='g')\n",
-    "plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
+    "test_pred = plt.scatter(y_test, y_pred_test, color=\"\")\n",
+    "test_test = plt.scatter(y_test, y_test, color=\"g\")\n",
+    "plt.legend(\n",
+    "    (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
+    ")\n",
    "plt.show()"
   ]
  },
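The residual-plot cells above reference `y_pred_train`, `y_pred_test`, `y_residual_train`, and `y_residual_test` without showing how they are produced. A minimal sketch of that intermediate step, assuming the `fitted_model` returned by `remote_run.get_output()` and the pandas frames created in the split cell:

```python
# Score both splits with the fitted AutoML pipeline and form residuals.
y_pred_train = fitted_model.predict(train_data)
y_residual_train = y_train.values.flatten() - y_pred_train

y_pred_test = fitted_model.predict(test_data)
y_residual_test = y_test.values.flatten() - y_pred_test
```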
@@ -82,7 +82,7 @@
   "source": [
    "## Create trained model\n",
    "\n",
-    "For this example, we will train a small model on scikit-learn's [diabetes dataset](https://scikit-learn.org/stable/datasets/index.html#diabetes-dataset). "
+    "For this example, we will train a small model on scikit-learn's [diabetes dataset](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_diabetes.html). "
   ]
  },
  {

@@ -279,7 +279,9 @@
    "\n",
    "\n",
    "environment = Environment('my-sklearn-environment')\n",
-    "environment.python.conda_dependencies = CondaDependencies.create(pip_packages=[\n",
+    "environment.python.conda_dependencies = CondaDependencies.create(conda_packages=[\n",
+    "    'pip==20.2.4'],\n",
+    "    pip_packages=[\n",
    "    'azureml-defaults',\n",
    "    'inference-schema[numpy-support]',\n",
    "    'joblib',\n",

@@ -478,7 +480,9 @@
    "\n",
    "\n",
    "environment = Environment('my-sklearn-environment')\n",
-    "environment.python.conda_dependencies = CondaDependencies.create(pip_packages=[\n",
+    "environment.python.conda_dependencies = CondaDependencies.create(conda_packages=[\n",
+    "    'pip==20.2.4'],\n",
+    "    pip_packages=[\n",
    "    'azureml-defaults',\n",
    "    'inference-schema[numpy-support]',\n",
    "    'joblib',\n",

@@ -81,7 +81,7 @@
   "source": [
    "## Create trained model\n",
    "\n",
-    "For this example, we will train a small model on scikit-learn's [diabetes dataset](https://scikit-learn.org/stable/datasets/index.html#diabetes-dataset). "
+    "For this example, we will train a small model on scikit-learn's [diabetes dataset](https://scikit-learn.org/stable/datasets/toy_dataset.html#diabetes-dataset). "
   ]
  },
  {

@@ -263,7 +263,7 @@
    "\n",
    "# explicitly set base_image to None when setting base_dockerfile\n",
    "myenv.docker.base_image = None\n",
-    "myenv.docker.base_dockerfile = \"FROM mcr.microsoft.com/azureml/base:intelmpi2018.3-ubuntu16.04\\nRUN echo \\\"this is test\\\"\"\n",
+    "myenv.docker.base_dockerfile = \"FROM mcr.microsoft.com/azureml/openmpi4.1.0-ubuntu20.04\\nRUN echo \\\"this is test\\\"\"\n",
    "myenv.inferencing_stack_version = \"latest\"\n",
    "\n",
    "inference_config = InferenceConfig(source_directory=source_directory,\n",

@@ -105,11 +105,13 @@
    "from azureml.core.conda_dependencies import CondaDependencies\n",
    "\n",
    "environment=Environment('my-sklearn-environment')\n",
-    "environment.python.conda_dependencies = CondaDependencies.create(pip_packages=[\n",
+    "environment.python.conda_dependencies = CondaDependencies.create(conda_packages=[\n",
+    "    'pip==20.2.4'],\n",
+    "    pip_packages=[\n",
    "    'azureml-defaults',\n",
    "    'inference-schema[numpy-support]',\n",
    "    'numpy',\n",
-    "    'scikit-learn==0.19.1',\n",
+    "    'scikit-learn==0.22.1',\n",
    "    'scipy'\n",
    "])"
   ]

@@ -172,7 +172,7 @@
   "source": [
    "from azureml.core.conda_dependencies import CondaDependencies\n",
    "\n",
-    "myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn==0.20.3'],\n",
+    "myenv = CondaDependencies.create(conda_packages=['numpy==1.19.5','scikit-learn==0.22.1'],\n",
    "                                 pip_packages=['azureml-defaults'])\n",
    "\n",
    "with open(\"myenv.yml\",\"w\") as f:\n",
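Several of these hunks end by serializing the pinned dependencies to `myenv.yml`. As a brief sketch of the round trip, assuming the standard azureml-core `Environment.from_conda_specification` API (the environment name here is arbitrary):

```python
from azureml.core import Environment

# Rebuild an Environment from the conda spec file written above.
env = Environment.from_conda_specification(name="myenv", file_path="myenv.yml")
```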
@@ -69,17 +69,19 @@
    "# ONNX Model Zoo and save it in the same folder as this tutorial\n",
    "\n",
    "import urllib.request\n",
+    "import os\n",
    "\n",
-    "onnx_model_url = \"https://github.com/onnx/models/blob/master/vision/body_analysis/emotion_ferplus/model/emotion-ferplus-7.tar.gz?raw=true\"\n",
+    "onnx_model_url = \"https://github.com/onnx/models/blob/main/vision/body_analysis/emotion_ferplus/model/emotion-ferplus-7.tar.gz?raw=true\"\n",
    "\n",
    "urllib.request.urlretrieve(onnx_model_url, filename=\"emotion-ferplus-7.tar.gz\")\n",
+    "os.mkdir(\"emotion_ferplus\")\n",
    "\n",
    "# the ! magic command tells our jupyter notebook kernel to run the following line of \n",
    "# code from the command line instead of the notebook kernel\n",
    "\n",
    "# We use tar and xvcf to unzip the files we just retrieved from the ONNX model zoo\n",
    "\n",
-    "!tar xvzf emotion-ferplus-7.tar.gz"
+    "!tar xvzf emotion-ferplus-7.tar.gz -C emotion_ferplus"
   ]
  },
  {

@@ -130,7 +132,7 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "model_dir = \"emotion_ferplus\" # replace this with the location of your model files\n",
+    "model_dir = \"emotion_ferplus/model\" # replace this with the location of your model files\n",
    "\n",
    "# leave as is if it's in the same folder as this notebook"
   ]

@@ -496,13 +498,12 @@
    "\n",
    "# to use parsers to read in our model/data\n",
    "import json\n",
-    "import os\n",
    "\n",
    "test_inputs = []\n",
    "test_outputs = []\n",
    "\n",
-    "# read in 3 testing images from .pb files\n",
-    "test_data_size = 3\n",
+    "# read in 1 testing images from .pb files\n",
+    "test_data_size = 1\n",
    "\n",
    "for num in np.arange(test_data_size):\n",
    "    input_test_data = os.path.join(model_dir, 'test_data_set_{0}'.format(num), 'input_0.pb')\n",

@@ -533,7 +534,7 @@
  },
  "source": [
   "### Show some sample images\n",
-   "We use `matplotlib` to plot 3 test images from the dataset."
+   "We use `matplotlib` to plot 1 test images from the dataset."
  ]
 },
 {

@@ -547,7 +548,7 @@
   "outputs": [],
   "source": [
    "plt.figure(figsize = (20, 20))\n",
-    "for test_image in np.arange(3):\n",
+    "for test_image in np.arange(test_data_size):\n",
    "    test_inputs[test_image].reshape(1, 64, 64)\n",
    "    plt.subplot(1, 8, test_image+1)\n",
    "    plt.axhline('')\n",
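The cells above iterate over serialized protobuf test tensors such as `input_0.pb` from the unpacked model archive. A minimal sketch of how one such tensor is read into a NumPy array, assuming the `onnx` package and the directory layout produced by the `tar` step above:

```python
import os

from onnx import TensorProto, numpy_helper

# Parse one serialized test input from the unpacked archive.
tensor = TensorProto()
path = os.path.join("emotion_ferplus", "model", "test_data_set_0", "input_0.pb")
with open(path, "rb") as f:
    tensor.ParseFromString(f.read())

# Convert the protobuf tensor to a NumPy array for plotting/inference.
array = numpy_helper.to_array(tensor)
print(array.shape, array.dtype)
```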
@@ -69,10 +69,12 @@
    "# ONNX Model Zoo and save it in the same folder as this tutorial\n",
    "\n",
    "import urllib.request\n",
+    "import os\n",
    "\n",
-    "onnx_model_url = \"https://github.com/onnx/models/blob/master/vision/classification/mnist/model/mnist-7.tar.gz?raw=true\"\n",
+    "onnx_model_url = \"https://github.com/onnx/models/blob/main/vision/classification/mnist/model/mnist-7.tar.gz?raw=true\"\n",
    "\n",
-    "urllib.request.urlretrieve(onnx_model_url, filename=\"mnist-7.tar.gz\")"
+    "urllib.request.urlretrieve(onnx_model_url, filename=\"mnist-7.tar.gz\")\n",
+    "os.mkdir(\"mnist\")"
   ]
  },
  {

@@ -86,7 +88,7 @@
    "\n",
    "# We use tar and xvcf to unzip the files we just retrieved from the ONNX model zoo\n",
    "\n",
-    "!tar xvzf mnist-7.tar.gz"
+    "!tar xvzf mnist-7.tar.gz -C mnist"
   ]
  },
  {

@@ -137,7 +139,7 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "model_dir = \"mnist\" # replace this with the location of your model files\n",
+    "model_dir = \"mnist/model\" # replace this with the location of your model files\n",
    "\n",
    "# leave as is if it's in the same folder as this notebook"
   ]

@@ -447,13 +449,12 @@
    "\n",
    "# to use parsers to read in our model/data\n",
    "import json\n",
-    "import os\n",
    "\n",
    "test_inputs = []\n",
    "test_outputs = []\n",
    "\n",
-    "# read in 3 testing images from .pb files\n",
-    "test_data_size = 3\n",
+    "# read in 1 testing images from .pb files\n",
+    "test_data_size = 1\n",
    "\n",
    "for i in np.arange(test_data_size):\n",
    "    input_test_data = os.path.join(model_dir, 'test_data_set_{0}'.format(i), 'input_0.pb')\n",

@@ -486,7 +487,7 @@
  },
  "source": [
   "### Show some sample images\n",
-   "We use `matplotlib` to plot 3 test images from the dataset."
+   "We use `matplotlib` to plot 1 test images from the dataset."
  ]
 },
 {

@@ -500,7 +501,7 @@
   "outputs": [],
   "source": [
    "plt.figure(figsize = (16, 6))\n",
-    "for test_image in np.arange(3):\n",
+    "for test_image in np.arange(test_data_size):\n",
    "    plt.subplot(1, 15, test_image+1)\n",
    "    plt.axhline('')\n",
    "    plt.axvline('')\n",

@@ -240,8 +240,9 @@
    "# Please see [Azure ML Containers repository](https://github.com/Azure/AzureML-Containers#featured-tags)\n",
    "# for open-sourced GPU base images.\n",
    "env.docker.base_image = DEFAULT_GPU_IMAGE\n",
-    "env.python.conda_dependencies = CondaDependencies.create(conda_packages=['tensorflow-gpu==1.12.0','numpy'],\n",
-    "                                                         pip_packages=['azureml-contrib-services', 'azureml-defaults'])\n",
+    "env.python.conda_dependencies = CondaDependencies.create(python_version=\"3.6.2\", \n",
+    "                                                         conda_packages=['tensorflow-gpu==1.12.0','numpy'],\n",
+    "                                                         pip_packages=['azureml-contrib-services', 'azureml-defaults'])\n",
    "\n",
    "inference_config = InferenceConfig(entry_script=\"score.py\", environment=env)\n",
    "aks_config = AksWebservice.deploy_configuration()\n",

@@ -109,7 +109,7 @@
    "from azureml.core import Environment\n",
    "from azureml.core.conda_dependencies import CondaDependencies \n",
    "\n",
-    "conda_deps = CondaDependencies.create(conda_packages=['numpy', 'scikit-learn==0.19.1', 'scipy'], pip_packages=['azureml-defaults', 'inference-schema'])\n",
+    "conda_deps = CondaDependencies.create(conda_packages=['numpy', 'scikit-learn==0.22.1', 'scipy'], pip_packages=['azureml-defaults', 'inference-schema'])\n",
    "myenv = Environment(name='myenv')\n",
    "myenv.python.conda_dependencies = conda_deps"
   ]

@@ -109,7 +109,7 @@
    "from azureml.core import Environment\n",
    "from azureml.core.conda_dependencies import CondaDependencies \n",
    "\n",
-    "conda_deps = CondaDependencies.create(conda_packages=['numpy','scikit-learn==0.19.1','scipy'], pip_packages=['azureml-defaults', 'inference-schema'])\n",
+    "conda_deps = CondaDependencies.create(conda_packages=['numpy','scikit-learn==0.22.1','scipy'], pip_packages=['azureml-defaults', 'inference-schema'])\n",
    "myenv = Environment(name='myenv')\n",
    "myenv.python.conda_dependencies = conda_deps"
   ]

@@ -295,12 +295,14 @@
    "\n",
    "\n",
    "environment = Environment('my-sklearn-environment')\n",
-    "environment.python.conda_dependencies = CondaDependencies.create(pip_packages=[\n",
+    "environment.python.conda_dependencies = CondaDependencies.create(conda_packages=[\n",
+    "    'pip==20.2.4'],\n",
+    "    pip_packages=[\n",
    "    'azureml-defaults',\n",
    "    'inference-schema[numpy-support]',\n",
    "    'joblib',\n",
    "    'numpy',\n",
-    "    'scikit-learn==0.19.1',\n",
+    "    'scikit-learn==0.22.1',\n",
    "    'scipy'\n",
    "])\n",
    "inference_config = InferenceConfig(entry_script='score.py', environment=environment)\n",

@@ -2,6 +2,8 @@
 # Licensed under the MIT license.

 from azureml.core.run import Run
+from azureml.interpret import ExplanationClient
+from interpret_community.adapter import ExplanationAdapter
 import joblib
 import os
 import shap

@@ -11,9 +13,11 @@ OUTPUT_DIR = './outputs/'
 os.makedirs(OUTPUT_DIR, exist_ok=True)

 run = Run.get_context()
+client = ExplanationClient.from_run(run)

 # get a dataset on income prediction
 X, y = shap.datasets.adult()
+features = X.columns.values

 # train an XGBoost model (but any other tree model type should work)
 model = xgboost.XGBClassifier()

@@ -26,6 +30,12 @@ shap_values = explainer(X_shap)
 print("computed shap values:")
 print(shap_values)

+# Use the explanation adapter to convert the importances into an interpret-community
+# style explanation which can be uploaded to AzureML or visualized in the
+# ExplanationDashboard widget
+adapter = ExplanationAdapter(features, classification=True)
+global_explanation = adapter.create_global(shap_values.values, X_shap, expected_values=shap_values.base_values)
+
 # write X_shap out as a pickle file for later visualization
 x_shap_pkl = 'x_shap.pkl'
 with open(x_shap_pkl, 'wb') as file:

@@ -42,3 +52,8 @@ with open(model_file_name, 'wb') as file:
 run.upload_file('xgboost_model.pkl', os.path.join('./outputs/', model_file_name))
 original_model = run.register_model(model_name='xgboost_with_gpu_tree_explainer',
                                     model_path='xgboost_model.pkl')
+
+# Uploading model explanation data for storage or visualization in webUX
+# The explanation can then be downloaded on any compute
+comment = 'Global explanation on classification model trained on adult census income dataset'
+client.upload_model_explanation(global_explanation, comment=comment, model_id=original_model.id)
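The training script above consumes `shap_values = explainer(X_shap)`, but the construction of `explainer` falls outside these hunks. A rough sketch of the likely shape of that step, assuming shap's GPU tree explainer API (the exact constructor and data slice depend on the pinned shap commit, so treat this as illustrative):

```python
import shap

# Hypothetical reconstruction: build a GPU-accelerated tree explainer
# for the trained XGBoost classifier over a slice of the data.
X_shap = X[:100]
explainer = shap.explainers.GPUTree(model, X_shap)
shap_values = explainer(X_shap)
```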
@@ -106,7 +106,7 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
+    "print(\"This notebook was created using version 1.44.0 of the Azure ML SDK\")\n",
    "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
   ]
  },

@@ -225,36 +225,68 @@
    "\n",
    "from azureml.core import Environment\n",
    "\n",
-    "environment_name = \"shap-gpu-tree\"\n",
-    "\n",
+    "environment_name = \"shapgpu\"\n",
    "env = Environment(environment_name)\n",
    "\n",
    "env.docker.enabled = True\n",
    "env.docker.base_image = None\n",
-    "env.docker.base_dockerfile = \"\"\"\n",
-    "FROM rapidsai/rapidsai:cuda10.0-devel-ubuntu18.04\n",
+    "\n",
+    "\n",
+    "# Note: this is to pin the pandas and xgboost versions to be same as notebook.\n",
+    "# In production scenario user would choose their dependencies\n",
+    "import pkg_resources\n",
+    "available_packages = pkg_resources.working_set\n",
+    "pandas_ver = None\n",
+    "for dist in list(available_packages):\n",
+    "    if dist.key == 'pandas':\n",
+    "        pandas_ver = dist.version\n",
+    "pandas_dep = 'pandas'\n",
+    "if pandas_ver:\n",
+    "    pandas_dep = 'pandas=={}'.format(pandas_ver)\n",
+    "\n",
+    "# Note: we build shap at commit 690245 for Tesla K80 GPUs\n",
+    "env.docker.base_dockerfile = f\"\"\"\n",
+    "FROM nvidia/cuda:10.2-devel-ubuntu18.04\n",
+    "ENV PATH=\"/root/miniconda3/bin:${{PATH}}\"\n",
+    "ARG PATH=\"/root/miniconda3/bin:${{PATH}}\"\n",
    "RUN apt-get update && \\\n",
    "apt-get install -y fuse && \\\n",
    "apt-get install -y build-essential && \\\n",
    "apt-get install -y python3-dev && \\\n",
-    "source activate rapids && \\\n",
+    "apt-get install -y wget && \\\n",
+    "apt-get install -y git && \\\n",
+    "rm -rf /var/lib/apt/lists/* && \\\n",
+    "wget \\\n",
+    "https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \\\n",
+    "mkdir /root/.conda && \\\n",
+    "bash Miniconda3-latest-Linux-x86_64.sh -b && \\\n",
+    "rm -f Miniconda3-latest-Linux-x86_64.sh && \\\n",
+    "conda init bash && \\\n",
+    ". ~/.bashrc && \\\n",
+    "conda create -n shapgpu python=3.8 && \\\n",
+    "conda activate shapgpu && \\\n",
    "apt-get install -y g++ && \\\n",
    "printenv && \\\n",
    "echo \"which nvcc: \" && \\\n",
    "which nvcc && \\\n",
    "pip install azureml-defaults && \\\n",
    "pip install azureml-telemetry && \\\n",
+    "pip install azureml-interpret && \\\n",
+    "pip install {pandas_dep} && \\\n",
    "cd /usr/local/src && \\\n",
-    "git clone https://github.com/slundberg/shap && \\\n",
+    "git clone https://github.com/slundberg/shap.git --single-branch && \\\n",
    "cd shap && \\\n",
+    "git reset --hard 690245c6ab043edf40cfce3d8438a62e29ab599f && \\\n",
    "mkdir build && \\\n",
    "python setup.py install --user && \\\n",
    "pip uninstall -y xgboost && \\\n",
-    "rm /conda/envs/rapids/lib/libxgboost.so && \\\n",
-    "pip install xgboost==1.4.2\n",
+    "conda install py-xgboost==1.3.3 && \\\n",
+    "pip uninstall -y numpy && \\\n",
+    "conda install numpy==1.20.3 \\\n",
    "\"\"\"\n",
    "\n",
    "env.python.user_managed_dependencies = True\n",
+    "env.python.interpreter_path = '/root/miniconda3/envs/shapgpu/bin/python'\n",
    "\n",
    "from azureml.core import Run\n",
    "from azureml.core import ScriptRunConfig\n",

@@ -266,6 +298,176 @@
    "run = experiment.submit(config=src)\n",
    "run"
   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%time\n",
+    "# Shows output of the run on stdout.\n",
+    "run.wait_for_completion(show_output=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "run.get_metrics()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Download \n",
+    "1. Download model explanation data."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from azureml.interpret import ExplanationClient\n",
+    "\n",
+    "# Get model explanation data\n",
+    "client = ExplanationClient.from_run(run)\n",
+    "global_explanation = client.download_model_explanation()\n",
+    "local_importance_values = global_explanation.local_importance_values\n",
+    "expected_values = global_explanation.expected_values"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Get the top k (e.g., 4) most important features with their importance values\n",
+    "global_explanation_topk = client.download_model_explanation(top_k=4)\n",
+    "global_importance_values = global_explanation_topk.get_ranked_global_values()\n",
+    "global_importance_names = global_explanation_topk.get_ranked_global_names()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print('global importance values: {}'.format(global_importance_values))\n",
+    "print('global importance names: {}'.format(global_importance_names))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "2. Download model file."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Retrieve model for visualization and deployment\n",
+    "from azureml.core.model import Model\n",
+    "import joblib\n",
+    "original_model = Model(ws, 'xgboost_with_gpu_tree_explainer')\n",
+    "model_path = original_model.download(exist_ok=True)\n",
+    "original_model = joblib.load(model_path)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "3. Download test dataset."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Retrieve x_test for visualization\n",
+    "x_test_path = './x_shap_adult_census.pkl'\n",
+    "run.download_file('x_shap_adult_census.pkl', output_file_path=x_test_path)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "x_test = joblib.load('x_shap_adult_census.pkl')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Visualize\n",
+    "Load the visualization dashboard"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from raiwidgets import ExplanationDashboard"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from interpret_community.common.model_wrapper import wrap_model\n",
+    "from interpret_community.dataset.dataset_wrapper import DatasetWrapper\n",
+    "# note we need to wrap the XGBoost model to output predictions and probabilities in the scikit-learn format\n",
+    "class WrappedXGBoostModel(object):\n",
+    "    \"\"\"A class for wrapping an XGBoost model to output integer predicted classes.\"\"\"\n",
+    "\n",
+    "    def __init__(self, model):\n",
+    "        self.model = model\n",
+    "\n",
+    "    def predict(self, dataset):\n",
+    "        return self.model.predict(dataset).astype(int)\n",
+    "\n",
+    "    def predict_proba(self, dataset):\n",
+    "        return self.model.predict_proba(dataset)\n",
+    "\n",
+    "wrapped_model = WrappedXGBoostModel(wrap_model(original_model, DatasetWrapper(x_test), model_task='classification'))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ExplanationDashboard(global_explanation, wrapped_model, dataset=x_test)"
+   ]
  }
 ],
 "metadata": {

@@ -1,5 +1,18 @@
 name: train-explain-model-gpu-tree-explainer
 dependencies:
+- py-xgboost==1.3.3
 - pip:
   - azureml-sdk
   - azureml-interpret
+  - flask
+  - flask-cors
+  - gevent>=1.3.6
+  - jinja2
+  - ipython
+  - matplotlib
+  - ipywidgets
+  - raiwidgets~=0.19.0
+  - itsdangerous==2.0.1
+  - markupsafe<2.1.0
+  - scipy>=1.5.3
+  - protobuf==3.20.0

@@ -249,6 +249,7 @@
   "source": [
    "from azureml.core.runconfig import RunConfiguration\n",
    "from azureml.core.conda_dependencies import CondaDependencies\n",
+    "import sys\n",
    "\n",
    "# Create a new RunConfig object\n",
    "run_config = RunConfiguration(framework=\"python\")\n",

@@ -260,6 +261,8 @@
    "    'azureml-defaults', 'azureml-telemetry', 'azureml-interpret'\n",
    "]\n",
    "\n",
+    "python_version = '{0}.{1}'.format(sys.version_info[0], sys.version_info[1])\n",
+    "\n",
    "# Note: this is to pin the scikit-learn and pandas versions to be same as notebook.\n",
    "# In production scenario user would choose their dependencies\n",
    "import pkg_resources\n",

@@ -283,7 +286,7 @@
    "# environment, otherwise if a model is trained or deployed in a different environment this can\n",
    "# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
    "azureml_pip_packages.extend([sklearn_dep, pandas_dep])\n",
-    "run_config.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=azureml_pip_packages)\n",
+    "run_config.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=azureml_pip_packages, python_version=python_version)\n",
    "\n",
    "from azureml.core import ScriptRunConfig\n",
    "\n",

@@ -11,4 +11,8 @@ dependencies:
 - matplotlib
 - azureml-dataset-runtime
 - ipywidgets
-- raiwidgets~=0.15.0
+- raiwidgets~=0.19.0
+- itsdangerous==2.0.1
+- markupsafe<2.1.0
+- scipy>=1.5.3
+- protobuf==3.20.0

@@ -10,4 +10,9 @@ dependencies:
 - ipython
 - matplotlib
 - ipywidgets
-- raiwidgets~=0.15.0
+- raiwidgets~=0.19.0
+- packaging>=20.9
+- itsdangerous==2.0.1
+- markupsafe<2.1.0
+- scipy>=1.5.3
+- protobuf==3.20.0

@@ -18,7 +18,9 @@ def init():
     original_model_path = Model.get_model_path('local_deploy_model')
     scoring_explainer_path = Model.get_model_path('IBM_attrition_explainer')

+    # Load the original model into the environment
     original_model = joblib.load(original_model_path)
+    # Load the scoring explainer into the environment
     scoring_explainer = joblib.load(scoring_explainer_path)


@@ -29,5 +31,15 @@ def run(raw_data):
     predictions = original_model.predict(data)
     # Retrieve model explanations
     local_importance_values = scoring_explainer.explain(data)
+    # Retrieve the feature names, which we may want to return to the user.
+    # Note: you can also get the raw_features and engineered_features
+    # by calling scoring_explainer.raw_features and
+    # scoring_explainer.engineered_features but you may need to pass
+    # the raw or engineered feature names in the ScoringExplainer
+    # constructor, depending on if you are using feature maps or
+    # transformations on the original explainer.
+    features = scoring_explainer.features
     # You can return any data type as long as it is JSON-serializable
-    return {'predictions': predictions.tolist(), 'local_importance_values': local_importance_values}
+    return {'predictions': predictions.tolist(),
+            'local_importance_values': local_importance_values,
+            'features': features}
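The `run` function above now returns feature names alongside the predictions and per-row importances. As a rough client-side sketch of the request/response cycle (the payload rows and the `service` object are hypothetical; the call style mirrors the `post_with_retries` usage adopted elsewhere in these changes):

```python
import json

from raiutils.webservice import post_with_retries

# Hypothetical two-row payload shaped like the attrition features.
sample_data = json.dumps({"data": [[41, 1102, 1], [49, 279, 8]]})
headers = {"Content-Type": "application/json"}

resp = post_with_retries(service.scoring_uri, sample_data, headers)

# Keys match the dict returned by run(): 'predictions',
# 'local_importance_values', and 'features'.
result = json.loads(resp.text)
print(result["predictions"], result["features"])
```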
@@ -340,17 +340,29 @@
    "available_packages = pkg_resources.working_set\n",
    "sklearn_ver = None\n",
    "pandas_ver = None\n",
+    "numpy_ver = None\n",
+    "numba_ver = None\n",
    "for dist in available_packages:\n",
    "    if dist.key == 'scikit-learn':\n",
    "        sklearn_ver = dist.version\n",
+    "    elif dist.key == 'numpy':\n",
+    "        numpy_ver = dist.version\n",
+    "    elif dist.key == 'numba':\n",
+    "        numba_ver = dist.version\n",
    "    elif dist.key == 'pandas':\n",
    "        pandas_ver = dist.version\n",
    "sklearn_dep = 'scikit-learn'\n",
    "pandas_dep = 'pandas'\n",
+    "numpy_dep = 'numpy'\n",
+    "numba_dep = 'numba'\n",
    "if sklearn_ver:\n",
    "    sklearn_dep = 'scikit-learn=={}'.format(sklearn_ver)\n",
    "if pandas_ver:\n",
    "    pandas_dep = 'pandas=={}'.format(pandas_ver)\n",
+    "if numpy_ver:\n",
+    "    numpy_dep = 'numpy=={}'.format(numpy_ver)\n",
+    "if numba_ver:\n",
+    "    numba_dep = 'numba=={}'.format(numba_ver)\n",
    "# Specify CondaDependencies obj\n",
    "# The CondaDependencies specifies the conda and pip packages that are installed in the environment\n",
    "# the submitted job is run in. Note the remote environment(s) needs to be similar to the local\n",

@@ -358,7 +370,8 @@
    "# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
    "myenv = CondaDependencies.create(\n",
    "    python_version=python_version,\n",
-    "    pip_packages=['pyyaml', sklearn_dep, pandas_dep] + azureml_pip_packages)\n",
+    "    conda_packages=['pip==20.2.4', numpy_dep],\n",
+    "    pip_packages=['pyyaml', sklearn_dep, pandas_dep, numba_dep] + azureml_pip_packages)\n",
    "\n",
    "with open(\"myenv.yml\",\"w\") as f:\n",
    "    f.write(myenv.serialize_to_string())\n",

@@ -391,7 +404,7 @@
    "\n",
    "\n",
    "aciconfig = AciWebservice.deploy_configuration(cpu_cores=1, \n",
-    "                                               memory_gb=1, \n",
+    "                                               memory_gb=2, \n",
    "                                               tags={\"data\": \"IBM_Attrition\", \n",
    "                                                     \"method\" : \"local_explanation\"}, \n",
    "                                               description='Get local explanations for IBM Employee Attrition data')\n",

@@ -415,8 +428,8 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "import requests\n",
    "import json\n",
+    "from raiutils.webservice import post_with_retries\n",
    "\n",
    "\n",
    "# Create data to test service with\n",

@@ -428,7 +441,7 @@
    "\n",
    "# Send request to service\n",
    "print(\"POST to url\", service.scoring_uri)\n",
-    "resp = requests.post(service.scoring_uri, sample_data, headers=headers)\n",
+    "resp = post_with_retries(service.scoring_uri, sample_data, headers)\n",
    "\n",
    "# Can covert back to Python objects from json string if desired\n",
    "print(\"prediction:\", resp.text)\n",
|||||||
@@ -10,4 +10,9 @@ dependencies:
|
|||||||
- ipython
|
- ipython
|
||||||
- matplotlib
|
- matplotlib
|
||||||
- ipywidgets
|
- ipywidgets
|
||||||
- raiwidgets~=0.15.0
|
- raiwidgets~=0.19.0
|
||||||
|
- packaging>=20.9
|
||||||
|
- itsdangerous==2.0.1
|
||||||
|
- markupsafe<2.1.0
|
||||||
|
- scipy>=1.5.3
|
||||||
|
- protobuf==3.20.0
|
||||||
|
|||||||
@@ -513,7 +513,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"import requests\n",
|
"from raiutils.webservice import post_with_retries\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Create data to test service with\n",
|
"# Create data to test service with\n",
|
||||||
"examples = x_test[:4]\n",
|
"examples = x_test[:4]\n",
|
||||||
@@ -523,7 +523,7 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"# Send request to service\n",
|
"# Send request to service\n",
|
||||||
"print(\"POST to url\", service.scoring_uri)\n",
|
"print(\"POST to url\", service.scoring_uri)\n",
|
||||||
"resp = requests.post(service.scoring_uri, input_data, headers=headers)\n",
|
"resp = post_with_retries(service.scoring_uri, input_data, headers)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Can covert back to Python objects from json string if desired\n",
|
"# Can covert back to Python objects from json string if desired\n",
|
||||||
"print(\"prediction:\", resp.text)"
|
"print(\"prediction:\", resp.text)"
|
||||||
|
|||||||
@@ -12,4 +12,8 @@ dependencies:
|
|||||||
- azureml-dataset-runtime
|
- azureml-dataset-runtime
|
||||||
- azureml-core
|
- azureml-core
|
||||||
- ipywidgets
|
- ipywidgets
|
||||||
- raiwidgets~=0.15.0
|
- raiwidgets~=0.19.0
|
||||||
|
- itsdangerous==2.0.1
|
||||||
|
- markupsafe<2.1.0
|
||||||
|
- scipy>=1.5.3
|
||||||
|
- protobuf==3.20.0
|
||||||
|
|||||||
@@ -3,3 +3,4 @@ dependencies:
|
|||||||
- pip:
|
- pip:
|
||||||
- azureml-sdk
|
- azureml-sdk
|
||||||
- azureml-widgets
|
- azureml-widgets
|
||||||
|
- protobuf==3.20.0
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
# DisableDockerDetector "Disabled to unblock PRs until the owner can fix the file. Not used in any prod deployments - only as a documentation for the customers"
|
||||||
FROM rocker/tidyverse:4.0.0-ubuntu18.04
|
FROM rocker/tidyverse:4.0.0-ubuntu18.04
|
||||||
|
|
||||||
# Install python
|
# Install python
|
||||||
|
|||||||
@@ -5,17 +5,6 @@ import argparse
 import os
 from azureml.core import Run
 
-
-def get_dict(dict_str):
-    pairs = dict_str.strip("{}").split(r'\;')
-    new_dict = {}
-    for pair in pairs:
-        key, value = pair.strip().split(":")
-        new_dict[key.strip().strip("'")] = value.strip().strip("'")
-
-    return new_dict
-
-
 print("Cleans the input data")
 
 # Get the input green_taxi_data. To learn more about how to access dataset in your script, please

@@ -23,7 +12,6 @@ print("Cleans the input data")
 run = Run.get_context()
 raw_data = run.input_datasets["raw_data"]
 
-
 parser = argparse.ArgumentParser("cleanse")
 parser.add_argument("--output_cleanse", type=str, help="cleaned taxi data directory")
 parser.add_argument("--useful_columns", type=str, help="useful columns to keep")

@@ -38,8 +26,8 @@ print("Argument 3(output cleansed taxi data path): %s" % args.output_cleanse)
 # These functions ensure that null data is removed from the dataset,
 # which will help increase machine learning model accuracy.
 
-useful_columns = [s.strip().strip("'") for s in args.useful_columns.strip("[]").split(r'\;')]
-columns = get_dict(args.columns)
+useful_columns = eval(args.useful_columns.replace(';', ','))
+columns = eval(args.columns.replace(';', ','))
 
 new_df = (raw_data.to_pandas_dataframe()
           .dropna(how='all')

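The replacement code trades the hand-rolled parser for `eval`, which is concise but executes whatever string arrives on the command line. If the arguments are always literal Python lists and dicts, `ast.literal_eval` gives the same convenience without code execution. A minimal sketch under that assumption; the argument strings below are illustrative, not taken from the pipeline:

```python
import ast

# Example argument strings in the semicolon-separated form the step receives;
# the concrete column names are illustrative only.
useful_columns_arg = "['cost'; 'distance'; 'dropoff_datetime']"
columns_arg = "{'vendorID': 'vendor'; 'lpepPickupDatetime': 'pickup_datetime'}"

def parse_literal(arg: str):
    # ast.literal_eval accepts only Python literals, so an injected
    # expression raises ValueError instead of executing as eval() would.
    return ast.literal_eval(arg.replace(';', ','))

useful_columns = parse_literal(useful_columns_arg)  # -> list of column names
columns = parse_literal(columns_arg)                # -> rename mapping dict
print(useful_columns, columns)
```
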
@@ -359,7 +359,9 @@
 "from azureml.core import Environment\n",
 "from azureml.core.runconfig import CondaDependencies, DEFAULT_CPU_IMAGE\n",
 "\n",
-"batch_conda_deps = CondaDependencies.create(pip_packages=[\"tensorflow==1.15.2\", \"pillow\", \n",
+"batch_conda_deps = CondaDependencies.create(python_version=\"3.7\",\n",
+"                                            conda_packages=['pip==20.2.4'],\n",
+"                                            pip_packages=[\"tensorflow==1.15.2\", \"pillow\", \"protobuf==3.20.1\",\n",
 "                                            \"azureml-core\", \"azureml-dataset-runtime[fuse]\"])\n",
 "batch_env = Environment(name=\"batch_environment\")\n",
 "batch_env.python.conda_dependencies = batch_conda_deps\n",

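Several hunks in this range make the same move: pin `python_version="3.7"` and `pip==20.2.4` in `CondaDependencies.create` so conda stops resolving to a newer interpreter or pip than the pinned packages support. Before wiring such an environment into a pipeline, it can be worth materializing it once. A hedged sketch; `ws` is assumed to be the `Workspace` handle from earlier in the notebook, and the environment name is arbitrary:

```python
from azureml.core import Environment
from azureml.core.runconfig import CondaDependencies

# Same pattern as the diff: pin the interpreter and pip alongside packages.
deps = CondaDependencies.create(python_version="3.7",
                                conda_packages=["pip==20.2.4"],
                                pip_packages=["azureml-core"])

env = Environment(name="pinned_environment")
env.python.conda_dependencies = deps

# Optional sanity check: build the image now rather than at submit time.
# `ws` is an assumed azureml.core.Workspace handle.
build = env.register(workspace=ws).build(workspace=ws)
build.wait_for_completion(show_output=True)
```
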
@@ -308,7 +308,9 @@
 "from azureml.core import Environment\n",
 "from azureml.core.runconfig import CondaDependencies\n",
 "\n",
-"predict_conda_deps = CondaDependencies.create(pip_packages=[\"scikit-learn==0.20.3\",\n",
+"predict_conda_deps = CondaDependencies.create(python_version=\"3.7\", \n",
+"                                              conda_packages=['pip==20.2.4'],\n",
+"                                              pip_packages=[\"scikit-learn==0.20.3\",\n",
 "                                              \"azureml-core\", \"azureml-dataset-runtime[pandas,fuse]\"])\n",
 "\n",
 "predict_env = Environment(name=\"predict_environment\")\n",

@@ -308,7 +308,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"cd = CondaDependencies()\n",
+"cd = CondaDependencies.create(python_version=\"3.7\", conda_packages=['pip==20.2.4'])\n",
 "\n",
 "cd.add_channel(\"conda-forge\")\n",
 "cd.add_conda_package(\"ffmpeg==4.0.2\")\n",

@@ -401,13 +401,12 @@
 "from azureml.core import Environment\n",
 "from azureml.core.runconfig import DEFAULT_GPU_IMAGE\n",
 "\n",
-"parallel_cd = CondaDependencies()\n",
+"parallel_cd = CondaDependencies.create(python_version=\"3.7\", conda_packages=['pip==20.2.4', 'numpy==1.19'])\n",
 "\n",
 "parallel_cd.add_channel(\"pytorch\")\n",
 "parallel_cd.add_conda_package(\"pytorch\")\n",
 "parallel_cd.add_conda_package(\"torchvision\")\n",
 "parallel_cd.add_conda_package(\"pillow<7\") # needed for torchvision==0.4.0\n",
-"parallel_cd.add_pip_package(\"azureml-core\")\n",
 "\n",
 "styleenvironment = Environment(name=\"styleenvironment\")\n",
 "styleenvironment.python.conda_dependencies=parallel_cd\n",

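When a hunk both pins conda packages and drops a pip package, it is easy to lose track of what the final spec contains. `CondaDependencies.serialize_to_string()` (also used later in this diff) prints the resolved YAML, which makes a quick pre-submit check. A minimal sketch mirroring the `parallel_cd` construction above, minus the workspace-specific pieces:

```python
from azureml.core.runconfig import CondaDependencies

# Same pins as the hunk above: Python 3.7, pip 20.2.4, numpy 1.19.
parallel_cd = CondaDependencies.create(python_version="3.7",
                                       conda_packages=["pip==20.2.4", "numpy==1.19"])
parallel_cd.add_channel("pytorch")
parallel_cd.add_conda_package("pytorch")
parallel_cd.add_conda_package("torchvision")

# Dump the conda spec that will actually be shipped with the run.
print(parallel_cd.serialize_to_string())
```
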
@@ -254,6 +254,7 @@
 "- conda-forge\n",
 "dependencies:\n",
 "- python=3.6.2\n",
+"- pip=21.3.1\n",
 "- pip:\n",
 "  - azureml-defaults\n",
 "  - azureml-opendatasets\n",

@@ -553,7 +554,7 @@
 "cd = CondaDependencies.create()\n",
 "cd.add_conda_package('numpy')\n",
 "cd.add_pip_package('chainer==5.1.0')\n",
-"cd.add_pip_package(\"azureml-defaults\")\n",
+"cd.add_pip_package(\"azureml-defaults==1.43.0\")\n",
 "cd.add_pip_package(\"azureml-opendatasets\")\n",
 "cd.save_to_file(base_directory='./', conda_file_path='myenv.yml')\n",
 "\n",

@@ -587,7 +588,7 @@
 "\n",
 "aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,\n",
 "                                               auth_enabled=True, # this flag generates API keys to secure access\n",
-"                                               memory_gb=1,\n",
+"                                               memory_gb=2,\n",
 "                                               tags={'name': 'mnist', 'framework': 'Chainer'},\n",
 "                                               description='Chainer DNN with MNIST')\n",
 "\n",

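Doubling `memory_gb` is the usual fix when an ACI-hosted model is killed while loading. If a deployment with the old limit is already unhealthy, the container logs usually say so directly. A short sketch, assuming `service` is the `Webservice` handle returned by the deployment cell:

```python
# `service` is assumed to be the deployed Webservice from earlier cells.
print("state:", service.state)

# On out-of-memory kills the log typically shows the worker
# dying while the model is being loaded.
print(service.get_logs())
```
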
@@ -163,7 +163,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"fastai_env.docker.base_image = \"fastdotai/fastai:latest\"\n",
+"fastai_env.docker.base_image = \"fastdotai/fastai:2021-02-11\"\n",
 "fastai_env.python.user_managed_dependencies = True"
 ]
 },

@@ -199,7 +199,7 @@
 "Specify docker steps as a string:\n",
 "```python \n",
 "dockerfile = r\"\"\" \\\n",
-"FROM mcr.microsoft.com/azureml/base:intelmpi2018.3-ubuntu16.04\n",
+"FROM mcr.microsoft.com/azureml/openmpi4.1.0-ubuntu20.04\n",
 "RUN echo \"Hello from custom container!\" \\\n",
 "\"\"\"\n",
 "```\n",

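The retired `base:intelmpi2018.3-ubuntu16.04` image is swapped for the supported `openmpi4.1.0-ubuntu20.04` one. For reference, a hedged sketch of how such a Dockerfile string is attached to an environment in the v1 SDK; per the SDK's guidance, `base_image` should be cleared when `base_dockerfile` is set, and the environment name is arbitrary:

```python
from azureml.core import Environment

dockerfile = r"""
FROM mcr.microsoft.com/azureml/openmpi4.1.0-ubuntu20.04
RUN echo "Hello from custom container!"
"""

env = Environment(name="custom_dockerfile_environment")
env.docker.base_image = None          # base_image and base_dockerfile are exclusive
env.docker.base_dockerfile = dockerfile
```
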
@@ -430,13 +430,15 @@
 "channels:\n",
 "- conda-forge\n",
 "dependencies:\n",
-"- python=3.6.2\n",
+"- python=3.7\n",
+"- pip=21.3.1\n",
 "- pip:\n",
 "  - h5py<=2.10.0\n",
 "  - azureml-defaults\n",
 "  - tensorflow-gpu==2.0.0\n",
 "  - keras<=2.3.1\n",
-"  - matplotlib"
+"  - matplotlib\n",
+"  - protobuf==3.20.1"
 ]
 },
 {

@@ -983,11 +985,12 @@
 "source": [
 "from azureml.core.conda_dependencies import CondaDependencies\n",
 "\n",
-"cd = CondaDependencies.create()\n",
+"cd = CondaDependencies.create(python_version=\"3.7\")\n",
 "cd.add_tensorflow_conda_package()\n",
 "cd.add_conda_package('h5py<=2.10.0')\n",
 "cd.add_conda_package('keras<=2.3.1')\n",
 "cd.add_pip_package(\"azureml-defaults\")\n",
+"cd.add_pip_package(\"protobuf==3.20.1\")\n",
 "cd.save_to_file(base_directory='./', conda_file_path='myenv.yml')\n",
 "\n",
 "print(cd.serialize_to_string())"
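
`save_to_file` writes the spec to `myenv.yml`, which later cells typically feed back into an environment. A short sketch of that round trip, assuming the file was written by the cell above; the environment name is arbitrary:

```python
from azureml.core import Environment

# Load the conda spec that cd.save_to_file() wrote above.
env = Environment.from_conda_specification(name="tf-keras-env",
                                           file_path="./myenv.yml")
print(env.python.conda_dependencies.serialize_to_string())
```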