mirror of https://github.com/Azure/MachineLearningNotebooks.git
synced 2025-12-20 09:37:04 -05:00

Compare commits: shbijlan-u...azureml-sd
54 commits
| Author | SHA1 | Date |
|---|---|---|
| | ee5d0239a3 | |
| | 388111cedc | |
| | b86191ed7f | |
| | 22753486de | |
| | cf1d1dbf01 | |
| | 2e45d9800d | |
| | a9a8de02ec | |
| | dd8339e650 | |
| | 1594ee64a1 | |
| | 83ed8222d2 | |
| | b0aa91acce | |
| | 5928ba83bb | |
| | ffa3a43979 | |
| | 7ce79a43f1 | |
| | edcc50ab0c | |
| | 4a391522d0 | |
| | 1903f78285 | |
| | a4dfcc4693 | |
| | faffb3fef7 | |
| | 6c6227c403 | |
| | e3be364e7a | |
| | 90e20a60e9 | |
| | 33a4eacf1d | |
| | e30b53fddc | |
| | 95b0392ed2 | |
| | 796798cb49 | |
| | 08b0ba7854 | |
| | ceaf82acc6 | |
| | dadc93cfe5 | |
| | c7076bf95c | |
| | ebdffd5626 | |
| | d123880562 | |
| | 4864e8ea60 | |
| | c86db0d7fd | |
| | ccfbbb3b14 | |
| | c42ba64b15 | |
| | 6d8bf32243 | |
| | 9094da4085 | |
| | ebf9d2855c | |
| | 1bbd78eb33 | |
| | 77f5a69e04 | |
| | ce82af2ab0 | |
| | 2a2d2efa17 | |
| | dd494e9cac | |
| | 352adb7487 | |
| | aebe34b4e8 | |
| | c7e1241e20 | |
| | 6529298c24 | |
| | e2dddfde85 | |
| | 36d96f96ec | |
| | 7ebcfea5a3 | |
| | b20bfed33a | |
| | a66a92e338 | |
| | c56c2c3525 | |
@@ -103,7 +103,7 @@
 "source": [
 "import azureml.core\n",
 "\n",
-"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.42.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -188,13 +188,6 @@
 "### Script to process data and train model"
 ]
 },
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"The _process_data.py_ script used in the step below is a slightly modified implementation of [RAPIDS Mortgage E2E example](https://github.com/rapidsai/notebooks-contrib/blob/master/intermediate_notebooks/E2E/mortgage/mortgage_e2e.ipynb)."
-]
-},
 {
 "cell_type": "code",
 "execution_count": null,
@@ -373,7 +366,7 @@
 "run_config.target = gpu_cluster_name\n",
 "run_config.environment.docker.enabled = True\n",
 "run_config.environment.docker.gpu_support = True\n",
-"run_config.environment.docker.base_image = \"mcr.microsoft.com/azureml/base-gpu:intelmpi2018.3-cuda10.0-cudnn7-ubuntu16.04\"\n",
+"run_config.environment.docker.base_image = \"mcr.microsoft.com/azureml/openmpi4.1.0-cuda11.1-cudnn8-ubuntu20.04\"\n",
 "run_config.environment.spark.precache_packages = False\n",
 "run_config.data_references={'data':data_ref.to_config()}"
 ]
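For reference, the updated cell reassembled as runnable Python: a minimal sketch assuming `gpu_cluster_name` and `data_ref` are defined earlier in the notebook, as in the surrounding hunk.

```python
from azureml.core.runconfig import RunConfiguration

run_config = RunConfiguration()
run_config.target = gpu_cluster_name
run_config.environment.docker.enabled = True
# gpu_support is kept here to mirror the notebook cell; later SDK releases
# deprecate it and detect GPU support from the base image automatically.
run_config.environment.docker.gpu_support = True
run_config.environment.docker.base_image = (
    "mcr.microsoft.com/azureml/openmpi4.1.0-cuda11.1-cudnn8-ubuntu20.04"
)
run_config.environment.spark.precache_packages = False
run_config.data_references = {"data": data_ref.to_config()}
```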
@@ -49,7 +49,7 @@
 "* `fairlearn>=0.6.2` (pre-v0.5.0 will work with minor modifications)\n",
 "* `joblib`\n",
 "* `liac-arff`\n",
-"* `raiwidgets~=0.7.0`\n",
+"* `raiwidgets`\n",
 "\n",
 "Fairlearn relies on features introduced in v0.22.1 of `scikit-learn`. If you have an older version already installed, please uncomment and run the following cell:"
 ]
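The upgrade cell itself is not part of this hunk; a plausible sketch of such a cell, assuming pip is available in the notebook environment (the notebook's actual command may differ):

```python
# Hypothetical upgrade cell, uncomment to run. The real cell is not shown in
# this diff; this is an assumption based on the surrounding markdown.
# !pip install --upgrade "scikit-learn>=0.22.1"
```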
@@ -6,4 +6,6 @@ dependencies:
 - fairlearn>=0.6.2
 - joblib
 - liac-arff
-- raiwidgets~=0.7.0
+- raiwidgets~=0.18.1
+- itsdangerous==2.0.1
+- markupsafe<2.1.0
@@ -51,7 +51,7 @@
 "* `fairlearn>=0.6.2` (also works for pre-v0.5.0 with slight modifications)\n",
 "* `joblib`\n",
 "* `liac-arff`\n",
-"* `raiwidgets~=0.7.0`\n",
+"* `raiwidgets`\n",
 "\n",
 "Fairlearn relies on features introduced in v0.22.1 of `scikit-learn`. If you have an older version already installed, please uncomment and run the following cell:"
 ]
@@ -6,4 +6,6 @@ dependencies:
 - fairlearn>=0.6.2
 - joblib
 - liac-arff
-- raiwidgets~=0.7.0
+- raiwidgets~=0.18.1
+- itsdangerous==2.0.1
+- markupsafe<2.1.0
@@ -1,30 +1,31 @@
 name: azure_automl
 channels:
 - conda-forge
 - pytorch
 - main
 dependencies:
 # The python interpreter version.
-# Currently Azure ML only supports 3.5.2 and later.
-- pip==21.1.2
-- python>=3.5.2,<3.8
-- nb_conda
-- boto3==1.15.18
-- matplotlib==2.1.0
-- numpy==1.18.5
-- cython
-- urllib3<1.24
-- scipy>=1.4.1,<=1.5.2
-- scikit-learn==0.22.1
-- pandas==0.25.1
-- py-xgboost<=0.90
-- conda-forge::fbprophet==0.5
-- holidays==0.9.11
+# Currently Azure ML only supports 3.6.0 and later.
+- pip==20.2.4
+- python>=3.6,<3.9
+- matplotlib==3.2.1
+- py-xgboost==1.3.3
+- pytorch::pytorch=1.4.0
+- conda-forge::fbprophet==0.7.1
+- cudatoolkit=10.1.243
+- tornado==6.1.0
+- scipy==1.5.2
+- notebook
+- pywin32==227
+- PySocks==1.7.1
+- jsonschema==4.5.1
+- conda-forge::pyqt==5.12.3
 
 - pip:
 # Required packages for AzureML execution, history, and data preparation.
-- azureml-widgets~=1.34.0
+- azureml-widgets~=1.42.0
 - pytorch-transformers==1.0.0
-- spacy==2.1.8
+- spacy==2.2.4
+- pystan==2.19.1.1
 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-- -r https://automlresources-prod.azureedge.net/validated-requirements/1.34.0/validated_win32_requirements.txt [--no-deps]
+- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.42.0/validated_win32_requirements.txt [--no-deps]
+- arch==4.14
@@ -1,30 +1,33 @@
 name: azure_automl
 channels:
 - conda-forge
 - pytorch
 - main
 dependencies:
 # The python interpreter version.
-# Currently Azure ML only supports 3.5.2 and later.
-- pip==21.1.2
-- python>=3.5.2,<3.8
-- nb_conda
-- boto3==1.15.18
-- matplotlib==2.1.0
-- numpy==1.18.5
-- cython
-- urllib3<1.24
+# Currently Azure ML only supports 3.6.0 and later.
+- pip==20.2.4
+- python>=3.6,<3.9
+- boto3==1.20.19
+- botocore<=1.23.19
+- matplotlib==3.2.1
+- numpy==1.19.5
+- cython==0.29.14
+- urllib3==1.26.7
 - scipy>=1.4.1,<=1.5.2
 - scikit-learn==0.22.1
 - pandas==0.25.1
-- py-xgboost<=0.90
-- conda-forge::fbprophet==0.5
-- holidays==0.9.11
+- py-xgboost<=1.3.3
+- holidays==0.10.3
+- conda-forge::fbprophet==0.7.1
+- pytorch::pytorch=1.4.0
+- cudatoolkit=10.1.243
+- tornado==6.1.0
 
 - pip:
 # Required packages for AzureML execution, history, and data preparation.
-- azureml-widgets~=1.34.0
+- azureml-widgets~=1.42.0
 - pytorch-transformers==1.0.0
-- spacy==2.1.8
+- spacy==2.2.4
+- pystan==2.19.1.1
 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-- -r https://automlresources-prod.azureedge.net/validated-requirements/1.34.0/validated_linux_requirements.txt [--no-deps]
+- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.42.0/validated_linux_requirements.txt [--no-deps]
+- arch==4.14
@@ -1,31 +1,34 @@
 name: azure_automl
 channels:
 - conda-forge
 - pytorch
 - main
 dependencies:
 # The python interpreter version.
-# Currently Azure ML only supports 3.5.2 and later.
-- pip==21.1.2
+# Currently Azure ML only supports 3.6.0 and later.
+- pip==20.2.4
 - nomkl
-- python>=3.5.2,<3.8
-- nb_conda
-- boto3==1.15.18
-- matplotlib==2.1.0
-- numpy==1.18.5
-- cython
-- urllib3<1.24
+- python>=3.6,<3.9
+- boto3==1.20.19
+- botocore<=1.23.19
+- matplotlib==3.2.1
+- numpy==1.19.5
+- cython==0.29.14
+- urllib3==1.26.7
 - scipy>=1.4.1,<=1.5.2
 - scikit-learn==0.22.1
 - pandas==0.25.1
-- py-xgboost<=0.90
-- conda-forge::fbprophet==0.5
-- holidays==0.9.11
+- py-xgboost<=1.3.3
+- holidays==0.10.3
+- conda-forge::fbprophet==0.7.1
+- pytorch::pytorch=1.4.0
+- cudatoolkit=9.0
+- tornado==6.1.0
 
 - pip:
 # Required packages for AzureML execution, history, and data preparation.
-- azureml-widgets~=1.34.0
+- azureml-widgets~=1.42.0
 - pytorch-transformers==1.0.0
-- spacy==2.1.8
+- spacy==2.2.4
+- pystan==2.19.1.1
 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-- -r https://automlresources-prod.azureedge.net/validated-requirements/1.34.0/validated_darwin_requirements.txt [--no-deps]
+- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.42.0/validated_darwin_requirements.txt [--no-deps]
+- arch==4.14
@@ -3,7 +3,7 @@ import platform
 
 try:
     import conda
-except:
+except Exception:
     print('Failed to import conda.')
    print('This setup is usually run from the base conda environment.')
    print('You can activate the base environment using the command "conda activate base"')
@@ -1,21 +1,5 @@
 {
 "cells": [
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"Copyright (c) Microsoft Corporation. All rights reserved.\n",
-"\n",
-"Licensed under the MIT License."
-]
-},
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-""
-]
-},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -30,6 +14,7 @@
 "1. [Results](#Results)\n",
 "1. [Deploy](#Deploy)\n",
 "1. [Test](#Test)\n",
+"1. [Use auto-generated code for retraining](#Using-the-auto-generated-model-training-code-for-retraining-on-new-data)\n",
 "1. [Acknowledgements](#Acknowledgements)"
 ]
 },
@@ -55,6 +40,7 @@
 "7. Create a container image.\n",
 "8. Create an Azure Container Instance (ACI) service.\n",
 "9. Test the ACI service.\n",
+"10. Leverage the auto generated training code and use it for retraining on an updated dataset\n",
 "\n",
 "In addition this notebook showcases the following features\n",
 "- **Blocking** certain pipelines\n",
@@ -74,9 +60,12 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"name": "automl-import"
+},
 "outputs": [],
 "source": [
+"import json\n",
 "import logging\n",
 "\n",
 "from matplotlib import pyplot as plt\n",
@@ -98,16 +87,6 @@
 "This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
 ]
 },
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
-"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
-]
-},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -137,24 +116,27 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"name": "ws-setup"
+},
 "outputs": [],
 "source": [
 "ws = Workspace.from_config()\n",
 "\n",
 "# choose a name for experiment\n",
-"experiment_name = 'automl-classification-bmarketing-all'\n",
+"experiment_name = \"automl-classification-bmarketing-all\"\n",
 "\n",
 "experiment = Experiment(ws, experiment_name)\n",
 "\n",
 "output = {}\n",
-"output['Subscription ID'] = ws.subscription_id\n",
-"output['Workspace'] = ws.name\n",
-"output['Resource Group'] = ws.resource_group\n",
-"output['Location'] = ws.location\n",
-"output['Experiment Name'] = experiment.name\n",
-"pd.set_option('display.max_colwidth', -1)\n",
-"outputDf = pd.DataFrame(data = output, index = [''])\n",
+"output[\"Subscription ID\"] = ws.subscription_id\n",
+"output[\"Workspace\"] = ws.name\n",
+"output[\"Resource Group\"] = ws.resource_group\n",
+"output[\"Location\"] = ws.location\n",
+"output[\"Experiment Name\"] = experiment.name\n",
+"output[\"SDK Version\"] = azureml.core.VERSION\n",
+"pd.set_option(\"display.max_colwidth\", None)\n",
+"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
 "outputDf.T"
 ]
 },
@@ -175,7 +157,9 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"tags": []
+},
 "outputs": [],
 "source": [
 "from azureml.core.compute import ComputeTarget, AmlCompute\n",
@@ -187,12 +171,12 @@
 "# Verify that cluster does not exist already\n",
 "try:\n",
 "    compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
-"    print('Found existing cluster, use it.')\n",
+"    print(\"Found existing cluster, use it.\")\n",
 "except ComputeTargetException:\n",
-"    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
-"                                                           max_nodes=6)\n",
+"    compute_config = AmlCompute.provisioning_configuration(\n",
+"        vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
+"    )\n",
 "    compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
 "\n",
 "compute_target.wait_for_completion(show_output=True)"
 ]
 },
@@ -225,7 +209,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"data = pd.read_csv(\"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv\")\n",
+"data = pd.read_csv(\n",
+"    \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv\"\n",
+")\n",
 "data.head()"
 ]
 },
@@ -240,7 +226,12 @@
 "\n",
 "missing_rate = 0.75\n",
 "n_missing_samples = int(np.floor(data.shape[0] * missing_rate))\n",
-"missing_samples = np.hstack((np.zeros(data.shape[0] - n_missing_samples, dtype=np.bool), np.ones(n_missing_samples, dtype=np.bool)))\n",
+"missing_samples = np.hstack(\n",
+"    (\n",
+"        np.zeros(data.shape[0] - n_missing_samples, dtype=np.bool),\n",
+"        np.ones(n_missing_samples, dtype=np.bool),\n",
+"    )\n",
+")\n",
 "rng = np.random.RandomState(0)\n",
 "rng.shuffle(missing_samples)\n",
 "missing_features = rng.randint(0, data.shape[1], n_missing_samples)\n",
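Reassembled as a standalone sketch (assumes `data` is the DataFrame loaded above). Note that `np.bool` is deprecated since NumPy 1.20 and removed in 1.24, so the built-in `bool` is the safe spelling:

```python
import numpy as np

missing_rate = 0.75
n_missing_samples = int(np.floor(data.shape[0] * missing_rate))
# Boolean mask with n_missing_samples True entries; plain `bool` avoids the
# deprecated np.bool alias used in the cell above.
missing_samples = np.hstack(
    (
        np.zeros(data.shape[0] - n_missing_samples, dtype=bool),
        np.ones(n_missing_samples, dtype=bool),
    )
)
rng = np.random.RandomState(0)
rng.shuffle(missing_samples)
missing_features = rng.randint(0, data.shape[1], n_missing_samples)
```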
@@ -253,19 +244,21 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"if not os.path.isdir('data'):\n",
-"    os.mkdir('data')\n",
-"    \n",
+"if not os.path.isdir(\"data\"):\n",
+"    os.mkdir(\"data\")\n",
 "# Save the train data to a csv to be uploaded to the datastore\n",
 "pd.DataFrame(data).to_csv(\"data/train_data.csv\", index=False)\n",
 "\n",
 "ds = ws.get_default_datastore()\n",
-"ds.upload(src_dir='./data', target_path='bankmarketing', overwrite=True, show_progress=True)\n",
-"\n",
+"ds.upload(\n",
+"    src_dir=\"./data\", target_path=\"bankmarketing\", overwrite=True, show_progress=True\n",
+")\n",
+"\n",
+"\n",
 "# Upload the training data as a tabular dataset for access during training on remote compute\n",
-"train_data = Dataset.Tabular.from_delimited_files(path=ds.path('bankmarketing/train_data.csv'))\n",
+"train_data = Dataset.Tabular.from_delimited_files(\n",
+"    path=ds.path(\"bankmarketing/train_data.csv\")\n",
+")\n",
 "label = \"y\""
 ]
 },
@@ -325,6 +318,7 @@
 "|**n_cross_validations**|Number of cross validation splits.|\n",
 "|**training_data**|Input dataset, containing both features and label column.|\n",
 "|**label_column_name**|The name of the label column.|\n",
+"|**enable_code_generation**|Flag to enable generation of training code for each of the models that AutoML is creating.\n",
 "\n",
 "**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
 ]
@@ -342,27 +336,31 @@
 "    \"max_concurrent_iterations\": 4,\n",
 "    \"max_cores_per_iteration\": -1,\n",
 "    # \"n_cross_validations\": 2,\n",
-"    \"primary_metric\": 'AUC_weighted',\n",
-"    \"featurization\": 'auto',\n",
+"    \"primary_metric\": \"AUC_weighted\",\n",
+"    \"featurization\": \"auto\",\n",
 "    \"verbosity\": logging.INFO,\n",
+"    \"enable_code_generation\": True,\n",
 "}\n",
 "\n",
-"automl_config = AutoMLConfig(task = 'classification',\n",
-"                             debug_log = 'automl_errors.log',\n",
+"automl_config = AutoMLConfig(\n",
+"    task=\"classification\",\n",
+"    debug_log=\"automl_errors.log\",\n",
 "    compute_target=compute_target,\n",
 "    experiment_exit_score=0.9984,\n",
-"    blocked_models = ['KNN','LinearSVM'],\n",
+"    blocked_models=[\"KNN\", \"LinearSVM\"],\n",
 "    enable_onnx_compatible_models=True,\n",
 "    training_data=train_data,\n",
 "    label_column_name=label,\n",
 "    validation_data=validation_dataset,\n",
-"    **automl_settings\n",
+"    **automl_settings,\n",
 ")"
 ]
 },
 {
 "cell_type": "markdown",
-"metadata": {},
+"metadata": {
+"tags": []
+},
 "source": [
 "Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
 ]
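The updated configuration cell reassembled as plain Python: a sketch assuming `compute_target`, `train_data`, `validation_dataset`, and `label` are defined by the earlier cells of this notebook (only the settings visible in this hunk are included):

```python
import logging

from azureml.train.automl import AutoMLConfig

automl_settings = {
    "max_concurrent_iterations": 4,
    "max_cores_per_iteration": -1,
    "primary_metric": "AUC_weighted",
    "featurization": "auto",
    "verbosity": logging.INFO,
    # Enables the auto-generated training code used later for retraining.
    "enable_code_generation": True,
}

automl_config = AutoMLConfig(
    task="classification",
    debug_log="automl_errors.log",
    compute_target=compute_target,
    experiment_exit_score=0.9984,
    blocked_models=["KNN", "LinearSVM"],
    enable_onnx_compatible_models=True,
    training_data=train_data,
    label_column_name=label,
    validation_data=validation_dataset,
    **automl_settings,
)
```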
@@ -370,7 +368,9 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"name": "experiment-submit"
+},
 "outputs": [],
 "source": [
 "remote_run = experiment.submit(automl_config, show_output=False)"
@@ -378,7 +378,9 @@
 },
 {
 "cell_type": "markdown",
-"metadata": {},
+"metadata": {
+"tags": []
+},
 "source": [
 "Run the following cell to access previous runs. Uncomment the cell below and update the run_id."
 ]
@@ -410,7 +412,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"best_run_customized, fitted_model_customized = remote_run.get_output()"
+"# Retrieve the best Run object\n",
+"best_run = remote_run.get_best_child()"
 ]
 },
 {
@@ -419,7 +422,7 @@
 "source": [
 "## Transparency\n",
 "\n",
-"View updated featurization summary"
+"View featurization summary for the best model - to study how different features were transformed. This is stored as a JSON file in the outputs directory for the run."
 ]
 },
 {
@@ -428,36 +431,16 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"custom_featurizer = fitted_model_customized.named_steps['datatransformer']\n",
-"df = custom_featurizer.get_featurization_summary()\n",
-"pd.DataFrame(data=df)"
-]
-},
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"Set `is_user_friendly=False` to get a more detailed summary for the transforms being applied."
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"df = custom_featurizer.get_featurization_summary(is_user_friendly=False)\n",
-"pd.DataFrame(data=df)"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"df = custom_featurizer.get_stats_feature_type_summary()\n",
-"pd.DataFrame(data=df)"
+"# Download the featurization summary JSON file locally\n",
+"best_run.download_file(\n",
+"    \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
+")\n",
+"\n",
+"# Render the JSON as a pandas DataFrame\n",
+"with open(\"featurization_summary.json\", \"r\") as f:\n",
+"    records = json.load(f)\n",
+"\n",
+"pd.DataFrame.from_records(records)"
 ]
 },
 {
@@ -470,10 +453,13 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"name": "run-details"
+},
 "outputs": [],
 "source": [
 "from azureml.widgets import RunDetails\n",
+"\n",
 "RunDetails(remote_run).show()"
 ]
 },
@@ -493,13 +479,16 @@
 "source": [
 "# Wait for the best model explanation run to complete\n",
 "from azureml.core.run import Run\n",
 "\n",
 "model_explainability_run_id = remote_run.id + \"_\" + \"ModelExplain\"\n",
 "print(model_explainability_run_id)\n",
-"model_explainability_run = Run(experiment=experiment, run_id=model_explainability_run_id)\n",
+"model_explainability_run = Run(\n",
+"    experiment=experiment, run_id=model_explainability_run_id\n",
+")\n",
 "model_explainability_run.wait_for_completion()\n",
 "\n",
 "# Get the best run object\n",
-"best_run, fitted_model = remote_run.get_output()"
+"best_run = remote_run.get_best_child()"
 ]
 },
 {
@@ -576,6 +565,7 @@
 "outputs": [],
 "source": [
 "from azureml.automl.runtime.onnx_convert import OnnxConverter\n",
+"\n",
 "onnx_fl_path = \"./best_model.onnx\"\n",
 "OnnxConverter.save_onnx_model(onnx_mdl, onnx_fl_path)"
 ]
@@ -600,13 +590,17 @@
 "\n",
 "from azureml.automl.runtime.onnx_convert import OnnxInferenceHelper\n",
 "\n",
+"\n",
 "def get_onnx_res(run):\n",
-"    res_path = 'onnx_resource.json'\n",
-"    run.download_file(name=constants.MODEL_RESOURCE_PATH_ONNX, output_file_path=res_path)\n",
+"    res_path = \"onnx_resource.json\"\n",
+"    run.download_file(\n",
+"        name=constants.MODEL_RESOURCE_PATH_ONNX, output_file_path=res_path\n",
+"    )\n",
 "    with open(res_path) as f:\n",
 "        result = json.load(f)\n",
 "    return result\n",
 "\n",
+"\n",
 "if sys.version_info < OnnxConvertConstants.OnnxIncompatiblePythonVersion:\n",
 "    test_df = test_dataset.to_pandas_dataframe()\n",
 "    mdl_bytes = onnx_mdl.SerializeToString()\n",
@@ -618,7 +612,7 @@
 "    print(pred_onnx)\n",
 "    print(pred_prob_onnx)\n",
 "else:\n",
-"    print('Please use Python version 3.6 or 3.7 to run the inference helper.')"
+"    print(\"Please use Python version 3.6 or 3.7 to run the inference helper.\")"
 ]
 },
 {
@@ -629,7 +623,16 @@
 "\n",
 "### Retrieve the Best Model\n",
 "\n",
-"Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*."
+"Below we select the best pipeline from our iterations. The `get_best_child` method returns the Run object for the best model based on the default primary metric. There are additional flags that can be passed to the method if we want to retrieve the best Run based on any of the other supported metrics, or if we are just interested in the best run among the ONNX compatible runs. As always, you can execute `??remote_run.get_best_child` in a new cell to view the source or docs for the function."
 ]
 },
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"??remote_run.get_best_child"
+]
+},
 {
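In plain Python, the selection overloads described in the new markdown cell, as a minimal sketch assuming `remote_run` is a completed AutoML run (the `metric` and `onnx_compatible` keyword arguments are assumptions based on the text above; verify against your SDK version with `??remote_run.get_best_child`):

```python
# Best run by the experiment's primary metric.
best_run = remote_run.get_best_child()

# Best run judged by a different supported metric.
best_by_accuracy = remote_run.get_best_child(metric="accuracy")

# Best run restricted to ONNX-compatible child runs.
best_onnx_run = remote_run.get_best_child(onnx_compatible=True)
```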
@@ -649,7 +652,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"best_run, fitted_model = remote_run.get_output()"
+"best_run = remote_run.get_best_child()"
 ]
 },
 {
@@ -658,11 +661,11 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"model_name = best_run.properties['model_name']\n",
+"model_name = best_run.properties[\"model_name\"]\n",
 "\n",
-"script_file_name = 'inference/score.py'\n",
+"script_file_name = \"inference/score.py\"\n",
 "\n",
-"best_run.download_file('outputs/scoring_file_v_1_0_0.py', 'inference/score.py')"
+"best_run.download_file(\"outputs/scoring_file_v_1_0_0.py\", \"inference/score.py\")"
 ]
 },
 {
@@ -679,11 +682,15 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"description = 'AutoML Model trained on bank marketing data to predict if a client will subscribe to a term deposit'\n",
+"description = \"AutoML Model trained on bank marketing data to predict if a client will subscribe to a term deposit\"\n",
 "tags = None\n",
-"model = remote_run.register_model(model_name = model_name, description = description, tags = tags)\n",
+"model = remote_run.register_model(\n",
+"    model_name=model_name, description=description, tags=tags\n",
+")\n",
 "\n",
-"print(remote_run.model_id) # This will be written to the script file later in the notebook."
+"print(\n",
+"    remote_run.model_id\n",
+") # This will be written to the script file later in the notebook."
 ]
 },
 {
@@ -701,16 +708,20 @@
 "source": [
 "from azureml.core.model import InferenceConfig\n",
 "from azureml.core.webservice import AciWebservice\n",
 "from azureml.core.webservice import Webservice\n",
 "from azureml.core.model import Model\n",
 "from azureml.core.environment import Environment\n",
 "\n",
 "inference_config = InferenceConfig(entry_script=script_file_name)\n",
 "\n",
-"aciconfig = AciWebservice.deploy_configuration(cpu_cores = 2, \n",
+"aciconfig = AciWebservice.deploy_configuration(\n",
+"    cpu_cores=2,\n",
 "    memory_gb=2,\n",
-"    tags = {'area': \"bmData\", 'type': \"automl_classification\"}, \n",
-"    description = 'sample service for Automl Classification')\n",
+"    tags={\"area\": \"bmData\", \"type\": \"automl_classification\"},\n",
+"    description=\"sample service for Automl Classification\",\n",
+")\n",
 "\n",
-"aci_service_name = 'automl-sample-bankmarketing-all'\n",
+"aci_service_name = model_name.lower()\n",
 "print(aci_service_name)\n",
 "aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)\n",
 "aci_service.wait_for_deployment(True)\n",
@@ -762,8 +773,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"X_test = test_dataset.drop_columns(columns=['y'])\n",
-"y_test = test_dataset.keep_columns(columns=['y'], validate=True)\n",
+"X_test = test_dataset.drop_columns(columns=[\"y\"])\n",
+"y_test = test_dataset.keep_columns(columns=[\"y\"], validate=True)\n",
 "test_dataset.take(5).to_pandas_dataframe()"
 ]
 },
@@ -785,13 +796,13 @@
 "source": [
 "import requests\n",
 "\n",
-"X_test_json = X_test.to_json(orient='records')\n",
-"data = \"{\\\"data\\\": \" + X_test_json +\"}\"\n",
-"headers = {'Content-Type': 'application/json'}\n",
+"X_test_json = X_test.to_json(orient=\"records\")\n",
+"data = '{\"data\": ' + X_test_json + \"}\"\n",
+"headers = {\"Content-Type\": \"application/json\"}\n",
 "\n",
 "resp = requests.post(aci_service.scoring_uri, data, headers=headers)\n",
 "\n",
-"y_pred = json.loads(json.loads(resp.text))['result']"
+"y_pred = json.loads(json.loads(resp.text))[\"result\"]"
 ]
 },
 {
@@ -817,7 +828,9 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"scrolled": true
+},
 "outputs": [],
 "source": [
 "%matplotlib notebook\n",
@@ -825,19 +838,25 @@
 "import itertools\n",
 "\n",
 "cf = confusion_matrix(actual, y_pred)\n",
-"plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest')\n",
+"plt.imshow(cf, cmap=plt.cm.Blues, interpolation=\"nearest\")\n",
 "plt.colorbar()\n",
-"plt.title('Confusion Matrix')\n",
-"plt.xlabel('Predicted')\n",
-"plt.ylabel('Actual')\n",
-"class_labels = ['no','yes']\n",
+"plt.title(\"Confusion Matrix\")\n",
+"plt.xlabel(\"Predicted\")\n",
+"plt.ylabel(\"Actual\")\n",
+"class_labels = [\"no\", \"yes\"]\n",
 "tick_marks = np.arange(len(class_labels))\n",
 "plt.xticks(tick_marks, class_labels)\n",
-"plt.yticks([-0.5,0,1,1.5],['','no','yes',''])\n",
+"plt.yticks([-0.5, 0, 1, 1.5], [\"\", \"no\", \"yes\", \"\"])\n",
 "# plotting text value inside cells\n",
-"thresh = cf.max() / 2.\n",
+"thresh = cf.max() / 2.0\n",
 "for i, j in itertools.product(range(cf.shape[0]), range(cf.shape[1])):\n",
-"    plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black')\n",
+"    plt.text(\n",
+"        j,\n",
+"        i,\n",
+"        format(cf[i, j], \"d\"),\n",
+"        horizontalalignment=\"center\",\n",
+"        color=\"white\" if cf[i, j] > thresh else \"black\",\n",
+"    )\n",
 "plt.show()"
 ]
 },
@@ -859,6 +878,142 @@
 "aci_service.delete()"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Using the auto generated model training code for retraining on new data\n",
+"\n",
+"Because we enabled code generation when the original experiment was created, we now have access to the code that was used to generate any of the AutoML tried models. Below we'll be using the generated training script of the best model to retrain on a new dataset.\n",
+"\n",
+"For this demo, we'll begin by creating new retraining dataset by combining the Train & Validation datasets that were used in the original experiment."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"original_train_data = pd.read_csv(\n",
+"    \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv\"\n",
+")\n",
+"\n",
+"valid_data = pd.read_csv(\n",
+"    \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_validate.csv\"\n",
+")\n",
+"\n",
+"# we'll emulate an updated dataset for retraining by combining the Train & Validation datasets into a new one\n",
+"retrain_pd = pd.concat([original_train_data, valid_data])\n",
+"retrain_pd.to_csv(\"data/retrain_data.csv\", index=False)\n",
+"ds.upload_files(\n",
+"    files=[\"data/retrain_data.csv\"],\n",
+"    target_path=\"bankmarketing/\",\n",
+"    overwrite=True,\n",
+"    show_progress=True,\n",
+")\n",
+"retrain_dataset = Dataset.Tabular.from_delimited_files(\n",
+"    path=ds.path(\"bankmarketing/retrain_data.csv\")\n",
+")\n",
+"\n",
+"# after creating and uploading the retraining dataset, let's register it with the workspace for reuse\n",
+"retrain_dataset = retrain_dataset.register(\n",
+"    workspace=ws,\n",
+"    name=\"Bankmarketing_retrain\",\n",
+"    description=\"Updated training dataset, includes validation data\",\n",
+"    create_new_version=True,\n",
+")"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"Next, we'll download the generated script for the best run and use it for retraining. For more advanced scenarios, you can customize the training script as you need: change the featurization pipeline, change the learner algorithm or its hyperparameters, etc.\n",
+"\n",
+"For this exercise, we'll leave the script as it was generated."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"# download the autogenerated training script into the generated_code folder\n",
+"best_run.download_file(\n",
+"    \"outputs/generated_code/script.py\", \"generated_code/training_script.py\"\n",
+")\n",
+"\n",
+"# view the contents of the autogenerated training script\n",
+"! cat generated_code/training_script.py"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"import uuid\n",
+"from azureml.core import ScriptRunConfig\n",
+"from azureml._restclient.models import RunTypeV2\n",
+"from azureml._restclient.models.create_run_dto import CreateRunDto\n",
+"from azureml._restclient.run_client import RunClient\n",
+"\n",
+"codegen_runid = str(uuid.uuid4())\n",
+"client = RunClient(\n",
+"    experiment.workspace.service_context,\n",
+"    experiment.name,\n",
+"    codegen_runid,\n",
+"    experiment_id=experiment.id,\n",
+")\n",
+"\n",
+"# override the training_dataset_id to point to our new retraining dataset we just registered above\n",
+"dataset_arguments = [\"--training_dataset_id\", retrain_dataset.id]\n",
+"\n",
+"# create the retraining run as a child of the AutoML generated training run\n",
+"create_run_dto = CreateRunDto(\n",
+"    run_id=codegen_runid,\n",
+"    parent_run_id=best_run.id,\n",
+"    description=\"AutoML Codegen Script Run using an updated training dataset\",\n",
+"    target=cpu_cluster_name,\n",
+"    run_type_v2=RunTypeV2(orchestrator=\"Execution\", traits=[\"automl-codegen\"]),\n",
+")\n",
+"\n",
+"# the script for retraining run is pointing to the AutoML generated script\n",
+"src = ScriptRunConfig(\n",
+"    source_directory=\"generated_code\",\n",
+"    script=\"training_script.py\",\n",
+"    arguments=dataset_arguments,\n",
+"    compute_target=cpu_cluster_name,\n",
+"    environment=best_run.get_environment(),\n",
+")\n",
+"run_dto = client.create_run(run_id=codegen_runid, create_run_dto=create_run_dto)\n",
+"\n",
+"# submit the experiment\n",
+"retraining_run = experiment.submit(config=src, run_id=codegen_runid)\n",
+"retraining_run"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"After the run completes, we can get download/test/deploy to the model it has built."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"retraining_run.wait_for_completion()\n",
+"\n",
+"retraining_run.download_file(\"outputs/model.pkl\", \"generated_code/model.pkl\")"
+]
+},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -901,6 +1056,9 @@
 ],
 "friendly_name": "Automated ML run with basic edition features.",
 "index_order": 5,
+"kernel_info": {
+"name": "python3-azureml"
+},
 "kernelspec": {
 "display_name": "Python 3.6",
 "language": "python",
@@ -916,7 +1074,10 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.6.7"
+"version": "3.6.9"
 },
+"nteract": {
+"version": "nteract-front-end@1.0.0"
+},
 "tags": [
 "featurization",
@@ -927,5 +1088,5 @@
 "task": "Classification"
 },
 "nbformat": 4,
-"nbformat_minor": 2
+"nbformat_minor": 1
 }
@@ -1,21 +1,5 @@
 {
 "cells": [
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"Copyright (c) Microsoft Corporation. All rights reserved.\n",
-"\n",
-"Licensed under the MIT License."
-]
-},
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-""
-]
-},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -87,16 +71,6 @@
 "This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
 ]
 },
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
-"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
-]
-},
 {
 "cell_type": "code",
 "execution_count": null,
@@ -106,18 +80,19 @@
 "ws = Workspace.from_config()\n",
 "\n",
 "# choose a name for experiment\n",
-"experiment_name = 'automl-classification-ccard-remote'\n",
+"experiment_name = \"automl-classification-ccard-remote\"\n",
 "\n",
 "experiment = Experiment(ws, experiment_name)\n",
 "\n",
 "output = {}\n",
-"output['Subscription ID'] = ws.subscription_id\n",
-"output['Workspace'] = ws.name\n",
-"output['Resource Group'] = ws.resource_group\n",
-"output['Location'] = ws.location\n",
-"output['Experiment Name'] = experiment.name\n",
-"pd.set_option('display.max_colwidth', -1)\n",
-"outputDf = pd.DataFrame(data = output, index = [''])\n",
+"output[\"Subscription ID\"] = ws.subscription_id\n",
+"output[\"Workspace\"] = ws.name\n",
+"output[\"Resource Group\"] = ws.resource_group\n",
+"output[\"Location\"] = ws.location\n",
+"output[\"Experiment Name\"] = experiment.name\n",
+"output[\"SDK Version\"] = azureml.core.VERSION\n",
+"pd.set_option(\"display.max_colwidth\", None)\n",
+"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
 "outputDf.T"
 ]
 },
@@ -150,12 +125,12 @@
 "# Verify that cluster does not exist already\n",
 "try:\n",
 "    compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
-"    print('Found existing cluster, use it.')\n",
+"    print(\"Found existing cluster, use it.\")\n",
 "except ComputeTargetException:\n",
-"    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
-"                                                           max_nodes=6)\n",
+"    compute_config = AmlCompute.provisioning_configuration(\n",
+"        vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
+"    )\n",
 "    compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
 "\n",
 "compute_target.wait_for_completion(show_output=True)"
 ]
 },
@@ -178,13 +153,15 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"name": "load-data"
+},
 "outputs": [],
 "source": [
 "data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv\"\n",
 "dataset = Dataset.Tabular.from_delimited_files(data)\n",
 "training_data, validation_data = dataset.random_split(percentage=0.8, seed=223)\n",
-"label_column_name = 'Class'"
+"label_column_name = \"Class\""
 ]
 },
 {
@@ -210,24 +187,27 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"name": "automl-config"
+},
 "outputs": [],
 "source": [
 "automl_settings = {\n",
 "    \"n_cross_validations\": 3,\n",
-"    \"primary_metric\": 'AUC_weighted',\n",
+"    \"primary_metric\": \"average_precision_score_weighted\",\n",
 "    \"enable_early_stopping\": True,\n",
 "    \"max_concurrent_iterations\": 2,  # This is a limit for testing purpose, please increase it as per cluster size\n",
 "    \"experiment_timeout_hours\": 0.25,  # This is a time limit for testing purposes, remove it for real use cases, this will drastically limit ablity to find the best model possible\n",
 "    \"verbosity\": logging.INFO,\n",
 "}\n",
 "\n",
-"automl_config = AutoMLConfig(task = 'classification',\n",
-"                             debug_log = 'automl_errors.log',\n",
+"automl_config = AutoMLConfig(\n",
+"    task=\"classification\",\n",
+"    debug_log=\"automl_errors.log\",\n",
 "    compute_target=compute_target,\n",
 "    training_data=training_data,\n",
 "    label_column_name=label_column_name,\n",
-"    **automl_settings\n",
+"    **automl_settings,\n",
 ")"
 ]
 },
@@ -287,6 +267,7 @@
 "outputs": [],
 "source": [
 "from azureml.widgets import RunDetails\n",
+"\n",
 "RunDetails(remote_run).show()"
 ]
 },
@@ -353,8 +334,12 @@
 "outputs": [],
 "source": [
 "# convert the test data to dataframe\n",
-"X_test_df = validation_data.drop_columns(columns=[label_column_name]).to_pandas_dataframe()\n",
-"y_test_df = validation_data.keep_columns(columns=[label_column_name], validate=True).to_pandas_dataframe()"
+"X_test_df = validation_data.drop_columns(\n",
+"    columns=[label_column_name]\n",
+").to_pandas_dataframe()\n",
+"y_test_df = validation_data.keep_columns(\n",
+"    columns=[label_column_name], validate=True\n",
+").to_pandas_dataframe()"
 ]
 },
 {
@@ -389,19 +374,25 @@
 "import itertools\n",
 "\n",
 "cf = confusion_matrix(y_test_df.values, y_pred)\n",
-"plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest')\n",
+"plt.imshow(cf, cmap=plt.cm.Blues, interpolation=\"nearest\")\n",
 "plt.colorbar()\n",
-"plt.title('Confusion Matrix')\n",
-"plt.xlabel('Predicted')\n",
-"plt.ylabel('Actual')\n",
-"class_labels = ['False','True']\n",
+"plt.title(\"Confusion Matrix\")\n",
+"plt.xlabel(\"Predicted\")\n",
+"plt.ylabel(\"Actual\")\n",
+"class_labels = [\"False\", \"True\"]\n",
 "tick_marks = np.arange(len(class_labels))\n",
 "plt.xticks(tick_marks, class_labels)\n",
-"plt.yticks([-0.5,0,1,1.5],['','False','True',''])\n",
+"plt.yticks([-0.5, 0, 1, 1.5], [\"\", \"False\", \"True\", \"\"])\n",
 "# plotting text value inside cells\n",
-"thresh = cf.max() / 2.\n",
+"thresh = cf.max() / 2.0\n",
 "for i, j in itertools.product(range(cf.shape[0]), range(cf.shape[1])):\n",
-"    plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black')\n",
+"    plt.text(\n",
+"        j,\n",
+"        i,\n",
+"        format(cf[i, j], \"d\"),\n",
+"        horizontalalignment=\"center\",\n",
+"        color=\"white\" if cf[i, j] > thresh else \"black\",\n",
+"    )\n",
 "plt.show()"
 ]
 },
@@ -1,21 +1,5 @@
 {
 "cells": [
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"Copyright (c) Microsoft Corporation. All rights reserved.\n",
-"\n",
-"Licensed under the MIT License."
-]
-},
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-""
-]
-},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -63,6 +47,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
+"import json\n",
 "import logging\n",
 "import os\n",
 "import shutil\n",
@@ -90,16 +75,6 @@
 "This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
 ]
 },
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
-"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
-]
-},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -116,18 +91,19 @@
 "ws = Workspace.from_config()\n",
 "\n",
 "# Choose an experiment name.\n",
-"experiment_name = 'automl-classification-text-dnn'\n",
+"experiment_name = \"automl-classification-text-dnn\"\n",
 "\n",
 "experiment = Experiment(ws, experiment_name)\n",
 "\n",
 "output = {}\n",
-"output['Subscription ID'] = ws.subscription_id\n",
-"output['Workspace Name'] = ws.name\n",
-"output['Resource Group'] = ws.resource_group\n",
-"output['Location'] = ws.location\n",
-"output['Experiment Name'] = experiment.name\n",
-"pd.set_option('display.max_colwidth', -1)\n",
-"outputDf = pd.DataFrame(data = output, index = [''])\n",
+"output[\"Subscription ID\"] = ws.subscription_id\n",
+"output[\"Workspace Name\"] = ws.name\n",
+"output[\"Resource Group\"] = ws.resource_group\n",
+"output[\"Location\"] = ws.location\n",
+"output[\"Experiment Name\"] = experiment.name\n",
+"output[\"SDK Version\"] = azureml.core.VERSION\n",
+"pd.set_option(\"display.max_colwidth\", None)\n",
+"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
 "outputDf.T"
 ]
 },
@@ -160,13 +136,16 @@
 "# Verify that cluster does not exist already\n",
 "try:\n",
 "    compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
-"    print('Found existing cluster, use it.')\n",
+"    print(\"Found existing cluster, use it.\")\n",
 "except ComputeTargetException:\n",
-"    compute_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_NC6\", # CPU for BiLSTM, such as \"STANDARD_DS12_V2\"\n",
+"    compute_config = AmlCompute.provisioning_configuration(\n",
+"        vm_size=\"STANDARD_NC6\",  # CPU for BiLSTM, such as \"STANDARD_D2_V2\"\n",
+"        # To use BERT (this is recommended for best performance), select a GPU such as \"STANDARD_NC6\"\n",
+"        # or similar GPU option\n",
+"        # available in your workspace\n",
-"                                                           max_nodes = num_nodes)\n",
+"        idle_seconds_before_scaledown=60,\n",
+"        max_nodes=num_nodes,\n",
+"    )\n",
 "    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
 "\n",
 "compute_target.wait_for_completion(show_output=True)"
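The reassembled provisioning cell as plain Python, a sketch assuming `ws`, `amlcompute_cluster_name`, and `num_nodes` are defined earlier in the notebook:

```python
from azureml.core.compute import AmlCompute, ComputeTarget
from azureml.core.compute_target import ComputeTargetException

try:
    # Reuse the cluster if it already exists in the workspace.
    compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)
    print("Found existing cluster, use it.")
except ComputeTargetException:
    compute_config = AmlCompute.provisioning_configuration(
        vm_size="STANDARD_NC6",  # GPU for BERT; a CPU size such as "STANDARD_D2_V2" suffices for BiLSTM
        idle_seconds_before_scaledown=60,  # scale down quickly when idle
        max_nodes=num_nodes,
    )
    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)

compute_target.wait_for_completion(show_output=True)
```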
@@ -188,38 +167,52 @@
 "source": [
 "data_dir = \"text-dnn-data\"  # Local directory to store data\n",
 "blobstore_datadir = data_dir  # Blob store directory to store data in\n",
-"target_column_name = 'y'\n",
-"feature_column_name = 'X'\n",
+"target_column_name = \"y\"\n",
+"feature_column_name = \"X\"\n",
 "\n",
 "\n",
 "def get_20newsgroups_data():\n",
-"    '''Fetches 20 Newsgroups data from scikit-learn\n",
+"    \"\"\"Fetches 20 Newsgroups data from scikit-learn\n",
 "    Returns them in form of pandas dataframes\n",
-"    '''\n",
-"    remove = ('headers', 'footers', 'quotes')\n",
+"    \"\"\"\n",
+"    remove = (\"headers\", \"footers\", \"quotes\")\n",
 "    categories = [\n",
-"        'rec.sport.baseball',\n",
-"        'rec.sport.hockey',\n",
-"        'comp.graphics',\n",
-"        'sci.space',\n",
+"        \"rec.sport.baseball\",\n",
+"        \"rec.sport.hockey\",\n",
+"        \"comp.graphics\",\n",
+"        \"sci.space\",\n",
 "    ]\n",
 "\n",
-"    data = fetch_20newsgroups(subset = 'train', categories = categories,\n",
-"                              shuffle = True, random_state = 42,\n",
-"                              remove = remove)\n",
-"    data = pd.DataFrame({feature_column_name: data.data, target_column_name: data.target})\n",
+"    data = fetch_20newsgroups(\n",
+"        subset=\"train\",\n",
+"        categories=categories,\n",
+"        shuffle=True,\n",
+"        random_state=42,\n",
+"        remove=remove,\n",
+"    )\n",
+"    data = pd.DataFrame(\n",
+"        {feature_column_name: data.data, target_column_name: data.target}\n",
+"    )\n",
 "\n",
 "    data_train = data[:200]\n",
 "    data_test = data[200:300]\n",
 "\n",
-"    data_train = remove_blanks_20news(data_train, feature_column_name, target_column_name)\n",
+"    data_train = remove_blanks_20news(\n",
+"        data_train, feature_column_name, target_column_name\n",
+"    )\n",
 "    data_test = remove_blanks_20news(data_test, feature_column_name, target_column_name)\n",
 "\n",
 "    return data_train, data_test\n",
 "\n",
 "\n",
 "def remove_blanks_20news(data, feature_column_name, target_column_name):\n",
 "\n",
-"    data[feature_column_name] = data[feature_column_name].replace(r'\\n', ' ', regex=True).apply(lambda x: x.strip())\n",
-"    data = data[data[feature_column_name] != '']\n",
+"    data[feature_column_name] = (\n",
+"        data[feature_column_name]\n",
+"        .replace(r\"\\n\", \" \", regex=True)\n",
+"        .apply(lambda x: x.strip())\n",
+"    )\n",
+"    data = data[data[feature_column_name] != \"\"]\n",
 "\n",
 "    return data"
 ]
@@ -242,15 +235,14 @@
 "if not os.path.isdir(data_dir):\n",
 "    os.mkdir(data_dir)\n",
 "\n",
-"train_data_fname = data_dir + '/train_data.csv'\n",
-"test_data_fname = data_dir + '/test_data.csv'\n",
+"train_data_fname = data_dir + \"/train_data.csv\"\n",
+"test_data_fname = data_dir + \"/test_data.csv\"\n",
 "\n",
 "data_train.to_csv(train_data_fname, index=False)\n",
 "data_test.to_csv(test_data_fname, index=False)\n",
 "\n",
 "datastore = ws.get_default_datastore()\n",
-"datastore.upload(src_dir=data_dir, target_path=blobstore_datadir,\n",
-"                 overwrite=True)"
+"datastore.upload(src_dir=data_dir, target_path=blobstore_datadir, overwrite=True)"
 ]
 },
 {
@@ -259,7 +251,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"train_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, blobstore_datadir + '/train_data.csv')])"
+"train_dataset = Dataset.Tabular.from_delimited_files(\n",
+"    path=[(datastore, blobstore_datadir + \"/train_data.csv\")]\n",
+")"
 ]
 },
 {
@@ -284,7 +278,7 @@
 "source": [
 "automl_settings = {\n",
 "    \"experiment_timeout_minutes\": 30,\n",
-"    \"primary_metric\": 'AUC_weighted',\n",
+"    \"primary_metric\": \"accuracy\",\n",
 "    \"max_concurrent_iterations\": num_nodes,\n",
 "    \"max_cores_per_iteration\": -1,\n",
 "    \"enable_dnn\": True,\n",
@@ -295,13 +289,14 @@
 "    \"enable_stack_ensemble\": False,\n",
 "}\n",
 "\n",
-"automl_config = AutoMLConfig(task = 'classification',\n",
-"                             debug_log = 'automl_errors.log',\n",
+"automl_config = AutoMLConfig(\n",
+"    task=\"classification\",\n",
+"    debug_log=\"automl_errors.log\",\n",
 "    compute_target=compute_target,\n",
 "    training_data=train_dataset,\n",
 "    label_column_name=target_column_name,\n",
-"    blocked_models = ['LightGBM', 'XGBoostClassifier'],\n",
-"    **automl_settings\n",
+"    blocked_models=[\"LightGBM\", \"XGBoostClassifier\"],\n",
+"    **automl_settings,\n",
 ")"
 ]
 },
@@ -340,8 +335,8 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"You can test the model locally to get a feel of the input/output. When the model contains BERT, this step will require pytorch and pytorch-transformers installed in your local environment. The exact versions of these packages can be found in the **automl_env.yml** file located in the local copy of your MachineLearningNotebooks folder here:\n",
-"MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/automl_env.yml"
+"For local inferencing, you can load the model locally via. the method `remote_run.get_output()`. For more information on the arguments expected by this method, you can run `remote_run.get_output??`.\n",
+"Note that when the model contains BERT, this step will require pytorch and pytorch-transformers installed in your local environment. The exact versions of these packages can be found in the **automl_env.yml** file located in the local copy of your azureml-examples folder here: \"azureml-examples/python-sdk/tutorials/automl-with-azureml\""
 ]
 },
 {
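A minimal sketch of the local-inferencing path described above, assuming `automl_run` is the completed AutoML run and `data_test` / `feature_column_name` come from the earlier data-preparation cells:

```python
# Load the fitted model locally; this requires the training-time dependencies
# (including pytorch and pytorch-transformers when the model contains BERT).
best_run, fitted_model = automl_run.get_output()

# Score a few held-out documents locally to get a feel for the input/output.
y_pred_local = fitted_model.predict(data_test[[feature_column_name]].head(5))
print(y_pred_local)
```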
@@ -350,7 +345,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"best_run, fitted_model = automl_run.get_output()"
+"# Retrieve the best Run object\n",
+"best_run = automl_run.get_best_child()"
 ]
 },
 {
@@ -366,10 +362,17 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"text_transformations_used = []\n",
-"for column_group in fitted_model.named_steps['datatransformer'].get_featurization_summary():\n",
-"    text_transformations_used.extend(column_group['Transformations'])\n",
-"text_transformations_used"
+"# Download the featurization summary JSON file locally\n",
+"best_run.download_file(\n",
+"    \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
+")\n",
+"\n",
+"# Render the JSON as a pandas DataFrame\n",
+"with open(\"featurization_summary.json\", \"r\") as f:\n",
+"    records = json.load(f)\n",
+"\n",
+"featurization_summary = pd.DataFrame.from_records(records)\n",
+"featurization_summary[\"Transformations\"].tolist()"
 ]
 },
 {
@@ -394,7 +397,7 @@
 "outputs": [],
 "source": [
 "summary_df = get_result_df(automl_run)\n",
-"best_dnn_run_id = summary_df['run_id'].iloc[0]\n",
+"best_dnn_run_id = summary_df[\"run_id\"].iloc[0]\n",
 "best_dnn_run = Run(experiment, best_dnn_run_id)"
 ]
 },
@@ -404,11 +407,11 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"model_dir = 'Model'  # Local folder where the model will be stored temporarily\n",
+"model_dir = \"Model\"  # Local folder where the model will be stored temporarily\n",
 "if not os.path.isdir(model_dir):\n",
 "    os.mkdir(model_dir)\n",
 "\n",
-"best_dnn_run.download_file('outputs/model.pkl', model_dir + '/model.pkl')"
+"best_dnn_run.download_file(\"outputs/model.pkl\", model_dir + \"/model.pkl\")"
 ]
 },
 {
@@ -425,11 +428,10 @@
 "outputs": [],
 "source": [
 "# Register the model\n",
-"model_name = 'textDNN-20News'\n",
-"model = Model.register(model_path = model_dir + '/model.pkl',\n",
-"                       model_name = model_name,\n",
-"                       tags=None,\n",
-"                       workspace=ws)"
+"model_name = \"textDNN-20News\"\n",
+"model = Model.register(\n",
+"    model_path=model_dir + \"/model.pkl\", model_name=model_name, tags=None, workspace=ws\n",
+")"
 ]
 },
 {
@@ -454,7 +456,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, blobstore_datadir + '/test_data.csv')])\n",
+"test_dataset = Dataset.Tabular.from_delimited_files(\n",
+"    path=[(datastore, blobstore_datadir + \"/test_data.csv\")]\n",
+")\n",
 "\n",
 "# preview the first 3 rows of the dataset\n",
 "test_dataset.take(3).to_pandas_dataframe()"
@@ -475,9 +479,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"script_folder = os.path.join(os.getcwd(), 'inference')\n",
|
||||
"script_folder = os.path.join(os.getcwd(), \"inference\")\n",
|
||||
"os.makedirs(script_folder, exist_ok=True)\n",
|
||||
"shutil.copy('infer.py', script_folder)"
|
||||
"shutil.copy(\"infer.py\", script_folder)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -486,8 +490,15 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"test_run = run_inference(test_experiment, compute_target, script_folder, best_dnn_run,\n",
|
||||
" test_dataset, target_column_name, model_name)"
|
||||
"test_run = run_inference(\n",
|
||||
" test_experiment,\n",
|
||||
" compute_target,\n",
|
||||
" script_folder,\n",
|
||||
" best_dnn_run,\n",
|
||||
" test_dataset,\n",
|
||||
" target_column_name,\n",
|
||||
" model_name,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -4,52 +4,65 @@ from azureml.train.estimator import Estimator
|
||||
from azureml.core.run import Run
|
||||
|
||||
|
||||
def run_inference(test_experiment, compute_target, script_folder, train_run,
|
||||
test_dataset, target_column_name, model_name):
|
||||
def run_inference(
|
||||
test_experiment,
|
||||
compute_target,
|
||||
script_folder,
|
||||
train_run,
|
||||
test_dataset,
|
||||
target_column_name,
|
||||
model_name,
|
||||
):
|
||||
|
||||
inference_env = train_run.get_environment()
|
||||
|
||||
est = Estimator(source_directory=script_folder,
|
||||
entry_script='infer.py',
|
||||
est = Estimator(
|
||||
source_directory=script_folder,
|
||||
entry_script="infer.py",
|
||||
script_params={
|
||||
'--target_column_name': target_column_name,
|
||||
'--model_name': model_name
|
||||
"--target_column_name": target_column_name,
|
||||
"--model_name": model_name,
|
||||
},
|
||||
inputs=[
|
||||
test_dataset.as_named_input('test_data')
|
||||
],
|
||||
inputs=[test_dataset.as_named_input("test_data")],
|
||||
compute_target=compute_target,
|
||||
environment_definition=inference_env)
|
||||
environment_definition=inference_env,
|
||||
)
|
||||
|
||||
run = test_experiment.submit(
|
||||
est, tags={
|
||||
'training_run_id': train_run.id,
|
||||
'run_algorithm': train_run.properties['run_algorithm'],
|
||||
'valid_score': train_run.properties['score'],
|
||||
'primary_metric': train_run.properties['primary_metric']
|
||||
})
|
||||
est,
|
||||
tags={
|
||||
"training_run_id": train_run.id,
|
||||
"run_algorithm": train_run.properties["run_algorithm"],
|
||||
"valid_score": train_run.properties["score"],
|
||||
"primary_metric": train_run.properties["primary_metric"],
|
||||
},
|
||||
)
|
||||
|
||||
run.log("run_algorithm", run.tags['run_algorithm'])
|
||||
run.log("run_algorithm", run.tags["run_algorithm"])
|
||||
return run
|
||||
|
||||
|
||||
def get_result_df(remote_run):
|
||||
|
||||
children = list(remote_run.get_children(recursive=True))
|
||||
summary_df = pd.DataFrame(index=['run_id', 'run_algorithm',
|
||||
'primary_metric', 'Score'])
|
||||
summary_df = pd.DataFrame(
|
||||
index=["run_id", "run_algorithm", "primary_metric", "Score"]
|
||||
)
|
||||
goal_minimize = False
|
||||
for run in children:
|
||||
if('run_algorithm' in run.properties and 'score' in run.properties):
|
||||
summary_df[run.id] = [run.id, run.properties['run_algorithm'],
|
||||
run.properties['primary_metric'],
|
||||
float(run.properties['score'])]
|
||||
if('goal' in run.properties):
|
||||
goal_minimize = run.properties['goal'].split('_')[-1] == 'min'
|
||||
if "run_algorithm" in run.properties and "score" in run.properties:
|
||||
summary_df[run.id] = [
|
||||
run.id,
|
||||
run.properties["run_algorithm"],
|
||||
run.properties["primary_metric"],
|
||||
float(run.properties["score"]),
|
||||
]
|
||||
if "goal" in run.properties:
|
||||
goal_minimize = run.properties["goal"].split("_")[-1] == "min"
|
||||
|
||||
summary_df = summary_df.T.sort_values(
|
||||
'Score',
|
||||
ascending=goal_minimize).drop_duplicates(['run_algorithm'])
|
||||
summary_df = summary_df.set_index('run_algorithm')
|
||||
"Score", ascending=goal_minimize
|
||||
).drop_duplicates(["run_algorithm"])
|
||||
summary_df = summary_df.set_index("run_algorithm")
|
||||
|
||||
return summary_df
|
||||
|
||||
@@ -12,19 +12,22 @@ from azureml.core.model import Model
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
'--target_column_name', type=str, dest='target_column_name',
|
||||
help='Target Column Name')
|
||||
"--target_column_name",
|
||||
type=str,
|
||||
dest="target_column_name",
|
||||
help="Target Column Name",
|
||||
)
|
||||
parser.add_argument(
|
||||
'--model_name', type=str, dest='model_name',
|
||||
help='Name of registered model')
|
||||
"--model_name", type=str, dest="model_name", help="Name of registered model"
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
target_column_name = args.target_column_name
|
||||
model_name = args.model_name
|
||||
|
||||
print('args passed are: ')
|
||||
print('Target column name: ', target_column_name)
|
||||
print('Name of registered model: ', model_name)
|
||||
print("args passed are: ")
|
||||
print("Target column name: ", target_column_name)
|
||||
print("Name of registered model: ", model_name)
|
||||
|
||||
model_path = Model.get_model_path(model_name)
|
||||
# deserialize the model file back into a sklearn model
|
||||
@@ -32,13 +35,16 @@ model = joblib.load(model_path)
|
||||
|
||||
run = Run.get_context()
|
||||
# get input dataset by name
|
||||
test_dataset = run.input_datasets['test_data']
|
||||
test_dataset = run.input_datasets["test_data"]
|
||||
|
||||
X_test_df = test_dataset.drop_columns(columns=[target_column_name]) \
|
||||
.to_pandas_dataframe()
|
||||
y_test_df = test_dataset.with_timestamp_columns(None) \
|
||||
.keep_columns(columns=[target_column_name]) \
|
||||
X_test_df = test_dataset.drop_columns(
|
||||
columns=[target_column_name]
|
||||
).to_pandas_dataframe()
|
||||
y_test_df = (
|
||||
test_dataset.with_timestamp_columns(None)
|
||||
.keep_columns(columns=[target_column_name])
|
||||
.to_pandas_dataframe()
|
||||
)
|
||||
|
||||
predicted = model.predict_proba(X_test_df)
|
||||
|
||||
@@ -47,11 +53,13 @@ if isinstance(predicted, pd.DataFrame):
|
||||
|
||||
# Use the AutoML scoring module
|
||||
train_labels = model.classes_
|
||||
class_labels = np.unique(np.concatenate((y_test_df.values, np.reshape(train_labels, (-1, 1)))))
|
||||
class_labels = np.unique(
|
||||
np.concatenate((y_test_df.values, np.reshape(train_labels, (-1, 1))))
|
||||
)
|
||||
classification_metrics = list(constants.CLASSIFICATION_SCALAR_SET)
|
||||
scores = scoring.score_classification(y_test_df.values, predicted,
|
||||
classification_metrics,
|
||||
class_labels, train_labels)
|
||||
scores = scoring.score_classification(
|
||||
y_test_df.values, predicted, classification_metrics, class_labels, train_labels
|
||||
)
|
||||
|
||||
print("scores:")
|
||||
print(scores)
|
||||
|
||||
@@ -1,20 +1,5 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -75,16 +60,6 @@
|
||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
|
||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -118,17 +93,18 @@
|
||||
"dstor = ws.get_default_datastore()\n",
|
||||
"\n",
|
||||
"# Choose a name for the run history container in the workspace.\n",
|
||||
"experiment_name = 'retrain-noaaweather'\n",
|
||||
"experiment_name = \"retrain-noaaweather\"\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output['Subscription ID'] = ws.subscription_id\n",
|
||||
"output['Workspace'] = ws.name\n",
|
||||
"output['Resource Group'] = ws.resource_group\n",
|
||||
"output['Location'] = ws.location\n",
|
||||
"output['Run History Name'] = experiment_name\n",
|
||||
"pd.set_option('display.max_colwidth', -1)\n",
|
||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||
"output[\"Workspace\"] = ws.name\n",
|
||||
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||
"output[\"Location\"] = ws.location\n",
|
||||
"output[\"Run History Name\"] = experiment_name\n",
|
||||
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
@@ -164,12 +140,12 @@
|
||||
"# Verify that cluster does not exist already\n",
|
||||
"try:\n",
|
||||
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
||||
" print('Found existing cluster, use it.')\n",
|
||||
" print(\"Found existing cluster, use it.\")\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
|
||||
" max_nodes=4)\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||
" vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
|
||||
" )\n",
|
||||
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
||||
"\n",
|
||||
"compute_target.wait_for_completion(show_output=True)"
|
||||
]
|
||||
},
|
||||
@@ -196,12 +172,19 @@
|
||||
"\n",
|
||||
"conda_run_config.environment.docker.enabled = True\n",
|
||||
"\n",
|
||||
"cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]', 'applicationinsights', 'azureml-opendatasets', 'azureml-defaults'], \n",
|
||||
" conda_packages=['numpy==1.16.2'], \n",
|
||||
" pin_sdk_version=False)\n",
|
||||
"cd = CondaDependencies.create(\n",
|
||||
" pip_packages=[\n",
|
||||
" \"azureml-sdk[automl]\",\n",
|
||||
" \"applicationinsights\",\n",
|
||||
" \"azureml-opendatasets\",\n",
|
||||
" \"azureml-defaults\",\n",
|
||||
" ],\n",
|
||||
" conda_packages=[\"numpy==1.19.5\"],\n",
|
||||
" pin_sdk_version=False,\n",
|
||||
")\n",
|
||||
"conda_run_config.environment.python.conda_dependencies = cd\n",
|
||||
"\n",
|
||||
"print('run config is ready')"
|
||||
"print(\"run config is ready\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -242,12 +225,14 @@
|
||||
"from azureml.pipeline.steps import PythonScriptStep\n",
|
||||
"\n",
|
||||
"ds_name = PipelineParameter(name=\"ds_name\", default_value=dataset)\n",
|
||||
"upload_data_step = PythonScriptStep(script_name=\"upload_weather_data.py\", \n",
|
||||
"upload_data_step = PythonScriptStep(\n",
|
||||
" script_name=\"upload_weather_data.py\",\n",
|
||||
" allow_reuse=False,\n",
|
||||
" name=\"upload_weather_data\",\n",
|
||||
" arguments=[\"--ds_name\", ds_name],\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" runconfig=conda_run_config)"
|
||||
" runconfig=conda_run_config,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -264,10 +249,11 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data_pipeline = Pipeline(\n",
|
||||
" description=\"pipeline_with_uploaddata\",\n",
|
||||
" workspace=ws, \n",
|
||||
" steps=[upload_data_step])\n",
|
||||
"data_pipeline_run = experiment.submit(data_pipeline, pipeline_parameters={\"ds_name\":dataset})"
|
||||
" description=\"pipeline_with_uploaddata\", workspace=ws, steps=[upload_data_step]\n",
|
||||
")\n",
|
||||
"data_pipeline_run = experiment.submit(\n",
|
||||
" data_pipeline, pipeline_parameters={\"ds_name\": dataset}\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -307,13 +293,14 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data_prep_step = PythonScriptStep(script_name=\"check_data.py\", \n",
|
||||
"data_prep_step = PythonScriptStep(\n",
|
||||
" script_name=\"check_data.py\",\n",
|
||||
" allow_reuse=False,\n",
|
||||
" name=\"check_data\",\n",
|
||||
" arguments=[\"--ds_name\", ds_name,\n",
|
||||
" \"--model_name\", model_name],\n",
|
||||
" arguments=[\"--ds_name\", ds_name, \"--model_name\", model_name],\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" runconfig=conda_run_config)"
|
||||
" runconfig=conda_run_config,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -323,6 +310,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Dataset\n",
|
||||
"\n",
|
||||
"train_ds = Dataset.get_by_name(ws, dataset)\n",
|
||||
"train_ds = train_ds.drop_columns([\"partition_date\"])"
|
||||
]
|
||||
@@ -348,20 +336,21 @@
|
||||
" \"iteration_timeout_minutes\": 10,\n",
|
||||
" \"experiment_timeout_hours\": 0.25,\n",
|
||||
" \"n_cross_validations\": 3,\n",
|
||||
" \"primary_metric\": 'normalized_root_mean_squared_error',\n",
|
||||
" \"primary_metric\": \"r2_score\",\n",
|
||||
" \"max_concurrent_iterations\": 3,\n",
|
||||
" \"max_cores_per_iteration\": -1,\n",
|
||||
" \"verbosity\": logging.INFO,\n",
|
||||
" \"enable_early_stopping\": True\n",
|
||||
" \"enable_early_stopping\": True,\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"automl_config = AutoMLConfig(task = 'regression',\n",
|
||||
" debug_log = 'automl_errors.log',\n",
|
||||
"automl_config = AutoMLConfig(\n",
|
||||
" task=\"regression\",\n",
|
||||
" debug_log=\"automl_errors.log\",\n",
|
||||
" path=\".\",\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" training_data=train_ds,\n",
|
||||
" label_column_name=target_column_name,\n",
|
||||
" **automl_settings\n",
|
||||
" **automl_settings,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -373,17 +362,21 @@
|
||||
"source": [
|
||||
"from azureml.pipeline.core import PipelineData, TrainingOutput\n",
|
||||
"\n",
|
||||
"metrics_output_name = 'metrics_output'\n",
|
||||
"best_model_output_name = 'best_model_output'\n",
|
||||
"metrics_output_name = \"metrics_output\"\n",
|
||||
"best_model_output_name = \"best_model_output\"\n",
|
||||
"\n",
|
||||
"metrics_data = PipelineData(name='metrics_data',\n",
|
||||
"metrics_data = PipelineData(\n",
|
||||
" name=\"metrics_data\",\n",
|
||||
" datastore=dstor,\n",
|
||||
" pipeline_output_name=metrics_output_name,\n",
|
||||
" training_output=TrainingOutput(type='Metrics'))\n",
|
||||
"model_data = PipelineData(name='model_data',\n",
|
||||
" training_output=TrainingOutput(type=\"Metrics\"),\n",
|
||||
")\n",
|
||||
"model_data = PipelineData(\n",
|
||||
" name=\"model_data\",\n",
|
||||
" datastore=dstor,\n",
|
||||
" pipeline_output_name=best_model_output_name,\n",
|
||||
" training_output=TrainingOutput(type='Model'))"
|
||||
" training_output=TrainingOutput(type=\"Model\"),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -393,10 +386,11 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_step = AutoMLStep(\n",
|
||||
" name='automl_module',\n",
|
||||
" name=\"automl_module\",\n",
|
||||
" automl_config=automl_config,\n",
|
||||
" outputs=[metrics_data, model_data],\n",
|
||||
" allow_reuse=False)"
|
||||
" allow_reuse=False,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -413,13 +407,22 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"register_model_step = PythonScriptStep(script_name=\"register_model.py\",\n",
|
||||
"register_model_step = PythonScriptStep(\n",
|
||||
" script_name=\"register_model.py\",\n",
|
||||
" name=\"register_model\",\n",
|
||||
" allow_reuse=False,\n",
|
||||
" arguments=[\"--model_name\", model_name, \"--model_path\", model_data, \"--ds_name\", ds_name],\n",
|
||||
" arguments=[\n",
|
||||
" \"--model_name\",\n",
|
||||
" model_name,\n",
|
||||
" \"--model_path\",\n",
|
||||
" model_data,\n",
|
||||
" \"--ds_name\",\n",
|
||||
" ds_name,\n",
|
||||
" ],\n",
|
||||
" inputs=[model_data],\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" runconfig=conda_run_config)"
|
||||
" runconfig=conda_run_config,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -438,7 +441,8 @@
|
||||
"training_pipeline = Pipeline(\n",
|
||||
" description=\"training_pipeline\",\n",
|
||||
" workspace=ws,\n",
|
||||
" steps=[data_prep_step, automl_step, register_model_step])"
|
||||
" steps=[data_prep_step, automl_step, register_model_step],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -447,8 +451,10 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"training_pipeline_run = experiment.submit(training_pipeline, pipeline_parameters={\n",
|
||||
" \"ds_name\": dataset, \"model_name\": \"noaaweatherds\"})"
|
||||
"training_pipeline_run = experiment.submit(\n",
|
||||
" training_pipeline,\n",
|
||||
" pipeline_parameters={\"ds_name\": dataset, \"model_name\": \"noaaweatherds\"},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -477,8 +483,8 @@
|
||||
"pipeline_name = \"Retraining-Pipeline-NOAAWeather\"\n",
|
||||
"\n",
|
||||
"published_pipeline = training_pipeline.publish(\n",
|
||||
" name=pipeline_name, \n",
|
||||
" description=\"Pipeline that retrains AutoML model\")\n",
|
||||
" name=pipeline_name, description=\"Pipeline that retrains AutoML model\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"published_pipeline"
|
||||
]
|
||||
@@ -490,13 +496,17 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.core import Schedule\n",
|
||||
"schedule = Schedule.create(workspace=ws, name=\"RetrainingSchedule\",\n",
|
||||
"\n",
|
||||
"schedule = Schedule.create(\n",
|
||||
" workspace=ws,\n",
|
||||
" name=\"RetrainingSchedule\",\n",
|
||||
" pipeline_parameters={\"ds_name\": dataset, \"model_name\": \"noaaweatherds\"},\n",
|
||||
" pipeline_id=published_pipeline.id,\n",
|
||||
" experiment_name=experiment_name,\n",
|
||||
" datastore=dstor,\n",
|
||||
" wait_for_provisioning=True,\n",
|
||||
" polling_interval=1440)"
|
||||
" polling_interval=1440,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -520,8 +530,8 @@
|
||||
"pipeline_name = \"DataIngestion-Pipeline-NOAAWeather\"\n",
|
||||
"\n",
|
||||
"published_pipeline = training_pipeline.publish(\n",
|
||||
" name=pipeline_name, \n",
|
||||
" description=\"Pipeline that updates NOAAWeather Dataset\")\n",
|
||||
" name=pipeline_name, description=\"Pipeline that updates NOAAWeather Dataset\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"published_pipeline"
|
||||
]
|
||||
@@ -533,13 +543,17 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.core import Schedule\n",
|
||||
"schedule = Schedule.create(workspace=ws, name=\"RetrainingSchedule-DataIngestion\",\n",
|
||||
"\n",
|
||||
"schedule = Schedule.create(\n",
|
||||
" workspace=ws,\n",
|
||||
" name=\"RetrainingSchedule-DataIngestion\",\n",
|
||||
" pipeline_parameters={\"ds_name\": dataset},\n",
|
||||
" pipeline_id=published_pipeline.id,\n",
|
||||
" experiment_name=experiment_name,\n",
|
||||
" datastore=dstor,\n",
|
||||
" wait_for_provisioning=True,\n",
|
||||
" polling_interval=1440)"
|
||||
" polling_interval=1440,\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -25,9 +25,11 @@ datasets = [(Dataset.Scenario.TRAINING, train_ds)]
|
||||
|
||||
# Register model with training dataset
|
||||
|
||||
model = Model.register(workspace=ws,
|
||||
model = Model.register(
|
||||
workspace=ws,
|
||||
model_path=args.model_path,
|
||||
model_name=args.model_name,
|
||||
datasets=datasets)
|
||||
datasets=datasets,
|
||||
)
|
||||
|
||||
print("Registered version {0} of model {1}".format(model.version, model.name))
|
||||
|
||||
@@ -16,26 +16,82 @@ if type(run) == _OfflineRun:
|
||||
else:
|
||||
ws = run.experiment.workspace
|
||||
|
||||
usaf_list = ['725724', '722149', '723090', '722159', '723910', '720279',
|
||||
'725513', '725254', '726430', '720381', '723074', '726682',
|
||||
'725486', '727883', '723177', '722075', '723086', '724053',
|
||||
'725070', '722073', '726060', '725224', '725260', '724520',
|
||||
'720305', '724020', '726510', '725126', '722523', '703333',
|
||||
'722249', '722728', '725483', '722972', '724975', '742079',
|
||||
'727468', '722193', '725624', '722030', '726380', '720309',
|
||||
'722071', '720326', '725415', '724504', '725665', '725424',
|
||||
'725066']
|
||||
usaf_list = [
|
||||
"725724",
|
||||
"722149",
|
||||
"723090",
|
||||
"722159",
|
||||
"723910",
|
||||
"720279",
|
||||
"725513",
|
||||
"725254",
|
||||
"726430",
|
||||
"720381",
|
||||
"723074",
|
||||
"726682",
|
||||
"725486",
|
||||
"727883",
|
||||
"723177",
|
||||
"722075",
|
||||
"723086",
|
||||
"724053",
|
||||
"725070",
|
||||
"722073",
|
||||
"726060",
|
||||
"725224",
|
||||
"725260",
|
||||
"724520",
|
||||
"720305",
|
||||
"724020",
|
||||
"726510",
|
||||
"725126",
|
||||
"722523",
|
||||
"703333",
|
||||
"722249",
|
||||
"722728",
|
||||
"725483",
|
||||
"722972",
|
||||
"724975",
|
||||
"742079",
|
||||
"727468",
|
||||
"722193",
|
||||
"725624",
|
||||
"722030",
|
||||
"726380",
|
||||
"720309",
|
||||
"722071",
|
||||
"720326",
|
||||
"725415",
|
||||
"724504",
|
||||
"725665",
|
||||
"725424",
|
||||
"725066",
|
||||
]
|
||||
|
||||
|
||||
def get_noaa_data(start_time, end_time):
|
||||
columns = ['usaf', 'wban', 'datetime', 'latitude', 'longitude', 'elevation',
|
||||
'windAngle', 'windSpeed', 'temperature', 'stationName', 'p_k']
|
||||
columns = [
|
||||
"usaf",
|
||||
"wban",
|
||||
"datetime",
|
||||
"latitude",
|
||||
"longitude",
|
||||
"elevation",
|
||||
"windAngle",
|
||||
"windSpeed",
|
||||
"temperature",
|
||||
"stationName",
|
||||
"p_k",
|
||||
]
|
||||
isd = NoaaIsdWeather(start_time, end_time, cols=columns)
|
||||
noaa_df = isd.to_pandas_dataframe()
|
||||
df_filtered = noaa_df[noaa_df["usaf"].isin(usaf_list)]
|
||||
df_filtered.reset_index(drop=True)
|
||||
print("Received {0} rows of training data between {1} and {2}".format(
|
||||
df_filtered.shape[0], start_time, end_time))
|
||||
print(
|
||||
"Received {0} rows of training data between {1} and {2}".format(
|
||||
df_filtered.shape[0], start_time, end_time
|
||||
)
|
||||
)
|
||||
return df_filtered
|
||||
|
||||
|
||||
@@ -54,11 +110,12 @@ end_time = datetime.utcnow()
|
||||
try:
|
||||
ds = Dataset.get_by_name(ws, args.ds_name)
|
||||
end_time_last_slice = ds.data_changed_time.replace(tzinfo=None)
|
||||
print("Dataset {0} last updated on {1}".format(args.ds_name,
|
||||
end_time_last_slice))
|
||||
print("Dataset {0} last updated on {1}".format(args.ds_name, end_time_last_slice))
|
||||
except Exception:
|
||||
print(traceback.format_exc())
|
||||
print("Dataset with name {0} not found, registering new dataset.".format(args.ds_name))
|
||||
print(
|
||||
"Dataset with name {0} not found, registering new dataset.".format(args.ds_name)
|
||||
)
|
||||
register_dataset = True
|
||||
end_time = datetime(2021, 5, 1, 0, 0)
|
||||
end_time_last_slice = end_time - relativedelta(weeks=2)
|
||||
@@ -66,26 +123,35 @@ except Exception:
|
||||
train_df = get_noaa_data(end_time_last_slice, end_time)
|
||||
|
||||
if train_df.size > 0:
|
||||
print("Received {0} rows of new data after {1}.".format(
|
||||
train_df.shape[0], end_time_last_slice))
|
||||
folder_name = "{}/{:04d}/{:02d}/{:02d}/{:02d}/{:02d}/{:02d}".format(args.ds_name, end_time.year,
|
||||
end_time.month, end_time.day,
|
||||
end_time.hour, end_time.minute,
|
||||
end_time.second)
|
||||
print(
|
||||
"Received {0} rows of new data after {1}.".format(
|
||||
train_df.shape[0], end_time_last_slice
|
||||
)
|
||||
)
|
||||
folder_name = "{}/{:04d}/{:02d}/{:02d}/{:02d}/{:02d}/{:02d}".format(
|
||||
args.ds_name,
|
||||
end_time.year,
|
||||
end_time.month,
|
||||
end_time.day,
|
||||
end_time.hour,
|
||||
end_time.minute,
|
||||
end_time.second,
|
||||
)
|
||||
file_path = "{0}/data.csv".format(folder_name)
|
||||
|
||||
# Add a new partition to the registered dataset
|
||||
os.makedirs(folder_name, exist_ok=True)
|
||||
train_df.to_csv(file_path, index=False)
|
||||
|
||||
dstor.upload_files(files=[file_path],
|
||||
target_path=folder_name,
|
||||
overwrite=True,
|
||||
show_progress=True)
|
||||
dstor.upload_files(
|
||||
files=[file_path], target_path=folder_name, overwrite=True, show_progress=True
|
||||
)
|
||||
else:
|
||||
print("No new data since {0}.".format(end_time_last_slice))
|
||||
|
||||
if register_dataset:
|
||||
ds = Dataset.Tabular.from_delimited_files(dstor.path("{}/**/*.csv".format(
|
||||
args.ds_name)), partition_format='/{partition_date:yyyy/MM/dd/HH/mm/ss}/data.csv')
|
||||
ds = Dataset.Tabular.from_delimited_files(
|
||||
dstor.path("{}/**/*.csv".format(args.ds_name)),
|
||||
partition_format="/{partition_date:yyyy/MM/dd/HH/mm/ss}/data.csv",
|
||||
)
|
||||
ds.register(ws, name=args.ds_name)
|
||||
|
||||
@@ -1,17 +1,21 @@
|
||||
name: azure_automl_experimental
|
||||
dependencies:
|
||||
# The python interpreter version.
|
||||
# Currently Azure ML only supports 3.5.2 and later.
|
||||
- pip<=19.3.1
|
||||
- python>=3.5.2,<3.8
|
||||
- nb_conda
|
||||
- cython
|
||||
- urllib3<1.24
|
||||
# Currently Azure ML only supports 3.6.0 and later.
|
||||
- pip<=20.2.4
|
||||
- python>=3.6.0,<3.9
|
||||
- cython==0.29.14
|
||||
- urllib3==1.26.7
|
||||
- PyJWT < 2.0.0
|
||||
- numpy==1.18.5
|
||||
- pywin32==227
|
||||
- cryptography<37.0.0
|
||||
|
||||
- pip:
|
||||
# Required packages for AzureML execution, history, and data preparation.
|
||||
- azure-mgmt-core==1.3.0
|
||||
- azure-core==1.21.1
|
||||
- azure-identity==1.7.0
|
||||
- azureml-defaults
|
||||
- azureml-sdk
|
||||
- azureml-widgets
|
||||
|
||||
@@ -1,18 +1,23 @@
|
||||
name: azure_automl_experimental
|
||||
channels:
|
||||
- conda-forge
|
||||
- main
|
||||
dependencies:
|
||||
# The python interpreter version.
|
||||
# Currently Azure ML only supports 3.5.2 and later.
|
||||
- pip<=19.3.1
|
||||
# Currently Azure ML only supports 3.6.0 and later.
|
||||
- pip<=20.2.4
|
||||
- nomkl
|
||||
- python>=3.5.2,<3.8
|
||||
- nb_conda
|
||||
- cython
|
||||
- urllib3<1.24
|
||||
- python>=3.6.0,<3.9
|
||||
- urllib3==1.26.7
|
||||
- PyJWT < 2.0.0
|
||||
- numpy==1.18.5
|
||||
- numpy==1.19.5
|
||||
- cryptography<37.0.0
|
||||
|
||||
- pip:
|
||||
# Required packages for AzureML execution, history, and data preparation.
|
||||
- azure-mgmt-core==1.3.0
|
||||
- azure-core==1.21.1
|
||||
- azure-identity==1.7.0
|
||||
- azureml-defaults
|
||||
- azureml-sdk
|
||||
- azureml-widgets
|
||||
|
||||
@@ -92,7 +92,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
|
||||
"print(\"This notebook was created using version 1.42.0 of the Azure ML SDK\")\n",
|
||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||
]
|
||||
},
|
||||
@@ -115,7 +115,7 @@
|
||||
"output['Resource Group'] = ws.resource_group\n",
|
||||
"output['Location'] = ws.location\n",
|
||||
"output['Experiment Name'] = experiment.name\n",
|
||||
"pd.set_option('display.max_colwidth', -1)\n",
|
||||
"pd.set_option('display.max_colwidth', None)\n",
|
||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
|
||||
@@ -91,7 +91,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
|
||||
"print(\"This notebook was created using version 1.42.0 of the Azure ML SDK\")\n",
|
||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||
]
|
||||
},
|
||||
@@ -180,6 +180,29 @@
|
||||
"label = \"ERP\"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The split data will be used in the remote compute by ModelProxy and locally to compare results.\n",
|
||||
"So, we need to persist the split data to avoid descrepencies from different package versions in the local and remote."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ds = ws.get_default_datastore()\n",
|
||||
"\n",
|
||||
"train_data = Dataset.Tabular.register_pandas_dataframe(\n",
|
||||
" train_data.to_pandas_dataframe(), target=(ds, \"machineTrainData\"), name=\"train_data\")\n",
|
||||
"\n",
|
||||
"test_data = Dataset.Tabular.register_pandas_dataframe(\n",
|
||||
" test_data.to_pandas_dataframe(), target=(ds, \"machineTestData\"), name=\"test_data\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -304,7 +327,8 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Show hyperparameters\n",
|
||||
"Show the model pipeline used for the best run with its hyperparameters."
|
||||
"Show the model pipeline used for the best run with its hyperparameters.\n",
|
||||
"For ensemble pipelines it shows the iterations and algorithms that are ensembled."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -313,8 +337,19 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"run_properties = json.loads(best_run.get_details()['properties']['pipeline_script'])\n",
|
||||
"print(json.dumps(run_properties, indent = 1)) "
|
||||
"run_properties = best_run.get_details()['properties']\n",
|
||||
"pipeline_script = json.loads(run_properties['pipeline_script'])\n",
|
||||
"print(json.dumps(pipeline_script, indent = 1)) \n",
|
||||
"\n",
|
||||
"if 'ensembled_iterations' in run_properties:\n",
|
||||
" print(\"\")\n",
|
||||
" print(\"Ensembled Iterations\")\n",
|
||||
" print(run_properties['ensembled_iterations'])\n",
|
||||
" \n",
|
||||
"if 'ensembled_algorithms' in run_properties:\n",
|
||||
" print(\"\")\n",
|
||||
" print(\"Ensembled Algorithms\")\n",
|
||||
" print(run_properties['ensembled_algorithms'])"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
Binary file not shown.
@@ -0,0 +1,171 @@
|
||||
from typing import Any, Dict, Optional, List
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
|
||||
from matplotlib import pyplot as plt
|
||||
from matplotlib.backends.backend_pdf import PdfPages
|
||||
|
||||
from azureml.automl.core.shared import constants
|
||||
from azureml.automl.core.shared.types import GrainType
|
||||
from azureml.automl.runtime.shared.score import scoring
|
||||
|
||||
GRAIN = "time_series_id"
|
||||
BACKTEST_ITER = "backtest_iteration"
|
||||
ACTUALS = "actual_level"
|
||||
PREDICTIONS = "predicted_level"
|
||||
ALL_GRAINS = "all_sets"
|
||||
|
||||
FORECASTS_FILE = "forecast.csv"
|
||||
SCORES_FILE = "scores.csv"
|
||||
PLOTS_FILE = "plots_fcst_vs_actual.pdf"
|
||||
RE_INVALID_SYMBOLS = re.compile("[: ]")
|
||||
|
||||
|
||||
def _compute_metrics(df: pd.DataFrame, metrics: List[str]):
|
||||
"""
|
||||
Compute metrics for one data frame.
|
||||
|
||||
:param df: The data frame which contains the actual_level and predicted_level columns.
:param metrics: The list of metric names to compute.
:return: The data frame with two columns - metric_name and metric.
|
||||
"""
|
||||
scores = scoring.score_regression(
|
||||
y_test=df[ACTUALS], y_pred=df[PREDICTIONS], metrics=metrics
|
||||
)
|
||||
metrics_df = pd.DataFrame(list(scores.items()), columns=["metric_name", "metric"])
|
||||
metrics_df.sort_values(["metric_name"], inplace=True)
|
||||
metrics_df.reset_index(drop=True, inplace=True)
|
||||
return metrics_df
|
||||
|
||||
|
||||
def _format_grain_name(grain: GrainType) -> str:
|
||||
"""
|
||||
Convert grain name to string.
|
||||
|
||||
:param grain: the grain name.
|
||||
:return: the string representation of the given grain.
|
||||
"""
|
||||
if not isinstance(grain, tuple) and not isinstance(grain, list):
|
||||
return str(grain)
|
||||
grain = list(map(str, grain))
|
||||
return "|".join(grain)
|
||||
|
||||
|
||||
def compute_all_metrics(
|
||||
fcst_df: pd.DataFrame,
|
||||
ts_id_colnames: List[str],
|
||||
metric_names: Optional[List[str]] = None,
|
||||
):
|
||||
"""
|
||||
Calculate metrics per grain.
|
||||
|
||||
:param fcst_df: Forecast data frame. Must contain two columns: 'actual_level' and 'predicted_level'.
:param ts_id_colnames: List of grain (time series ID) column names.
:param metric_names: (optional) The list of metric names to compute.
:return: The data frame of metrics, computed per grain and for all grains combined.
|
||||
"""
|
||||
if not metric_names:
|
||||
metric_names = list(constants.Metric.SCALAR_REGRESSION_SET)
|
||||
|
||||
if ts_id_colnames is None:
|
||||
ts_id_colnames = []
|
||||
|
||||
metrics_list = []
|
||||
if ts_id_colnames:
|
||||
for grain, df in fcst_df.groupby(ts_id_colnames):
|
||||
one_grain_metrics_df = _compute_metrics(df, metric_names)
|
||||
one_grain_metrics_df[GRAIN] = _format_grain_name(grain)
|
||||
metrics_list.append(one_grain_metrics_df)
|
||||
|
||||
# overall metrics
|
||||
one_grain_metrics_df = _compute_metrics(fcst_df, metric_names)
|
||||
one_grain_metrics_df[GRAIN] = ALL_GRAINS
|
||||
metrics_list.append(one_grain_metrics_df)
|
||||
|
||||
# collect into a data frame
|
||||
return pd.concat(metrics_list)
|
||||
|
||||
|
||||
def _draw_one_plot(
|
||||
df: pd.DataFrame,
|
||||
time_column_name: str,
|
||||
grain_column_names: List[str],
|
||||
pdf: PdfPages,
|
||||
) -> None:
|
||||
"""
|
||||
Draw the single plot.
|
||||
|
||||
:param df: The data frame with the data to build plot.
|
||||
:param time_column_name: The name of a time column.
|
||||
:param grain_column_names: The name of grain columns.
|
||||
:param pdf: The pdf backend used to render the plot.
|
||||
"""
|
||||
fig, _ = plt.subplots(figsize=(20, 10))
|
||||
df = df.set_index(time_column_name)
|
||||
plt.plot(df[[ACTUALS, PREDICTIONS]])
|
||||
plt.xticks(rotation=45)
|
||||
iteration = df[BACKTEST_ITER].iloc[0]
|
||||
if grain_column_names:
|
||||
grain_name = [df[grain].iloc[0] for grain in grain_column_names]
|
||||
plt.title(f"Time series ID: {_format_grain_name(grain_name)} {iteration}")
|
||||
plt.legend(["actual", "forecast"])
|
||||
pdf.savefig(fig)
plt.close(fig)
|
||||
|
||||
|
||||
def calculate_scores_and_build_plots(
|
||||
input_dir: str, output_dir: str, automl_settings: Dict[str, Any]
|
||||
):
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
grains = automl_settings.get(constants.TimeSeries.TIME_SERIES_ID_COLUMN_NAMES)
|
||||
time_column_name = automl_settings.get(constants.TimeSeries.TIME_COLUMN_NAME)
|
||||
if grains is None:
|
||||
grains = []
|
||||
if isinstance(grains, str):
|
||||
grains = [grains]
|
||||
while BACKTEST_ITER in grains:
|
||||
grains.remove(BACKTEST_ITER)
|
||||
|
||||
dfs = []
|
||||
for fle in os.listdir(input_dir):
|
||||
file_path = os.path.join(input_dir, fle)
|
||||
if os.path.isfile(file_path) and file_path.endswith(".csv"):
|
||||
df_iter = pd.read_csv(file_path, parse_dates=[time_column_name])
|
||||
for _, iteration in df_iter.groupby(BACKTEST_ITER):
|
||||
dfs.append(iteration)
|
||||
forecast_df = pd.concat(dfs, sort=False, ignore_index=True)
|
||||
# To make sure plots are in order, sort the predictions by grain and iteration.
|
||||
ts_index = grains + [BACKTEST_ITER]
|
||||
forecast_df.sort_values(by=ts_index, inplace=True)
|
||||
pdf = PdfPages(os.path.join(output_dir, PLOTS_FILE))
|
||||
for _, one_forecast in forecast_df.groupby(ts_index):
|
||||
_draw_one_plot(one_forecast, time_column_name, grains, pdf)
|
||||
pdf.close()
|
||||
forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False)
|
||||
# Remove np.NaN and np.inf from the prediction and actuals data.
|
||||
forecast_df.replace([np.inf, -np.inf], np.nan, inplace=True)
|
||||
forecast_df.dropna(subset=[ACTUALS, PREDICTIONS], inplace=True)
|
||||
metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER])
|
||||
metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
args = {"forecasts": "--forecasts", "scores_out": "--output-dir"}
|
||||
parser = argparse.ArgumentParser("Parsing input arguments.")
|
||||
for argname, arg in args.items():
|
||||
parser.add_argument(arg, dest=argname, required=True)
|
||||
parsed_args, _ = parser.parse_known_args()
|
||||
input_dir = parsed_args.forecasts
|
||||
output_dir = parsed_args.scores_out
|
||||
with open(
|
||||
os.path.join(
|
||||
os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
|
||||
)
|
||||
) as json_file:
|
||||
automl_settings = json.load(json_file)
|
||||
calculate_scores_and_build_plots(input_dir, output_dir, automl_settings)
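The script above is driven by two command-line arguments and reads an automl_settings.json file placed next to it. A hypothetical local invocation (the script file name here is assumed, not given in the source):

# python backtest_scores.py --forecasts ./forecast_csvs --output-dir ./scores_out
# Writes forecast.csv, scores.csv and plots_fcst_vs_actual.pdf into ./scores_out.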
|
||||
@@ -0,0 +1,726 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Many Models with Backtesting - Automated ML\n",
|
||||
"**_Backtest many models time series forecasts with Automated Machine Learning_**\n",
|
||||
"\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For this notebook we are using a synthetic dataset to demonstrate the back testing in many model scenario. This allows us to check historical performance of AutoML on a historical data. To do that we step back on the backtesting period by the data set several times and split the data to train and test sets. Then these data sets are used for training and evaluation of model.<br>\n",
|
||||
"\n",
|
||||
"Thus, it is a quick way of evaluating AutoML as if it was in production. Here, we do not test historical performance of a particular model, for this see the [notebook](../forecasting-backtest-single-model/auto-ml-forecasting-backtest-single-model.ipynb). Instead, the best model for every backtest iteration can be different since AutoML chooses the best model for a given training set.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
|
||||
]
|
||||
},
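To make the fold layout concrete, here is a minimal sketch (with assumed example dates) of how stepping back by the backtest period produces the last training date of each fold; the actual fold-generation code appears in section 2.1 below.

import pandas as pd

# With data ending 2019-12-01, a 7-day backtest period and 8 folds, each
# fold's training cutoff steps back one period from the previous fold's end.
data_end = pd.Timestamp("2019-12-01")
cutoffs = [data_end - pd.Timedelta(days=7 * (i + 1)) for i in range(8)]
print(cutoffs)  # last training dates, most recent first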
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Prerequisites\n",
|
||||
"You'll need to create a compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 1.0 Set up workspace, datastore, experiment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613003526897
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"import azureml.core\n",
|
||||
"from azureml.core import Workspace, Datastore\n",
|
||||
"import numpy as np\n",
|
||||
"import pandas as pd\n",
|
||||
"\n",
|
||||
"from pandas.tseries.frequencies import to_offset\n",
|
||||
"\n",
|
||||
"# Set up your workspace\n",
|
||||
"ws = Workspace.from_config()\n",
|
||||
"ws.get_details()\n",
|
||||
"\n",
|
||||
"# Set up your datastores\n",
|
||||
"dstore = ws.get_default_datastore()\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output[\"SDK version\"] = azureml.core.VERSION\n",
|
||||
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||
"output[\"Workspace\"] = ws.name\n",
|
||||
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||
"output[\"Location\"] = ws.location\n",
|
||||
"output[\"Default datastore name\"] = dstore.name\n",
|
|
||||
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This notebook is compatible with Azure ML SDK version 1.35.1 or later."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Choose an experiment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613003540729
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Experiment\n",
|
||||
"\n",
|
||||
"experiment = Experiment(ws, \"automl-many-models-backtest\")\n",
|
||||
"\n",
|
||||
"print(\"Experiment name: \" + experiment.name)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2.0 Data\n",
|
||||
"\n",
|
||||
"#### 2.1 Data generation\n",
|
||||
"For this notebook we will generate the artificial data set with two [time series IDs](https://docs.microsoft.com/en-us/python/api/azureml-automl-core/azureml.automl.core.forecasting_parameters.forecastingparameters?view=azure-ml-py). Then we will generate backtest folds and will upload it to the default BLOB storage and create a [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# simulate data: 2 grains - 700\n",
|
||||
"TIME_COLNAME = \"date\"\n",
|
||||
"TARGET_COLNAME = \"value\"\n",
|
||||
"TIME_SERIES_ID_COLNAME = \"ts_id\"\n",
|
||||
"\n",
|
||||
"sample_size = 700\n",
|
||||
"# Set the random seed for reproducibility of results.\n",
|
||||
"np.random.seed(20)\n",
|
||||
"X1 = pd.DataFrame(\n",
|
||||
" {\n",
|
||||
" TIME_COLNAME: pd.date_range(start=\"2018-01-01\", periods=sample_size),\n",
|
||||
" TARGET_COLNAME: np.random.normal(loc=100, scale=20, size=sample_size),\n",
|
||||
" TIME_SERIES_ID_COLNAME: \"ts_A\",\n",
|
||||
" }\n",
|
||||
")\n",
|
||||
"X2 = pd.DataFrame(\n",
|
||||
" {\n",
|
||||
" TIME_COLNAME: pd.date_range(start=\"2018-01-01\", periods=sample_size),\n",
|
||||
" TARGET_COLNAME: np.random.normal(loc=100, scale=20, size=sample_size),\n",
|
||||
" TIME_SERIES_ID_COLNAME: \"ts_B\",\n",
|
||||
" }\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"X = pd.concat([X1, X2], ignore_index=True, sort=False)\n",
|
||||
"print(\"Simulated dataset contains {} rows \\n\".format(X.shape[0]))\n",
|
||||
"X.head()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we will generate 8 backtesting folds with backtesting period of 7 days and with the same forecasting horizon. We will add the column \"backtest_iteration\", which will identify the backtesting period by the last training date."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"offset_type = \"7D\"\n",
|
||||
"NUMBER_OF_BACKTESTS = 8 # number of train/test sets to generate\n",
|
||||
"\n",
|
||||
"dfs_train = []\n",
|
||||
"dfs_test = []\n",
|
||||
"for ts_id, df_one in X.groupby(TIME_SERIES_ID_COLNAME):\n",
|
||||
"\n",
|
||||
" data_end = df_one[TIME_COLNAME].max()\n",
|
||||
"\n",
|
||||
" for i in range(NUMBER_OF_BACKTESTS):\n",
|
||||
" train_cutoff_date = data_end - to_offset(offset_type)\n",
|
||||
" df_one = df_one.copy()\n",
|
||||
" df_one[\"backtest_iteration\"] = \"iteration_\" + str(train_cutoff_date)\n",
|
||||
" train = df_one[df_one[TIME_COLNAME] <= train_cutoff_date]\n",
|
||||
" test = df_one[\n",
|
||||
" (df_one[TIME_COLNAME] > train_cutoff_date)\n",
|
||||
" & (df_one[TIME_COLNAME] <= data_end)\n",
|
||||
" ]\n",
|
||||
" data_end = train[TIME_COLNAME].max()\n",
|
||||
" dfs_train.append(train)\n",
|
||||
" dfs_test.append(test)\n",
|
||||
"\n",
|
||||
"X_train = pd.concat(dfs_train, sort=False, ignore_index=True)\n",
|
||||
"X_test = pd.concat(dfs_test, sort=False, ignore_index=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### 2.2 Create the Tabular Data Set.\n",
|
||||
"\n",
|
||||
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
|
||||
"\n",
|
||||
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.\n",
|
||||
"\n",
|
||||
"In this next step, we will upload the data and create a TabularDataset."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
|
||||
"\n",
|
||||
"ds = ws.get_default_datastore()\n",
|
||||
"# Upload saved data to the default data store.\n",
|
||||
"train_data = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||
" X_train, target=(ds, \"data_mm\"), name=\"data_train\"\n",
|
||||
")\n",
|
||||
"test_data = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||
" X_test, target=(ds, \"data_mm\"), name=\"data_test\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 3.0 Build the training pipeline\n",
|
||||
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Choose a compute target\n",
|
||||
"\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||
"\n",
|
||||
"\\*\\*Creation of AmlCompute takes approximately 5 minutes.**\n",
|
||||
"\n",
|
||||
"If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613007037308
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||
"\n",
|
||||
"# Name your cluster\n",
|
||||
"compute_name = \"backtest-mm\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"if compute_name in ws.compute_targets:\n",
|
||||
" compute_target = ws.compute_targets[compute_name]\n",
|
||||
" if compute_target and type(compute_target) is AmlCompute:\n",
|
||||
" print(\"Found compute target: \" + compute_name)\n",
|
||||
"else:\n",
|
||||
" print(\"Creating a new compute target...\")\n",
|
||||
" provisioning_config = AmlCompute.provisioning_configuration(\n",
|
||||
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
|
||||
" )\n",
|
||||
" # Create the compute target\n",
|
||||
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
|
||||
"\n",
|
||||
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
|
||||
" # If no min node count is provided it will use the scale settings for the cluster\n",
|
||||
" compute_target.wait_for_completion(\n",
|
||||
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # For a more detailed view of current cluster status, use the 'status' property\n",
|
||||
" print(compute_target.status.serialize())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up training parameters\n",
|
||||
"\n",
|
||||
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition. Please note, that in this case we are setting grain_column_names to be the time series ID column plus iteration, because we want to train a separate model for each time series and iteration.\n",
|
||||
"\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **task** | forecasting |\n",
|
||||
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>normalized_root_mean_squared_error</i><br><i>normalized_mean_absolute_error</i> |\n",
|
||||
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **label_column_name** | The name of the label column. |\n",
|
||||
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
|
||||
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
|
||||
"| **time_column_name** | The name of your time column. |\n",
|
||||
"| **time_series_id_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
|
||||
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
|
||||
"| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613007061544
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
|
||||
" ManyModelsTrainParameters,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"partition_column_names = [TIME_SERIES_ID_COLNAME, \"backtest_iteration\"]\n",
|
||||
"automl_settings = {\n",
|
||||
" \"task\": \"forecasting\",\n",
|
||||
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
|
||||
" \"iteration_timeout_minutes\": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n",
|
||||
" \"iterations\": 15,\n",
|
||||
" \"experiment_timeout_hours\": 0.25, # This also needs to be changed based on the dataset. For larger data set this number needs to be bigger.\n",
|
||||
" \"label_column_name\": TARGET_COLNAME,\n",
|
||||
" \"n_cross_validations\": 3,\n",
|
||||
" \"time_column_name\": TIME_COLNAME,\n",
|
||||
" \"forecast_horizon\": 6,\n",
|
||||
" \"time_series_id_column_names\": partition_column_names,\n",
|
||||
" \"track_child_runs\": False,\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"mm_paramters = ManyModelsTrainParameters(\n",
|
||||
" automl_settings=automl_settings, partition_column_names=partition_column_names\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up many models pipeline"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
|
||||
"\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **experiment** | The experiment used for training. |\n",
|
||||
"| **train_data** | The file dataset to be used as input to the training run. |\n",
|
||||
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. |\n",
|
||||
"| **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node or optimal performance. |\n",
|
||||
"| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n",
|
||||
"\n",
|
||||
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
|
||||
" experiment=experiment,\n",
|
||||
" train_data=train_data,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" node_count=2,\n",
|
||||
" process_count_per_node=2,\n",
|
||||
" run_invocation_timeout=920,\n",
|
||||
" train_pipeline_parameters=mm_paramters,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.core import Pipeline\n",
|
||||
"\n",
|
||||
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Submit the pipeline to run\n",
|
||||
"Next we submit our pipeline to run. The whole training pipeline takes about 20 minutes using a STANDARD_DS12_V2 VM with our current ParallelRunConfig setting."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"training_run = experiment.submit(training_pipeline)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"training_run.wait_for_completion(show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Check the run status, if training_run is in completed state, continue to next section. Otherwise, check the portal for failures."
|
||||
]
|
||||
},
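A minimal way to perform that check from the notebook (a sketch; training_run is the PipelineRun submitted above):

print(training_run.get_status())  # e.g. 'Completed', 'Running' or 'Failed'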
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 4.0 Backtesting\n",
|
||||
"Now that we selected the best AutoML model for each backtest fold, we will use these models to generate the forecasts and compare with the actuals."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up output dataset for inference data\n",
|
||||
"Output of inference can be represented as [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object and OutputFileDatasetConfig can be registered as a dataset. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.data import OutputFileDatasetConfig\n",
|
||||
"\n",
|
||||
"output_inference_data_ds = OutputFileDatasetConfig(\n",
|
||||
" name=\"many_models_inference_output\",\n",
|
||||
" destination=(dstore, \"backtesting/inference_data/\"),\n",
|
||||
").register_on_complete(name=\"backtesting_data_ds\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For many models we need to provide the ManyModelsInferenceParameters object.\n",
|
||||
"\n",
|
||||
"#### ManyModelsInferenceParameters arguments\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **partition_column_names** | List of column names that identifies groups. |\n",
|
||||
"| **target_column_name** | \\[Optional\\] Column name only if the inference dataset has the target. |\n",
|
||||
"| **time_column_name** | Column name only if it is timeseries. |\n",
|
||||
"| **many_models_run_id** | \\[Optional\\] Many models pipeline run id where models were trained. |\n",
|
||||
"\n",
|
||||
"#### get_many_models_batch_inference_steps arguments\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **experiment** | The experiment used for inference run. |\n",
|
||||
"| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n",
|
||||
"| **compute_target** | The compute target that runs the inference pipeline.|\n",
|
||||
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n",
|
||||
"| **process_count_per_node** | The number of processes per node.\n",
|
||||
"| **train_run_id** | \\[Optional\\] The run id of the hierarchy training, by default it is the latest successful training many model run in the experiment. |\n",
|
||||
"| **train_experiment_name** | \\[Optional\\] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n",
|
||||
"| **process_count_per_node** | \\[Optional\\] The number of processes per node, by default it's 4. |"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
|
||||
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
|
||||
" ManyModelsInferenceParameters,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"mm_parameters = ManyModelsInferenceParameters(\n",
|
||||
" partition_column_names=partition_column_names,\n",
|
||||
" time_column_name=TIME_COLNAME,\n",
|
||||
" target_column_name=TARGET_COLNAME,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
|
||||
" experiment=experiment,\n",
|
||||
" inference_data=test_data,\n",
|
||||
" node_count=2,\n",
|
||||
" process_count_per_node=2,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" run_invocation_timeout=300,\n",
|
||||
" output_datastore=output_inference_data_ds,\n",
|
||||
" train_run_id=training_run.id,\n",
|
||||
" train_experiment_name=training_run.experiment.name,\n",
|
||||
" inference_pipeline_parameters=mm_parameters,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.core import Pipeline\n",
|
||||
"\n",
|
||||
"inference_pipeline = Pipeline(ws, steps=inference_steps)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"inference_run = experiment.submit(inference_pipeline)\n",
|
||||
"inference_run.wait_for_completion(show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 5.0 Retrieve results and calculate metrics\n",
|
||||
"\n",
|
||||
"The pipeline returns one file with the predictions for each times series ID and outputs the result to the forecasting_output Blob container. The details of the blob container is listed in 'forecasting_output.txt' under Outputs+logs. \n",
|
||||
"\n",
|
||||
"The next code snippet does the following:\n",
|
||||
"1. Downloads the contents of the output folder that is passed in the parallel run step \n",
|
||||
"2. Reads the parallel_run_step.txt file that has the predictions as pandas dataframe \n",
|
||||
"3. Saves the table in csv format and \n",
|
||||
"4. Displays the top 10 rows of the predictions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline\n",
|
||||
"\n",
|
||||
"forecasting_results_name = \"forecasting_results\"\n",
|
||||
"forecasting_output_name = \"many_models_inference_output\"\n",
|
||||
"forecast_file = get_output_from_mm_pipeline(\n",
|
||||
" inference_run, forecasting_results_name, forecasting_output_name\n",
|
||||
")\n",
|
||||
"df = pd.read_csv(forecast_file, delimiter=\" \", header=None, parse_dates=[0])\n",
|
||||
"df.columns = list(X_train.columns) + [\"predicted_level\"]\n",
|
||||
"print(\n",
|
||||
" \"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\"\n",
|
||||
")\n",
|
||||
"# Save the scv file with header to read it in the next step.\n",
|
||||
"df.rename(columns={TARGET_COLNAME: \"actual_level\"}, inplace=True)\n",
|
||||
"df.to_csv(os.path.join(forecasting_results_name, \"forecast.csv\"), index=False)\n",
|
||||
"df.head(10)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## View metrics\n",
|
||||
"We will read in the obtained results and run the helper script, which will generate metrics and create the plots of predicted versus actual values."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from assets.score import calculate_scores_and_build_plots\n",
|
||||
"\n",
|
||||
"backtesting_results = \"backtesting_mm_results\"\n",
|
||||
"os.makedirs(backtesting_results, exist_ok=True)\n",
|
||||
"calculate_scores_and_build_plots(\n",
|
||||
" forecasting_results_name, backtesting_results, automl_settings\n",
|
||||
")\n",
|
||||
"pd.DataFrame({\"File\": os.listdir(backtesting_results)})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The directory contains a set of files with results:\n",
|
||||
"- forecast.csv contains forecasts for all backtest iterations. The backtest_iteration column contains iteration identifier with the last training date as a suffix\n",
|
||||
"- scores.csv contains all metrics. If data set contains several time series, the metrics are given for all combinations of time series id and iterations, as well as scores for all iterations and time series ids, which are marked as \"all_sets\"\n",
|
||||
"- plots_fcst_vs_actual.pdf contains the predictions vs forecast plots for each iteration and, eash time series is saved as separate plot.\n",
|
||||
"\n",
|
||||
"For demonstration purposes we will display the table of metrics for one of the time series with ID \"ts0\". We will create the utility function, which will build the table with metrics."
|
||||
]
|
||||
},
|
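Before building the metrics table, a quick sanity check on forecast.csv can confirm that every backtest fold produced predictions. A minimal sketch, assuming the output layout described above:

```python
import os
import pandas as pd

# Each backtest fold should appear as one distinct iteration identifier.
fcst = pd.read_csv(os.path.join("backtesting_mm_results", "forecast.csv"))
print(fcst["backtest_iteration"].unique())
```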
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_metrics_for_ts(all_metrics, ts):\n",
|
||||
" \"\"\"\n",
|
||||
" Get the metrics for the time series with ID ts and return it as pandas data frame.\n",
|
||||
"\n",
|
||||
" :param all_metrics: The table with all the metrics.\n",
|
||||
" :param ts: The ID of a time series of interest.\n",
|
||||
" :return: The pandas DataFrame with metrics for one time series.\n",
|
||||
" \"\"\"\n",
|
||||
" results_df = None\n",
|
||||
" for ts_id, one_series in all_metrics.groupby(\"time_series_id\"):\n",
|
||||
" if not ts_id.startswith(ts):\n",
|
||||
" continue\n",
|
||||
" iteration = ts_id.split(\"|\")[-1]\n",
|
||||
" df = one_series[[\"metric_name\", \"metric\"]]\n",
|
||||
" df.rename({\"metric\": iteration}, axis=1, inplace=True)\n",
|
||||
" df.set_index(\"metric_name\", inplace=True)\n",
|
||||
" if results_df is None:\n",
|
||||
" results_df = df\n",
|
||||
" else:\n",
|
||||
" results_df = results_df.merge(\n",
|
||||
" df, how=\"inner\", left_index=True, right_index=True\n",
|
||||
" )\n",
|
||||
" results_df.sort_index(axis=1, inplace=True)\n",
|
||||
" return results_df\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"metrics_df = pd.read_csv(os.path.join(backtesting_results, \"scores.csv\"))\n",
|
||||
"ts = \"ts_A\"\n",
|
||||
"get_metrics_for_ts(metrics_df, ts)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Forecast vs actuals plots."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from IPython.display import IFrame\n",
|
||||
"\n",
|
||||
"IFrame(\"./backtesting_mm_results/plots_fcst_vs_actual.pdf\", width=800, height=300)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "jialiu"
|
||||
}
|
||||
],
|
||||
"categories": [
|
||||
"how-to-use-azureml",
|
||||
"automated-machine-learning"
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -0,0 +1,4 @@
|
||||
name: auto-ml-forecasting-backtest-many-models
|
||||
dependencies:
|
||||
- pip:
|
||||
- azureml-sdk
|
||||
@@ -0,0 +1,45 @@
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import pandas as pd
|
||||
|
||||
import azureml.train.automl.runtime._hts.hts_runtime_utilities as hru
|
||||
|
||||
from azureml.core import Run
|
||||
from azureml.core.dataset import Dataset
|
||||
|
||||
# Parse the arguments.
|
||||
args = {
|
||||
"step_size": "--step-size",
|
||||
"step_number": "--step-number",
|
||||
"time_column_name": "--time-column-name",
|
||||
"time_series_id_column_names": "--time-series-id-column-names",
|
||||
"out_dir": "--output-dir",
|
||||
}
|
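# Each entry maps an argparse destination name to its command line flag.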
||||
parser = argparse.ArgumentParser("Parsing input arguments.")
|
||||
for argname, arg in args.items():
|
||||
parser.add_argument(arg, dest=argname, required=True)
|
||||
parsed_args, _ = parser.parse_known_args()
|
||||
step_number = int(parsed_args.step_number)
|
||||
step_size = int(parsed_args.step_size)
|
||||
# Create the working directory to store the temporary csv files.
|
||||
working_dir = parsed_args.out_dir
|
||||
os.makedirs(working_dir, exist_ok=True)
|
||||
# Set input and output
|
||||
script_run = Run.get_context()
|
||||
input_dataset = script_run.input_datasets["training_data"]
|
||||
X_train = input_dataset.to_pandas_dataframe()
|
||||
# Split the data.
|
||||
for i in range(step_number):
|
||||
file_name = os.path.join(working_dir, "backtest_{}.csv".format(i))
|
||||
if parsed_args.time_series_id_column_names:
|
||||
dfs = []
|
||||
for _, one_series in X_train.groupby([parsed_args.time_series_id_column_names]):
|
||||
one_series = one_series.sort_values(
|
||||
by=[parsed_args.time_column_name], inplace=False
|
||||
)
|
||||
dfs.append(one_series.iloc[: len(one_series) - step_size * i])
|
||||
pd.concat(dfs, sort=False, ignore_index=True).to_csv(file_name, index=False)
|
||||
else:
|
||||
X_train.sort_values(by=[parsed_args.time_column_name], inplace=True)
|
||||
X_train.iloc[: len(X_train) - step_size * i].to_csv(file_name, index=False)
|
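# Sketch of the resulting fold sizes (hypothetical numbers: a 365-row series,
# step_size=30, step_number=5): backtest_0.csv keeps all 365 rows,
# backtest_1.csv keeps 335, ..., backtest_4.csv keeps 245.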
||||
@@ -0,0 +1,173 @@
|
||||
# ---------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# ---------------------------------------------------------
|
||||
"""The batch script needed for back testing of models using PRS."""
|
||||
import argparse
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import pickle
|
||||
import re
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from azureml.core.experiment import Experiment
|
||||
from azureml.core.model import Model
|
||||
from azureml.core.run import Run
|
||||
from azureml.automl.core.shared import constants
|
||||
from azureml.automl.runtime.shared.score import scoring
|
||||
from azureml.train.automl import AutoMLConfig
|
||||
|
||||
RE_INVALID_SYMBOLS = re.compile(r"[:\s]")
|
||||
|
||||
model_name = None
|
||||
target_column_name = None
|
||||
current_step_run = None
|
||||
output_dir = None
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _get_automl_settings():
|
||||
with open(
|
||||
os.path.join(
|
||||
os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
|
||||
)
|
||||
) as json_file:
|
||||
return json.load(json_file)
|
||||
|
||||
|
||||
def init():
|
||||
global model_name
|
||||
global target_column_name
|
||||
global output_dir
|
||||
global automl_settings
|
||||
global model_uid
|
||||
logger.info("Initialization of the run.")
|
||||
parser = argparse.ArgumentParser("Parsing input arguments.")
|
||||
parser.add_argument("--output-dir", dest="out", required=True)
|
||||
parser.add_argument("--model-name", dest="model", default=None)
|
||||
parser.add_argument("--model-uid", dest="model_uid", default=None)
|
||||
|
||||
parsed_args, _ = parser.parse_known_args()
|
||||
model_name = parsed_args.model
|
||||
automl_settings = _get_automl_settings()
|
||||
target_column_name = automl_settings.get("label_column_name")
|
||||
output_dir = parsed_args.out
|
||||
model_uid = parsed_args.model_uid
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
os.environ["AUTOML_IGNORE_PACKAGE_VERSION_INCOMPATIBILITIES".lower()] = "True"
|
||||
|
||||
|
||||
def get_run():
|
||||
global current_step_run
|
||||
if current_step_run is None:
|
||||
current_step_run = Run.get_context()
|
||||
return current_step_run
|
||||
|
||||
|
||||
def run_backtest(data_input_name: str, file_name: str, experiment: Experiment):
|
||||
"""Re-train the model and return metrics."""
|
||||
data_input = pd.read_csv(
|
||||
data_input_name,
|
||||
parse_dates=[automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]],
|
||||
)
|
||||
print(data_input.head())
|
||||
if not automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES):
|
||||
# There are no grains (time series IDs) in the data.
|
||||
data_input.sort_values(
|
||||
[automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]], inplace=True
|
||||
)
|
||||
X_train = data_input.iloc[: -automl_settings["max_horizon"]]
|
||||
y_train = X_train.pop(target_column_name).values
|
||||
X_test = data_input.iloc[-automl_settings["max_horizon"] :]
|
||||
y_test = X_test.pop(target_column_name).values
|
||||
else:
|
||||
# The data contains grains.
|
||||
dfs_train = []
|
||||
dfs_test = []
|
||||
for _, one_series in data_input.groupby(
|
||||
automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
|
||||
):
|
||||
one_series.sort_values(
|
||||
[automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]], inplace=True
|
||||
)
|
||||
dfs_train.append(one_series.iloc[: -automl_settings["max_horizon"]])
|
||||
dfs_test.append(one_series.iloc[-automl_settings["max_horizon"] :])
|
||||
X_train = pd.concat(dfs_train, sort=False, ignore_index=True)
|
||||
y_train = X_train.pop(target_column_name).values
|
||||
X_test = pd.concat(dfs_test, sort=False, ignore_index=True)
|
||||
y_test = X_test.pop(target_column_name).values
|
||||
|
||||
last_training_date = str(
|
||||
X_train[automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]].max()
|
||||
)
|
||||
|
||||
if file_name:
|
||||
# If file name is provided, we will load model and retrain it on backtest data.
|
||||
with open(file_name, "rb") as fp:
|
||||
fitted_model = pickle.load(fp)
|
||||
fitted_model.fit(X_train, y_train)
|
||||
else:
|
||||
# We will run the experiment and select the best model.
|
||||
X_train[target_column_name] = y_train
|
||||
automl_config = AutoMLConfig(training_data=X_train, **automl_settings)
|
||||
automl_run = current_step_run.submit_child(automl_config, show_output=True)
|
||||
best_run, fitted_model = automl_run.get_output()
|
||||
# As we have generated models, we need to register them for future use.
|
||||
description = "Backtest model example"
|
||||
tags = {"last_training_date": last_training_date, "experiment": experiment.name}
|
||||
if model_uid:
|
||||
tags["model_uid"] = model_uid
|
||||
automl_run.register_model(
|
||||
model_name=best_run.properties["model_name"],
|
||||
description=description,
|
||||
tags=tags,
|
||||
)
|
||||
print(f"The model {best_run.properties['model_name']} was registered.")
|
||||
|
||||
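# forecast() returns (point forecasts, transformed frame); the transformed frame
# carries the predictions in the internal dummy target column extracted below.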
_, x_pred = fitted_model.forecast(X_test)
|
||||
x_pred.reset_index(inplace=True, drop=False)
|
||||
columns = [automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]]
|
||||
if automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES):
|
||||
# We know that fitted_model.grain_column_names is a list.
|
||||
columns.extend(fitted_model.grain_column_names)
|
||||
columns.append(constants.TimeSeriesInternal.DUMMY_TARGET_COLUMN)
|
||||
# Remove featurized columns.
|
||||
x_pred = x_pred[columns]
|
||||
x_pred.rename(
|
||||
{constants.TimeSeriesInternal.DUMMY_TARGET_COLUMN: "predicted_level"},
|
||||
axis=1,
|
||||
inplace=True,
|
||||
)
|
||||
x_pred["actual_level"] = y_test
|
||||
x_pred["backtest_iteration"] = f"iteration_{last_training_date}"
|
||||
date_safe = RE_INVALID_SYMBOLS.sub("_", last_training_date)
|
||||
x_pred.to_csv(os.path.join(output_dir, f"iteration_{date_safe}.csv"), index=False)
|
||||
return x_pred
|
||||
|
||||
|
||||
def run(input_files):
|
||||
"""Run the script"""
|
||||
logger.info("Running mini batch.")
|
||||
ws = get_run().experiment.workspace
|
||||
file_name = None
|
||||
if model_name:
|
||||
models = Model.list(ws, name=model_name)
|
||||
cloud_model = None
|
||||
if models:
|
||||
for one_mod in models:
|
||||
if cloud_model is None or one_mod.version > cloud_model.version:
|
||||
logger.info(
|
||||
"Using existing model from the workspace. Model version: {}".format(
|
||||
one_mod.version
|
||||
)
|
||||
)
|
||||
cloud_model = one_mod
|
||||
file_name = cloud_model.download(exist_ok=True)
|
||||
|
||||
forecasts = []
|
||||
logger.info("Running backtest.")
|
||||
for input_file in input_files:
|
||||
forecasts.append(run_backtest(input_file, file_name, get_run().experiment))
|
||||
return pd.concat(forecasts)
|
||||
@@ -0,0 +1,171 @@
|
||||
from typing import Any, Dict, Optional, List
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
|
||||
from matplotlib import pyplot as plt
|
||||
from matplotlib.backends.backend_pdf import PdfPages
|
||||
|
||||
from azureml.automl.core.shared import constants
|
||||
from azureml.automl.core.shared.types import GrainType
|
||||
from azureml.automl.runtime.shared.score import scoring
|
||||
|
||||
GRAIN = "time_series_id"
|
||||
BACKTEST_ITER = "backtest_iteration"
|
||||
ACTUALS = "actual_level"
|
||||
PREDICTIONS = "predicted_level"
|
||||
ALL_GRAINS = "all_sets"
|
||||
|
||||
FORECASTS_FILE = "forecast.csv"
|
||||
SCORES_FILE = "scores.csv"
|
||||
PLOTS_FILE = "plots_fcst_vs_actual.pdf"
|
||||
RE_INVALID_SYMBOLS = re.compile("[: ]")
|
||||
|
||||
|
||||
def _compute_metrics(df: pd.DataFrame, metrics: List[str]):
|
||||
"""
|
||||
Compute metrics for one data frame.
|
||||
|
||||
:param df: The data frame which contains actual_level and predicted_level columns.
|
||||
:return: The data frame with two columns - metric_name and metric.
|
||||
"""
|
||||
scores = scoring.score_regression(
|
||||
y_test=df[ACTUALS], y_pred=df[PREDICTIONS], metrics=metrics
|
||||
)
|
||||
metrics_df = pd.DataFrame(list(scores.items()), columns=["metric_name", "metric"])
|
||||
metrics_df.sort_values(["metric_name"], inplace=True)
|
||||
metrics_df.reset_index(drop=True, inplace=True)
|
||||
return metrics_df
|
||||
|
||||
|
||||
def _format_grain_name(grain: GrainType) -> str:
|
||||
"""
|
||||
Convert grain name to string.
|
||||
|
||||
:param grain: the grain name.
|
||||
:return: the string representation of the given grain.
|
||||
"""
|
||||
if not isinstance(grain, tuple) and not isinstance(grain, list):
|
||||
return str(grain)
|
||||
grain = list(map(str, grain))
|
||||
return "|".join(grain)
|
||||
|
||||
|
||||
def compute_all_metrics(
|
||||
fcst_df: pd.DataFrame,
|
||||
ts_id_colnames: List[str],
|
||||
metric_names: Optional[List[str]] = None,
|
||||
):
|
||||
"""
|
||||
Calculate metrics per grain.
|
||||
|
||||
:param fcst_df: forecast data frame. Must contain 2 columns: 'actual_level' and 'predicted_level'
|
||||
:param metric_names: (optional) the list of metric names to return
|
||||
:param ts_id_colnames: list of grain column names
|
||||
:return: the data frame with metrics for each grain, plus aggregate rows marked as "all_sets"
|
||||
"""
|
||||
if not metric_names:
|
||||
metric_names = list(constants.Metric.SCALAR_REGRESSION_SET)
|
||||
|
||||
if ts_id_colnames is None:
|
||||
ts_id_colnames = []
|
||||
|
||||
metrics_list = []
|
||||
if ts_id_colnames:
|
||||
for grain, df in fcst_df.groupby(ts_id_colnames):
|
||||
one_grain_metrics_df = _compute_metrics(df, metric_names)
|
||||
one_grain_metrics_df[GRAIN] = _format_grain_name(grain)
|
||||
metrics_list.append(one_grain_metrics_df)
|
||||
|
||||
# overall metrics
|
||||
one_grain_metrics_df = _compute_metrics(fcst_df, metric_names)
|
||||
one_grain_metrics_df[GRAIN] = ALL_GRAINS
|
||||
metrics_list.append(one_grain_metrics_df)
|
||||
|
||||
# collect into a data frame
|
||||
return pd.concat(metrics_list)
|
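# Example (hypothetical mini input; real frames come from the backtest folds):
#   df = pd.DataFrame({"actual_level": [1.0, 2.0], "predicted_level": [1.1, 1.9],
#                      "ts": ["a", "b"]})
#   compute_all_metrics(df, ["ts"])  # per-series rows plus an "all_sets" rollup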
||||
|
||||
|
||||
def _draw_one_plot(
|
||||
df: pd.DataFrame,
|
||||
time_column_name: str,
|
||||
grain_column_names: List[str],
|
||||
pdf: PdfPages,
|
||||
) -> None:
|
||||
"""
|
||||
Draw the single plot.
|
||||
|
||||
:param df: The data frame with the data to build plot.
|
||||
:param time_column_name: The name of a time column.
|
||||
:param grain_column_names: The name of grain columns.
|
||||
:param pdf: The pdf backend used to render the plot.
|
||||
"""
|
||||
fig, _ = plt.subplots(figsize=(20, 10))
|
||||
df = df.set_index(time_column_name)
|
||||
plt.plot(df[[ACTUALS, PREDICTIONS]])
|
||||
plt.xticks(rotation=45)
|
||||
iteration = df[BACKTEST_ITER].iloc[0]
|
||||
if grain_column_names:
|
||||
grain_name = [df[grain].iloc[0] for grain in grain_column_names]
|
||||
plt.title(f"Time series ID: {_format_grain_name(grain_name)} {iteration}")
|
||||
plt.legend(["actual", "forecast"])
|
||||
plt.close(fig)
|
||||
pdf.savefig(fig)
|
||||
|
||||
|
||||
def calculate_scores_and_build_plots(
|
||||
input_dir: str, output_dir: str, automl_settings: Dict[str, Any]
|
||||
):
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
grains = automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
|
||||
time_column_name = automl_settings.get(constants.TimeSeries.TIME_COLUMN_NAME)
|
||||
if grains is None:
|
||||
grains = []
|
||||
if isinstance(grains, str):
|
||||
grains = [grains]
|
||||
while BACKTEST_ITER in grains:
|
||||
grains.remove(BACKTEST_ITER)
|
||||
|
||||
dfs = []
|
||||
for fle in os.listdir(input_dir):
|
||||
file_path = os.path.join(input_dir, fle)
|
||||
if os.path.isfile(file_path) and file_path.endswith(".csv"):
|
||||
df_iter = pd.read_csv(file_path, parse_dates=[time_column_name])
|
||||
for _, iteration in df_iter.groupby(BACKTEST_ITER):
|
||||
dfs.append(iteration)
|
||||
forecast_df = pd.concat(dfs, sort=False, ignore_index=True)
|
||||
# To make sure plots are in order, sort the predictions by grain and iteration.
|
||||
ts_index = grains + [BACKTEST_ITER]
|
||||
forecast_df.sort_values(by=ts_index, inplace=True)
|
||||
pdf = PdfPages(os.path.join(output_dir, PLOTS_FILE))
|
||||
for _, one_forecast in forecast_df.groupby(ts_index):
|
||||
_draw_one_plot(one_forecast, time_column_name, grains, pdf)
|
||||
pdf.close()
|
||||
forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False)
|
||||
# Remove np.NaN and np.inf from the prediction and actuals data.
|
||||
forecast_df.replace([np.inf, -np.inf], np.nan, inplace=True)
|
||||
forecast_df.dropna(subset=[ACTUALS, PREDICTIONS], inplace=True)
|
||||
metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER])
|
||||
metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
args = {"forecasts": "--forecasts", "scores_out": "--output-dir"}
|
||||
parser = argparse.ArgumentParser("Parsing input arguments.")
|
||||
for argname, arg in args.items():
|
||||
parser.add_argument(arg, dest=argname, required=True)
|
||||
parsed_args, _ = parser.parse_known_args()
|
||||
input_dir = parsed_args.forecasts
|
||||
output_dir = parsed_args.scores_out
|
||||
with open(
|
||||
os.path.join(
|
||||
os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
|
||||
)
|
||||
) as json_file:
|
||||
automl_settings = json.load(json_file)
|
||||
calculate_scores_and_build_plots(input_dir, output_dir, automl_settings)
|
||||
@@ -0,0 +1,720 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License.\n",
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Automated MachineLearning\n",
|
||||
"_**The model backtesting**_\n",
|
||||
"\n",
|
||||
"## Contents\n",
|
||||
"1. [Introduction](#Introduction)\n",
|
||||
"2. [Setup](#Setup)\n",
|
||||
"3. [Data](#Data)\n",
|
||||
"4. [Prepare remote compute and data.](#prepare_remote)\n",
|
||||
"5. [Create the configuration for AutoML backtesting](#train)\n",
|
||||
"6. [Backtest AutoML](#backtest_automl)\n",
|
||||
"7. [View metrics](#Metrics)\n",
|
||||
"8. [Backtest the best model](#backtest_model)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Introduction\n",
|
||||
"Model backtesting is used to evaluate its performance on historical data. To do that we step back on the backtesting period by the data set several times and split the data to train and test sets. Then these data sets are used for training and evaluation of model.<br>\n",
|
||||
"This notebook is intended to demonstrate backtesting on a single model, this is the best solution for small data sets with a few or one time series in it. For scenarios where we would like to choose the best AutoML model for every backtest iteration, please see [AutoML Forecasting Backtest Many Models Example](../forecasting-backtest-many-models/auto-ml-forecasting-backtest-many-models.ipynb) notebook.\n",
|
||||
"\n",
|
||||
"This notebook demonstrates two ways of backtesting:\n",
|
||||
"- AutoML backtesting: we will train separate AutoML models for historical data\n",
|
||||
"- Model backtesting: from the first run we will select the best model trained on the most recent data, retrain it on the past data and evaluate."
|
||||
]
|
||||
},
|
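As an illustration, the sketch below (hypothetical numbers that match the settings used later: daily data, a 14-day horizon, a 30-day backtesting period, 5 iterations) shows how the training cut-off recedes on each iteration:

```python
import pandas as pd

# The training cut-off recedes by the backtesting period on every iteration;
# the final 14 days of each fold are held out for evaluation.
last_date = pd.Timestamp("2000-12-30")  # illustrative last observed date
for i in range(5):
    train_end = last_date - pd.Timedelta(days=30 * i + 14)
    print(f"iteration {i}: train through {train_end.date()}, forecast the next 14 days")
```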
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import numpy as np\n",
|
||||
"import pandas as pd\n",
|
||||
"import shutil\n",
|
||||
"\n",
|
||||
"import azureml.core\n",
|
||||
"from azureml.core import Experiment, Model, Workspace"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This notebook is compatible with Azure ML SDK version 1.35.1 or later."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As part of the setup you have already created a <b>Workspace</b>."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||
"output[\"Workspace\"] = ws.name\n",
|
||||
"output[\"SKU\"] = ws.sku\n",
|
||||
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||
"output[\"Location\"] = ws.location\n",
|
||||
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Data\n",
|
||||
"For the demonstration purposes we will simulate one year of daily data. To do this we need to specify the following parameters: time column name, time series ID column names and label column name. Our intention is to forecast for two weeks ahead."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"TIME_COLUMN_NAME = \"date\"\n",
|
||||
"TIME_SERIES_ID_COLUMN_NAMES = \"time_series_id\"\n",
|
||||
"LABEL_COLUMN_NAME = \"y\"\n",
|
||||
"FORECAST_HORIZON = 14\n",
|
||||
"FREQUENCY = \"D\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def simulate_timeseries_data(\n",
|
||||
" train_len: int,\n",
|
||||
" test_len: int,\n",
|
||||
" time_column_name: str,\n",
|
||||
" target_column_name: str,\n",
|
||||
" time_series_id_column_name: str,\n",
|
||||
" time_series_number: int = 1,\n",
|
||||
" freq: str = \"H\",\n",
|
||||
"):\n",
|
||||
" \"\"\"\n",
|
||||
" Return the time series of designed length.\n",
|
||||
"\n",
|
||||
" :param train_len: The length of training data (one series).\n",
|
||||
" :type train_len: int\n",
|
||||
" :param test_len: The length of testing data (one series).\n",
|
||||
" :type test_len: int\n",
|
||||
" :param time_column_name: The desired name of a time column.\n",
|
||||
" :type time_column_name: str\n",
|
||||
" :param time_series_number: The number of time series in the data set.\n",
|
||||
" :type time_series_number: int\n",
|
||||
" :param freq: The frequency string representing pandas offset.\n",
|
||||
" see https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html\n",
|
||||
" :type freq: str\n",
|
||||
" :returns: the tuple of train and test data sets.\n",
|
||||
" :rtype: tuple\n",
|
||||
"\n",
|
||||
" \"\"\"\n",
|
||||
" data_train = [] # type: List[pd.DataFrame]\n",
|
||||
" data_test = [] # type: List[pd.DataFrame]\n",
|
||||
" data_length = train_len + test_len\n",
|
||||
" for i in range(time_series_number):\n",
|
||||
" X = pd.DataFrame(\n",
|
||||
" {\n",
|
||||
" time_column_name: pd.date_range(\n",
|
||||
" start=\"2000-01-01\", periods=data_length, freq=freq\n",
|
||||
" ),\n",
|
||||
" target_column_name: np.arange(data_length).astype(float)\n",
|
||||
" + np.random.rand(data_length)\n",
|
||||
" + i * 5,\n",
|
||||
" \"ext_predictor\": np.asarray(range(42, 42 + data_length)),\n",
|
||||
" time_series_id_column_name: np.repeat(\"ts{}\".format(i), data_length),\n",
|
||||
" }\n",
|
||||
" )\n",
|
||||
" data_train.append(X[:train_len])\n",
|
||||
" data_test.append(X[train_len:])\n",
|
||||
" train = pd.concat(data_train)\n",
|
||||
" label_train = train.pop(target_column_name).values\n",
|
||||
" test = pd.concat(data_test)\n",
|
||||
" label_test = test.pop(target_column_name).values\n",
|
||||
" return train, label_train, test, label_test\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"n_test_periods = FORECAST_HORIZON\n",
|
||||
"n_train_periods = 365\n",
|
||||
"X_train, y_train, X_test, y_test = simulate_timeseries_data(\n",
|
||||
" train_len=n_train_periods,\n",
|
||||
" test_len=n_test_periods,\n",
|
||||
" time_column_name=TIME_COLUMN_NAME,\n",
|
||||
" target_column_name=LABEL_COLUMN_NAME,\n",
|
||||
" time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAMES,\n",
|
||||
" time_series_number=2,\n",
|
||||
" freq=FREQUENCY,\n",
|
||||
")\n",
|
||||
"X_train[LABEL_COLUMN_NAME] = y_train"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's see what the training data looks like."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"X_train.tail()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Prepare remote compute and data. <a id=\"prepare_remote\"></a>\n",
|
||||
"The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace), is paired with the storage account, which contains the default data store. We will use it to upload the artificial data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
|
||||
"\n",
|
||||
"ds = ws.get_default_datastore()\n",
|
||||
"# Upload saved data to the default data store.\n",
|
||||
"train_data = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||
" X_train, target=(ds, \"data\"), name=\"data_backtest\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You will need to create a compute target for backtesting. In this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute), you create AmlCompute as your training compute resource.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||
"\n",
|
||||
"# Choose a name for your CPU cluster\n",
|
||||
"amlcompute_cluster_name = \"backtest-cluster\"\n",
|
||||
"\n",
|
||||
"# Verify that cluster does not exist already\n",
|
||||
"try:\n",
|
||||
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
||||
" print(\"Found existing cluster, use it.\")\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
|
||||
" )\n",
|
||||
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
||||
"\n",
|
||||
"compute_target.wait_for_completion(show_output=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create the configuration for AutoML backtesting <a id=\"train\"></a>\n",
|
||||
"\n",
|
||||
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition.\n",
|
||||
"\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **task** | forecasting |\n",
|
||||
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>normalized_root_mean_squared_error</i><br><i>normalized_mean_absolute_error</i> |\n",
|
||||
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **label_column_name** | The name of the label column. |\n",
|
||||
"| **max_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
|
||||
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
|
||||
"| **time_column_name** | The name of your time column. |\n",
|
||||
"| **grain_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_settings = {\n",
|
||||
" \"task\": \"forecasting\",\n",
|
||||
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
|
||||
" \"iteration_timeout_minutes\": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n",
|
||||
" \"iterations\": 15,\n",
|
||||
" \"experiment_timeout_hours\": 1, # This also needs to be changed based on the dataset. For larger data set this number needs to be bigger.\n",
|
||||
" \"label_column_name\": LABEL_COLUMN_NAME,\n",
|
||||
" \"n_cross_validations\": 3,\n",
|
||||
" \"time_column_name\": TIME_COLUMN_NAME,\n",
|
||||
" \"max_horizon\": FORECAST_HORIZON,\n",
|
||||
" \"track_child_runs\": False,\n",
|
||||
" \"grain_column_names\": TIME_SERIES_ID_COLUMN_NAMES,\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Backtest AutoML <a id=\"backtest_automl\"></a>\n",
|
||||
"First we set backtesting parameters: we will step back by 30 days and will make 5 such steps; for each step we will forecast for next two weeks."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# The number of periods to step back on each backtest iteration.\n",
|
||||
"BACKTESTING_PERIOD = 30\n",
|
||||
"# The number of times we will back test the model.\n",
|
||||
"NUMBER_OF_BACKTESTS = 5"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To train AutoML on backtesting folds we will use the [Azure Machine Learning pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/concept-ml-pipelines). It will generate backtest folds, then train model for each of them and calculate the accuracy metrics. To run pipeline, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve (here, it is a forecasting), while a Run corresponds to a specific approach to the problem."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from uuid import uuid1\n",
|
||||
"\n",
|
||||
"from pipeline_helper import get_backtest_pipeline\n",
|
||||
"\n",
|
||||
"pipeline_exp = Experiment(ws, \"automl-backtesting\")\n",
|
||||
"\n",
|
||||
"# We will create the unique identifier to mark our models.\n",
|
||||
"model_uid = str(uuid1())\n",
|
||||
"\n",
|
||||
"pipeline = get_backtest_pipeline(\n",
|
||||
" experiment=pipeline_exp,\n",
|
||||
" dataset=train_data,\n",
|
||||
" # The STANDARD_DS12_V2 has 4 vCPU per node, we will set 2 process per node to be safe.\n",
|
||||
" process_per_node=2,\n",
|
||||
" # The maximum number of nodes for our compute is 6.\n",
|
||||
" node_count=6,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" automl_settings=automl_settings,\n",
|
||||
" step_size=BACKTESTING_PERIOD,\n",
|
||||
" step_number=NUMBER_OF_BACKTESTS,\n",
|
||||
" model_uid=model_uid,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Run the pipeline and wait for results."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pipeline_run = pipeline_exp.submit(pipeline)\n",
|
||||
"pipeline_run.wait_for_completion(show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"After the run is complete, we can download the results. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"metrics_output = pipeline_run.get_pipeline_output(\"results\")\n",
|
||||
"metrics_output.download(\"backtest_metrics\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## View metrics<a id=\"Metrics\"></a>\n",
|
||||
"To distinguish these metrics from the model backtest, which we will obtain in the next section, we will move the directory with metrics out of the backtest_metrics and will remove the parent folder. We will create the utility function for that."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def copy_scoring_directory(new_name):\n",
|
||||
" scores_path = os.path.join(\"backtest_metrics\", \"azureml\")\n",
|
||||
" directory_list = [os.path.join(scores_path, d) for d in os.listdir(scores_path)]\n",
|
||||
" latest_file = max(directory_list, key=os.path.getctime)\n",
|
||||
" print(\n",
|
||||
" f\"The output directory {latest_file} was created on {pd.Timestamp(os.path.getctime(latest_file), unit='s')} GMT.\"\n",
|
||||
" )\n",
|
||||
" shutil.move(os.path.join(latest_file, \"results\"), new_name)\n",
|
||||
" shutil.rmtree(\"backtest_metrics\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Move the directory and list its contents."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"copy_scoring_directory(\"automl_backtest\")\n",
|
||||
"pd.DataFrame({\"File\": os.listdir(\"automl_backtest\")})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The directory contains a set of files with results:\n",
|
||||
"- forecast.csv contains forecasts for all backtest iterations. The backtest_iteration column contains iteration identifier with the last training date as a suffix\n",
|
||||
"- scores.csv contains all metrics. If data set contains several time series, the metrics are given for all combinations of time series id and iterations, as well as scores for all iterations and time series id are marked as \"all_sets\"\n",
|
||||
"- plots_fcst_vs_actual.pdf contains the predictions vs forecast plots for each iteration and time series.\n",
|
||||
"\n",
|
||||
"For demonstration purposes we will display the table of metrics for one of the time series with ID \"ts0\". Again, we will create the utility function, which will be re used in model backtesting."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_metrics_for_ts(all_metrics, ts):\n",
|
||||
" \"\"\"\n",
|
||||
" Get the metrics for the time series with ID ts and return it as pandas data frame.\n",
|
||||
"\n",
|
||||
" :param all_metrics: The table with all the metrics.\n",
|
||||
" :param ts: The ID of a time series of interest.\n",
|
||||
" :return: The pandas DataFrame with metrics for one time series.\n",
|
||||
" \"\"\"\n",
|
||||
" results_df = None\n",
|
||||
" for ts_id, one_series in all_metrics.groupby(\"time_series_id\"):\n",
|
||||
" if not ts_id.startswith(ts):\n",
|
||||
" continue\n",
|
||||
" iteration = ts_id.split(\"|\")[-1]\n",
|
||||
" df = one_series[[\"metric_name\", \"metric\"]]\n",
|
||||
" df.rename({\"metric\": iteration}, axis=1, inplace=True)\n",
|
||||
" df.set_index(\"metric_name\", inplace=True)\n",
|
||||
" if results_df is None:\n",
|
||||
" results_df = df\n",
|
||||
" else:\n",
|
||||
" results_df = results_df.merge(\n",
|
||||
" df, how=\"inner\", left_index=True, right_index=True\n",
|
||||
" )\n",
|
||||
" results_df.sort_index(axis=1, inplace=True)\n",
|
||||
" return results_df\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"metrics_df = pd.read_csv(os.path.join(\"automl_backtest\", \"scores.csv\"))\n",
|
||||
"ts_id = \"ts0\"\n",
|
||||
"get_metrics_for_ts(metrics_df, ts_id)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Forecast vs actuals plots."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from IPython.display import IFrame\n",
|
||||
"\n",
|
||||
"IFrame(\"./automl_backtest/plots_fcst_vs_actual.pdf\", width=800, height=300)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# <font color='blue'>Backtest the best model</font> <a id=\"backtest_model\"></a>\n",
|
||||
"\n",
|
||||
"For model backtesting we will use the same parameters we used to backtest AutoML. All the models, we have obtained in the previous run were registered in our workspace. To identify the model, each was assigned a tag with the last trainig date."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_list = Model.list(ws, tags={\"experiment\": \"automl-backtesting\"})\n",
|
||||
"model_data = {\"name\": [], \"last_training_date\": []}\n",
|
||||
"for model in model_list:\n",
|
||||
" if (\n",
|
||||
" \"last_training_date\" not in model.tags\n",
|
||||
" or \"model_uid\" not in model.tags\n",
|
||||
" or model.tags[\"model_uid\"] != model_uid\n",
|
||||
" ):\n",
|
||||
" continue\n",
|
||||
" model_data[\"name\"].append(model.name)\n",
|
||||
" model_data[\"last_training_date\"].append(\n",
|
||||
" pd.Timestamp(model.tags[\"last_training_date\"])\n",
|
||||
" )\n",
|
||||
"df_models = pd.DataFrame(model_data)\n",
|
||||
"df_models.sort_values([\"last_training_date\"], inplace=True)\n",
|
||||
"df_models.reset_index(inplace=True, drop=True)\n",
|
||||
"df_models"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We will backtest the model trained on the most recet data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_name = df_models[\"name\"].iloc[-1]\n",
|
||||
"model_name"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Retrain the models.\n",
|
||||
"Assemble the pipeline, which will retrain the best model from AutoML run on historical data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pipeline_exp = Experiment(ws, \"model-backtesting\")\n",
|
||||
"\n",
|
||||
"pipeline = get_backtest_pipeline(\n",
|
||||
" experiment=pipeline_exp,\n",
|
||||
" dataset=train_data,\n",
|
||||
" # The STANDARD_DS12_V2 has 4 vCPU per node, we will set 2 process per node to be safe.\n",
|
||||
" process_per_node=2,\n",
|
||||
" # The maximum number of nodes for our compute is 6.\n",
|
||||
" node_count=6,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" automl_settings=automl_settings,\n",
|
||||
" step_size=BACKTESTING_PERIOD,\n",
|
||||
" step_number=NUMBER_OF_BACKTESTS,\n",
|
||||
" model_name=model_name,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Launch the backtesting pipeline."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pipeline_run = pipeline_exp.submit(pipeline)\n",
|
||||
"pipeline_run.wait_for_completion(show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The metrics are stored in the pipeline output named \"score\". The next code will download the table with metrics."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"metrics_output = pipeline_run.get_pipeline_output(\"results\")\n",
|
||||
"metrics_output.download(\"backtest_metrics\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Again, we will copy the data files from the downloaded directory, but in this case we will call the folder \"model_backtest\"; it will contain the same files as the one for AutoML backtesting."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"copy_scoring_directory(\"model_backtest\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Finally, we will display the metrics."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_metrics_df = pd.read_csv(os.path.join(\"model_backtest\", \"scores.csv\"))\n",
|
||||
"get_metrics_for_ts(model_metrics_df, ts_id)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Forecast vs actuals plots."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from IPython.display import IFrame\n",
|
||||
"\n",
|
||||
"IFrame(\"./model_backtest/plots_fcst_vs_actual.pdf\", width=800, height=300)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "jialiu"
|
||||
}
|
||||
],
|
||||
"category": "tutorial",
|
||||
"compute": [
|
||||
"Remote"
|
||||
],
|
||||
"datasets": [
|
||||
"None"
|
||||
],
|
||||
"deployment": [
|
||||
"None"
|
||||
],
|
||||
"exclude_from_index": false,
|
||||
"framework": [
|
||||
"Azure ML AutoML"
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -0,0 +1,4 @@
|
||||
name: auto-ml-forecasting-backtest-single-model
|
||||
dependencies:
|
||||
- pip:
|
||||
- azureml-sdk
|
||||
@@ -0,0 +1,166 @@
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
import os
|
||||
|
||||
import azureml.train.automl.runtime._hts.hts_runtime_utilities as hru
|
||||
|
||||
from azureml._restclient.jasmine_client import JasmineClient
|
||||
from azureml.contrib.automl.pipeline.steps import utilities
|
||||
from azureml.core import RunConfiguration
|
||||
from azureml.core.compute import ComputeTarget
|
||||
from azureml.core.experiment import Experiment
|
||||
from azureml.data import LinkTabularOutputDatasetConfig, TabularDataset
|
||||
from azureml.pipeline.core import Pipeline, PipelineData, PipelineParameter
|
||||
from azureml.pipeline.steps import ParallelRunConfig, ParallelRunStep, PythonScriptStep
|
||||
from azureml.train.automl.constants import Scenarios
|
||||
from azureml.data.dataset_consumption_config import DatasetConsumptionConfig
|
||||
|
||||
|
||||
PROJECT_FOLDER = "assets"
|
||||
SETTINGS_FILE = "automl_settings.json"
|
||||
|
||||
|
||||
def get_backtest_pipeline(
|
||||
experiment: Experiment,
|
||||
dataset: TabularDataset,
|
||||
process_per_node: int,
|
||||
node_count: int,
|
||||
compute_target: ComputeTarget,
|
||||
automl_settings: Dict[str, Any],
|
||||
step_size: int,
|
||||
step_number: int,
|
||||
model_name: Optional[str] = None,
|
||||
model_uid: Optional[str] = None,
|
||||
) -> Pipeline:
|
||||
"""
|
||||
:param experiment: The experiment used to run the pipeline.
|
||||
:param dataset: Tabular data set to be used for model training.
|
||||
:param process_per_node: The number of processes per node. Generally it should be the number of cores
|
||||
on the node divided by two.
|
||||
:param node_count: The number of nodes to be used.
|
||||
:param compute_target: The compute target to be used to run the pipeline.
|
||||
:param model_name: The name of a model to be back tested.
|
||||
:param automl_settings: The dictionary with automl settings.
|
||||
:param step_size: The number of periods to step back in backtesting.
|
||||
:param step_number: The number of backtesting iterations.
|
||||
:param model_uid: The uid to mark models from this run of the experiment.
|
||||
:return: The pipeline to be used for model retraining.
|
||||
**Note:** The output will be uploaded to the pipeline output
|
||||
called 'results'.
|
||||
"""
|
||||
jasmine_client = JasmineClient(
|
||||
service_context=experiment.workspace.service_context,
|
||||
experiment_name=experiment.name,
|
||||
experiment_id=experiment.id,
|
||||
)
|
||||
env = jasmine_client.get_curated_environment(
|
||||
scenario=Scenarios.AUTOML,
|
||||
enable_dnn=False,
|
||||
enable_gpu=False,
|
||||
compute=compute_target,
|
||||
compute_sku=experiment.workspace.compute_targets.get(
|
||||
compute_target.name
|
||||
).vm_size,
|
||||
)
|
||||
data_results = PipelineData(
|
||||
name="results", datastore=None, pipeline_output_name="results"
|
||||
)
|
||||
############################################################
|
||||
# Split the data set using python script.
|
||||
############################################################
|
||||
run_config = RunConfiguration()
|
||||
run_config.docker.use_docker = True
|
||||
run_config.environment = env
|
||||
|
||||
split_data = PipelineData(name="split_data_output", datastore=None).as_dataset()
|
||||
split_step = PythonScriptStep(
|
||||
name="split_data_for_backtest",
|
||||
script_name="data_split.py",
|
||||
inputs=[dataset.as_named_input("training_data")],
|
||||
outputs=[split_data],
|
||||
source_directory=PROJECT_FOLDER,
|
||||
arguments=[
|
||||
"--step-size",
|
||||
step_size,
|
||||
"--step-number",
|
||||
step_number,
|
||||
"--time-column-name",
|
||||
automl_settings.get("time_column_name"),
|
||||
"--time-series-id-column-names",
|
||||
automl_settings.get("grain_column_names"),
|
||||
"--output-dir",
|
||||
split_data,
|
||||
],
|
||||
runconfig=run_config,
|
||||
compute_target=compute_target,
|
||||
allow_reuse=False,
|
||||
)
|
||||
############################################################
|
||||
# We will do the backtest the parallel run step.
|
||||
############################################################
|
||||
settings_path = os.path.join(PROJECT_FOLDER, SETTINGS_FILE)
|
||||
hru.dump_object_to_json(automl_settings, settings_path)
|
||||
mini_batch_size = PipelineParameter(name="batch_size_param", default_value=str(1))
|
||||
back_test_config = ParallelRunConfig(
|
||||
source_directory=PROJECT_FOLDER,
|
||||
entry_script="retrain_models.py",
|
||||
mini_batch_size=mini_batch_size,
|
||||
error_threshold=-1,
|
||||
output_action="append_row",
|
||||
append_row_file_name="outputs.txt",
|
||||
compute_target=compute_target,
|
||||
environment=env,
|
||||
process_count_per_node=process_per_node,
|
||||
run_invocation_timeout=3600,
|
||||
node_count=node_count,
|
||||
)
|
||||
forecasts = PipelineData(name="forecasts", datastore=None)
|
||||
if model_name:
|
||||
parallel_step_name = "{}-backtest".format(model_name.replace("_", "-"))
|
||||
else:
|
||||
parallel_step_name = "AutoML-backtest"
|
||||
|
||||
prs_args = [
|
||||
"--target_column_name",
|
||||
automl_settings.get("label_column_name"),
|
||||
"--output-dir",
|
||||
forecasts,
|
||||
]
|
||||
if model_name is not None:
|
||||
prs_args.append("--model-name")
|
||||
prs_args.append(model_name)
|
||||
if model_uid is not None:
|
||||
prs_args.append("--model-uid")
|
||||
prs_args.append(model_uid)
|
||||
backtest_prs = ParallelRunStep(
|
||||
name=parallel_step_name,
|
||||
parallel_run_config=back_test_config,
|
||||
arguments=prs_args,
|
||||
inputs=[split_data],
|
||||
output=forecasts,
|
||||
allow_reuse=False,
|
||||
)
|
||||
############################################################
|
||||
# Then we collect the output and return it as scores output.
|
||||
############################################################
|
||||
collection_step = PythonScriptStep(
|
||||
name="score",
|
||||
script_name="score.py",
|
||||
inputs=[forecasts.as_mount()],
|
||||
outputs=[data_results],
|
||||
source_directory=PROJECT_FOLDER,
|
||||
arguments=[
|
||||
"--forecasts",
|
||||
forecasts,
|
||||
"--output-dir",
|
||||
data_results,
|
||||
],
|
||||
runconfig=run_config,
|
||||
compute_target=compute_target,
|
||||
allow_reuse=False,
|
||||
)
|
||||
# Build and return the pipeline.
|
||||
return Pipeline(
|
||||
workspace=experiment.workspace,
|
||||
steps=[split_step, backtest_prs, collection_step],
|
||||
)
|
||||
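A minimal sketch of how this new helper might be driven end to end. The workspace, experiment, dataset, and cluster names below are assumptions for illustration only; they are not part of this change:

# Hypothetical driver for get_backtest_pipeline; all names are assumed.
from azureml.core import Dataset, Experiment, Workspace

ws = Workspace.from_config()
experiment = Experiment(ws, "backtest-demo")                 # assumed experiment name
dataset = Dataset.get_by_name(ws, "my-timeseries-dataset")   # assumed dataset name
compute_target = ws.compute_targets["cpu-cluster"]           # assumed cluster name

pipeline = get_backtest_pipeline(
    experiment=experiment,
    dataset=dataset,
    process_per_node=2,
    node_count=2,
    compute_target=compute_target,
    automl_settings={"time_column_name": "date", "label_column_name": "y"},
    step_size=30,   # step the forecast origin back 30 periods per iteration
    step_number=4,  # run four backtest iterations
)
run = experiment.submit(pipeline)
run.wait_for_completion(show_output=False)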
@@ -1,20 +0,0 @@
DATE,grain,BeerProduction
2017-01-01,grain,9049
2017-02-01,grain,10458
2017-03-01,grain,12489
2017-04-01,grain,11499
2017-05-01,grain,13553
2017-06-01,grain,14740
2017-07-01,grain,11424
2017-08-01,grain,13412
2017-09-01,grain,11917
2017-10-01,grain,12721
2017-11-01,grain,13272
2017-12-01,grain,14278
2018-01-01,grain,9572
2018-02-01,grain,10423
2018-03-01,grain,12667
2018-04-01,grain,11904
2018-05-01,grain,14120
2018-06-01,grain,14565
2018-07-01,grain,12622

@@ -1,301 +0,0 @@
DATE,grain,BeerProduction
1992-01-01,grain,3459
1992-02-01,grain,3458
1992-03-01,grain,4002
1992-04-01,grain,4564
1992-05-01,grain,4221
1992-06-01,grain,4529
1992-07-01,grain,4466
1992-08-01,grain,4137
1992-09-01,grain,4126
1992-10-01,grain,4259
1992-11-01,grain,4240
1992-12-01,grain,4936
1993-01-01,grain,3031
1993-02-01,grain,3261
1993-03-01,grain,4160
1993-04-01,grain,4377
1993-05-01,grain,4307
1993-06-01,grain,4696
1993-07-01,grain,4458
1993-08-01,grain,4457
1993-09-01,grain,4364
1993-10-01,grain,4236
1993-11-01,grain,4500
1993-12-01,grain,4974
1994-01-01,grain,3075
1994-02-01,grain,3377
1994-03-01,grain,4443
1994-04-01,grain,4261
1994-05-01,grain,4460
1994-06-01,grain,4985
1994-07-01,grain,4324
1994-08-01,grain,4719
1994-09-01,grain,4374
1994-10-01,grain,4248
1994-11-01,grain,4784
1994-12-01,grain,4971
1995-01-01,grain,3370
1995-02-01,grain,3484
1995-03-01,grain,4269
1995-04-01,grain,3994
1995-05-01,grain,4715
1995-06-01,grain,4974
1995-07-01,grain,4223
1995-08-01,grain,5000
1995-09-01,grain,4235
1995-10-01,grain,4554
1995-11-01,grain,4851
1995-12-01,grain,4826
1996-01-01,grain,3699
1996-02-01,grain,3983
1996-03-01,grain,4262
1996-04-01,grain,4619
1996-05-01,grain,5219
1996-06-01,grain,4836
1996-07-01,grain,4941
1996-08-01,grain,5062
1996-09-01,grain,4365
1996-10-01,grain,5012
1996-11-01,grain,4850
1996-12-01,grain,5097
1997-01-01,grain,3758
1997-02-01,grain,3825
1997-03-01,grain,4454
1997-04-01,grain,4635
1997-05-01,grain,5210
1997-06-01,grain,5057
1997-07-01,grain,5231
1997-08-01,grain,5034
1997-09-01,grain,4970
1997-10-01,grain,5342
1997-11-01,grain,4831
1997-12-01,grain,5965
1998-01-01,grain,3796
1998-02-01,grain,4019
1998-03-01,grain,4898
1998-04-01,grain,5090
1998-05-01,grain,5237
1998-06-01,grain,5447
1998-07-01,grain,5435
1998-08-01,grain,5107
1998-09-01,grain,5515
1998-10-01,grain,5583
1998-11-01,grain,5346
1998-12-01,grain,6286
1999-01-01,grain,4032
1999-02-01,grain,4435
1999-03-01,grain,5479
1999-04-01,grain,5483
1999-05-01,grain,5587
1999-06-01,grain,6176
1999-07-01,grain,5621
1999-08-01,grain,5889
1999-09-01,grain,5828
1999-10-01,grain,5849
1999-11-01,grain,6180
1999-12-01,grain,6771
2000-01-01,grain,4243
2000-02-01,grain,4952
2000-03-01,grain,6008
2000-04-01,grain,5353
2000-05-01,grain,6435
2000-06-01,grain,6673
2000-07-01,grain,5636
2000-08-01,grain,6630
2000-09-01,grain,5887
2000-10-01,grain,6322
2000-11-01,grain,6520
2000-12-01,grain,6678
2001-01-01,grain,5082
2001-02-01,grain,5216
2001-03-01,grain,5893
2001-04-01,grain,5894
2001-05-01,grain,6799
2001-06-01,grain,6667
2001-07-01,grain,6374
2001-08-01,grain,6840
2001-09-01,grain,5575
2001-10-01,grain,6545
2001-11-01,grain,6789
2001-12-01,grain,7180
2002-01-01,grain,5117
2002-02-01,grain,5442
2002-03-01,grain,6337
2002-04-01,grain,6525
2002-05-01,grain,7216
2002-06-01,grain,6761
2002-07-01,grain,6958
2002-08-01,grain,7070
2002-09-01,grain,6148
2002-10-01,grain,6924
2002-11-01,grain,6716
2002-12-01,grain,7975
2003-01-01,grain,5326
2003-02-01,grain,5609
2003-03-01,grain,6414
2003-04-01,grain,6741
2003-05-01,grain,7144
2003-06-01,grain,7133
2003-07-01,grain,7568
2003-08-01,grain,7266
2003-09-01,grain,6634
2003-10-01,grain,7626
2003-11-01,grain,6843
2003-12-01,grain,8540
2004-01-01,grain,5629
2004-02-01,grain,5898
2004-03-01,grain,7045
2004-04-01,grain,7094
2004-05-01,grain,7333
2004-06-01,grain,7918
2004-07-01,grain,7289
2004-08-01,grain,7396
2004-09-01,grain,7259
2004-10-01,grain,7268
2004-11-01,grain,7731
2004-12-01,grain,9058
2005-01-01,grain,5557
2005-02-01,grain,6237
2005-03-01,grain,7723
2005-04-01,grain,7262
2005-05-01,grain,8241
2005-06-01,grain,8757
2005-07-01,grain,7352
2005-08-01,grain,8496
2005-09-01,grain,7741
2005-10-01,grain,7710
2005-11-01,grain,8247
2005-12-01,grain,8902
2006-01-01,grain,6066
2006-02-01,grain,6590
2006-03-01,grain,7923
2006-04-01,grain,7335
2006-05-01,grain,8843
2006-06-01,grain,9327
2006-07-01,grain,7792
2006-08-01,grain,9156
2006-09-01,grain,8037
2006-10-01,grain,8640
2006-11-01,grain,9128
2006-12-01,grain,9545
2007-01-01,grain,6627
2007-02-01,grain,6743
2007-03-01,grain,8195
2007-04-01,grain,7828
2007-05-01,grain,9570
2007-06-01,grain,9484
2007-07-01,grain,8608
2007-08-01,grain,9543
2007-09-01,grain,8123
2007-10-01,grain,9649
2007-11-01,grain,9390
2007-12-01,grain,10065
2008-01-01,grain,7093
2008-02-01,grain,7483
2008-03-01,grain,8365
2008-04-01,grain,8895
2008-05-01,grain,9794
2008-06-01,grain,9977
2008-07-01,grain,9553
2008-08-01,grain,9375
2008-09-01,grain,9225
2008-10-01,grain,9948
2008-11-01,grain,8758
2008-12-01,grain,10839
2009-01-01,grain,7266
2009-02-01,grain,7578
2009-03-01,grain,8688
2009-04-01,grain,9162
2009-05-01,grain,9369
2009-06-01,grain,10167
2009-07-01,grain,9507
2009-08-01,grain,8923
2009-09-01,grain,9272
2009-10-01,grain,9075
2009-11-01,grain,8949
2009-12-01,grain,10843
2010-01-01,grain,6558
2010-02-01,grain,7481
2010-03-01,grain,9475
2010-04-01,grain,9424
2010-05-01,grain,9351
2010-06-01,grain,10552
2010-07-01,grain,9077
2010-08-01,grain,9273
2010-09-01,grain,9420
2010-10-01,grain,9413
2010-11-01,grain,9866
2010-12-01,grain,11455
2011-01-01,grain,6901
2011-02-01,grain,8014
2011-03-01,grain,9832
2011-04-01,grain,9281
2011-05-01,grain,9967
2011-06-01,grain,11344
2011-07-01,grain,9106
2011-08-01,grain,10469
2011-09-01,grain,10085
2011-10-01,grain,9612
2011-11-01,grain,10328
2011-12-01,grain,11483
2012-01-01,grain,7486
2012-02-01,grain,8641
2012-03-01,grain,9709
2012-04-01,grain,9423
2012-05-01,grain,11342
2012-06-01,grain,11274
2012-07-01,grain,9845
2012-08-01,grain,11163
2012-09-01,grain,9532
2012-10-01,grain,10754
2012-11-01,grain,10953
2012-12-01,grain,11922
2013-01-01,grain,8395
2013-02-01,grain,8888
2013-03-01,grain,10110
2013-04-01,grain,10493
2013-05-01,grain,12218
2013-06-01,grain,11385
2013-07-01,grain,11186
2013-08-01,grain,11462
2013-09-01,grain,10494
2013-10-01,grain,11540
2013-11-01,grain,11138
2013-12-01,grain,12709
2014-01-01,grain,8557
2014-02-01,grain,9059
2014-03-01,grain,10055
2014-04-01,grain,10977
2014-05-01,grain,11792
2014-06-01,grain,11904
2014-07-01,grain,10965
2014-08-01,grain,10981
2014-09-01,grain,10828
2014-10-01,grain,11817
2014-11-01,grain,10470
2014-12-01,grain,13310
2015-01-01,grain,8400
2015-02-01,grain,9062
2015-03-01,grain,10722
2015-04-01,grain,11107
2015-05-01,grain,11508
2015-06-01,grain,12904
2015-07-01,grain,11869
2015-08-01,grain,11224
2015-09-01,grain,12022
2015-10-01,grain,11983
2015-11-01,grain,11506
2015-12-01,grain,14183
2016-01-01,grain,8650
2016-02-01,grain,10323
2016-03-01,grain,12110
2016-04-01,grain,11424
2016-05-01,grain,12243
2016-06-01,grain,13686
2016-07-01,grain,10956
2016-08-01,grain,12706
2016-09-01,grain,12279
2016-10-01,grain,11914
2016-11-01,grain,13025
2016-12-01,grain,14431

@@ -1,4 +0,0 @@
name: auto-ml-forecasting-beer-remote
dependencies:
- pip:
  - azureml-sdk
@@ -1,138 +0,0 @@
import pandas as pd
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies
from azureml.train.estimator import Estimator
from azureml.core.run import Run
from azureml.automl.core.shared import constants


def split_fraction_by_grain(df, fraction, time_column_name,
                            grain_column_names=None):
    if not grain_column_names:
        df['tmp_grain_column'] = 'grain'
        grain_column_names = ['tmp_grain_column']

    """Group df by grain and split on last n rows for each group."""
    df_grouped = (df.sort_values(time_column_name)
                  .groupby(grain_column_names, group_keys=False))

    df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-int(len(dfg) *
                               fraction)] if fraction > 0 else dfg)

    df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-int(len(dfg) *
                               fraction):] if fraction > 0 else dfg[:0])

    if 'tmp_grain_column' in grain_column_names:
        for df2 in (df, df_head, df_tail):
            df2.drop('tmp_grain_column', axis=1, inplace=True)

        grain_column_names.remove('tmp_grain_column')

    return df_head, df_tail


def split_full_for_forecasting(df, time_column_name,
                               grain_column_names=None, test_split=0.2):
    index_name = df.index.name

    # Assumes that there isn't already a column called tmpindex

    df['tmpindex'] = df.index

    train_df, test_df = split_fraction_by_grain(
        df, test_split, time_column_name, grain_column_names)

    train_df = train_df.set_index('tmpindex')
    train_df.index.name = index_name

    test_df = test_df.set_index('tmpindex')
    test_df.index.name = index_name

    df.drop('tmpindex', axis=1, inplace=True)

    return train_df, test_df


def get_result_df(remote_run):
    children = list(remote_run.get_children(recursive=True))
    summary_df = pd.DataFrame(index=['run_id', 'run_algorithm',
                                     'primary_metric', 'Score'])
    goal_minimize = False
    for run in children:
        if run.get_status().lower() == constants.RunState.COMPLETE_RUN \
                and 'run_algorithm' in run.properties and 'score' in run.properties:
            # We only count in the completed child runs.
            summary_df[run.id] = [run.id, run.properties['run_algorithm'],
                                  run.properties['primary_metric'],
                                  float(run.properties['score'])]
            if ('goal' in run.properties):
                goal_minimize = run.properties['goal'].split('_')[-1] == 'min'

    summary_df = summary_df.T.sort_values(
        'Score',
        ascending=goal_minimize).drop_duplicates(['run_algorithm'])
    summary_df = summary_df.set_index('run_algorithm')
    return summary_df


def run_inference(test_experiment, compute_target, script_folder, train_run,
                  test_dataset, lookback_dataset, max_horizon,
                  target_column_name, time_column_name, freq):
    model_base_name = 'model.pkl'
    if 'model_data_location' in train_run.properties:
        model_location = train_run.properties['model_data_location']
        _, model_base_name = model_location.rsplit('/', 1)
    train_run.download_file('outputs/{}'.format(model_base_name), 'inference/{}'.format(model_base_name))
    train_run.download_file('outputs/conda_env_v_1_0_0.yml', 'inference/condafile.yml')

    inference_env = Environment("myenv")
    inference_env.docker.enabled = True
    inference_env.python.conda_dependencies = CondaDependencies(
        conda_dependencies_file_path='inference/condafile.yml')

    est = Estimator(source_directory=script_folder,
                    entry_script='infer.py',
                    script_params={
                        '--max_horizon': max_horizon,
                        '--target_column_name': target_column_name,
                        '--time_column_name': time_column_name,
                        '--frequency': freq,
                        '--model_path': model_base_name
                    },
                    inputs=[test_dataset.as_named_input('test_data'),
                            lookback_dataset.as_named_input('lookback_data')],
                    compute_target=compute_target,
                    environment_definition=inference_env)

    run = test_experiment.submit(
        est, tags={
            'training_run_id': train_run.id,
            'run_algorithm': train_run.properties['run_algorithm'],
            'valid_score': train_run.properties['score'],
            'primary_metric': train_run.properties['primary_metric']
        })

    run.log("run_algorithm", run.tags['run_algorithm'])
    return run


def run_multiple_inferences(summary_df, train_experiment, test_experiment,
                            compute_target, script_folder, test_dataset,
                            lookback_dataset, max_horizon, target_column_name,
                            time_column_name, freq):
    for run_name, run_summary in summary_df.iterrows():
        print(run_name)
        print(run_summary)
        run_id = run_summary.run_id
        train_run = Run(train_experiment, run_id)

        test_run = run_inference(
            test_experiment, compute_target, script_folder, train_run,
            test_dataset, lookback_dataset, max_horizon, target_column_name,
            time_column_name, freq)

        print(test_run)
        summary_df.loc[summary_df.run_id == run_id,
                       'test_run_id'] = test_run.id

    return summary_df
@@ -64,22 +64,23 @@
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"import pandas as pd\n",
"import numpy as np\n",
"import json\n",
"import logging\n",
"\n",
"from azureml.core import Workspace, Experiment, Dataset\n",
"from azureml.train.automl import AutoMLConfig\n",
"from datetime import datetime\n",
"from azureml.automl.core.featurization import FeaturizationConfig"
"\n",
"import azureml.core\n",
"import numpy as np\n",
"import pandas as pd\n",
"from azureml.automl.core.featurization import FeaturizationConfig\n",
"from azureml.core import Dataset, Experiment, Workspace\n",
"from azureml.train.automl import AutoMLConfig"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
]
},
{
@@ -88,7 +89,6 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -108,19 +108,20 @@
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for the run history container in the workspace\n",
"experiment_name = 'automl-bikeshareforecasting'\n",
"experiment_name = \"automl-bikeshareforecasting\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['SKU'] = ws.sku\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"SKU\"] = ws.sku\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -153,10 +154,11 @@
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
" print('Found existing cluster, use it.')\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
" max_nodes=4)\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
" )\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
@@ -178,7 +180,9 @@
"outputs": [],
"source": [
"datastore = ws.get_default_datastore()\n",
"datastore.upload_files(files = ['./bike-no.csv'], target_path = 'dataset/', overwrite = True,show_progress = True)"
"datastore.upload_files(\n",
" files=[\"./bike-no.csv\"], target_path=\"dataset/\", overwrite=True, show_progress=True\n",
")"
]
},
{
@@ -198,8 +202,8 @@
"metadata": {},
"outputs": [],
"source": [
"target_column_name = 'cnt'\n",
"time_column_name = 'date'"
"target_column_name = \"cnt\"\n",
"time_column_name = \"date\""
]
},
{
@@ -208,10 +212,12 @@
"metadata": {},
"outputs": [],
"source": [
"dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'dataset/bike-no.csv')]).with_timestamp_columns(fine_grain_timestamp=time_column_name) \n",
"dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"dataset/bike-no.csv\")]\n",
").with_timestamp_columns(fine_grain_timestamp=time_column_name)\n",
"\n",
"# Drop the columns 'casual' and 'registered' as these columns are a breakdown of the total and therefore a leak.\n",
"dataset = dataset.drop_columns(columns=['casual', 'registered'])\n",
"dataset = dataset.drop_columns(columns=[\"casual\", \"registered\"])\n",
"\n",
"dataset.take(5).to_pandas_dataframe().reset_index(drop=True)"
]
@@ -320,7 +326,7 @@
"source": [
"featurization_config = FeaturizationConfig()\n",
"# Force the target column, to be integer type.\n",
"featurization_config.add_prediction_transform_type('Integer')"
"featurization_config.add_prediction_transform_type(\"Integer\")"
]
},
{
@@ -337,18 +343,20 @@
"outputs": [],
"source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n",
"forecasting_parameters = ForecastingParameters(\n",
" time_column_name=time_column_name,\n",
" forecast_horizon=forecast_horizon,\n",
" country_or_region_for_holidays='US', # set country_or_region will trigger holiday featurizer\n",
" target_lags='auto', # use heuristic based lag setting\n",
" freq='D' # Set the forecast frequency to be daily\n",
" country_or_region_for_holidays=\"US\", # set country_or_region will trigger holiday featurizer\n",
" target_lags=\"auto\", # use heuristic based lag setting\n",
" freq=\"D\", # Set the forecast frequency to be daily\n",
")\n",
"\n",
"automl_config = AutoMLConfig(task='forecasting', \n",
" primary_metric='normalized_root_mean_squared_error',\n",
"automl_config = AutoMLConfig(\n",
" task=\"forecasting\",\n",
" primary_metric=\"normalized_root_mean_squared_error\",\n",
" featurization=featurization_config,\n",
" blocked_models = ['ExtremeRandomTrees'], \n",
" blocked_models=[\"ExtremeRandomTrees\"],\n",
" experiment_timeout_hours=0.3,\n",
" training_data=train,\n",
" label_column_name=target_column_name,\n",
@@ -358,7 +366,8 @@
" max_concurrent_iterations=4,\n",
" max_cores_per_iteration=-1,\n",
" verbosity=logging.INFO,\n",
" forecasting_parameters=forecasting_parameters)"
" forecasting_parameters=forecasting_parameters,\n",
")"
]
},
{
@@ -390,8 +399,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrieve the Best Model\n",
"Below we select the best model from all the training iterations using get_output method."
"### Retrieve the Best Run details\n",
"Below we retrieve the best Run object from among all the runs in the experiment."
]
},
{
@@ -400,8 +409,8 @@
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = remote_run.get_output()\n",
"fitted_model.steps"
"best_run = remote_run.get_best_child()\n",
"best_run"
]
},
{
@@ -410,7 +419,7 @@
"source": [
"## Featurization\n",
"\n",
"You can access the engineered feature names generated in time-series featurization. Note that a number of named holiday periods are represented. We recommend that you have at least one year of data when using this feature to ensure that all yearly holidays are captured in the training featurization."
"We can look at the engineered feature names generated in time-series featurization via the JSON file named 'engineered_feature_names.json' under the run outputs. Note that a number of named holiday periods are represented. We recommend that you have at least one year of data when using this feature to ensure that all yearly holidays are captured in the training featurization."
]
},
{
@@ -419,7 +428,14 @@
"metadata": {},
"outputs": [],
"source": [
"fitted_model.named_steps['timeseriestransformer'].get_engineered_feature_names()"
"# Download the JSON file locally\n",
"best_run.download_file(\n",
" \"outputs/engineered_feature_names.json\", \"engineered_feature_names.json\"\n",
")\n",
"with open(\"engineered_feature_names.json\", \"r\") as f:\n",
" records = json.load(f)\n",
"\n",
"records"
]
},
{
@@ -443,10 +459,26 @@
"metadata": {},
"outputs": [],
"source": [
"# Get the featurization summary as a list of JSON\n",
"featurization_summary = fitted_model.named_steps['timeseriestransformer'].get_featurization_summary()\n",
"# View the featurization summary as a pandas dataframe\n",
"pd.DataFrame.from_records(featurization_summary)"
"# Download the featurization summary JSON file locally\n",
"best_run.download_file(\n",
" \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
")\n",
"\n",
"# Render the JSON as a pandas DataFrame\n",
"with open(\"featurization_summary.json\", \"r\") as f:\n",
" records = json.load(f)\n",
"fs = pd.DataFrame.from_records(records)\n",
"\n",
"# View a summary of the featurization\n",
"fs[\n",
" [\n",
" \"RawFeatureName\",\n",
" \"TypeDetected\",\n",
" \"Dropped\",\n",
" \"EngineeredFeatureCount\",\n",
" \"Transformations\",\n",
" ]\n",
"]"
]
},
{
@@ -491,9 +523,9 @@
"import os\n",
"import shutil\n",
"\n",
"script_folder = os.path.join(os.getcwd(), 'forecast')\n",
"script_folder = os.path.join(os.getcwd(), \"forecast\")\n",
"os.makedirs(script_folder, exist_ok=True)\n",
"shutil.copy('forecasting_script.py', script_folder)"
"shutil.copy(\"forecasting_script.py\", script_folder)"
]
},
{
@@ -511,7 +543,9 @@
"source": [
"from run_forecast import run_rolling_forecast\n",
"\n",
"remote_run = run_rolling_forecast(test_experiment, compute_target, best_run, test, target_column_name)\n",
"remote_run = run_rolling_forecast(\n",
" test_experiment, compute_target, best_run, test, target_column_name\n",
")\n",
"remote_run"
]
},
@@ -538,8 +572,8 @@
"metadata": {},
"outputs": [],
"source": [
"remote_run.download_file('outputs/predictions.csv', 'predictions.csv')\n",
"df_all = pd.read_csv('predictions.csv')"
"remote_run.download_file(\"outputs/predictions.csv\", \"predictions.csv\")\n",
"df_all = pd.read_csv(\"predictions.csv\")"
]
},
{
@@ -556,18 +590,23 @@
"# use automl metrics module\n",
"scores = scoring.score_regression(\n",
" y_test=df_all[target_column_name],\n",
" y_pred=df_all['predicted'],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
" y_pred=df_all[\"predicted\"],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
")\n",
"\n",
"print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items():\n",
" print('{}: {:.3f}'.format(key, value))\n",
" print(\"{}: {:.3f}\".format(key, value))\n",
"\n",
"# Plot outputs\n",
"%matplotlib inline\n",
"test_pred = plt.scatter(df_all[target_column_name], df_all['predicted'], color='b')\n",
"test_test = plt.scatter(df_all[target_column_name], df_all[target_column_name], color='g')\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
"test_pred = plt.scatter(df_all[target_column_name], df_all[\"predicted\"], color=\"b\")\n",
"test_test = plt.scatter(\n",
" df_all[target_column_name], df_all[target_column_name], color=\"g\"\n",
")\n",
"plt.legend(\n",
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n",
"plt.show()"
]
},
@@ -588,10 +627,18 @@
"outputs": [],
"source": [
"from metrics_helper import MAPE, APE\n",
"df_all.groupby('horizon_origin').apply(\n",
" lambda df: pd.Series({'MAPE': MAPE(df[target_column_name], df['predicted']),\n",
" 'RMSE': np.sqrt(mean_squared_error(df[target_column_name], df['predicted'])),\n",
" 'MAE': mean_absolute_error(df[target_column_name], df['predicted'])}))"
"\n",
"df_all.groupby(\"horizon_origin\").apply(\n",
" lambda df: pd.Series(\n",
" {\n",
" \"MAPE\": MAPE(df[target_column_name], df[\"predicted\"]),\n",
" \"RMSE\": np.sqrt(\n",
" mean_squared_error(df[target_column_name], df[\"predicted\"])\n",
" ),\n",
" \"MAE\": mean_absolute_error(df[target_column_name], df[\"predicted\"]),\n",
" }\n",
" )\n",
")"
]
},
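The `metrics_helper.py` module imported above is not included in this diff. A minimal sketch of `MAPE`/`APE` definitions consistent with how they are called in the cell, offered as an assumption rather than the repository's actual implementation:

import numpy as np

def APE(actual, pred):
    # Absolute percentage error of each point, in percent (assumed definition).
    actual = np.asarray(actual, dtype=float)
    pred = np.asarray(pred, dtype=float)
    return 100.0 * np.abs(actual - pred) / np.abs(actual)

def MAPE(actual, pred):
    # Mean absolute percentage error over all points (assumed definition).
    return np.mean(APE(actual, pred))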
{
@@ -607,15 +654,18 @@
"metadata": {},
"outputs": [],
"source": [
"df_all_APE = df_all.assign(APE=APE(df_all[target_column_name], df_all['predicted']))\n",
"APEs = [df_all_APE[df_all['horizon_origin'] == h].APE.values for h in range(1, forecast_horizon + 1)]\n",
"df_all_APE = df_all.assign(APE=APE(df_all[target_column_name], df_all[\"predicted\"]))\n",
"APEs = [\n",
" df_all_APE[df_all[\"horizon_origin\"] == h].APE.values\n",
" for h in range(1, forecast_horizon + 1)\n",
"]\n",
"\n",
"%matplotlib inline\n",
"plt.boxplot(APEs)\n",
"plt.yscale('log')\n",
"plt.xlabel('horizon')\n",
"plt.ylabel('APE (%)')\n",
"plt.title('Absolute Percentage Errors by Forecast Horizon')\n",
"plt.yscale(\"log\")\n",
"plt.xlabel(\"horizon\")\n",
"plt.ylabel(\"APE (%)\")\n",
"plt.title(\"Absolute Percentage Errors by Forecast Horizon\")\n",
"\n",
"plt.show()"
]

@@ -4,11 +4,14 @@ from sklearn.externals import joblib

parser = argparse.ArgumentParser()
parser.add_argument(
    '--target_column_name', type=str, dest='target_column_name',
    help='Target Column Name')
    "--target_column_name",
    type=str,
    dest="target_column_name",
    help="Target Column Name",
)
parser.add_argument(
    '--test_dataset', type=str, dest='test_dataset',
    help='Test Dataset')
    "--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
)

args = parser.parse_args()
target_column_name = args.target_column_name
@@ -20,19 +23,30 @@ ws = run.experiment.workspace
# get the input dataset by id
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)

X_test_df = test_dataset.drop_columns(columns=[target_column_name]).to_pandas_dataframe().reset_index(drop=True)
y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(columns=[target_column_name]).to_pandas_dataframe()
X_test_df = (
    test_dataset.drop_columns(columns=[target_column_name])
    .to_pandas_dataframe()
    .reset_index(drop=True)
)
y_test_df = (
    test_dataset.with_timestamp_columns(None)
    .keep_columns(columns=[target_column_name])
    .to_pandas_dataframe()
)

fitted_model = joblib.load('model.pkl')
fitted_model = joblib.load("model.pkl")

y_pred, X_trans = fitted_model.rolling_evaluation(X_test_df, y_test_df.values)

# Add predictions, actuals, and horizon relative to rolling origin to the test feature data
assign_dict = {'horizon_origin': X_trans['horizon_origin'].values, 'predicted': y_pred,
               target_column_name: y_test_df[target_column_name].values}
assign_dict = {
    "horizon_origin": X_trans["horizon_origin"].values,
    "predicted": y_pred,
    target_column_name: y_test_df[target_column_name].values,
}
df_all = X_test_df.assign(**assign_dict)

file_name = 'outputs/predictions.csv'
file_name = "outputs/predictions.csv"
export_csv = df_all.to_csv(file_name, header=True)

# Upload the predictions into artifacts

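For readers unfamiliar with `rolling_evaluation`: it repeatedly advances the forecast origin through the test set, producing one prediction per test row plus a transform frame carrying the `horizon_origin` column used above. A short sketch of inspecting that output; the shape and column expectations here are assumptions based on how the script uses the results:

# Sketch under assumptions: y_pred holds one prediction per test row, and
# X_trans carries 'horizon_origin' (steps ahead of each rolling origin).
print(len(y_pred), len(X_trans))                 # expected to match the test rows
print(X_trans["horizon_origin"].value_counts())  # how many rows sit at each step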
@@ -1,32 +1,40 @@
from azureml.core import ScriptRunConfig


def run_rolling_forecast(test_experiment, compute_target, train_run,
                         test_dataset, target_column_name,
                         inference_folder='./forecast'):
    train_run.download_file('outputs/model.pkl',
                            inference_folder + '/model.pkl')
def run_rolling_forecast(
    test_experiment,
    compute_target,
    train_run,
    test_dataset,
    target_column_name,
    inference_folder="./forecast",
):
    train_run.download_file("outputs/model.pkl", inference_folder + "/model.pkl")

    inference_env = train_run.get_environment()

    config = ScriptRunConfig(source_directory=inference_folder,
                             script='forecasting_script.py',
                             arguments=['--target_column_name',
    config = ScriptRunConfig(
        source_directory=inference_folder,
        script="forecasting_script.py",
        arguments=[
            "--target_column_name",
            target_column_name,
            '--test_dataset',
            test_dataset.as_named_input(test_dataset.name)],
            "--test_dataset",
            test_dataset.as_named_input(test_dataset.name),
        ],
        compute_target=compute_target,
        environment=inference_env)
        environment=inference_env,
    )

    run = test_experiment.submit(config,
                                 tags={'training_run_id':
                                       train_run.id,
                                       'run_algorithm':
                                       train_run.properties['run_algorithm'],
                                       'valid_score':
                                       train_run.properties['score'],
                                       'primary_metric':
                                       train_run.properties['primary_metric']})
    run = test_experiment.submit(
        config,
        tags={
            "training_run_id": train_run.id,
            "run_algorithm": train_run.properties["run_algorithm"],
            "valid_score": train_run.properties["score"],
            "primary_metric": train_run.properties["primary_metric"],
        },
    )

    run.log("run_algorithm", run.tags['run_algorithm'])
    run.log("run_algorithm", run.tags["run_algorithm"])
    return run

@@ -68,6 +68,7 @@
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import logging\n",
"\n",
"from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score\n",
@@ -90,7 +91,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
]
},
{
@@ -99,7 +100,6 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -119,7 +119,7 @@
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for the run history container in the workspace\n",
"experiment_name = 'automl-forecasting-energydemand'\n",
"experiment_name = \"automl-forecasting-energydemand\"\n",
"\n",
"# # project folder\n",
"# project_folder = './sample_projects/automl-forecasting-energy-demand'\n",
@@ -127,13 +127,14 @@
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -166,10 +167,11 @@
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
" print('Found existing cluster, use it.')\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
" max_nodes=6)\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
" )\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
@@ -204,8 +206,8 @@
"metadata": {},
"outputs": [],
"source": [
"target_column_name = 'demand'\n",
"time_column_name = 'timeStamp'"
"target_column_name = \"demand\"\n",
"time_column_name = \"timeStamp\""
]
},
{
@@ -214,7 +216,9 @@
"metadata": {},
"outputs": [],
"source": [
"dataset = Dataset.Tabular.from_delimited_files(path = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/nyc_energy.csv\").with_timestamp_columns(fine_grain_timestamp=time_column_name) \n",
"dataset = Dataset.Tabular.from_delimited_files(\n",
" path=\"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/nyc_energy.csv\"\n",
").with_timestamp_columns(fine_grain_timestamp=time_column_name)\n",
"dataset.take(5).to_pandas_dataframe().reset_index(drop=True)"
]
},
@@ -343,15 +347,17 @@
"outputs": [],
"source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n",
"forecasting_parameters = ForecastingParameters(\n",
" time_column_name=time_column_name,\n",
" forecast_horizon=forecast_horizon,\n",
" freq='H' # Set the forecast frequency to be hourly\n",
" freq=\"H\", # Set the forecast frequency to be hourly\n",
")\n",
"\n",
"automl_config = AutoMLConfig(task='forecasting', \n",
" primary_metric='normalized_root_mean_squared_error',\n",
" blocked_models = ['ExtremeRandomTrees', 'AutoArima', 'Prophet'], \n",
"automl_config = AutoMLConfig(\n",
" task=\"forecasting\",\n",
" primary_metric=\"normalized_root_mean_squared_error\",\n",
" blocked_models=[\"ExtremeRandomTrees\", \"AutoArima\", \"Prophet\"],\n",
" experiment_timeout_hours=0.3,\n",
" training_data=train,\n",
" label_column_name=target_column_name,\n",
@@ -359,7 +365,8 @@
" enable_early_stopping=True,\n",
" n_cross_validations=3,\n",
" verbosity=logging.INFO,\n",
" forecasting_parameters=forecasting_parameters)"
" forecasting_parameters=forecasting_parameters,\n",
")"
]
},
{
@@ -392,8 +399,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## Retrieve the Best Model\n",
"Below we select the best model from all the training iterations using get_output method."
"## Retrieve the Best Run details\n",
"Below we retrieve the best Run object from among all the runs in the experiment."
]
},
{
@@ -402,8 +409,8 @@
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = remote_run.get_output()\n",
"fitted_model.steps"
"best_run = remote_run.get_best_child()\n",
"best_run"
]
},
{
@@ -411,7 +418,7 @@
"metadata": {},
"source": [
"## Featurization\n",
"You can access the engineered feature names generated in time-series featurization."
"We can look at the engineered feature names generated in time-series featurization via the JSON file named 'engineered_feature_names.json' under the run outputs."
]
},
{
@@ -420,7 +427,14 @@
"metadata": {},
"outputs": [],
"source": [
"fitted_model.named_steps['timeseriestransformer'].get_engineered_feature_names()"
"# Download the JSON file locally\n",
"best_run.download_file(\n",
" \"outputs/engineered_feature_names.json\", \"engineered_feature_names.json\"\n",
")\n",
"with open(\"engineered_feature_names.json\", \"r\") as f:\n",
" records = json.load(f)\n",
"\n",
"records"
]
},
{
@@ -443,10 +457,26 @@
"metadata": {},
"outputs": [],
"source": [
"# Get the featurization summary as a list of JSON\n",
"featurization_summary = fitted_model.named_steps['timeseriestransformer'].get_featurization_summary()\n",
"# View the featurization summary as a pandas dataframe\n",
"pd.DataFrame.from_records(featurization_summary)"
"# Download the featurization summary JSON file locally\n",
"best_run.download_file(\n",
" \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
")\n",
"\n",
"# Render the JSON as a pandas DataFrame\n",
"with open(\"featurization_summary.json\", \"r\") as f:\n",
" records = json.load(f)\n",
"fs = pd.DataFrame.from_records(records)\n",
"\n",
"# View a summary of the featurization\n",
"fs[\n",
" [\n",
" \"RawFeatureName\",\n",
" \"TypeDetected\",\n",
" \"Dropped\",\n",
" \"EngineeredFeatureCount\",\n",
" \"Transformations\",\n",
" ]\n",
"]"
]
},
{
@@ -473,7 +503,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retreiving forecasts from the model\n",
"### Retrieving forecasts from the model\n",
"We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and executed on the remote compute."
]
},
@@ -484,15 +514,18 @@
"outputs": [],
"source": [
"from run_forecast import run_remote_inference\n",
"remote_run_infer = run_remote_inference(test_experiment=test_experiment,\n",
"\n",
"remote_run_infer = run_remote_inference(\n",
" test_experiment=test_experiment,\n",
" compute_target=compute_target,\n",
" train_run=best_run,\n",
" test_dataset=test,\n",
" target_column_name=target_column_name)\n",
" target_column_name=target_column_name,\n",
")\n",
"remote_run_infer.wait_for_completion(show_output=False)\n",
"\n",
"# download the inference output file to the local machine\n",
"remote_run_infer.download_file('outputs/predictions.csv', 'predictions.csv')"
"remote_run_infer.download_file(\"outputs/predictions.csv\", \"predictions.csv\")"
]
},
{
@@ -510,7 +543,7 @@
"outputs": [],
"source": [
"# load forecast data frame\n",
"fcst_df = pd.read_csv('predictions.csv', parse_dates=[time_column_name])\n",
"fcst_df = pd.read_csv(\"predictions.csv\", parse_dates=[time_column_name])\n",
"fcst_df.head()"
]
},
@@ -527,18 +560,23 @@
"# use automl metrics module\n",
"scores = scoring.score_regression(\n",
" y_test=fcst_df[target_column_name],\n",
" y_pred=fcst_df['predicted'],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
" y_pred=fcst_df[\"predicted\"],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
")\n",
"\n",
"print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items():\n",
" print('{}: {:.3f}'.format(key, value))\n",
" print(\"{}: {:.3f}\".format(key, value))\n",
"\n",
"# Plot outputs\n",
"%matplotlib inline\n",
"test_pred = plt.scatter(fcst_df[target_column_name], fcst_df['predicted'], color='b')\n",
"test_test = plt.scatter(fcst_df[target_column_name], fcst_df[target_column_name], color='g')\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
"test_pred = plt.scatter(fcst_df[target_column_name], fcst_df[\"predicted\"], color=\"b\")\n",
"test_test = plt.scatter(\n",
" fcst_df[target_column_name], fcst_df[target_column_name], color=\"g\"\n",
")\n",
"plt.legend(\n",
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n",
"plt.show()"
]
},
@@ -567,13 +605,24 @@
"outputs": [],
"source": [
"advanced_forecasting_parameters = ForecastingParameters(\n",
" time_column_name=time_column_name, forecast_horizon=forecast_horizon,\n",
" target_lags=12, target_rolling_window_size=4\n",
" time_column_name=time_column_name,\n",
" forecast_horizon=forecast_horizon,\n",
" target_lags=12,\n",
" target_rolling_window_size=4,\n",
")\n",
"\n",
"automl_config = AutoMLConfig(task='forecasting', \n",
" primary_metric='normalized_root_mean_squared_error',\n",
" blocked_models = ['ElasticNet','ExtremeRandomTrees','GradientBoosting','XGBoostRegressor','ExtremeRandomTrees', 'AutoArima', 'Prophet'], #These models are blocked for tutorial purposes, remove this for real use cases. \n",
"automl_config = AutoMLConfig(\n",
" task=\"forecasting\",\n",
" primary_metric=\"normalized_root_mean_squared_error\",\n",
" blocked_models=[\n",
" \"ElasticNet\",\n",
" \"ExtremeRandomTrees\",\n",
" \"GradientBoosting\",\n",
" \"XGBoostRegressor\",\n",
" \"ExtremeRandomTrees\",\n",
" \"AutoArima\",\n",
" \"Prophet\",\n",
" ], # These models are blocked for tutorial purposes, remove this for real use cases.\n",
" experiment_timeout_hours=0.3,\n",
" training_data=train,\n",
" label_column_name=target_column_name,\n",
@@ -581,7 +630,8 @@
" enable_early_stopping=True,\n",
" n_cross_validations=3,\n",
" verbosity=logging.INFO,\n",
" forecasting_parameters=advanced_forecasting_parameters)"
" forecasting_parameters=advanced_forecasting_parameters,\n",
")"
]
},
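For intuition, `target_lags=12` and `target_rolling_window_size=4` in the cell above ask the featurizer to add lagged copies of the target and short rolling aggregates of it. A rough pandas analogue of the kind of features this generates; this is purely illustrative, as AutoML builds these internally:

import pandas as pd

y = pd.Series(range(24))           # stand-in for the hourly target
lag_12 = y.shift(12)               # value 12 periods back, akin to target_lags=12
roll_4_mean = y.rolling(4).mean()  # 4-period rolling mean, akin to window size 4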
{
@@ -613,7 +663,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrieve the Best Model"
"### Retrieve the Best Run details"
]
},
{
@@ -622,7 +672,8 @@
"metadata": {},
"outputs": [],
"source": [
"best_run_lags, fitted_model_lags = advanced_remote_run.get_output()"
"best_run_lags = remote_run.get_best_child()\n",
"best_run_lags"
]
},
{
@@ -640,16 +691,20 @@
"outputs": [],
"source": [
"test_experiment_advanced = Experiment(ws, experiment_name + \"_inference_advanced\")\n",
"advanced_remote_run_infer = run_remote_inference(test_experiment=test_experiment_advanced,\n",
"advanced_remote_run_infer = run_remote_inference(\n",
" test_experiment=test_experiment_advanced,\n",
" compute_target=compute_target,\n",
" train_run=best_run_lags,\n",
" test_dataset=test,\n",
" target_column_name=target_column_name,\n",
" inference_folder='./forecast_advanced')\n",
" inference_folder=\"./forecast_advanced\",\n",
")\n",
"advanced_remote_run_infer.wait_for_completion(show_output=False)\n",
"\n",
"# download the inference output file to the local machine\n",
"advanced_remote_run_infer.download_file('outputs/predictions.csv', 'predictions_advanced.csv')"
"advanced_remote_run_infer.download_file(\n",
" \"outputs/predictions.csv\", \"predictions_advanced.csv\"\n",
")"
]
},
{
@@ -658,7 +713,7 @@
"metadata": {},
"outputs": [],
"source": [
"fcst_adv_df = pd.read_csv('predictions_advanced.csv', parse_dates=[time_column_name])\n",
"fcst_adv_df = pd.read_csv(\"predictions_advanced.csv\", parse_dates=[time_column_name])\n",
"fcst_adv_df.head()"
]
},
@@ -675,18 +730,25 @@
"# use automl metrics module\n",
"scores = scoring.score_regression(\n",
" y_test=fcst_adv_df[target_column_name],\n",
" y_pred=fcst_adv_df['predicted'],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
" y_pred=fcst_adv_df[\"predicted\"],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
")\n",
"\n",
"print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items():\n",
" print('{}: {:.3f}'.format(key, value))\n",
" print(\"{}: {:.3f}\".format(key, value))\n",
"\n",
"# Plot outputs\n",
"%matplotlib inline\n",
"test_pred = plt.scatter(fcst_adv_df[target_column_name], fcst_adv_df['predicted'], color='b')\n",
"test_test = plt.scatter(fcst_adv_df[target_column_name], fcst_adv_df[target_column_name], color='g')\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
"test_pred = plt.scatter(\n",
" fcst_adv_df[target_column_name], fcst_adv_df[\"predicted\"], color=\"b\"\n",
")\n",
"test_test = plt.scatter(\n",
" fcst_adv_df[target_column_name], fcst_adv_df[target_column_name], color=\"g\"\n",
")\n",
"plt.legend(\n",
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n",
"plt.show()"
]
}

@@ -5,62 +5,20 @@ compute instance.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
from azureml.core import Dataset, Run
|
||||
from azureml.automl.core.shared.constants import TimeSeriesInternal
|
||||
from sklearn.externals import joblib
|
||||
from pandas.tseries.frequencies import to_offset
|
||||
|
||||
|
||||
def align_outputs(y_predicted, X_trans, X_test, y_test, target_column_name,
|
||||
predicted_column_name='predicted',
|
||||
horizon_colname='horizon_origin'):
|
||||
"""
|
||||
Demonstrates how to get the output aligned to the inputs
|
||||
using pandas indexes. Helps understand what happened if
|
||||
the output's shape differs from the input shape, or if
|
||||
the data got re-sorted by time and grain during forecasting.
|
||||
|
||||
Typical causes of misalignment are:
|
||||
* we predicted some periods that were missing in actuals -> drop from eval
|
||||
* model was asked to predict past max_horizon -> increase max horizon
|
||||
* data at start of X_test was needed for lags -> provide previous periods
|
||||
"""
|
||||
|
||||
if (horizon_colname in X_trans):
|
||||
df_fcst = pd.DataFrame({predicted_column_name: y_predicted,
|
||||
horizon_colname: X_trans[horizon_colname]})
|
||||
else:
|
||||
df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
|
||||
|
||||
# y and X outputs are aligned by forecast() function contract
|
||||
df_fcst.index = X_trans.index
|
||||
|
||||
# align original X_test to y_test
|
||||
X_test_full = X_test.copy()
|
||||
X_test_full[target_column_name] = y_test
|
||||
|
||||
# X_test_full's index does not include origin, so reset for merge
|
||||
df_fcst.reset_index(inplace=True)
|
||||
X_test_full = X_test_full.reset_index().drop(columns='index')
|
||||
together = df_fcst.merge(X_test_full, how='right')
|
||||
|
||||
# drop rows where prediction or actuals are nan
|
||||
# happens because of missing actuals
|
||||
# or at edges of time due to lags/rolling windows
|
||||
clean = together[together[[target_column_name,
|
||||
predicted_column_name]].notnull().all(axis=1)]
|
||||
return(clean)
|
||||
|
||||

parser = argparse.ArgumentParser()
parser.add_argument(
    '--target_column_name', type=str, dest='target_column_name',
    help='Target Column Name')
    "--target_column_name",
    type=str,
    dest="target_column_name",
    help="Target Column Name",
)
parser.add_argument(
    '--test_dataset', type=str, dest='test_dataset',
    help='Test Dataset')
    "--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
)

args = parser.parse_args()
target_column_name = args.target_column_name
@@ -76,14 +34,28 @@ X_test = test_dataset.to_pandas_dataframe().reset_index(drop=True)
y_test = X_test.pop(target_column_name).values

# generate forecast
fitted_model = joblib.load('model.pkl')
y_predictions, X_trans = fitted_model.forecast(X_test)
fitted_model = joblib.load("model.pkl")
# Default quantile values (0.025 and 0.975 bound a 95% prediction interval)
quantiles = [0.025, 0.5, 0.975]
predicted_column_name = "predicted"
PI = "prediction_interval"
fitted_model.quantiles = quantiles
pred_quantiles = fitted_model.forecast_quantiles(X_test)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
    lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
)
X_test[target_column_name] = y_test
X_test[PI] = pred_quantiles[PI]
X_test[predicted_column_name] = pred_quantiles[0.5]
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = X_test[
    X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]

# align output
df_all = align_outputs(y_predictions, X_trans, X_test, y_test, target_column_name)

file_name = 'outputs/predictions.csv'
export_csv = df_all.to_csv(file_name, header=True, index=False)  # added Index
file_name = "outputs/predictions.csv"
export_csv = clean.to_csv(file_name, header=True, index=False)  # added Index

# Upload the predictions into artifacts
run.upload_file(name=file_name, path_or_stream=file_name)

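A minimal, self-contained sketch of the interval formatting used in the script above. The toy quantile frame below is hypothetical; only the pandas logic mirrors forecasting_script.py:

import pandas as pd

# Toy stand-in for the output of forecast_quantiles(): one column per quantile.
quantiles = [0.025, 0.5, 0.975]
pred_quantiles = pd.DataFrame(
    {0.025: [9.1, 10.0], 0.5: [10.2, 11.1], 0.975: [11.3, 12.4]}
)
# Format "[lower, upper]" per row from the outermost quantiles, as the script does.
pred_quantiles["prediction_interval"] = pred_quantiles[
    [min(quantiles), max(quantiles)]
].apply(lambda x: "[{}, {}]".format(x.iloc[0], x.iloc[1]), axis=1)
print(pred_quantiles["prediction_interval"].tolist())  # ['[9.1, 11.3]', '[10.2, 12.4]']
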
@@ -3,36 +3,47 @@ import shutil
from azureml.core import ScriptRunConfig


def run_remote_inference(test_experiment, compute_target, train_run,
                         test_dataset, target_column_name, inference_folder='./forecast'):
def run_remote_inference(
    test_experiment,
    compute_target,
    train_run,
    test_dataset,
    target_column_name,
    inference_folder="./forecast",
):
    # Create local directory to copy the model.pkl and forecasting_script.py files into.
    # These files will be uploaded to and executed on the compute instance.
    os.makedirs(inference_folder, exist_ok=True)
    shutil.copy('forecasting_script.py', inference_folder)
    shutil.copy("forecasting_script.py", inference_folder)

    train_run.download_file('outputs/model.pkl',
                            os.path.join(inference_folder, 'model.pkl'))
    train_run.download_file(
        "outputs/model.pkl", os.path.join(inference_folder, "model.pkl")
    )

    inference_env = train_run.get_environment()

    config = ScriptRunConfig(source_directory=inference_folder,
                             script='forecasting_script.py',
                             arguments=['--target_column_name',
    config = ScriptRunConfig(
        source_directory=inference_folder,
        script="forecasting_script.py",
        arguments=[
            "--target_column_name",
            target_column_name,
            '--test_dataset',
            test_dataset.as_named_input(test_dataset.name)],
            "--test_dataset",
            test_dataset.as_named_input(test_dataset.name),
        ],
        compute_target=compute_target,
        environment=inference_env)
        environment=inference_env,
    )

    run = test_experiment.submit(config,
                                 tags={'training_run_id':
                                       train_run.id,
                                       'run_algorithm':
                                       train_run.properties['run_algorithm'],
                                       'valid_score':
                                       train_run.properties['score'],
                                       'primary_metric':
                                       train_run.properties['primary_metric']})
    run = test_experiment.submit(
        config,
        tags={
            "training_run_id": train_run.id,
            "run_algorithm": train_run.properties["run_algorithm"],
            "valid_score": train_run.properties["score"],
            "primary_metric": train_run.properties["primary_metric"],
        },
    )

    run.log("run_algorithm", run.tags['run_algorithm'])
    run.log("run_algorithm", run.tags["run_algorithm"])
    return run

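A hedged usage sketch of the helper above; every object passed in (experiment, compute target, best run, dataset) is assumed to come from earlier notebook cells:

# Hypothetical call -- the objects below are created in earlier cells.
# remote_run = run_remote_inference(
#     test_experiment=test_experiment,
#     compute_target=compute_target,
#     train_run=best_run,
#     test_dataset=test_dataset,
#     target_column_name=target_column_name,
# )
# remote_run.wait_for_completion(show_output=False)
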
@@ -85,7 +85,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
]
},
{
@@ -94,7 +94,6 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -111,19 +110,20 @@
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for the run history container in the workspace\n",
"experiment_name = 'automl-forecast-function-demo'\n",
"experiment_name = \"automl-forecast-function-demo\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['SKU'] = ws.sku\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"SKU\"] = ws.sku\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -141,17 +141,20 @@
"metadata": {},
"outputs": [],
"source": [
"TIME_COLUMN_NAME = 'date'\n",
"TIME_SERIES_ID_COLUMN_NAME = 'time_series_id'\n",
"TARGET_COLUMN_NAME = 'y'\n",
"TIME_COLUMN_NAME = \"date\"\n",
"TIME_SERIES_ID_COLUMN_NAME = \"time_series_id\"\n",
"TARGET_COLUMN_NAME = \"y\"\n",
"\n",
"def get_timeseries(train_len: int,\n",
"\n",
"def get_timeseries(\n",
"    train_len: int,\n",
"    test_len: int,\n",
"    time_column_name: str,\n",
"    target_column_name: str,\n",
"    time_series_id_column_name: str,\n",
"    time_series_number: int = 1,\n",
"    freq: str = 'H'):\n",
"    freq: str = \"H\",\n",
"):\n",
"    \"\"\"\n",
"    Return the time series of designed length.\n",
"\n",
@@ -174,14 +177,18 @@
"    data_test = []  # type: List[pd.DataFrame]\n",
"    data_length = train_len + test_len\n",
"    for i in range(time_series_number):\n",
"        X = pd.DataFrame({\n",
"            time_column_name: pd.date_range(start='2000-01-01',\n",
"                                            periods=data_length,\n",
"                                            freq=freq),\n",
"            target_column_name: np.arange(data_length).astype(float) + np.random.rand(data_length) + i*5,\n",
"            'ext_predictor': np.asarray(range(42, 42 + data_length)),\n",
"            time_series_id_column_name: np.repeat('ts{}'.format(i), data_length)\n",
"        })\n",
"        X = pd.DataFrame(\n",
"            {\n",
"                time_column_name: pd.date_range(\n",
"                    start=\"2000-01-01\", periods=data_length, freq=freq\n",
"                ),\n",
"                target_column_name: np.arange(data_length).astype(float)\n",
"                + np.random.rand(data_length)\n",
"                + i * 5,\n",
"                \"ext_predictor\": np.asarray(range(42, 42 + data_length)),\n",
"                time_series_id_column_name: np.repeat(\"ts{}\".format(i), data_length),\n",
"            }\n",
"        )\n",
"        data_train.append(X[:train_len])\n",
"        data_test.append(X[train_len:])\n",
"    X_train = pd.concat(data_train)\n",
@@ -190,14 +197,17 @@
"    y_test = X_test.pop(target_column_name).values\n",
"    return X_train, y_train, X_test, y_test\n",
"\n",
"\n",
"n_test_periods = 6\n",
"n_train_periods = 30\n",
"X_train, y_train, X_test, y_test = get_timeseries(train_len=n_train_periods,\n",
"X_train, y_train, X_test, y_test = get_timeseries(\n",
"    train_len=n_train_periods,\n",
"    test_len=n_test_periods,\n",
"    time_column_name=TIME_COLUMN_NAME,\n",
"    target_column_name=TARGET_COLUMN_NAME,\n",
"    time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
"    time_series_number=2)"
"    time_series_number=2,\n",
")"
]
},
{
@@ -224,11 +234,12 @@
"source": [
"# plot the example time series\n",
"import matplotlib.pyplot as plt\n",
"\n",
"whole_data = X_train.copy()\n",
"target_label = 'y'\n",
"target_label = \"y\"\n",
"whole_data[target_label] = y_train\n",
"for g in whole_data.groupby('time_series_id'):\n",
"    plt.plot(g[1]['date'].values, g[1]['y'].values, label=g[0])\n",
"for g in whole_data.groupby(\"time_series_id\"):\n",
"    plt.plot(g[1][\"date\"].values, g[1][\"y\"].values, label=g[0])\n",
"plt.legend()\n",
"plt.show()"
]
@@ -250,12 +261,12 @@
"# We need to save the artificial data and then upload it to the default workspace datastore.\n",
"DATA_PATH = \"fc_fn_data\"\n",
"DATA_PATH_X = \"{}/data_train.csv\".format(DATA_PATH)\n",
"if not os.path.isdir('data'):\n",
"    os.mkdir('data')\n",
"if not os.path.isdir(\"data\"):\n",
"    os.mkdir(\"data\")\n",
"pd.DataFrame(whole_data).to_csv(\"data/data_train.csv\", index=False)\n",
"# Upload saved data to the default data store.\n",
"ds = ws.get_default_datastore()\n",
"ds.upload(src_dir='./data', target_path=DATA_PATH, overwrite=True, show_progress=True)\n",
"ds.upload(src_dir=\"./data\", target_path=DATA_PATH, overwrite=True, show_progress=True)\n",
"train_data = Dataset.Tabular.from_delimited_files(path=ds.path(DATA_PATH_X))"
]
},
@@ -283,10 +294,11 @@
"# Verify that cluster does not exist already\n",
"try:\n",
"    compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
"    print('Found existing cluster, use it.')\n",
"    print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
"    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
"                                                           max_nodes=6)\n",
"    compute_config = AmlCompute.provisioning_configuration(\n",
"        vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
"    )\n",
"    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
@@ -315,6 +327,7 @@
"outputs": [],
"source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n",
"lags = [1, 2, 3]\n",
"forecast_horizon = n_test_periods\n",
"forecasting_parameters = ForecastingParameters(\n",
@@ -322,7 +335,7 @@
"    forecast_horizon=forecast_horizon,\n",
"    time_series_id_column_names=[TIME_SERIES_ID_COLUMN_NAME],\n",
"    target_lags=lags,\n",
"    freq='H'  # Set the forecast frequency to be hourly\n",
"    freq=\"H\",  # Set the forecast frequency to be hourly\n",
")"
]
},
@@ -344,9 +357,10 @@
"from azureml.train.automl import AutoMLConfig\n",
"\n",
"\n",
"automl_config = AutoMLConfig(task='forecasting',\n",
"                             debug_log='automl_forecasting_function.log',\n",
"                             primary_metric='normalized_root_mean_squared_error',\n",
"automl_config = AutoMLConfig(\n",
"    task=\"forecasting\",\n",
"    debug_log=\"automl_forecasting_function.log\",\n",
"    primary_metric=\"normalized_root_mean_squared_error\",\n",
"    experiment_timeout_hours=0.25,\n",
"    enable_early_stopping=True,\n",
"    training_data=train_data,\n",
@@ -356,7 +370,8 @@
"    max_concurrent_iterations=4,\n",
"    max_cores_per_iteration=-1,\n",
"    label_column_name=target_label,\n",
"    forecasting_parameters=forecasting_parameters)\n",
"    forecasting_parameters=forecasting_parameters,\n",
")\n",
"\n",
"remote_run = experiment.submit(automl_config, show_output=False)"
]
@@ -536,12 +551,14 @@
"source": [
"# generate the same kind of test data we trained on,\n",
"# but now make the train set much longer, so that the test set will be in the future\n",
"X_context, y_context, X_away, y_away = get_timeseries(train_len=42,  # train data was 30 steps long\n",
"X_context, y_context, X_away, y_away = get_timeseries(\n",
"    train_len=42,  # train data was 30 steps long\n",
"    test_len=4,\n",
"    time_column_name=TIME_COLUMN_NAME,\n",
"    target_column_name=TARGET_COLUMN_NAME,\n",
"    time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
"    time_series_number=2)\n",
"    time_series_number=2,\n",
")\n",
"\n",
"# end of the data we trained on\n",
"print(X_train.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].max())\n",
@@ -584,7 +601,9 @@
"metadata": {},
"outputs": [],
"source": [
"def make_forecasting_query(fulldata, time_column_name, target_column_name, forecast_origin, horizon, lookback):\n",
"def make_forecasting_query(\n",
"    fulldata, time_column_name, target_column_name, forecast_origin, horizon, lookback\n",
"):\n",
"\n",
"    \"\"\"\n",
"    This function will take the full dataset, and create the query\n",
@@ -599,14 +618,14 @@
"    target_column_name: string which column (must be in fulldata) is to be forecast\n",
"    forecast_origin: datetime type the last time we (pretend to) have target values\n",
"    horizon: timedelta how far forward, in time units (not periods)\n",
"    lookback: timedelta how far back does the model look?\n",
"    lookback: timedelta how far back does the model look\n",
"\n",
"    Example:\n",
"\n",
"\n",
"    ```\n",
"\n",
"    forecast_origin = pd.to_datetime('2012-09-01') + pd.DateOffset(days=5)  # forecast 5 days after end of training\n",
"    forecast_origin = pd.to_datetime(\"2012-09-01\") + pd.DateOffset(days=5)  # forecast 5 days after end of training\n",
"    print(forecast_origin)\n",
"\n",
"    X_query, y_query = make_forecasting_query(data,\n",
@@ -618,28 +637,30 @@
"    ```\n",
"    \"\"\"\n",
"\n",
"    X_past = fulldata[ (fulldata[ time_column_name ] > forecast_origin - lookback) &\n",
"                       (fulldata[ time_column_name ] <= forecast_origin)\n",
"    X_past = fulldata[\n",
"        (fulldata[time_column_name] > forecast_origin - lookback)\n",
"        & (fulldata[time_column_name] <= forecast_origin)\n",
"    ]\n",
"\n",
"    X_future = fulldata[ (fulldata[ time_column_name ] > forecast_origin) &\n",
"                         (fulldata[ time_column_name ] <= forecast_origin + horizon)\n",
"    X_future = fulldata[\n",
"        (fulldata[time_column_name] > forecast_origin)\n",
"        & (fulldata[time_column_name] <= forecast_origin + horizon)\n",
"    ]\n",
"\n",
"    y_past = X_past.pop(target_column_name).values.astype(np.float)\n",
"    y_future = X_future.pop(target_column_name).values.astype(np.float)\n",
"\n",
"    # Now take y_future and turn it into question marks\n",
"    y_query = y_future.copy().astype(np.float)  # because sometimes life hands you an int\n",
"    y_query = y_future.copy().astype(\n",
"        np.float\n",
"    )  # because sometimes life hands you an int\n",
"    y_query.fill(np.NaN)\n",
"\n",
"\n",
"    print(\"X_past is \" + str(X_past.shape) + \" - shaped\")\n",
"    print(\"X_future is \" + str(X_future.shape) + \" - shaped\")\n",
"    print(\"y_past is \" + str(y_past.shape) + \" - shaped\")\n",
"    print(\"y_query is \" + str(y_query.shape) + \" - shaped\")\n",
"\n",
"\n",
"    X_pred = pd.concat([X_past, X_future])\n",
"    y_pred = np.concatenate([y_past, y_query])\n",
"    return X_pred, y_pred"
@@ -658,8 +679,16 @@
"metadata": {},
"outputs": [],
"source": [
"print(X_context.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(['min','max','count']))\n",
"print(X_away.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(['min','max','count']))\n",
"print(\n",
"    X_context.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(\n",
"        [\"min\", \"max\", \"count\"]\n",
"    )\n",
")\n",
"print(\n",
"    X_away.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(\n",
"        [\"min\", \"max\", \"count\"]\n",
"    )\n",
")\n",
"X_context.tail(5)"
]
},
@@ -692,8 +721,9 @@
"horizon = pd.DateOffset(hours=forecast_horizon)\n",
"\n",
"# now make the forecast query from context (refer to figure)\n",
"X_pred, y_pred = make_forecasting_query(fulldata, TIME_COLUMN_NAME, TARGET_COLUMN_NAME,\n",
"                                        forecast_origin, horizon, lookback)\n",
"X_pred, y_pred = make_forecasting_query(\n",
"    fulldata, TIME_COLUMN_NAME, TARGET_COLUMN_NAME, forecast_origin, horizon, lookback\n",
")\n",
"\n",
"# show the forecast request aligned\n",
"X_show = X_pred.copy()\n",
@@ -720,7 +750,7 @@
"# show the forecast aligned\n",
"X_show = xy_away.reset_index()\n",
"# without the generated features\n",
"X_show[['date', 'time_series_id', 'ext_predictor', '_automl_target_col']]\n",
"X_show[[\"date\", \"time_series_id\", \"ext_predictor\", \"_automl_target_col\"]]\n",
"# prediction is in _automl_target_col"
]
},
@@ -751,12 +781,14 @@
"source": [
"# generate the same kind of test data we trained on, but with a single time-series and test period twice as long\n",
"# as the forecast_horizon.\n",
"_, _, X_test_long, y_test_long = get_timeseries(train_len=n_train_periods,\n",
"_, _, X_test_long, y_test_long = get_timeseries(\n",
"    train_len=n_train_periods,\n",
"    test_len=forecast_horizon * 2,\n",
"    time_column_name=TIME_COLUMN_NAME,\n",
"    target_column_name=TARGET_COLUMN_NAME,\n",
"    time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
"    time_series_number=1)\n",
"    time_series_number=1,\n",
")\n",
"\n",
"print(X_test_long.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].min())\n",
"print(X_test_long.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].max())"
@@ -781,7 +813,9 @@
"source": [
"# In this case, calling forecast() is equivalent to iterating twice over the test set, as follows.\n",
"y_pred1, _ = fitted_model.forecast(X_test_long[:forecast_horizon])\n",
"y_pred_all, _ = fitted_model.forecast(X_test_long, np.concatenate((y_pred1, np.full(forecast_horizon, np.nan))))\n",
"y_pred_all, _ = fitted_model.forecast(\n",
"    X_test_long, np.concatenate((y_pred1, np.full(forecast_horizon, np.nan)))\n",
")\n",
"np.array_equal(y_pred_all, y_pred_long)"
]
},

@@ -30,7 +30,7 @@
},
"source": [
"# Automated Machine Learning\n",
"**Beer Production Forecasting**\n",
"**Github DAU Forecasting**\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
@@ -48,7 +48,7 @@
},
"source": [
"## Introduction\n",
"This notebook demonstrates demand forecasting for Beer Production Dataset using AutoML.\n",
"This notebook demonstrates demand forecasting for the Github Daily Active Users dataset using AutoML.\n",
"\n",
"AutoML highlights here include using Deep Learning forecasts, Arima, Prophet, Remote Execution and Remote Inferencing, and working with the `forecast` function. Please also look at the additional forecasting notebooks, which document lagging, rolling windows, forecast quantiles, other ways to use the forecast function, and forecaster deployment.\n",
"\n",
@@ -57,7 +57,7 @@
"Notebook synopsis:\n",
"\n",
"1. Creating an Experiment in an existing Workspace\n",
"2. Configuration and remote run of AutoML for a time-series model exploring Regression learners, Arima, Prophet and DNNs\n",
"2. Configuration and remote run of AutoML for a time-series model exploring DNNs\n",
"3. Evaluating the fitted model using a rolling test"
]
},
@@ -92,8 +92,7 @@
"# Squash warning messages for cleaner output in the notebook\n",
"warnings.showwarning = lambda *args, **kwargs: None\n",
"\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core import Workspace, Experiment, Dataset\n",
"from azureml.train.automl import AutoMLConfig\n",
"from matplotlib import pyplot as plt\n",
"from sklearn.metrics import mean_absolute_error, mean_squared_error\n",
@@ -104,7 +103,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
]
},
{
@@ -113,7 +112,6 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -139,18 +137,19 @@
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for the run history container in the workspace\n",
"experiment_name = 'beer-remote-cpu'\n",
"experiment_name = \"github-remote-cpu\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -180,15 +179,16 @@
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your CPU cluster\n",
"cpu_cluster_name = \"beer-cluster\"\n",
"cpu_cluster_name = \"github-cluster\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
"    compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
"    print('Found existing cluster, use it.')\n",
"    print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
"    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
"                                                           max_nodes=4)\n",
"    compute_config = AmlCompute.provisioning_configuration(\n",
"        vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
"    )\n",
"    compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
@@ -202,7 +202,7 @@
},
"source": [
"## Data\n",
"Read Beer demand data from file, and preview data."
"Read Github DAU data from file, and preview data."
]
},
{
@@ -245,17 +245,19 @@
"plt.tight_layout()\n",
"\n",
"plt.subplot(2, 1, 1)\n",
"plt.title('Beer Production By Year')\n",
"df = pd.read_csv(\"Beer_no_valid_split_train.csv\", parse_dates=True, index_col='DATE').drop(columns='grain')\n",
"test_df = pd.read_csv(\"Beer_no_valid_split_test.csv\", parse_dates=True, index_col='DATE').drop(columns='grain')\n",
"plt.title(\"Github Daily Active User By Year\")\n",
"df = pd.read_csv(\"github_dau_2011-2018_train.csv\", parse_dates=True, index_col=\"date\")\n",
"test_df = pd.read_csv(\n",
"    \"github_dau_2011-2018_test.csv\", parse_dates=True, index_col=\"date\"\n",
")\n",
"plt.plot(df)\n",
"\n",
"plt.subplot(2, 1, 2)\n",
"plt.title('Beer Production By Month')\n",
"plt.title(\"Github Daily Active User By Month\")\n",
"groups = df.groupby(df.index.month)\n",
"months = concat([DataFrame(x[1].values) for x in groups], axis=1)\n",
"months = DataFrame(months)\n",
"months.columns = range(1, 13)\n",
"months.columns = range(1, 49)\n",
"months.boxplot()\n",
"\n",
"plt.show()"
@@ -270,10 +272,10 @@
},
"outputs": [],
"source": [
"target_column_name = 'BeerProduction'\n",
"time_column_name = 'DATE'\n",
"target_column_name = \"count\"\n",
"time_column_name = \"date\"\n",
"time_series_id_column_names = []\n",
"freq = 'M'  # Monthly data"
"freq = \"D\"  # Daily data"
]
},
{
@@ -296,19 +298,22 @@
"from helper import split_full_for_forecasting\n",
"\n",
"train, valid = split_full_for_forecasting(df, time_column_name)\n",
"train.to_csv(\"train.csv\")\n",
"valid.to_csv(\"valid.csv\")\n",
"test_df.to_csv(\"test.csv\")\n",
"\n",
"# Reset index to create a Tabular Dataset.\n",
"train.reset_index(inplace=True)\n",
"valid.reset_index(inplace=True)\n",
"test_df.reset_index(inplace=True)\n",
"\n",
"datastore = ws.get_default_datastore()\n",
"datastore.upload_files(files=['./train.csv'], target_path='beer-dataset/tabular/', overwrite=True, show_progress=True)\n",
"datastore.upload_files(files=['./valid.csv'], target_path='beer-dataset/tabular/', overwrite=True, show_progress=True)\n",
"datastore.upload_files(files=['./test.csv'], target_path='beer-dataset/tabular/', overwrite=True, show_progress=True)\n",
"\n",
"from azureml.core import Dataset\n",
"train_dataset = Dataset.Tabular.from_delimited_files(path=[(datastore, 'beer-dataset/tabular/train.csv')])\n",
"valid_dataset = Dataset.Tabular.from_delimited_files(path=[(datastore, 'beer-dataset/tabular/valid.csv')])\n",
"test_dataset = Dataset.Tabular.from_delimited_files(path=[(datastore, 'beer-dataset/tabular/test.csv')])"
"train_dataset = Dataset.Tabular.register_pandas_dataframe(\n",
"    train, target=(datastore, \"dataset/\"), name=\"Github_DAU_train\"\n",
")\n",
"valid_dataset = Dataset.Tabular.register_pandas_dataframe(\n",
"    valid, target=(datastore, \"dataset/\"), name=\"Github_DAU_valid\"\n",
")\n",
"test_dataset = Dataset.Tabular.register_pandas_dataframe(\n",
"    test_df, target=(datastore, \"dataset/\"), name=\"Github_DAU_test\"\n",
")"
]
},
{
@@ -366,15 +371,17 @@
"outputs": [],
"source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n",
"forecasting_parameters = ForecastingParameters(\n",
"    time_column_name=time_column_name,\n",
"    forecast_horizon=forecast_horizon,\n",
"    freq='MS'  # Set the forecast frequency to be monthly (start of the month)\n",
"    freq=\"D\",  # Set the forecast frequency to be daily\n",
")\n",
"\n",
"# We will disable the enable_early_stopping flag to ensure the DNN model is recommended for demonstration purpose.\n",
"automl_config = AutoMLConfig(task='forecasting',\n",
"                             primary_metric='normalized_root_mean_squared_error',\n",
"# To only allow the TCNForecaster we set the allowed_models parameter to reflect this.\n",
"automl_config = AutoMLConfig(\n",
"    task=\"forecasting\",\n",
"    primary_metric=\"normalized_root_mean_squared_error\",\n",
"    experiment_timeout_hours=1,\n",
"    training_data=train_dataset,\n",
"    label_column_name=target_column_name,\n",
@@ -384,8 +391,9 @@
"    max_concurrent_iterations=4,\n",
"    max_cores_per_iteration=-1,\n",
"    enable_dnn=True,\n",
"    enable_early_stopping=False,\n",
"    forecasting_parameters=forecasting_parameters)"
"    allowed_models=[\"TCNForecaster\"],\n",
"    forecasting_parameters=forecasting_parameters,\n",
")"
]
},
{
@@ -455,6 +463,7 @@
"outputs": [],
"source": [
"from helper import get_result_df\n",
"\n",
"summary_df = get_result_df(remote_run)\n",
"summary_df"
]
@@ -470,11 +479,14 @@
"source": [
"from azureml.core.run import Run\n",
"from azureml.widgets import RunDetails\n",
"forecast_model = 'TCNForecaster'\n",
"if not forecast_model in summary_df['run_id']:\n",
"    forecast_model = 'ForecastTCN'\n",
"\n",
"best_dnn_run_id = summary_df['run_id'][forecast_model]\n",
"forecast_model = \"TCNForecaster\"\n",
"if not forecast_model in summary_df[\"run_id\"]:\n",
"    forecast_model = \"ForecastTCN\"\n",
"\n",
"best_dnn_run_id = summary_df[summary_df[\"Score\"] == summary_df[\"Score\"].min()][\n",
"    \"run_id\"\n",
"][forecast_model]\n",
"best_dnn_run = Run(experiment, best_dnn_run_id)"
]
},
@@ -535,8 +547,6 @@
},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"test_dataset = Dataset.Tabular.from_delimited_files(path=[(datastore, 'beer-dataset/tabular/test.csv')])\n",
"# preview the first 5 rows of the dataset\n",
"test_dataset.take(5).to_pandas_dataframe()"
]
@@ -547,7 +557,7 @@
"metadata": {},
"outputs": [],
"source": [
"compute_target = ws.compute_targets['beer-cluster']\n",
"compute_target = ws.compute_targets[\"github-cluster\"]\n",
"test_experiment = Experiment(ws, experiment_name + \"_test\")"
]
},
@@ -563,9 +573,9 @@
"import os\n",
"import shutil\n",
"\n",
"script_folder = os.path.join(os.getcwd(), 'inference')\n",
"script_folder = os.path.join(os.getcwd(), \"inference\")\n",
"os.makedirs(script_folder, exist_ok=True)\n",
"shutil.copy('infer.py', script_folder)"
"shutil.copy(\"infer.py\", script_folder)"
]
},
{
@@ -576,8 +586,18 @@
"source": [
"from helper import run_inference\n",
"\n",
"test_run = run_inference(test_experiment, compute_target, script_folder, best_dnn_run, test_dataset, valid_dataset, forecast_horizon,\n",
"                         target_column_name, time_column_name, freq)"
"test_run = run_inference(\n",
"    test_experiment,\n",
"    compute_target,\n",
"    script_folder,\n",
"    best_dnn_run,\n",
"    test_dataset,\n",
"    valid_dataset,\n",
"    forecast_horizon,\n",
"    target_column_name,\n",
"    time_column_name,\n",
"    freq,\n",
")"
]
},
{
@@ -597,8 +617,19 @@
"source": [
"from helper import run_multiple_inferences\n",
"\n",
"summary_df = run_multiple_inferences(summary_df, experiment, test_experiment, compute_target, script_folder, test_dataset,\n",
"                                     valid_dataset, forecast_horizon, target_column_name, time_column_name, freq)"
"summary_df = run_multiple_inferences(\n",
"    summary_df,\n",
"    experiment,\n",
"    test_experiment,\n",
"    compute_target,\n",
"    script_folder,\n",
"    test_dataset,\n",
"    valid_dataset,\n",
"    forecast_horizon,\n",
"    target_column_name,\n",
"    time_column_name,\n",
"    freq,\n",
")"
]
},
{
@@ -618,7 +649,7 @@
"    test_run = Run(test_experiment, test_run_id)\n",
"    test_run.wait_for_completion()\n",
"    test_score = test_run.get_metrics()[run_summary.primary_metric]\n",
"    summary_df.loc[summary_df.run_id == run_id, 'Test Score'] = test_score\n",
"    summary_df.loc[summary_df.run_id == run_id, \"Test Score\"] = test_score\n",
"    print(\"Test Score: \", test_score)"
]
},

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-github-dau
dependencies:
- pip:
  - azureml-sdk

@@ -0,0 +1,455 @@
date,count,day_of_week,month_of_year,holiday
2017-06-04,104663,6.0,5.0,0.0
2017-06-05,155824,0.0,5.0,0.0
2017-06-06,164908,1.0,5.0,0.0
2017-06-07,170309,2.0,5.0,0.0
2017-06-08,164256,3.0,5.0,0.0
2017-06-09,153406,4.0,5.0,0.0
2017-06-10,97024,5.0,5.0,0.0
2017-06-11,103442,6.0,5.0,0.0
2017-06-12,160768,0.0,5.0,0.0
2017-06-13,166288,1.0,5.0,0.0
2017-06-14,163819,2.0,5.0,0.0
2017-06-15,157593,3.0,5.0,0.0
2017-06-16,149259,4.0,5.0,0.0
2017-06-17,95579,5.0,5.0,0.0
2017-06-18,98723,6.0,5.0,0.0
2017-06-19,159076,0.0,5.0,0.0
2017-06-20,163340,1.0,5.0,0.0
2017-06-21,163344,2.0,5.0,0.0
2017-06-22,159528,3.0,5.0,0.0
2017-06-23,146563,4.0,5.0,0.0
2017-06-24,92631,5.0,5.0,0.0
2017-06-25,96549,6.0,5.0,0.0
2017-06-26,153249,0.0,5.0,0.0
2017-06-27,160357,1.0,5.0,0.0
2017-06-28,159941,2.0,5.0,0.0
2017-06-29,156781,3.0,5.0,0.0
2017-06-30,144709,4.0,5.0,0.0
2017-07-01,89101,5.0,6.0,0.0
2017-07-02,93046,6.0,6.0,0.0
2017-07-03,144113,0.0,6.0,0.0
2017-07-04,143061,1.0,6.0,1.0
2017-07-05,154603,2.0,6.0,0.0
2017-07-06,157200,3.0,6.0,0.0
2017-07-07,147213,4.0,6.0,0.0
2017-07-08,92348,5.0,6.0,0.0
2017-07-09,97018,6.0,6.0,0.0
2017-07-10,157192,0.0,6.0,0.0
2017-07-11,161819,1.0,6.0,0.0
2017-07-12,161998,2.0,6.0,0.0
2017-07-13,160280,3.0,6.0,0.0
2017-07-14,146818,4.0,6.0,0.0
2017-07-15,93041,5.0,6.0,0.0
2017-07-16,97505,6.0,6.0,0.0
2017-07-17,156167,0.0,6.0,0.0
2017-07-18,162855,1.0,6.0,0.0
2017-07-19,162519,2.0,6.0,0.0
2017-07-20,159941,3.0,6.0,0.0
2017-07-21,148460,4.0,6.0,0.0
2017-07-22,93431,5.0,6.0,0.0
2017-07-23,98553,6.0,6.0,0.0
2017-07-24,156202,0.0,6.0,0.0
2017-07-25,162503,1.0,6.0,0.0
2017-07-26,158479,2.0,6.0,0.0
2017-07-27,158192,3.0,6.0,0.0
2017-07-28,147108,4.0,6.0,0.0
2017-07-29,93799,5.0,6.0,0.0
2017-07-30,97920,6.0,6.0,0.0
2017-07-31,152197,0.0,6.0,0.0
2017-08-01,158477,1.0,7.0,0.0
2017-08-02,159089,2.0,7.0,0.0
2017-08-03,157182,3.0,7.0,0.0
2017-08-04,146345,4.0,7.0,0.0
2017-08-05,92534,5.0,7.0,0.0
2017-08-06,97128,6.0,7.0,0.0
2017-08-07,151359,0.0,7.0,0.0
2017-08-08,159895,1.0,7.0,0.0
2017-08-09,158329,2.0,7.0,0.0
2017-08-10,155468,3.0,7.0,0.0
2017-08-11,144914,4.0,7.0,0.0
2017-08-12,92258,5.0,7.0,0.0
2017-08-13,95933,6.0,7.0,0.0
2017-08-14,147706,0.0,7.0,0.0
2017-08-15,151115,1.0,7.0,0.0
2017-08-16,157640,2.0,7.0,0.0
2017-08-17,156600,3.0,7.0,0.0
2017-08-18,146980,4.0,7.0,0.0
2017-08-19,94592,5.0,7.0,0.0
2017-08-20,99320,6.0,7.0,0.0
2017-08-21,145727,0.0,7.0,0.0
2017-08-22,160260,1.0,7.0,0.0
2017-08-23,160440,2.0,7.0,0.0
2017-08-24,157830,3.0,7.0,0.0
2017-08-25,145822,4.0,7.0,0.0
2017-08-26,94706,5.0,7.0,0.0
2017-08-27,99047,6.0,7.0,0.0
2017-08-28,152112,0.0,7.0,0.0
2017-08-29,162440,1.0,7.0,0.0
2017-08-30,162902,2.0,7.0,0.0
2017-08-31,159498,3.0,7.0,0.0
2017-09-01,145689,4.0,8.0,0.0
2017-09-02,93589,5.0,8.0,0.0
2017-09-03,100058,6.0,8.0,0.0
2017-09-04,140865,0.0,8.0,1.0
2017-09-05,165715,1.0,8.0,0.0
2017-09-06,167463,2.0,8.0,0.0
2017-09-07,164811,3.0,8.0,0.0
2017-09-08,156157,4.0,8.0,0.0
2017-09-09,101358,5.0,8.0,0.0
2017-09-10,107915,6.0,8.0,0.0
2017-09-11,167845,0.0,8.0,0.0
2017-09-12,172756,1.0,8.0,0.0
2017-09-13,172851,2.0,8.0,0.0
2017-09-14,171675,3.0,8.0,0.0
2017-09-15,159266,4.0,8.0,0.0
2017-09-16,103547,5.0,8.0,0.0
2017-09-17,110964,6.0,8.0,0.0
2017-09-18,170976,0.0,8.0,0.0
2017-09-19,177864,1.0,8.0,0.0
2017-09-20,173567,2.0,8.0,0.0
2017-09-21,172017,3.0,8.0,0.0
2017-09-22,161357,4.0,8.0,0.0
2017-09-23,104681,5.0,8.0,0.0
2017-09-24,111711,6.0,8.0,0.0
2017-09-25,173517,0.0,8.0,0.0
2017-09-26,180049,1.0,8.0,0.0
2017-09-27,178307,2.0,8.0,0.0
2017-09-28,174157,3.0,8.0,0.0
2017-09-29,161707,4.0,8.0,0.0
2017-09-30,110536,5.0,8.0,0.0
2017-10-01,106505,6.0,9.0,0.0
2017-10-02,157565,0.0,9.0,0.0
2017-10-03,164764,1.0,9.0,0.0
2017-10-04,163383,2.0,9.0,0.0
2017-10-05,162847,3.0,9.0,0.0
2017-10-06,153575,4.0,9.0,0.0
2017-10-07,107472,5.0,9.0,0.0
2017-10-08,116127,6.0,9.0,0.0
2017-10-09,174457,0.0,9.0,1.0
2017-10-10,185217,1.0,9.0,0.0
2017-10-11,185120,2.0,9.0,0.0
2017-10-12,180844,3.0,9.0,0.0
2017-10-13,170178,4.0,9.0,0.0
2017-10-14,112754,5.0,9.0,0.0
2017-10-15,121251,6.0,9.0,0.0
2017-10-16,183906,0.0,9.0,0.0
2017-10-17,188945,1.0,9.0,0.0
2017-10-18,187297,2.0,9.0,0.0
2017-10-19,183867,3.0,9.0,0.0
2017-10-20,173021,4.0,9.0,0.0
2017-10-21,115851,5.0,9.0,0.0
2017-10-22,126088,6.0,9.0,0.0
2017-10-23,189452,0.0,9.0,0.0
2017-10-24,194412,1.0,9.0,0.0
2017-10-25,192293,2.0,9.0,0.0
2017-10-26,190163,3.0,9.0,0.0
2017-10-27,177053,4.0,9.0,0.0
2017-10-28,114934,5.0,9.0,0.0
2017-10-29,125289,6.0,9.0,0.0
2017-10-30,189245,0.0,9.0,0.0
2017-10-31,191480,1.0,9.0,0.0
2017-11-01,182281,2.0,10.0,0.0
2017-11-02,186351,3.0,10.0,0.0
2017-11-03,175422,4.0,10.0,0.0
2017-11-04,118160,5.0,10.0,0.0
2017-11-05,127602,6.0,10.0,0.0
2017-11-06,191067,0.0,10.0,0.0
2017-11-07,197083,1.0,10.0,0.0
2017-11-08,194333,2.0,10.0,0.0
2017-11-09,193914,3.0,10.0,0.0
2017-11-10,179933,4.0,10.0,1.0
2017-11-11,121346,5.0,10.0,0.0
2017-11-12,131900,6.0,10.0,0.0
2017-11-13,196969,0.0,10.0,0.0
2017-11-14,201949,1.0,10.0,0.0
2017-11-15,198424,2.0,10.0,0.0
2017-11-16,196902,3.0,10.0,0.0
2017-11-17,183893,4.0,10.0,0.0
2017-11-18,122767,5.0,10.0,0.0
2017-11-19,130890,6.0,10.0,0.0
2017-11-20,194515,0.0,10.0,0.0
2017-11-21,198601,1.0,10.0,0.0
2017-11-22,191041,2.0,10.0,0.0
2017-11-23,170321,3.0,10.0,1.0
2017-11-24,155623,4.0,10.0,0.0
2017-11-25,115759,5.0,10.0,0.0
2017-11-26,128771,6.0,10.0,0.0
2017-11-27,199419,0.0,10.0,0.0
2017-11-28,207253,1.0,10.0,0.0
2017-11-29,205406,2.0,10.0,0.0
2017-11-30,200674,3.0,10.0,0.0
2017-12-01,187017,4.0,11.0,0.0
2017-12-02,129735,5.0,11.0,0.0
2017-12-03,139120,6.0,11.0,0.0
2017-12-04,205505,0.0,11.0,0.0
2017-12-05,208218,1.0,11.0,0.0
2017-12-06,202480,2.0,11.0,0.0
2017-12-07,197822,3.0,11.0,0.0
2017-12-08,180686,4.0,11.0,0.0
2017-12-09,123667,5.0,11.0,0.0
2017-12-10,130987,6.0,11.0,0.0
2017-12-11,193901,0.0,11.0,0.0
2017-12-12,194997,1.0,11.0,0.0
2017-12-13,192063,2.0,11.0,0.0
2017-12-14,186496,3.0,11.0,0.0
2017-12-15,170812,4.0,11.0,0.0
2017-12-16,110474,5.0,11.0,0.0
2017-12-17,118165,6.0,11.0,0.0
2017-12-18,176843,0.0,11.0,0.0
2017-12-19,179550,1.0,11.0,0.0
2017-12-20,173506,2.0,11.0,0.0
2017-12-21,165910,3.0,11.0,0.0
2017-12-22,145886,4.0,11.0,0.0
2017-12-23,95246,5.0,11.0,0.0
2017-12-24,88781,6.0,11.0,0.0
2017-12-25,98189,0.0,11.0,1.0
2017-12-26,121383,1.0,11.0,0.0
2017-12-27,135300,2.0,11.0,0.0
2017-12-28,136827,3.0,11.0,0.0
2017-12-29,127700,4.0,11.0,0.0
2017-12-30,93014,5.0,11.0,0.0
2017-12-31,82878,6.0,11.0,0.0
2018-01-01,86419,0.0,0.0,1.0
2018-01-02,147428,1.0,0.0,0.0
2018-01-03,162193,2.0,0.0,0.0
2018-01-04,163784,3.0,0.0,0.0
2018-01-05,158606,4.0,0.0,0.0
2018-01-06,113467,5.0,0.0,0.0
2018-01-07,118313,6.0,0.0,0.0
2018-01-08,175623,0.0,0.0,0.0
2018-01-09,183880,1.0,0.0,0.0
2018-01-10,183945,2.0,0.0,0.0
2018-01-11,181769,3.0,0.0,0.0
2018-01-12,170552,4.0,0.0,0.0
2018-01-13,115707,5.0,0.0,0.0
2018-01-14,121191,6.0,0.0,0.0
2018-01-15,176127,0.0,0.0,1.0
2018-01-16,188032,1.0,0.0,0.0
2018-01-17,189871,2.0,0.0,0.0
2018-01-18,189348,3.0,0.0,0.0
2018-01-19,177456,4.0,0.0,0.0
2018-01-20,123321,5.0,0.0,0.0
2018-01-21,128306,6.0,0.0,0.0
2018-01-22,186132,0.0,0.0,0.0
2018-01-23,197618,1.0,0.0,0.0
2018-01-24,196402,2.0,0.0,0.0
2018-01-25,192722,3.0,0.0,0.0
2018-01-26,179415,4.0,0.0,0.0
2018-01-27,125769,5.0,0.0,0.0
2018-01-28,133306,6.0,0.0,0.0
2018-01-29,194151,0.0,0.0,0.0
2018-01-30,198680,1.0,0.0,0.0
2018-01-31,198652,2.0,0.0,0.0
2018-02-01,195472,3.0,1.0,0.0
2018-02-02,183173,4.0,1.0,0.0
2018-02-03,124276,5.0,1.0,0.0
2018-02-04,129054,6.0,1.0,0.0
2018-02-05,190024,0.0,1.0,0.0
2018-02-06,198658,1.0,1.0,0.0
2018-02-07,198272,2.0,1.0,0.0
2018-02-08,195339,3.0,1.0,0.0
2018-02-09,183086,4.0,1.0,0.0
2018-02-10,122536,5.0,1.0,0.0
2018-02-11,133033,6.0,1.0,0.0
2018-02-12,185386,0.0,1.0,0.0
2018-02-13,184789,1.0,1.0,0.0
2018-02-14,176089,2.0,1.0,0.0
2018-02-15,171317,3.0,1.0,0.0
2018-02-16,162693,4.0,1.0,0.0
2018-02-17,116342,5.0,1.0,0.0
2018-02-18,122466,6.0,1.0,0.0
2018-02-19,172364,0.0,1.0,1.0
2018-02-20,185896,1.0,1.0,0.0
2018-02-21,188166,2.0,1.0,0.0
2018-02-22,189427,3.0,1.0,0.0
2018-02-23,178732,4.0,1.0,0.0
2018-02-24,132664,5.0,1.0,0.0
2018-02-25,134008,6.0,1.0,0.0
2018-02-26,200075,0.0,1.0,0.0
2018-02-27,207996,1.0,1.0,0.0
2018-02-28,204416,2.0,1.0,0.0
2018-03-01,201320,3.0,2.0,0.0
2018-03-02,188205,4.0,2.0,0.0
2018-03-03,131162,5.0,2.0,0.0
2018-03-04,138320,6.0,2.0,0.0
2018-03-05,207326,0.0,2.0,0.0
2018-03-06,212462,1.0,2.0,0.0
2018-03-07,209357,2.0,2.0,0.0
2018-03-08,194876,3.0,2.0,0.0
2018-03-09,193761,4.0,2.0,0.0
2018-03-10,133449,5.0,2.0,0.0
2018-03-11,142258,6.0,2.0,0.0
2018-03-12,208753,0.0,2.0,0.0
2018-03-13,210602,1.0,2.0,0.0
2018-03-14,214236,2.0,2.0,0.0
2018-03-15,210761,3.0,2.0,0.0
2018-03-16,196619,4.0,2.0,0.0
2018-03-17,133056,5.0,2.0,0.0
2018-03-18,141335,6.0,2.0,0.0
2018-03-19,211580,0.0,2.0,0.0
2018-03-20,219051,1.0,2.0,0.0
2018-03-21,215435,2.0,2.0,0.0
2018-03-22,211961,3.0,2.0,0.0
2018-03-23,196009,4.0,2.0,0.0
2018-03-24,132390,5.0,2.0,0.0
2018-03-25,140021,6.0,2.0,0.0
2018-03-26,205273,0.0,2.0,0.0
2018-03-27,212686,1.0,2.0,0.0
2018-03-28,210683,2.0,2.0,0.0
2018-03-29,189044,3.0,2.0,0.0
2018-03-30,170256,4.0,2.0,0.0
2018-03-31,125999,5.0,2.0,0.0
2018-04-01,126749,6.0,3.0,0.0
2018-04-02,186546,0.0,3.0,0.0
2018-04-03,207905,1.0,3.0,0.0
2018-04-04,201528,2.0,3.0,0.0
2018-04-05,188580,3.0,3.0,0.0
2018-04-06,173714,4.0,3.0,0.0
2018-04-07,125723,5.0,3.0,0.0
2018-04-08,142545,6.0,3.0,0.0
2018-04-09,204767,0.0,3.0,0.0
2018-04-10,212048,1.0,3.0,0.0
2018-04-11,210517,2.0,3.0,0.0
2018-04-12,206924,3.0,3.0,0.0
2018-04-13,191679,4.0,3.0,0.0
2018-04-14,126394,5.0,3.0,0.0
2018-04-15,137279,6.0,3.0,0.0
2018-04-16,208085,0.0,3.0,0.0
2018-04-17,213273,1.0,3.0,0.0
2018-04-18,211580,2.0,3.0,0.0
2018-04-19,206037,3.0,3.0,0.0
2018-04-20,191211,4.0,3.0,0.0
2018-04-21,125564,5.0,3.0,0.0
2018-04-22,136469,6.0,3.0,0.0
2018-04-23,206288,0.0,3.0,0.0
2018-04-24,212115,1.0,3.0,0.0
2018-04-25,207948,2.0,3.0,0.0
2018-04-26,205759,3.0,3.0,0.0
2018-04-27,181330,4.0,3.0,0.0
2018-04-28,130046,5.0,3.0,0.0
2018-04-29,120802,6.0,3.0,0.0
2018-04-30,170390,0.0,3.0,0.0
2018-05-01,169054,1.0,4.0,0.0
2018-05-02,197891,2.0,4.0,0.0
2018-05-03,199820,3.0,4.0,0.0
2018-05-04,186783,4.0,4.0,0.0
2018-05-05,124420,5.0,4.0,0.0
2018-05-06,130666,6.0,4.0,0.0
2018-05-07,196014,0.0,4.0,0.0
2018-05-08,203058,1.0,4.0,0.0
2018-05-09,198582,2.0,4.0,0.0
2018-05-10,191321,3.0,4.0,0.0
2018-05-11,183639,4.0,4.0,0.0
2018-05-12,122023,5.0,4.0,0.0
2018-05-13,128775,6.0,4.0,0.0
2018-05-14,199104,0.0,4.0,0.0
2018-05-15,200658,1.0,4.0,0.0
2018-05-16,201541,2.0,4.0,0.0
2018-05-17,196886,3.0,4.0,0.0
2018-05-18,188597,4.0,4.0,0.0
2018-05-19,121392,5.0,4.0,0.0
2018-05-20,126981,6.0,4.0,0.0
2018-05-21,189291,0.0,4.0,0.0
2018-05-22,203038,1.0,4.0,0.0
2018-05-23,205330,2.0,4.0,0.0
2018-05-24,199208,3.0,4.0,0.0
2018-05-25,187768,4.0,4.0,0.0
2018-05-26,117635,5.0,4.0,0.0
2018-05-27,124352,6.0,4.0,0.0
2018-05-28,180398,0.0,4.0,1.0
2018-05-29,194170,1.0,4.0,0.0
2018-05-30,200281,2.0,4.0,0.0
2018-05-31,197244,3.0,4.0,0.0
2018-06-01,184037,4.0,5.0,0.0
2018-06-02,121135,5.0,5.0,0.0
2018-06-03,129389,6.0,5.0,0.0
2018-06-04,200331,0.0,5.0,0.0
2018-06-05,207735,1.0,5.0,0.0
2018-06-06,203354,2.0,5.0,0.0
2018-06-07,200520,3.0,5.0,0.0
2018-06-08,182038,4.0,5.0,0.0
2018-06-09,120164,5.0,5.0,0.0
2018-06-10,125256,6.0,5.0,0.0
2018-06-11,194786,0.0,5.0,0.0
2018-06-12,200815,1.0,5.0,0.0
2018-06-13,197740,2.0,5.0,0.0
2018-06-14,192294,3.0,5.0,0.0
2018-06-15,173587,4.0,5.0,0.0
2018-06-16,105955,5.0,5.0,0.0
2018-06-17,110780,6.0,5.0,0.0
2018-06-18,174582,0.0,5.0,0.0
2018-06-19,193310,1.0,5.0,0.0
2018-06-20,193062,2.0,5.0,0.0
2018-06-21,187986,3.0,5.0,0.0
2018-06-22,173606,4.0,5.0,0.0
2018-06-23,111795,5.0,5.0,0.0
2018-06-24,116134,6.0,5.0,0.0
2018-06-25,185919,0.0,5.0,0.0
2018-06-26,193142,1.0,5.0,0.0
2018-06-27,188114,2.0,5.0,0.0
2018-06-28,183737,3.0,5.0,0.0
2018-06-29,171496,4.0,5.0,0.0
2018-06-30,107210,5.0,5.0,0.0
2018-07-01,111053,6.0,6.0,0.0
2018-07-02,176198,0.0,6.0,0.0
2018-07-03,184040,1.0,6.0,0.0
2018-07-04,169783,2.0,6.0,1.0
2018-07-05,177996,3.0,6.0,0.0
2018-07-06,167378,4.0,6.0,0.0
2018-07-07,106401,5.0,6.0,0.0
2018-07-08,112327,6.0,6.0,0.0
2018-07-09,182835,0.0,6.0,0.0
2018-07-10,187694,1.0,6.0,0.0
2018-07-11,185762,2.0,6.0,0.0
2018-07-12,184099,3.0,6.0,0.0
2018-07-13,170860,4.0,6.0,0.0
2018-07-14,106799,5.0,6.0,0.0
2018-07-15,108475,6.0,6.0,0.0
2018-07-16,175704,0.0,6.0,0.0
2018-07-17,183596,1.0,6.0,0.0
2018-07-18,179897,2.0,6.0,0.0
2018-07-19,183373,3.0,6.0,0.0
2018-07-20,169626,4.0,6.0,0.0
2018-07-21,106785,5.0,6.0,0.0
2018-07-22,112387,6.0,6.0,0.0
2018-07-23,180572,0.0,6.0,0.0
2018-07-24,186943,1.0,6.0,0.0
2018-07-25,185744,2.0,6.0,0.0
2018-07-26,183117,3.0,6.0,0.0
2018-07-27,168526,4.0,6.0,0.0
2018-07-28,105936,5.0,6.0,0.0
2018-07-29,111708,6.0,6.0,0.0
2018-07-30,179950,0.0,6.0,0.0
2018-07-31,185930,1.0,6.0,0.0
2018-08-01,183366,2.0,7.0,0.0
2018-08-02,182412,3.0,7.0,0.0
2018-08-03,173429,4.0,7.0,0.0
2018-08-04,106108,5.0,7.0,0.0
2018-08-05,110059,6.0,7.0,0.0
2018-08-06,178355,0.0,7.0,0.0
2018-08-07,185518,1.0,7.0,0.0
2018-08-08,183204,2.0,7.0,0.0
2018-08-09,181276,3.0,7.0,0.0
2018-08-10,168297,4.0,7.0,0.0
2018-08-11,106488,5.0,7.0,0.0
2018-08-12,111786,6.0,7.0,0.0
2018-08-13,178620,0.0,7.0,0.0
2018-08-14,181922,1.0,7.0,0.0
2018-08-15,172198,2.0,7.0,0.0
2018-08-16,177367,3.0,7.0,0.0
2018-08-17,166550,4.0,7.0,0.0
2018-08-18,107011,5.0,7.0,0.0
2018-08-19,112299,6.0,7.0,0.0
2018-08-20,176718,0.0,7.0,0.0
2018-08-21,182562,1.0,7.0,0.0
2018-08-22,181484,2.0,7.0,0.0
2018-08-23,180317,3.0,7.0,0.0
2018-08-24,170197,4.0,7.0,0.0
2018-08-25,109383,5.0,7.0,0.0
2018-08-26,113373,6.0,7.0,0.0
2018-08-27,180142,0.0,7.0,0.0
2018-08-28,191628,1.0,7.0,0.0
2018-08-29,191149,2.0,7.0,0.0
2018-08-30,187503,3.0,7.0,0.0
2018-08-31,172280,4.0,7.0,0.0

File diff suppressed because it is too large

@@ -0,0 +1,176 @@
import pandas as pd
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies
from azureml.train.estimator import Estimator
from azureml.core.run import Run
from azureml.automl.core.shared import constants


def split_fraction_by_grain(df, fraction, time_column_name, grain_column_names=None):
    """Group df by grain and split on last n rows for each group."""
    if not grain_column_names:
        df["tmp_grain_column"] = "grain"
        grain_column_names = ["tmp_grain_column"]

    df_grouped = df.sort_values(time_column_name).groupby(
        grain_column_names, group_keys=False
    )

    df_head = df_grouped.apply(
        lambda dfg: dfg.iloc[: -int(len(dfg) * fraction)] if fraction > 0 else dfg
    )

    df_tail = df_grouped.apply(
        lambda dfg: dfg.iloc[-int(len(dfg) * fraction) :] if fraction > 0 else dfg[:0]
    )

    if "tmp_grain_column" in grain_column_names:
        for df2 in (df, df_head, df_tail):
            df2.drop("tmp_grain_column", axis=1, inplace=True)

        grain_column_names.remove("tmp_grain_column")

    return df_head, df_tail


def split_full_for_forecasting(
    df, time_column_name, grain_column_names=None, test_split=0.2
):
    index_name = df.index.name

    # Assumes that there isn't already a column called tmpindex

    df["tmpindex"] = df.index

    train_df, test_df = split_fraction_by_grain(
        df, test_split, time_column_name, grain_column_names
    )

    train_df = train_df.set_index("tmpindex")
    train_df.index.name = index_name

    test_df = test_df.set_index("tmpindex")
    test_df.index.name = index_name

    df.drop("tmpindex", axis=1, inplace=True)

    return train_df, test_df

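# --- Illustrative sketch (not part of the original helper.py) ---
# A minimal demo of split_full_for_forecasting on a toy frame; the frame and
# the 80/20 split below are assumptions for illustration only:
#     import pandas as pd
#     idx = pd.date_range("2018-01-01", periods=10, freq="D")
#     toy = pd.DataFrame({"count": range(10)}, index=idx)
#     toy.index.name = "date"
#     train, valid = split_full_for_forecasting(toy, "date", test_split=0.2)
#     print(len(train), len(valid))  # 8 2
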
def get_result_df(remote_run):
    children = list(remote_run.get_children(recursive=True))
    summary_df = pd.DataFrame(
        index=["run_id", "run_algorithm", "primary_metric", "Score"]
    )
    goal_minimize = False
    for run in children:
        if (
            run.get_status().lower() == constants.RunState.COMPLETE_RUN
            and "run_algorithm" in run.properties
            and "score" in run.properties
        ):
            # We only count the completed child runs.
            summary_df[run.id] = [
                run.id,
                run.properties["run_algorithm"],
                run.properties["primary_metric"],
                float(run.properties["score"]),
            ]
            if "goal" in run.properties:
                goal_minimize = run.properties["goal"].split("_")[-1] == "min"

    summary_df = summary_df.T.sort_values("Score", ascending=goal_minimize)
    summary_df = summary_df.set_index("run_algorithm")
    return summary_df

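# --- Illustrative note (not part of the original helper.py) ---
# get_result_df sorts best-first for the run's optimization goal, so a hedged
# way to pick the top model downstream would be:
#     summary_df = get_result_df(remote_run)
#     best_row = summary_df.iloc[0]  # rows are indexed by run_algorithm
#     print(best_row.run_id, best_row.Score)
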
def run_inference(
    test_experiment,
    compute_target,
    script_folder,
    train_run,
    test_dataset,
    lookback_dataset,
    max_horizon,
    target_column_name,
    time_column_name,
    freq,
):
    model_base_name = "model.pkl"
    if "model_data_location" in train_run.properties:
        model_location = train_run.properties["model_data_location"]
        _, model_base_name = model_location.rsplit("/", 1)
    train_run.download_file(
        "outputs/{}".format(model_base_name), "inference/{}".format(model_base_name)
    )

    inference_env = train_run.get_environment()

    est = Estimator(
        source_directory=script_folder,
        entry_script="infer.py",
        script_params={
            "--max_horizon": max_horizon,
            "--target_column_name": target_column_name,
            "--time_column_name": time_column_name,
            "--frequency": freq,
            "--model_path": model_base_name,
        },
        inputs=[
            test_dataset.as_named_input("test_data"),
            lookback_dataset.as_named_input("lookback_data"),
        ],
        compute_target=compute_target,
        environment_definition=inference_env,
    )

    run = test_experiment.submit(
        est,
        tags={
            "training_run_id": train_run.id,
            "run_algorithm": train_run.properties["run_algorithm"],
            "valid_score": train_run.properties["score"],
            "primary_metric": train_run.properties["primary_metric"],
        },
    )

    run.log("run_algorithm", run.tags["run_algorithm"])
    return run


def run_multiple_inferences(
    summary_df,
    train_experiment,
    test_experiment,
    compute_target,
    script_folder,
    test_dataset,
    lookback_dataset,
    max_horizon,
    target_column_name,
    time_column_name,
    freq,
):
    for run_name, run_summary in summary_df.iterrows():
        print(run_name)
        print(run_summary)
        run_id = run_summary.run_id
        train_run = Run(train_experiment, run_id)

        test_run = run_inference(
            test_experiment,
            compute_target,
            script_folder,
            train_run,
            test_dataset,
            lookback_dataset,
            max_horizon,
            target_column_name,
            time_column_name,
            freq,
        )

        print(test_run)
        summary_df.loc[summary_df.run_id == run_id, "test_run_id"] = test_run.id

    return summary_df

@@ -19,9 +19,14 @@ except ImportError:
    _torch_present = False


def align_outputs(y_predicted, X_trans, X_test, y_test,
                  predicted_column_name='predicted',
                  horizon_colname='horizon_origin'):
def align_outputs(
    y_predicted,
    X_trans,
    X_test,
    y_test,
    predicted_column_name="predicted",
    horizon_colname="horizon_origin",
):
    """
    Demonstrates how to get the output aligned to the inputs
    using pandas indexes. Helps understand what happened if
@@ -33,9 +38,13 @@ def align_outputs(y_predicted, X_trans, X_test, y_test,
    * model was asked to predict past max_horizon -> increase max horizon
    * data at start of X_test was needed for lags -> provide previous periods
    """
    if (horizon_colname in X_trans):
        df_fcst = pd.DataFrame({predicted_column_name: y_predicted,
                                horizon_colname: X_trans[horizon_colname]})
    if horizon_colname in X_trans:
        df_fcst = pd.DataFrame(
            {
                predicted_column_name: y_predicted,
                horizon_colname: X_trans[horizon_colname],
            }
        )
    else:
        df_fcst = pd.DataFrame({predicted_column_name: y_predicted})

@@ -48,20 +57,21 @@ def align_outputs(y_predicted, X_trans, X_test, y_test,

    # X_test_full's index does not include origin, so reset for merge
    df_fcst.reset_index(inplace=True)
    X_test_full = X_test_full.reset_index().drop(columns='index')
    together = df_fcst.merge(X_test_full, how='right')
    X_test_full = X_test_full.reset_index().drop(columns="index")
    together = df_fcst.merge(X_test_full, how="right")

    # drop rows where prediction or actuals are nan
    # happens because of missing actuals
    # or at edges of time due to lags/rolling windows
    clean = together[together[[target_column_name,
                               predicted_column_name]].notnull().all(axis=1)]
    return (clean)
    clean = together[
        together[[target_column_name, predicted_column_name]].notnull().all(axis=1)
    ]
    return clean


def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
                                      max_horizon, X_lookback, y_lookback,
                                      freq='D'):
def do_rolling_forecast_with_lookback(
    fitted_model, X_test, y_test, max_horizon, X_lookback, y_lookback, freq="D"
):
    """
    Produce forecasts on a rolling origin over the given test set.

@@ -83,22 +93,28 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
        horizon_time = origin_time + max_horizon * to_offset(freq)

        # Extract test data from an expanding window up-to the horizon
        expand_wind = (X[time_column_name] < horizon_time)
        expand_wind = X[time_column_name] < horizon_time
        X_test_expand = X[expand_wind]
        y_query_expand = np.zeros(len(X_test_expand)).astype(np.float)
        y_query_expand.fill(np.NaN)

        if origin_time != X[time_column_name].min():
            # Set the context by including actuals up-to the origin time
            test_context_expand_wind = (X[time_column_name] < origin_time)
            context_expand_wind = (X_test_expand[time_column_name] < origin_time)
            test_context_expand_wind = X[time_column_name] < origin_time
            context_expand_wind = X_test_expand[time_column_name] < origin_time
            y_query_expand[context_expand_wind] = y[test_context_expand_wind]

        # Print some debug info
        print("Horizon_time:", horizon_time,
              " origin_time: ", origin_time,
              " max_horizon: ", max_horizon,
              " freq: ", freq)
        print(
            "Horizon_time:",
            horizon_time,
            " origin_time: ",
            origin_time,
            " max_horizon: ",
            max_horizon,
            " freq: ",
            freq,
        )
        print("expand_wind: ", expand_wind)
        print("y_query_expand")
        print(y_query_expand)
@@ -124,9 +140,14 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
        trans_tindex = X_trans.index.get_level_values(time_column_name)
        trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
        test_roll_wind = expand_wind & (X[time_column_name] >= origin_time)
        df_list.append(align_outputs(
            y_fcst[trans_roll_wind], X_trans[trans_roll_wind],
            X[test_roll_wind], y[test_roll_wind]))
        df_list.append(
            align_outputs(
                y_fcst[trans_roll_wind],
                X_trans[trans_roll_wind],
                X[test_roll_wind],
                y[test_roll_wind],
            )
        )

        # Advance the origin time
        origin_time = horizon_time
@@ -134,7 +155,7 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
    return pd.concat(df_list, ignore_index=True)


def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq="D"):
    """
    Produce forecasts on a rolling origin over the given test set.

@@ -153,23 +174,28 @@ def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
        horizon_time = origin_time + max_horizon * to_offset(freq)

        # Extract test data from an expanding window up-to the horizon
        expand_wind = (X_test[time_column_name] < horizon_time)
        expand_wind = X_test[time_column_name] < horizon_time
        X_test_expand = X_test[expand_wind]
        y_query_expand = np.zeros(len(X_test_expand)).astype(np.float)
        y_query_expand.fill(np.NaN)

        if origin_time != X_test[time_column_name].min():
            # Set the context by including actuals up-to the origin time
            test_context_expand_wind = (X_test[time_column_name] < origin_time)
            context_expand_wind = (X_test_expand[time_column_name] < origin_time)
            y_query_expand[context_expand_wind] = y_test[
                test_context_expand_wind]
            test_context_expand_wind = X_test[time_column_name] < origin_time
            context_expand_wind = X_test_expand[time_column_name] < origin_time
            y_query_expand[context_expand_wind] = y_test[test_context_expand_wind]

        # Print some debug info
        print("Horizon_time:", horizon_time,
              " origin_time: ", origin_time,
              " max_horizon: ", max_horizon,
              " freq: ", freq)
        print(
            "Horizon_time:",
            horizon_time,
            " origin_time: ",
            origin_time,
            " max_horizon: ",
            max_horizon,
            " freq: ",
            freq,
        )
        print("expand_wind: ", expand_wind)
        print("y_query_expand")
        print(y_query_expand)
@@ -193,10 +219,14 @@ def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
        trans_tindex = X_trans.index.get_level_values(time_column_name)
        trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
        test_roll_wind = expand_wind & (X_test[time_column_name] >= origin_time)
        df_list.append(align_outputs(y_fcst[trans_roll_wind],
        df_list.append(
            align_outputs(
                y_fcst[trans_roll_wind],
                X_trans[trans_roll_wind],
                X_test[test_roll_wind],
                y_test[test_roll_wind]))
                y_test[test_roll_wind],
            )
        )

        # Advance the origin time
        origin_time = horizon_time
@@ -230,20 +260,31 @@ def map_location_cuda(storage, loc):

parser = argparse.ArgumentParser()
parser.add_argument(
    '--max_horizon', type=int, dest='max_horizon',
    default=10, help='Max Horizon for forecasting')
    "--max_horizon",
    type=int,
    dest="max_horizon",
    default=10,
    help="Max Horizon for forecasting",
)
parser.add_argument(
    '--target_column_name', type=str, dest='target_column_name',
    help='Target Column Name')
    "--target_column_name",
    type=str,
    dest="target_column_name",
    help="Target Column Name",
)
parser.add_argument(
    '--time_column_name', type=str, dest='time_column_name',
    help='Time Column Name')
    "--time_column_name", type=str, dest="time_column_name", help="Time Column Name"
)
parser.add_argument(
    '--frequency', type=str, dest='freq',
    help='Frequency of prediction')
    "--frequency", type=str, dest="freq", help="Frequency of prediction"
)
parser.add_argument(
    '--model_path', type=str, dest='model_path',
    default='model.pkl', help='Filename of model to be loaded')
    "--model_path",
    type=str,
    dest="model_path",
    default="model.pkl",
    help="Filename of model to be loaded",
)

args = parser.parse_args()
max_horizon = args.max_horizon
@@ -252,7 +293,7 @@ time_column_name = args.time_column_name
freq = args.freq
model_path = args.model_path

print('args passed are: ')
print("args passed are: ")
print(max_horizon)
print(target_column_name)
print(time_column_name)
@@ -261,39 +302,41 @@ print(model_path)

run = Run.get_context()
# get input dataset by name
test_dataset = run.input_datasets['test_data']
lookback_dataset = run.input_datasets['lookback_data']
test_dataset = run.input_datasets["test_data"]
lookback_dataset = run.input_datasets["lookback_data"]

grain_column_names = []

df = test_dataset.to_pandas_dataframe()

print('Read df')
print("Read df")
print(df)

X_test_df = test_dataset.drop_columns(columns=[target_column_name])
y_test_df = test_dataset.with_timestamp_columns(
    None).keep_columns(columns=[target_column_name])
y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(
    columns=[target_column_name]
)

X_lookback_df = lookback_dataset.drop_columns(columns=[target_column_name])
y_lookback_df = lookback_dataset.with_timestamp_columns(
    None).keep_columns(columns=[target_column_name])
y_lookback_df = lookback_dataset.with_timestamp_columns(None).keep_columns(
    columns=[target_column_name]
)

_, ext = os.path.splitext(model_path)
if ext == '.pt':
if ext == ".pt":
    # Load the fc-tcn torch model.
    assert _torch_present
    if torch.cuda.is_available():
        map_location = map_location_cuda
    else:
        map_location = 'cpu'
    with open(model_path, 'rb') as fh:
        map_location = "cpu"
    with open(model_path, "rb") as fh:
        fitted_model = torch.load(fh, map_location=map_location)
else:
    # Load the sklearn pipeline.
    fitted_model = joblib.load(model_path)

if hasattr(fitted_model, 'get_lookback'):
if hasattr(fitted_model, "get_lookback"):
    lookback = fitted_model.get_lookback()
    df_all = do_rolling_forecast_with_lookback(
        fitted_model,
@@ -302,26 +345,28 @@ if hasattr(fitted_model, 'get_lookback'):
        max_horizon,
        X_lookback_df.to_pandas_dataframe()[-lookback:],
        y_lookback_df.to_pandas_dataframe().values.T[0][-lookback:],
        freq)
        freq,
    )
else:
    df_all = do_rolling_forecast(
        fitted_model,
        X_test_df.to_pandas_dataframe(),
        y_test_df.to_pandas_dataframe().values.T[0],
        max_horizon,
        freq)
        freq,
    )

print(df_all)

print("target values:::")
print(df_all[target_column_name])
print("predicted values:::")
print(df_all['predicted'])
print(df_all["predicted"])

# Use the AutoML scoring module
regression_metrics = list(constants.REGRESSION_SCALAR_SET)
y_test = np.array(df_all[target_column_name])
y_pred = np.array(df_all['predicted'])
y_pred = np.array(df_all["predicted"])
scores = scoring.score_regression(y_test, y_pred, regression_metrics)

print("scores:")
@@ -331,12 +376,11 @@ for key, value in scores.items():
    run.log(key, value)

print("Simple forecasting model")
rmse = np.sqrt(mean_squared_error(
    df_all[target_column_name], df_all['predicted']))
rmse = np.sqrt(mean_squared_error(df_all[target_column_name], df_all["predicted"]))
print("[Test Data] \nRoot Mean squared error: %.2f" % rmse)
mae = mean_absolute_error(df_all[target_column_name], df_all['predicted'])
print('mean_absolute_error score: %.2f' % mae)
print('MAPE: %.2f' % MAPE(df_all[target_column_name], df_all['predicted']))
mae = mean_absolute_error(df_all[target_column_name], df_all["predicted"])
print("mean_absolute_error score: %.2f" % mae)
print("MAPE: %.2f" % MAPE(df_all[target_column_name], df_all["predicted"]))

run.log('rmse', rmse)
run.log('mae', mae)
run.log("rmse", rmse)
run.log("mae", mae)
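The script above logs MAPE via a `MAPE(...)` call, but the function's definition sits outside the hunks shown here. A common definition consistent with that call (an editorial sketch, not the repository's exact implementation):

```python
import numpy as np


def MAPE(actual, pred):
    """Mean absolute percentage error, in percent.

    Zero-valued actuals are dropped to avoid division by zero; this matches
    the percentage-style "%.2f" formatting used in the print above.
    """
    actual, pred = np.asarray(actual, dtype=float), np.asarray(pred, dtype=float)
    nonzero = actual != 0
    return np.mean(np.abs((actual[nonzero] - pred[nonzero]) / actual[nonzero])) * 100
```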
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,640 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Hierarchical Time Series - Automated ML\n",
"**_Generate hierarchical time series forecasts with Automated Machine Learning_**\n",
"\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For this notebook we are using a synthetic dataset portraying sales data to predict the quantity of a variety of product SKUs across several states, stores, and product categories.\n",
"\n",
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend setting the parallelism to a maximum of 320 runs per experiment per workspace. Users who want more parallelism and increase this limit might encounter Too Many Requests errors (HTTP 429).**"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prerequisites\n",
"You'll need to create a compute instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1.0 Set up workspace, datastore, experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003526897
}
},
"outputs": [],
"source": [
"import azureml.core\n",
"from azureml.core import Workspace, Datastore\n",
"import pandas as pd\n",
"\n",
"# Set up your workspace\n",
"ws = Workspace.from_config()\n",
"ws.get_details()\n",
"\n",
"# Set up your datastores\n",
"dstore = ws.get_default_datastore()\n",
"\n",
"output = {}\n",
"output[\"SDK version\"] = azureml.core.VERSION\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Default datastore name\"] = dstore.name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose an experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003540729
}
},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment = Experiment(ws, \"automl-hts\")\n",
"\n",
"print(\"Experiment name: \" + experiment.name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2.0 Data\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"nteract": {
"transient": {
"deleting": false
}
}
},
"source": [
"### Upload local csv files to datastore\n",
"You can upload your train and inference csv files to the default datastore in your workspace. \n",
"\n",
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore.datastore?view=azure-ml-py) documentation on how to access data from Datastore."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"datastore_path = \"hts-sample\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"datastore = ws.get_default_datastore()\n",
"datastore"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create the TabularDatasets \n",
"\n",
"Datasets in Azure Machine Learning are references to specific data in a Datastore. The data can be retrieved as a [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py). We will read in the data as a pandas DataFrame, upload it to the datastore, and register it to your Workspace using ```register_pandas_dataframe``` so it can be passed as an input into the training pipeline. We will use the inference dataset as part of the forecasting pipeline. This step only needs to be completed once."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007017296
}
},
"outputs": [],
"source": [
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
"\n",
"registered_train = TabularDatasetFactory.register_pandas_dataframe(\n",
"    pd.read_csv(\"Data/hts-sample-train.csv\"),\n",
"    target=(datastore, \"hts-sample\"),\n",
"    name=\"hts-sales-train\",\n",
")\n",
"registered_inference = TabularDatasetFactory.register_pandas_dataframe(\n",
"    pd.read_csv(\"Data/hts-sample-test.csv\"),\n",
"    target=(datastore, \"hts-sample\"),\n",
"    name=\"hts-sales-test\",\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 3.0 Build the training pipeline\n",
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose a compute target\n",
"\n",
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
"\n",
"**Creation of AmlCompute takes approximately 5 minutes.**\n",
"\n",
"If the AmlCompute with that name is already in your workspace, this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007037308
}
},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"\n",
"# Name your cluster\n",
"compute_name = \"hts-compute\"\n",
"\n",
"\n",
"if compute_name in ws.compute_targets:\n",
"    compute_target = ws.compute_targets[compute_name]\n",
"    if compute_target and type(compute_target) is AmlCompute:\n",
"        print(\"Found compute target: \" + compute_name)\n",
"else:\n",
"    print(\"Creating a new compute target...\")\n",
"    provisioning_config = AmlCompute.provisioning_configuration(\n",
"        vm_size=\"STANDARD_D16S_V3\", max_nodes=20\n",
"    )\n",
"    # Create the compute target\n",
"    compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
"\n",
"    # Can poll for a minimum number of nodes and for a specific timeout.\n",
"    # If no min node count is provided it will use the scale settings for the cluster\n",
"    compute_target.wait_for_completion(\n",
"        show_output=True, min_node_count=None, timeout_in_minutes=20\n",
"    )\n",
"\n",
"    # For a more detailed view of current cluster status, use the 'status' property\n",
"    print(compute_target.status.serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up training parameters\n",
"\n",
"This dictionary defines the AutoML and hierarchy settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, the hierarchy definition, and the level of the hierarchy at which to train.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **task** | forecasting |\n",
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i> |\n",
"| **blocked_models** | Blocked models won't be used by AutoML. |\n",
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
"| **label_column_name** | The name of the label column. |\n",
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
"| **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **hierarchy_column_names** | The names of columns that define the hierarchical structure of the data from highest level to most granular. |\n",
"| **training_level** | The level of the hierarchy to be used for training models. |\n",
"| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
"| **time_series_id_column_name** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
"| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n",
"| **model_explainability** | Flag to disable explaining the best automated ML model at the end of all training iterations. The default is True and will block non-explainable models which may impact the forecast accuracy. For more information, see [Interpretability: model explanations in automated machine learning](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-machine-learning-interpretability-automl). |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007061544
}
},
"outputs": [],
"source": [
"from azureml.train.automl.runtime._hts.hts_parameters import HTSTrainParameters\n",
"\n",
"model_explainability = True\n",
"\n",
"engineered_explanations = False\n",
"# Define your hierarchy. Adjust the settings below based on your dataset.\n",
"hierarchy = [\"state\", \"store_id\", \"product_category\", \"SKU\"]\n",
"training_level = \"SKU\"\n",
"\n",
"# Set your forecast parameters. Adjust the settings below based on your dataset.\n",
"time_column_name = \"date\"\n",
"label_column_name = \"quantity\"\n",
"forecast_horizon = 7\n",
"\n",
"\n",
"automl_settings = {\n",
"    \"task\": \"forecasting\",\n",
"    \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
"    \"label_column_name\": label_column_name,\n",
"    \"time_column_name\": time_column_name,\n",
"    \"forecast_horizon\": forecast_horizon,\n",
"    \"hierarchy_column_names\": hierarchy,\n",
"    \"hierarchy_training_level\": training_level,\n",
"    \"track_child_runs\": False,\n",
"    \"pipeline_fetch_max_batch_size\": 15,\n",
"    \"model_explainability\": model_explainability,\n",
"    # The following settings are specific to this sample and should be adjusted according to your own needs.\n",
"    \"iteration_timeout_minutes\": 10,\n",
"    \"iterations\": 10,\n",
"    \"n_cross_validations\": 2,\n",
"}\n",
"\n",
"hts_parameters = HTSTrainParameters(\n",
"    automl_settings=automl_settings,\n",
"    hierarchy_column_names=hierarchy,\n",
"    training_level=training_level,\n",
"    enable_engineered_explanations=engineered_explanations,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up hierarchy training pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"A parallel run step is used to train the hierarchy. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The `process_count_per_node` is based on the number of cores of the compute VM. The `node_count` determines the number of nodes to use; increasing the node count will speed up the training process.\n",
"\n",
"* **experiment:** The experiment used for training.\n",
"* **train_data:** The tabular dataset to be used as input to the training run.\n",
"* **node_count:** The number of compute nodes to be used for running the user script. We recommend starting with 3 and increasing the node_count if the training time is taking too long.\n",
"* **process_count_per_node:** Process count per node; we recommend a 2:1 ratio of cores to processes per node, e.g. if a node has 16 cores, configure a process count of 8 or fewer for optimal performance.\n",
"* **train_pipeline_parameters:** The set of configuration parameters defined in the previous section. \n",
"\n",
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"\n",
"\n",
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
"    experiment=experiment,\n",
"    train_data=registered_train,\n",
"    compute_target=compute_target,\n",
"    node_count=2,\n",
"    process_count_per_node=8,\n",
"    train_pipeline_parameters=hts_parameters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit the pipeline to run\n",
"Next we submit our pipeline to run. The whole training pipeline takes about 1 hour using a STANDARD_D16S_V3 VM with our current ParallelRunConfig setting."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run = experiment.submit(training_pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Check the run status: if training_run is in a Completed state, continue to forecasting; if it is in another state, check the portal for failures. A minimal status check is sketched below."
]
},
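{
"cell_type": "markdown",
"metadata": {},
"source": [
"(Editorial sketch, not part of the original notebook: it only uses the `training_run` object created above; the `\"error\"` key in the details dictionary is an assumption about the run-details payload.)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: inspect the pipeline run state before moving on to forecasting.\n",
"status = training_run.get_status()\n",
"print(\"Training pipeline status:\", status)\n",
"if status != \"Completed\":\n",
"    # Step-level logs are easiest to inspect in the portal UI.\n",
"    print(training_run.get_details().get(\"error\", \"See the portal for step-level logs.\"))"
]
},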
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### [Optional] Get the explanations\n",
"First we need to download the explanations to the local disk."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"if model_explainability:\n",
"    expl_output = training_run.get_pipeline_output(\"explanations\")\n",
"    expl_output.download(\"training_explanations\")\n",
"else:\n",
"    print(\n",
"        \"Model explanations are available only if model_explainability is set to True.\"\n",
"    )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The explanations are downloaded to the \"training_explanations/azureml\" directory."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"if model_explainability:\n",
"    explanations_directory = os.listdir(\n",
"        os.path.join(\"training_explanations\", \"azureml\")\n",
"    )\n",
"    if len(explanations_directory) > 1:\n",
"        print(\n",
"            \"Warning! The directory contains multiple explanations, only the first one will be displayed.\"\n",
"        )\n",
"    print(\"The explanations are located at {}.\".format(explanations_directory[0]))\n",
"    # Now we will list all the explanations.\n",
"    explanation_path = os.path.join(\n",
"        \"training_explanations\",\n",
"        \"azureml\",\n",
"        explanations_directory[0],\n",
"        \"training_explanations\",\n",
"    )\n",
"    print(\"Available explanations\")\n",
"    print(\"==============================\")\n",
"    print(\"\\n\".join(os.listdir(explanation_path)))\n",
"else:\n",
"    print(\n",
"        \"Model explanations are available only if model_explainability is set to True.\"\n",
"    )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"View the explanations on \"state\" level."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import display\n",
"\n",
"explanation_type = \"raw\"\n",
"level = \"state\"\n",
"\n",
"if model_explainability:\n",
"    display(\n",
"        pd.read_csv(\n",
"            os.path.join(explanation_path, \"{}_explanations_{}.csv\").format(\n",
"                explanation_type, level\n",
"            )\n",
"        )\n",
"    )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 5.0 Forecasting\n",
"For hierarchical forecasting we need to provide the HTSInferenceParameters object.\n",
"#### HTSInferenceParameters arguments\n",
"* **hierarchy_forecast_level:** The default level of the hierarchy at which to produce the forecast.\n",
"* **allocation_method:** \\[Optional] The disaggregation method to use if the hierarchy forecast level specified is below the defined hierarchy training level. <br><i>(average historical proportions) 'average_historical_proportions'</i><br><i>(proportions of the historical averages) 'proportions_of_historical_average'</i>\n",
"\n",
"#### get_many_models_batch_inference_steps arguments\n",
"* **experiment:** The experiment used for the inference run.\n",
"* **inference_data:** The data to use for inferencing. It should be the same schema as used for training.\n",
"* **compute_target:** The compute target that runs the inference pipeline.\n",
"* **node_count:** The number of compute nodes to be used for running the user script. We recommend starting with the number of cores per node (varies by compute sku).\n",
"* **process_count_per_node:** \\[Optional] The number of processes per node; by default it is 4.\n",
"* **train_run_id:** \\[Optional] The run id of the hierarchy training; by default it is the latest successful training hts run in the experiment.\n",
"* **train_experiment_name:** \\[Optional] The train experiment that contains the train pipeline. This is only needed when the train pipeline is not in the same experiment as the inference pipeline."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.automl.runtime._hts.hts_parameters import HTSInferenceParameters\n",
"\n",
"inference_parameters = HTSInferenceParameters(\n",
"    hierarchy_forecast_level=\"store_id\",  # The setting is specific to this dataset and should be changed based on your dataset.\n",
"    allocation_method=\"proportions_of_historical_average\",\n",
")\n",
"\n",
"steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
"    experiment=experiment,\n",
"    inference_data=registered_inference,\n",
"    compute_target=compute_target,\n",
"    inference_pipeline_parameters=inference_parameters,\n",
"    node_count=2,\n",
"    process_count_per_node=8,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"inference_pipeline = Pipeline(ws, steps=steps)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_run = experiment.submit(inference_pipeline)\n",
"inference_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Retrieve results\n",
"\n",
"Forecast results can be retrieved through the following code. The prediction results summary and the actual predictions are downloaded to the forecast_results folder; a sketch for reading them back follows the download cell."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"forecasts = inference_run.get_pipeline_output(\"forecasts\")\n",
"forecasts.download(\"forecast_results\")"
]
},
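{
"cell_type": "markdown",
"metadata": {},
"source": [
"(Editorial sketch, not part of the original notebook: one way to load the downloaded predictions with pandas. The exact file layout under `forecast_results` depends on the run, so the recursive listing and csv format are assumptions.)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import glob\n",
"import os\n",
"import pandas as pd\n",
"\n",
"# List everything that was downloaded and peek at the first regular file.\n",
"result_files = glob.glob(\"forecast_results/**/*\", recursive=True)\n",
"print(\"\\n\".join(result_files))\n",
"first = next(f for f in result_files if os.path.isfile(f))\n",
"preds = pd.read_csv(first)  # adjust the separator/columns to your data\n",
"preds.head()"
]
},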
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Resubmit the Pipeline\n",
"\n",
"The inference pipeline can be submitted with different configurations."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_run = experiment.submit(\n",
"    inference_pipeline, pipeline_parameters={\"hierarchy_forecast_level\": \"state\"}\n",
")\n",
"inference_run.wait_for_completion(show_output=False)"
]
}
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"categories": [
"how-to-use-azureml",
"automated-machine-learning"
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
@@ -0,0 +1,4 @@
name: auto-ml-forecasting-hierarchical-timeseries
dependencies:
- pip:
  - azureml-sdk
@@ -0,0 +1,857 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Many Models - Automated ML\n",
"**_Generate many models time series forecasts with Automated Machine Learning_**\n",
"\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For this notebook we are using a synthetic dataset portraying sales data to predict the quantity of a variety of product SKUs across several states, stores, and product categories.\n",
"\n",
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend setting the parallelism to a maximum of 320 runs per experiment per workspace. Users who want more parallelism and increase this limit might encounter Too Many Requests errors (HTTP 429).**"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prerequisites\n",
"You'll need to create a compute instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1.0 Set up workspace, datastore, experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003526897
}
},
"outputs": [],
"source": [
"import azureml.core\n",
"from azureml.core import Workspace, Datastore\n",
"import pandas as pd\n",
"\n",
"# Set up your workspace\n",
"ws = Workspace.from_config()\n",
"ws.get_details()\n",
"\n",
"# Set up your datastores\n",
"dstore = ws.get_default_datastore()\n",
"\n",
"output = {}\n",
"output[\"SDK version\"] = azureml.core.VERSION\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Default datastore name\"] = dstore.name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose an experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003540729
}
},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment = Experiment(ws, \"automl-many-models\")\n",
"\n",
"print(\"Experiment name: \" + experiment.name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2.0 Data\n",
"\n",
"This notebook uses simulated orange juice sales data to walk you through the process of training many models on Azure Machine Learning using Automated ML. \n",
"\n",
"The time series data used in this example was simulated based on the University of Chicago's Dominick's Finer Foods dataset, which featured two years of sales of 3 different orange juice brands for individual stores. The full simulated dataset includes 3,991 stores with 3 orange juice brands each, thus allowing 11,973 models to be trained to showcase the power of the many models pattern.\n",
"\n",
" \n",
"In this notebook, two datasets will be created: one with all 11,973 files and one with only 10 files that can be used to quickly test and debug. For each dataset, you'll be walked through the process of:\n",
"\n",
"1. Registering the blob container as a Datastore to the Workspace\n",
"2. Registering a tabular dataset to the Workspace"
]
},
{
"cell_type": "markdown",
"metadata": {
"nteract": {
"transient": {
"deleting": false
}
}
},
"source": [
"### 2.1 Data Preparation\n",
"The OJ data is available in the public blob container. The data is split to be used for training and for inferencing. For the current dataset, the data was split on the time column ('WeekStarting') into the periods before and after '1992-5-28'.\n",
"\n",
"The container has\n",
"<ol>\n",
"    <li><b>'oj-data-tabular'</b> and <b>'oj-inference-tabular'</b> folders that contain training and inference data respectively for the 11,973 models. </li>\n",
"    <li><b>'oj-data-small-tabular'</b> and <b>'oj-inference-small-tabular'</b> folders that contain training and inference data for 10 models.</li>\n",
"</ol>\n",
"\n",
"To create the [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) needed for the ParallelRunStep, you first need to register the blob container to the workspace."
]
},
{
"cell_type": "markdown",
"metadata": {
"nteract": {
"transient": {
"deleting": false
}
}
},
"source": [
"<b>To use your own data, put it in a blob storage folder. As shown, it can be one file or multiple files. We can then register a datastore using that blob as shown below.</b>\n",
" \n",
"<h3>What the sample data in the blob store looks like</h3>\n",
"\n",
"['oj-data-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
"\n",
"\n",
"['oj-inference-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
"\n",
"\n",
"['oj-data-small-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
"\n",
"\n",
"\n",
"['oj-inference-small-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### 2.2 Register the blob container as DataStore\n",
"\n",
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
"\n",
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.\n",
"\n",
"In this next step, we will be registering blob storage as datastore to the Workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Datastore\n",
"\n",
"# Please change the following to point to your own blob container and pass in account_key\n",
"blob_datastore_name = \"automl_many_models\"\n",
"container_name = \"automl-sample-notebook-data\"\n",
"account_name = \"automlsamplenotebookdata\"\n",
"\n",
"oj_datastore = Datastore.register_azure_blob_container(\n",
"    workspace=ws,\n",
"    datastore_name=blob_datastore_name,\n",
"    container_name=container_name,\n",
"    account_name=account_name,\n",
"    create_if_not_exists=True,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### 2.3 Using tabular datasets \n",
"\n",
"Now that the datastore is available from the Workspace, a [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) can be created. Datasets in Azure Machine Learning are references to specific data in a Datastore. We are using TabularDataset so that users whose data is in one or many files (*.parquet or *.csv), and who have not yet split up the data according to the group columns needed for training, can do so using the out-of-the-box 'partition_by' feature of TabularDataset shown in section 5.0 below (a minimal sketch also follows the next cell)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007017296
}
},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"\n",
"ds_name_small = \"oj-data-small-tabular\"\n",
"input_ds_small = Dataset.Tabular.from_delimited_files(\n",
"    path=oj_datastore.path(ds_name_small + \"/\"), validate=False\n",
")\n",
"\n",
"inference_name_small = \"oj-inference-small-tabular\"\n",
"inference_ds_small = Dataset.Tabular.from_delimited_files(\n",
"    path=oj_datastore.path(inference_name_small + \"/\"), validate=False\n",
")"
]
},
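{
"cell_type": "markdown",
"metadata": {},
"source": [
"(Editorial sketch, not part of the original notebook: partitioning a dataset that has not been pre-split by group columns. The partition keys below assume the OJ schema's Store/Brand columns; check the [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) reference for the exact arguments in your SDK version.)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: write one partition per (Store, Brand) group back to the datastore.\n",
"partitioned_ds = input_ds_small.partition_by(\n",
"    partition_keys=[\"Store\", \"Brand\"],  # assumed group columns for this dataset\n",
"    target=(dstore, \"oj-partitioned\"),  # illustrative output path\n",
"    name=\"oj_data_small_partitioned\",\n",
")"
]
},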
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### 2.4 Configure data with ``OutputFileDatasetConfig`` objects\n",
|
||||
"This step shows how to configure output data from a pipeline step. One of the use cases for this step is when you want to do some preprocessing before feeding the data to training step. Intermediate data (or output of a step) is represented by an ``OutputFileDatasetConfig`` object. ``output_data`` is produced as the output of a step. Optionally, this data can be registered as a dataset by calling the ``register_on_complete`` method. If you create an ``OutputFileDatasetConfig`` in one step and use it as an input to another step, that data dependency between steps creates an implicit execution order in the pipeline.\n",
|
||||
"\n",
|
||||
"``OutputFileDatasetConfig`` objects return a directory, and by default write output to the default datastore of the workspace.\n",
|
||||
"\n",
|
||||
"Since instance creation for class ``OutputTabularDatasetConfig`` is not allowed, we first create an instance of this class. Then we use the ``read_parquet_files`` method to read the parquet file into ``OutputTabularDatasetConfig``."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.data.output_dataset_config import OutputFileDatasetConfig\n",
|
||||
"\n",
|
||||
"output_data = OutputFileDatasetConfig(\n",
|
||||
" name=\"processed_data\", destination=(dstore, \"outputdataset/{run-id}/{output-name}\")\n",
|
||||
").as_upload()\n",
|
||||
"# output_data_dataset = output_data.register_on_complete(\n",
|
||||
"# name='processed_data', description = 'files from prev step')\n",
|
||||
"output_data = output_data.read_parquet_files()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 3.0 Build the training pipeline\n",
|
||||
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Choose a compute target\n",
|
||||
"\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||
"\n",
|
||||
"\\*\\*Creation of AmlCompute takes approximately 5 minutes.**\n",
|
||||
"\n",
|
||||
"If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613007037308
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||
"\n",
|
||||
"# Name your cluster\n",
|
||||
"compute_name = \"mm-compute\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"if compute_name in ws.compute_targets:\n",
|
||||
" compute_target = ws.compute_targets[compute_name]\n",
|
||||
" if compute_target and type(compute_target) is AmlCompute:\n",
|
||||
" print(\"Found compute target: \" + compute_name)\n",
|
||||
"else:\n",
|
||||
" print(\"Creating a new compute target...\")\n",
|
||||
" provisioning_config = AmlCompute.provisioning_configuration(\n",
|
||||
" vm_size=\"STANDARD_D16S_V3\", max_nodes=20\n",
|
||||
" )\n",
|
||||
" # Create the compute target\n",
|
||||
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
|
||||
"\n",
|
||||
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
|
||||
" # If no min node count is provided it will use the scale settings for the cluster\n",
|
||||
" compute_target.wait_for_completion(\n",
|
||||
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # For a more detailed view of current cluster status, use the 'status' property\n",
|
||||
" print(compute_target.status.serialize())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Configure the training run's environment\n",
|
||||
"The next step is making sure that the remote training run has all the dependencies needed by the training steps. Dependencies and the runtime context are set by creating and configuring a RunConfiguration object.\n",
|
||||
"\n",
|
||||
"The code below shows two options for handling dependencies. As presented, with ``USE_CURATED_ENV = True``, the configuration is based on a [curated environment](https://docs.microsoft.com/en-us/azure/machine-learning/resource-curated-environments). Curated environments have prebuilt Docker images in the [Microsoft Container Registry](https://hub.docker.com/publishers/microsoftowner). For more information, see [Azure Machine Learning curated environments](https://docs.microsoft.com/en-us/azure/machine-learning/resource-curated-environments).\n",
|
||||
"\n",
|
||||
"The path taken if you change ``USE_CURATED_ENV`` to False shows the pattern for explicitly setting your dependencies. In that scenario, a new custom Docker image will be created and registered in an Azure Container Registry within your resource group (see [Introduction to private Docker container registries in Azure](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-intro)). Building and registering this image can take quite a few minutes."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.runconfig import RunConfiguration\n",
|
||||
"from azureml.core.conda_dependencies import CondaDependencies\n",
|
||||
"from azureml.core import Environment\n",
|
||||
"\n",
|
||||
"aml_run_config = RunConfiguration()\n",
|
||||
"aml_run_config.target = compute_target\n",
|
||||
"\n",
|
||||
"USE_CURATED_ENV = True\n",
|
||||
"if USE_CURATED_ENV:\n",
|
||||
" curated_environment = Environment.get(\n",
|
||||
" workspace=ws, name=\"AzureML-sklearn-0.24-ubuntu18.04-py37-cpu\"\n",
|
||||
" )\n",
|
||||
" aml_run_config.environment = curated_environment\n",
|
||||
"else:\n",
|
||||
" aml_run_config.environment.python.user_managed_dependencies = False\n",
|
||||
"\n",
|
||||
" # Add some packages relied on by data prep step\n",
|
||||
" aml_run_config.environment.python.conda_dependencies = CondaDependencies.create(\n",
|
||||
" conda_packages=[\"pandas\", \"scikit-learn\"],\n",
|
||||
" pip_packages=[\"azureml-sdk\", \"azureml-dataset-runtime[fuse,pandas]\"],\n",
|
||||
" pin_sdk_version=False,\n",
|
||||
" )"
|
||||
]
|
||||
},
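{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you are unsure which curated environments exist in your workspace, they can be listed programmatically. The short sketch below assumes only the ``ws`` workspace object defined earlier; curated environment names are prefixed with \"AzureML-\"."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A minimal sketch: list the curated environments available in this workspace.\n",
"# Assumes only the `ws` Workspace object defined earlier.\n",
"envs = Environment.list(workspace=ws)\n",
"for env_name in sorted(envs):\n",
"    if env_name.startswith(\"AzureML-\"):\n",
"        print(env_name)"
]
},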
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up training parameters\n",
|
||||
"\n",
|
||||
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings inncluding the name of the time column, the maximum forecast horizon, and the partition column name definition.\n",
|
||||
"\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **task** | forecasting |\n",
|
||||
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i> |\n",
|
||||
"| **blocked_models** | Blocked models won't be used by AutoML. |\n",
|
||||
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
|
||||
"| **label_column_name** | The name of the label column. |\n",
|
||||
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
|
||||
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
|
||||
"| **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. |\n",
|
||||
"| **time_column_name** | The name of your time column. |\n",
|
||||
"| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
|
||||
"| **time_series_id_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
|
||||
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
|
||||
"| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n",
|
||||
"| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"gather": {
|
||||
"logged": 1613007061544
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
|
||||
" ManyModelsTrainParameters,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"partition_column_names = [\"Store\", \"Brand\"]\n",
|
||||
"automl_settings = {\n",
|
||||
" \"task\": \"forecasting\",\n",
|
||||
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
|
||||
" \"iteration_timeout_minutes\": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n",
|
||||
" \"iterations\": 15,\n",
|
||||
" \"experiment_timeout_hours\": 0.25,\n",
|
||||
" \"label_column_name\": \"Quantity\",\n",
|
||||
" \"n_cross_validations\": 3,\n",
|
||||
" \"time_column_name\": \"WeekStarting\",\n",
|
||||
" \"drop_column_names\": \"Revenue\",\n",
|
||||
" \"forecast_horizon\": 6,\n",
|
||||
" \"time_series_id_column_names\": partition_column_names,\n",
|
||||
" \"track_child_runs\": False,\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"mm_paramters = ManyModelsTrainParameters(\n",
|
||||
" automl_settings=automl_settings, partition_column_names=partition_column_names\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Construct your pipeline steps\n",
|
||||
"Once you have the compute resource and environment created, you're ready to define your pipeline's steps. There are many built-in steps available via the Azure Machine Learning SDK, as you can see on the [reference documentation for the azureml.pipeline.steps package](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps?view=azure-ml-py). The most flexible class is [PythonScriptStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.python_script_step.pythonscriptstep?view=azure-ml-py), which runs a Python script.\n",
|
||||
"\n",
|
||||
"Your data preparation code is in a subdirectory (in this example, \"data_preprocessing_tabular.py\" in the directory \"./scripts\"). As part of the pipeline creation process, this directory is zipped and uploaded to the compute_target and the step runs the script specified as the value for ``script_name``.\n",
|
||||
"\n",
|
||||
"The ``arguments`` values specify the inputs and outputs of the step. In the example below, the baseline data is the ``input_ds_small`` dataset. The script data_preprocessing_tabular.py does whatever data-transformation tasks are appropriate to the task at hand and outputs the data to ``output_data``, of type ``OutputFileDatasetConfig``. For more information, see [Moving data into and between ML pipeline steps (Python)](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-move-data-in-out-of-pipelines). The step will run on the machine defined by ``compute_target``, using the configuration ``aml_run_config``.\n",
|
||||
"\n",
|
||||
"Reuse of previous results (``allow_reuse``) is key when using pipelines in a collaborative environment since eliminating unnecessary reruns offers agility. Reuse is the default behavior when the ``script_name``, ``inputs``, and the parameters of a step remain the same. When reuse is allowed, results from the previous run are immediately sent to the next step. If ``allow_reuse`` is set to False, a new run will always be generated for this step during pipeline execution.\n",
|
||||
"\n",
|
||||
"> Note that we only support partitioned FileDataset and TabularDataset without partition when using such output as input."
|
||||
]
|
||||
},
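{
"cell_type": "markdown",
"metadata": {},
"source": [
"The ``output_data`` object used below is an ``OutputFileDatasetConfig`` that receives the processed data. If it was not created earlier in your session, a minimal sketch is shown here; the dataset name and destination path are illustrative assumptions, and ``dstore`` is the workspace datastore used elsewhere in this notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data import OutputFileDatasetConfig\n",
"\n",
"# Illustrative sketch only: declare the intermediate output of the data prep step.\n",
"# The dataset name and datastore path below are assumptions for this example.\n",
"output_data = OutputFileDatasetConfig(\n",
"    name=\"processed_data\", destination=(dstore, \"oj/processed_data/\")\n",
")"
]
},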
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.steps import PythonScriptStep\n",
|
||||
"\n",
|
||||
"dataprep_source_dir = \"./scripts\"\n",
|
||||
"entry_point = \"data_preprocessing_tabular.py\"\n",
|
||||
"ds_input = input_ds_small.as_named_input(\"train_10_models\")\n",
|
||||
"\n",
|
||||
"data_prep_step = PythonScriptStep(\n",
|
||||
" script_name=entry_point,\n",
|
||||
" source_directory=dataprep_source_dir,\n",
|
||||
" arguments=[\"--input\", ds_input, \"--output\", output_data],\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" runconfig=aml_run_config,\n",
|
||||
" allow_reuse=False,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"input_ds_small = output_data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up many models pipeline"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
|
||||
"\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **experiment** | The experiment used for training. |\n",
|
||||
"| **train_data** | The file dataset to be used as input to the training run. |\n",
|
||||
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. |\n",
|
||||
"| **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node or optimal performance. |\n",
|
||||
"| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n",
|
||||
"\n",
|
||||
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
|
||||
" experiment=experiment,\n",
|
||||
" train_data=input_ds_small,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" node_count=2,\n",
|
||||
" process_count_per_node=8,\n",
|
||||
" run_invocation_timeout=920,\n",
|
||||
" train_pipeline_parameters=mm_paramters,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.core import Pipeline\n",
|
||||
"\n",
|
||||
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Submit the pipeline to run\n",
|
||||
"Next we submit our pipeline to run. The whole training pipeline takes about 40m using a STANDARD_D16S_V3 VM with our current ParallelRunConfig setting."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"training_run = experiment.submit(training_pipeline)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"training_run.wait_for_completion(show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Check the run status, if training_run is in completed state, continue to forecasting. If training_run is in another state, check the portal for failures."
|
||||
]
|
||||
},
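{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A small sketch: programmatically confirm the training run finished before moving on.\n",
"status = training_run.get_status()\n",
"print(\"Training run status:\", status)\n",
"assert status == \"Completed\", \"Inspect the run in the portal before continuing.\""
]
},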
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 5.0 Publish and schedule the train pipeline (Optional)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 5.1 Publish the pipeline\n",
|
||||
"\n",
|
||||
"Once you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipeline#publish-a-pipeline) for additional information on publishing and calling pipelines."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# published_pipeline = training_pipeline.publish(name = 'automl_train_many_models',\n",
|
||||
"# description = 'train many models',\n",
|
||||
"# version = '1',\n",
|
||||
"# continue_on_step_failure = False)"
|
||||
]
|
||||
},
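{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a sketch of calling the published pipeline programmatically (assuming the publish cell above was uncommented and run, so that ``published_pipeline`` exists), it can be retrieved by id and resubmitted:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# from azureml.pipeline.core import PublishedPipeline\n",
"\n",
"# # A sketch, assuming the publish cell above was run:\n",
"# pipeline_by_id = PublishedPipeline.get(ws, id=published_pipeline.id)\n",
"# rerun = pipeline_by_id.submit(ws, experiment_name=experiment.name)"
]
},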
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 7.2 Schedule the pipeline\n",
|
||||
"You can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain models every month or based on another trigger such as data drift."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# from azureml.pipeline.core import Schedule, ScheduleRecurrence\n",
|
||||
"\n",
|
||||
"# training_pipeline_id = published_pipeline.id\n",
|
||||
"\n",
|
||||
"# recurrence = ScheduleRecurrence(frequency=\"Month\", interval=1, start_time=\"2020-01-01T09:00:00\")\n",
|
||||
"# recurring_schedule = Schedule.create(ws, name=\"automl_training_recurring_schedule\",\n",
|
||||
"# description=\"Schedule Training Pipeline to run on the first day of every month\",\n",
|
||||
"# pipeline_id=training_pipeline_id,\n",
|
||||
"# experiment_name=experiment.name,\n",
|
||||
"# recurrence=recurrence)"
|
||||
]
|
||||
},
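{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you later need to pause the recurring schedule, it can be looked up by pipeline id and disabled. A sketch, assuming the schedule above was created:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# from azureml.pipeline.core import Schedule\n",
"\n",
"# # A sketch, assuming the recurring schedule above exists:\n",
"# for s in Schedule.list(ws, pipeline_id=training_pipeline_id):\n",
"#     s.disable(wait_for_provisioning=True)"
]
},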
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 6.0 Forecasting"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up output dataset for inference data\n",
|
||||
"Output of inference can be represented as [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object and OutputFileDatasetConfig can be registered as a dataset. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.data import OutputFileDatasetConfig\n",
|
||||
"\n",
|
||||
"output_inference_data_ds = OutputFileDatasetConfig(\n",
|
||||
" name=\"many_models_inference_output\", destination=(dstore, \"oj/inference_data/\")\n",
|
||||
").register_on_complete(name=\"oj_inference_data_ds\")"
|
||||
]
|
||||
},
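{
"cell_type": "markdown",
"metadata": {},
"source": [
"Because of ``register_on_complete``, the inference output will be registered as a dataset named ``oj_inference_data_ds`` once the inference run finishes. A sketch of retrieving it afterwards:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# from azureml.core import Dataset\n",
"\n",
"# # A sketch: fetch the registered inference output after the run below completes.\n",
"# inference_output_ds = Dataset.get_by_name(ws, name=\"oj_inference_data_ds\")"
]
},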
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For many models we need to provide the ManyModelsInferenceParameters object.\n",
|
||||
"\n",
|
||||
"#### ManyModelsInferenceParameters arguments\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **partition_column_names** | List of column names that identifies groups. |\n",
|
||||
"| **target_column_name** | \\[Optional] Column name only if the inference dataset has the target. |\n",
|
||||
"| **time_column_name** | \\[Optional] Column name only if it is timeseries. |\n",
|
||||
"| **many_models_run_id** | \\[Optional] Many models run id where models were trained. |\n",
|
||||
"\n",
|
||||
"#### get_many_models_batch_inference_steps arguments\n",
|
||||
"| Property | Description|\n",
|
||||
"| :--------------- | :------------------- |\n",
|
||||
"| **experiment** | The experiment used for inference run. |\n",
|
||||
"| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n",
|
||||
"| **compute_target** The compute target that runs the inference pipeline.|\n",
|
||||
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n",
|
||||
"| **process_count_per_node** The number of processes per node.\n",
|
||||
"| **train_run_id** | \\[Optional] The run id of the hierarchy training, by default it is the latest successful training many model run in the experiment. |\n",
|
||||
"| **train_experiment_name** | \\[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n",
|
||||
"| **process_count_per_node** | \\[Optional] The number of processes per node, by default it's 4. |"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
|
||||
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
|
||||
" ManyModelsInferenceParameters,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"mm_parameters = ManyModelsInferenceParameters(\n",
|
||||
" partition_column_names=[\"Store\", \"Brand\"],\n",
|
||||
" time_column_name=\"WeekStarting\",\n",
|
||||
" target_column_name=\"Quantity\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
|
||||
" experiment=experiment,\n",
|
||||
" inference_data=inference_ds_small,\n",
|
||||
" node_count=2,\n",
|
||||
" process_count_per_node=8,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" run_invocation_timeout=300,\n",
|
||||
" output_datastore=output_inference_data_ds,\n",
|
||||
" train_run_id=training_run.id,\n",
|
||||
" train_experiment_name=training_run.experiment.name,\n",
|
||||
" inference_pipeline_parameters=mm_parameters,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.core import Pipeline\n",
|
||||
"\n",
|
||||
"inference_pipeline = Pipeline(ws, steps=inference_steps)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"inference_run = experiment.submit(inference_pipeline)\n",
|
||||
"inference_run.wait_for_completion(show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retrieve results\n",
|
||||
"\n",
|
||||
"The forecasting pipeline forecasts the orange juice quantity for a Store by Brand. The pipeline returns one file with the predictions for each store and outputs the result to the forecasting_output Blob container. The details of the blob container is listed in 'forecasting_output.txt' under Outputs+logs. \n",
|
||||
"\n",
|
||||
"The following code snippet:\n",
|
||||
"1. Downloads the contents of the output folder that is passed in the parallel run step \n",
|
||||
"2. Reads the parallel_run_step.txt file that has the predictions as pandas dataframe and \n",
|
||||
"3. Displays the top 10 rows of the predictions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline\n",
|
||||
"\n",
|
||||
"forecasting_results_name = \"forecasting_results\"\n",
|
||||
"forecasting_output_name = \"many_models_inference_output\"\n",
|
||||
"forecast_file = get_output_from_mm_pipeline(\n",
|
||||
" inference_run, forecasting_results_name, forecasting_output_name\n",
|
||||
")\n",
|
||||
"df = pd.read_csv(forecast_file, delimiter=\" \", header=None)\n",
|
||||
"df.columns = [\n",
|
||||
" \"Week Starting\",\n",
|
||||
" \"Store\",\n",
|
||||
" \"Brand\",\n",
|
||||
" \"Quantity\",\n",
|
||||
" \"Advert\",\n",
|
||||
" \"Price\",\n",
|
||||
" \"Revenue\",\n",
|
||||
" \"Predicted\",\n",
|
||||
"]\n",
|
||||
"print(\n",
|
||||
" \"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\"\n",
|
||||
")\n",
|
||||
"df.head(10)"
|
||||
]
|
||||
},
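{
"cell_type": "markdown",
"metadata": {},
"source": [
"Since the output above includes both the actual ``Quantity`` and the ``Predicted`` column, you can optionally score the forecasts. A minimal sketch using simple aggregate error metrics:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"# Optional sketch: score the forecasts against the actuals loaded above.\n",
"abs_err = (df[\"Quantity\"] - df[\"Predicted\"]).abs()\n",
"print(\"MAE: \", abs_err.mean())\n",
"print(\"RMSE:\", np.sqrt((abs_err ** 2).mean()))"
]
},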
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 7.0 Publish and schedule the inference pipeline (Optional)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 7.1 Publish the pipeline\n",
|
||||
"\n",
|
||||
"Once you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipeline#publish-a-pipeline) for additional information on publishing and calling pipelines."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# published_pipeline_inf = inference_pipeline.publish(name = 'automl_forecast_many_models',\n",
|
||||
"# description = 'forecast many models',\n",
|
||||
"# version = '1',\n",
|
||||
"# continue_on_step_failure = False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 7.2 Schedule the pipeline\n",
|
||||
"You can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain or forecast models every month or based on another trigger such as data drift."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# from azureml.pipeline.core import Schedule, ScheduleRecurrence\n",
|
||||
"\n",
|
||||
"# forecasting_pipeline_id = published_pipeline.id\n",
|
||||
"\n",
|
||||
"# recurrence = ScheduleRecurrence(frequency=\"Month\", interval=1, start_time=\"2020-01-01T09:00:00\")\n",
|
||||
"# recurring_schedule = Schedule.create(ws, name=\"automl_forecasting_recurring_schedule\",\n",
|
||||
"# description=\"Schedule Forecasting Pipeline to run on the first day of every week\",\n",
|
||||
"# pipeline_id=forecasting_pipeline_id,\n",
|
||||
"# experiment_name=experiment.name,\n",
|
||||
"# recurrence=recurrence)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "jialiu"
|
||||
}
|
||||
],
|
||||
"categories": [
|
||||
"how-to-use-azureml",
|
||||
"automated-machine-learning"
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -0,0 +1,4 @@
|
||||
name: auto-ml-forecasting-many-models
|
||||
dependencies:
|
||||
- pip:
|
||||
- azureml-sdk
|
||||
Binary file not shown.
|
After Width: | Height: | Size: 176 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 165 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 162 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 166 KiB |
@@ -58,21 +58,22 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import azureml.core\n",
|
||||
"import pandas as pd\n",
|
||||
"import json\n",
|
||||
"import logging\n",
|
||||
"\n",
|
||||
"from azureml.core.workspace import Workspace\n",
|
||||
"import azureml.core\n",
|
||||
"import pandas as pd\n",
|
||||
"from azureml.automl.core.featurization import FeaturizationConfig\n",
|
||||
"from azureml.core.experiment import Experiment\n",
|
||||
"from azureml.train.automl import AutoMLConfig\n",
|
||||
"from azureml.automl.core.featurization import FeaturizationConfig"
|
||||
"from azureml.core.workspace import Workspace\n",
|
||||
"from azureml.train.automl import AutoMLConfig"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||
"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -81,7 +82,6 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
|
||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||
]
|
||||
},
|
||||
@@ -101,19 +101,20 @@
|
||||
"ws = Workspace.from_config()\n",
|
||||
"\n",
|
||||
"# choose a name for the run history container in the workspace\n",
|
||||
"experiment_name = 'automl-ojforecasting'\n",
|
||||
"experiment_name = \"automl-ojforecasting\"\n",
|
||||
"\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output['Subscription ID'] = ws.subscription_id\n",
|
||||
"output['Workspace'] = ws.name\n",
|
||||
"output['SKU'] = ws.sku\n",
|
||||
"output['Resource Group'] = ws.resource_group\n",
|
||||
"output['Location'] = ws.location\n",
|
||||
"output['Run History Name'] = experiment_name\n",
|
||||
"pd.set_option('display.max_colwidth', -1)\n",
|
||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||
"output[\"Workspace\"] = ws.name\n",
|
||||
"output[\"SKU\"] = ws.sku\n",
|
||||
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||
"output[\"Location\"] = ws.location\n",
|
||||
"output[\"Run History Name\"] = experiment_name\n",
|
||||
"output[\"SDK Version\"] = azureml.core.VERSION\n",
|
||||
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
@@ -146,10 +147,11 @@
|
||||
"# Verify that cluster does not exist already\n",
|
||||
"try:\n",
|
||||
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
||||
" print('Found existing cluster, use it.')\n",
|
||||
" print(\"Found existing cluster, use it.\")\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D12_V2',\n",
|
||||
" max_nodes=6)\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||
" vm_size=\"STANDARD_D12_V2\", max_nodes=6\n",
|
||||
" )\n",
|
||||
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
||||
"\n",
|
||||
"compute_target.wait_for_completion(show_output=True)"
|
||||
@@ -169,11 +171,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"time_column_name = 'WeekStarting'\n",
|
||||
"time_column_name = \"WeekStarting\"\n",
|
||||
"data = pd.read_csv(\"dominicks_OJ.csv\", parse_dates=[time_column_name])\n",
|
||||
"\n",
|
||||
"# Drop the columns 'logQuantity' as it is a leaky feature.\n",
|
||||
"data.drop('logQuantity', axis=1, inplace=True)\n",
|
||||
"data.drop(\"logQuantity\", axis=1, inplace=True)\n",
|
||||
"\n",
|
||||
"data.head()"
|
||||
]
|
||||
@@ -193,9 +195,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"time_series_id_column_names = ['Store', 'Brand']\n",
|
||||
"time_series_id_column_names = [\"Store\", \"Brand\"]\n",
|
||||
"nseries = data.groupby(time_series_id_column_names).ngroups\n",
|
||||
"print('Data contains {0} individual time-series.'.format(nseries))"
|
||||
"print(\"Data contains {0} individual time-series.\".format(nseries))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -214,7 +216,7 @@
|
||||
"use_stores = [2, 5, 8]\n",
|
||||
"data_subset = data[data.Store.isin(use_stores)]\n",
|
||||
"nseries = data_subset.groupby(time_series_id_column_names).ngroups\n",
|
||||
"print('Data subset contains {0} individual time-series.'.format(nseries))"
|
||||
"print(\"Data subset contains {0} individual time-series.\".format(nseries))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -233,14 +235,17 @@
|
||||
"source": [
|
||||
"n_test_periods = 20\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def split_last_n_by_series_id(df, n):\n",
|
||||
" \"\"\"Group df by series identifiers and split on last n rows for each group.\"\"\"\n",
|
||||
" df_grouped = (df.sort_values(time_column_name) # Sort by ascending time\n",
|
||||
" .groupby(time_series_id_column_names, group_keys=False))\n",
|
||||
" df_grouped = df.sort_values(time_column_name).groupby( # Sort by ascending time\n",
|
||||
" time_series_id_column_names, group_keys=False\n",
|
||||
" )\n",
|
||||
" df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-n])\n",
|
||||
" df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-n:])\n",
|
||||
" return df_head, df_tail\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"train, test = split_last_n_by_series_id(data_subset, n_test_periods)"
|
||||
]
|
||||
},
|
||||
@@ -258,18 +263,15 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"train.to_csv (r'./dominicks_OJ_train.csv', index = None, header=True)\n",
|
||||
"test.to_csv (r'./dominicks_OJ_test.csv', index = None, header=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
|
||||
"\n",
|
||||
"datastore = ws.get_default_datastore()\n",
|
||||
"datastore.upload_files(files = ['./dominicks_OJ_train.csv', './dominicks_OJ_test.csv'], target_path = 'dataset/', overwrite = True,show_progress = True)"
|
||||
"train_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||
" train, target=(datastore, \"dataset/\"), name=\"dominicks_OJ_train\"\n",
|
||||
")\n",
|
||||
"test_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
|
||||
" test, target=(datastore, \"dataset/\"), name=\"dominicks_OJ_test\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -279,17 +281,6 @@
|
||||
"### Create dataset for training"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.dataset import Dataset\n",
|
||||
"train_dataset = Dataset.Tabular.from_delimited_files(path=datastore.path('dataset/dominicks_OJ_train.csv'))\n",
|
||||
"test_dataset = Dataset.Tabular.from_delimited_files(path=datastore.path('dataset/dominicks_OJ_test.csv'))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -323,7 +314,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"target_column_name = 'Quantity'"
|
||||
"target_column_name = \"Quantity\""
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -351,13 +342,17 @@
|
||||
"source": [
|
||||
"featurization_config = FeaturizationConfig()\n",
|
||||
"# Force the CPWVOL5 feature to be numeric type.\n",
|
||||
"featurization_config.add_column_purpose('CPWVOL5', 'Numeric')\n",
|
||||
"featurization_config.add_column_purpose(\"CPWVOL5\", \"Numeric\")\n",
|
||||
"# Fill missing values in the target column, Quantity, with zeros.\n",
|
||||
"featurization_config.add_transformer_params('Imputer', ['Quantity'], {\"strategy\": \"constant\", \"fill_value\": 0})\n",
|
||||
"featurization_config.add_transformer_params(\n",
|
||||
" \"Imputer\", [\"Quantity\"], {\"strategy\": \"constant\", \"fill_value\": 0}\n",
|
||||
")\n",
|
||||
"# Fill missing values in the INCOME column with median value.\n",
|
||||
"featurization_config.add_transformer_params('Imputer', ['INCOME'], {\"strategy\": \"median\"})\n",
|
||||
"featurization_config.add_transformer_params(\n",
|
||||
" \"Imputer\", [\"INCOME\"], {\"strategy\": \"median\"}\n",
|
||||
")\n",
|
||||
"# Fill missing values in the Price column with forward fill (last value carried forward).\n",
|
||||
"featurization_config.add_transformer_params('Imputer', ['Price'], {\"strategy\": \"ffill\"})"
|
||||
"featurization_config.add_transformer_params(\"Imputer\", [\"Price\"], {\"strategy\": \"ffill\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -423,16 +418,18 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
||||
"\n",
|
||||
"forecasting_parameters = ForecastingParameters(\n",
|
||||
" time_column_name=time_column_name,\n",
|
||||
" forecast_horizon=n_test_periods,\n",
|
||||
" time_series_id_column_names=time_series_id_column_names,\n",
|
||||
" freq='W-THU' # Set the forecast frequency to be weekly (start on each Thursday)\n",
|
||||
" freq=\"W-THU\", # Set the forecast frequency to be weekly (start on each Thursday)\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"automl_config = AutoMLConfig(task='forecasting',\n",
|
||||
" debug_log='automl_oj_sales_errors.log',\n",
|
||||
" primary_metric='normalized_mean_absolute_error',\n",
|
||||
"automl_config = AutoMLConfig(\n",
|
||||
" task=\"forecasting\",\n",
|
||||
" debug_log=\"automl_oj_sales_errors.log\",\n",
|
||||
" primary_metric=\"normalized_mean_absolute_error\",\n",
|
||||
" experiment_timeout_hours=0.25,\n",
|
||||
" training_data=train_dataset,\n",
|
||||
" label_column_name=target_column_name,\n",
|
||||
@@ -442,7 +439,8 @@
|
||||
" n_cross_validations=3,\n",
|
||||
" verbosity=logging.INFO,\n",
|
||||
" max_cores_per_iteration=-1,\n",
|
||||
" forecasting_parameters=forecasting_parameters)"
|
||||
" forecasting_parameters=forecasting_parameters,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -475,8 +473,8 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Retrieve the Best Model\n",
|
||||
"Each run within an Experiment stores serialized (i.e. pickled) pipelines from the AutoML iterations. We can now retrieve the pipeline with the best performance on the validation dataset:"
|
||||
"### Retrieve the Best Run details\n",
|
||||
"Below we retrieve the best Run object from among all the runs in the experiment."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -485,9 +483,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"best_run, fitted_model = remote_run.get_output()\n",
|
||||
"print(fitted_model.steps)\n",
|
||||
"model_name = best_run.properties['model_name']"
|
||||
"best_run = remote_run.get_best_child()\n",
|
||||
"model_name = best_run.properties[\"model_name\"]\n",
|
||||
"best_run"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -505,16 +503,26 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"custom_featurizer = fitted_model.named_steps['timeseriestransformer']"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"custom_featurizer.get_featurization_summary()"
|
||||
"# Download the featurization summary JSON file locally\n",
|
||||
"best_run.download_file(\n",
|
||||
" \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Render the JSON as a pandas DataFrame\n",
|
||||
"with open(\"featurization_summary.json\", \"r\") as f:\n",
|
||||
" records = json.load(f)\n",
|
||||
"fs = pd.DataFrame.from_records(records)\n",
|
||||
"\n",
|
||||
"# View a summary of the featurization\n",
|
||||
"fs[\n",
|
||||
" [\n",
|
||||
" \"RawFeatureName\",\n",
|
||||
" \"TypeDetected\",\n",
|
||||
" \"Dropped\",\n",
|
||||
" \"EngineeredFeatureCount\",\n",
|
||||
" \"Transformations\",\n",
|
||||
" ]\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -541,7 +549,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Retreiving forecasts from the model\n",
|
||||
"### Retrieving forecasts from the model\n",
|
||||
"We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and expecuted on the remote compute."
|
||||
]
|
||||
},
|
||||
@@ -559,15 +567,18 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from run_forecast import run_remote_inference\n",
|
||||
"remote_run_infer = run_remote_inference(test_experiment=test_experiment, \n",
|
||||
"\n",
|
||||
"remote_run_infer = run_remote_inference(\n",
|
||||
" test_experiment=test_experiment,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" train_run=best_run,\n",
|
||||
" test_dataset=test_dataset,\n",
|
||||
" target_column_name=target_column_name)\n",
|
||||
" target_column_name=target_column_name,\n",
|
||||
")\n",
|
||||
"remote_run_infer.wait_for_completion(show_output=False)\n",
|
||||
"\n",
|
||||
"# download the forecast file to the local machine\n",
|
||||
"remote_run_infer.download_file('outputs/predictions.csv', 'predictions.csv')"
|
||||
"remote_run_infer.download_file(\"outputs/predictions.csv\", \"predictions.csv\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -588,7 +599,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# load forecast data frame\n",
|
||||
"fcst_df = pd.read_csv('predictions.csv', parse_dates=[time_column_name])\n",
|
||||
"fcst_df = pd.read_csv(\"predictions.csv\", parse_dates=[time_column_name])\n",
|
||||
"fcst_df.head()"
|
||||
]
|
||||
},
|
||||
@@ -605,18 +616,23 @@
|
||||
"# use automl scoring module\n",
|
||||
"scores = scoring.score_regression(\n",
|
||||
" y_test=fcst_df[target_column_name],\n",
|
||||
" y_pred=fcst_df['predicted'],\n",
|
||||
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
|
||||
" y_pred=fcst_df[\"predicted\"],\n",
|
||||
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(\"[Test data scores]\\n\")\n",
|
||||
"for key, value in scores.items():\n",
|
||||
" print('{}: {:.3f}'.format(key, value))\n",
|
||||
" print(\"{}: {:.3f}\".format(key, value))\n",
|
||||
"\n",
|
||||
"# Plot outputs\n",
|
||||
"%matplotlib inline\n",
|
||||
"test_pred = plt.scatter(fcst_df[target_column_name], fcst_df['predicted'], color='b')\n",
|
||||
"test_test = plt.scatter(fcst_df[target_column_name], fcst_df[target_column_name], color='g')\n",
|
||||
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
|
||||
"test_pred = plt.scatter(fcst_df[target_column_name], fcst_df[\"predicted\"], color=\"b\")\n",
|
||||
"test_test = plt.scatter(\n",
|
||||
" fcst_df[target_column_name], fcst_df[target_column_name], color=\"g\"\n",
|
||||
")\n",
|
||||
"plt.legend(\n",
|
||||
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
|
||||
")\n",
|
||||
"plt.show()"
|
||||
]
|
||||
},
|
||||
@@ -640,9 +656,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"description = 'AutoML OJ forecaster'\n",
|
||||
"description = \"AutoML OJ forecaster\"\n",
|
||||
"tags = None\n",
|
||||
"model = remote_run.register_model(model_name = model_name, description = description, tags = tags)\n",
|
||||
"model = remote_run.register_model(\n",
|
||||
" model_name=model_name, description=description, tags=tags\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(remote_run.model_id)"
|
||||
]
|
||||
@@ -662,8 +680,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"script_file_name = 'score_fcast.py'\n",
|
||||
"best_run.download_file('outputs/scoring_file_v_1_0_0.py', script_file_name)"
|
||||
"script_file_name = \"score_fcast.py\"\n",
|
||||
"best_run.download_file(\"outputs/scoring_file_v_1_0_0.py\", script_file_name)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -684,15 +702,18 @@
|
||||
"from azureml.core.webservice import Webservice\n",
|
||||
"from azureml.core.model import Model\n",
|
||||
"\n",
|
||||
"inference_config = InferenceConfig(environment = best_run.get_environment(), \n",
|
||||
" entry_script = script_file_name)\n",
|
||||
"inference_config = InferenceConfig(\n",
|
||||
" environment=best_run.get_environment(), entry_script=script_file_name\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"aciconfig = AciWebservice.deploy_configuration(cpu_cores = 2, \n",
|
||||
"aciconfig = AciWebservice.deploy_configuration(\n",
|
||||
" cpu_cores=2,\n",
|
||||
" memory_gb=4,\n",
|
||||
" tags = {'type': \"automl-forecasting\"},\n",
|
||||
" description = \"Automl forecasting sample service\")\n",
|
||||
" tags={\"type\": \"automl-forecasting\"},\n",
|
||||
" description=\"Automl forecasting sample service\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"aci_service_name = 'automl-oj-forecast-01'\n",
|
||||
"aci_service_name = \"automl-oj-forecast-01\"\n",
|
||||
"print(aci_service_name)\n",
|
||||
"aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)\n",
|
||||
"aci_service.wait_for_deployment(True)\n",
|
||||
@@ -722,20 +743,27 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"X_query = test.copy()\n",
|
||||
"X_query.pop(target_column_name)\n",
|
||||
"# We have to convert datetime to string, because Timestamps cannot be serialized to JSON.\n",
|
||||
"X_query[time_column_name] = X_query[time_column_name].astype(str)\n",
|
||||
"# The Service object accept the complex dictionary, which is internally converted to JSON string.\n",
|
||||
"# The section 'data' contains the data frame in the form of dictionary.\n",
|
||||
"test_sample = json.dumps({\"data\": json.loads(X_query.to_json(orient=\"records\"))})\n",
|
||||
"sample_quantiles = [0.025, 0.975]\n",
|
||||
"test_sample = json.dumps(\n",
|
||||
" {\"data\": X_query.to_dict(orient=\"records\"), \"quantiles\": sample_quantiles}\n",
|
||||
")\n",
|
||||
"response = aci_service.run(input_data=test_sample)\n",
|
||||
"# translate from networkese to datascientese\n",
|
||||
"try:\n",
|
||||
" res_dict = json.loads(response)\n",
|
||||
" y_fcst_all = pd.DataFrame(res_dict['index'])\n",
|
||||
" y_fcst_all[time_column_name] = pd.to_datetime(y_fcst_all[time_column_name], unit = 'ms')\n",
|
||||
" y_fcst_all['forecast'] = res_dict['forecast'] \n",
|
||||
" y_fcst_all = pd.DataFrame(res_dict[\"index\"])\n",
|
||||
" y_fcst_all[time_column_name] = pd.to_datetime(\n",
|
||||
" y_fcst_all[time_column_name], unit=\"ms\"\n",
|
||||
" )\n",
|
||||
" y_fcst_all[\"forecast\"] = res_dict[\"forecast\"]\n",
|
||||
" y_fcst_all[\"prediction_interval\"] = res_dict[\"prediction_interval\"]\n",
|
||||
"except:\n",
|
||||
" print(res_dict)"
|
||||
]
|
||||
@@ -762,7 +790,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"serv = Webservice(ws, 'automl-oj-forecast-01')\n",
|
||||
"serv = Webservice(ws, \"automl-oj-forecast-01\")\n",
|
||||
"serv.delete() # don't do it accidentally"
|
||||
]
|
||||
}
|
||||
|
||||
@@ -5,62 +5,20 @@ compute instance.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
from azureml.core import Dataset, Run
|
||||
from azureml.automl.core.shared.constants import TimeSeriesInternal
|
||||
from sklearn.externals import joblib
|
||||
from pandas.tseries.frequencies import to_offset
|
||||
|
||||
|
||||
def align_outputs(y_predicted, X_trans, X_test, y_test, target_column_name,
|
||||
predicted_column_name='predicted',
|
||||
horizon_colname='horizon_origin'):
|
||||
"""
|
||||
Demonstrates how to get the output aligned to the inputs
|
||||
using pandas indexes. Helps understand what happened if
|
||||
the output's shape differs from the input shape, or if
|
||||
the data got re-sorted by time and grain during forecasting.
|
||||
|
||||
Typical causes of misalignment are:
|
||||
* we predicted some periods that were missing in actuals -> drop from eval
|
||||
* model was asked to predict past max_horizon -> increase max horizon
|
||||
* data at start of X_test was needed for lags -> provide previous periods
|
||||
"""
|
||||
|
||||
if (horizon_colname in X_trans):
|
||||
df_fcst = pd.DataFrame({predicted_column_name: y_predicted,
|
||||
horizon_colname: X_trans[horizon_colname]})
|
||||
else:
|
||||
df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
|
||||
|
||||
# y and X outputs are aligned by forecast() function contract
|
||||
df_fcst.index = X_trans.index
|
||||
|
||||
# align original X_test to y_test
|
||||
X_test_full = X_test.copy()
|
||||
X_test_full[target_column_name] = y_test
|
||||
|
||||
# X_test_full's index does not include origin, so reset for merge
|
||||
df_fcst.reset_index(inplace=True)
|
||||
X_test_full = X_test_full.reset_index().drop(columns='index')
|
||||
together = df_fcst.merge(X_test_full, how='right')
|
||||
|
||||
# drop rows where prediction or actuals are nan
|
||||
# happens because of missing actuals
|
||||
# or at edges of time due to lags/rolling windows
|
||||
clean = together[together[[target_column_name,
|
||||
predicted_column_name]].notnull().all(axis=1)]
|
||||
return(clean)
|
||||
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
'--target_column_name', type=str, dest='target_column_name',
|
||||
help='Target Column Name')
|
||||
"--target_column_name",
|
||||
type=str,
|
||||
dest="target_column_name",
|
||||
help="Target Column Name",
|
||||
)
|
||||
parser.add_argument(
|
||||
'--test_dataset', type=str, dest='test_dataset',
|
||||
help='Test Dataset')
|
||||
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
target_column_name = args.target_column_name
|
||||
@@ -76,14 +34,28 @@ X_test = test_dataset.to_pandas_dataframe().reset_index(drop=True)
|
||||
y_test = X_test.pop(target_column_name).values
|
||||
|
||||
# generate forecast
|
||||
fitted_model = joblib.load('model.pkl')
|
||||
y_predictions, X_trans = fitted_model.forecast(X_test)
|
||||
fitted_model = joblib.load("model.pkl")
|
||||
# Default quantile values are set below (the 0.025 and 0.975 quantiles give a 95% prediction interval)
|
||||
quantiles = [0.025, 0.5, 0.975]
|
||||
predicted_column_name = "predicted"
|
||||
PI = "prediction_interval"
|
||||
fitted_model.quantiles = quantiles
|
||||
pred_quantiles = fitted_model.forecast_quantiles(X_test)
|
||||
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
|
||||
lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
|
||||
)
|
||||
X_test[target_column_name] = y_test
|
||||
X_test[PI] = pred_quantiles[PI]
|
||||
X_test[predicted_column_name] = pred_quantiles[0.5]
|
||||
# drop rows where prediction or actuals are nan
|
||||
# happens because of missing actuals
|
||||
# or at edges of time due to lags/rolling windows
|
||||
clean = X_test[
|
||||
X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
|
||||
]
|
||||
|
||||
# align output
|
||||
df_all = align_outputs(y_predictions, X_trans, X_test, y_test, target_column_name)
|
||||
|
||||
file_name = 'outputs/predictions.csv'
|
||||
export_csv = df_all.to_csv(file_name, header=True, index=False) # added Index
|
||||
file_name = "outputs/predictions.csv"
|
||||
export_csv = clean.to_csv(file_name, header=True, index=False)  # write without the index
|
||||
|
||||
# Upload the predictions into artifacts
|
||||
run.upload_file(name=file_name, path_or_stream=file_name)
|
||||
|
||||
@@ -3,36 +3,47 @@ import shutil
|
||||
from azureml.core import ScriptRunConfig
|
||||
|
||||
|
||||
def run_remote_inference(test_experiment, compute_target, train_run,
|
||||
test_dataset, target_column_name, inference_folder='./forecast'):
|
||||
def run_remote_inference(
|
||||
test_experiment,
|
||||
compute_target,
|
||||
train_run,
|
||||
test_dataset,
|
||||
target_column_name,
|
||||
inference_folder="./forecast",
|
||||
):
|
||||
# Create local directory to copy the model.pkl and forecasting_script.py files into.
|
||||
# These files will be uploaded to and executed on the compute instance.
|
||||
os.makedirs(inference_folder, exist_ok=True)
|
||||
shutil.copy('forecasting_script.py', inference_folder)
|
||||
shutil.copy("forecasting_script.py", inference_folder)
|
||||
|
||||
train_run.download_file('outputs/model.pkl',
|
||||
os.path.join(inference_folder, 'model.pkl'))
|
||||
train_run.download_file(
|
||||
"outputs/model.pkl", os.path.join(inference_folder, "model.pkl")
|
||||
)
|
||||
|
||||
inference_env = train_run.get_environment()
|
||||
|
||||
config = ScriptRunConfig(source_directory=inference_folder,
|
||||
script='forecasting_script.py',
|
||||
arguments=['--target_column_name',
|
||||
config = ScriptRunConfig(
|
||||
source_directory=inference_folder,
|
||||
script="forecasting_script.py",
|
||||
arguments=[
|
||||
"--target_column_name",
|
||||
target_column_name,
|
||||
'--test_dataset',
|
||||
test_dataset.as_named_input(test_dataset.name)],
|
||||
"--test_dataset",
|
||||
test_dataset.as_named_input(test_dataset.name),
|
||||
],
|
||||
compute_target=compute_target,
|
||||
environment=inference_env)
|
||||
environment=inference_env,
|
||||
)
|
||||
|
||||
run = test_experiment.submit(config,
|
||||
tags={'training_run_id':
|
||||
train_run.id,
|
||||
'run_algorithm':
|
||||
train_run.properties['run_algorithm'],
|
||||
'valid_score':
|
||||
train_run.properties['score'],
|
||||
'primary_metric':
|
||||
train_run.properties['primary_metric']})
|
||||
run = test_experiment.submit(
|
||||
config,
|
||||
tags={
|
||||
"training_run_id": train_run.id,
|
||||
"run_algorithm": train_run.properties["run_algorithm"],
|
||||
"valid_score": train_run.properties["score"],
|
||||
"primary_metric": train_run.properties["primary_metric"],
|
||||
},
|
||||
)
|
||||
|
||||
run.log("run_algorithm", run.tags['run_algorithm'])
|
||||
run.log("run_algorithm", run.tags["run_algorithm"])
|
||||
return run
|
||||
|
||||
@@ -0,0 +1,823 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Training and Inferencing AutoML Forecasting Model Using Pipelines"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Introduction\n",
|
||||
"\n",
|
||||
"In this notebook, we demonstrate how to use piplines to train and inference on AutoML Forecasting model. Two pipelines will be created: one for training AutoML model, and the other is for inference on AutoML model. We'll also demonstrate how to schedule the inference pipeline so you can get inference results periodically (with refreshed test dataset). Make sure you have executed the configuration notebook before running this notebook. In this notebook you will learn how to:\n",
|
||||
"\n",
|
||||
"- Configure AutoML using AutoMLConfig for forecasting tasks using pipeline AutoMLSteps.\n",
|
||||
"- Create and register an AutoML model using AzureML pipeline.\n",
|
||||
"- Inference and schdelue the pipeline using registered model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"import logging\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from matplotlib import pyplot as plt\n",
|
||||
"import pandas as pd\n",
|
||||
"\n",
|
||||
"import azureml.core\n",
|
||||
"from azureml.core.experiment import Experiment\n",
|
||||
"from azureml.core.workspace import Workspace\n",
|
||||
"from azureml.train.automl import AutoMLConfig"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
|
||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Accessing the Azure ML workspace requires authentication with Azure.\n",
|
||||
"\n",
|
||||
"The default authentication is interactive authentication using the default tenant. Executing the ws = Workspace.from_config() line in the cell below will prompt for authentication the first time that it is run.\n",
|
||||
"\n",
|
||||
"If you have multiple Azure tenants, you can specify the tenant by replacing the ws = Workspace.from_config() line in the cell below with the following:\n",
|
||||
"```\n",
|
||||
"from azureml.core.authentication import InteractiveLoginAuthentication\n",
|
||||
"auth = InteractiveLoginAuthentication(tenant_id = 'mytenantid')\n",
|
||||
"ws = Workspace.from_config(auth = auth)\n",
|
||||
"```\n",
|
||||
"If you need to run in an environment where interactive login is not possible, you can use Service Principal authentication by replacing the ws = Workspace.from_config() line in the cell below with the following:\n",
|
||||
"```\n",
|
||||
"from azureml.core.authentication import ServicePrincipalAuthentication\n",
|
||||
"auth = ServicePrincipalAuthentication('mytenantid', 'myappid', 'mypassword')\n",
|
||||
"ws = Workspace.from_config(auth = auth)\n",
|
||||
"```\n",
|
||||
"For more details, see aka.ms/aml-notebook-auth"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"dstor = ws.get_default_datastore()\n",
|
||||
"\n",
|
||||
"# Choose a name for the run history container in the workspace.\n",
|
||||
"experiment_name = \"forecasting-pipeline\"\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||
"output[\"Workspace\"] = ws.name\n",
|
||||
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||
"output[\"Location\"] = ws.location\n",
|
||||
"output[\"Run History Name\"] = experiment_name\n",
|
||||
"pd.set_option(\"display.max_colwidth\", None)\n",
|
||||
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Compute"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Compute \n",
|
||||
"\n",
|
||||
"#### Create or Attach existing AmlCompute\n",
|
||||
"\n",
|
||||
"You will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||
"\n",
|
||||
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
|
||||
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
]
|
||||
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your CPU cluster\n",
"amlcompute_cluster_name = \"forecast-step-cluster\"\n",
"\n",
"# Reuse the cluster if it already exists; otherwise, create it\n",
"try:\n",
"    compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
"    print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
"    compute_config = AmlCompute.provisioning_configuration(\n",
"        vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
"    )\n",
"    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"compute_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Data\n",
"You are now ready to load the historical orange juice sales data. For demonstration purposes, we extract sales time-series for just a few of the stores. We will load the CSV file into a plain pandas DataFrame; the time column in the CSV is called _WeekStarting_, so it will be specially parsed into the datetime type."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"time_column_name = \"WeekStarting\"\n",
"train = pd.read_csv(\"oj-train.csv\", parse_dates=[time_column_name])\n",
"\n",
"train.head()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Each row in the DataFrame holds a quantity of weekly sales for an OJ brand at a single store. The data also includes the sales price, a flag indicating if the OJ brand was advertised in the store that week, and some customer demographic information based on the store location. For historical reasons, the data also includes the logarithm of the sales quantity; the Dominick's grocery data is commonly used to illustrate econometric modeling techniques in which logarithms of quantities are generally preferred.\n",
"\n",
"The task is now to build a time-series model for the _Quantity_ column. It is important to note that this dataset comprises many individual time-series - one for each unique combination of _Store_ and _Brand_. To distinguish the individual time-series, we define the **time_series_id_column_names** - the columns whose values determine the boundaries between time-series:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"time_series_id_column_names = [\"Store\", \"Brand\"]\n",
"nseries = train.groupby(time_series_id_column_names).ngroups\n",
"print(\"Data contains {0} individual time-series.\".format(nseries))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Test Splitting\n",
"We also need a test set for later forecast evaluation; it contains the final 4 weeks of observed sales for each time-series. Such a split must be stratified by series, which is typically done with a group-by over the time series identifier columns; here we load a pre-made test set from a CSV file (see the sketch after the next cell)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"n_test_periods = 4\n",
"\n",
"test = pd.read_csv(\"oj-test.csv\", parse_dates=[time_column_name])"
]
},
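{
"cell_type": "markdown",
"metadata": {},
"source": [
"For reference, if you start from a single DataFrame rather than pre-split CSV files, the stratified split can be done with a group-by. Below is a minimal sketch (assuming the `train`, `time_column_name` and `time_series_id_column_names` variables defined above); it is an illustration, not the exact code used to produce _oj-test.csv_:\n",
"```\n",
"def split_last_n_by_series_id(df, n):\n",
"    # Sort by time, group by series id, and take the last n rows of each group\n",
"    df_grouped = df.sort_values(time_column_name).groupby(\n",
"        time_series_id_column_names, group_keys=False\n",
"    )\n",
"    df_head = df_grouped.apply(lambda g: g.iloc[:-n])\n",
"    df_tail = df_grouped.apply(lambda g: g.iloc[-n:])\n",
"    return df_head, df_tail\n",
"\n",
"\n",
"# train_part, test_part = split_last_n_by_series_id(train, n_test_periods)\n",
"```"
]
},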
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Upload data to datastore\n",
"The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace) is paired with a storage account that contains the default datastore. We will use it to upload the train and test data and create [tabular datasets](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training and testing. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into a tabular representation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
"\n",
"datastore = ws.get_default_datastore()\n",
"train_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
"    train, target=(datastore, \"dataset/\"), name=\"dominicks_OJ_train\"\n",
")\n",
"\n",
"test_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
"    test, target=(datastore, \"dataset/\"), name=\"dominicks_OJ_test\"\n",
")"
]
},
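{
"cell_type": "markdown",
"metadata": {},
"source": [
"Because the dataset operations are lazily evaluated, nothing is read until the dataset is materialized. A quick sanity check of the registered training dataset could look like this (an illustrative snippet, not part of the original sample):\n",
"```\n",
"# Materialize only the first rows to verify the registered dataset\n",
"train_dataset.take(5).to_pandas_dataframe()\n",
"```"
]
},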
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Training"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Modeling\n",
"\n",
"For forecasting tasks, AutoML uses pre-processing and estimation steps that are specific to time-series. AutoML will undertake the following pre-processing steps:\n",
"* Detect the time-series sample frequency (e.g. hourly, daily, weekly) and create new records for absent time points to make the series regular. A regular time series has a well-defined frequency and has a value at every sample point in a contiguous time span\n",
"* Impute missing values in the target (via forward-fill) and feature columns (using median column values)\n",
"* Create features based on time series identifiers to enable fixed effects across different series\n",
"* Create time-based features to assist in learning seasonal patterns\n",
"* Encode categorical variables to numeric quantities\n",
"\n",
"In this notebook, AutoML will train a single, regression-type model across **all** time-series in a given training set. This allows the model to generalize across related series. If you want to train multiple models, one for each group of time-series, please see the many-models notebook.\n",
"\n",
"You are almost ready to start an AutoML training job. First, we need to define the target column."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"target_column_name = \"Quantity\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Forecasting Parameters\n",
"To define forecasting parameters for your experiment training, you can leverage the ForecastingParameters class. The table below details the forecasting parameters we will be passing into our experiment.\n",
"\n",
"\n",
"|Property|Description|\n",
"|-|-|\n",
"|**time_column_name**|The name of your time column.|\n",
"|**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).|\n",
"|**time_series_id_column_names**|The column names used to uniquely identify the time series in data that has multiple rows with the same timestamp. If the time series identifiers are not defined, the data set is assumed to be one time series.|\n",
"|**freq**|Forecast frequency. This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information.|"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n",
"forecasting_parameters = ForecastingParameters(\n",
"    time_column_name=time_column_name,\n",
"    forecast_horizon=n_test_periods,\n",
"    time_series_id_column_names=time_series_id_column_names,\n",
"    freq=\"W-THU\",  # Set the forecast frequency to be weekly (start on each Thursday)\n",
")\n",
"\n",
"automl_config = AutoMLConfig(\n",
"    task=\"forecasting\",\n",
"    debug_log=\"automl_oj_sales_errors.log\",\n",
"    primary_metric=\"normalized_mean_absolute_error\",\n",
"    experiment_timeout_hours=0.25,\n",
"    training_data=train_dataset,\n",
"    label_column_name=target_column_name,\n",
"    compute_target=compute_target,\n",
"    enable_early_stopping=True,\n",
"    n_cross_validations=5,\n",
"    verbosity=logging.INFO,\n",
"    max_cores_per_iteration=-1,\n",
"    forecasting_parameters=forecasting_parameters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import PipelineData, TrainingOutput\n",
"from azureml.pipeline.steps import AutoMLStep\n",
"from azureml.pipeline.core import Pipeline, PipelineParameter\n",
"from azureml.pipeline.steps import PythonScriptStep\n",
"\n",
"metrics_output_name = \"metrics_output\"\n",
"best_model_output_name = \"best_model_output\"\n",
"model_file_name = \"model_file\"\n",
"metrics_data_name = \"metrics_data\"\n",
"\n",
"metrics_data = PipelineData(\n",
"    name=metrics_data_name,\n",
"    datastore=datastore,\n",
"    pipeline_output_name=metrics_output_name,\n",
"    training_output=TrainingOutput(type=\"Metrics\"),\n",
")\n",
"model_data = PipelineData(\n",
"    name=model_file_name,\n",
"    datastore=datastore,\n",
"    pipeline_output_name=best_model_output_name,\n",
"    training_output=TrainingOutput(type=\"Model\"),\n",
")\n",
"\n",
"automl_step = AutoMLStep(\n",
"    name=\"automl_module\",\n",
"    automl_config=automl_config,\n",
"    outputs=[metrics_data, model_data],\n",
"    allow_reuse=False,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Register Model Step"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Run Configuration and Environment\n",
"To run a pipeline step, we first need an environment in which to run the jobs. The environment can be built using the following code."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.runconfig import CondaDependencies, RunConfiguration\n",
"\n",
"# create a new RunConfig object\n",
"conda_run_config = RunConfiguration(framework=\"python\")\n",
"\n",
"# Set compute target to AmlCompute\n",
"conda_run_config.target = compute_target\n",
"\n",
"conda_run_config.docker.use_docker = True\n",
"\n",
"cd = CondaDependencies.create(\n",
"    pip_packages=[\n",
"        \"azureml-sdk[automl]\",\n",
"        \"applicationinsights\",\n",
"        \"azureml-opendatasets\",\n",
"        \"azureml-defaults\",\n",
"    ],\n",
"    conda_packages=[\"numpy==1.19.5\"],\n",
"    pin_sdk_version=False,\n",
")\n",
"conda_run_config.environment.python.conda_dependencies = cd\n",
"\n",
"print(\"run config is ready\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Step to register the model\n",
"The following code creates a pipeline step that registers the model produced by the previous step to the workspace (see the sketch of the script after the next cell)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import PipelineData\n",
"\n",
"# The model name with which to register the trained model in the workspace.\n",
"model_name_str = \"ojmodel\"\n",
"model_name = PipelineParameter(\"model_name\", default_value=model_name_str)\n",
"\n",
"\n",
"register_model_step = PythonScriptStep(\n",
"    script_name=\"register_model.py\",\n",
"    name=\"register_model\",\n",
"    source_directory=\"scripts\",\n",
"    allow_reuse=False,\n",
"    arguments=[\n",
"        \"--model_name\",\n",
"        model_name,\n",
"        \"--model_path\",\n",
"        model_data,\n",
"        \"--ds_name\",\n",
"        \"dominicks_OJ_train\",\n",
"    ],\n",
"    inputs=[model_data],\n",
"    compute_target=compute_target,\n",
"    runconfig=conda_run_config,\n",
")"
]
},
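{
"cell_type": "markdown",
"metadata": {},
"source": [
"The _scripts/register_model.py_ script itself is not shown in this notebook. A minimal sketch of what such a script could look like, assuming the argument names used above (an illustration, not the exact script shipped with the sample):\n",
"```\n",
"import argparse\n",
"\n",
"from azureml.core import Dataset, Model, Run\n",
"\n",
"parser = argparse.ArgumentParser()\n",
"parser.add_argument(\"--model_name\", type=str)\n",
"parser.add_argument(\"--model_path\", type=str)\n",
"parser.add_argument(\"--ds_name\", type=str)\n",
"args = parser.parse_args()\n",
"\n",
"run = Run.get_context()\n",
"ws = run.experiment.workspace\n",
"\n",
"# Link the training dataset to the registered model for lineage tracking\n",
"train_ds = Dataset.get_by_name(ws, args.ds_name)\n",
"Model.register(\n",
"    workspace=ws,\n",
"    model_path=args.model_path,  # model file produced by the AutoML step\n",
"    model_name=args.model_name,\n",
"    datasets=[(\"training data\", train_ds)],\n",
")\n",
"```"
]
},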
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Build the Pipeline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_pipeline = Pipeline(\n",
"    description=\"training_pipeline\",\n",
"    workspace=ws,\n",
"    steps=[automl_step, register_model_step],\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit Pipeline Run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_pipeline_run = experiment.submit(training_pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_pipeline_run.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Get metrics for each run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output_dir = \"train_output\"\n",
"pipeline_output = training_pipeline_run.get_pipeline_output(\"metrics_output\")\n",
"pipeline_output.download(output_dir)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"file_path = os.path.join(output_dir, pipeline_output.path_on_datastore)\n",
"with open(file_path) as f:\n",
"    metrics = json.load(f)\n",
"# Use a distinct loop variable so the metrics dict is not shadowed\n",
"for run_id, run_metrics in metrics.items():\n",
"    print(\"{}: {}\".format(run_id, run_metrics[\"normalized_root_mean_squared_error\"][0]))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Inference"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"There are several ways to perform inference; here we demonstrate how to use the registered model and a pipeline to do it. (For how to register a model, see https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.model.model?view=azure-ml-py.)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Get Inference Pipeline Environment\n",
"To trigger an inference pipeline run, we first need a run environment that contains all the packages needed to unpickle the model. This environment can either be retrieved from the training run or built from the `yml` file that comes with the model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Model\n",
"\n",
"model = Model(ws, model_name_str)\n",
"download_path = model.download(model_name_str, exist_ok=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"After all the files are downloaded, we can generate the run config for inference runs."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Environment, RunConfiguration\n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"\n",
"env_file = os.path.join(download_path, \"conda_env_v_1_0_0.yml\")\n",
"inference_env = Environment(\"oj-inference-env\")\n",
"inference_env.python.conda_dependencies = CondaDependencies(\n",
"    conda_dependencies_file_path=env_file\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"[Optional] The environment can also be retrieved from the training run using the `get_environment()` API, as sketched below."
]
},
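{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of that alternative, assuming the AutoML step run can be located in the pipeline run by its step name, `automl_module` (an illustration, not part of the original sample):\n",
"```\n",
"from azureml.train.automl.run import AutoMLRun\n",
"\n",
"# Locate the AutoML step inside the finished training pipeline run\n",
"automl_step_run = training_pipeline_run.find_step_run(\"automl_module\")[0]\n",
"automl_run = AutoMLRun(experiment, automl_step_run.id)\n",
"\n",
"# Use the environment of the best child run for inference\n",
"best_run = automl_run.get_best_child()\n",
"inference_env = best_run.get_environment()\n",
"```"
]
},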
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Once we have the environment for inference, we can build a run config based on it."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"run_config = RunConfiguration()\n",
"run_config.environment = inference_env"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Build and submit the inference pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The inference pipeline creates outputs in two different formats: 1) a tabular dataset that contains the predictions and 2) an `OutputFileDatasetConfig` that can be consumed by subsequent pipeline steps. A sketch of the _infer.py_ script follows the next cell."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data import OutputFileDatasetConfig\n",
"\n",
"output_data = OutputFileDatasetConfig(name=\"prediction_result\")\n",
"\n",
"output_ds_name = \"oj-output\"\n",
"\n",
"inference_step = PythonScriptStep(\n",
"    name=\"infer-results\",\n",
"    source_directory=\"scripts\",\n",
"    script_name=\"infer.py\",\n",
"    arguments=[\n",
"        \"--model_name\",\n",
"        model_name_str,\n",
"        \"--ouput_dataset_name\",\n",
"        output_ds_name,\n",
"        \"--test_dataset_name\",\n",
"        test_dataset.name,\n",
"        \"--target_column_name\",\n",
"        target_column_name,\n",
"        \"--output_path\",\n",
"        output_data,\n",
"    ],\n",
"    compute_target=compute_target,\n",
"    allow_reuse=False,\n",
"    runconfig=run_config,\n",
")"
]
},
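{
"cell_type": "markdown",
"metadata": {},
"source": [
"The _scripts/infer.py_ script is likewise not shown here. A minimal sketch of what it could look like, assuming the argument names above (including the `--ouput_dataset_name` spelling) and that the downloaded model can be loaded with joblib and forecasts via the fitted pipeline's `forecast()` method; it is an illustration only:\n",
"```\n",
"import argparse\n",
"import os\n",
"\n",
"import joblib\n",
"from azureml.core import Dataset, Model, Run\n",
"\n",
"parser = argparse.ArgumentParser()\n",
"parser.add_argument(\"--model_name\", type=str)\n",
"parser.add_argument(\"--ouput_dataset_name\", type=str)\n",
"parser.add_argument(\"--test_dataset_name\", type=str)\n",
"parser.add_argument(\"--target_column_name\", type=str)\n",
"parser.add_argument(\"--output_path\", type=str)\n",
"args = parser.parse_args()\n",
"\n",
"run = Run.get_context()\n",
"ws = run.experiment.workspace\n",
"\n",
"# Load the registered model and the test dataset\n",
"model_path = Model(ws, args.model_name).download(exist_ok=True)\n",
"fitted_model = joblib.load(model_path)\n",
"test_df = Dataset.get_by_name(ws, args.test_dataset_name).to_pandas_dataframe()\n",
"\n",
"# Forecast over the test set; the target column is not an input feature\n",
"X_test = test_df.drop(columns=[args.target_column_name], errors=\"ignore\")\n",
"y_pred, _ = fitted_model.forecast(X_test)\n",
"X_test[\"predicted\"] = y_pred\n",
"\n",
"# Write the predictions for downstream steps and register them as a dataset\n",
"os.makedirs(args.output_path, exist_ok=True)\n",
"X_test.to_csv(os.path.join(args.output_path, \"predictions.csv\"), index=False)\n",
"Dataset.Tabular.register_pandas_dataframe(\n",
"    X_test, ws.get_default_datastore(), args.ouput_dataset_name\n",
")\n",
"```"
]
},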
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_pipeline = Pipeline(ws, [inference_step])\n",
"inference_run = experiment.submit(inference_pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_run.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Get the predicted data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"\n",
"inference_ds = Dataset.get_by_name(ws, output_ds_name)\n",
"inference_df = inference_ds.to_pandas_dataframe()\n",
"inference_df.tail(5)"
]
},
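{
"cell_type": "markdown",
"metadata": {},
"source": [
"If the test set contains actuals, you could compare them against the predictions. This is a small illustrative sketch; it assumes the prediction column is named `predicted`, as in the _infer.py_ sketch above, and is not part of the original sample:\n",
"```\n",
"from sklearn.metrics import mean_absolute_error\n",
"\n",
"# Score only the rows for which actuals are available\n",
"eval_df = inference_df.dropna(subset=[target_column_name])\n",
"mae = mean_absolute_error(eval_df[target_column_name], eval_df[\"predicted\"])\n",
"print(\"MAE: {:.2f}\".format(mae))\n",
"```"
]
},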
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Schedule Pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This section shows how to schedule a pipeline for periodic predictions. For more info about pipeline schedules and pipeline endpoints, please follow this [notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-schedule-for-a-published-pipeline.ipynb)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_published_pipeline = inference_pipeline.publish(\n",
"    name=\"OJ Inference Test\", description=\"OJ Inference Test\"\n",
")\n",
"print(\"Newly published pipeline id: {}\".format(inference_published_pipeline.id))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If `test_dataset` is refreshed every 4 weeks before Friday 16:00 and we want to predict every 4 weeks (the forecast_horizon), we can schedule our pipeline to run every 4 weeks on Friday at 16:00 to get up-to-date inference results. You can refresh your test dataset (a newer version will be created) periodically when new data is available (i.e. the target column in the test dataset would have values in the beginning as context data, followed by NaNs to be predicted). The inference pipeline will pick up the context to further improve the forecast accuracy."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# schedule\n",
"\n",
"from azureml.pipeline.core.schedule import ScheduleRecurrence, Schedule\n",
"\n",
"recurrence = ScheduleRecurrence(\n",
"    frequency=\"Week\", interval=4, week_days=[\"Friday\"], hours=[16], minutes=[0]\n",
")\n",
"\n",
"schedule = Schedule.create(\n",
"    workspace=ws,\n",
"    name=\"OJ_Inference_schedule\",\n",
"    pipeline_id=inference_published_pipeline.id,\n",
"    experiment_name=\"Schedule-run-OJ\",\n",
"    recurrence=recurrence,\n",
"    wait_for_provisioning=True,\n",
"    description=\"Schedule Run\",\n",
")\n",
"\n",
"# You may want to make sure that the schedule is provisioned properly\n",
"# before making any further changes to the schedule\n",
"\n",
"print(\"Created schedule with id: {}\".format(schedule.id))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### [Optional] Disable schedule"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"schedule.disable()"
]
}
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"category": "tutorial",
"celltoolbar": "Raw Cell Format",
"compute": [
"Remote"
],
"datasets": [
"Orange Juice Sales"
],
"deployment": [
"Azure Container Instance"
],
"exclude_from_index": false,
"framework": [
"Azure ML AutoML"
],
"friendly_name": "Forecasting orange juice sales with deployment",
"index_order": 1,
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
},
"tags": [
"None"
],
"task": "Forecasting"
},
"nbformat": 4,
"nbformat_minor": 4
}
@@ -0,0 +1,4 @@
name: auto-ml-forecasting-pipelines
dependencies:
- pip:
  - azureml-sdk
@@ -0,0 +1,37 @@
WeekStarting,Store,Brand,Advert,Price,Age60,COLLEGE,INCOME,Hincome150,Large HH,Minorities,WorkingWoman,SSTRDIST,SSTRVOL,CPDIST5,CPWVOL5
1992-09-10,2,dominicks,0,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-09-10,2,minute.maid,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-09-10,2,tropicana,0,2.64,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-09-10,5,dominicks,0,1.85,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-09-10,5,minute.maid,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-09-10,5,tropicana,0,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-09-10,8,dominicks,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-09-10,8,minute.maid,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-09-10,8,tropicana,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-09-17,2,dominicks,0,1.77,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-09-17,2,minute.maid,0,2.83,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-09-17,2,tropicana,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-09-17,5,dominicks,0,1.85,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-09-17,5,minute.maid,0,2.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-09-17,5,tropicana,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-09-17,8,dominicks,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-09-17,8,minute.maid,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-09-17,8,tropicana,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-09-24,2,dominicks,0,1.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-09-24,2,minute.maid,0,2.67,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-09-24,2,tropicana,1,2.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-09-24,5,dominicks,0,1.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-09-24,5,minute.maid,0,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-09-24,5,tropicana,1,2.78,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-09-24,8,dominicks,0,1.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-09-24,8,minute.maid,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-09-24,8,tropicana,1,2.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-10-01,2,dominicks,0,1.82,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-10-01,2,minute.maid,1,2.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-10-01,2,tropicana,0,2.97,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-10-01,5,dominicks,0,1.85,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-10-01,5,minute.maid,1,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-10-01,5,tropicana,0,2.78,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-10-01,8,dominicks,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-10-01,8,minute.maid,1,2.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-10-01,8,tropicana,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
@@ -0,0 +1,997 @@
WeekStarting,Store,Brand,Quantity,Advert,Price,Age60,COLLEGE,INCOME,Hincome150,Large HH,Minorities,WorkingWoman,SSTRDIST,SSTRVOL,CPDIST5,CPWVOL5
1990-06-14,2,dominicks,10560,1,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-06-14,2,minute.maid,4480,0,3.17,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-06-14,2,tropicana,8256,0,3.87,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-06-14,5,dominicks,1792,1,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-06-14,5,minute.maid,4224,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-06-14,5,tropicana,5888,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-06-14,8,dominicks,14336,1,1.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-06-14,8,minute.maid,6080,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-06-14,8,tropicana,8896,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-06-21,8,dominicks,6400,0,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-06-21,8,minute.maid,51968,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-06-21,8,tropicana,7296,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-06-28,5,dominicks,2496,0,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-06-28,5,minute.maid,4352,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-06-28,5,tropicana,6976,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-06-28,8,dominicks,3968,0,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-06-28,8,minute.maid,4928,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-06-28,8,tropicana,10368,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-05,5,dominicks,2944,0,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-07-05,5,minute.maid,4928,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-07-05,5,tropicana,6528,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-07-05,8,dominicks,4352,0,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-05,8,minute.maid,5312,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-05,8,tropicana,6976,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-12,5,dominicks,1024,0,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-07-12,5,minute.maid,31168,1,2.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-07-12,5,tropicana,4928,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-07-12,8,dominicks,3520,0,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-12,8,minute.maid,39424,1,2.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-12,8,tropicana,6464,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-19,8,dominicks,6464,0,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-19,8,minute.maid,5568,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-19,8,tropicana,8192,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-26,2,dominicks,8000,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-07-26,2,minute.maid,4672,0,3.17,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-07-26,2,tropicana,6144,0,3.87,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-07-26,5,dominicks,4224,0,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-07-26,5,minute.maid,10048,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-07-26,5,tropicana,5312,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-07-26,8,dominicks,5952,0,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-26,8,minute.maid,14592,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-07-26,8,tropicana,7936,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-02,2,dominicks,6848,1,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-02,2,minute.maid,20160,1,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-02,2,tropicana,3840,0,3.87,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-02,5,dominicks,4544,1,2.09,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-02,5,minute.maid,21760,1,2.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-02,5,tropicana,5120,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-02,8,dominicks,8832,1,2.09,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-02,8,minute.maid,22208,1,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-02,8,tropicana,6656,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-09,2,dominicks,2880,0,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-09,2,minute.maid,2688,0,3.17,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-09,2,tropicana,8000,0,3.87,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-09,5,dominicks,1728,0,2.09,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-09,5,minute.maid,4544,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-09,5,tropicana,7936,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-09,8,dominicks,7232,0,2.09,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-09,8,minute.maid,5760,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-09,8,tropicana,8256,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-16,5,dominicks,1216,0,2.09,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-16,5,minute.maid,52224,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-16,5,tropicana,6080,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-16,8,dominicks,5504,0,2.09,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-16,8,minute.maid,54016,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-16,8,tropicana,5568,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-23,2,dominicks,1600,0,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-23,2,minute.maid,3008,0,3.17,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-23,2,tropicana,8896,0,3.87,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-23,5,dominicks,1152,0,2.09,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-23,5,minute.maid,3584,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-23,5,tropicana,4160,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-23,8,dominicks,4800,0,2.09,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-23,8,minute.maid,5824,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-23,8,tropicana,7488,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-30,2,dominicks,25344,1,1.89,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-30,2,minute.maid,4672,0,3.17,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-30,2,tropicana,7168,0,3.87,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-08-30,5,dominicks,30144,1,1.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-30,5,minute.maid,5120,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-30,5,tropicana,5888,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-08-30,8,dominicks,52672,1,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-30,8,minute.maid,6528,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-08-30,8,tropicana,6144,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-06,2,dominicks,10752,0,1.89,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-09-06,2,minute.maid,2752,0,3.17,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-09-06,2,tropicana,10880,0,3.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-09-06,5,dominicks,8960,0,1.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-06,5,minute.maid,4416,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-06,5,tropicana,9536,0,3.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-06,8,dominicks,16448,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-06,8,minute.maid,5440,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-06,8,tropicana,11008,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-13,2,dominicks,6656,0,1.89,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-09-13,2,minute.maid,26176,1,2.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-09-13,2,tropicana,7744,0,3.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-09-13,5,dominicks,8192,0,1.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-13,5,minute.maid,30208,1,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-13,5,tropicana,8320,0,3.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-13,8,dominicks,19072,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-13,8,minute.maid,36544,1,2.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-13,8,tropicana,5760,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-20,2,dominicks,6592,0,1.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-09-20,2,minute.maid,3712,0,3.17,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-09-20,2,tropicana,8512,0,3.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-09-20,5,dominicks,6528,0,1.79,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-20,5,minute.maid,4160,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-20,5,tropicana,8000,0,3.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-20,8,dominicks,13376,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-20,8,minute.maid,3776,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-20,8,tropicana,10112,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-27,5,dominicks,34688,1,1.79,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-27,5,minute.maid,4992,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-27,5,tropicana,5824,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-09-27,8,dominicks,61440,1,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-27,8,minute.maid,5504,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-09-27,8,tropicana,8448,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-04,5,dominicks,4672,0,1.79,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-04,5,minute.maid,13952,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-04,5,tropicana,10624,1,3.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-04,8,dominicks,13760,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-04,8,minute.maid,12416,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-04,8,tropicana,8448,1,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-11,2,dominicks,1728,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-10-11,2,minute.maid,30656,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-10-11,2,tropicana,5504,0,3.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-10-11,5,dominicks,1088,0,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-11,5,minute.maid,47680,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-11,5,tropicana,6656,0,3.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-11,8,dominicks,3136,0,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-11,8,minute.maid,53696,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-11,8,tropicana,7424,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-18,2,dominicks,33792,1,1.24,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-10-18,2,minute.maid,3840,0,2.98,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-10-18,2,tropicana,5888,0,3.56,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-10-18,5,dominicks,69440,1,1.24,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-18,5,minute.maid,7616,0,2.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-18,5,tropicana,5184,0,3.51,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-18,8,dominicks,186176,1,1.14,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-18,8,minute.maid,5696,0,2.51,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-18,8,tropicana,5824,0,3.04,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-25,2,dominicks,1920,0,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-10-25,2,minute.maid,2816,0,3.17,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-10-25,2,tropicana,8384,0,3.56,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-10-25,5,dominicks,1280,0,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-25,5,minute.maid,8896,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-25,5,tropicana,4928,0,3.51,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-10-25,8,dominicks,3712,0,1.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-25,8,minute.maid,4864,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-10-25,8,tropicana,6656,0,3.04,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-01,2,dominicks,8960,1,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-01,2,minute.maid,23104,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-01,2,tropicana,5952,0,3.56,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-01,5,dominicks,35456,1,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-01,5,minute.maid,28544,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-01,5,tropicana,5888,0,3.51,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-01,8,dominicks,35776,1,1.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-01,8,minute.maid,37184,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-01,8,tropicana,6272,0,3.04,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-08,2,dominicks,11392,0,1.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-08,2,minute.maid,3392,0,3.17,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-08,2,tropicana,6848,0,3.56,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-08,5,dominicks,13824,0,1.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-08,5,minute.maid,5440,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-08,5,tropicana,5312,0,3.51,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-08,8,dominicks,26880,0,1.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-08,8,minute.maid,5504,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-08,8,tropicana,6912,0,3.04,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-15,2,dominicks,28416,0,0.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-15,2,minute.maid,26304,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-15,2,tropicana,9216,0,3.87,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-15,5,dominicks,14208,0,0.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-15,5,minute.maid,52416,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-15,5,tropicana,9984,0,3.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-15,8,dominicks,71680,0,0.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-15,8,minute.maid,51008,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-15,8,tropicana,10496,0,3.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-22,2,dominicks,17152,1,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-22,2,minute.maid,6336,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-22,2,tropicana,12160,0,2.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-22,5,dominicks,29312,1,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-22,5,minute.maid,11712,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-22,5,tropicana,8448,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-22,8,dominicks,25088,1,1.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-22,8,minute.maid,11072,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-22,8,tropicana,11840,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-29,2,dominicks,26560,1,2.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-29,2,minute.maid,9920,0,3.17,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-29,2,tropicana,12672,0,2.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-11-29,5,dominicks,52992,1,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-29,5,minute.maid,13952,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-29,5,tropicana,10880,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-11-29,8,dominicks,91456,1,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-29,8,minute.maid,12160,0,2.62,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-11-29,8,tropicana,9664,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-06,2,dominicks,6336,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-06,2,minute.maid,25280,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-06,2,tropicana,6528,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-06,5,dominicks,15680,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-06,5,minute.maid,36160,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-06,5,tropicana,5696,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-06,8,dominicks,23808,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-06,8,minute.maid,30528,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-06,8,tropicana,6272,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-13,2,dominicks,26368,1,1.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-13,2,minute.maid,14848,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-13,2,tropicana,6144,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-13,5,dominicks,43520,1,1.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-13,5,minute.maid,12864,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-13,5,tropicana,5696,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-13,8,dominicks,89856,1,1.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-13,8,minute.maid,12096,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-13,8,tropicana,7168,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-20,2,dominicks,896,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-20,2,minute.maid,12288,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-20,2,tropicana,21120,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-20,5,dominicks,3904,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-20,5,minute.maid,22208,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-20,5,tropicana,32384,0,2.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-20,8,dominicks,12224,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-20,8,minute.maid,16448,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-20,8,tropicana,29504,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-27,2,dominicks,1472,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-27,2,minute.maid,6272,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-27,2,tropicana,12416,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1990-12-27,5,dominicks,896,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-27,5,minute.maid,9984,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-27,5,tropicana,10752,0,2.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1990-12-27,8,dominicks,3776,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-27,8,minute.maid,9344,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1990-12-27,8,tropicana,8704,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-03,2,dominicks,1344,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-03,2,minute.maid,9152,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-03,2,tropicana,9472,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-03,5,dominicks,2240,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-03,5,minute.maid,14016,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-03,5,tropicana,6912,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-03,8,dominicks,13824,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-03,8,minute.maid,16128,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-03,8,tropicana,9280,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-10,2,dominicks,111680,1,0.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-10,2,minute.maid,4160,0,2.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-10,2,tropicana,17920,0,2.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-10,5,dominicks,125760,1,0.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-10,5,minute.maid,6080,0,2.46,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-10,5,tropicana,13440,0,2.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-10,8,dominicks,251072,1,0.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-10,8,minute.maid,5376,0,2.17,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-10,8,tropicana,12224,0,2.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-17,2,dominicks,1856,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-17,2,minute.maid,10176,0,2.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-17,2,tropicana,9408,0,2.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-17,5,dominicks,1408,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-17,5,minute.maid,7808,0,2.46,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-17,5,tropicana,7808,0,2.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-17,8,dominicks,4864,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-17,8,minute.maid,6656,0,2.17,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-17,8,tropicana,10368,0,2.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-24,2,dominicks,5568,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-24,2,minute.maid,29056,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-24,2,tropicana,6272,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-24,5,dominicks,7232,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-24,5,minute.maid,40896,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-24,5,tropicana,5248,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-24,8,dominicks,10176,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-24,8,minute.maid,59712,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-24,8,tropicana,8128,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-31,2,dominicks,32064,1,1.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-31,2,minute.maid,7104,0,2.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-31,2,tropicana,6912,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-01-31,5,dominicks,41216,1,1.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-31,5,minute.maid,6272,0,2.46,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-31,5,tropicana,6208,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-01-31,8,dominicks,105344,1,1.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-31,8,minute.maid,9856,0,2.17,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-01-31,8,tropicana,5952,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-07,2,dominicks,4352,0,1.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-07,2,minute.maid,7488,0,2.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-07,2,tropicana,16768,0,2.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-07,5,dominicks,9024,0,1.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-07,5,minute.maid,7872,0,2.41,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-07,5,tropicana,21440,0,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-07,8,dominicks,33600,0,1.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-07,8,minute.maid,6720,0,2.12,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-07,8,tropicana,21696,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-14,2,dominicks,704,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-14,2,minute.maid,4224,0,2.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-14,2,tropicana,6272,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-14,5,dominicks,1600,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-14,5,minute.maid,6144,0,2.41,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-14,5,tropicana,7360,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-14,8,dominicks,4736,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-14,8,minute.maid,4224,0,2.12,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-14,8,tropicana,7808,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-21,2,dominicks,13760,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-21,2,minute.maid,8960,0,2.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-21,2,tropicana,7936,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-21,5,dominicks,2496,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-21,5,minute.maid,8448,0,2.41,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-21,5,tropicana,6720,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-21,8,dominicks,10304,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-21,8,minute.maid,9728,0,2.12,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-21,8,tropicana,8128,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-28,2,dominicks,43328,1,1.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-28,2,minute.maid,22464,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-28,2,tropicana,6144,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-02-28,5,dominicks,6336,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-28,5,minute.maid,18688,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-28,5,tropicana,6656,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-02-28,8,dominicks,5056,1,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-28,8,minute.maid,40320,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-02-28,8,tropicana,7424,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-07,2,dominicks,57600,1,1.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-07,2,minute.maid,3840,0,2.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-07,2,tropicana,7936,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-07,5,dominicks,56384,1,1.09,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-07,5,minute.maid,6272,0,2.46,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-07,5,tropicana,6016,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-07,8,dominicks,179968,1,0.94,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-07,8,minute.maid,5120,0,2.17,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-07,8,tropicana,5952,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-14,2,dominicks,704,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-14,2,minute.maid,12992,0,2.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-14,2,tropicana,7808,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-14,5,dominicks,1600,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-14,5,minute.maid,12096,0,2.46,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-14,5,tropicana,6144,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-14,8,dominicks,4992,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-14,8,minute.maid,19264,0,2.17,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-14,8,tropicana,7616,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-21,2,dominicks,6016,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-21,2,minute.maid,70144,1,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-21,2,tropicana,6080,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-21,5,dominicks,2944,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-21,5,minute.maid,73216,1,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-21,5,tropicana,4928,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-21,8,dominicks,6400,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-21,8,minute.maid,170432,1,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-21,8,tropicana,5312,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-28,2,dominicks,10368,1,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-28,2,minute.maid,21248,0,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-28,2,tropicana,42176,1,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-03-28,5,dominicks,13504,1,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-28,5,minute.maid,18944,0,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-28,5,tropicana,67712,1,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-03-28,8,dominicks,14912,1,1.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-28,8,minute.maid,39680,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-03-28,8,tropicana,161792,1,1.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-04,2,dominicks,12608,0,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-04,2,minute.maid,5696,1,2.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-04,2,tropicana,4928,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-04,5,dominicks,5376,0,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-04,5,minute.maid,6400,1,2.46,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-04,5,tropicana,8640,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-04,8,dominicks,34624,0,1.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-04,8,minute.maid,8128,1,2.17,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-04,8,tropicana,17280,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-11,2,dominicks,6336,0,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-11,2,minute.maid,7680,0,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-11,2,tropicana,29504,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-11,5,dominicks,6656,0,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-11,5,minute.maid,8640,0,2.09,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-11,5,tropicana,35520,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-11,8,dominicks,10368,0,1.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-11,8,minute.maid,9088,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-11,8,tropicana,47040,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-18,2,dominicks,140736,1,0.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-18,2,minute.maid,6336,0,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-18,2,tropicana,9984,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-18,5,dominicks,95680,1,0.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-18,5,minute.maid,7296,0,2.09,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-18,5,tropicana,9664,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-18,8,dominicks,194880,1,0.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-18,8,minute.maid,6720,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-18,8,tropicana,14464,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-25,2,dominicks,960,1,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-25,2,minute.maid,8576,0,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-25,2,tropicana,35200,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-04-25,5,dominicks,896,1,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-25,5,minute.maid,12480,0,2.09,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-25,5,tropicana,49088,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-04-25,8,dominicks,5696,1,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-25,8,minute.maid,7552,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-04-25,8,tropicana,52928,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-05-02,2,dominicks,1216,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-02,2,minute.maid,15104,0,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-02,2,tropicana,23936,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-02,5,dominicks,1728,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-02,5,minute.maid,14144,0,2.09,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-02,5,tropicana,14912,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-02,8,dominicks,7168,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-05-02,8,minute.maid,24768,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-05-02,8,tropicana,21184,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-05-09,2,dominicks,1664,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-09,2,minute.maid,76480,1,1.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-09,2,tropicana,7104,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-09,5,dominicks,1280,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-09,5,minute.maid,88256,1,1.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-09,5,tropicana,6464,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-09,8,dominicks,2880,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-05-09,8,minute.maid,183296,1,1.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-05-09,8,tropicana,7360,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-05-16,2,dominicks,4992,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-16,2,minute.maid,5056,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-16,2,tropicana,24512,1,2.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-16,5,dominicks,5696,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-16,5,minute.maid,6848,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-16,5,tropicana,25024,1,2.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-16,8,dominicks,12288,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-05-16,8,minute.maid,8896,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-05-16,8,tropicana,15744,1,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-05-23,2,dominicks,27968,1,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-23,2,minute.maid,4736,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-23,2,tropicana,6336,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-23,5,dominicks,28288,1,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-23,5,minute.maid,7808,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-23,5,tropicana,6272,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-30,2,dominicks,12160,0,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-30,2,minute.maid,4480,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-30,2,tropicana,6080,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-05-30,5,dominicks,4864,0,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-30,5,minute.maid,6272,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-05-30,5,tropicana,5056,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-06,2,dominicks,2240,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-06,2,minute.maid,4032,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-06,2,tropicana,33536,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-06,5,dominicks,2880,0,2.09,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-06,5,minute.maid,6144,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-06,5,tropicana,47616,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-06,8,dominicks,9280,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-06,8,minute.maid,6656,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-06,8,tropicana,46912,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-13,2,dominicks,5504,1,1.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-13,2,minute.maid,14784,1,1.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-13,2,tropicana,13248,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-13,5,dominicks,5760,1,1.41,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-13,5,minute.maid,27776,1,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-13,5,tropicana,13888,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-13,8,dominicks,25856,1,1.26,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-13,8,minute.maid,35456,1,1.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-13,8,tropicana,18240,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-20,2,dominicks,8832,0,1.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-20,2,minute.maid,12096,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-20,2,tropicana,6208,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-20,5,dominicks,15040,0,1.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-20,5,minute.maid,20800,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-20,5,tropicana,6144,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-20,8,dominicks,19264,0,1.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-20,8,minute.maid,17408,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-20,8,tropicana,6464,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-27,2,dominicks,2624,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-27,2,minute.maid,41792,1,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-27,2,tropicana,10624,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-06-27,5,dominicks,5120,0,1.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-27,5,minute.maid,45696,1,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-27,5,tropicana,9344,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-06-27,8,dominicks,6848,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-27,8,minute.maid,75520,1,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-06-27,8,tropicana,8512,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-04,2,dominicks,10432,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-07-04,2,minute.maid,10560,0,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-07-04,2,tropicana,44672,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-07-04,5,dominicks,3264,0,1.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-04,5,minute.maid,14336,0,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-04,5,tropicana,32896,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-04,8,dominicks,12928,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-04,8,minute.maid,21632,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-04,8,tropicana,28416,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-11,5,dominicks,9536,1,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-11,5,minute.maid,4928,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-11,5,tropicana,21056,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-11,8,dominicks,44032,1,1.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-11,8,minute.maid,8384,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-11,8,tropicana,16960,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-18,2,dominicks,8320,0,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-07-18,2,minute.maid,4224,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-07-18,2,tropicana,20096,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-07-18,5,dominicks,6208,0,1.59,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-18,5,minute.maid,4608,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-18,5,tropicana,15360,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-18,8,dominicks,25408,0,1.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-18,8,minute.maid,9920,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-18,8,tropicana,8320,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-25,2,dominicks,6784,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-07-25,2,minute.maid,2880,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-07-25,2,tropicana,9152,1,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-07-25,5,dominicks,6592,0,1.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-25,5,minute.maid,5248,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-25,5,tropicana,8000,1,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-07-25,8,dominicks,38336,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-25,8,minute.maid,6592,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-07-25,8,tropicana,11136,1,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-01,2,dominicks,60544,1,0.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-08-01,2,minute.maid,3968,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-08-01,2,tropicana,21952,0,2.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-08-01,5,dominicks,63552,1,0.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-01,5,minute.maid,4224,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-01,5,tropicana,21120,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-01,8,dominicks,152384,1,0.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-01,8,minute.maid,7168,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-01,8,tropicana,27712,0,2.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-08,2,dominicks,20608,0,0.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-08-08,2,minute.maid,3712,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-08-08,2,tropicana,13568,0,2.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-08-08,5,dominicks,27968,0,0.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-08,5,minute.maid,4288,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-08,5,tropicana,11904,0,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-08,8,dominicks,54464,0,0.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-08,8,minute.maid,6208,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-08,8,tropicana,7744,0,2.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-15,5,dominicks,21760,1,1.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-15,5,minute.maid,16896,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-15,5,tropicana,5056,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-15,8,dominicks,47680,1,1.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-15,8,minute.maid,30528,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-15,8,tropicana,5184,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-22,5,dominicks,2688,0,1.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-22,5,minute.maid,77184,1,1.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-22,5,tropicana,4608,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-22,8,dominicks,14720,0,1.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-22,8,minute.maid,155840,1,1.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-22,8,tropicana,6272,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-29,2,dominicks,16064,0,1.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-08-29,2,minute.maid,2816,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-08-29,2,tropicana,4160,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-08-29,5,dominicks,10432,0,1.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-29,5,minute.maid,5184,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-29,5,tropicana,6016,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-08-29,8,dominicks,53248,0,1.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-29,8,minute.maid,10752,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-08-29,8,tropicana,7744,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-05,2,dominicks,12480,0,1.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-05,2,minute.maid,4288,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-05,2,tropicana,39424,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-05,5,dominicks,9792,0,1.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-09-05,5,minute.maid,5248,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-09-05,5,tropicana,50752,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-09-05,8,dominicks,40576,0,1.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-05,8,minute.maid,6976,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-05,8,tropicana,53184,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-12,2,dominicks,17024,0,1.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-12,2,minute.maid,18240,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-12,2,tropicana,5632,0,3.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-12,5,dominicks,8448,0,1.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-09-12,5,minute.maid,20672,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-09-12,5,tropicana,5632,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-09-12,8,dominicks,25856,0,1.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-12,8,minute.maid,31872,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-12,8,tropicana,6784,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-19,2,dominicks,13440,1,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-19,2,minute.maid,7360,0,1.95,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-19,2,tropicana,9024,1,2.68,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-19,8,dominicks,24064,1,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-19,8,minute.maid,5312,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-19,8,tropicana,8000,1,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-26,2,dominicks,10112,0,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-26,2,minute.maid,7808,0,1.83,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-26,2,tropicana,6016,0,3.44,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-09-26,5,dominicks,6912,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-09-26,5,minute.maid,12352,0,1.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-09-26,5,tropicana,6400,0,3.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-09-26,8,dominicks,15680,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-26,8,minute.maid,33344,0,1.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-09-26,8,tropicana,6592,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-03,2,dominicks,9088,0,1.56,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-03,2,minute.maid,13504,0,1.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-03,2,tropicana,7744,0,3.14,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-03,5,dominicks,8256,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-03,5,minute.maid,12032,0,1.79,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-03,5,tropicana,5440,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-03,8,dominicks,16576,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-03,8,minute.maid,13504,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-03,8,tropicana,5248,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-10,2,dominicks,22848,1,1.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-10,2,minute.maid,10048,0,1.91,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-10,2,tropicana,6784,0,3.07,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-10,5,dominicks,28672,1,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-10,5,minute.maid,13440,0,1.79,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-10,5,tropicana,8128,0,2.94,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-10,8,dominicks,49664,1,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-10,8,minute.maid,13504,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-10,8,tropicana,6592,0,2.94,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-17,2,dominicks,6976,0,1.65,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-17,2,minute.maid,135936,1,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-17,2,tropicana,6784,0,3.07,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-17,8,dominicks,10752,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-17,8,minute.maid,335808,1,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-17,8,tropicana,5888,0,2.94,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-24,2,dominicks,4160,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-24,2,minute.maid,5056,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-24,2,tropicana,6272,0,3.07,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-24,5,dominicks,4416,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-24,5,minute.maid,5824,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-24,5,tropicana,7232,0,2.94,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-24,8,dominicks,9792,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-24,8,minute.maid,13120,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-24,8,tropicana,6336,0,2.94,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-31,2,dominicks,3328,0,1.83,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-31,2,minute.maid,27968,0,1.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-31,2,tropicana,5312,0,3.07,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-10-31,5,dominicks,1856,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-31,5,minute.maid,50112,0,1.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-31,5,tropicana,7168,0,2.94,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-10-31,8,dominicks,7104,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-31,8,minute.maid,49664,0,1.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-10-31,8,tropicana,5888,0,2.94,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-07,2,dominicks,12096,1,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-07,2,minute.maid,4736,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-07,2,tropicana,9216,0,3.11,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-07,5,dominicks,6528,1,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-07,5,minute.maid,5184,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-07,5,tropicana,7872,0,2.94,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-07,8,dominicks,9216,1,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-07,8,minute.maid,10880,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-07,8,tropicana,6080,0,2.94,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-14,2,dominicks,6208,0,1.76,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-14,2,minute.maid,7808,0,2.14,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-14,2,tropicana,7296,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-14,5,dominicks,6080,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-14,5,minute.maid,8384,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-14,5,tropicana,7552,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-14,8,dominicks,12608,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-14,8,minute.maid,9984,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-14,8,tropicana,6848,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-21,2,dominicks,3008,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-21,2,minute.maid,12480,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-21,2,tropicana,34240,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-21,5,dominicks,3456,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-21,5,minute.maid,10112,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-21,5,tropicana,69504,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-21,8,dominicks,16448,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-21,8,minute.maid,9216,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-21,8,tropicana,54016,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-28,2,dominicks,19456,1,1.5,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-28,2,minute.maid,9664,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-28,2,tropicana,7168,0,2.64,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-11-28,5,dominicks,25856,1,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-28,5,minute.maid,8384,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-28,5,tropicana,8960,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-11-28,8,dominicks,27968,1,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-28,8,minute.maid,7680,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-11-28,8,tropicana,10368,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-05,2,dominicks,16768,0,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-05,2,minute.maid,7168,0,2.06,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-05,2,tropicana,6080,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-05,5,dominicks,25728,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-05,5,minute.maid,11456,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-05,5,tropicana,6912,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-05,8,dominicks,37824,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-05,8,minute.maid,7296,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-05,8,tropicana,5568,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-12,2,dominicks,13568,1,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-12,2,minute.maid,4480,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-12,2,tropicana,5120,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-12,5,dominicks,23552,1,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-12,5,minute.maid,5952,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-12,5,tropicana,6656,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-12,8,dominicks,33664,1,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-12,8,minute.maid,8192,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-12,8,tropicana,4864,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-19,2,dominicks,6080,0,1.61,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-19,2,minute.maid,5952,0,2.22,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-19,2,tropicana,8320,0,2.74,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-19,5,dominicks,2944,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-19,5,minute.maid,8512,0,2.26,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-19,5,tropicana,8192,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-19,8,dominicks,17728,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-19,8,minute.maid,6080,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-19,8,tropicana,7232,0,2.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-26,2,dominicks,10432,1,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-26,2,minute.maid,21696,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-26,2,tropicana,17728,0,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1991-12-26,5,dominicks,5888,1,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-26,5,minute.maid,27968,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-26,5,tropicana,13440,0,2.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1991-12-26,8,dominicks,25088,1,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-26,8,minute.maid,15040,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1991-12-26,8,tropicana,15232,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-02,2,dominicks,11712,0,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-02,2,minute.maid,12032,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-02,2,tropicana,13120,0,2.35,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-02,5,dominicks,6848,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-02,5,minute.maid,24000,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-02,5,tropicana,12160,0,2.39,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-02,8,dominicks,13184,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-02,8,minute.maid,9472,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-02,8,tropicana,47040,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-09,2,dominicks,4032,0,1.76,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-09,2,minute.maid,7040,0,2.12,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-09,2,tropicana,13120,0,2.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-09,5,dominicks,1792,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-09,5,minute.maid,6848,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-09,5,tropicana,11840,0,2.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-09,8,dominicks,3136,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-09,8,minute.maid,5888,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-09,8,tropicana,9280,0,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-16,2,dominicks,6336,0,1.82,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-16,2,minute.maid,10240,1,2.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-16,2,tropicana,9792,0,2.43,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-16,5,dominicks,5248,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-16,5,minute.maid,15104,1,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-16,5,tropicana,8640,0,2.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-16,8,dominicks,5696,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-16,8,minute.maid,14336,1,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-16,8,tropicana,6720,0,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-23,2,dominicks,13632,0,1.47,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-23,2,minute.maid,6848,1,2.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-23,2,tropicana,3520,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-23,5,dominicks,16768,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-23,5,minute.maid,11392,1,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-23,5,tropicana,5888,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-23,8,dominicks,19008,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-23,8,minute.maid,11712,1,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-23,8,tropicana,5056,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-30,2,dominicks,45120,0,1.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-30,2,minute.maid,3968,0,2.61,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-30,2,tropicana,5504,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-01-30,5,dominicks,52160,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-30,5,minute.maid,5824,0,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-30,5,tropicana,7424,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-01-30,8,dominicks,121664,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-30,8,minute.maid,7936,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-01-30,8,tropicana,6080,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-06,2,dominicks,9984,0,1.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-06,2,minute.maid,5888,0,2.26,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-06,2,tropicana,6720,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-06,5,dominicks,16640,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-06,5,minute.maid,7488,0,2.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-06,5,tropicana,5632,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-06,8,dominicks,38848,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-06,8,minute.maid,5184,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-06,8,tropicana,10496,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-13,2,dominicks,4800,0,1.82,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-13,2,minute.maid,6208,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-13,2,tropicana,20224,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-13,5,dominicks,1344,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-13,5,minute.maid,8320,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-13,5,tropicana,33600,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-13,8,dominicks,6144,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-13,8,minute.maid,7168,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-13,8,tropicana,39040,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-20,2,dominicks,11776,0,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-20,2,minute.maid,72256,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-20,2,tropicana,5056,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-20,5,dominicks,4608,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-20,5,minute.maid,99904,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-20,5,tropicana,5376,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-20,8,dominicks,13632,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-20,8,minute.maid,216064,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-20,8,tropicana,4480,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-27,2,dominicks,11584,0,1.54,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-27,2,minute.maid,11520,0,2.11,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-27,2,tropicana,43584,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-02-27,5,dominicks,12672,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-27,5,minute.maid,6976,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-27,5,tropicana,54272,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-02-27,8,dominicks,9792,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-27,8,minute.maid,15040,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-02-27,8,tropicana,61760,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-05,2,dominicks,51264,1,1.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-05,2,minute.maid,5824,0,2.35,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-05,2,tropicana,25728,0,1.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-05,5,dominicks,48640,1,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-05,5,minute.maid,9984,0,2.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-05,5,tropicana,33600,0,1.79,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-05,8,dominicks,86912,1,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-05,8,minute.maid,11840,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-05,8,tropicana,15360,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-12,2,dominicks,14976,0,1.44,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-12,2,minute.maid,19392,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-12,2,tropicana,31808,0,1.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-12,5,dominicks,13248,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-12,5,minute.maid,32832,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-12,5,tropicana,24448,0,1.79,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-12,8,dominicks,24512,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-12,8,minute.maid,25472,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-12,8,tropicana,54976,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-19,2,dominicks,30784,0,1.59,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-19,2,minute.maid,9536,0,2.1,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-19,2,tropicana,20736,0,1.91,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-19,5,dominicks,29248,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-19,5,minute.maid,8128,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-19,5,tropicana,22784,0,1.79,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-19,8,dominicks,58048,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-19,8,minute.maid,16384,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-19,8,tropicana,34368,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-26,2,dominicks,12480,0,1.6,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-26,2,minute.maid,5312,0,2.28,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-26,2,tropicana,15168,0,2.81,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-03-26,5,dominicks,4608,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-26,5,minute.maid,6464,0,2.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-26,5,tropicana,19008,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-03-26,8,dominicks,13952,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-26,8,minute.maid,20480,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-03-26,8,tropicana,10752,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
1992-04-02,2,dominicks,3264,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-04-02,2,minute.maid,14528,1,1.9,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-04-02,2,tropicana,28096,1,2.5,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
1992-04-02,5,dominicks,3136,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-04-02,5,minute.maid,36800,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-04-02,5,tropicana,15808,1,2.5,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
1992-04-02,8,dominicks,15168,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-04-02,8,minute.maid,34688,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-04-02,8,tropicana,20096,1,2.5,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-04-09,2,dominicks,8768,0,1.48,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-04-09,2,minute.maid,12416,0,2.12,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-04-09,2,tropicana,12416,0,2.58,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-04-09,5,dominicks,13184,0,1.58,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-04-09,5,minute.maid,12928,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-04-09,5,tropicana,14144,0,2.5,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-04-09,8,dominicks,14592,0,1.58,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-04-09,8,minute.maid,22400,0,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-04-09,8,tropicana,16192,0,2.5,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-04-16,2,dominicks,70848,1,1.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-04-16,2,minute.maid,5376,0,2.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-04-16,2,tropicana,5376,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-04-16,5,dominicks,67712,1,1.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-04-16,5,minute.maid,7424,0,2.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-04-16,5,tropicana,9600,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-04-16,8,dominicks,145088,1,1.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-04-16,8,minute.maid,7808,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-04-16,8,tropicana,6528,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-04-23,2,dominicks,18560,0,1.42,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-04-23,2,minute.maid,19008,1,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-04-23,2,tropicana,9792,0,2.67,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-04-23,5,dominicks,18880,0,1.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-04-23,5,minute.maid,34176,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-04-23,5,tropicana,10112,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-04-23,8,dominicks,43712,0,1.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-04-23,8,minute.maid,48064,1,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-04-23,8,tropicana,8320,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-04-30,2,dominicks,9152,0,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-04-30,2,minute.maid,3904,0,2.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-04-30,2,tropicana,16960,1,2.39,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-04-30,5,dominicks,6208,0,1.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-04-30,5,minute.maid,4160,0,2.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-04-30,5,tropicana,31872,1,2.24,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-04-30,8,dominicks,20608,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-04-30,8,minute.maid,7360,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-04-30,8,tropicana,30784,1,2.16,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-05-07,2,dominicks,9600,0,2.0,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-05-07,2,minute.maid,6336,0,2.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-05-07,2,tropicana,8320,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-05-07,5,dominicks,5952,0,1.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-05-07,5,minute.maid,5952,0,2.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-05-07,5,tropicana,9280,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-05-07,8,dominicks,18752,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-05-07,8,minute.maid,6272,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-05-07,8,tropicana,18048,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-05-14,2,dominicks,4800,0,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-05-14,2,minute.maid,5440,0,2.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-05-14,2,tropicana,6912,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-05-14,5,dominicks,4160,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-05-14,5,minute.maid,6528,0,2.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-05-14,5,tropicana,7680,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-05-14,8,dominicks,20160,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-05-14,8,minute.maid,6400,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-05-14,8,tropicana,12864,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-05-21,2,dominicks,9664,0,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-05-21,2,minute.maid,22400,1,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-05-21,2,tropicana,6976,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-05-21,5,dominicks,23488,0,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-05-21,5,minute.maid,30656,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-05-21,5,tropicana,8704,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-05-21,8,dominicks,18688,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-05-21,8,minute.maid,54592,1,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-05-21,8,tropicana,7168,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-05-28,2,dominicks,45568,0,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-05-28,2,minute.maid,3968,0,2.84,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-05-28,2,tropicana,7232,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-05-28,5,dominicks,60480,0,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-05-28,5,minute.maid,6656,0,2.66,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-05-28,5,tropicana,9920,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-05-28,8,dominicks,133824,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-05-28,8,minute.maid,8128,0,2.39,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-05-28,8,tropicana,9024,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-06-04,2,dominicks,20992,0,1.74,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-06-04,2,minute.maid,3264,0,2.89,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-06-04,2,tropicana,51520,1,2.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-06-04,5,dominicks,20416,0,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-06-04,5,minute.maid,4416,0,2.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-06-04,5,tropicana,91968,1,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-06-04,8,dominicks,63488,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-06-04,8,minute.maid,4928,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-06-04,8,tropicana,84992,1,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-06-11,2,dominicks,6592,0,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-06-11,2,minute.maid,4352,0,2.89,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-06-11,2,tropicana,22272,0,2.21,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-06-11,5,dominicks,6336,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-06-11,5,minute.maid,5696,0,2.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-06-11,5,tropicana,44096,0,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-06-11,8,dominicks,71040,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-06-11,8,minute.maid,5440,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-06-11,8,tropicana,14144,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-06-18,2,dominicks,4992,0,2.05,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-06-18,2,minute.maid,4480,0,2.89,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-06-18,2,tropicana,46144,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-06-25,2,dominicks,8064,0,1.24,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-06-25,2,minute.maid,3840,0,2.52,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-06-25,2,tropicana,4352,1,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-06-25,5,dominicks,1408,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-06-25,5,minute.maid,5696,0,2.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-06-25,5,tropicana,7296,1,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-06-25,8,dominicks,15360,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-06-25,8,minute.maid,5888,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-06-25,8,tropicana,7488,1,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-07-02,2,dominicks,7360,0,1.61,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-07-02,2,minute.maid,13312,1,2.0,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-07-02,2,tropicana,17280,0,2.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-07-02,5,dominicks,4672,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-07-02,5,minute.maid,39680,1,2.01,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-07-02,5,tropicana,12928,0,2.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-07-02,8,dominicks,17728,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-07-02,8,minute.maid,23872,1,2.02,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-07-02,8,tropicana,12352,0,2.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-07-09,2,dominicks,10048,0,1.4,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-07-09,2,minute.maid,3776,1,2.33,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-07-09,2,tropicana,5696,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-07-09,5,dominicks,19520,0,1.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-07-09,5,minute.maid,6208,1,2.19,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-07-09,5,tropicana,6848,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-07-09,8,dominicks,24256,0,1.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-07-09,8,minute.maid,6848,1,2.19,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-07-09,8,tropicana,5696,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-07-16,2,dominicks,10112,0,1.91,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-07-16,2,minute.maid,4800,0,2.89,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-07-16,2,tropicana,6848,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-07-16,5,dominicks,7872,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-07-16,5,minute.maid,7872,0,2.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-07-16,5,tropicana,8064,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-07-16,8,dominicks,19968,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-07-16,8,minute.maid,8192,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-07-16,8,tropicana,7680,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-07-23,2,dominicks,9152,0,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-07-23,2,minute.maid,24960,1,2.29,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-07-23,2,tropicana,4416,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-07-23,5,dominicks,5184,0,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-07-23,5,minute.maid,54528,1,2.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-07-23,5,tropicana,4992,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-07-23,8,dominicks,15936,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-07-23,8,minute.maid,55040,1,2.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-07-23,8,tropicana,5440,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-07-30,2,dominicks,36288,1,1.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-07-30,2,minute.maid,4544,0,2.86,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-07-30,2,tropicana,4672,0,3.16,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-07-30,5,dominicks,42240,1,1.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-07-30,5,minute.maid,6400,0,2.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-07-30,5,tropicana,7360,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-07-30,8,dominicks,76352,1,1.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-07-30,8,minute.maid,6528,0,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-07-30,8,tropicana,5632,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-08-06,2,dominicks,3776,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-08-06,2,minute.maid,3968,1,2.81,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-08-06,2,tropicana,7168,1,3.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-08-06,5,dominicks,6592,1,1.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-08-06,5,minute.maid,5888,1,2.65,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-08-06,5,tropicana,8384,1,2.89,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-08-06,8,dominicks,17408,1,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-08-06,8,minute.maid,6208,1,2.45,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-08-06,8,tropicana,8960,1,2.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-08-13,2,dominicks,3328,0,1.97,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-08-13,2,minute.maid,49600,1,1.99,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-08-13,2,tropicana,5056,0,3.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-08-13,5,dominicks,2112,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-08-13,5,minute.maid,56384,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-08-13,5,tropicana,8832,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-08-13,8,dominicks,17536,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-08-13,8,minute.maid,94720,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-08-13,8,tropicana,6080,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-08-20,2,dominicks,13824,0,1.36,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-08-20,2,minute.maid,23488,1,1.94,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-08-20,2,tropicana,13376,1,2.79,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-08-20,5,dominicks,21248,0,1.79,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-08-20,5,minute.maid,27072,1,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-08-20,5,tropicana,17728,1,2.79,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-08-20,8,dominicks,31232,0,1.59,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-08-20,8,minute.maid,55552,1,1.99,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-08-20,8,tropicana,8576,1,2.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-08-27,2,dominicks,9024,0,1.19,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-08-27,2,minute.maid,19008,0,1.69,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-08-27,2,tropicana,8128,0,2.75,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-08-27,5,dominicks,1856,0,1.29,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-08-27,5,minute.maid,3840,0,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-08-27,5,tropicana,9600,0,2.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-08-27,8,dominicks,19200,0,1.29,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-08-27,8,minute.maid,18688,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-08-27,8,tropicana,8000,0,2.89,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-09-03,2,dominicks,2048,0,2.09,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-09-03,2,minute.maid,11584,0,1.81,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-09-03,2,tropicana,19456,1,2.49,0.232864734,0.248934934,10.55320518,0.463887065,0.103953406,0.114279949,0.303585347,2.110122129,1.142857143,1.927279669,0.37692661299999997
|
||||
1992-09-03,5,dominicks,3712,0,1.99,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-09-03,5,minute.maid,6144,0,1.69,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-09-03,5,tropicana,25664,1,2.49,0.117368032,0.32122573,10.92237097,0.535883355,0.103091585,0.053875277,0.410568032,3.801997814,0.681818182,1.600573425,0.736306837
|
||||
1992-09-03,8,dominicks,12800,0,1.79,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-09-03,8,minute.maid,14656,0,1.69,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
1992-09-03,8,tropicana,21760,1,2.49,0.252394035,0.095173274,10.59700966,0.054227156,0.131749698,0.035243328,0.283074736,2.636332801,1.5,2.905384316,0.641015947
|
||||
|
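A quick way to sanity-check the rows above — a minimal sketch only: the file name dominicks_oj.csv and the column names are illustrative assumptions, since the data file's header row is not visible in this excerpt.

# Sketch: load and reshape the weekly orange-juice sales rows above.
# File name and column names are assumptions, not taken from this diff.
import pandas as pd

cols = ["week_starting", "store", "brand", "quantity", "advert", "price"]
df = pd.read_csv("dominicks_oj.csv", header=None)
df = df.iloc[:, : len(cols)]
df.columns = cols
df["week_starting"] = pd.to_datetime(df["week_starting"])

# One series per (store, brand) pair, matching the grain of the rows above.
weekly = df.pivot_table(index="week_starting", columns=["store", "brand"], values="quantity")
print(weekly.head())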
@@ -0,0 +1,170 @@
import argparse
from datetime import datetime
import os
import uuid

import joblib  # sklearn.externals.joblib is deprecated; use the standalone package
import numpy as np

from azureml.data.dataset_factory import TabularDatasetFactory
from azureml.core import Run, Dataset, Model

# torch is only needed when the registered model is a TCNForecaster (.pt file).
try:
    import torch

    _torch_present = True
except ImportError:
    _torch_present = False


def infer_forecasting_dataset_tcn(
    X_test,
    y_test,
    model,
    output_path,
    output_dataset_name="results",
):
    """Run the forecast and persist the results as a registered dataset and a CSV."""
    # df_all carries the predictions merged onto the test rows.
    y_pred, df_all = model.forecast(X_test, y_test)

    run = Run.get_context()

    TabularDatasetFactory.register_pandas_dataframe(
        df_all,
        target=(
            run.experiment.workspace.get_default_datastore(),
            datetime.now().strftime("%Y-%m-%d-") + str(uuid.uuid4())[:6],
        ),
        name=output_dataset_name,
    )
    df_all.to_csv(os.path.join(output_path, output_dataset_name + ".csv"), index=False)


def map_location_cuda(storage, loc):
    return storage.cuda()


def get_model(model_path, model_file_name):
    """Load a forecast-TCN torch model from .pt or a scikit-learn pipeline from .pkl."""
    model_full_path = os.path.join(model_path, model_file_name)
    print(model_full_path)
    if model_file_name.endswith("pt"):
        # Load the forecast-TCN torch model.
        assert _torch_present, "Loading a DNN model requires torch to be installed."
        map_location = map_location_cuda if torch.cuda.is_available() else "cpu"
        with open(model_full_path, "rb") as fh:
            fitted_model = torch.load(fh, map_location=map_location)
    else:
        # Load the sklearn pipeline.
        fitted_model = joblib.load(model_full_path)
    return fitted_model


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        type=str,
        dest="model_name",
        help="Name of the registered model to load",
    )
    parser.add_argument(
        "--output_dataset_name",
        type=str,
        dest="output_dataset_name",
        default="results",
        help="Dataset name under which to register the predictions",
    )
    parser.add_argument(
        "--target_column_name",
        type=str,
        dest="target_column_name",
        help="The target column name",
    )
    parser.add_argument(
        "--test_dataset_name",
        type=str,
        dest="test_dataset_name",
        default="results",
        help="Name of the registered test dataset",
    )
    parser.add_argument(
        "--output_path",
        type=str,
        dest="output_path",
        default="results",
        help="The output path",
    )
    return parser.parse_args()


def get_data(run, target_column_name, test_dataset_name):
    """Fetch the test dataset by name; NaN targets mark rows to be forecast."""
    test_dataset = Dataset.get_by_name(run.experiment.workspace, test_dataset_name)
    test_df = test_dataset.to_pandas_dataframe()
    if target_column_name in test_df:
        y_test = test_df.pop(target_column_name)
    else:
        y_test = np.full(test_df.shape[0], np.nan)
    return test_df, y_test


def get_model_filename(run, model_name, model_path):
    """Work out whether the registered model artifact is model.pkl or model.pt."""
    model = Model(run.experiment.workspace, model_name)
    if "model_file_name" in model.tags:
        return model.tags["model_file_name"]
    is_pkl = True
    if model.tags.get("algorithm") == "TCNForecaster" or os.path.exists(
        os.path.join(model_path, "model.pt")
    ):
        is_pkl = False
    return "model.pkl" if is_pkl else "model.pt"


if __name__ == "__main__":
    run = Run.get_context()

    args = get_args()
    model_name = args.model_name
    output_dataset_name = args.output_dataset_name
    test_dataset_name = args.test_dataset_name
    target_column_name = args.target_column_name
    print("args passed are:")
    print(model_name)
    print(test_dataset_name)
    print(output_dataset_name)
    print(target_column_name)

    model_path = Model.get_model_path(model_name)
    model_file_name = get_model_filename(run, model_name, model_path)
    print(model_file_name)
    fitted_model = get_model(model_path, model_file_name)

    X_test_df, y_test = get_data(run, target_column_name, test_dataset_name)

    infer_forecasting_dataset_tcn(
        X_test_df, y_test, fitted_model, args.output_path, output_dataset_name
    )
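The script above is written to run as a pipeline step against a registered test dataset. A minimal sketch of wiring it up — the script file name, cluster name, and dataset/model names below are assumptions for illustration:

# Sketch only: submit the inference script above as a pipeline step.
from azureml.core import Workspace, Experiment
from azureml.pipeline.core import Pipeline
from azureml.pipeline.steps import PythonScriptStep

ws = Workspace.from_config()
infer_step = PythonScriptStep(
    name="infer-forecast",
    script_name="infer.py",        # assumed file name for the script above
    source_directory="scripts",
    compute_target="cpu-cluster",  # assumed cluster name
    arguments=[
        "--model_name", "oj-forecaster",
        "--test_dataset_name", "oj-test",
        "--target_column_name", "quantity",
        "--output_dataset_name", "results",
        "--output_path", "outputs",
    ],
    allow_reuse=False,
)
Experiment(ws, "forecast-inference").submit(Pipeline(ws, steps=[infer_step]))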
@@ -0,0 +1,64 @@
import argparse

from azureml.core import Dataset, Workspace
from azureml.core.model import Model
from azureml.core.run import Run, _OfflineRun
import azureml.automl.core.shared.constants as constants
from azureml.train.automl.run import AutoMLRun


def get_best_automl_run(pipeline_run):
    """Find the AutoML step among the pipeline children and return its best child run."""
    all_children = list(pipeline_run.get_children())
    automl_step = [
        c for c in all_children if c.properties.get("runTemplate") == "AutoML"
    ]
    for c in all_children:
        print(c, c.properties)
    assert automl_step, "No AutoML step was found in the pipeline run."
    automlrun = AutoMLRun(pipeline_run.experiment, automl_step[0].id)
    return automlrun.get_best_child()


def get_model_path(model_artifact_path):
    """Return the file-name portion of an 'artifact_dir/file_name' path."""
    return model_artifact_path.split("/")[1]


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name")
    parser.add_argument("--model_path")
    parser.add_argument("--ds_name")
    args = parser.parse_args()

    print("Argument 1(model_name): %s" % args.model_name)
    print("Argument 2(model_path): %s" % args.model_path)
    print("Argument 3(ds_name): %s" % args.ds_name)

    run = Run.get_context()
    # Fall back to the local workspace config when the script runs outside AzureML.
    if isinstance(run, _OfflineRun):
        ws = Workspace.from_config()
    else:
        ws = run.experiment.workspace

    train_ds = Dataset.get_by_name(ws, args.ds_name)
    datasets = [(Dataset.Scenario.TRAINING, train_ds)]

    # Register the best model together with its training dataset.
    best_run = get_best_automl_run(run.parent)
    model_artifact_path = best_run.properties[constants.PROPERTY_KEY_OF_MODEL_PATH]
    algo = best_run.properties.get("run_algorithm")
    model_artifact_dir = model_artifact_path.split("/")[0]
    model_file_name = get_model_path(model_artifact_path)
    model = best_run.register_model(
        args.model_name,
        model_path=model_artifact_dir,
        datasets=datasets,
        tags={"algorithm": algo, "model_file_name": model_file_name},
    )

    print("Registered version {0} of model {1}".format(model.version, model.name))
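The tags written here are exactly what get_model_filename in the inference script reads back, which keeps the two scripts decoupled from the model format. A quick check after registration — the model name below is an assumption:

# Sketch: inspect the tags the inference script relies on.
from azureml.core import Workspace
from azureml.core.model import Model

ws = Workspace.from_config()
model = Model(ws, name="oj-forecaster")  # assumed model name
print(model.version, model.tags.get("algorithm"), model.tags.get("model_file_name"))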
@@ -56,16 +56,18 @@
 "from statsmodels.graphics.tsaplots import plot_acf, plot_pacf\n",
 "import matplotlib.pyplot as plt\n",
 "from pandas.plotting import register_matplotlib_converters\n",
 "\n",
 "register_matplotlib_converters()  # fixes the future warning issue\n",
 "\n",
 "from helper_functions import unit_root_test_wrapper\n",
 "from statsmodels.tools.sm_exceptions import InterpolationWarning\n",
-"warnings.simplefilter('ignore', InterpolationWarning)\n",
-"\n",
+"warnings.simplefilter(\"ignore\", InterpolationWarning)\n",
+"\n",
+"\n",
 "# set printing options\n",
-"pd.set_option('display.max_columns', 500)\n",
-"pd.set_option('display.width', 1000)"
+"pd.set_option(\"display.max_columns\", 500)\n",
+"pd.set_option(\"display.width\", 1000)"
 ]
 },
 {
@@ -75,15 +77,15 @@
 "outputs": [],
 "source": [
 "# load data\n",
-"main_data_loc = 'data'\n",
-"train_file_name = 'S4248SM144SCEN.csv'\n",
+"main_data_loc = \"data\"\n",
+"train_file_name = \"S4248SM144SCEN.csv\"\n",
 "\n",
-"TARGET_COLNAME = 'S4248SM144SCEN'\n",
-"TIME_COLNAME = 'observation_date'\n",
-"COVID_PERIOD_START = '2020-03-01'\n",
+"TARGET_COLNAME = \"S4248SM144SCEN\"\n",
+"TIME_COLNAME = \"observation_date\"\n",
+"COVID_PERIOD_START = \"2020-03-01\"\n",
 "\n",
 "df = pd.read_csv(os.path.join(main_data_loc, train_file_name))\n",
-"df[TIME_COLNAME] = pd.to_datetime(df[TIME_COLNAME], format='%Y-%m-%d')\n",
+"df[TIME_COLNAME] = pd.to_datetime(df[TIME_COLNAME], format=\"%Y-%m-%d\")\n",
 "df.sort_values(by=TIME_COLNAME, inplace=True)\n",
 "df.set_index(TIME_COLNAME, inplace=True)\n",
 "df.head(2)"
@@ -98,7 +100,7 @@
 "# plot the entire dataset\n",
 "fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
 "ax.plot(df)\n",
-"ax.title.set_text('Original Data Series')\n",
+"ax.title.set_text(\"Original Data Series\")\n",
 "locs, labels = plt.xticks()\n",
 "plt.xticks(rotation=45)"
 ]
@@ -119,7 +121,7 @@
 "# plot the entire dataset in first differences\n",
 "fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
 "ax.plot(df.diff().dropna())\n",
-"ax.title.set_text('Data in first differences')\n",
+"ax.title.set_text(\"Data in first differences\")\n",
 "locs, labels = plt.xticks()\n",
 "plt.xticks(rotation=45)"
 ]
@@ -153,7 +155,7 @@
 "# plot the entire dataset in first differences\n",
 "fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
 "ax.plot(df.diff().dropna())\n",
-"ax.title.set_text('Data in first differences')\n",
+"ax.title.set_text(\"Data in first differences\")\n",
 "locs, labels = plt.xticks()\n",
 "plt.xticks(rotation=45)"
 ]
@@ -176,8 +178,8 @@
 "\n",
 "# plot the entire dataset in first differences\n",
 "fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
-"ax.plot(df['2015-01-01':].diff().dropna())\n",
-"ax.title.set_text('Data in first differences')\n",
+"ax.plot(df[\"2015-01-01\":].diff().dropna())\n",
+"ax.title.set_text(\"Data in first differences\")\n",
 "locs, labels = plt.xticks()\n",
 "plt.xticks(rotation=45)"
 ]
@@ -245,10 +247,10 @@
 "source": [
 "# unit root tests\n",
 "test = unit_root_test_wrapper(df[TARGET_COLNAME])\n",
-"print('---------------', '\\n')\n",
-"print('Summary table', '\\n', test['summary'], '\\n')\n",
-"print('Is the {} series stationary?: {}'.format(TARGET_COLNAME, test['stationary']))\n",
-"print('---------------', '\\n')"
+"print(\"---------------\", \"\\n\")\n",
+"print(\"Summary table\", \"\\n\", test[\"summary\"], \"\\n\")\n",
+"print(\"Is the {} series stationary?: {}\".format(TARGET_COLNAME, test[\"stationary\"]))\n",
+"print(\"---------------\", \"\\n\")"
 ]
 },
 {
@@ -285,10 +287,10 @@
 "source": [
 "# unit root tests\n",
 "test = unit_root_test_wrapper(df[TARGET_COLNAME].diff().dropna())\n",
-"print('---------------', '\\n')\n",
-"print('Summary table', '\\n', test['summary'], '\\n')\n",
-"print('Is the {} series stationary?: {}'.format(TARGET_COLNAME, test['stationary']))\n",
-"print('---------------', '\\n')"
+"print(\"---------------\", \"\\n\")\n",
+"print(\"Summary table\", \"\\n\", test[\"summary\"], \"\\n\")\n",
+"print(\"Is the {} series stationary?: {}\".format(TARGET_COLNAME, test[\"stationary\"]))\n",
+"print(\"---------------\", \"\\n\")"
 ]
 },
 {
@@ -307,11 +309,11 @@
 "# plot original and stationary data\n",
 "fig = plt.figure(figsize=(10, 10))\n",
 "ax1 = fig.add_subplot(211)\n",
-"ax1.plot(df[TARGET_COLNAME], '-b')\n",
+"ax1.plot(df[TARGET_COLNAME], \"-b\")\n",
 "ax2 = fig.add_subplot(212)\n",
-"ax2.plot(df[TARGET_COLNAME].diff().dropna(), '-b')\n",
-"ax1.title.set_text('Original data')\n",
-"ax2.title.set_text('Data in first differences')"
+"ax2.plot(df[TARGET_COLNAME].diff().dropna(), \"-b\")\n",
+"ax1.title.set_text(\"Original data\")\n",
+"ax2.title.set_text(\"Data in first differences\")"
 ]
 },
 {
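The cells above call unit_root_test_wrapper from the sample's helper_functions module, which is not part of this diff. A rough stand-in built on statsmodels, assuming the usual ADF/KPSS pairing (ADF's null hypothesis is a unit root, KPSS's null is stationarity):

# Rough stand-in for unit_root_test_wrapper (the helper itself is not in this diff).
from statsmodels.tsa.stattools import adfuller, kpss

def is_stationary(series, alpha=0.05):
    adf_p = adfuller(series.dropna())[1]             # H0: unit root
    kpss_p = kpss(series.dropna(), nlags="auto")[1]  # H0: stationary
    return adf_p < alpha and kpss_p > alpha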
@@ -51,7 +51,7 @@
 "from azureml.core.compute import AmlCompute\n",
 "from azureml.core.compute import ComputeTarget\n",
 "import matplotlib.pyplot as plt\n",
-"from helper_functions import (ts_train_test_split, compute_metrics)\n",
+"from helper_functions import ts_train_test_split, compute_metrics\n",
 "\n",
 "import azureml.core\n",
 "from azureml.core.workspace import Workspace\n",
@@ -61,8 +61,8 @@
 "\n",
 "# set printing options\n",
 "np.set_printoptions(precision=4, suppress=True, linewidth=100)\n",
-"pd.set_option('display.max_columns', 500)\n",
-"pd.set_option('display.width', 1000)"
+"pd.set_option(\"display.max_columns\", 500)\n",
+"pd.set_option(\"display.width\", 1000)"
 ]
 },
 {
@@ -85,23 +85,28 @@
 "found = False\n",
 "# Check if this compute target already exists in the workspace.\n",
 "cts = ws.compute_targets\n",
-"if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'AmlCompute':\n",
+"if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == \"AmlCompute\":\n",
 "    found = True\n",
-"    print('Found existing compute target.')\n",
+"    print(\"Found existing compute target.\")\n",
 "    compute_target = cts[amlcompute_cluster_name]\n",
 "\n",
 "if not found:\n",
-"    print('Creating a new compute target...')\n",
-"    provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\",\n",
-"                                                                max_nodes = 6)\n",
+"    print(\"Creating a new compute target...\")\n",
+"    provisioning_config = AmlCompute.provisioning_configuration(\n",
+"        vm_size=\"STANDARD_D2_V2\", max_nodes=6\n",
+"    )\n",
 "\n",
 "    # Create the cluster.\\n\",\n",
-"    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config)\n",
+"    compute_target = ComputeTarget.create(\n",
+"        ws, amlcompute_cluster_name, provisioning_config\n",
+"    )\n",
 "\n",
-"print('Checking cluster status...')\n",
+"print(\"Checking cluster status...\")\n",
 "# Can poll for a minimum number of nodes and for a specific timeout.\n",
 "# If no min_node_count is provided, it will use the scale settings for the cluster.\n",
-"compute_target.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20)"
+"compute_target.wait_for_completion(\n",
+"    show_output=True, min_node_count=None, timeout_in_minutes=20\n",
+")"
 ]
 },
 {
@@ -119,16 +124,18 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"main_data_loc = 'data'\n",
-"train_file_name = 'S4248SM144SCEN.csv'\n",
+"main_data_loc = \"data\"\n",
+"train_file_name = \"S4248SM144SCEN.csv\"\n",
 "\n",
 "TARGET_COLNAME = \"S4248SM144SCEN\"\n",
 "TIME_COLNAME = \"observation_date\"\n",
-"COVID_PERIOD_START = '2020-03-01' # start of the covid period. To be excluded from evaluation.\n",
+"COVID_PERIOD_START = (\n",
+"    \"2020-03-01\"  # start of the covid period. To be excluded from evaluation.\n",
+")\n",
 "\n",
 "# load data\n",
 "df = pd.read_csv(os.path.join(main_data_loc, train_file_name))\n",
-"df[TIME_COLNAME] = pd.to_datetime(df[TIME_COLNAME], format='%Y-%m-%d')\n",
+"df[TIME_COLNAME] = pd.to_datetime(df[TIME_COLNAME], format=\"%Y-%m-%d\")\n",
 "df.sort_values(by=TIME_COLNAME, inplace=True)\n",
 "\n",
 "# remove the Covid period\n",
@@ -202,24 +209,28 @@
 "source": [
 "# choose a name for the run history container in the workspace\n",
 "if isinstance(TARGET_LAGS, list):\n",
-"    TARGET_LAGS_STR = '-'.join(map(str, TARGET_LAGS)) if (len(TARGET_LAGS) > 0) else None\n",
+"    TARGET_LAGS_STR = (\n",
+"        \"-\".join(map(str, TARGET_LAGS)) if (len(TARGET_LAGS) > 0) else None\n",
+"    )\n",
 "else:\n",
 "    TARGET_LAGS_STR = TARGET_LAGS\n",
 "\n",
-"experiment_desc = 'diff-{}_lags-{}_STL-{}'.format(DIFFERENCE_SERIES, TARGET_LAGS_STR, STL_TYPE)\n",
-"experiment_name = 'alcohol_{}'.format(experiment_desc)\n",
+"experiment_desc = \"diff-{}_lags-{}_STL-{}\".format(\n",
+"    DIFFERENCE_SERIES, TARGET_LAGS_STR, STL_TYPE\n",
+")\n",
+"experiment_name = \"alcohol_{}\".format(experiment_desc)\n",
 "experiment = Experiment(ws, experiment_name)\n",
 "\n",
 "output = {}\n",
-"output['SDK version'] = azureml.core.VERSION\n",
-"output['Subscription ID'] = ws.subscription_id\n",
-"output['Workspace'] = ws.name\n",
-"output['SKU'] = ws.sku\n",
-"output['Resource Group'] = ws.resource_group\n",
-"output['Location'] = ws.location\n",
-"output['Run History Name'] = experiment_name\n",
-"pd.set_option('display.max_colwidth', -1)\n",
-"outputDf = pd.DataFrame(data = output, index = [''])\n",
+"output[\"SDK version\"] = azureml.core.VERSION\n",
+"output[\"Subscription ID\"] = ws.subscription_id\n",
+"output[\"Workspace\"] = ws.name\n",
+"output[\"SKU\"] = ws.sku\n",
+"output[\"Resource Group\"] = ws.resource_group\n",
+"output[\"Location\"] = ws.location\n",
+"output[\"Run History Name\"] = experiment_name\n",
+"pd.set_option(\"display.max_colwidth\", None)\n",
+"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
 "print(outputDf.T)"
 ]
 },
@@ -230,7 +241,7 @@
 "outputs": [],
 "source": [
 "# create output directory\n",
-"output_dir = 'experiment_output/{}'.format(experiment_desc)\n",
+"output_dir = \"experiment_output/{}\".format(experiment_desc)\n",
 "if not os.path.exists(output_dir):\n",
 "    os.makedirs(output_dir)"
 ]
@@ -257,15 +268,19 @@
 "# split the data into train and test set\n",
 "if DIFFERENCE_SERIES:\n",
 "    # generate train/inference sets using data in first differences\n",
-"    df_train, df_test = ts_train_test_split(df_input=df_delta,\n",
+"    df_train, df_test = ts_train_test_split(\n",
+"        df_input=df_delta,\n",
 "        n=FORECAST_HORIZON,\n",
 "        time_colname=TIME_COLNAME,\n",
-"        ts_id_colnames=TIME_SERIES_ID_COLNAMES)\n",
+"        ts_id_colnames=TIME_SERIES_ID_COLNAMES,\n",
+"    )\n",
 "else:\n",
-"    df_train, df_test = ts_train_test_split(df_input=df,\n",
+"    df_train, df_test = ts_train_test_split(\n",
+"        df_input=df,\n",
 "        n=FORECAST_HORIZON,\n",
 "        time_colname=TIME_COLNAME,\n",
-"        ts_id_colnames=TIME_SERIES_ID_COLNAMES)"
+"        ts_id_colnames=TIME_SERIES_ID_COLNAMES,\n",
+"    )"
 ]
 },
 {
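ts_train_test_split also lives in helper_functions and is not shown in this diff. A sketch of what a horizon-based split like this presumably does — hold out the last n periods of each series:

# Sketch (assumption: the helper holds out the last n rows per series).
def ts_train_test_split(df_input, n, time_colname, ts_id_colnames=None):
    df_sorted = df_input.sort_values(time_colname)
    if ts_id_colnames:
        grouped = df_sorted.groupby(ts_id_colnames, group_keys=False)
        train = grouped.apply(lambda g: g.iloc[:-n])
        test = grouped.apply(lambda g: g.iloc[-n:])
        return train, test
    return df_sorted.iloc[:-n], df_sorted.iloc[-n:]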
@@ -286,12 +301,27 @@
 "df_test.to_csv(\"test.csv\", index=False)\n",
 "\n",
 "datastore = ws.get_default_datastore()\n",
-"datastore.upload_files(files = ['./train.csv'], target_path = 'uni-recipe-dataset/tabular/', overwrite = True,show_progress = True)\n",
-"datastore.upload_files(files = ['./test.csv'], target_path = 'uni-recipe-dataset/tabular/', overwrite = True,show_progress = True)\n",
+"datastore.upload_files(\n",
+"    files=[\"./train.csv\"],\n",
+"    target_path=\"uni-recipe-dataset/tabular/\",\n",
+"    overwrite=True,\n",
+"    show_progress=True,\n",
+")\n",
+"datastore.upload_files(\n",
+"    files=[\"./test.csv\"],\n",
+"    target_path=\"uni-recipe-dataset/tabular/\",\n",
+"    overwrite=True,\n",
+"    show_progress=True,\n",
+")\n",
 "\n",
 "from azureml.core import Dataset\n",
-"train_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'uni-recipe-dataset/tabular/train.csv')])\n",
-"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'uni-recipe-dataset/tabular/test.csv')])\n",
+"\n",
+"train_dataset = Dataset.Tabular.from_delimited_files(\n",
+"    path=[(datastore, \"uni-recipe-dataset/tabular/train.csv\")]\n",
+")\n",
+"test_dataset = Dataset.Tabular.from_delimited_files(\n",
+"    path=[(datastore, \"uni-recipe-dataset/tabular/test.csv\")]\n",
+")\n",
 "\n",
 "# print the first 5 rows of the Dataset\n",
 "train_dataset.to_pandas_dataframe().reset_index(drop=True).head(5)"
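Datastore.upload_files works here, though newer azureml-core releases steer toward the FileDatasetFactory helper instead. A hedged alternative for the same upload, assuming an SDK version that ships Dataset.File.upload_directory:

# Alternative upload path; the target mirrors the cell above.
from azureml.core import Dataset

Dataset.File.upload_directory(
    src_dir=".",
    target=(datastore, "uni-recipe-dataset/tabular/"),
    pattern="*.csv",
    overwrite=True,
)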
@@ -311,17 +341,18 @@
 "outputs": [],
 "source": [
 "time_series_settings = {\n",
-"    'time_column_name': TIME_COLNAME,\n",
-"    'forecast_horizon': FORECAST_HORIZON,\n",
-"    'target_lags': TARGET_LAGS,\n",
-"    'use_stl': STL_TYPE,\n",
-"    'blocked_models': BLOCKED_MODELS,\n",
-"    'time_series_id_column_names': TIME_SERIES_ID_COLNAMES\n",
+"    \"time_column_name\": TIME_COLNAME,\n",
+"    \"forecast_horizon\": FORECAST_HORIZON,\n",
+"    \"target_lags\": TARGET_LAGS,\n",
+"    \"use_stl\": STL_TYPE,\n",
+"    \"blocked_models\": BLOCKED_MODELS,\n",
+"    \"time_series_id_column_names\": TIME_SERIES_ID_COLNAMES,\n",
 "}\n",
 "\n",
-"automl_config = AutoMLConfig(task='forecasting',\n",
-"                             debug_log='sample_experiment.log',\n",
-"                             primary_metric='normalized_root_mean_squared_error',\n",
+"automl_config = AutoMLConfig(\n",
+"    task=\"forecasting\",\n",
+"    debug_log=\"sample_experiment.log\",\n",
+"    primary_metric=\"normalized_root_mean_squared_error\",\n",
 "    experiment_timeout_minutes=20,\n",
 "    iteration_timeout_minutes=5,\n",
 "    enable_early_stopping=True,\n",
@@ -331,7 +362,8 @@
 "    verbosity=logging.INFO,\n",
 "    max_cores_per_iteration=-1,\n",
 "    compute_target=compute_target,\n",
-"    **time_series_settings)"
+"    **time_series_settings,\n",
+")"
 ]
 },
 {
@@ -355,8 +387,8 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Retrieve the best model\n",
|
||||
"Below we select the best model from all the training iterations using get_output method."
|
||||
"### Retrieve the Best Run details\n",
|
||||
"Below we retrieve the best Run object from among all the runs in the experiment."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -365,8 +397,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"best_run, fitted_model = remote_run.get_output()\n",
|
||||
"fitted_model.steps"
|
||||
"best_run = remote_run.get_best_child()\n",
|
||||
"best_run"
|
||||
]
|
||||
},
|
||||
{
|
||||
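The change above swaps get_output() for get_best_child(): the notebook no longer downloads and deserializes the fitted model just to identify the winner. A minimal sketch of inspecting the returned Run, assuming the remote_run object from the cells above:

    best_run = remote_run.get_best_child()
    print(best_run.id)
    print(best_run.properties.get("run_algorithm"))
    print(best_run.get_metrics())  # metrics logged by the winning iteration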
@@ -404,14 +436,17 @@
"outputs": [],
"source": [
"from run_forecast import run_remote_inference\n",
"remote_run = run_remote_inference(test_experiment=test_experiment, \n",
"\n",
"remote_run = run_remote_inference(\n",
" test_experiment=test_experiment,\n",
" compute_target=compute_target,\n",
" train_run=best_run,\n",
" test_dataset=test_dataset,\n",
" target_column_name=TARGET_COLNAME)\n",
" target_column_name=TARGET_COLNAME,\n",
")\n",
"remote_run.wait_for_completion(show_output=False)\n",
"\n",
"remote_run.download_file('outputs/predictions.csv', f'{output_dir}/predictions.csv')"
"remote_run.download_file(\"outputs/predictions.csv\", f\"{output_dir}/predictions.csv\")"
]
},
{
@@ -428,7 +463,7 @@
"metadata": {},
"outputs": [],
"source": [
"X_trans = pd.read_csv(f'{output_dir}/predictions.csv', parse_dates=[TIME_COLNAME])\n",
"X_trans = pd.read_csv(f\"{output_dir}/predictions.csv\", parse_dates=[TIME_COLNAME])\n",
"X_trans.head()"
]
},
@@ -442,13 +477,13 @@
"def convert_fcst_diff_to_levels(fcst, yt, df_orig):\n",
" \"\"\"Convert forecast from first differences to levels.\"\"\"\n",
" fcst = fcst.reset_index(drop=False, inplace=False)\n",
" fcst['predicted_level'] = fcst['predicted'].cumsum()\n",
" fcst['predicted_level'] = fcst['predicted_level'].astype(float) + float(yt)\n",
" fcst[\"predicted_level\"] = fcst[\"predicted\"].cumsum()\n",
" fcst[\"predicted_level\"] = fcst[\"predicted_level\"].astype(float) + float(yt)\n",
" # merge actuals\n",
" out = pd.merge(fcst,\n",
" df_orig[[TIME_COLNAME, TARGET_COLNAME]], \n",
" on=[TIME_COLNAME], how='inner')\n",
" out.rename(columns={TARGET_COLNAME: 'actual_level'}, inplace=True)\n",
" out = pd.merge(\n",
" fcst, df_orig[[TIME_COLNAME, TARGET_COLNAME]], on=[TIME_COLNAME], how=\"inner\"\n",
" )\n",
" out.rename(columns={TARGET_COLNAME: \"actual_level\"}, inplace=True)\n",
" return out"
]
},
@@ -461,13 +496,13 @@
"if DIFFERENCE_SERIES:\n",
" # convert forecast in differences to the levels\n",
" INFORMATION_SET_DATE = max(df_train[TIME_COLNAME])\n",
" YT = df.query('{} == @INFORMATION_SET_DATE'.format(TIME_COLNAME))[TARGET_COLNAME]\n",
" YT = df.query(\"{} == @INFORMATION_SET_DATE\".format(TIME_COLNAME))[TARGET_COLNAME]\n",
"\n",
" fcst_df = convert_fcst_diff_to_levels(fcst=X_trans, yt=YT, df_orig=df)\n",
"else:\n",
" fcst_df = X_trans.copy()\n",
" fcst_df['actual_level'] = y_test\n",
" fcst_df['predicted_level'] = y_predictions\n",
" fcst_df[\"actual_level\"] = y_test\n",
" fcst_df[\"predicted_level\"] = y_predictions\n",
"\n",
"del X_trans"
]
@@ -486,13 +521,11 @@
"outputs": [],
"source": [
"# compute metrics\n",
"metrics_df = compute_metrics(fcst_df=fcst_df,\n",
" metric_name=None,\n",
" ts_id_colnames=None)\n",
"metrics_df = compute_metrics(fcst_df=fcst_df, metric_name=None, ts_id_colnames=None)\n",
"# save output\n",
"metrics_file_name = '{}_metrics.csv'.format(experiment_name)\n",
"fcst_file_name = '{}_forecst.csv'.format(experiment_name)\n",
"plot_file_name = '{}_plot.pdf'.format(experiment_name)\n",
"metrics_file_name = \"{}_metrics.csv\".format(experiment_name)\n",
"fcst_file_name = \"{}_forecst.csv\".format(experiment_name)\n",
"plot_file_name = \"{}_plot.pdf\".format(experiment_name)\n",
"\n",
"metrics_df.to_csv(os.path.join(output_dir, metrics_file_name), index=True)\n",
"fcst_df.to_csv(os.path.join(output_dir, fcst_file_name), index=True)"
@@ -517,9 +550,9 @@
"\n",
"# generate and save plots\n",
"fig, ax = plt.subplots(dpi=180)\n",
"ax.plot(plot_df[TARGET_COLNAME], '-g', label='Historical')\n",
"ax.plot(fcst_df['actual_level'], '-b', label='Actual')\n",
"ax.plot(fcst_df['predicted_level'], '-r', label='Forecast')\n",
"ax.plot(plot_df[TARGET_COLNAME], \"-g\", label=\"Historical\")\n",
"ax.plot(fcst_df[\"actual_level\"], \"-b\", label=\"Actual\")\n",
"ax.plot(fcst_df[\"predicted_level\"], \"-r\", label=\"Forecast\")\n",
"ax.legend()\n",
"ax.set_title(\"Forecast vs Actuals\")\n",
"ax.set_xlabel(TIME_COLNAME)\n",

@@ -11,11 +11,14 @@ from sklearn.externals import joblib

parser = argparse.ArgumentParser()
parser.add_argument(
'--target_column_name', type=str, dest='target_column_name',
help='Target Column Name')
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
'--test_dataset', type=str, dest='test_dataset',
help='Test Dataset')
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
)

args = parser.parse_args()
target_column_name = args.target_column_name
@@ -27,20 +30,41 @@ ws = run.experiment.workspace
# get the input dataset by id
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)

X_test_df = test_dataset.drop_columns(columns=[target_column_name]).to_pandas_dataframe().reset_index(drop=True)
y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(columns=[target_column_name]).to_pandas_dataframe()
X_test = (
test_dataset.drop_columns(columns=[target_column_name])
.to_pandas_dataframe()
.reset_index(drop=True)
)
y_test_df = (
test_dataset.with_timestamp_columns(None)
.keep_columns(columns=[target_column_name])
.to_pandas_dataframe()
)

# generate forecast
fitted_model = joblib.load('model.pkl')
y_pred, X_trans = fitted_model.forecast(X_test_df)
fitted_model = joblib.load("model.pkl")
# Default quantile values below give the median forecast plus a 95% prediction interval
quantiles = [0.025, 0.5, 0.975]
predicted_column_name = "predicted"
PI = "prediction_interval"
fitted_model.quantiles = quantiles
pred_quantiles = fitted_model.forecast_quantiles(X_test)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
)
X_test[target_column_name] = y_test_df[target_column_name]
X_test[PI] = pred_quantiles[PI]
X_test[predicted_column_name] = pred_quantiles[0.5]
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = X_test[
X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
clean.rename(columns={target_column_name: "actual"}, inplace=True)

# rename target column
X_trans.reset_index(drop=False, inplace=True)
X_trans.rename(columns={TimeSeriesInternal.DUMMY_TARGET_COLUMN: 'predicted'}, inplace=True)
X_trans['actual'] = y_test_df[target_column_name].values

file_name = 'outputs/predictions.csv'
export_csv = X_trans.to_csv(file_name, header=True, index=False) # added Index
file_name = "outputs/predictions.csv"
export_csv = clean.to_csv(file_name, header=True, index=False)  # write predictions without the index

# Upload the predictions into artifacts
run.upload_file(name=file_name, path_or_stream=file_name)
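The script above writes one row per scored timestamp with the actuals, the median forecast, and the formatted 95% interval; the notebook cells earlier download that artifact locally. A short sketch of inspecting it, assuming the output_dir variable from the notebook:

    import pandas as pd

    # the notebook downloads the file the script uploads to its run artifacts
    preds = pd.read_csv(f"{output_dir}/predictions.csv")
    print(preds[["actual", "predicted", "prediction_interval"]].head())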
@@ -15,22 +15,25 @@ def adf_test(series, **kw):
:param series: series to test
:return: dictionary of results
"""
if 'lags' in kw.keys():
msg = 'Lag order of {} detected. Running the ADF test...'.format(str(kw['lags']))
if "lags" in kw.keys():
msg = "Lag order of {} detected. Running the ADF test...".format(
str(kw["lags"])
)
print(msg)
statistic, pval, critval, resstore = stattools.adfuller(series,
maxlag=kw['lags'],
autolag=kw['autolag'],
store=kw['store'])
statistic, pval, critval, resstore = stattools.adfuller(
series, maxlag=kw["lags"], autolag=kw["autolag"], store=kw["store"]
)
else:
statistic, pval, critval, resstore = stattools.adfuller(series,
autolag=kw['IC'],
store=kw['store'])
statistic, pval, critval, resstore = stattools.adfuller(
series, autolag=kw["IC"], store=kw["store"]
)

output = {'statistic': statistic,
'pval': pval,
'critical': critval,
'resstore': resstore}
output = {
"statistic": statistic,
"pval": pval,
"critical": critval,
"resstore": resstore,
}
return output
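A usage sketch for adf_test, assuming a pandas Series y; the keyword settings mirror those passed by unit_root_test_wrapper further down. A p-value below 0.05 rejects the unit-root null:

    res = adf_test(y, IC="AIC", store=True)
    print(res["statistic"], res["pval"])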
@@ -41,22 +44,23 @@ def kpss_test(series, **kw):
:param series: series to test
:return: dictionary of results
"""
if kw['store']:
statistic, p_value, critical_values, rstore = stattools.kpss(series,
regression=kw['reg_type'],
lags=kw['lags'],
store=kw['store'])
if kw["store"]:
statistic, p_value, critical_values, rstore = stattools.kpss(
series, regression=kw["reg_type"], nlags=kw["lags"], store=kw["store"]
)
else:
statistic, p_value, lags, critical_values = stattools.kpss(series,
regression=kw['reg_type'],
lags=kw['lags'])
output = {'statistic': statistic,
'pval': p_value,
'critical': critical_values,
'lags': rstore.lags if kw['store'] else lags}
statistic, p_value, lags, critical_values = stattools.kpss(
series, regression=kw["reg_type"], nlags=kw["lags"]
)
output = {
"statistic": statistic,
"pval": p_value,
"critical": critical_values,
"lags": rstore.lags if kw["store"] else lags,
}

if kw['store']:
output.update({'resstore': rstore})
if kw["store"]:
output.update({"resstore": rstore})
return output


@@ -75,9 +79,9 @@ def format_test_output(test_name, test_res, H0_unit_root=True):
If test failed (test_res is None), return empty dictionary.
"""
# Check if the test failed by trying to extract the test statistic
if test_name in ('ADF', 'KPSS'):
if test_name in ("ADF", "KPSS"):
try:
test_res['statistic']
test_res["statistic"]
except BaseException:
test_res = None
else:
@@ -90,32 +94,32 @@ def format_test_output(test_name, test_res, H0_unit_root=True):
return {}

# extract necessary information
if test_name in ('ADF', 'KPSS'):
statistic = test_res['statistic']
crit_val = test_res['critical']['5%']
p_val = test_res['pval']
lags = test_res['resstore'].usedlag if test_name == 'ADF' else test_res['lags']
if test_name in ("ADF", "KPSS"):
statistic = test_res["statistic"]
crit_val = test_res["critical"]["5%"]
p_val = test_res["pval"]
lags = test_res["resstore"].usedlag if test_name == "ADF" else test_res["lags"]
else:
statistic = test_res.stat
crit_val = test_res.critical_values['5%']
crit_val = test_res.critical_values["5%"]
p_val = test_res.pvalue
lags = test_res.lags

if H0_unit_root:
H0 = 'The process is non-stationary'
H0 = "The process is non-stationary"
stationary = "yes" if p_val < 0.05 else "not"
else:
H0 = 'The process is stationary'
H0 = "The process is stationary"
stationary = "yes" if p_val > 0.05 else "not"

out = {
'test_name': test_name,
'statistic': statistic,
'crit_val': crit_val,
'p_val': p_val,
'lags': int(lags),
'stationary': stationary,
'Null Hypothesis': H0
"test_name": test_name,
"statistic": statistic,
"crit_val": crit_val,
"p_val": p_val,
"lags": int(lags),
"stationary": stationary,
"Null Hypothesis": H0,
}
return out

@@ -136,22 +140,15 @@ def unit_root_test_wrapper(series, lags=None):
:return: dictionary of summary table for all tests and final decision on stationary vs non-stationary
"""
# setting for ADF and KPSS tests
adf_settings = {
'IC': 'AIC',
'store': True
}
adf_settings = {"IC": "AIC", "store": True}

kpss_settings = {
'reg_type': 'c',
'lags': 'auto',
'store': True
}
kpss_settings = {"reg_type": "c", "lags": "auto", "store": True}

arch_test_settings = {} # settings for PP, ADF GLS and ZA tests
if lags is not None:
adf_settings.update({'lags': lags, 'autolag': None})
kpss_settings.update({'lags:': lags})
arch_test_settings = {'lags': lags}
adf_settings.update({"lags": lags, "autolag": None})
kpss_settings.update({"lags": lags})
arch_test_settings = {"lags": lags}
# Run individual tests
adf = adf_test(series, **adf_settings) # ADF test
kpss = kpss_test(series, **kpss_settings) # KPSS test
@@ -160,14 +157,26 @@ def unit_root_test_wrapper(series, lags=None):
za = unitroot.ZivotAndrews(series, **arch_test_settings) # Zivot-Andrews test

# generate output table
adf_dict = format_test_output(test_name='ADF', test_res=adf, H0_unit_root=True)
kpss_dict = format_test_output(test_name='KPSS', test_res=kpss, H0_unit_root=False)
pp_dict = format_test_output(test_name='Philips Perron', test_res=pp, H0_unit_root=True)
adfgls_dict = format_test_output(test_name='ADF GLS', test_res=adfgls, H0_unit_root=True)
za_dict = format_test_output(test_name='Zivot-Andrews', test_res=za, H0_unit_root=True)
adf_dict = format_test_output(test_name="ADF", test_res=adf, H0_unit_root=True)
kpss_dict = format_test_output(test_name="KPSS", test_res=kpss, H0_unit_root=False)
pp_dict = format_test_output(
test_name="Phillips-Perron", test_res=pp, H0_unit_root=True
)
adfgls_dict = format_test_output(
test_name="ADF GLS", test_res=adfgls, H0_unit_root=True
)
za_dict = format_test_output(
test_name="Zivot-Andrews", test_res=za, H0_unit_root=True
)

test_dict = {'ADF': adf_dict, 'KPSS': kpss_dict, 'PP': pp_dict, 'ADF GLS': adfgls_dict, 'ZA': za_dict}
test_sum = pd.DataFrame.from_dict(test_dict, orient='index').reset_index(drop=True)
test_dict = {
"ADF": adf_dict,
"KPSS": kpss_dict,
"PP": pp_dict,
"ADF GLS": adfgls_dict,
"ZA": za_dict,
}
test_sum = pd.DataFrame.from_dict(test_dict, orient="index").reset_index(drop=True)

# decision based on the majority rule
if test_sum.shape[0] > 0:
@@ -176,9 +185,9 @@ def unit_root_test_wrapper(series, lags=None):
ratio = 1 # all tests fail, assume the series is stationary

# Majority rule. If the ratio is exactly 0.5, assume the series is non-stationary.
stationary = 'YES' if (ratio > 0.5) else 'NO'
stationary = "YES" if (ratio > 0.5) else "NO"

out = {'summary': test_sum, 'stationary': stationary}
out = {"summary": test_sum, "stationary": stationary}
return out
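A usage sketch for the wrapper, again assuming a pandas Series y. The summary table holds one row per test, and the verdict is a majority vote across ADF, KPSS, Phillips-Perron, ADF GLS, and Zivot-Andrews:

    res = unit_root_test_wrapper(y)
    print(res["summary"])     # one row per unit-root test
    print(res["stationary"])  # "YES" or "NO" by majority rule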
@@ -196,10 +205,12 @@ def ts_train_test_split(df_input, n, time_colname, ts_id_colnames=None):
ts_id_colnames = []
ts_id_colnames_original = ts_id_colnames.copy()
if len(ts_id_colnames) == 0:
ts_id_colnames = ['Grain']
df_input[ts_id_colnames[0]] = 'dummy'
ts_id_colnames = ["Grain"]
df_input[ts_id_colnames[0]] = "dummy"
# Sort by ascending time
df_grouped = (df_input.sort_values(time_colname).groupby(ts_id_colnames, group_keys=False))
df_grouped = df_input.sort_values(time_colname).groupby(
ts_id_colnames, group_keys=False
)
df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-n])
df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-n:])
# drop group column name if it was not originally provided
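A usage sketch for ts_train_test_split; the column names are illustrative only. The last n observations of every series end up in the test frame:

    df_train, df_test = ts_train_test_split(
        df_input=df, n=14, time_colname="date", ts_id_colnames=["store_id"]
    )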
@@ -221,30 +232,32 @@ def compute_metrics(fcst_df, metric_name=None, ts_id_colnames=None):
if ts_id_colnames is None:
ts_id_colnames = []
if len(ts_id_colnames) == 0:
ts_id_colnames = ['TS_ID']
fcst_df[ts_id_colnames[0]] = 'dummy'
ts_id_colnames = ["TS_ID"]
fcst_df[ts_id_colnames[0]] = "dummy"
metrics_list = []
for grain, df in fcst_df.groupby(ts_id_colnames):
try:
scores = scoring.score_regression(
y_test=df['actual_level'],
y_pred=df['predicted_level'],
metrics=list(constants.Metric.SCALAR_REGRESSION_SET))
y_test=df["actual_level"],
y_pred=df["predicted_level"],
metrics=list(constants.Metric.SCALAR_REGRESSION_SET),
)
except BaseException:
msg = '{}: metrics calculation failed.'.format(grain)
msg = "{}: metrics calculation failed.".format(grain)
print(msg)
scores = {}
one_grain_metrics_df = pd.DataFrame(list(scores.items()), columns=['metric_name', 'metric']).\
sort_values(['metric_name'])
one_grain_metrics_df = pd.DataFrame(
list(scores.items()), columns=["metric_name", "metric"]
).sort_values(["metric_name"])
one_grain_metrics_df.reset_index(inplace=True, drop=True)
if len(ts_id_colnames) < 2:
one_grain_metrics_df['grain'] = ts_id_colnames[0]
one_grain_metrics_df["grain"] = ts_id_colnames[0]
else:
one_grain_metrics_df['grain'] = "|".join(list(grain))
one_grain_metrics_df["grain"] = "|".join(list(grain))

metrics_list.append(one_grain_metrics_df)
# collect into a data frame
grain_metrics = pd.concat(metrics_list)
if metric_name is not None:
grain_metrics = grain_metrics.query('metric_name == @metric_name')
grain_metrics = grain_metrics.query("metric_name == @metric_name")
return grain_metrics
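A usage sketch for compute_metrics, assuming a forecast frame carrying the actual_level and predicted_level columns built in the notebook above:

    metrics_df = compute_metrics(
        fcst_df, metric_name="normalized_root_mean_squared_error", ts_id_colnames=None
    )
    print(metrics_df)  # one row per series grain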
@@ -3,36 +3,47 @@ import shutil
from azureml.core import ScriptRunConfig


def run_remote_inference(test_experiment, compute_target, train_run,
test_dataset, target_column_name, inference_folder='./forecast'):
def run_remote_inference(
test_experiment,
compute_target,
train_run,
test_dataset,
target_column_name,
inference_folder="./forecast",
):
# Create local directory to copy the model.pkl and forecasting_script.py files into.
# These files will be uploaded to and executed on the compute instance.
os.makedirs(inference_folder, exist_ok=True)
shutil.copy('forecasting_script.py', inference_folder)
shutil.copy("forecasting_script.py", inference_folder)

train_run.download_file('outputs/model.pkl',
os.path.join(inference_folder, 'model.pkl'))
train_run.download_file(
"outputs/model.pkl", os.path.join(inference_folder, "model.pkl")
)

inference_env = train_run.get_environment()

config = ScriptRunConfig(source_directory=inference_folder,
script='forecasting_script.py',
arguments=['--target_column_name',
config = ScriptRunConfig(
source_directory=inference_folder,
script="forecasting_script.py",
arguments=[
"--target_column_name",
target_column_name,
'--test_dataset',
test_dataset.as_named_input(test_dataset.name)],
"--test_dataset",
test_dataset.as_named_input(test_dataset.name),
],
compute_target=compute_target,
environment=inference_env)
environment=inference_env,
)

run = test_experiment.submit(config,
tags={'training_run_id':
train_run.id,
'run_algorithm':
train_run.properties['run_algorithm'],
'valid_score':
train_run.properties['score'],
'primary_metric':
train_run.properties['primary_metric']})
run = test_experiment.submit(
config,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)

run.log("run_algorithm", run.tags['run_algorithm'])
run.log("run_algorithm", run.tags["run_algorithm"])
return run
@@ -1,21 +1,5 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -90,16 +74,6 @@
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -109,18 +83,19 @@
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for experiment\n",
"experiment_name = 'automl-classification-ccard-local'\n",
"experiment_name = \"automl-classification-ccard-local\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Experiment Name\"] = experiment.name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -142,7 +117,7 @@
"data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv\"\n",
"dataset = Dataset.Tabular.from_delimited_files(data)\n",
"training_data, validation_data = dataset.random_split(percentage=0.8, seed=223)\n",
"label_column_name = 'Class'"
"label_column_name = \"Class\""
]
},
{
@@ -168,22 +143,25 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"name": "enable-ensemble"
},
"outputs": [],
"source": [
"automl_settings = {\n",
" \"n_cross_validations\": 3,\n",
" \"primary_metric\": 'AUC_weighted',\n",
" \"primary_metric\": \"average_precision_score_weighted\",\n",
" \"experiment_timeout_hours\": 0.25, # This is a time limit for testing purposes, remove it for real use cases, this will drastically limit ability to find the best model possible\n",
" \"verbosity\": logging.INFO,\n",
" \"enable_stack_ensemble\": False\n",
" \"enable_stack_ensemble\": False,\n",
"}\n",
"\n",
"automl_config = AutoMLConfig(task = 'classification',\n",
" debug_log = 'automl_errors.log',\n",
"automl_config = AutoMLConfig(\n",
" task=\"classification\",\n",
" debug_log=\"automl_errors.log\",\n",
" training_data=training_data,\n",
" label_column_name=label_column_name,\n",
" **automl_settings\n",
" **automl_settings,\n",
")"
]
},
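The primary metric above moves from AUC_weighted to average_precision_score_weighted, which holds up better on the heavily imbalanced credit-card fraud labels. A quick sketch for checking that imbalance, using the names from the cells above:

    # fraction of fraud vs. non-fraud rows in the training split
    print(training_data.to_pandas_dataframe()["Class"].value_counts(normalize=True))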
@@ -240,6 +218,7 @@
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"\n",
"RunDetails(local_run).show()"
]
},
@@ -288,8 +267,12 @@
"outputs": [],
"source": [
"# convert the test data to dataframe\n",
"X_test_df = validation_data.drop_columns(columns=[label_column_name]).to_pandas_dataframe()\n",
"y_test_df = validation_data.keep_columns(columns=[label_column_name], validate=True).to_pandas_dataframe()"
"X_test_df = validation_data.drop_columns(\n",
" columns=[label_column_name]\n",
").to_pandas_dataframe()\n",
"y_test_df = validation_data.keep_columns(\n",
" columns=[label_column_name], validate=True\n",
").to_pandas_dataframe()"
]
},
{
@@ -324,19 +307,25 @@
"import itertools\n",
"\n",
"cf = confusion_matrix(y_test_df.values, y_pred)\n",
"plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest')\n",
"plt.imshow(cf, cmap=plt.cm.Blues, interpolation=\"nearest\")\n",
"plt.colorbar()\n",
"plt.title('Confusion Matrix')\n",
"plt.xlabel('Predicted')\n",
"plt.ylabel('Actual')\n",
"class_labels = ['False','True']\n",
"plt.title(\"Confusion Matrix\")\n",
"plt.xlabel(\"Predicted\")\n",
"plt.ylabel(\"Actual\")\n",
"class_labels = [\"False\", \"True\"]\n",
"tick_marks = np.arange(len(class_labels))\n",
"plt.xticks(tick_marks, class_labels)\n",
"plt.yticks([-0.5,0,1,1.5],['','False','True',''])\n",
"plt.yticks([-0.5, 0, 1, 1.5], [\"\", \"False\", \"True\", \"\"])\n",
"# plotting text value inside cells\n",
"thresh = cf.max() / 2.\n",
"thresh = cf.max() / 2.0\n",
"for i, j in itertools.product(range(cf.shape[0]), range(cf.shape[1])):\n",
" plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black')\n",
" plt.text(\n",
" j,\n",
" i,\n",
" format(cf[i, j], \"d\"),\n",
" horizontalalignment=\"center\",\n",
" color=\"white\" if cf[i, j] > thresh else \"black\",\n",
" )\n",
"plt.show()"
]
},
@@ -363,7 +352,10 @@
"client = ExplanationClient.from_run(best_run)\n",
"engineered_explanations = client.download_model_explanation(raw=False)\n",
"print(engineered_explanations.get_feature_importance_dict())\n",
"print(\"You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + best_run.get_portal_url())"
"print(\n",
" \"You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\"\n",
" + best_run.get_portal_url()\n",
")"
]
},
{
@@ -382,7 +374,10 @@
"source": [
"raw_explanations = client.download_model_explanation(raw=True)\n",
"print(raw_explanations.get_feature_importance_dict())\n",
"print(\"You can visualize the raw explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + best_run.get_portal_url())"
"print(\n",
" \"You can visualize the raw explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\"\n",
" + best_run.get_portal_url()\n",
")"
]
},
{
@@ -398,7 +393,7 @@
"metadata": {},
"outputs": [],
"source": [
"automl_run, fitted_model = local_run.get_output(metric='accuracy')"
"automl_run, fitted_model = local_run.get_output(metric=\"accuracy\")"
]
},
{
@@ -432,12 +427,18 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations\n",
"from azureml.train.automl.runtime.automl_explain_utilities import (\n",
" automl_setup_model_explanations,\n",
")\n",
"\n",
"automl_explainer_setup_obj = automl_setup_model_explanations(fitted_model, X=X_train, \n",
" X_test=X_test, y=y_train, \n",
" task='classification',\n",
" automl_run=automl_run)"
"automl_explainer_setup_obj = automl_setup_model_explanations(\n",
" fitted_model,\n",
" X=X_train,\n",
" X_test=X_test,\n",
" y=y_train,\n",
" task=\"classification\",\n",
" automl_run=automl_run,\n",
")"
]
},
{
@@ -455,13 +456,18 @@
"outputs": [],
"source": [
"from azureml.interpret.mimic_wrapper import MimicWrapper\n",
"explainer = MimicWrapper(ws, automl_explainer_setup_obj.automl_estimator,\n",
"\n",
"explainer = MimicWrapper(\n",
" ws,\n",
" automl_explainer_setup_obj.automl_estimator,\n",
" explainable_model=automl_explainer_setup_obj.surrogate_model,\n",
" init_dataset=automl_explainer_setup_obj.X_transform, run=automl_explainer_setup_obj.automl_run,\n",
" init_dataset=automl_explainer_setup_obj.X_transform,\n",
" run=automl_explainer_setup_obj.automl_run,\n",
" features=automl_explainer_setup_obj.engineered_feature_names,\n",
" feature_maps=[automl_explainer_setup_obj.feature_map],\n",
" classes=automl_explainer_setup_obj.classes,\n",
" explainer_kwargs=automl_explainer_setup_obj.surrogate_model_params)"
" explainer_kwargs=automl_explainer_setup_obj.surrogate_model_params,\n",
")"
]
},
{
@@ -479,9 +485,14 @@
"outputs": [],
"source": [
"# Compute the engineered explanations\n",
"engineered_explanations = explainer.explain(['local', 'global'], eval_dataset=automl_explainer_setup_obj.X_test_transform)\n",
"engineered_explanations = explainer.explain(\n",
" [\"local\", \"global\"], eval_dataset=automl_explainer_setup_obj.X_test_transform\n",
")\n",
"print(engineered_explanations.get_feature_importance_dict())\n",
"print(\"You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + automl_run.get_portal_url())"
"print(\n",
" \"You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\"\n",
" + automl_run.get_portal_url()\n",
")"
]
},
{
@@ -499,12 +510,18 @@
"outputs": [],
"source": [
"# Compute the raw explanations\n",
"raw_explanations = explainer.explain(['local', 'global'], get_raw=True,\n",
"raw_explanations = explainer.explain(\n",
" [\"local\", \"global\"],\n",
" get_raw=True,\n",
" raw_feature_names=automl_explainer_setup_obj.raw_feature_names,\n",
" eval_dataset=automl_explainer_setup_obj.X_test_transform,\n",
" raw_eval_dataset=automl_explainer_setup_obj.X_test_raw)\n",
" raw_eval_dataset=automl_explainer_setup_obj.X_test_raw,\n",
")\n",
"print(raw_explanations.get_feature_importance_dict())\n",
"print(\"You can visualize the raw explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + automl_run.get_portal_url())"
"print(\n",
" \"You can visualize the raw explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\"\n",
" + automl_run.get_portal_url()\n",
")"
]
},
{
@@ -524,15 +541,17 @@
"import joblib\n",
"\n",
"# Initialize the ScoringExplainer\n",
"scoring_explainer = TreeScoringExplainer(explainer.explainer, feature_maps=[automl_explainer_setup_obj.feature_map])\n",
"scoring_explainer = TreeScoringExplainer(\n",
" explainer.explainer, feature_maps=[automl_explainer_setup_obj.feature_map]\n",
")\n",
"\n",
"# Pickle scoring explainer locally to './scoring_explainer.pkl'\n",
"scoring_explainer_file_name = 'scoring_explainer.pkl'\n",
"with open(scoring_explainer_file_name, 'wb') as stream:\n",
"scoring_explainer_file_name = \"scoring_explainer.pkl\"\n",
"with open(scoring_explainer_file_name, \"wb\") as stream:\n",
" joblib.dump(scoring_explainer, stream)\n",
"\n",
"# Upload the scoring explainer to the automl run\n",
"automl_run.upload_file('outputs/scoring_explainer.pkl', scoring_explainer_file_name)"
"automl_run.upload_file(\"outputs/scoring_explainer.pkl\", scoring_explainer_file_name)"
]
},
{
@@ -551,10 +570,12 @@
"outputs": [],
"source": [
"# Register trained automl model present in the 'outputs' folder in the artifacts\n",
"original_model = automl_run.register_model(model_name='automl_model', \n",
" model_path='outputs/model.pkl')\n",
"scoring_explainer_model = automl_run.register_model(model_name='scoring_explainer',\n",
" model_path='outputs/scoring_explainer.pkl')"
"original_model = automl_run.register_model(\n",
" model_name=\"automl_model\", model_path=\"outputs/model.pkl\"\n",
")\n",
"scoring_explainer_model = automl_run.register_model(\n",
" model_name=\"scoring_explainer\", model_path=\"outputs/scoring_explainer.pkl\"\n",
")"
]
},
{
@@ -575,7 +596,7 @@
"from azureml.automl.core.shared import constants\n",
"from azureml.core.environment import Environment\n",
"\n",
"automl_run.download_file(constants.CONDA_ENV_FILE_PATH, 'myenv.yml')\n",
"automl_run.download_file(constants.CONDA_ENV_FILE_PATH, \"myenv.yml\")\n",
"myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\n",
"myenv"
]
@@ -598,7 +619,9 @@
"import joblib\n",
"import pandas as pd\n",
"from azureml.core.model import Model\n",
"from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations\n",
"from azureml.train.automl.runtime.automl_explain_utilities import (\n",
" automl_setup_model_explanations,\n",
")\n",
"\n",
"\n",
"def init():\n",
@@ -607,28 +630,35 @@
"\n",
" # Retrieve the path to the model file using the model name\n",
" # Assume original model is named original_prediction_model\n",
" automl_model_path = Model.get_model_path('automl_model')\n",
" scoring_explainer_path = Model.get_model_path('scoring_explainer')\n",
" automl_model_path = Model.get_model_path(\"automl_model\")\n",
" scoring_explainer_path = Model.get_model_path(\"scoring_explainer\")\n",
"\n",
" automl_model = joblib.load(automl_model_path)\n",
" scoring_explainer = joblib.load(scoring_explainer_path)\n",
"\n",
"\n",
"def run(raw_data):\n",
" data = pd.read_json(raw_data, orient='records') \n",
" data = pd.read_json(raw_data, orient=\"records\")\n",
" # Make prediction\n",
" predictions = automl_model.predict(data)\n",
" # Setup for inferencing explanations\n",
" automl_explainer_setup_obj = automl_setup_model_explanations(automl_model,\n",
" X_test=data, task='classification')\n",
" automl_explainer_setup_obj = automl_setup_model_explanations(\n",
" automl_model, X_test=data, task=\"classification\"\n",
" )\n",
" # Retrieve model explanations for engineered explanations\n",
" engineered_local_importance_values = scoring_explainer.explain(automl_explainer_setup_obj.X_test_transform)\n",
" engineered_local_importance_values = scoring_explainer.explain(\n",
" automl_explainer_setup_obj.X_test_transform\n",
" )\n",
" # Retrieve model explanations for raw explanations\n",
" raw_local_importance_values = scoring_explainer.explain(automl_explainer_setup_obj.X_test_transform, get_raw=True)\n",
" raw_local_importance_values = scoring_explainer.explain(\n",
" automl_explainer_setup_obj.X_test_transform, get_raw=True\n",
" )\n",
" # You can return any data type as long as it is JSON-serializable\n",
" return {'predictions': predictions.tolist(),\n",
" 'engineered_local_importance_values': engineered_local_importance_values,\n",
" 'raw_local_importance_values': raw_local_importance_values}\n"
" return {\n",
" \"predictions\": predictions.tolist(),\n",
" \"engineered_local_importance_values\": engineered_local_importance_values,\n",
" \"raw_local_importance_values\": raw_local_importance_values,\n",
" }"
]
},
{
@@ -647,7 +677,7 @@
"source": [
"from azureml.core.model import InferenceConfig\n",
"\n",
"inf_config = InferenceConfig(entry_script='score.py', environment=myenv)"
"inf_config = InferenceConfig(entry_script=\"score.py\", environment=myenv)"
]
},
{
@@ -668,17 +698,17 @@
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your cluster.\n",
"aks_name = 'scoring-explain'\n",
"aks_name = \"scoring-explain\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
" aks_target = ComputeTarget(workspace=ws, name=aks_name)\n",
" print('Found existing cluster, use it.')\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" prov_config = AksCompute.provisioning_configuration(vm_size='STANDARD_D3_V2')\n",
" aks_target = ComputeTarget.create(workspace=ws, \n",
" name=aks_name,\n",
" provisioning_configuration=prov_config)\n",
" prov_config = AksCompute.provisioning_configuration(vm_size=\"STANDARD_D3_V2\")\n",
" aks_target = ComputeTarget.create(\n",
" workspace=ws, name=aks_name, provisioning_configuration=prov_config\n",
" )\n",
"aks_target.wait_for_completion(show_output=True)"
]
},
@@ -708,14 +738,16 @@
"metadata": {},
"outputs": [],
"source": [
"aks_service_name ='model-scoring-local-aks'\n",
"aks_service_name = \"model-scoring-local-aks\"\n",
"\n",
"aks_service = Model.deploy(workspace=ws,\n",
"aks_service = Model.deploy(\n",
" workspace=ws,\n",
" name=aks_service_name,\n",
" models=[scoring_explainer_model, original_model],\n",
" inference_config=inf_config,\n",
" deployment_config=aks_config,\n",
" deployment_target=aks_target)\n",
" deployment_target=aks_target,\n",
")\n",
"\n",
"aks_service.wait_for_deployment(show_output=True)\n",
"print(aks_service.state)"
@@ -752,18 +784,24 @@
"outputs": [],
"source": [
"# Serialize the first row of the test data into json\n",
"X_test_json = X_test_df[:1].to_json(orient='records')\n",
"X_test_json = X_test_df[:1].to_json(orient=\"records\")\n",
"print(X_test_json)\n",
"\n",
"# Call the service to get the predictions and the engineered and raw explanations\n",
"output = aks_service.run(X_test_json)\n",
"\n",
"# Print the predicted value\n",
"print('predictions:\\n{}\\n'.format(output['predictions']))\n",
"print(\"predictions:\\n{}\\n\".format(output[\"predictions\"]))\n",
"# Print the engineered feature importances for the predicted value\n",
"print('engineered_local_importance_values:\\n{}\\n'.format(output['engineered_local_importance_values']))\n",
"print(\n",
" \"engineered_local_importance_values:\\n{}\\n\".format(\n",
" output[\"engineered_local_importance_values\"]\n",
" )\n",
")\n",
"# Print the raw feature importances for the predicted value\n",
"print('raw_local_importance_values:\\n{}\\n'.format(output['raw_local_importance_values']))\n"
"print(\n",
" \"raw_local_importance_values:\\n{}\\n\".format(output[\"raw_local_importance_values\"])\n",
")"
]
},
{
@@ -1,21 +1,5 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -68,6 +52,7 @@
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import logging\n",
"\n",
"from matplotlib import pyplot as plt\n",
@@ -77,6 +62,7 @@
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"\n",
"from azureml.automl.core.featurization import FeaturizationConfig\n",
"from azureml.train.automl import AutoMLConfig\n",
"from azureml.core.dataset import Dataset"
@@ -89,16 +75,6 @@
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -108,17 +84,18 @@
"ws = Workspace.from_config()\n",
"\n",
"# Choose a name for the experiment.\n",
"experiment_name = 'automl-regression-hardware-explain'\n",
"experiment_name = \"automl-regression-hardware-explain\"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace Name'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace Name\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Experiment Name\"] = experiment.name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -151,12 +128,12 @@
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
" print('Found existing cluster, use it.')\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
" max_nodes=4)\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
" )\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
]
},
@@ -175,7 +152,7 @@
"metadata": {},
"outputs": [],
"source": [
"data = 'https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/machineData.csv'\n",
"data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/machineData.csv\"\n",
"\n",
"dataset = Dataset.Tabular.from_delimited_files(data)\n",
"\n",
@@ -184,12 +161,20 @@
"\n",
"\n",
"# Register the train dataset with your workspace\n",
"train_data.register(workspace = ws, name = 'machineData_train_dataset',\n",
" description = 'hardware performance training data',\n",
" create_new_version=True)\n",
"train_data.register(\n",
" workspace=ws,\n",
" name=\"machineData_train_dataset\",\n",
" description=\"hardware performance training data\",\n",
" create_new_version=True,\n",
")\n",
"\n",
"# Register the test dataset with your workspace\n",
"test_data.register(workspace = ws, name = 'machineData_test_dataset', description = 'hardware performance test data', create_new_version=True)\n",
"test_data.register(\n",
" workspace=ws,\n",
" name=\"machineData_test_dataset\",\n",
" description=\"hardware performance test data\",\n",
" create_new_version=True,\n",
")\n",
"\n",
"label = \"ERP\"\n",
"\n",
@@ -248,14 +233,18 @@
"outputs": [],
"source": [
"featurization_config = FeaturizationConfig()\n",
"featurization_config.blocked_transformers = ['LabelEncoder']\n",
"featurization_config.blocked_transformers = [\"LabelEncoder\"]\n",
"# featurization_config.drop_columns = ['MMIN']\n",
"featurization_config.add_column_purpose('MYCT', 'Numeric')\n",
"featurization_config.add_column_purpose('VendorName', 'CategoricalHash')\n",
"featurization_config.add_column_purpose(\"MYCT\", \"Numeric\")\n",
"featurization_config.add_column_purpose(\"VendorName\", \"CategoricalHash\")\n",
"# default strategy mean, add transformer param for 3 columns\n",
"featurization_config.add_transformer_params('Imputer', ['CACH'], {\"strategy\": \"median\"})\n",
"featurization_config.add_transformer_params('Imputer', ['CHMIN'], {\"strategy\": \"median\"})\n",
"featurization_config.add_transformer_params('Imputer', ['PRP'], {\"strategy\": \"most_frequent\"})\n",
"featurization_config.add_transformer_params(\"Imputer\", [\"CACH\"], {\"strategy\": \"median\"})\n",
"featurization_config.add_transformer_params(\n",
" \"Imputer\", [\"CHMIN\"], {\"strategy\": \"median\"}\n",
")\n",
"featurization_config.add_transformer_params(\n",
" \"Imputer\", [\"PRP\"], {\"strategy\": \"most_frequent\"}\n",
")\n",
"# featurization_config.add_transformer_params('HashOneHotEncoder', [], {\"number_of_bits\": 3})"
]
},
@@ -275,17 +264,18 @@
" \"max_concurrent_iterations\": 4,\n",
" \"max_cores_per_iteration\": -1,\n",
" \"n_cross_validations\": 5,\n",
" \"primary_metric\": 'normalized_root_mean_squared_error',\n",
" \"verbosity\": logging.INFO\n",
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
" \"verbosity\": logging.INFO,\n",
"}\n",
"\n",
"automl_config = AutoMLConfig(task = 'regression',\n",
" debug_log = 'automl_errors.log',\n",
"automl_config = AutoMLConfig(\n",
" task=\"regression\",\n",
" debug_log=\"automl_errors.log\",\n",
" compute_target=compute_target,\n",
" featurization=featurization_config,\n",
" training_data=train_data,\n",
" label_column_name=label,\n",
" **automl_settings\n",
" **automl_settings,\n",
")"
]
},
@@ -339,16 +329,8 @@
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = remote_run.get_output()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run_customized, fitted_model_customized = remote_run.get_output()"
"# Retrieve the best Run object\n",
"best_run = remote_run.get_best_child()"
]
},
{
@@ -357,7 +339,7 @@
"source": [
"## Transparency\n",
"\n",
"View updated featurization summary"
"View the featurization summary for the best model to study how different features were transformed. The summary is stored as a JSON file in the run's outputs directory."
]
},
{
@@ -366,41 +348,16 @@
"metadata": {},
"outputs": [],
"source": [
"custom_featurizer = fitted_model_customized.named_steps['datatransformer']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"custom_featurizer.get_featurization_summary()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"is_user_friendly=False allows for more detailed summary for transforms being applied"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"custom_featurizer.get_featurization_summary(is_user_friendly=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"custom_featurizer.get_stats_feature_type_summary()"
"# Download the featurization summary JSON file locally\n",
"best_run.download_file(\n",
" \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
")\n",
"\n",
"# Render the JSON as a pandas DataFrame\n",
"with open(\"featurization_summary.json\", \"r\") as f:\n",
" records = json.load(f)\n",
"\n",
"pd.DataFrame.from_records(records)"
]
},
{
@@ -428,6 +385,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.widgets import RunDetails\n",
|
||||
"\n",
|
||||
"RunDetails(remote_run).show()"
|
||||
]
|
||||
},
|
||||
@@ -475,7 +433,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"with open('train_explainer.py', 'r') as cefr:\n",
|
||||
"with open(\"train_explainer.py\", \"r\") as cefr:\n",
|
||||
" print(cefr.read())"
|
||||
]
|
||||
},
|
||||
@@ -497,32 +455,36 @@
|
||||
"import os\n",
|
||||
"\n",
|
||||
"# create script folder\n",
|
||||
"script_folder = './sample_projects/automl-regression-hardware'\n",
|
||||
"script_folder = \"./sample_projects/automl-regression-hardware\"\n",
|
||||
"if not os.path.exists(script_folder):\n",
|
||||
" os.makedirs(script_folder)\n",
|
||||
"\n",
|
||||
"# Copy the sample script to script folder.\n",
|
||||
"shutil.copy('train_explainer.py', script_folder)\n",
|
||||
"shutil.copy(\"train_explainer.py\", script_folder)\n",
|
||||
"\n",
|
||||
"# Create the explainer script that will run on the remote compute.\n",
|
||||
"script_file_name = script_folder + '/train_explainer.py'\n",
|
||||
"script_file_name = script_folder + \"/train_explainer.py\"\n",
|
||||
"\n",
|
||||
"# Open the sample script for modification\n",
|
||||
"with open(script_file_name, 'r') as cefr:\n",
|
||||
"with open(script_file_name, \"r\") as cefr:\n",
|
||||
" content = cefr.read()\n",
|
||||
"\n",
|
||||
"# Replace the values in train_explainer.py file with the appropriate values\n",
|
||||
"content = content.replace('<<experiment_name>>', automl_run.experiment.name) # your experiment name.\n",
|
||||
"content = content.replace('<<run_id>>', automl_run.id) # Run-id of the AutoML run for which you want to explain the model.\n",
|
||||
"content = content.replace('<<target_column_name>>', 'ERP') # Your target column name\n",
|
||||
"content = content.replace('<<task>>', 'regression') # Training task type\n",
|
||||
"content = content.replace(\n",
|
||||
" \"<<experiment_name>>\", automl_run.experiment.name\n",
|
||||
") # your experiment name.\n",
|
||||
"content = content.replace(\n",
|
||||
" \"<<run_id>>\", automl_run.id\n",
|
||||
") # Run-id of the AutoML run for which you want to explain the model.\n",
|
||||
"content = content.replace(\"<<target_column_name>>\", \"ERP\") # Your target column name\n",
|
||||
"content = content.replace(\"<<task>>\", \"regression\") # Training task type\n",
|
||||
"# Name of your training dataset register with your workspace\n",
|
||||
"content = content.replace('<<train_dataset_name>>', 'machineData_train_dataset') \n",
|
||||
"content = content.replace(\"<<train_dataset_name>>\", \"machineData_train_dataset\")\n",
|
||||
"# Name of your test dataset register with your workspace\n",
|
||||
"content = content.replace('<<test_dataset_name>>', 'machineData_test_dataset')\n",
|
||||
"content = content.replace(\"<<test_dataset_name>>\", \"machineData_test_dataset\")\n",
|
||||
"\n",
|
||||
"# Write sample file into your script folder.\n",
|
||||
"with open(script_file_name, 'w') as cefw:\n",
|
||||
"with open(script_file_name, \"w\") as cefw:\n",
|
||||
" cefw.write(content)"
|
||||
]
|
||||
},
|
||||
@@ -540,6 +502,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.runconfig import RunConfiguration\n",
|
||||
"from azureml.core.conda_dependencies import CondaDependencies\n",
|
||||
"import pkg_resources\n",
|
||||
"\n",
|
||||
"# create a new RunConfig object\n",
|
||||
"conda_run_config = RunConfiguration(framework=\"python\")\n",
|
||||
@@ -549,7 +513,7 @@
|
||||
"conda_run_config.environment.docker.enabled = True\n",
|
||||
"\n",
|
||||
"# specify CondaDependencies obj\n",
|
||||
"conda_run_config.environment.python.conda_dependencies = automl_run.get_environment().python.conda_dependencies"
|
||||
"conda_run_config.environment = automl_run.get_environment()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -569,9 +533,11 @@
|
||||
"# Now submit a run on AmlCompute for model explanations\n",
|
||||
"from azureml.core.script_run_config import ScriptRunConfig\n",
|
||||
"\n",
|
||||
"script_run_config = ScriptRunConfig(source_directory=script_folder,\n",
|
||||
" script='train_explainer.py',\n",
|
||||
" run_config=conda_run_config)\n",
|
||||
"script_run_config = ScriptRunConfig(\n",
|
||||
" source_directory=script_folder,\n",
|
||||
" script=\"train_explainer.py\",\n",
|
||||
" run_config=conda_run_config,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"run = experiment.submit(script_run_config)\n",
|
||||
"\n",
|
||||
@@ -613,10 +579,16 @@
"outputs": [],
"source": [
"from azureml.interpret import ExplanationClient\n",
"\n",
"client = ExplanationClient.from_run(automl_run)\n",
"engineered_explanations = client.download_model_explanation(raw=False, comment='engineered explanations')\n",
"engineered_explanations = client.download_model_explanation(\n",
" raw=False, comment=\"engineered explanations\"\n",
")\n",
"print(engineered_explanations.get_feature_importance_dict())\n",
"print(\"You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + automl_run.get_portal_url())"
"print(\n",
" \"You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\"\n",
" + automl_run.get_portal_url()\n",
")"
]
},
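`get_feature_importance_dict()` returns a plain feature-name-to-importance mapping, so it can be ranked like any dict; a small illustrative sketch, assuming `engineered_explanations` from the cell above:

# Sketch: list the ten most important engineered features.
importances = engineered_explanations.get_feature_importance_dict()
top = sorted(importances.items(), key=lambda kv: abs(kv[1]), reverse=True)[:10]
for name, value in top:
    print(name, round(value, 4))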
{
@@ -633,9 +605,14 @@
"metadata": {},
"outputs": [],
"source": [
"raw_explanations = client.download_model_explanation(raw=True, comment='raw explanations')\n",
"raw_explanations = client.download_model_explanation(\n",
" raw=True, comment=\"raw explanations\"\n",
")\n",
"print(raw_explanations.get_feature_importance_dict())\n",
"print(\"You can visualize the raw explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + automl_run.get_portal_url())"
"print(\n",
" \"You can visualize the raw explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\"\n",
" + automl_run.get_portal_url()\n",
")"
]
},
{
@@ -657,33 +634,12 @@
"outputs": [],
"source": [
"# Register trained automl model present in the 'outputs' folder in the artifacts\n",
"original_model = automl_run.register_model(model_name='automl_model', \n",
" model_path='outputs/model.pkl')\n",
"scoring_explainer_model = automl_run.register_model(model_name='scoring_explainer',\n",
" model_path='outputs/scoring_explainer.pkl')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create the conda dependencies for setting up the service\n",
"We need to create the conda dependencies comprising the *azureml* packages using the training environment from the *automl_run*."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"conda_dep = automl_run.get_environment().python.conda_dependencies\n",
"\n",
"with open(\"myenv.yml\",\"w\") as f:\n",
" f.write(conda_dep.serialize_to_string())\n",
"\n",
"with open(\"myenv.yml\",\"r\") as f:\n",
" print(f.read())"
"original_model = automl_run.register_model(\n",
" model_name=\"automl_model\", model_path=\"outputs/model.pkl\"\n",
")\n",
"scoring_explainer_model = automl_run.register_model(\n",
" model_name=\"scoring_explainer\", model_path=\"outputs/scoring_explainer.pkl\"\n",
")"
]
},
{
@@ -708,7 +664,7 @@
"metadata": {},
"source": [
"### Deploy the service\n",
"In the cell below, we deploy the service using the conda file and the scoring file from the previous steps. "
"In the cell below, we deploy the service using the automl training environment and the scoring file from the previous steps. "
]
},
{
@@ -717,22 +673,30 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.webservice import Webservice\n",
"from azureml.core.model import InferenceConfig\n",
"from azureml.core.webservice import AciWebservice\n",
"from azureml.core.model import Model\n",
"from azureml.core.environment import Environment\n",
"\n",
"aciconfig = AciWebservice.deploy_configuration(cpu_cores=2, \n",
"aciconfig = AciWebservice.deploy_configuration(\n",
" cpu_cores=2,\n",
" memory_gb=2,\n",
" tags={\"data\": \"Machine Data\", \n",
" \"method\" : \"local_explanation\"}, \n",
" description='Get local explanations for Machine test data')\n",
" tags={\"data\": \"Machine Data\", \"method\": \"local_explanation\"},\n",
" description=\"Get local explanations for Machine test data\",\n",
")\n",
"\n",
"myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\n",
"myenv = automl_run.get_environment()\n",
"inference_config = InferenceConfig(entry_script=\"score_explain.py\", environment=myenv)\n",
"\n",
"# Use configs and models generated above\n",
"service = Model.deploy(ws, 'model-scoring', [scoring_explainer_model, original_model], inference_config, aciconfig)\n",
"service = Model.deploy(\n",
" ws,\n",
" \"model-scoring\",\n",
" [scoring_explainer_model, original_model],\n",
" inference_config,\n",
" aciconfig,\n",
")\n",
"service.wait_for_deployment(show_output=True)"
]
},
@@ -766,19 +730,19 @@
"metadata": {},
"outputs": [],
"source": [
"if service.state == 'Healthy':\n",
"if service.state == \"Healthy\":\n",
" X_test = test_data.drop_columns([label]).to_pandas_dataframe()\n",
" # Serialize the first row of the test data into json\n",
" X_test_json = X_test[:1].to_json(orient='records')\n",
" X_test_json = X_test[:1].to_json(orient=\"records\")\n",
" print(X_test_json)\n",
" # Call the service to get the predictions and the engineered and raw explanations\n",
" output = service.run(X_test_json)\n",
" # Print the predicted value\n",
" print(output['predictions'])\n",
" print(output[\"predictions\"])\n",
" # Print the engineered feature importances for the predicted value\n",
" print(output['engineered_local_importance_values'])\n",
" print(output[\"engineered_local_importance_values\"])\n",
" # Print the raw feature importances for the predicted value\n",
" print(output['raw_local_importance_values'])"
" print(output[\"raw_local_importance_values\"])"
]
},
{
@@ -814,14 +778,14 @@
"# preview the first 3 rows of the dataset\n",
"\n",
"test_data = test_data.to_pandas_dataframe()\n",
"y_test = test_data['ERP'].fillna(0)\n",
"test_data = test_data.drop('ERP', 1)\n",
"y_test = test_data[\"ERP\"].fillna(0)\n",
"test_data = test_data.drop(\"ERP\", 1)\n",
"test_data = test_data.fillna(0)\n",
"\n",
"\n",
"train_data = train_data.to_pandas_dataframe()\n",
"y_train = train_data['ERP'].fillna(0)\n",
"train_data = train_data.drop('ERP', 1)\n",
"y_train = train_data[\"ERP\"].fillna(0)\n",
"train_data = train_data.drop(\"ERP\", 1)\n",
"train_data = train_data.fillna(0)"
]
},
@@ -848,27 +812,41 @@
"from sklearn.metrics import mean_squared_error, r2_score\n",
"\n",
"# Set up a multi-plot chart.\n",
"f, (a0, a1) = plt.subplots(1, 2, gridspec_kw = {'width_ratios':[1, 1], 'wspace':0, 'hspace': 0})\n",
"f.suptitle('Regression Residual Values', fontsize = 18)\n",
"f, (a0, a1) = plt.subplots(\n",
" 1, 2, gridspec_kw={\"width_ratios\": [1, 1], \"wspace\": 0, \"hspace\": 0}\n",
")\n",
"f.suptitle(\"Regression Residual Values\", fontsize=18)\n",
"f.set_figheight(6)\n",
"f.set_figwidth(16)\n",
"\n",
"# Plot residual values of training set.\n",
"a0.axis([0, 360, -100, 100])\n",
"a0.plot(y_residual_train, 'bo', alpha = 0.5)\n",
"a0.plot([-10,360],[0,0], 'r-', lw = 3)\n",
"a0.text(16,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_train, y_pred_train))), fontsize = 12)\n",
"a0.text(16,140,'R2 score = {0:.2f}'.format(r2_score(y_train, y_pred_train)),fontsize = 12)\n",
"a0.set_xlabel('Training samples', fontsize = 12)\n",
"a0.set_ylabel('Residual Values', fontsize = 12)\n",
"a0.plot(y_residual_train, \"bo\", alpha=0.5)\n",
"a0.plot([-10, 360], [0, 0], \"r-\", lw=3)\n",
"a0.text(\n",
" 16,\n",
" 170,\n",
" \"RMSE = {0:.2f}\".format(np.sqrt(mean_squared_error(y_train, y_pred_train))),\n",
" fontsize=12,\n",
")\n",
"a0.text(\n",
" 16, 140, \"R2 score = {0:.2f}\".format(r2_score(y_train, y_pred_train)), fontsize=12\n",
")\n",
"a0.set_xlabel(\"Training samples\", fontsize=12)\n",
"a0.set_ylabel(\"Residual Values\", fontsize=12)\n",
"\n",
"# Plot residual values of test set.\n",
"a1.axis([0, 90, -100, 100])\n",
"a1.plot(y_residual_test, 'bo', alpha = 0.5)\n",
"a1.plot([-10,360],[0,0], 'r-', lw = 3)\n",
"a1.text(5,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_test, y_pred_test))), fontsize = 12)\n",
"a1.text(5,140,'R2 score = {0:.2f}'.format(r2_score(y_test, y_pred_test)),fontsize = 12)\n",
"a1.set_xlabel('Test samples', fontsize = 12)\n",
"a1.plot(y_residual_test, \"bo\", alpha=0.5)\n",
"a1.plot([-10, 360], [0, 0], \"r-\", lw=3)\n",
"a1.text(\n",
" 5,\n",
" 170,\n",
" \"RMSE = {0:.2f}\".format(np.sqrt(mean_squared_error(y_test, y_pred_test))),\n",
" fontsize=12,\n",
")\n",
"a1.text(5, 140, \"R2 score = {0:.2f}\".format(r2_score(y_test, y_pred_test)), fontsize=12)\n",
"a1.set_xlabel(\"Test samples\", fontsize=12)\n",
"a1.set_yticklabels([])\n",
"\n",
"plt.show()"
@@ -881,9 +859,11 @@
"outputs": [],
"source": [
"%matplotlib inline\n",
"test_pred = plt.scatter(y_test, y_pred_test, color='')\n",
"test_test = plt.scatter(y_test, y_test, color='g')\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
"test_pred = plt.scatter(y_test, y_pred_test, color=\"\")\n",
"test_test = plt.scatter(y_test, y_test, color=\"g\")\n",
"plt.legend(\n",
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n",
"plt.show()"
]
}

@@ -1,7 +1,10 @@
import pandas as pd
import joblib
from azureml.core.model import Model
from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations
from azureml.train.automl.runtime.automl_explain_utilities import (
    automl_setup_model_explanations,
)
import scipy as sp


def init():
@@ -11,26 +14,55 @@ def init():

    # Retrieve the path to the model file using the model name
    # Assume original model is named original_prediction_model
    automl_model_path = Model.get_model_path('automl_model')
    scoring_explainer_path = Model.get_model_path('scoring_explainer')
    automl_model_path = Model.get_model_path("automl_model")
    scoring_explainer_path = Model.get_model_path("scoring_explainer")

    automl_model = joblib.load(automl_model_path)
    scoring_explainer = joblib.load(scoring_explainer_path)


def is_multi_dimensional(matrix):
    if hasattr(matrix, "ndim") and matrix.ndim > 1:
        return True
    if hasattr(matrix, "shape") and matrix.shape[1]:
        return True
    return False


def convert_matrix(matrix):
    if sp.sparse.issparse(matrix):
        matrix = matrix.todense()
    if is_multi_dimensional(matrix):
        matrix = matrix.tolist()
    return matrix


def run(raw_data):
    # Get predictions and explanations for each data point
    data = pd.read_json(raw_data, orient='records')
    data = pd.read_json(raw_data, orient="records")
    # Make prediction
    predictions = automl_model.predict(data)
    # Setup for inferencing explanations
    automl_explainer_setup_obj = automl_setup_model_explanations(automl_model,
        X_test=data, task='regression')
    automl_explainer_setup_obj = automl_setup_model_explanations(
        automl_model, X_test=data, task="regression"
    )
    # Retrieve model explanations for engineered explanations
    engineered_local_importance_values = scoring_explainer.explain(automl_explainer_setup_obj.X_test_transform)
    engineered_local_importance_values = scoring_explainer.explain(
        automl_explainer_setup_obj.X_test_transform
    )
    engineered_local_importance_values = convert_matrix(
        engineered_local_importance_values
    )

    # Retrieve model explanations for raw explanations
    raw_local_importance_values = scoring_explainer.explain(automl_explainer_setup_obj.X_test_transform, get_raw=True)
    raw_local_importance_values = scoring_explainer.explain(
        automl_explainer_setup_obj.X_test_transform, get_raw=True
    )
    raw_local_importance_values = convert_matrix(raw_local_importance_values)

    # You can return any data type as long as it is JSON-serializable
    return {'predictions': predictions.tolist(),
            'engineered_local_importance_values': engineered_local_importance_values,
            'raw_local_importance_values': raw_local_importance_values}
    return {
        "predictions": predictions.tolist(),
        "engineered_local_importance_values": engineered_local_importance_values,
        "raw_local_importance_values": raw_local_importance_values,
    }
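The `convert_matrix` helper exists because the explainer may hand back a SciPy sparse matrix or a NumPy array, neither of which the standard JSON encoder accepts; a standalone illustration of the same idea (not part of the diff):

# Standalone illustration: json handles nested lists, not arrays or sparse matrices.
import json
import numpy as np
import scipy.sparse as sparse

values = sparse.csr_matrix(np.eye(2))
# json.dumps(values) would raise TypeError
print(json.dumps(values.todense().tolist()))  # [[1.0, 0.0], [0.0, 1.0]]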
@@ -10,11 +10,13 @@ from azureml.core.dataset import Dataset
from azureml.core.run import Run
from azureml.interpret.mimic_wrapper import MimicWrapper
from azureml.interpret.scoring.scoring_explainer import TreeScoringExplainer
from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations, \
    automl_check_model_if_explainable
from azureml.train.automl.runtime.automl_explain_utilities import (
    automl_setup_model_explanations,
    automl_check_model_if_explainable,
)


OUTPUT_DIR = './outputs/'
OUTPUT_DIR = "./outputs/"
os.makedirs(OUTPUT_DIR, exist_ok=True)

# Get workspace from the run context
@@ -22,63 +24,77 @@ run = Run.get_context()
ws = run.experiment.workspace

# Get the AutoML run object from the experiment name and the workspace
experiment = Experiment(ws, '<<experiment_name>>')
automl_run = Run(experiment=experiment, run_id='<<run_id>>')
experiment = Experiment(ws, "<<experiment_name>>")
automl_run = Run(experiment=experiment, run_id="<<run_id>>")

# Check if this AutoML model is explainable
if not automl_check_model_if_explainable(automl_run):
    raise Exception("Model explanations are currently not supported for " + automl_run.get_properties().get(
        'run_algorithm'))
    raise Exception(
        "Model explanations are currently not supported for "
        + automl_run.get_properties().get("run_algorithm")
    )

# Download the best model from the artifact store
automl_run.download_file(name=MODEL_PATH, output_file_path='model.pkl')
automl_run.download_file(name=MODEL_PATH, output_file_path="model.pkl")

# Load the AutoML model into memory
fitted_model = joblib.load('model.pkl')
fitted_model = joblib.load("model.pkl")

# Get the train dataset from the workspace
train_dataset = Dataset.get_by_name(workspace=ws, name='<<train_dataset_name>>')
train_dataset = Dataset.get_by_name(workspace=ws, name="<<train_dataset_name>>")
# Drop the labeled column to get the training set.
X_train = train_dataset.drop_columns(columns=['<<target_column_name>>'])
y_train = train_dataset.keep_columns(columns=['<<target_column_name>>'], validate=True)
X_train = train_dataset.drop_columns(columns=["<<target_column_name>>"])
y_train = train_dataset.keep_columns(columns=["<<target_column_name>>"], validate=True)

# Get the test dataset from the workspace
test_dataset = Dataset.get_by_name(workspace=ws, name='<<test_dataset_name>>')
test_dataset = Dataset.get_by_name(workspace=ws, name="<<test_dataset_name>>")
# Drop the labeled column to get the testing set.
X_test = test_dataset.drop_columns(columns=['<<target_column_name>>'])
X_test = test_dataset.drop_columns(columns=["<<target_column_name>>"])

# Setup the class for explaining the AutoML models
automl_explainer_setup_obj = automl_setup_model_explanations(fitted_model, '<<task>>',
    X=X_train, X_test=X_test,
    y=y_train,
    automl_run=automl_run)
automl_explainer_setup_obj = automl_setup_model_explanations(
    fitted_model, "<<task>>", X=X_train, X_test=X_test, y=y_train, automl_run=automl_run
)

# Initialize the Mimic Explainer
explainer = MimicWrapper(ws, automl_explainer_setup_obj.automl_estimator, LGBMExplainableModel,
explainer = MimicWrapper(
    ws,
    automl_explainer_setup_obj.automl_estimator,
    LGBMExplainableModel,
    init_dataset=automl_explainer_setup_obj.X_transform,
    run=automl_explainer_setup_obj.automl_run,
    features=automl_explainer_setup_obj.engineered_feature_names,
    feature_maps=[automl_explainer_setup_obj.feature_map],
    classes=automl_explainer_setup_obj.classes)
    classes=automl_explainer_setup_obj.classes,
)

# Compute the engineered explanations
engineered_explanations = explainer.explain(['local', 'global'], tag='engineered explanations',
    eval_dataset=automl_explainer_setup_obj.X_test_transform)
engineered_explanations = explainer.explain(
    ["local", "global"],
    tag="engineered explanations",
    eval_dataset=automl_explainer_setup_obj.X_test_transform,
)

# Compute the raw explanations
raw_explanations = explainer.explain(['local', 'global'], get_raw=True, tag='raw explanations',
raw_explanations = explainer.explain(
    ["local", "global"],
    get_raw=True,
    tag="raw explanations",
    raw_feature_names=automl_explainer_setup_obj.raw_feature_names,
    eval_dataset=automl_explainer_setup_obj.X_test_transform,
    raw_eval_dataset=automl_explainer_setup_obj.X_test_raw)
    raw_eval_dataset=automl_explainer_setup_obj.X_test_raw,
)

print("Engineered and raw explanations computed successfully")

# Initialize the ScoringExplainer
scoring_explainer = TreeScoringExplainer(explainer.explainer, feature_maps=[automl_explainer_setup_obj.feature_map])
scoring_explainer = TreeScoringExplainer(
    explainer.explainer, feature_maps=[automl_explainer_setup_obj.feature_map]
)

# Pickle scoring explainer locally
with open('scoring_explainer.pkl', 'wb') as stream:
with open("scoring_explainer.pkl", "wb") as stream:
    joblib.dump(scoring_explainer, stream)

# Upload the scoring explainer to the automl run
automl_run.upload_file('outputs/scoring_explainer.pkl', 'scoring_explainer.pkl')
automl_run.upload_file("outputs/scoring_explainer.pkl", "scoring_explainer.pkl")
@@ -1,21 +1,5 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -86,16 +70,6 @@
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -105,18 +79,19 @@
"ws = Workspace.from_config()\n",
"\n",
"# Choose a name for the experiment.\n",
"experiment_name = 'automl-regression'\n",
"experiment_name = \"automl-regression\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -143,10 +118,11 @@
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
" print('Found existing cluster, use it.')\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
" max_nodes=4)\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
" )\n",
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
@@ -179,7 +155,7 @@
"# Split the dataset into train and test datasets\n",
"train_data, test_data = dataset.random_split(percentage=0.8, seed=223)\n",
"\n",
"label = \"ERP\"\n"
"label = \"ERP\""
]
},
{
@@ -213,7 +189,7 @@
"source": [
"automl_settings = {\n",
" \"n_cross_validations\": 3,\n",
" \"primary_metric\": 'normalized_root_mean_squared_error',\n",
" \"primary_metric\": \"r2_score\",\n",
" \"enable_early_stopping\": True,\n",
" \"experiment_timeout_hours\": 0.3, # for real scenarios we recommend a timeout of at least one hour\n",
" \"max_concurrent_iterations\": 4,\n",
@@ -221,11 +197,12 @@
" \"verbosity\": logging.INFO,\n",
"}\n",
"\n",
"automl_config = AutoMLConfig(task = 'regression',\n",
"automl_config = AutoMLConfig(\n",
" task=\"regression\",\n",
" compute_target=compute_target,\n",
" training_data=train_data,\n",
" label_column_name=label,\n",
" **automl_settings\n",
" **automl_settings,\n",
")"
]
},
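For context, an AutoMLConfig like this is then submitted to the experiment and the best iteration retrieved from the completed run; a sketch of the usual follow-up, assuming azureml-train-automl is installed (the notebook's later cells do the equivalent):

# Sketch: submit the configuration and fetch the best model once it completes.
remote_run = experiment.submit(automl_config, show_output=False)
remote_run.wait_for_completion()
best_run, fitted_model = remote_run.get_output()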
@@ -281,6 +258,7 @@
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"\n",
"RunDetails(remote_run).show()"
]
},
@@ -366,12 +344,12 @@
"metadata": {},
"outputs": [],
"source": [
"y_test = test_data.keep_columns('ERP').to_pandas_dataframe()\n",
"test_data = test_data.drop_columns('ERP').to_pandas_dataframe()\n",
"y_test = test_data.keep_columns(\"ERP\").to_pandas_dataframe()\n",
"test_data = test_data.drop_columns(\"ERP\").to_pandas_dataframe()\n",
"\n",
"\n",
"y_train = train_data.keep_columns('ERP').to_pandas_dataframe()\n",
"train_data = train_data.drop_columns('ERP').to_pandas_dataframe()\n"
"y_train = train_data.keep_columns(\"ERP\").to_pandas_dataframe()\n",
"train_data = train_data.drop_columns(\"ERP\").to_pandas_dataframe()"
]
},
{
@@ -397,27 +375,41 @@
"from sklearn.metrics import mean_squared_error, r2_score\n",
"\n",
"# Set up a multi-plot chart.\n",
"f, (a0, a1) = plt.subplots(1, 2, gridspec_kw = {'width_ratios':[1, 1], 'wspace':0, 'hspace': 0})\n",
"f.suptitle('Regression Residual Values', fontsize = 18)\n",
"f, (a0, a1) = plt.subplots(\n",
" 1, 2, gridspec_kw={\"width_ratios\": [1, 1], \"wspace\": 0, \"hspace\": 0}\n",
")\n",
"f.suptitle(\"Regression Residual Values\", fontsize=18)\n",
"f.set_figheight(6)\n",
"f.set_figwidth(16)\n",
"\n",
"# Plot residual values of training set.\n",
"a0.axis([0, 360, -100, 100])\n",
"a0.plot(y_residual_train, 'bo', alpha = 0.5)\n",
"a0.plot([-10,360],[0,0], 'r-', lw = 3)\n",
"a0.text(16,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_train, y_pred_train))), fontsize = 12)\n",
"a0.text(16,140,'R2 score = {0:.2f}'.format(r2_score(y_train, y_pred_train)),fontsize = 12)\n",
"a0.set_xlabel('Training samples', fontsize = 12)\n",
"a0.set_ylabel('Residual Values', fontsize = 12)\n",
"a0.plot(y_residual_train, \"bo\", alpha=0.5)\n",
"a0.plot([-10, 360], [0, 0], \"r-\", lw=3)\n",
"a0.text(\n",
" 16,\n",
" 170,\n",
" \"RMSE = {0:.2f}\".format(np.sqrt(mean_squared_error(y_train, y_pred_train))),\n",
" fontsize=12,\n",
")\n",
"a0.text(\n",
" 16, 140, \"R2 score = {0:.2f}\".format(r2_score(y_train, y_pred_train)), fontsize=12\n",
")\n",
"a0.set_xlabel(\"Training samples\", fontsize=12)\n",
"a0.set_ylabel(\"Residual Values\", fontsize=12)\n",
"\n",
"# Plot residual values of test set.\n",
"a1.axis([0, 90, -100, 100])\n",
"a1.plot(y_residual_test, 'bo', alpha = 0.5)\n",
"a1.plot([-10,360],[0,0], 'r-', lw = 3)\n",
"a1.text(5,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_test, y_pred_test))), fontsize = 12)\n",
"a1.text(5,140,'R2 score = {0:.2f}'.format(r2_score(y_test, y_pred_test)),fontsize = 12)\n",
"a1.set_xlabel('Test samples', fontsize = 12)\n",
"a1.plot(y_residual_test, \"bo\", alpha=0.5)\n",
"a1.plot([-10, 360], [0, 0], \"r-\", lw=3)\n",
"a1.text(\n",
" 5,\n",
" 170,\n",
" \"RMSE = {0:.2f}\".format(np.sqrt(mean_squared_error(y_test, y_pred_test))),\n",
" fontsize=12,\n",
")\n",
"a1.text(5, 140, \"R2 score = {0:.2f}\".format(r2_score(y_test, y_pred_test)), fontsize=12)\n",
"a1.set_xlabel(\"Test samples\", fontsize=12)\n",
"a1.set_yticklabels([])\n",
"\n",
"plt.show()"
@@ -430,9 +422,11 @@
"outputs": [],
"source": [
"%matplotlib inline\n",
"test_pred = plt.scatter(y_test, y_pred_test, color='')\n",
"test_test = plt.scatter(y_test, y_test, color='g')\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
"test_pred = plt.scatter(y_test, y_pred_test, color=\"\")\n",
"test_test = plt.scatter(y_test, y_test, color=\"g\")\n",
"plt.legend(\n",
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n",
"plt.show()"
]
},

@@ -82,7 +82,7 @@
"source": [
"## Create trained model\n",
"\n",
"For this example, we will train a small model on scikit-learn's [diabetes dataset](https://scikit-learn.org/stable/datasets/index.html#diabetes-dataset). "
"For this example, we will train a small model on scikit-learn's [diabetes dataset](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_diabetes.html). "
]
},
{
@@ -279,7 +279,9 @@
"\n",
"\n",
"environment = Environment('my-sklearn-environment')\n",
"environment.python.conda_dependencies = CondaDependencies.create(pip_packages=[\n",
"environment.python.conda_dependencies = CondaDependencies.create(conda_packages=[\n",
" 'pip==20.2.4'],\n",
" pip_packages=[\n",
" 'azureml-defaults',\n",
" 'inference-schema[numpy-support]',\n",
" 'joblib',\n",
@@ -478,7 +480,9 @@
"\n",
"\n",
"environment = Environment('my-sklearn-environment')\n",
"environment.python.conda_dependencies = CondaDependencies.create(pip_packages=[\n",
"environment.python.conda_dependencies = CondaDependencies.create(conda_packages=[\n",
" 'pip==20.2.4'],\n",
" pip_packages=[\n",
" 'azureml-defaults',\n",
" 'inference-schema[numpy-support]',\n",
" 'joblib',\n",

@@ -81,7 +81,7 @@
"source": [
"## Create trained model\n",
"\n",
"For this example, we will train a small model on scikit-learn's [diabetes dataset](https://scikit-learn.org/stable/datasets/index.html#diabetes-dataset). "
"For this example, we will train a small model on scikit-learn's [diabetes dataset](https://scikit-learn.org/stable/datasets/toy_dataset.html#diabetes-dataset). "
]
},
{
@@ -263,7 +263,7 @@
"\n",
"# explicitly set base_image to None when setting base_dockerfile\n",
"myenv.docker.base_image = None\n",
"myenv.docker.base_dockerfile = \"FROM mcr.microsoft.com/azureml/base:intelmpi2018.3-ubuntu16.04\\nRUN echo \\\"this is test\\\"\"\n",
"myenv.docker.base_dockerfile = \"FROM mcr.microsoft.com/azureml/openmpi4.1.0-ubuntu20.04\\nRUN echo \\\"this is test\\\"\"\n",
"myenv.inferencing_stack_version = \"latest\"\n",
"\n",
"inference_config = InferenceConfig(source_directory=source_directory,\n",

@@ -105,11 +105,13 @@
"from azureml.core.conda_dependencies import CondaDependencies\n",
"\n",
"environment=Environment('my-sklearn-environment')\n",
"environment.python.conda_dependencies = CondaDependencies.create(pip_packages=[\n",
"environment.python.conda_dependencies = CondaDependencies.create(conda_packages=[\n",
" 'pip==20.2.4'],\n",
" pip_packages=[\n",
" 'azureml-defaults',\n",
" 'inference-schema[numpy-support]',\n",
" 'numpy',\n",
" 'scikit-learn==0.19.1',\n",
" 'scikit-learn==0.22.1',\n",
" 'scipy'\n",
"])"
]

@@ -172,7 +172,7 @@
"source": [
"from azureml.core.conda_dependencies import CondaDependencies\n",
"\n",
"myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn==0.20.3'],\n",
"myenv = CondaDependencies.create(conda_packages=['numpy==1.19.5','scikit-learn==0.22.1'],\n",
" pip_packages=['azureml-defaults'])\n",
"\n",
"with open(\"myenv.yml\",\"w\") as f:\n",

@@ -70,7 +70,7 @@
"\n",
"import urllib.request\n",
"\n",
"onnx_model_url = \"https://github.com/onnx/models/blob/master/vision/body_analysis/emotion_ferplus/model/emotion-ferplus-7.tar.gz?raw=true\"\n",
"onnx_model_url = \"https://github.com/onnx/models/blob/main/vision/body_analysis/emotion_ferplus/model/emotion-ferplus-7.tar.gz?raw=true\"\n",
"\n",
"urllib.request.urlretrieve(onnx_model_url, filename=\"emotion-ferplus-7.tar.gz\")\n",
"\n",
@@ -70,7 +70,7 @@
"\n",
"import urllib.request\n",
"\n",
"onnx_model_url = \"https://github.com/onnx/models/blob/master/vision/classification/mnist/model/mnist-7.tar.gz?raw=true\"\n",
"onnx_model_url = \"https://github.com/onnx/models/blob/main/vision/classification/mnist/model/mnist-7.tar.gz?raw=true\"\n",
"\n",
"urllib.request.urlretrieve(onnx_model_url, filename=\"mnist-7.tar.gz\")"
]

@@ -240,7 +240,8 @@
"# Please see [Azure ML Containers repository](https://github.com/Azure/AzureML-Containers#featured-tags)\n",
"# for open-sourced GPU base images.\n",
"env.docker.base_image = DEFAULT_GPU_IMAGE\n",
"env.python.conda_dependencies = CondaDependencies.create(conda_packages=['tensorflow-gpu==1.12.0','numpy'],\n",
"env.python.conda_dependencies = CondaDependencies.create(python_version=\"3.6.2\", \n",
" conda_packages=['tensorflow-gpu==1.12.0','numpy'],\n",
" pip_packages=['azureml-contrib-services', 'azureml-defaults'])\n",
"\n",
"inference_config = InferenceConfig(entry_script=\"score.py\", environment=env)\n",

@@ -109,7 +109,7 @@
"from azureml.core import Environment\n",
"from azureml.core.conda_dependencies import CondaDependencies \n",
"\n",
"conda_deps = CondaDependencies.create(conda_packages=['numpy', 'scikit-learn==0.19.1', 'scipy'], pip_packages=['azureml-defaults', 'inference-schema'])\n",
"conda_deps = CondaDependencies.create(conda_packages=['numpy', 'scikit-learn==0.22.1', 'scipy'], pip_packages=['azureml-defaults', 'inference-schema'])\n",
"myenv = Environment(name='myenv')\n",
"myenv.python.conda_dependencies = conda_deps"
]

@@ -109,7 +109,7 @@
"from azureml.core import Environment\n",
"from azureml.core.conda_dependencies import CondaDependencies \n",
"\n",
"conda_deps = CondaDependencies.create(conda_packages=['numpy','scikit-learn==0.19.1','scipy'], pip_packages=['azureml-defaults', 'inference-schema'])\n",
"conda_deps = CondaDependencies.create(conda_packages=['numpy','scikit-learn==0.22.1','scipy'], pip_packages=['azureml-defaults', 'inference-schema'])\n",
"myenv = Environment(name='myenv')\n",
"myenv.python.conda_dependencies = conda_deps"
]
@@ -295,12 +295,14 @@
"\n",
"\n",
"environment = Environment('my-sklearn-environment')\n",
"environment.python.conda_dependencies = CondaDependencies.create(pip_packages=[\n",
"environment.python.conda_dependencies = CondaDependencies.create(conda_packages=[\n",
" 'pip==20.2.4'],\n",
" pip_packages=[\n",
" 'azureml-defaults',\n",
" 'inference-schema[numpy-support]',\n",
" 'joblib',\n",
" 'numpy',\n",
" 'scikit-learn==0.19.1',\n",
" 'scikit-learn==0.22.1',\n",
" 'scipy'\n",
"])\n",
"inference_config = InferenceConfig(entry_script='score.py', environment=environment)\n",

@@ -0,0 +1,44 @@
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.

from azureml.core.run import Run
import joblib
import os
import shap
import xgboost

OUTPUT_DIR = './outputs/'
os.makedirs(OUTPUT_DIR, exist_ok=True)

run = Run.get_context()

# get a dataset on income prediction
X, y = shap.datasets.adult()

# train an XGBoost model (but any other tree model type should work)
model = xgboost.XGBClassifier()
model.fit(X, y)

explainer = shap.explainers.GPUTree(model, X)
X_shap = X[:100]
shap_values = explainer(X_shap)

print("computed shap values:")
print(shap_values)

# write X_shap out as a pickle file for later visualization
x_shap_pkl = 'x_shap.pkl'
with open(x_shap_pkl, 'wb') as file:
    joblib.dump(value=X_shap, filename=os.path.join(OUTPUT_DIR, x_shap_pkl))
run.upload_file('x_shap_adult_census.pkl', os.path.join(OUTPUT_DIR, x_shap_pkl))

model_file_name = 'xgboost_.pkl'
# save model in the outputs folder so it automatically gets uploaded
with open(model_file_name, 'wb') as file:
    joblib.dump(value=model, filename=os.path.join(OUTPUT_DIR,
                model_file_name))

# register the model
run.upload_file('xgboost_model.pkl', os.path.join('./outputs/', model_file_name))
original_model = run.register_model(model_name='xgboost_with_gpu_tree_explainer',
                                    model_path='xgboost_model.pkl')
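shap also ships a CPU TreeExplainer with a compatible interface, which is useful for sanity-checking GPU results on a handful of rows; a hedged sketch reusing the names from the script above:

# Optional cross-check against shap's CPU tree explainer (assumes model, X_shap above).
cpu_explainer = shap.TreeExplainer(model)
cpu_values = cpu_explainer.shap_values(X_shap)
print("cpu shap values computed for", len(X_shap), "rows")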
@@ -0,0 +1,297 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Explain tree-based models on GPU using GPUTreeExplainer\n",
"\n",
"\n",
"_**This notebook illustrates how to use shap's GPUTreeExplainer on an Azure GPU machine.**_\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"Problem: Train a tree-based model and explain the model on an Azure GPU machine using the GPUTreeExplainer.\n",
"\n",
"---\n",
"\n",
"## Table of Contents\n",
"\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Run model explainer locally at training time](#Explain)\n",
" 1. Apply feature transformations\n",
" 1. Train a binary classification model\n",
" 1. Explain the model on raw features\n",
" 1. Generate global explanations\n",
" 1. Generate local explanations\n",
"1. [Visualize explanations](#Visualize)\n",
"1. [Deploy model and scoring explainer](#Deploy)\n",
"1. [Next steps](#Next)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"This notebook demonstrates how to use the GPUTreeExplainer on some simple datasets. Like the TreeExplainer, the GPUTreeExplainer is specifically designed for tree-based machine learning models, but it uses NVIDIA GPUs to accelerate the computation.\n",
"\n",
"\n",
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
"\n",
"Notebook synopsis:\n",
"\n",
"1. Creating an Experiment in an existing Workspace\n",
"2. Configuration and remote run with a GPU machine"
]
},
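In miniature, the pattern this notebook builds up to looks as follows; it only runs where shap was compiled with CUDA support (hence the custom Docker image later in the notebook), so treat it as an illustrative sketch rather than a local test:

# Sketch of the core GPUTreeExplainer pattern (requires a CUDA-enabled shap build).
import shap
import xgboost

X, y = shap.datasets.adult()
model = xgboost.XGBClassifier().fit(X, y)
explainer = shap.explainers.GPUTree(model, X)
shap_values = explainer(X[:10])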
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"import os\n",
"import shutil\n",
"\n",
"import pandas as pd\n",
"\n",
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core.dataset import Dataset\n",
"from azureml.core.compute import AmlCompute\n",
"from azureml.core.compute import ComputeTarget\n",
"from azureml.core.run import Run\n",
"from azureml.core.model import Model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.42.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As part of the setup you have already created a <b>Workspace</b>. To run the script, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve, while a Run corresponds to a specific approach to the problem."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# Choose an experiment name.\n",
"experiment_name = 'gpu-tree-explainer'\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace Name'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create project directory\n",
"\n",
"Create a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script, and any additional files your training script depends on."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import shutil\n",
"\n",
"project_folder = './azureml-shap-gpu-tree-explainer'\n",
"os.makedirs(project_folder, exist_ok=True)\n",
"shutil.copy('gpu_tree_explainer.py', project_folder)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Set up a compute cluster\n",
"This section uses a user-provided compute cluster (named \"gpu-shap-cluster\" in this example). If a cluster with this name does not exist in the user's workspace, the code below will create a new cluster. You can choose the parameters of the cluster as mentioned in the comments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"num_nodes = 1\n",
"\n",
"# Choose a name for your cluster.\n",
"amlcompute_cluster_name = \"gpu-shap-cluster\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
" print('Found existing cluster, use it.')\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_NC6\",\n",
" # To use GPUTreeExplainer, select a GPU such as \"STANDARD_NC6\" \n",
" # or similar GPU option\n",
" # available in your workspace\n",
" max_nodes = num_nodes)\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Configure & Run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.runconfig import RunConfiguration\n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"\n",
"# Create a new RunConfig object\n",
"run_config = RunConfiguration(framework=\"python\")\n",
"\n",
"# Set compute target to AmlCompute target created in previous step\n",
"run_config.target = amlcompute_cluster_name\n",
"\n",
"from azureml.core import Environment\n",
"\n",
"environment_name = \"shap-gpu-tree\"\n",
"\n",
"env = Environment(environment_name)\n",
"\n",
"env.docker.enabled = True\n",
"env.docker.base_image = None\n",
"env.docker.base_dockerfile = \"\"\"\n",
"FROM rapidsai/rapidsai:cuda10.0-devel-ubuntu18.04\n",
"RUN apt-get update && \\\n",
"apt-get install -y fuse && \\\n",
"apt-get install -y build-essential && \\\n",
"apt-get install -y python3-dev && \\\n",
"source activate rapids && \\\n",
"apt-get install -y g++ && \\\n",
"printenv && \\\n",
"echo \"which nvcc: \" && \\\n",
"which nvcc && \\\n",
"pip install azureml-defaults && \\\n",
"pip install azureml-telemetry && \\\n",
"cd /usr/local/src && \\\n",
"git clone https://github.com/slundberg/shap && \\\n",
"cd shap && \\\n",
"mkdir build && \\\n",
"python setup.py install --user && \\\n",
"pip uninstall -y xgboost && \\\n",
"rm /conda/envs/rapids/lib/libxgboost.so && \\\n",
"pip install xgboost==1.4.2\n",
"\"\"\"\n",
"\n",
"env.python.user_managed_dependencies = True\n",
"\n",
"from azureml.core import Run\n",
"from azureml.core import ScriptRunConfig\n",
"\n",
"src = ScriptRunConfig(source_directory=project_folder, \n",
" script='gpu_tree_explainer.py', \n",
" compute_target=amlcompute_cluster_name,\n",
" environment=env) \n",
"run = experiment.submit(config=src)\n",
"run"
]
}
],
"metadata": {
"authors": [
{
"name": "ilmat"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
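Once the run is submitted, the artifacts the training script uploads (`x_shap_adult_census.pkl` and `xgboost_model.pkl`) can be monitored and fetched; a minimal sketch assuming `run` from the last cell:

# Sketch: block on the remote GPU run, then download the uploaded artifacts.
run.wait_for_completion(show_output=True)
run.download_file("x_shap_adult_census.pkl", output_file_path="x_shap_adult_census.pkl")
run.download_file("xgboost_model.pkl", output_file_path="xgboost_model.pkl")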
@@ -0,0 +1,5 @@
name: train-explain-model-gpu-tree-explainer
dependencies:
- pip:
  - azureml-sdk
  - azureml-interpret

@@ -249,6 +249,7 @@
"source": [
"from azureml.core.runconfig import RunConfiguration\n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"import sys\n",
"\n",
"# Create a new RunConfig object\n",
"run_config = RunConfiguration(framework=\"python\")\n",
@@ -260,6 +261,8 @@
" 'azureml-defaults', 'azureml-telemetry', 'azureml-interpret'\n",
"]\n",
"\n",
"python_version = '{0}.{1}'.format(sys.version_info[0], sys.version_info[1])\n",
"\n",
"# Note: this is to pin the scikit-learn and pandas versions to be the same as the notebook.\n",
"# In a production scenario the user would choose their dependencies\n",
"import pkg_resources\n",
@@ -283,7 +286,7 @@
"# environment, otherwise if a model is trained or deployed in a different environment this can\n",
"# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
"azureml_pip_packages.extend([sklearn_dep, pandas_dep])\n",
"run_config.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=azureml_pip_packages)\n",
"run_config.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=azureml_pip_packages, python_version=python_version)\n",
"\n",
"from azureml.core import ScriptRunConfig\n",
"\n",

@@ -11,4 +11,6 @@ dependencies:
- matplotlib
- azureml-dataset-runtime
- ipywidgets
- raiwidgets~=0.7.0
- raiwidgets~=0.18.1
- itsdangerous==2.0.1
- markupsafe<2.1.0

@@ -10,4 +10,7 @@ dependencies:
- ipython
- matplotlib
- ipywidgets
- raiwidgets~=0.7.0
- raiwidgets~=0.18.1
- packaging>=20.9
- itsdangerous==2.0.1
- markupsafe<2.1.0

@@ -324,6 +324,7 @@
"outputs": [],
"source": [
"from azureml.core.conda_dependencies import CondaDependencies \n",
"import sys\n",
"\n",
"# azureml-defaults is required to host the model as a web service.\n",
"azureml_pip_packages = [\n",
@@ -331,6 +332,7 @@
" 'azureml-interpret'\n",
"]\n",
"\n",
"python_version = '{0}.{1}'.format(sys.version_info[0], sys.version_info[1])\n",
"\n",
"# Note: this is to pin the scikit-learn and pandas versions to be the same as the notebook.\n",
"# In a production scenario the user would choose their dependencies\n",
@@ -354,7 +356,10 @@
"# the submitted job is run in. Note the remote environment(s) needs to be similar to the local\n",
"# environment, otherwise if a model is trained or deployed in a different environment this can\n",
"# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
"myenv = CondaDependencies.create(pip_packages=['pyyaml', sklearn_dep, pandas_dep] + azureml_pip_packages)\n",
"myenv = CondaDependencies.create(\n",
" python_version=python_version,\n",
" conda_packages=['pip==20.2.4'],\n",
" pip_packages=['pyyaml', sklearn_dep, pandas_dep] + azureml_pip_packages)\n",
"\n",
"with open(\"myenv.yml\",\"w\") as f:\n",
" f.write(myenv.serialize_to_string())\n",
@@ -387,7 +392,7 @@
"\n",
"\n",
"aciconfig = AciWebservice.deploy_configuration(cpu_cores=1, \n",
" memory_gb=1, \n",
" memory_gb=2, \n",
" tags={\"data\": \"IBM_Attrition\", \n",
" \"method\" : \"local_explanation\"}, \n",
" description='Get local explanations for IBM Employee Attrition data')\n",
@@ -411,8 +416,8 @@
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"import json\n",
"from raiutils.webservice import post_with_retries\n",
"\n",
"\n",
"# Create data to test service with\n",
@@ -424,7 +429,7 @@
"\n",
"# Send request to service\n",
"print(\"POST to url\", service.scoring_uri)\n",
"resp = requests.post(service.scoring_uri, sample_data, headers=headers)\n",
"resp = post_with_retries(service.scoring_uri, sample_data, headers)\n",
"\n",
"# Can convert back to Python objects from json string if desired\n",
"print(\"prediction:\", resp.text)\n",
@@ -10,4 +10,7 @@ dependencies:
- ipython
- matplotlib
- ipywidgets
- raiwidgets~=0.7.0
- raiwidgets~=0.18.1
- packaging>=20.9
- itsdangerous==2.0.1
- markupsafe<2.1.0

@@ -251,6 +251,7 @@
"from azureml.core.runconfig import RunConfiguration\n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"from azureml.core.runconfig import DEFAULT_CPU_IMAGE\n",
"import sys\n",
"\n",
"# Create a new runconfig object\n",
"run_config = RunConfiguration()\n",
@@ -268,7 +269,7 @@
" 'azureml-defaults', 'azureml-telemetry', 'azureml-interpret'\n",
"]\n",
" \n",
"\n",
"python_version = '{0}.{1}'.format(sys.version_info[0], sys.version_info[1])\n",
"\n",
"# Note: this is to pin the scikit-learn version to be the same as the notebook.\n",
"# In a production scenario the user would choose their dependencies\n",
@@ -293,7 +294,10 @@
"# environment, otherwise if a model is trained or deployed in a different environment this can\n",
"# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
"azureml_pip_packages.extend(['pyyaml', sklearn_dep, pandas_dep])\n",
"run_config.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=azureml_pip_packages)\n",
"run_config.environment.python.conda_dependencies = CondaDependencies.create(\n",
" python_version=python_version,\n",
" pip_packages=azureml_pip_packages)\n",
"\n",
"# Now submit a run on AmlCompute\n",
"from azureml.core.script_run_config import ScriptRunConfig\n",
"\n",
@@ -453,7 +457,7 @@
"# environment, otherwise if a model is trained or deployed in a different environment this can\n",
"# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
"azureml_pip_packages.extend(['pyyaml', sklearn_dep, pandas_dep])\n",
"myenv = CondaDependencies.create(pip_packages=azureml_pip_packages)\n",
"myenv = CondaDependencies.create(python_version=python_version, pip_packages=azureml_pip_packages)\n",
"\n",
"with open(\"myenv.yml\",\"w\") as f:\n",
" f.write(myenv.serialize_to_string())\n",
@@ -509,7 +513,7 @@
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"from raiutils.webservice import post_with_retries\n",
"\n",
"# Create data to test service with\n",
"examples = x_test[:4]\n",
@@ -519,7 +523,7 @@
"\n",
"# Send request to service\n",
"print(\"POST to url\", service.scoring_uri)\n",
"resp = requests.post(service.scoring_uri, input_data, headers=headers)\n",
"resp = post_with_retries(service.scoring_uri, input_data, headers)\n",
"\n",
"# Can convert back to Python objects from json string if desired\n",
"print(\"prediction:\", resp.text)"

@@ -12,4 +12,6 @@ dependencies:
- azureml-dataset-runtime
- azureml-core
- ipywidgets
- raiwidgets~=0.7.0
- raiwidgets~=0.18.1
- itsdangerous==2.0.1
- markupsafe<2.1.0

Binary file not shown.
@@ -63,6 +63,8 @@
"outputs": [],
"source": [
"import os\n",
"import requests\n",
"import tempfile\n",
"import azureml.core\n",
"from azureml.core import Workspace, Experiment, Datastore\n",
"from azureml.widgets import RunDetails\n",
@@ -158,9 +160,14 @@
"metadata": {},
"outputs": [],
"source": [
"# download data file from remote\n",
"response = requests.get(\"https://dprepdata.blob.core.windows.net/demo/Titanic.csv\")\n",
"titanic_file = os.path.join(tempfile.mkdtemp(), \"Titanic.csv\")\n",
"with open(titanic_file, \"w\") as f:\n",
" f.write(response.content.decode(\"utf-8\"))\n",
"# get_default_datastore() gets the default Azure Blob Store associated with your workspace.\n",
"# Here we are reusing the def_blob_store object we obtained earlier\n",
"def_blob_store.upload_files([\"./20news.pkl\"], target_path=\"20newsgroups\", overwrite=True)\n",
"def_blob_store.upload_files([titanic_file], target_path=\"titanic\", overwrite=True)\n",
"print(\"Upload call completed\")"
]
},
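A common follow-up, not shown in this hunk, is to wrap the uploaded CSV in a TabularDataset so later pipeline steps can consume it; a hedged sketch assuming `def_blob_store` from above:

# Sketch: reference the uploaded Titanic CSV as a tabular dataset.
from azureml.core import Dataset

titanic_ds = Dataset.Tabular.from_delimited_files(path=[(def_blob_store, "titanic/Titanic.csv")])
print(titanic_ds.take(3).to_pandas_dataframe())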
@@ -286,7 +293,7 @@
"- [**AzureBatchStep**](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.azurebatch_step.azurebatchstep?view=azure-ml-py): Creates a step for submitting jobs to Azure Batch\n",
"- [**EstimatorStep**](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.estimator_step.estimatorstep?view=azure-ml-py): Adds a step to run an Estimator in a Pipeline.\n",
"- [**MpiStep**](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.mpi_step.mpistep?view=azure-ml-py): Adds a step to run an MPI job in a Pipeline.\n",
"- [**AutoMLStep**](https://docs.microsoft.com/en-us/python/api/azureml-train-automl/azureml.train.automl.automlstep?view=azure-ml-py): Creates an AutoML step in a Pipeline.\n",
"- [**AutoMLStep**](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.automlstep?view=azure-ml-py): Creates an AutoML step in a Pipeline.\n",
"\n",
"The following code will create a PythonScriptStep to be executed in the Azure Machine Learning Compute we created above using train.py, one of the files already made available in the `source_directory`.\n",
"\n",
Some files were not shown because too many files have changed in this diff.