Compare commits


18 Commits

Author SHA1 Message Date
amlrelsa-ms
71e061b193 update samples from Release-114 as a part of 1.38.0 SDK stable release 2022-02-16 16:32:55 +00:00
Harneet Virk
9094da4085 Merge pull request #1684 from Azure/release_update/Release-122
update samples from Release-122 as a part of  SDK release
2022-02-14 11:38:49 -08:00
amlrelsa-ms
ebf9d2855c update samples from Release-122 as a part of SDK release 2022-02-14 19:24:27 +00:00
v-pbavanari
1bbd78eb33 update samples from Release-121 as a part of SDK release (#1678)
Co-authored-by: amlrelsa-ms <amlrelsa@microsoft.com>
2022-02-02 12:28:49 -05:00
v-pbavanari
77f5a69e04 update samples from Release-120 as a part of SDK release (#1676)
Co-authored-by: amlrelsa-ms <amlrelsa@microsoft.com>
2022-01-28 12:51:49 -05:00
raja7592
ce82af2ab0 update samples from Release-118 as a part of SDK release (#1673)
Co-authored-by: amlrelsa-ms <amlrelsa@microsoft.com>
2022-01-24 20:07:35 -05:00
Harneet Virk
2a2d2efa17 Merge pull request #1658 from Azure/release_update/Release-117
Update samples from Release sdk 1.37.0 as a part of  SDK release
2021-12-13 10:36:08 -08:00
amlrelsa-ms
dd494e9cac update samples from Release-117 as a part of SDK release 2021-12-13 16:57:22 +00:00
Harneet Virk
352adb7487 Merge pull request #1629 from Azure/release_update/Release-116
Update samples from Release as a part of SDK release 1.36.0
2021-11-08 09:48:25 -08:00
amlrelsa-ms
aebe34b4e8 update samples from Release-116 as a part of SDK release 2021-11-08 16:09:41 +00:00
Harneet Virk
c7e1241e20 Merge pull request #1612 from Azure/release_update/Release-115
Update samples from Release-115 as a part of  SDK release
2021-10-11 12:01:59 -07:00
amlrelsa-ms
6529298c24 update samples from Release-115 as a part of SDK release 2021-10-11 16:09:57 +00:00
Harneet Virk
e2dddfde85 Merge pull request #1601 from Azure/release_update/Release-114
update samples from Release-114 as a part of  SDK release
2021-09-29 14:21:59 -07:00
amlrelsa-ms
36d96f96ec update samples from Release-114 as a part of SDK release 2021-09-29 20:16:51 +00:00
Harneet Virk
7ebcfea5a3 Merge pull request #1600 from Azure/release_update/Release-113
update samples from Release-113 as a part of  SDK release
2021-09-28 12:53:57 -07:00
amlrelsa-ms
b20bfed33a update samples from Release-113 as a part of SDK release 2021-09-28 19:44:58 +00:00
Harneet Virk
a66a92e338 Merge pull request #1597 from Azure/release_update/Release-112
update samples from Release-112 as a part of  SDK release
2021-09-24 14:44:53 -07:00
amlrelsa-ms
c56c2c3525 update samples from Release-112 as a part of SDK release 2021-09-24 21:40:44 +00:00
171 changed files with 57844 additions and 9780 deletions

View File

@@ -103,7 +103,7 @@
"source": [ "source": [
"import azureml.core\n", "import azureml.core\n",
"\n", "\n",
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },
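
The notebook diff above bumps the expected SDK version from 1.34.0 to 1.38.0. As a standalone sketch, the check amounts to the following; the `EXPECTED` constant and the warning are illustrative additions, not part of the sample:

    import warnings

    import azureml.core

    EXPECTED = "1.38.0"  # illustrative constant: the release this compare targets
    print(f"This notebook was created using version {EXPECTED} of the Azure ML SDK")
    print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
    if azureml.core.VERSION != EXPECTED:
        warnings.warn(f"These samples were validated against SDK {EXPECTED}.")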

View File

@@ -6,4 +6,4 @@ dependencies:
- fairlearn>=0.6.2 - fairlearn>=0.6.2
- joblib - joblib
- liac-arff - liac-arff
- raiwidgets~=0.7.0 - raiwidgets~=0.16.0

View File

@@ -6,4 +6,4 @@ dependencies:
- fairlearn>=0.6.2 - fairlearn>=0.6.2
- joblib - joblib
- liac-arff - liac-arff
- raiwidgets~=0.7.0 - raiwidgets~=0.16.0
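
Both environment files above relax the raiwidgets pin from ~=0.7.0 to ~=0.16.0. The `~=` operator is a compatible-release specifier: ~=0.16.0 accepts any 0.16.x patch release but excludes 0.17.0. A quick way to confirm that, sketched with the `packaging` library (an assumption for illustration; the samples themselves do not use it):

    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet("~=0.16.0")
    print("0.16.5" in spec)  # True: patch releases within 0.16 are accepted
    print("0.17.0" in spec)  # False: the minor version stays pinned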

View File

@@ -4,7 +4,6 @@ dependencies:
# Currently Azure ML only supports 3.5.2 and later. # Currently Azure ML only supports 3.5.2 and later.
- pip==21.1.2 - pip==21.1.2
- python>=3.5.2,<3.8 - python>=3.5.2,<3.8
- nb_conda
- boto3==1.15.18 - boto3==1.15.18
- matplotlib==2.1.0 - matplotlib==2.1.0
- numpy==1.18.5 - numpy==1.18.5
@@ -22,9 +21,9 @@ dependencies:
- pip: - pip:
# Required packages for AzureML execution, history, and data preparation. # Required packages for AzureML execution, history, and data preparation.
- azureml-widgets~=1.34.0 - azureml-widgets~=1.38.0
- pytorch-transformers==1.0.0 - pytorch-transformers==1.0.0
- spacy==2.1.8 - spacy==2.1.8
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
- -r https://automlresources-prod.azureedge.net/validated-requirements/1.34.0/validated_win32_requirements.txt [--no-deps] - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.38.0/validated_win32_requirements.txt [--no-deps]
- arch==4.14 - arch==4.14

View File

@@ -4,7 +4,6 @@ dependencies:
# Currently Azure ML only supports 3.5.2 and later. # Currently Azure ML only supports 3.5.2 and later.
- pip==21.1.2 - pip==21.1.2
- python>=3.5.2,<3.8 - python>=3.5.2,<3.8
- nb_conda
- boto3==1.15.18 - boto3==1.15.18
- matplotlib==2.1.0 - matplotlib==2.1.0
- numpy==1.18.5 - numpy==1.18.5
@@ -22,9 +21,9 @@ dependencies:
- pip: - pip:
# Required packages for AzureML execution, history, and data preparation. # Required packages for AzureML execution, history, and data preparation.
- azureml-widgets~=1.34.0 - azureml-widgets~=1.38.0
- pytorch-transformers==1.0.0 - pytorch-transformers==1.0.0
- spacy==2.1.8 - spacy==2.1.8
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
- -r https://automlresources-prod.azureedge.net/validated-requirements/1.34.0/validated_linux_requirements.txt [--no-deps] - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.38.0/validated_linux_requirements.txt [--no-deps]
- arch==4.14 - arch==4.14

View File

@@ -5,7 +5,6 @@ dependencies:
- pip==21.1.2 - pip==21.1.2
- nomkl - nomkl
- python>=3.5.2,<3.8 - python>=3.5.2,<3.8
- nb_conda
- boto3==1.15.18 - boto3==1.15.18
- matplotlib==2.1.0 - matplotlib==2.1.0
- numpy==1.18.5 - numpy==1.18.5
@@ -23,9 +22,9 @@ dependencies:
- pip: - pip:
# Required packages for AzureML execution, history, and data preparation. # Required packages for AzureML execution, history, and data preparation.
- azureml-widgets~=1.34.0 - azureml-widgets~=1.38.0
- pytorch-transformers==1.0.0 - pytorch-transformers==1.0.0
- spacy==2.1.8 - spacy==2.1.8
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
- -r https://automlresources-prod.azureedge.net/validated-requirements/1.34.0/validated_darwin_requirements.txt [--no-deps] - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.38.0/validated_darwin_requirements.txt [--no-deps]
- arch==4.14 - arch==4.14

View File

@@ -3,7 +3,7 @@ import platform
try: try:
import conda import conda
except: except Exception:
print('Failed to import conda.') print('Failed to import conda.')
print('This setup is usually run from the base conda environment.') print('This setup is usually run from the base conda environment.')
print('You can activate the base environment using the command "conda activate base"') print('You can activate the base environment using the command "conda activate base"')
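
The setup-script change above narrows a bare `except:` to `except Exception:`. A bare `except:` catches `BaseException`, including `KeyboardInterrupt` and `SystemExit`, so Ctrl+C during setup would be silently swallowed; catching `Exception` lets those propagate. In context:

    try:
        import conda
    except Exception:  # unlike a bare "except:", lets KeyboardInterrupt/SystemExit through
        print('Failed to import conda.')
        print('This setup is usually run from the base conda environment.')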

View File

@@ -1,21 +1,5 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing.png)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -77,6 +61,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"import json\n",
"import logging\n", "import logging\n",
"\n", "\n",
"from matplotlib import pyplot as plt\n", "from matplotlib import pyplot as plt\n",
@@ -98,16 +83,6 @@
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK." "This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
] ]
}, },
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -143,18 +118,18 @@
"ws = Workspace.from_config()\n", "ws = Workspace.from_config()\n",
"\n", "\n",
"# choose a name for experiment\n", "# choose a name for experiment\n",
"experiment_name = 'automl-classification-bmarketing-all'\n", "experiment_name = \"automl-classification-bmarketing-all\"\n",
"\n", "\n",
"experiment=Experiment(ws, experiment_name)\n", "experiment = Experiment(ws, experiment_name)\n",
"\n", "\n",
"output = {}\n", "output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n", "output[\"Subscription ID\"] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n", "output[\"Workspace\"] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output['Location'] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output['Experiment Name'] = experiment.name\n", "output[\"Experiment Name\"] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"
] ]
}, },
@@ -175,7 +150,9 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"metadata": {}, "metadata": {
"tags": []
},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n", "from azureml.core.compute import ComputeTarget, AmlCompute\n",
@@ -187,12 +164,12 @@
"# Verify that cluster does not exist already\n", "# Verify that cluster does not exist already\n",
"try:\n", "try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n", " compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
" print('Found existing cluster, use it.')\n", " print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n", "except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n", " compute_config = AmlCompute.provisioning_configuration(\n",
" max_nodes=6)\n", " vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
" )\n",
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n", " compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)" "compute_target.wait_for_completion(show_output=True)"
] ]
}, },
@@ -225,7 +202,9 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"data = pd.read_csv(\"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv\")\n", "data = pd.read_csv(\n",
" \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv\"\n",
")\n",
"data.head()" "data.head()"
] ]
}, },
@@ -240,7 +219,12 @@
"\n", "\n",
"missing_rate = 0.75\n", "missing_rate = 0.75\n",
"n_missing_samples = int(np.floor(data.shape[0] * missing_rate))\n", "n_missing_samples = int(np.floor(data.shape[0] * missing_rate))\n",
"missing_samples = np.hstack((np.zeros(data.shape[0] - n_missing_samples, dtype=np.bool), np.ones(n_missing_samples, dtype=np.bool)))\n", "missing_samples = np.hstack(\n",
" (\n",
" np.zeros(data.shape[0] - n_missing_samples, dtype=np.bool),\n",
" np.ones(n_missing_samples, dtype=np.bool),\n",
" )\n",
")\n",
"rng = np.random.RandomState(0)\n", "rng = np.random.RandomState(0)\n",
"rng.shuffle(missing_samples)\n", "rng.shuffle(missing_samples)\n",
"missing_features = rng.randint(0, data.shape[1], n_missing_samples)\n", "missing_features = rng.randint(0, data.shape[1], n_missing_samples)\n",
@@ -253,19 +237,21 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"if not os.path.isdir('data'):\n", "if not os.path.isdir(\"data\"):\n",
" os.mkdir('data')\n", " os.mkdir(\"data\")\n",
" \n",
"# Save the train data to a csv to be uploaded to the datastore\n", "# Save the train data to a csv to be uploaded to the datastore\n",
"pd.DataFrame(data).to_csv(\"data/train_data.csv\", index=False)\n", "pd.DataFrame(data).to_csv(\"data/train_data.csv\", index=False)\n",
"\n", "\n",
"ds = ws.get_default_datastore()\n", "ds = ws.get_default_datastore()\n",
"ds.upload(src_dir='./data', target_path='bankmarketing', overwrite=True, show_progress=True)\n", "ds.upload(\n",
" src_dir=\"./data\", target_path=\"bankmarketing\", overwrite=True, show_progress=True\n",
")\n",
"\n", "\n",
" \n",
"\n", "\n",
"# Upload the training data as a tabular dataset for access during training on remote compute\n", "# Upload the training data as a tabular dataset for access during training on remote compute\n",
"train_data = Dataset.Tabular.from_delimited_files(path=ds.path('bankmarketing/train_data.csv'))\n", "train_data = Dataset.Tabular.from_delimited_files(\n",
" path=ds.path(\"bankmarketing/train_data.csv\")\n",
")\n",
"label = \"y\"" "label = \"y\""
] ]
}, },
@@ -336,33 +322,36 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"automl_settings = {\n", "automl_settings = {\n",
" \"experiment_timeout_hours\" : 0.3,\n", " \"experiment_timeout_hours\": 0.3,\n",
" \"enable_early_stopping\" : True,\n", " \"enable_early_stopping\": True,\n",
" \"iteration_timeout_minutes\": 5,\n", " \"iteration_timeout_minutes\": 5,\n",
" \"max_concurrent_iterations\": 4,\n", " \"max_concurrent_iterations\": 4,\n",
" \"max_cores_per_iteration\": -1,\n", " \"max_cores_per_iteration\": -1,\n",
" #\"n_cross_validations\": 2,\n", " # \"n_cross_validations\": 2,\n",
" \"primary_metric\": 'AUC_weighted',\n", " \"primary_metric\": \"AUC_weighted\",\n",
" \"featurization\": 'auto',\n", " \"featurization\": \"auto\",\n",
" \"verbosity\": logging.INFO,\n", " \"verbosity\": logging.INFO,\n",
"}\n", "}\n",
"\n", "\n",
"automl_config = AutoMLConfig(task = 'classification',\n", "automl_config = AutoMLConfig(\n",
" debug_log = 'automl_errors.log',\n", " task=\"classification\",\n",
" debug_log=\"automl_errors.log\",\n",
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" experiment_exit_score = 0.9984,\n", " experiment_exit_score=0.9984,\n",
" blocked_models = ['KNN','LinearSVM'],\n", " blocked_models=[\"KNN\", \"LinearSVM\"],\n",
" enable_onnx_compatible_models=True,\n", " enable_onnx_compatible_models=True,\n",
" training_data = train_data,\n", " training_data=train_data,\n",
" label_column_name = label,\n", " label_column_name=label,\n",
" validation_data = validation_dataset,\n", " validation_data=validation_dataset,\n",
" **automl_settings\n", " **automl_settings,\n",
" )" ")"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {
"tags": []
},
"source": [ "source": [
"Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous." "Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
] ]
@@ -373,12 +362,14 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"remote_run = experiment.submit(automl_config, show_output = False)" "remote_run = experiment.submit(automl_config, show_output=False)"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {
"tags": []
},
"source": [ "source": [
"Run the following cell to access previous runs. Uncomment the cell below and update the run_id." "Run the following cell to access previous runs. Uncomment the cell below and update the run_id."
] ]
@@ -389,9 +380,9 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"#from azureml.train.automl.run import AutoMLRun\n", "# from azureml.train.automl.run import AutoMLRun\n",
"#remote_run = AutoMLRun(experiment=experiment, run_id='<run_ID_goes_here')\n", "# remote_run = AutoMLRun(experiment=experiment, run_id='<run_ID_goes_here')\n",
"#remote_run" "# remote_run"
] ]
}, },
{ {
@@ -410,7 +401,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"best_run_customized, fitted_model_customized = remote_run.get_output()" "# Retrieve the best Run object\n",
"best_run = remote_run.get_best_child()"
] ]
}, },
{ {
@@ -419,7 +411,7 @@
"source": [ "source": [
"## Transparency\n", "## Transparency\n",
"\n", "\n",
"View updated featurization summary" "View featurization summary for the best model - to study how different features were transformed. This is stored as a JSON file in the outputs directory for the run."
] ]
}, },
{ {
@@ -428,36 +420,16 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"custom_featurizer = fitted_model_customized.named_steps['datatransformer']\n", "# Download the featurization summary JSON file locally\n",
"df = custom_featurizer.get_featurization_summary()\n", "best_run.download_file(\n",
"pd.DataFrame(data=df)" " \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
] ")\n",
}, "\n",
{ "# Render the JSON as a pandas DataFrame\n",
"cell_type": "markdown", "with open(\"featurization_summary.json\", \"r\") as f:\n",
"metadata": {}, " records = json.load(f)\n",
"source": [ "\n",
"Set `is_user_friendly=False` to get a more detailed summary for the transforms being applied." "pd.DataFrame.from_records(records)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df = custom_featurizer.get_featurization_summary(is_user_friendly=False)\n",
"pd.DataFrame(data=df)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df = custom_featurizer.get_stats_feature_type_summary()\n",
"pd.DataFrame(data=df)"
] ]
}, },
{ {
@@ -474,7 +446,8 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.widgets import RunDetails\n", "from azureml.widgets import RunDetails\n",
"RunDetails(remote_run).show() " "\n",
"RunDetails(remote_run).show()"
] ]
}, },
{ {
@@ -493,13 +466,16 @@
"source": [ "source": [
"# Wait for the best model explanation run to complete\n", "# Wait for the best model explanation run to complete\n",
"from azureml.core.run import Run\n", "from azureml.core.run import Run\n",
"\n",
"model_explainability_run_id = remote_run.id + \"_\" + \"ModelExplain\"\n", "model_explainability_run_id = remote_run.id + \"_\" + \"ModelExplain\"\n",
"print(model_explainability_run_id)\n", "print(model_explainability_run_id)\n",
"model_explainability_run = Run(experiment=experiment, run_id=model_explainability_run_id)\n", "model_explainability_run = Run(\n",
" experiment=experiment, run_id=model_explainability_run_id\n",
")\n",
"model_explainability_run.wait_for_completion()\n", "model_explainability_run.wait_for_completion()\n",
"\n", "\n",
"# Get the best run object\n", "# Get the best run object\n",
"best_run, fitted_model = remote_run.get_output()" "best_run = remote_run.get_best_child()"
] ]
}, },
{ {
@@ -576,6 +552,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.automl.runtime.onnx_convert import OnnxConverter\n", "from azureml.automl.runtime.onnx_convert import OnnxConverter\n",
"\n",
"onnx_fl_path = \"./best_model.onnx\"\n", "onnx_fl_path = \"./best_model.onnx\"\n",
"OnnxConverter.save_onnx_model(onnx_mdl, onnx_fl_path)" "OnnxConverter.save_onnx_model(onnx_mdl, onnx_fl_path)"
] ]
@@ -600,13 +577,17 @@
"\n", "\n",
"from azureml.automl.runtime.onnx_convert import OnnxInferenceHelper\n", "from azureml.automl.runtime.onnx_convert import OnnxInferenceHelper\n",
"\n", "\n",
"\n",
"def get_onnx_res(run):\n", "def get_onnx_res(run):\n",
" res_path = 'onnx_resource.json'\n", " res_path = \"onnx_resource.json\"\n",
" run.download_file(name=constants.MODEL_RESOURCE_PATH_ONNX, output_file_path=res_path)\n", " run.download_file(\n",
" name=constants.MODEL_RESOURCE_PATH_ONNX, output_file_path=res_path\n",
" )\n",
" with open(res_path) as f:\n", " with open(res_path) as f:\n",
" result = json.load(f)\n", " result = json.load(f)\n",
" return result\n", " return result\n",
"\n", "\n",
"\n",
"if sys.version_info < OnnxConvertConstants.OnnxIncompatiblePythonVersion:\n", "if sys.version_info < OnnxConvertConstants.OnnxIncompatiblePythonVersion:\n",
" test_df = test_dataset.to_pandas_dataframe()\n", " test_df = test_dataset.to_pandas_dataframe()\n",
" mdl_bytes = onnx_mdl.SerializeToString()\n", " mdl_bytes = onnx_mdl.SerializeToString()\n",
@@ -618,7 +599,7 @@
" print(pred_onnx)\n", " print(pred_onnx)\n",
" print(pred_prob_onnx)\n", " print(pred_prob_onnx)\n",
"else:\n", "else:\n",
" print('Please use Python version 3.6 or 3.7 to run the inference helper.')" " print(\"Please use Python version 3.6 or 3.7 to run the inference helper.\")"
] ]
}, },
{ {
@@ -629,7 +610,16 @@
"\n", "\n",
"### Retrieve the Best Model\n", "### Retrieve the Best Model\n",
"\n", "\n",
"Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*." "Below we select the best pipeline from our iterations. The `get_best_child` method returns the Run object for the best model based on the default primary metric. There are additional flags that can be passed to the method if we want to retrieve the best Run based on any of the other supported metrics, or if we are just interested in the best run among the ONNX compatible runs. As always, you can execute `??remote_run.get_best_child` in a new cell to view the source or docs for the function."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"??remote_run.get_best_child"
] ]
}, },
{ {
@@ -649,7 +639,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"best_run, fitted_model = remote_run.get_output()" "best_run = remote_run.get_best_child()"
] ]
}, },
{ {
@@ -658,11 +648,11 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"model_name = best_run.properties['model_name']\n", "model_name = best_run.properties[\"model_name\"]\n",
"\n", "\n",
"script_file_name = 'inference/score.py'\n", "script_file_name = \"inference/score.py\"\n",
"\n", "\n",
"best_run.download_file('outputs/scoring_file_v_1_0_0.py', 'inference/score.py')" "best_run.download_file(\"outputs/scoring_file_v_1_0_0.py\", \"inference/score.py\")"
] ]
}, },
{ {
@@ -679,11 +669,15 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"description = 'AutoML Model trained on bank marketing data to predict if a client will subscribe to a term deposit'\n", "description = \"AutoML Model trained on bank marketing data to predict if a client will subscribe to a term deposit\"\n",
"tags = None\n", "tags = None\n",
"model = remote_run.register_model(model_name = model_name, description = description, tags = tags)\n", "model = remote_run.register_model(\n",
" model_name=model_name, description=description, tags=tags\n",
")\n",
"\n", "\n",
"print(remote_run.model_id) # This will be written to the script file later in the notebook." "print(\n",
" remote_run.model_id\n",
") # This will be written to the script file later in the notebook."
] ]
}, },
{ {
@@ -701,16 +695,20 @@
"source": [ "source": [
"from azureml.core.model import InferenceConfig\n", "from azureml.core.model import InferenceConfig\n",
"from azureml.core.webservice import AciWebservice\n", "from azureml.core.webservice import AciWebservice\n",
"from azureml.core.webservice import Webservice\n",
"from azureml.core.model import Model\n", "from azureml.core.model import Model\n",
"from azureml.core.environment import Environment\n",
"\n", "\n",
"inference_config = InferenceConfig(entry_script=script_file_name)\n", "inference_config = InferenceConfig(entry_script=script_file_name)\n",
"\n", "\n",
"aciconfig = AciWebservice.deploy_configuration(cpu_cores = 2, \n", "aciconfig = AciWebservice.deploy_configuration(\n",
" memory_gb = 2, \n", " cpu_cores=2,\n",
" tags = {'area': \"bmData\", 'type': \"automl_classification\"}, \n", " memory_gb=2,\n",
" description = 'sample service for Automl Classification')\n", " tags={\"area\": \"bmData\", \"type\": \"automl_classification\"},\n",
" description=\"sample service for Automl Classification\",\n",
")\n",
"\n", "\n",
"aci_service_name = 'automl-sample-bankmarketing-all'\n", "aci_service_name = model_name.lower()\n",
"print(aci_service_name)\n", "print(aci_service_name)\n",
"aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)\n", "aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)\n",
"aci_service.wait_for_deployment(True)\n", "aci_service.wait_for_deployment(True)\n",
@@ -732,7 +730,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"#aci_service.get_logs()" "# aci_service.get_logs()"
] ]
}, },
{ {
@@ -762,8 +760,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"X_test = test_dataset.drop_columns(columns=['y'])\n", "X_test = test_dataset.drop_columns(columns=[\"y\"])\n",
"y_test = test_dataset.keep_columns(columns=['y'], validate=True)\n", "y_test = test_dataset.keep_columns(columns=[\"y\"], validate=True)\n",
"test_dataset.take(5).to_pandas_dataframe()" "test_dataset.take(5).to_pandas_dataframe()"
] ]
}, },
@@ -785,13 +783,13 @@
"source": [ "source": [
"import requests\n", "import requests\n",
"\n", "\n",
"X_test_json = X_test.to_json(orient='records')\n", "X_test_json = X_test.to_json(orient=\"records\")\n",
"data = \"{\\\"data\\\": \" + X_test_json +\"}\"\n", "data = '{\"data\": ' + X_test_json + \"}\"\n",
"headers = {'Content-Type': 'application/json'}\n", "headers = {\"Content-Type\": \"application/json\"}\n",
"\n", "\n",
"resp = requests.post(aci_service.scoring_uri, data, headers=headers)\n", "resp = requests.post(aci_service.scoring_uri, data, headers=headers)\n",
"\n", "\n",
"y_pred = json.loads(json.loads(resp.text))['result']" "y_pred = json.loads(json.loads(resp.text))[\"result\"]"
] ]
}, },
{ {
@@ -801,7 +799,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"actual = array(y_test)\n", "actual = array(y_test)\n",
"actual = actual[:,0]\n", "actual = actual[:, 0]\n",
"print(len(y_pred), \" \", len(actual))" "print(len(y_pred), \" \", len(actual))"
] ]
}, },
@@ -824,20 +822,26 @@
"from sklearn.metrics import confusion_matrix\n", "from sklearn.metrics import confusion_matrix\n",
"import itertools\n", "import itertools\n",
"\n", "\n",
"cf =confusion_matrix(actual,y_pred)\n", "cf = confusion_matrix(actual, y_pred)\n",
"plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest')\n", "plt.imshow(cf, cmap=plt.cm.Blues, interpolation=\"nearest\")\n",
"plt.colorbar()\n", "plt.colorbar()\n",
"plt.title('Confusion Matrix')\n", "plt.title(\"Confusion Matrix\")\n",
"plt.xlabel('Predicted')\n", "plt.xlabel(\"Predicted\")\n",
"plt.ylabel('Actual')\n", "plt.ylabel(\"Actual\")\n",
"class_labels = ['no','yes']\n", "class_labels = [\"no\", \"yes\"]\n",
"tick_marks = np.arange(len(class_labels))\n", "tick_marks = np.arange(len(class_labels))\n",
"plt.xticks(tick_marks,class_labels)\n", "plt.xticks(tick_marks, class_labels)\n",
"plt.yticks([-0.5,0,1,1.5],['','no','yes',''])\n", "plt.yticks([-0.5, 0, 1, 1.5], [\"\", \"no\", \"yes\", \"\"])\n",
"# plotting text value inside cells\n", "# plotting text value inside cells\n",
"thresh = cf.max() / 2.\n", "thresh = cf.max() / 2.0\n",
"for i,j in itertools.product(range(cf.shape[0]),range(cf.shape[1])):\n", "for i, j in itertools.product(range(cf.shape[0]), range(cf.shape[1])):\n",
" plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black')\n", " plt.text(\n",
" j,\n",
" i,\n",
" format(cf[i, j], \"d\"),\n",
" horizontalalignment=\"center\",\n",
" color=\"white\" if cf[i, j] > thresh else \"black\",\n",
" )\n",
"plt.show()" "plt.show()"
] ]
}, },
@@ -902,9 +906,9 @@
"friendly_name": "Automated ML run with basic edition features.", "friendly_name": "Automated ML run with basic edition features.",
"index_order": 5, "index_order": 5,
"kernelspec": { "kernelspec": {
"display_name": "Python 3.6", "display_name": "Python 3.6 - AzureML",
"language": "python", "language": "python",
"name": "python36" "name": "python3-azureml"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {

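The recurring API change in this notebook: each `remote_run.get_output()` call, which returned the best run together with the fitted model, becomes `remote_run.get_best_child()`, which returns only the best child Run selected by the default primary metric. A condensed sketch of the new retrieval flow, assembled from cells in the diff above:

    # Best child Run by the default primary metric; the notebook notes that other
    # supported metrics, or ONNX-compatible-only runs, can be requested via flags.
    best_run = remote_run.get_best_child()

    # Downstream cells read artifacts from the Run instead of the fitted model:
    model_name = best_run.properties["model_name"]
    best_run.download_file("outputs/scoring_file_v_1_0_0.py", "inference/score.py")
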
View File

@@ -1,21 +1,5 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.png)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -87,16 +71,6 @@
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK." "This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
] ]
}, },
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
@@ -106,18 +80,18 @@
"ws = Workspace.from_config()\n", "ws = Workspace.from_config()\n",
"\n", "\n",
"# choose a name for experiment\n", "# choose a name for experiment\n",
"experiment_name = 'automl-classification-ccard-remote'\n", "experiment_name = \"automl-classification-ccard-remote\"\n",
"\n", "\n",
"experiment=Experiment(ws, experiment_name)\n", "experiment = Experiment(ws, experiment_name)\n",
"\n", "\n",
"output = {}\n", "output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n", "output[\"Subscription ID\"] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n", "output[\"Workspace\"] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output['Location'] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output['Experiment Name'] = experiment.name\n", "output[\"Experiment Name\"] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"
] ]
}, },
@@ -150,12 +124,12 @@
"# Verify that cluster does not exist already\n", "# Verify that cluster does not exist already\n",
"try:\n", "try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n", " compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
" print('Found existing cluster, use it.')\n", " print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n", "except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n", " compute_config = AmlCompute.provisioning_configuration(\n",
" max_nodes=6)\n", " vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
" )\n",
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n", " compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)" "compute_target.wait_for_completion(show_output=True)"
] ]
}, },
@@ -184,7 +158,7 @@
"data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv\"\n", "data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv\"\n",
"dataset = Dataset.Tabular.from_delimited_files(data)\n", "dataset = Dataset.Tabular.from_delimited_files(data)\n",
"training_data, validation_data = dataset.random_split(percentage=0.8, seed=223)\n", "training_data, validation_data = dataset.random_split(percentage=0.8, seed=223)\n",
"label_column_name = 'Class'" "label_column_name = \"Class\""
] ]
}, },
{ {
@@ -215,20 +189,21 @@
"source": [ "source": [
"automl_settings = {\n", "automl_settings = {\n",
" \"n_cross_validations\": 3,\n", " \"n_cross_validations\": 3,\n",
" \"primary_metric\": 'AUC_weighted',\n", " \"primary_metric\": \"average_precision_score_weighted\",\n",
" \"enable_early_stopping\": True,\n", " \"enable_early_stopping\": True,\n",
" \"max_concurrent_iterations\": 2, # This is a limit for testing purpose, please increase it as per cluster size\n", " \"max_concurrent_iterations\": 2, # This is a limit for testing purpose, please increase it as per cluster size\n",
" \"experiment_timeout_hours\": 0.25, # This is a time limit for testing purposes, remove it for real use cases, this will drastically limit ablity to find the best model possible\n", " \"experiment_timeout_hours\": 0.25, # This is a time limit for testing purposes, remove it for real use cases, this will drastically limit ablity to find the best model possible\n",
" \"verbosity\": logging.INFO,\n", " \"verbosity\": logging.INFO,\n",
"}\n", "}\n",
"\n", "\n",
"automl_config = AutoMLConfig(task = 'classification',\n", "automl_config = AutoMLConfig(\n",
" debug_log = 'automl_errors.log',\n", " task=\"classification\",\n",
" compute_target = compute_target,\n", " debug_log=\"automl_errors.log\",\n",
" training_data = training_data,\n", " compute_target=compute_target,\n",
" label_column_name = label_column_name,\n", " training_data=training_data,\n",
" **automl_settings\n", " label_column_name=label_column_name,\n",
" )" " **automl_settings,\n",
")"
] ]
}, },
{ {
@@ -244,7 +219,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"remote_run = experiment.submit(automl_config, show_output = False)" "remote_run = experiment.submit(automl_config, show_output=False)"
] ]
}, },
{ {
@@ -254,8 +229,8 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# If you need to retrieve a run that already started, use the following code\n", "# If you need to retrieve a run that already started, use the following code\n",
"#from azureml.train.automl.run import AutoMLRun\n", "# from azureml.train.automl.run import AutoMLRun\n",
"#remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')" "# remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
] ]
}, },
{ {
@@ -287,6 +262,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.widgets import RunDetails\n", "from azureml.widgets import RunDetails\n",
"\n",
"RunDetails(remote_run).show()" "RunDetails(remote_run).show()"
] ]
}, },
@@ -353,8 +329,12 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# convert the test data to dataframe\n", "# convert the test data to dataframe\n",
"X_test_df = validation_data.drop_columns(columns=[label_column_name]).to_pandas_dataframe()\n", "X_test_df = validation_data.drop_columns(\n",
"y_test_df = validation_data.keep_columns(columns=[label_column_name], validate=True).to_pandas_dataframe()" " columns=[label_column_name]\n",
").to_pandas_dataframe()\n",
"y_test_df = validation_data.keep_columns(\n",
" columns=[label_column_name], validate=True\n",
").to_pandas_dataframe()"
] ]
}, },
{ {
@@ -388,20 +368,26 @@
"import numpy as np\n", "import numpy as np\n",
"import itertools\n", "import itertools\n",
"\n", "\n",
"cf =confusion_matrix(y_test_df.values,y_pred)\n", "cf = confusion_matrix(y_test_df.values, y_pred)\n",
"plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest')\n", "plt.imshow(cf, cmap=plt.cm.Blues, interpolation=\"nearest\")\n",
"plt.colorbar()\n", "plt.colorbar()\n",
"plt.title('Confusion Matrix')\n", "plt.title(\"Confusion Matrix\")\n",
"plt.xlabel('Predicted')\n", "plt.xlabel(\"Predicted\")\n",
"plt.ylabel('Actual')\n", "plt.ylabel(\"Actual\")\n",
"class_labels = ['False','True']\n", "class_labels = [\"False\", \"True\"]\n",
"tick_marks = np.arange(len(class_labels))\n", "tick_marks = np.arange(len(class_labels))\n",
"plt.xticks(tick_marks,class_labels)\n", "plt.xticks(tick_marks, class_labels)\n",
"plt.yticks([-0.5,0,1,1.5],['','False','True',''])\n", "plt.yticks([-0.5, 0, 1, 1.5], [\"\", \"False\", \"True\", \"\"])\n",
"# plotting text value inside cells\n", "# plotting text value inside cells\n",
"thresh = cf.max() / 2.\n", "thresh = cf.max() / 2.0\n",
"for i,j in itertools.product(range(cf.shape[0]),range(cf.shape[1])):\n", "for i, j in itertools.product(range(cf.shape[0]), range(cf.shape[1])):\n",
" plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black')\n", " plt.text(\n",
" j,\n",
" i,\n",
" format(cf[i, j], \"d\"),\n",
" horizontalalignment=\"center\",\n",
" color=\"white\" if cf[i, j] > thresh else \"black\",\n",
" )\n",
"plt.show()" "plt.show()"
] ]
}, },
@@ -418,7 +404,7 @@
"source": [ "source": [
"This Credit Card fraud Detection dataset is made available under the Open Database License: http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/ and is available at: https://www.kaggle.com/mlg-ulb/creditcardfraud\n", "This Credit Card fraud Detection dataset is made available under the Open Database License: http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/ and is available at: https://www.kaggle.com/mlg-ulb/creditcardfraud\n",
"\n", "\n",
"The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Universit\u00c3\u00a9 Libre de Bruxelles) on big data mining and fraud detection.\n", "The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Université Libre de Bruxelles) on big data mining and fraud detection.\n",
"More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project\n", "More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project\n",
"\n", "\n",
"Please cite the following works:\n", "Please cite the following works:\n",
@@ -431,13 +417,13 @@
"\n", "\n",
"Dal Pozzolo, Andrea Adaptive Machine learning for credit card fraud detection ULB MLG PhD thesis (supervised by G. Bontempi)\n", "Dal Pozzolo, Andrea Adaptive Machine learning for credit card fraud detection ULB MLG PhD thesis (supervised by G. Bontempi)\n",
"\n", "\n",
"Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-A\u00c3\u00abl; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information fusion,41, 182-194,2018,Elsevier\n", "Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-Aël; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information fusion,41, 182-194,2018,Elsevier\n",
"\n", "\n",
"Carcillo, Fabrizio; Le Borgne, Yann-A\u00c3\u00abl; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing\n", "Carcillo, Fabrizio; Le Borgne, Yann-Aël; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing\n",
"\n", "\n",
"Bertrand Lebichot, Yann-A\u00c3\u00abl Le Borgne, Liyun He, Frederic Obl\u00c3\u00a9, Gianluca Bontempi Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection, INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019\n", "Bertrand Lebichot, Yann-Aël Le Borgne, Liyun He, Frederic Oblé, Gianluca Bontempi Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection, INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019\n",
"\n", "\n",
"Fabrizio Carcillo, Yann-A\u00c3\u00abl Le Borgne, Olivier Caelen, Frederic Obl\u00c3\u00a9, Gianluca Bontempi Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection Information Sciences, 2019" "Fabrizio Carcillo, Yann-Aël Le Borgne, Olivier Caelen, Frederic Oblé, Gianluca Bontempi Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection Information Sciences, 2019"
] ]
} }
], ],
@@ -465,9 +451,9 @@
"friendly_name": "Classification of credit card fraudulent transactions using Automated ML", "friendly_name": "Classification of credit card fraudulent transactions using Automated ML",
"index_order": 5, "index_order": 5,
"kernelspec": { "kernelspec": {
"display_name": "Python 3.6", "display_name": "Python 3.6 - AzureML",
"language": "python", "language": "python",
"name": "python36" "name": "python3-azureml"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {

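This notebook also swaps its primary metric from AUC_weighted to average_precision_score_weighted, which is usually the more informative choice on heavily imbalanced targets such as fraud labels. A small illustration with scikit-learn, using synthetic scores rather than data from the notebook:

    import numpy as np
    from sklearn.metrics import average_precision_score, roc_auc_score

    rng = np.random.RandomState(0)
    y_true = (rng.rand(10_000) < 0.002).astype(int)           # ~0.2% positives, fraud-like imbalance
    y_score = np.clip(rng.rand(10_000) + 0.3 * y_true, 0, 1)  # noisy scores that favor positives

    # On data this skewed, ROC AUC can look healthy while average precision stays low.
    print("ROC AUC:          ", roc_auc_score(y_true, y_score))
    print("Average precision:", average_precision_score(y_true, y_score))
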
View File

@@ -1,21 +1,5 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.png)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -63,6 +47,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"import json\n",
"import logging\n", "import logging\n",
"import os\n", "import os\n",
"import shutil\n", "import shutil\n",
@@ -77,7 +62,7 @@
"from azureml.core.compute import ComputeTarget\n", "from azureml.core.compute import ComputeTarget\n",
"from azureml.core.run import Run\n", "from azureml.core.run import Run\n",
"from azureml.widgets import RunDetails\n", "from azureml.widgets import RunDetails\n",
"from azureml.core.model import Model \n", "from azureml.core.model import Model\n",
"from helper import run_inference, get_result_df\n", "from helper import run_inference, get_result_df\n",
"from azureml.train.automl import AutoMLConfig\n", "from azureml.train.automl import AutoMLConfig\n",
"from sklearn.datasets import fetch_20newsgroups" "from sklearn.datasets import fetch_20newsgroups"
@@ -90,16 +75,6 @@
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK." "This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
] ]
}, },
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -116,18 +91,18 @@
"ws = Workspace.from_config()\n", "ws = Workspace.from_config()\n",
"\n", "\n",
"# Choose an experiment name.\n", "# Choose an experiment name.\n",
"experiment_name = 'automl-classification-text-dnn'\n", "experiment_name = \"automl-classification-text-dnn\"\n",
"\n", "\n",
"experiment = Experiment(ws, experiment_name)\n", "experiment = Experiment(ws, experiment_name)\n",
"\n", "\n",
"output = {}\n", "output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n", "output[\"Subscription ID\"] = ws.subscription_id\n",
"output['Workspace Name'] = ws.name\n", "output[\"Workspace Name\"] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output['Location'] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output['Experiment Name'] = experiment.name\n", "output[\"Experiment Name\"] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"
] ]
}, },
@@ -160,13 +135,15 @@
"# Verify that cluster does not exist already\n", "# Verify that cluster does not exist already\n",
"try:\n", "try:\n",
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n", " compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
" print('Found existing cluster, use it.')\n", " print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n", "except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_NC6\", # CPU for BiLSTM, such as \"STANDARD_DS12_V2\" \n", " compute_config = AmlCompute.provisioning_configuration(\n",
" # To use BERT (this is recommended for best performance), select a GPU such as \"STANDARD_NC6\" \n", " vm_size=\"STANDARD_NC6\", # CPU for BiLSTM, such as \"STANDARD_D2_V2\"\n",
" # To use BERT (this is recommended for best performance), select a GPU such as \"STANDARD_NC6\"\n",
" # or similar GPU option\n", " # or similar GPU option\n",
" # available in your workspace\n", " # available in your workspace\n",
" max_nodes = num_nodes)\n", " max_nodes=num_nodes,\n",
" )\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n", " compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n", "\n",
"compute_target.wait_for_completion(show_output=True)" "compute_target.wait_for_completion(show_output=True)"
@@ -188,39 +165,53 @@
"source": [ "source": [
"data_dir = \"text-dnn-data\" # Local directory to store data\n", "data_dir = \"text-dnn-data\" # Local directory to store data\n",
"blobstore_datadir = data_dir # Blob store directory to store data in\n", "blobstore_datadir = data_dir # Blob store directory to store data in\n",
"target_column_name = 'y'\n", "target_column_name = \"y\"\n",
"feature_column_name = 'X'\n", "feature_column_name = \"X\"\n",
"\n",
"\n", "\n",
"def get_20newsgroups_data():\n", "def get_20newsgroups_data():\n",
" '''Fetches 20 Newsgroups data from scikit-learn\n", " \"\"\"Fetches 20 Newsgroups data from scikit-learn\n",
" Returns them in form of pandas dataframes\n", " Returns them in form of pandas dataframes\n",
" '''\n", " \"\"\"\n",
" remove = ('headers', 'footers', 'quotes')\n", " remove = (\"headers\", \"footers\", \"quotes\")\n",
" categories = [\n", " categories = [\n",
" 'rec.sport.baseball',\n", " \"rec.sport.baseball\",\n",
" 'rec.sport.hockey',\n", " \"rec.sport.hockey\",\n",
" 'comp.graphics',\n", " \"comp.graphics\",\n",
" 'sci.space',\n", " \"sci.space\",\n",
" ]\n", " ]\n",
"\n", "\n",
" data = fetch_20newsgroups(subset = 'train', categories = categories,\n", " data = fetch_20newsgroups(\n",
" shuffle = True, random_state = 42,\n", " subset=\"train\",\n",
" remove = remove)\n", " categories=categories,\n",
" data = pd.DataFrame({feature_column_name: data.data, target_column_name: data.target})\n", " shuffle=True,\n",
" random_state=42,\n",
" remove=remove,\n",
" )\n",
" data = pd.DataFrame(\n",
" {feature_column_name: data.data, target_column_name: data.target}\n",
" )\n",
"\n", "\n",
" data_train = data[:200]\n", " data_train = data[:200]\n",
" data_test = data[200:300] \n", " data_test = data[200:300]\n",
"\n", "\n",
" data_train = remove_blanks_20news(data_train, feature_column_name, target_column_name)\n", " data_train = remove_blanks_20news(\n",
" data_train, feature_column_name, target_column_name\n",
" )\n",
" data_test = remove_blanks_20news(data_test, feature_column_name, target_column_name)\n", " data_test = remove_blanks_20news(data_test, feature_column_name, target_column_name)\n",
" \n", "\n",
" return data_train, data_test\n", " return data_train, data_test\n",
" \n", "\n",
"\n",
"def remove_blanks_20news(data, feature_column_name, target_column_name):\n", "def remove_blanks_20news(data, feature_column_name, target_column_name):\n",
" \n", "\n",
" data[feature_column_name] = data[feature_column_name].replace(r'\\n', ' ', regex=True).apply(lambda x: x.strip())\n", " data[feature_column_name] = (\n",
" data = data[data[feature_column_name] != '']\n", " data[feature_column_name]\n",
" \n", " .replace(r\"\\n\", \" \", regex=True)\n",
" .apply(lambda x: x.strip())\n",
" )\n",
" data = data[data[feature_column_name] != \"\"]\n",
"\n",
" return data" " return data"
] ]
}, },
@@ -241,16 +232,15 @@
"\n", "\n",
"if not os.path.isdir(data_dir):\n", "if not os.path.isdir(data_dir):\n",
" os.mkdir(data_dir)\n", " os.mkdir(data_dir)\n",
" \n", "\n",
"train_data_fname = data_dir + '/train_data.csv'\n", "train_data_fname = data_dir + \"/train_data.csv\"\n",
"test_data_fname = data_dir + '/test_data.csv'\n", "test_data_fname = data_dir + \"/test_data.csv\"\n",
"\n", "\n",
"data_train.to_csv(train_data_fname, index=False)\n", "data_train.to_csv(train_data_fname, index=False)\n",
"data_test.to_csv(test_data_fname, index=False)\n", "data_test.to_csv(test_data_fname, index=False)\n",
"\n", "\n",
"datastore = ws.get_default_datastore()\n", "datastore = ws.get_default_datastore()\n",
"datastore.upload(src_dir=data_dir, target_path=blobstore_datadir,\n", "datastore.upload(src_dir=data_dir, target_path=blobstore_datadir, overwrite=True)"
" overwrite=True)"
] ]
}, },
{ {
@@ -259,7 +249,9 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"train_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, blobstore_datadir + '/train_data.csv')])" "train_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, blobstore_datadir + \"/train_data.csv\")]\n",
")"
] ]
}, },
{ {
@@ -284,8 +276,8 @@
"source": [ "source": [
"automl_settings = {\n", "automl_settings = {\n",
" \"experiment_timeout_minutes\": 30,\n", " \"experiment_timeout_minutes\": 30,\n",
" \"primary_metric\": 'AUC_weighted',\n", " \"primary_metric\": \"accuracy\",\n",
" \"max_concurrent_iterations\": num_nodes, \n", " \"max_concurrent_iterations\": num_nodes,\n",
" \"max_cores_per_iteration\": -1,\n", " \"max_cores_per_iteration\": -1,\n",
" \"enable_dnn\": True,\n", " \"enable_dnn\": True,\n",
" \"enable_early_stopping\": True,\n", " \"enable_early_stopping\": True,\n",
@@ -295,14 +287,15 @@
" \"enable_stack_ensemble\": False,\n", " \"enable_stack_ensemble\": False,\n",
"}\n", "}\n",
"\n", "\n",
"automl_config = AutoMLConfig(task = 'classification',\n", "automl_config = AutoMLConfig(\n",
" debug_log = 'automl_errors.log',\n", " task=\"classification\",\n",
" debug_log=\"automl_errors.log\",\n",
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" training_data=train_dataset,\n", " training_data=train_dataset,\n",
" label_column_name=target_column_name,\n", " label_column_name=target_column_name,\n",
" blocked_models = ['LightGBM', 'XGBoostClassifier'],\n", " blocked_models=[\"LightGBM\", \"XGBoostClassifier\"],\n",
" **automl_settings\n", " **automl_settings,\n",
" )" ")"
] ]
}, },
{ {
@@ -340,8 +333,8 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"You can test the model locally to get a feel of the input/output. When the model contains BERT, this step will require pytorch and pytorch-transformers installed in your local environment. The exact versions of these packages can be found in the **automl_env.yml** file located in the local copy of your MachineLearningNotebooks folder here:\n", "For local inferencing, you can load the model locally via. the method `remote_run.get_output()`. For more information on the arguments expected by this method, you can run `remote_run.get_output??`.\n",
"MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/automl_env.yml" "Note that when the model contains BERT, this step will require pytorch and pytorch-transformers installed in your local environment. The exact versions of these packages can be found in the **automl_env.yml** file located in the local copy of your azureml-examples folder here: \"azureml-examples/python-sdk/tutorials/automl-with-azureml\""
] ]
}, },
{ {
@@ -350,7 +343,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"best_run, fitted_model = automl_run.get_output()" "# Retrieve the best Run object\n",
"best_run = automl_run.get_best_child()"
] ]
}, },
{ {
@@ -366,10 +360,17 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"text_transformations_used = []\n", "# Download the featurization summary JSON file locally\n",
"for column_group in fitted_model.named_steps['datatransformer'].get_featurization_summary():\n", "best_run.download_file(\n",
" text_transformations_used.extend(column_group['Transformations'])\n", " \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
"text_transformations_used" ")\n",
"\n",
"# Render the JSON as a pandas DataFrame\n",
"with open(\"featurization_summary.json\", \"r\") as f:\n",
" records = json.load(f)\n",
"\n",
"featurization_summary = pd.DataFrame.from_records(records)\n",
"featurization_summary[\"Transformations\"].tolist()"
] ]
}, },
{ {
@@ -394,7 +395,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"summary_df = get_result_df(automl_run)\n", "summary_df = get_result_df(automl_run)\n",
"best_dnn_run_id = summary_df['run_id'].iloc[0]\n", "best_dnn_run_id = summary_df[\"run_id\"].iloc[0]\n",
"best_dnn_run = Run(experiment, best_dnn_run_id)" "best_dnn_run = Run(experiment, best_dnn_run_id)"
] ]
}, },
@@ -404,11 +405,11 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"model_dir = 'Model' # Local folder where the model will be stored temporarily\n", "model_dir = \"Model\" # Local folder where the model will be stored temporarily\n",
"if not os.path.isdir(model_dir):\n", "if not os.path.isdir(model_dir):\n",
" os.mkdir(model_dir)\n", " os.mkdir(model_dir)\n",
" \n", "\n",
"best_dnn_run.download_file('outputs/model.pkl', model_dir + '/model.pkl')" "best_dnn_run.download_file(\"outputs/model.pkl\", model_dir + \"/model.pkl\")"
] ]
}, },
{ {
@@ -425,11 +426,10 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# Register the model\n", "# Register the model\n",
"model_name = 'textDNN-20News'\n", "model_name = \"textDNN-20News\"\n",
"model = Model.register(model_path = model_dir + '/model.pkl',\n", "model = Model.register(\n",
" model_name = model_name,\n", " model_path=model_dir + \"/model.pkl\", model_name=model_name, tags=None, workspace=ws\n",
" tags=None,\n", ")"
" workspace=ws)"
] ]
}, },
{ {
@@ -454,7 +454,9 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, blobstore_datadir + '/test_data.csv')])\n", "test_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, blobstore_datadir + \"/test_data.csv\")]\n",
")\n",
"\n", "\n",
"# preview the first 3 rows of the dataset\n", "# preview the first 3 rows of the dataset\n",
"test_dataset.take(3).to_pandas_dataframe()" "test_dataset.take(3).to_pandas_dataframe()"
@@ -475,9 +477,9 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"script_folder = os.path.join(os.getcwd(), 'inference')\n", "script_folder = os.path.join(os.getcwd(), \"inference\")\n",
"os.makedirs(script_folder, exist_ok=True)\n", "os.makedirs(script_folder, exist_ok=True)\n",
"shutil.copy('infer.py', script_folder)" "shutil.copy(\"infer.py\", script_folder)"
] ]
}, },
{ {
@@ -486,8 +488,15 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"test_run = run_inference(test_experiment, compute_target, script_folder, best_dnn_run,\n", "test_run = run_inference(\n",
" test_dataset, target_column_name, model_name)" " test_experiment,\n",
" compute_target,\n",
" script_folder,\n",
" best_dnn_run,\n",
" test_dataset,\n",
" target_column_name,\n",
" model_name,\n",
")"
] ]
}, },
{ {
@@ -556,9 +565,9 @@
"friendly_name": "DNN Text Featurization", "friendly_name": "DNN Text Featurization",
"index_order": 2, "index_order": 2,
"kernelspec": { "kernelspec": {
"display_name": "Python 3.6", "display_name": "Python 3.6 - AzureML",
"language": "python", "language": "python",
"name": "python36" "name": "python3-azureml"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {


@@ -4,52 +4,65 @@ from azureml.train.estimator import Estimator
from azureml.core.run import Run from azureml.core.run import Run
def run_inference(test_experiment, compute_target, script_folder, train_run, def run_inference(
test_dataset, target_column_name, model_name): test_experiment,
compute_target,
script_folder,
train_run,
test_dataset,
target_column_name,
model_name,
):
inference_env = train_run.get_environment() inference_env = train_run.get_environment()
est = Estimator(source_directory=script_folder, est = Estimator(
entry_script='infer.py', source_directory=script_folder,
entry_script="infer.py",
script_params={ script_params={
'--target_column_name': target_column_name, "--target_column_name": target_column_name,
'--model_name': model_name "--model_name": model_name,
}, },
inputs=[ inputs=[test_dataset.as_named_input("test_data")],
test_dataset.as_named_input('test_data')
],
compute_target=compute_target, compute_target=compute_target,
environment_definition=inference_env) environment_definition=inference_env,
)
run = test_experiment.submit( run = test_experiment.submit(
est, tags={ est,
'training_run_id': train_run.id, tags={
'run_algorithm': train_run.properties['run_algorithm'], "training_run_id": train_run.id,
'valid_score': train_run.properties['score'], "run_algorithm": train_run.properties["run_algorithm"],
'primary_metric': train_run.properties['primary_metric'] "valid_score": train_run.properties["score"],
}) "primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags['run_algorithm']) run.log("run_algorithm", run.tags["run_algorithm"])
return run return run
def get_result_df(remote_run): def get_result_df(remote_run):
children = list(remote_run.get_children(recursive=True)) children = list(remote_run.get_children(recursive=True))
summary_df = pd.DataFrame(index=['run_id', 'run_algorithm', summary_df = pd.DataFrame(
'primary_metric', 'Score']) index=["run_id", "run_algorithm", "primary_metric", "Score"]
)
goal_minimize = False goal_minimize = False
for run in children: for run in children:
if('run_algorithm' in run.properties and 'score' in run.properties): if "run_algorithm" in run.properties and "score" in run.properties:
summary_df[run.id] = [run.id, run.properties['run_algorithm'], summary_df[run.id] = [
run.properties['primary_metric'], run.id,
float(run.properties['score'])] run.properties["run_algorithm"],
if('goal' in run.properties): run.properties["primary_metric"],
goal_minimize = run.properties['goal'].split('_')[-1] == 'min' float(run.properties["score"]),
]
if "goal" in run.properties:
goal_minimize = run.properties["goal"].split("_")[-1] == "min"
summary_df = summary_df.T.sort_values( summary_df = summary_df.T.sort_values(
'Score', "Score", ascending=goal_minimize
ascending=goal_minimize).drop_duplicates(['run_algorithm']) ).drop_duplicates(["run_algorithm"])
summary_df = summary_df.set_index('run_algorithm') summary_df = summary_df.set_index("run_algorithm")
return summary_df return summary_df


@@ -12,19 +12,22 @@ from azureml.core.model import Model
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument( parser.add_argument(
'--target_column_name', type=str, dest='target_column_name', "--target_column_name",
help='Target Column Name') type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument( parser.add_argument(
'--model_name', type=str, dest='model_name', "--model_name", type=str, dest="model_name", help="Name of registered model"
help='Name of registered model') )
args = parser.parse_args() args = parser.parse_args()
target_column_name = args.target_column_name target_column_name = args.target_column_name
model_name = args.model_name model_name = args.model_name
print('args passed are: ') print("args passed are: ")
print('Target column name: ', target_column_name) print("Target column name: ", target_column_name)
print('Name of registered model: ', model_name) print("Name of registered model: ", model_name)
model_path = Model.get_model_path(model_name) model_path = Model.get_model_path(model_name)
# deserialize the model file back into a sklearn model # deserialize the model file back into a sklearn model
@@ -32,13 +35,16 @@ model = joblib.load(model_path)
run = Run.get_context() run = Run.get_context()
# get input dataset by name # get input dataset by name
test_dataset = run.input_datasets['test_data'] test_dataset = run.input_datasets["test_data"]
X_test_df = test_dataset.drop_columns(columns=[target_column_name]) \ X_test_df = test_dataset.drop_columns(
.to_pandas_dataframe() columns=[target_column_name]
y_test_df = test_dataset.with_timestamp_columns(None) \ ).to_pandas_dataframe()
.keep_columns(columns=[target_column_name]) \ y_test_df = (
test_dataset.with_timestamp_columns(None)
.keep_columns(columns=[target_column_name])
.to_pandas_dataframe() .to_pandas_dataframe()
)
predicted = model.predict_proba(X_test_df) predicted = model.predict_proba(X_test_df)
@@ -47,11 +53,13 @@ if isinstance(predicted, pd.DataFrame):
# Use the AutoML scoring module # Use the AutoML scoring module
train_labels = model.classes_ train_labels = model.classes_
class_labels = np.unique(np.concatenate((y_test_df.values, np.reshape(train_labels, (-1, 1))))) class_labels = np.unique(
np.concatenate((y_test_df.values, np.reshape(train_labels, (-1, 1))))
)
classification_metrics = list(constants.CLASSIFICATION_SCALAR_SET) classification_metrics = list(constants.CLASSIFICATION_SCALAR_SET)
scores = scoring.score_classification(y_test_df.values, predicted, scores = scoring.score_classification(
classification_metrics, y_test_df.values, predicted, classification_metrics, class_labels, train_labels
class_labels, train_labels) )
print("scores:") print("scores:")
print(scores) print(scores)


@@ -1,20 +1,5 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/continous-retraining/auto-ml-continuous-retraining.png)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -75,16 +60,6 @@
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK." "This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
] ]
}, },
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -118,17 +93,17 @@
"dstor = ws.get_default_datastore()\n", "dstor = ws.get_default_datastore()\n",
"\n", "\n",
"# Choose a name for the run history container in the workspace.\n", "# Choose a name for the run history container in the workspace.\n",
"experiment_name = 'retrain-noaaweather'\n", "experiment_name = \"retrain-noaaweather\"\n",
"experiment = Experiment(ws, experiment_name)\n", "experiment = Experiment(ws, experiment_name)\n",
"\n", "\n",
"output = {}\n", "output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n", "output[\"Subscription ID\"] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n", "output[\"Workspace\"] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output['Location'] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output['Run History Name'] = experiment_name\n", "output[\"Run History Name\"] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"
] ]
}, },
@@ -164,12 +139,12 @@
"# Verify that cluster does not exist already\n", "# Verify that cluster does not exist already\n",
"try:\n", "try:\n",
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n", " compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
" print('Found existing cluster, use it.')\n", " print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n", "except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n", " compute_config = AmlCompute.provisioning_configuration(\n",
" max_nodes=4)\n", " vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
" )\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n", " compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)" "compute_target.wait_for_completion(show_output=True)"
] ]
}, },
@@ -196,12 +171,19 @@
"\n", "\n",
"conda_run_config.environment.docker.enabled = True\n", "conda_run_config.environment.docker.enabled = True\n",
"\n", "\n",
"cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]', 'applicationinsights', 'azureml-opendatasets', 'azureml-defaults'], \n", "cd = CondaDependencies.create(\n",
" conda_packages=['numpy==1.16.2'], \n", " pip_packages=[\n",
" pin_sdk_version=False)\n", " \"azureml-sdk[automl]\",\n",
" \"applicationinsights\",\n",
" \"azureml-opendatasets\",\n",
" \"azureml-defaults\",\n",
" ],\n",
" conda_packages=[\"numpy==1.16.2\"],\n",
" pin_sdk_version=False,\n",
")\n",
"conda_run_config.environment.python.conda_dependencies = cd\n", "conda_run_config.environment.python.conda_dependencies = cd\n",
"\n", "\n",
"print('run config is ready')" "print(\"run config is ready\")"
] ]
}, },
{ {
@@ -218,7 +200,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"# The name and target column of the Dataset to create \n", "# The name and target column of the Dataset to create\n",
"dataset = \"NOAA-Weather-DS4\"\n", "dataset = \"NOAA-Weather-DS4\"\n",
"target_column_name = \"temperature\"" "target_column_name = \"temperature\""
] ]
@@ -242,12 +224,14 @@
"from azureml.pipeline.steps import PythonScriptStep\n", "from azureml.pipeline.steps import PythonScriptStep\n",
"\n", "\n",
"ds_name = PipelineParameter(name=\"ds_name\", default_value=dataset)\n", "ds_name = PipelineParameter(name=\"ds_name\", default_value=dataset)\n",
"upload_data_step = PythonScriptStep(script_name=\"upload_weather_data.py\", \n", "upload_data_step = PythonScriptStep(\n",
" script_name=\"upload_weather_data.py\",\n",
" allow_reuse=False,\n", " allow_reuse=False,\n",
" name=\"upload_weather_data\",\n", " name=\"upload_weather_data\",\n",
" arguments=[\"--ds_name\", ds_name],\n", " arguments=[\"--ds_name\", ds_name],\n",
" compute_target=compute_target, \n", " compute_target=compute_target,\n",
" runconfig=conda_run_config)" " runconfig=conda_run_config,\n",
")"
] ]
}, },
{ {
@@ -264,10 +248,11 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"data_pipeline = Pipeline(\n", "data_pipeline = Pipeline(\n",
" description=\"pipeline_with_uploaddata\",\n", " description=\"pipeline_with_uploaddata\", workspace=ws, steps=[upload_data_step]\n",
" workspace=ws, \n", ")\n",
" steps=[upload_data_step])\n", "data_pipeline_run = experiment.submit(\n",
"data_pipeline_run = experiment.submit(data_pipeline, pipeline_parameters={\"ds_name\":dataset})" " data_pipeline, pipeline_parameters={\"ds_name\": dataset}\n",
")"
] ]
}, },
{ {
@@ -307,13 +292,14 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"data_prep_step = PythonScriptStep(script_name=\"check_data.py\", \n", "data_prep_step = PythonScriptStep(\n",
" script_name=\"check_data.py\",\n",
" allow_reuse=False,\n", " allow_reuse=False,\n",
" name=\"check_data\",\n", " name=\"check_data\",\n",
" arguments=[\"--ds_name\", ds_name,\n", " arguments=[\"--ds_name\", ds_name, \"--model_name\", model_name],\n",
" \"--model_name\", model_name],\n", " compute_target=compute_target,\n",
" compute_target=compute_target, \n", " runconfig=conda_run_config,\n",
" runconfig=conda_run_config)" ")"
] ]
}, },
{ {
@@ -323,6 +309,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core import Dataset\n", "from azureml.core import Dataset\n",
"\n",
"train_ds = Dataset.get_by_name(ws, dataset)\n", "train_ds = Dataset.get_by_name(ws, dataset)\n",
"train_ds = train_ds.drop_columns([\"partition_date\"])" "train_ds = train_ds.drop_columns([\"partition_date\"])"
] ]
@@ -348,21 +335,22 @@
" \"iteration_timeout_minutes\": 10,\n", " \"iteration_timeout_minutes\": 10,\n",
" \"experiment_timeout_hours\": 0.25,\n", " \"experiment_timeout_hours\": 0.25,\n",
" \"n_cross_validations\": 3,\n", " \"n_cross_validations\": 3,\n",
" \"primary_metric\": 'normalized_root_mean_squared_error',\n", " \"primary_metric\": \"r2_score\",\n",
" \"max_concurrent_iterations\": 3,\n", " \"max_concurrent_iterations\": 3,\n",
" \"max_cores_per_iteration\": -1,\n", " \"max_cores_per_iteration\": -1,\n",
" \"verbosity\": logging.INFO,\n", " \"verbosity\": logging.INFO,\n",
" \"enable_early_stopping\": True\n", " \"enable_early_stopping\": True,\n",
"}\n", "}\n",
"\n", "\n",
"automl_config = AutoMLConfig(task = 'regression',\n", "automl_config = AutoMLConfig(\n",
" debug_log = 'automl_errors.log',\n", " task=\"regression\",\n",
" path = \".\",\n", " debug_log=\"automl_errors.log\",\n",
" path=\".\",\n",
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" training_data = train_ds,\n", " training_data=train_ds,\n",
" label_column_name = target_column_name,\n", " label_column_name=target_column_name,\n",
" **automl_settings\n", " **automl_settings,\n",
" )" ")"
] ]
}, },
{ {
@@ -373,17 +361,21 @@
"source": [ "source": [
"from azureml.pipeline.core import PipelineData, TrainingOutput\n", "from azureml.pipeline.core import PipelineData, TrainingOutput\n",
"\n", "\n",
"metrics_output_name = 'metrics_output'\n", "metrics_output_name = \"metrics_output\"\n",
"best_model_output_name = 'best_model_output'\n", "best_model_output_name = \"best_model_output\"\n",
"\n", "\n",
"metrics_data = PipelineData(name='metrics_data',\n", "metrics_data = PipelineData(\n",
" name=\"metrics_data\",\n",
" datastore=dstor,\n", " datastore=dstor,\n",
" pipeline_output_name=metrics_output_name,\n", " pipeline_output_name=metrics_output_name,\n",
" training_output=TrainingOutput(type='Metrics'))\n", " training_output=TrainingOutput(type=\"Metrics\"),\n",
"model_data = PipelineData(name='model_data',\n", ")\n",
"model_data = PipelineData(\n",
" name=\"model_data\",\n",
" datastore=dstor,\n", " datastore=dstor,\n",
" pipeline_output_name=best_model_output_name,\n", " pipeline_output_name=best_model_output_name,\n",
" training_output=TrainingOutput(type='Model'))" " training_output=TrainingOutput(type=\"Model\"),\n",
")"
] ]
}, },
{ {
@@ -393,10 +385,11 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"automl_step = AutoMLStep(\n", "automl_step = AutoMLStep(\n",
" name='automl_module',\n", " name=\"automl_module\",\n",
" automl_config=automl_config,\n", " automl_config=automl_config,\n",
" outputs=[metrics_data, model_data],\n", " outputs=[metrics_data, model_data],\n",
" allow_reuse=False)" " allow_reuse=False,\n",
")"
] ]
}, },
{ {
@@ -413,13 +406,22 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"register_model_step = PythonScriptStep(script_name=\"register_model.py\",\n", "register_model_step = PythonScriptStep(\n",
" script_name=\"register_model.py\",\n",
" name=\"register_model\",\n", " name=\"register_model\",\n",
" allow_reuse=False,\n", " allow_reuse=False,\n",
" arguments=[\"--model_name\", model_name, \"--model_path\", model_data, \"--ds_name\", ds_name],\n", " arguments=[\n",
" \"--model_name\",\n",
" model_name,\n",
" \"--model_path\",\n",
" model_data,\n",
" \"--ds_name\",\n",
" ds_name,\n",
" ],\n",
" inputs=[model_data],\n", " inputs=[model_data],\n",
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" runconfig=conda_run_config)" " runconfig=conda_run_config,\n",
")"
] ]
}, },
{ {
@@ -437,8 +439,9 @@
"source": [ "source": [
"training_pipeline = Pipeline(\n", "training_pipeline = Pipeline(\n",
" description=\"training_pipeline\",\n", " description=\"training_pipeline\",\n",
" workspace=ws, \n", " workspace=ws,\n",
" steps=[data_prep_step, automl_step, register_model_step])" " steps=[data_prep_step, automl_step, register_model_step],\n",
")"
] ]
}, },
{ {
@@ -447,8 +450,10 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"training_pipeline_run = experiment.submit(training_pipeline, pipeline_parameters={\n", "training_pipeline_run = experiment.submit(\n",
" \"ds_name\": dataset, \"model_name\": \"noaaweatherds\"})" " training_pipeline,\n",
" pipeline_parameters={\"ds_name\": dataset, \"model_name\": \"noaaweatherds\"},\n",
")"
] ]
}, },
{ {
@@ -477,8 +482,8 @@
"pipeline_name = \"Retraining-Pipeline-NOAAWeather\"\n", "pipeline_name = \"Retraining-Pipeline-NOAAWeather\"\n",
"\n", "\n",
"published_pipeline = training_pipeline.publish(\n", "published_pipeline = training_pipeline.publish(\n",
" name=pipeline_name, \n", " name=pipeline_name, description=\"Pipeline that retrains AutoML model\"\n",
" description=\"Pipeline that retrains AutoML model\")\n", ")\n",
"\n", "\n",
"published_pipeline" "published_pipeline"
] ]
@@ -490,13 +495,17 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.pipeline.core import Schedule\n", "from azureml.pipeline.core import Schedule\n",
"schedule = Schedule.create(workspace=ws, name=\"RetrainingSchedule\",\n", "\n",
"schedule = Schedule.create(\n",
" workspace=ws,\n",
" name=\"RetrainingSchedule\",\n",
" pipeline_parameters={\"ds_name\": dataset, \"model_name\": \"noaaweatherds\"},\n", " pipeline_parameters={\"ds_name\": dataset, \"model_name\": \"noaaweatherds\"},\n",
" pipeline_id=published_pipeline.id, \n", " pipeline_id=published_pipeline.id,\n",
" experiment_name=experiment_name, \n", " experiment_name=experiment_name,\n",
" datastore=dstor,\n", " datastore=dstor,\n",
" wait_for_provisioning=True,\n", " wait_for_provisioning=True,\n",
" polling_interval=1440)" " polling_interval=1440,\n",
")"
] ]
}, },
{ {
@@ -520,8 +529,8 @@
"pipeline_name = \"DataIngestion-Pipeline-NOAAWeather\"\n", "pipeline_name = \"DataIngestion-Pipeline-NOAAWeather\"\n",
"\n", "\n",
"published_pipeline = training_pipeline.publish(\n", "published_pipeline = training_pipeline.publish(\n",
" name=pipeline_name, \n", " name=pipeline_name, description=\"Pipeline that updates NOAAWeather Dataset\"\n",
" description=\"Pipeline that updates NOAAWeather Dataset\")\n", ")\n",
"\n", "\n",
"published_pipeline" "published_pipeline"
] ]
@@ -533,13 +542,17 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.pipeline.core import Schedule\n", "from azureml.pipeline.core import Schedule\n",
"schedule = Schedule.create(workspace=ws, name=\"RetrainingSchedule-DataIngestion\",\n", "\n",
" pipeline_parameters={\"ds_name\":dataset},\n", "schedule = Schedule.create(\n",
" pipeline_id=published_pipeline.id, \n", " workspace=ws,\n",
" experiment_name=experiment_name, \n", " name=\"RetrainingSchedule-DataIngestion\",\n",
" pipeline_parameters={\"ds_name\": dataset},\n",
" pipeline_id=published_pipeline.id,\n",
" experiment_name=experiment_name,\n",
" datastore=dstor,\n", " datastore=dstor,\n",
" wait_for_provisioning=True,\n", " wait_for_provisioning=True,\n",
" polling_interval=1440)" " polling_interval=1440,\n",
")"
] ]
} }
], ],
@@ -550,9 +563,9 @@
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python 3.6", "display_name": "Python 3.6 - AzureML",
"language": "python", "language": "python",
"name": "python36" "name": "python3-azureml"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {


@@ -25,9 +25,11 @@ datasets = [(Dataset.Scenario.TRAINING, train_ds)]
# Register model with training dataset # Register model with training dataset
model = Model.register(workspace=ws, model = Model.register(
workspace=ws,
model_path=args.model_path, model_path=args.model_path,
model_name=args.model_name, model_name=args.model_name,
datasets=datasets) datasets=datasets,
)
print("Registered version {0} of model {1}".format(model.version, model.name)) print("Registered version {0} of model {1}".format(model.version, model.name))

View File

@@ -16,26 +16,82 @@ if type(run) == _OfflineRun:
else: else:
ws = run.experiment.workspace ws = run.experiment.workspace
usaf_list = ['725724', '722149', '723090', '722159', '723910', '720279', usaf_list = [
'725513', '725254', '726430', '720381', '723074', '726682', "725724",
'725486', '727883', '723177', '722075', '723086', '724053', "722149",
'725070', '722073', '726060', '725224', '725260', '724520', "723090",
'720305', '724020', '726510', '725126', '722523', '703333', "722159",
'722249', '722728', '725483', '722972', '724975', '742079', "723910",
'727468', '722193', '725624', '722030', '726380', '720309', "720279",
'722071', '720326', '725415', '724504', '725665', '725424', "725513",
'725066'] "725254",
"726430",
"720381",
"723074",
"726682",
"725486",
"727883",
"723177",
"722075",
"723086",
"724053",
"725070",
"722073",
"726060",
"725224",
"725260",
"724520",
"720305",
"724020",
"726510",
"725126",
"722523",
"703333",
"722249",
"722728",
"725483",
"722972",
"724975",
"742079",
"727468",
"722193",
"725624",
"722030",
"726380",
"720309",
"722071",
"720326",
"725415",
"724504",
"725665",
"725424",
"725066",
]
def get_noaa_data(start_time, end_time): def get_noaa_data(start_time, end_time):
columns = ['usaf', 'wban', 'datetime', 'latitude', 'longitude', 'elevation', columns = [
'windAngle', 'windSpeed', 'temperature', 'stationName', 'p_k'] "usaf",
"wban",
"datetime",
"latitude",
"longitude",
"elevation",
"windAngle",
"windSpeed",
"temperature",
"stationName",
"p_k",
]
isd = NoaaIsdWeather(start_time, end_time, cols=columns) isd = NoaaIsdWeather(start_time, end_time, cols=columns)
noaa_df = isd.to_pandas_dataframe() noaa_df = isd.to_pandas_dataframe()
df_filtered = noaa_df[noaa_df["usaf"].isin(usaf_list)] df_filtered = noaa_df[noaa_df["usaf"].isin(usaf_list)]
df_filtered.reset_index(drop=True) df_filtered.reset_index(drop=True)
print("Received {0} rows of training data between {1} and {2}".format( print(
df_filtered.shape[0], start_time, end_time)) "Received {0} rows of training data between {1} and {2}".format(
df_filtered.shape[0], start_time, end_time
)
)
return df_filtered return df_filtered
@@ -54,11 +110,12 @@ end_time = datetime.utcnow()
try: try:
ds = Dataset.get_by_name(ws, args.ds_name) ds = Dataset.get_by_name(ws, args.ds_name)
end_time_last_slice = ds.data_changed_time.replace(tzinfo=None) end_time_last_slice = ds.data_changed_time.replace(tzinfo=None)
print("Dataset {0} last updated on {1}".format(args.ds_name, print("Dataset {0} last updated on {1}".format(args.ds_name, end_time_last_slice))
end_time_last_slice))
except Exception: except Exception:
print(traceback.format_exc()) print(traceback.format_exc())
print("Dataset with name {0} not found, registering new dataset.".format(args.ds_name)) print(
"Dataset with name {0} not found, registering new dataset.".format(args.ds_name)
)
register_dataset = True register_dataset = True
end_time = datetime(2021, 5, 1, 0, 0) end_time = datetime(2021, 5, 1, 0, 0)
end_time_last_slice = end_time - relativedelta(weeks=2) end_time_last_slice = end_time - relativedelta(weeks=2)
@@ -66,26 +123,35 @@ except Exception:
train_df = get_noaa_data(end_time_last_slice, end_time) train_df = get_noaa_data(end_time_last_slice, end_time)
if train_df.size > 0: if train_df.size > 0:
print("Received {0} rows of new data after {1}.".format( print(
train_df.shape[0], end_time_last_slice)) "Received {0} rows of new data after {1}.".format(
folder_name = "{}/{:04d}/{:02d}/{:02d}/{:02d}/{:02d}/{:02d}".format(args.ds_name, end_time.year, train_df.shape[0], end_time_last_slice
end_time.month, end_time.day, )
end_time.hour, end_time.minute, )
end_time.second) folder_name = "{}/{:04d}/{:02d}/{:02d}/{:02d}/{:02d}/{:02d}".format(
args.ds_name,
end_time.year,
end_time.month,
end_time.day,
end_time.hour,
end_time.minute,
end_time.second,
)
file_path = "{0}/data.csv".format(folder_name) file_path = "{0}/data.csv".format(folder_name)
# Add a new partition to the registered dataset # Add a new partition to the registered dataset
os.makedirs(folder_name, exist_ok=True) os.makedirs(folder_name, exist_ok=True)
train_df.to_csv(file_path, index=False) train_df.to_csv(file_path, index=False)
dstor.upload_files(files=[file_path], dstor.upload_files(
target_path=folder_name, files=[file_path], target_path=folder_name, overwrite=True, show_progress=True
overwrite=True, )
show_progress=True)
else: else:
print("No new data since {0}.".format(end_time_last_slice)) print("No new data since {0}.".format(end_time_last_slice))
if register_dataset: if register_dataset:
ds = Dataset.Tabular.from_delimited_files(dstor.path("{}/**/*.csv".format( ds = Dataset.Tabular.from_delimited_files(
args.ds_name)), partition_format='/{partition_date:yyyy/MM/dd/HH/mm/ss}/data.csv') dstor.path("{}/**/*.csv".format(args.ds_name)),
partition_format="/{partition_date:yyyy/MM/dd/HH/mm/ss}/data.csv",
)
ds.register(ws, name=args.ds_name) ds.register(ws, name=args.ds_name)
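For reference, the upload_weather_data pipeline step defined earlier invokes this script with a single dataset-name argument; a hypothetical local run (relying on the _OfflineRun branch above and a workspace config on disk) would be `python upload_weather_data.py --ds_name NOAA-Weather-DS4`.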


@@ -4,7 +4,6 @@ dependencies:
# Currently Azure ML only supports 3.5.2 and later. # Currently Azure ML only supports 3.5.2 and later.
- pip<=19.3.1 - pip<=19.3.1
- python>=3.5.2,<3.8 - python>=3.5.2,<3.8
- nb_conda
- cython - cython
- urllib3<1.24 - urllib3<1.24
- PyJWT < 2.0.0 - PyJWT < 2.0.0


@@ -5,7 +5,6 @@ dependencies:
- pip<=19.3.1 - pip<=19.3.1
- nomkl - nomkl
- python>=3.5.2,<3.8 - python>=3.5.2,<3.8
- nb_conda
- cython - cython
- urllib3<1.24 - urllib3<1.24
- PyJWT < 2.0.0 - PyJWT < 2.0.0


@@ -92,7 +92,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },


@@ -91,7 +91,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },


@@ -0,0 +1,167 @@
from typing import Any, Dict, Optional, List
import argparse
import json
import os
import re
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from azureml.automl.core.shared import constants
from azureml.automl.core.shared.types import GrainType
from azureml.automl.runtime.shared.score import scoring
GRAIN = "time_series_id"
BACKTEST_ITER = "backtest_iteration"
ACTUALS = "actual_level"
PREDICTIONS = "predicted_level"
ALL_GRAINS = "all_sets"
FORECASTS_FILE = "forecast.csv"
SCORES_FILE = "scores.csv"
PLOTS_FILE = "plots_fcst_vs_actual.pdf"
RE_INVALID_SYMBOLS = re.compile("[: ]")
def _compute_metrics(df: pd.DataFrame, metrics: List[str]):
"""
Compute metrics for one data frame.
:param df: The data frame which contains actual_level and predicted_level columns.
:return: The data frame with two columns - metric_name and metric.
"""
scores = scoring.score_regression(
y_test=df[ACTUALS], y_pred=df[PREDICTIONS], metrics=metrics
)
metrics_df = pd.DataFrame(list(scores.items()), columns=["metric_name", "metric"])
metrics_df.sort_values(["metric_name"], inplace=True)
metrics_df.reset_index(drop=True, inplace=True)
return metrics_df
def _format_grain_name(grain: GrainType) -> str:
"""
Convert grain name to string.
:param grain: the grain name.
:return: the string representation of the given grain.
"""
if not isinstance(grain, tuple) and not isinstance(grain, list):
return str(grain)
grain = list(map(str, grain))
return "|".join(grain)
def compute_all_metrics(
fcst_df: pd.DataFrame,
ts_id_colnames: List[str],
metric_names: Optional[List[set]] = None,
):
"""
Calculate metrics per grain.
:param fcst_df: forecast data frame. Must contain 2 columns: 'actual_level' and 'predicted_level'
:param ts_id_colnames: (optional) list of grain column names
:param metric_names: (optional) the list of metric names to return
:return: a data frame of metrics per time series ID, plus an 'all_sets' entry with the overall metrics
"""
if not metric_names:
metric_names = list(constants.Metric.SCALAR_REGRESSION_SET)
if ts_id_colnames is None:
ts_id_colnames = []
metrics_list = []
if ts_id_colnames:
for grain, df in fcst_df.groupby(ts_id_colnames):
one_grain_metrics_df = _compute_metrics(df, metric_names)
one_grain_metrics_df[GRAIN] = _format_grain_name(grain)
metrics_list.append(one_grain_metrics_df)
# overall metrics
one_grain_metrics_df = _compute_metrics(fcst_df, metric_names)
one_grain_metrics_df[GRAIN] = ALL_GRAINS
metrics_list.append(one_grain_metrics_df)
# collect into a data frame
return pd.concat(metrics_list)
def _draw_one_plot(
df: pd.DataFrame,
time_column_name: str,
grain_column_names: List[str],
pdf: PdfPages,
) -> None:
"""
Draw the single plot.
:param df: The data frame with the data to build plot.
:param time_column_name: The name of a time column.
:param grain_column_names: The name of grain columns.
:param pdf: The pdf backend used to render the plot.
"""
fig, _ = plt.subplots(figsize=(20, 10))
df = df.set_index(time_column_name)
plt.plot(df[[ACTUALS, PREDICTIONS]])
plt.xticks(rotation=45)
iteration = df[BACKTEST_ITER].iloc[0]
if grain_column_names:
grain_name = [df[grain].iloc[0] for grain in grain_column_names]
plt.title(f"Time series ID: {_format_grain_name(grain_name)} {iteration}")
plt.legend(["actual", "forecast"])
plt.close(fig)
pdf.savefig(fig)
def calculate_scores_and_build_plots(
input_dir: str, output_dir: str, automl_settings: Dict[str, Any]
):
os.makedirs(output_dir, exist_ok=True)
grains = automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
time_column_name = automl_settings.get(constants.TimeSeries.TIME_COLUMN_NAME)
if grains is None:
grains = []
if isinstance(grains, str):
grains = [grains]
while BACKTEST_ITER in grains:
grains.remove(BACKTEST_ITER)
dfs = []
for fle in os.listdir(input_dir):
file_path = os.path.join(input_dir, fle)
if os.path.isfile(file_path) and file_path.endswith(".csv"):
df_iter = pd.read_csv(file_path, parse_dates=[time_column_name])
for _, iteration in df_iter.groupby(BACKTEST_ITER):
dfs.append(iteration)
forecast_df = pd.concat(dfs, sort=False, ignore_index=True)
# To make sure plots are in order, sort the predictions by grain and iteration.
ts_index = grains + [BACKTEST_ITER]
forecast_df.sort_values(by=ts_index, inplace=True)
pdf = PdfPages(os.path.join(output_dir, PLOTS_FILE))
for _, one_forecast in forecast_df.groupby(ts_index):
_draw_one_plot(one_forecast, time_column_name, grains, pdf)
pdf.close()
forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False)
metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER])
metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False)
if __name__ == "__main__":
args = {"forecasts": "--forecasts", "scores_out": "--output-dir"}
parser = argparse.ArgumentParser("Parsing input arguments.")
for argname, arg in args.items():
parser.add_argument(arg, dest=argname, required=True)
parsed_args, _ = parser.parse_known_args()
input_dir = parsed_args.forecasts
output_dir = parsed_args.scores_out
with open(
os.path.join(
os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
)
) as json_file:
automl_settings = json.load(json_file)
calculate_scores_and_build_plots(input_dir, output_dir, automl_settings)
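A hedged usage sketch for the module above (the notebook later in this PR imports it as assets/score.py; the directory names here are illustrative):

from assets.score import calculate_scores_and_build_plots

# Keys mirror the AutoML settings dictionary used by the backtesting notebook.
settings = {"time_column_name": "date", "grain_column_names": ["ts_id"]}
calculate_scores_and_build_plots("forecasting_results", "backtesting_results", settings)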


@@ -0,0 +1,725 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Many Models with Backtesting - Automated ML\n",
"**_Backtest many models time series forecasts with Automated Machine Learning_**\n",
"\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For this notebook we are using a synthetic dataset to demonstrate the back testing in many model scenario. This allows us to check historical performance of AutoML on a historical data. To do that we step back on the backtesting period by the data set several times and split the data to train and test sets. Then these data sets are used for training and evaluation of model.<br>\n",
"\n",
"Thus, it is a quick way of evaluating AutoML as if it was in production. Here, we do not test historical performance of a particular model, for this see the [notebook](../forecasting-backtest-single-model/auto-ml-forecasting-backtest-single-model.ipynb). Instead, the best model for every backtest iteration can be different since AutoML chooses the best model for a given training set.\n",
"![Backtesting](Backtesting.png)\n",
"\n",
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prerequisites\n",
"You'll need to create a compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1.0 Set up workspace, datastore, experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003526897
}
},
"outputs": [],
"source": [
"import os\n",
"\n",
"import azureml.core\n",
"from azureml.core import Workspace, Datastore\n",
"import numpy as np\n",
"import pandas as pd\n",
"\n",
"from pandas.tseries.frequencies import to_offset\n",
"\n",
"# Set up your workspace\n",
"ws = Workspace.from_config()\n",
"ws.get_details()\n",
"\n",
"# Set up your datastores\n",
"dstore = ws.get_default_datastore()\n",
"\n",
"output = {}\n",
"output[\"SDK version\"] = azureml.core.VERSION\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Default datastore name\"] = dstore.name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook is compatible with Azure ML SDK version 1.35.1 or later."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose an experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003540729
}
},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment = Experiment(ws, \"automl-many-models-backtest\")\n",
"\n",
"print(\"Experiment name: \" + experiment.name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2.0 Data\n",
"\n",
"#### 2.1 Data generation\n",
"For this notebook we will generate the artificial data set with two [time series IDs](https://docs.microsoft.com/en-us/python/api/azureml-automl-core/azureml.automl.core.forecasting_parameters.forecastingparameters?view=azure-ml-py). Then we will generate backtest folds and will upload it to the default BLOB storage and create a [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# simulate data: 2 grains - 700\n",
"TIME_COLNAME = \"date\"\n",
"TARGET_COLNAME = \"value\"\n",
"TIME_SERIES_ID_COLNAME = \"ts_id\"\n",
"\n",
"sample_size = 700\n",
"# Set the random seed for reproducibility of results.\n",
"np.random.seed(20)\n",
"X1 = pd.DataFrame(\n",
" {\n",
" TIME_COLNAME: pd.date_range(start=\"2018-01-01\", periods=sample_size),\n",
" TARGET_COLNAME: np.random.normal(loc=100, scale=20, size=sample_size),\n",
" TIME_SERIES_ID_COLNAME: \"ts_A\",\n",
" }\n",
")\n",
"X2 = pd.DataFrame(\n",
" {\n",
" TIME_COLNAME: pd.date_range(start=\"2018-01-01\", periods=sample_size),\n",
" TARGET_COLNAME: np.random.normal(loc=100, scale=20, size=sample_size),\n",
" TIME_SERIES_ID_COLNAME: \"ts_B\",\n",
" }\n",
")\n",
"\n",
"X = pd.concat([X1, X2], ignore_index=True, sort=False)\n",
"print(\"Simulated dataset contains {} rows \\n\".format(X.shape[0]))\n",
"X.head()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we will generate 8 backtesting folds with backtesting period of 7 days and with the same forecasting horizon. We will add the column \"backtest_iteration\", which will identify the backtesting period by the last training date."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"offset_type = \"7D\"\n",
"NUMBER_OF_BACKTESTS = 8 # number of train/test sets to generate\n",
"\n",
"dfs_train = []\n",
"dfs_test = []\n",
"for ts_id, df_one in X.groupby(TIME_SERIES_ID_COLNAME):\n",
"\n",
" data_end = df_one[TIME_COLNAME].max()\n",
"\n",
" for i in range(NUMBER_OF_BACKTESTS):\n",
" train_cutoff_date = data_end - to_offset(offset_type)\n",
" df_one = df_one.copy()\n",
" df_one[\"backtest_iteration\"] = \"iteration_\" + str(train_cutoff_date)\n",
" train = df_one[df_one[TIME_COLNAME] <= train_cutoff_date]\n",
" test = df_one[\n",
" (df_one[TIME_COLNAME] > train_cutoff_date)\n",
" & (df_one[TIME_COLNAME] <= data_end)\n",
" ]\n",
" data_end = train[TIME_COLNAME].max()\n",
" dfs_train.append(train)\n",
" dfs_test.append(test)\n",
"\n",
"X_train = pd.concat(dfs_train, sort=False, ignore_index=True)\n",
"X_test = pd.concat(dfs_test, sort=False, ignore_index=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### 2.2 Create the Tabular Data Set.\n",
"\n",
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
"\n",
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.\n",
"\n",
"In this next step, we will upload the data and create a TabularDataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
"\n",
"ds = ws.get_default_datastore()\n",
"# Upload saved data to the default data store.\n",
"train_data = TabularDatasetFactory.register_pandas_dataframe(\n",
" X_train, target=(ds, \"data_mm\"), name=\"data_train\"\n",
")\n",
"test_data = TabularDatasetFactory.register_pandas_dataframe(\n",
" X_test, target=(ds, \"data_mm\"), name=\"data_test\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 3.0 Build the training pipeline\n",
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose a compute target\n",
"\n",
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
"\n",
"\\*\\*Creation of AmlCompute takes approximately 5 minutes.**\n",
"\n",
"If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007037308
}
},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"\n",
"# Name your cluster\n",
"compute_name = \"backtest-mm\"\n",
"\n",
"\n",
"if compute_name in ws.compute_targets:\n",
" compute_target = ws.compute_targets[compute_name]\n",
" if compute_target and type(compute_target) is AmlCompute:\n",
" print(\"Found compute target: \" + compute_name)\n",
"else:\n",
" print(\"Creating a new compute target...\")\n",
" provisioning_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
" )\n",
" # Create the compute target\n",
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
"\n",
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
" # If no min node count is provided it will use the scale settings for the cluster\n",
" compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
" )\n",
"\n",
" # For a more detailed view of current cluster status, use the 'status' property\n",
" print(compute_target.status.serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up training parameters\n",
"\n",
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition. Please note, that in this case we are setting grain_column_names to be the time series ID column plus iteration, because we want to train a separate model for each time series and iteration.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **task** | forecasting |\n",
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>normalized_root_mean_squared_error</i><br><i>normalized_mean_absolute_error</i> |\n",
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
"| **label_column_name** | The name of the label column. |\n",
"| **max_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **grain_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
"| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007061544
}
},
"outputs": [],
"source": [
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
" ManyModelsTrainParameters,\n",
")\n",
"\n",
"partition_column_names = [TIME_SERIES_ID_COLNAME, \"backtest_iteration\"]\n",
"automl_settings = {\n",
" \"task\": \"forecasting\",\n",
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
" \"iteration_timeout_minutes\": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n",
" \"iterations\": 15,\n",
" \"experiment_timeout_hours\": 0.25, # This also needs to be changed based on the dataset. For larger data set this number needs to be bigger.\n",
" \"label_column_name\": TARGET_COLNAME,\n",
" \"n_cross_validations\": 3,\n",
" \"time_column_name\": TIME_COLNAME,\n",
" \"max_horizon\": 6,\n",
" \"grain_column_names\": partition_column_names,\n",
" \"track_child_runs\": False,\n",
"}\n",
"\n",
"mm_paramters = ManyModelsTrainParameters(\n",
" automl_settings=automl_settings, partition_column_names=partition_column_names\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up many models pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **experiment** | The experiment used for training. |\n",
"| **train_data** | The file dataset to be used as input to the training run. |\n",
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. |\n",
"| **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node or optimal performance. |\n",
"| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n",
"\n",
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"\n",
"\n",
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
" experiment=experiment,\n",
" train_data=train_data,\n",
" compute_target=compute_target,\n",
" node_count=2,\n",
" process_count_per_node=2,\n",
" run_invocation_timeout=920,\n",
" train_pipeline_parameters=mm_paramters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit the pipeline to run\n",
"Next we submit our pipeline to run. The whole training pipeline takes about 20 minutes using a STANDARD_DS12_V2 VM with our current ParallelRunConfig setting."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run = experiment.submit(training_pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Check the run status, if training_run is in completed state, continue to next section. Otherwise, check the portal for failures."
]
},
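A quick status check at this point could look like this (a sketch; `training_run` comes from the cells above):

print(training_run.get_status())  # expect 'Completed' before moving on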
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 4.0 Backtesting\n",
"Now that we selected the best AutoML model for each backtest fold, we will use these models to generate the forecasts and compare with the actuals."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up output dataset for inference data\n",
"Output of inference can be represented as [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object and OutputFileDatasetConfig can be registered as a dataset. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data import OutputFileDatasetConfig\n",
"\n",
"output_inference_data_ds = OutputFileDatasetConfig(\n",
" name=\"many_models_inference_output\",\n",
" destination=(dstore, \"backtesting/inference_data/\"),\n",
").register_on_complete(name=\"backtesting_data_ds\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For many models we need to provide the ManyModelsInferenceParameters object.\n",
"\n",
"#### ManyModelsInferenceParameters arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **partition_column_names** | List of column names that identifies groups. |\n",
"| **target_column_name** | \\[Optional\\] Column name only if the inference dataset has the target. |\n",
"| **time_column_name** | Column name only if it is timeseries. |\n",
"| **many_models_run_id** | \\[Optional\\] Many models pipeline run id where models were trained. |\n",
"\n",
"#### get_many_models_batch_inference_steps arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **experiment** | The experiment used for inference run. |\n",
"| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n",
"| **compute_target** | The compute target that runs the inference pipeline.|\n",
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n",
"| **process_count_per_node** | The number of processes per node.\n",
"| **train_run_id** | \\[Optional\\] The run id of the hierarchy training, by default it is the latest successful training many model run in the experiment. |\n",
"| **train_experiment_name** | \\[Optional\\] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n",
"| **process_count_per_node** | \\[Optional\\] The number of processes per node, by default it's 4. |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
" ManyModelsInferenceParameters,\n",
")\n",
"\n",
"mm_parameters = ManyModelsInferenceParameters(\n",
" partition_column_names=partition_column_names,\n",
" time_column_name=TIME_COLNAME,\n",
" target_column_name=TARGET_COLNAME,\n",
")\n",
"\n",
"inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
" experiment=experiment,\n",
" inference_data=test_data,\n",
" node_count=2,\n",
" process_count_per_node=2,\n",
" compute_target=compute_target,\n",
" run_invocation_timeout=300,\n",
" output_datastore=output_inference_data_ds,\n",
" train_run_id=training_run.id,\n",
" train_experiment_name=training_run.experiment.name,\n",
" inference_pipeline_parameters=mm_parameters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"inference_pipeline = Pipeline(ws, steps=inference_steps)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_run = experiment.submit(inference_pipeline)\n",
"inference_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 5.0 Retrieve results and calculate metrics\n",
"\n",
"The pipeline returns one file with the predictions for each times series ID and outputs the result to the forecasting_output Blob container. The details of the blob container is listed in 'forecasting_output.txt' under Outputs+logs. \n",
"\n",
"The next code snippet does the following:\n",
"1. Downloads the contents of the output folder that is passed in the parallel run step \n",
"2. Reads the parallel_run_step.txt file that has the predictions as pandas dataframe \n",
"3. Saves the table in csv format and \n",
"4. Displays the top 10 rows of the predictions"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline\n",
"\n",
"forecasting_results_name = \"forecasting_results\"\n",
"forecasting_output_name = \"many_models_inference_output\"\n",
"forecast_file = get_output_from_mm_pipeline(\n",
" inference_run, forecasting_results_name, forecasting_output_name\n",
")\n",
"df = pd.read_csv(forecast_file, delimiter=\" \", header=None, parse_dates=[0])\n",
"df.columns = list(X_train.columns) + [\"predicted_level\"]\n",
"print(\n",
" \"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\"\n",
")\n",
"# Save the scv file with header to read it in the next step.\n",
"df.rename(columns={TARGET_COLNAME: \"actual_level\"}, inplace=True)\n",
"df.to_csv(os.path.join(forecasting_results_name, \"forecast.csv\"), index=False)\n",
"df.head(10)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## View metrics\n",
"We will read in the obtained results and run the helper script, which will generate metrics and create the plots of predicted versus actual values."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from assets.score import calculate_scores_and_build_plots\n",
"\n",
"backtesting_results = \"backtesting_mm_results\"\n",
"os.makedirs(backtesting_results, exist_ok=True)\n",
"calculate_scores_and_build_plots(\n",
" forecasting_results_name, backtesting_results, automl_settings\n",
")\n",
"pd.DataFrame({\"File\": os.listdir(backtesting_results)})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The directory contains a set of files with results:\n",
"- forecast.csv contains forecasts for all backtest iterations. The backtest_iteration column contains iteration identifier with the last training date as a suffix\n",
"- scores.csv contains all metrics. If data set contains several time series, the metrics are given for all combinations of time series id and iterations, as well as scores for all iterations and time series ids, which are marked as \"all_sets\"\n",
"- plots_fcst_vs_actual.pdf contains the predictions vs forecast plots for each iteration and, eash time series is saved as separate plot.\n",
"\n",
"For demonstration purposes we will display the table of metrics for one of the time series with ID \"ts0\". We will create the utility function, which will build the table with metrics."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def get_metrics_for_ts(all_metrics, ts):\n",
" \"\"\"\n",
" Get the metrics for the time series with ID ts and return it as pandas data frame.\n",
"\n",
" :param all_metrics: The table with all the metrics.\n",
" :param ts: The ID of a time series of interest.\n",
" :return: The pandas DataFrame with metrics for one time series.\n",
" \"\"\"\n",
" results_df = None\n",
" for ts_id, one_series in all_metrics.groupby(\"time_series_id\"):\n",
" if not ts_id.startswith(ts):\n",
" continue\n",
" iteration = ts_id.split(\"|\")[-1]\n",
" df = one_series[[\"metric_name\", \"metric\"]]\n",
" df.rename({\"metric\": iteration}, axis=1, inplace=True)\n",
" df.set_index(\"metric_name\", inplace=True)\n",
" if results_df is None:\n",
" results_df = df\n",
" else:\n",
" results_df = results_df.merge(\n",
" df, how=\"inner\", left_index=True, right_index=True\n",
" )\n",
" results_df.sort_index(axis=1, inplace=True)\n",
" return results_df\n",
"\n",
"\n",
"metrics_df = pd.read_csv(os.path.join(backtesting_results, \"scores.csv\"))\n",
"ts = \"ts_A\"\n",
"get_metrics_for_ts(metrics_df, ts)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Forecast vs actuals plots."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import IFrame\n",
"\n",
"IFrame(\"./backtesting_mm_results/plots_fcst_vs_actual.pdf\", width=800, height=300)"
]
}
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"categories": [
"how-to-use-azureml",
"automated-machine-learning"
],
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-backtest-many-models
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,3 @@
dependencies:
- pip:
- azureml-contrib-automl-pipeline-steps

Binary file not shown.


View File

@@ -0,0 +1,45 @@
import argparse
import os

import pandas as pd

import azureml.train.automl.runtime._hts.hts_runtime_utilities as hru
from azureml.core import Run
from azureml.core.dataset import Dataset

# Parse the arguments.
args = {
    "step_size": "--step-size",
    "step_number": "--step-number",
    "time_column_name": "--time-column-name",
    "time_series_id_column_names": "--time-series-id-column-names",
    "out_dir": "--output-dir",
}
parser = argparse.ArgumentParser("Parsing input arguments.")
for argname, arg in args.items():
    parser.add_argument(arg, dest=argname, required=True)
parsed_args, _ = parser.parse_known_args()
step_number = int(parsed_args.step_number)
step_size = int(parsed_args.step_size)

# Create the working directory to store the temporary csv files.
working_dir = parsed_args.out_dir
os.makedirs(working_dir, exist_ok=True)

# Set the input and output.
script_run = Run.get_context()
input_dataset = script_run.input_datasets["training_data"]
X_train = input_dataset.to_pandas_dataframe()

# Split the data: iteration i drops the last step_size * i rows of each series.
for i in range(step_number):
    file_name = os.path.join(working_dir, "backtest_{}.csv".format(i))
    if parsed_args.time_series_id_column_names:
        dfs = []
        for _, one_series in X_train.groupby([parsed_args.time_series_id_column_names]):
            one_series = one_series.sort_values(
                by=[parsed_args.time_column_name], inplace=False
            )
            dfs.append(one_series.iloc[: len(one_series) - step_size * i])
        pd.concat(dfs, sort=False, ignore_index=True).to_csv(file_name, index=False)
    else:
        X_train.sort_values(by=[parsed_args.time_column_name], inplace=True)
        X_train.iloc[: len(X_train) - step_size * i].to_csv(file_name, index=False)
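
# For orientation: the backtest pipeline launches this script as a
# PythonScriptStep with arguments along these lines (the values are
# illustrative placeholders taken from the notebook's defaults, not
# hard-coded anywhere in this file):
#
#   python data_split.py --step-size 30 --step-number 5 \
#       --time-column-name date \
#       --time-series-id-column-names time_series_id \
#       --output-dir ./outputs/backtest_folds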

View File

@@ -0,0 +1,173 @@
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""The batch script needed for backtesting of models using PRS."""
import argparse
import json
import logging
import os
import pickle
import re

import pandas as pd

from azureml.core.experiment import Experiment
from azureml.core.model import Model
from azureml.core.run import Run
from azureml.automl.core.shared import constants
from azureml.automl.runtime.shared.score import scoring
from azureml.train.automl import AutoMLConfig

RE_INVALID_SYMBOLS = re.compile(r"[:\s]")

model_name = None
target_column_name = None
current_step_run = None
output_dir = None

logger = logging.getLogger(__name__)


def _get_automl_settings():
    with open(
        os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
        )
    ) as json_file:
        return json.load(json_file)


def init():
    global model_name
    global target_column_name
    global output_dir
    global automl_settings
    global model_uid

    logger.info("Initialization of the run.")
    parser = argparse.ArgumentParser("Parsing input arguments.")
    parser.add_argument("--output-dir", dest="out", required=True)
    parser.add_argument("--model-name", dest="model", default=None)
    parser.add_argument("--model-uid", dest="model_uid", default=None)
    parsed_args, _ = parser.parse_known_args()
    model_name = parsed_args.model
    automl_settings = _get_automl_settings()
    target_column_name = automl_settings.get("label_column_name")
    output_dir = parsed_args.out
    model_uid = parsed_args.model_uid
    os.makedirs(output_dir, exist_ok=True)
    os.environ["AUTOML_IGNORE_PACKAGE_VERSION_INCOMPATIBILITIES".lower()] = "True"


def get_run():
    global current_step_run
    if current_step_run is None:
        current_step_run = Run.get_context()
    return current_step_run


def run_backtest(data_input_name: str, file_name: str, experiment: Experiment):
    """Re-train the model and return metrics."""
    data_input = pd.read_csv(
        data_input_name,
        parse_dates=[automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]],
    )
    print(data_input.head())
    if not automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES):
        # There are no grains.
        data_input.sort_values(
            [automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]], inplace=True
        )
        X_train = data_input.iloc[: -automl_settings["max_horizon"]]
        y_train = X_train.pop(target_column_name).values
        X_test = data_input.iloc[-automl_settings["max_horizon"] :]
        y_test = X_test.pop(target_column_name).values
    else:
        # The data contain grains.
        dfs_train = []
        dfs_test = []
        for _, one_series in data_input.groupby(
            automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
        ):
            one_series.sort_values(
                [automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]], inplace=True
            )
            dfs_train.append(one_series.iloc[: -automl_settings["max_horizon"]])
            dfs_test.append(one_series.iloc[-automl_settings["max_horizon"] :])
        X_train = pd.concat(dfs_train, sort=False, ignore_index=True)
        y_train = X_train.pop(target_column_name).values
        X_test = pd.concat(dfs_test, sort=False, ignore_index=True)
        y_test = X_test.pop(target_column_name).values
    last_training_date = str(
        X_train[automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]].max()
    )
    if file_name:
        # If a file name is provided, we will load the model and retrain it on backtest data.
        with open(file_name, "rb") as fp:
            fitted_model = pickle.load(fp)
        fitted_model.fit(X_train, y_train)
    else:
        # We will run the experiment and select the best model.
        X_train[target_column_name] = y_train
        automl_config = AutoMLConfig(training_data=X_train, **automl_settings)
        automl_run = current_step_run.submit_child(automl_config, show_output=True)
        best_run, fitted_model = automl_run.get_output()
        # As we have generated models, we need to register them for future use.
        description = "Backtest model example"
        tags = {"last_training_date": last_training_date, "experiment": experiment.name}
        if model_uid:
            tags["model_uid"] = model_uid
        automl_run.register_model(
            model_name=best_run.properties["model_name"],
            description=description,
            tags=tags,
        )
        print(f"The model {best_run.properties['model_name']} was registered.")
    _, x_pred = fitted_model.forecast(X_test)
    x_pred.reset_index(inplace=True, drop=False)
    columns = [automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]]
    if automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES):
        # We know that fitted_model.grain_column_names is a list.
        columns.extend(fitted_model.grain_column_names)
    columns.append(constants.TimeSeriesInternal.DUMMY_TARGET_COLUMN)
    # Remove featurized columns.
    x_pred = x_pred[columns]
    x_pred.rename(
        {constants.TimeSeriesInternal.DUMMY_TARGET_COLUMN: "predicted_level"},
        axis=1,
        inplace=True,
    )
    x_pred["actual_level"] = y_test
    x_pred["backtest_iteration"] = f"iteration_{last_training_date}"
    date_safe = RE_INVALID_SYMBOLS.sub("_", last_training_date)
    x_pred.to_csv(os.path.join(output_dir, f"iteration_{date_safe}.csv"), index=False)
    return x_pred


def run(input_files):
    """Run the script."""
    logger.info("Running mini batch.")
    ws = get_run().experiment.workspace
    file_name = None
    if model_name:
        models = Model.list(ws, name=model_name)
        cloud_model = None
        if models:
            for one_mod in models:
                if cloud_model is None or one_mod.version > cloud_model.version:
                    logger.info(
                        "Using existing model from the workspace. Model version: {}".format(
                            one_mod.version
                        )
                    )
                    cloud_model = one_mod
            file_name = cloud_model.download(exist_ok=True)
    forecasts = []
    logger.info("Running backtest.")
    for input_file in input_files:
        forecasts.append(run_backtest(input_file, file_name, get_run().experiment))
    return pd.concat(forecasts)
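
# Note on the execution model: ParallelRunStep calls init() once per worker
# process and then run() once per mini-batch. With the default mini_batch_size
# of 1 configured in pipeline_helper.py, each mini-batch is a list containing a
# single backtest CSV produced by data_split.py, so every fold is retrained and
# scored independently. A rough dry-run sketch (illustrative only; both calls
# require a submitted run context and settings file to actually work):
#
#   init()
#   predictions = run(["backtest_0.csv"])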

View File

@@ -0,0 +1,167 @@
from typing import Any, Dict, Optional, List

import argparse
import json
import os
import re

import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

from azureml.automl.core.shared import constants
from azureml.automl.core.shared.types import GrainType
from azureml.automl.runtime.shared.score import scoring

GRAIN = "time_series_id"
BACKTEST_ITER = "backtest_iteration"
ACTUALS = "actual_level"
PREDICTIONS = "predicted_level"
ALL_GRAINS = "all_sets"

FORECASTS_FILE = "forecast.csv"
SCORES_FILE = "scores.csv"
PLOTS_FILE = "plots_fcst_vs_actual.pdf"

RE_INVALID_SYMBOLS = re.compile("[: ]")


def _compute_metrics(df: pd.DataFrame, metrics: List[str]):
    """
    Compute metrics for one data frame.

    :param df: The data frame which contains actual_level and predicted_level columns.
    :return: The data frame with two columns - metric_name and metric.
    """
    scores = scoring.score_regression(
        y_test=df[ACTUALS], y_pred=df[PREDICTIONS], metrics=metrics
    )
    metrics_df = pd.DataFrame(list(scores.items()), columns=["metric_name", "metric"])
    metrics_df.sort_values(["metric_name"], inplace=True)
    metrics_df.reset_index(drop=True, inplace=True)
    return metrics_df


def _format_grain_name(grain: GrainType) -> str:
    """
    Convert grain name to string.

    :param grain: the grain name.
    :return: the string representation of the given grain.
    """
    if not isinstance(grain, tuple) and not isinstance(grain, list):
        return str(grain)
    grain = list(map(str, grain))
    return "|".join(grain)


def compute_all_metrics(
    fcst_df: pd.DataFrame,
    ts_id_colnames: List[str],
    metric_names: Optional[List[str]] = None,
):
    """
    Calculate metrics per grain.

    :param fcst_df: forecast data frame. Must contain 2 columns: 'actual_level' and 'predicted_level'
    :param ts_id_colnames: (optional) list of grain column names
    :param metric_names: (optional) the list of metric names to return
    :return: the data frame with per-grain metrics plus the overall metrics marked as "all_sets"
    """
    if not metric_names:
        metric_names = list(constants.Metric.SCALAR_REGRESSION_SET)
    if ts_id_colnames is None:
        ts_id_colnames = []
    metrics_list = []
    if ts_id_colnames:
        for grain, df in fcst_df.groupby(ts_id_colnames):
            one_grain_metrics_df = _compute_metrics(df, metric_names)
            one_grain_metrics_df[GRAIN] = _format_grain_name(grain)
            metrics_list.append(one_grain_metrics_df)
    # Overall metrics.
    one_grain_metrics_df = _compute_metrics(fcst_df, metric_names)
    one_grain_metrics_df[GRAIN] = ALL_GRAINS
    metrics_list.append(one_grain_metrics_df)
    # Collect into a data frame.
    return pd.concat(metrics_list)


def _draw_one_plot(
    df: pd.DataFrame,
    time_column_name: str,
    grain_column_names: List[str],
    pdf: PdfPages,
) -> None:
    """
    Draw a single plot.

    :param df: The data frame with the data to build the plot.
    :param time_column_name: The name of the time column.
    :param grain_column_names: The names of the grain columns.
    :param pdf: The pdf backend used to render the plot.
    """
    fig, _ = plt.subplots(figsize=(20, 10))
    df = df.set_index(time_column_name)
    plt.plot(df[[ACTUALS, PREDICTIONS]])
    plt.xticks(rotation=45)
    iteration = df[BACKTEST_ITER].iloc[0]
    if grain_column_names:
        grain_name = [df[grain].iloc[0] for grain in grain_column_names]
        plt.title(f"Time series ID: {_format_grain_name(grain_name)} {iteration}")
    plt.legend(["actual", "forecast"])
    # Save the figure to the PDF before closing it.
    pdf.savefig(fig)
    plt.close(fig)


def calculate_scores_and_build_plots(
    input_dir: str, output_dir: str, automl_settings: Dict[str, Any]
):
    os.makedirs(output_dir, exist_ok=True)
    grains = automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
    time_column_name = automl_settings.get(constants.TimeSeries.TIME_COLUMN_NAME)
    if grains is None:
        grains = []
    if isinstance(grains, str):
        grains = [grains]
    while BACKTEST_ITER in grains:
        grains.remove(BACKTEST_ITER)
    dfs = []
    for fle in os.listdir(input_dir):
        file_path = os.path.join(input_dir, fle)
        if os.path.isfile(file_path) and file_path.endswith(".csv"):
            df_iter = pd.read_csv(file_path, parse_dates=[time_column_name])
            for _, iteration in df_iter.groupby(BACKTEST_ITER):
                dfs.append(iteration)
    forecast_df = pd.concat(dfs, sort=False, ignore_index=True)
    # To make sure plots are in order, sort the predictions by grain and iteration.
    ts_index = grains + [BACKTEST_ITER]
    forecast_df.sort_values(by=ts_index, inplace=True)
    pdf = PdfPages(os.path.join(output_dir, PLOTS_FILE))
    for _, one_forecast in forecast_df.groupby(ts_index):
        _draw_one_plot(one_forecast, time_column_name, grains, pdf)
    pdf.close()
    forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False)
    metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER])
    metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False)


if __name__ == "__main__":
    args = {"forecasts": "--forecasts", "scores_out": "--output-dir"}
    parser = argparse.ArgumentParser("Parsing input arguments.")
    for argname, arg in args.items():
        parser.add_argument(arg, dest=argname, required=True)
    parsed_args, _ = parser.parse_known_args()
    input_dir = parsed_args.forecasts
    output_dir = parsed_args.scores_out
    with open(
        os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
        )
    ) as json_file:
        automl_settings = json.load(json_file)
    calculate_scores_and_build_plots(input_dir, output_dir, automl_settings)
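
# Example of the input shape compute_all_metrics expects (illustrative only;
# the column names match the constants defined at the top of this file):
#
#   toy = pd.DataFrame(
#       {
#           "actual_level": [1.0, 2.0, 3.0],
#           "predicted_level": [1.1, 1.9, 3.2],
#           "backtest_iteration": ["iteration_2000-12-01"] * 3,
#       }
#   )
#   compute_all_metrics(toy, ts_id_colnames=["backtest_iteration"])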

View File

@@ -0,0 +1,719 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License.\n",
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/automl-forecasting-function.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated MachineLearning\n",
"_**The model backtesting**_\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"2. [Setup](#Setup)\n",
"3. [Data](#Data)\n",
"4. [Prepare remote compute and data.](#prepare_remote)\n",
"5. [Create the configuration for AutoML backtesting](#train)\n",
"6. [Backtest AutoML](#backtest_automl)\n",
"7. [View metrics](#Metrics)\n",
"8. [Backtest the best model](#backtest_model)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"Model backtesting is used to evaluate its performance on historical data. To do that we step back on the backtesting period by the data set several times and split the data to train and test sets. Then these data sets are used for training and evaluation of model.<br>\n",
"This notebook is intended to demonstrate backtesting on a single model, this is the best solution for small data sets with a few or one time series in it. For scenarios where we would like to choose the best AutoML model for every backtest iteration, please see [AutoML Forecasting Backtest Many Models Example](../forecasting-backtest-many-models/auto-ml-forecasting-backtest-many-models.ipynb) notebook.\n",
"![Backtesting](Backtesting.png)\n",
"This notebook demonstrates two ways of backtesting:\n",
"- AutoML backtesting: we will train separate AutoML models for historical data\n",
"- Model backtesting: from the first run we will select the best model trained on the most recent data, retrain it on the past data and evaluate."
]
},
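{
"cell_type": "markdown",
"metadata": {},
"source": [
"Here is a minimal, self-contained sketch of the fold-generation logic described above. It is an illustration only: the toy index and lower-case variable names below are not part of the pipeline, and the constants mirror `BACKTESTING_PERIOD`, `NUMBER_OF_BACKTESTS` and `FORECAST_HORIZON` defined later in this notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"\n",
"# Illustration only: one year of daily timestamps.\n",
"toy_index = pd.date_range(\"2000-01-01\", periods=365, freq=\"D\")\n",
"backtesting_period = 30  # periods to step back on each iteration\n",
"number_of_backtests = 5\n",
"forecast_horizon = 14\n",
"\n",
"for i in range(number_of_backtests):\n",
"    # Iteration i drops the last i * backtesting_period points,\n",
"    # then holds out the final forecast_horizon points as the test set.\n",
"    fold = toy_index[: len(toy_index) - i * backtesting_period]\n",
"    train, test = fold[:-forecast_horizon], fold[-forecast_horizon:]\n",
"    print(f\"iteration {i}: train ends {train[-1].date()}, test ends {test[-1].date()}\")"
]
},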
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import numpy as np\n",
"import pandas as pd\n",
"import shutil\n",
"\n",
"import azureml.core\n",
"from azureml.core import Experiment, Model, Workspace"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook is compatible with Azure ML SDK version 1.35.1 or later."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As part of the setup you have already created a <b>Workspace</b>."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"output = {}\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"SKU\"] = ws.sku\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Data\n",
"For the demonstration purposes we will simulate one year of daily data. To do this we need to specify the following parameters: time column name, time series ID column names and label column name. Our intention is to forecast for two weeks ahead."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"TIME_COLUMN_NAME = \"date\"\n",
"TIME_SERIES_ID_COLUMN_NAMES = \"time_series_id\"\n",
"LABEL_COLUMN_NAME = \"y\"\n",
"FORECAST_HORIZON = 14\n",
"FREQUENCY = \"D\"\n",
"\n",
"\n",
"def simulate_timeseries_data(\n",
" train_len: int,\n",
" test_len: int,\n",
" time_column_name: str,\n",
" target_column_name: str,\n",
" time_series_id_column_name: str,\n",
" time_series_number: int = 1,\n",
" freq: str = \"H\",\n",
"):\n",
" \"\"\"\n",
" Return the time series of designed length.\n",
"\n",
" :param train_len: The length of training data (one series).\n",
" :type train_len: int\n",
" :param test_len: The length of testing data (one series).\n",
" :type test_len: int\n",
" :param time_column_name: The desired name of a time column.\n",
" :type time_column_name: str\n",
" :param time_series_number: The number of time series in the data set.\n",
" :type time_series_number: int\n",
" :param freq: The frequency string representing pandas offset.\n",
" see https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html\n",
" :type freq: str\n",
" :returns: the tuple of train and test data sets.\n",
" :rtype: tuple\n",
"\n",
" \"\"\"\n",
" data_train = [] # type: List[pd.DataFrame]\n",
" data_test = [] # type: List[pd.DataFrame]\n",
" data_length = train_len + test_len\n",
" for i in range(time_series_number):\n",
" X = pd.DataFrame(\n",
" {\n",
" time_column_name: pd.date_range(\n",
" start=\"2000-01-01\", periods=data_length, freq=freq\n",
" ),\n",
" target_column_name: np.arange(data_length).astype(float)\n",
" + np.random.rand(data_length)\n",
" + i * 5,\n",
" \"ext_predictor\": np.asarray(range(42, 42 + data_length)),\n",
" time_series_id_column_name: np.repeat(\"ts{}\".format(i), data_length),\n",
" }\n",
" )\n",
" data_train.append(X[:train_len])\n",
" data_test.append(X[train_len:])\n",
" train = pd.concat(data_train)\n",
" label_train = train.pop(target_column_name).values\n",
" test = pd.concat(data_test)\n",
" label_test = test.pop(target_column_name).values\n",
" return train, label_train, test, label_test\n",
"\n",
"\n",
"n_test_periods = FORECAST_HORIZON\n",
"n_train_periods = 365\n",
"X_train, y_train, X_test, y_test = simulate_timeseries_data(\n",
" train_len=n_train_periods,\n",
" test_len=n_test_periods,\n",
" time_column_name=TIME_COLUMN_NAME,\n",
" target_column_name=LABEL_COLUMN_NAME,\n",
" time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAMES,\n",
" time_series_number=2,\n",
" freq=FREQUENCY,\n",
")\n",
"X_train[LABEL_COLUMN_NAME] = y_train"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's see what the training data looks like."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_train.tail()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prepare remote compute and data. <a id=\"prepare_remote\"></a>\n",
"The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace), is paired with the storage account, which contains the default data store. We will use it to upload the artificial data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
"\n",
"ds = ws.get_default_datastore()\n",
"# Upload saved data to the default data store.\n",
"train_data = TabularDatasetFactory.register_pandas_dataframe(\n",
" X_train, target=(ds, \"data\"), name=\"data_backtest\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You will need to create a compute target for backtesting. In this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute), you create AmlCompute as your training compute resource.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your CPU cluster\n",
"amlcompute_cluster_name = \"backtest-cluster\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
" )\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create the configuration for AutoML backtesting <a id=\"train\"></a>\n",
"\n",
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **task** | forecasting |\n",
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>normalized_root_mean_squared_error</i><br><i>normalized_mean_absolute_error</i> |\n",
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
"| **label_column_name** | The name of the label column. |\n",
"| **max_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **grain_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_settings = {\n",
" \"task\": \"forecasting\",\n",
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
" \"iteration_timeout_minutes\": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n",
" \"iterations\": 15,\n",
" \"experiment_timeout_hours\": 1, # This also needs to be changed based on the dataset. For larger data set this number needs to be bigger.\n",
" \"label_column_name\": LABEL_COLUMN_NAME,\n",
" \"n_cross_validations\": 3,\n",
" \"time_column_name\": TIME_COLUMN_NAME,\n",
" \"max_horizon\": FORECAST_HORIZON,\n",
" \"track_child_runs\": False,\n",
" \"grain_column_names\": TIME_SERIES_ID_COLUMN_NAMES,\n",
"}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Backtest AutoML <a id=\"backtest_automl\"></a>\n",
"First we set backtesting parameters: we will step back by 30 days and will make 5 such steps; for each step we will forecast for next two weeks."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# The number of periods to step back on each backtest iteration.\n",
"BACKTESTING_PERIOD = 30\n",
"# The number of times we will back test the model.\n",
"NUMBER_OF_BACKTESTS = 5"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To train AutoML on backtesting folds we will use the [Azure Machine Learning pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/concept-ml-pipelines). It will generate backtest folds, then train model for each of them and calculate the accuracy metrics. To run pipeline, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve (here, it is a forecasting), while a Run corresponds to a specific approach to the problem."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from uuid import uuid1\n",
"\n",
"from pipeline_helper import get_backtest_pipeline\n",
"\n",
"pipeline_exp = Experiment(ws, \"automl-backtesting\")\n",
"\n",
"# We will create the unique identifier to mark our models.\n",
"model_uid = str(uuid1())\n",
"\n",
"pipeline = get_backtest_pipeline(\n",
" experiment=pipeline_exp,\n",
" dataset=train_data,\n",
" # The STANDARD_DS12_V2 has 4 vCPU per node, we will set 2 process per node to be safe.\n",
" process_per_node=2,\n",
" # The maximum number of nodes for our compute is 6.\n",
" node_count=6,\n",
" compute_target=compute_target,\n",
" automl_settings=automl_settings,\n",
" step_size=BACKTESTING_PERIOD,\n",
" step_number=NUMBER_OF_BACKTESTS,\n",
" model_uid=model_uid,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Run the pipeline and wait for results."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_run = pipeline_exp.submit(pipeline)\n",
"pipeline_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"After the run is complete, we can download the results. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics_output = pipeline_run.get_pipeline_output(\"results\")\n",
"metrics_output.download(\"backtest_metrics\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## View metrics<a id=\"Metrics\"></a>\n",
"To distinguish these metrics from the model backtest, which we will obtain in the next section, we will move the directory with metrics out of the backtest_metrics and will remove the parent folder. We will create the utility function for that."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def copy_scoring_directory(new_name):\n",
" scores_path = os.path.join(\"backtest_metrics\", \"azureml\")\n",
" directory_list = [os.path.join(scores_path, d) for d in os.listdir(scores_path)]\n",
" latest_file = max(directory_list, key=os.path.getctime)\n",
" print(\n",
" f\"The output directory {latest_file} was created on {pd.Timestamp(os.path.getctime(latest_file), unit='s')} GMT.\"\n",
" )\n",
" shutil.move(os.path.join(latest_file, \"results\"), new_name)\n",
" shutil.rmtree(\"backtest_metrics\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Move the directory and list its contents."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"copy_scoring_directory(\"automl_backtest\")\n",
"pd.DataFrame({\"File\": os.listdir(\"automl_backtest\")})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The directory contains a set of files with results:\n",
"- forecast.csv contains forecasts for all backtest iterations. The backtest_iteration column contains iteration identifier with the last training date as a suffix\n",
"- scores.csv contains all metrics. If data set contains several time series, the metrics are given for all combinations of time series id and iterations, as well as scores for all iterations and time series id are marked as \"all_sets\"\n",
"- plots_fcst_vs_actual.pdf contains the predictions vs forecast plots for each iteration and time series.\n",
"\n",
"For demonstration purposes we will display the table of metrics for one of the time series with ID \"ts0\". Again, we will create the utility function, which will be re used in model backtesting."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def get_metrics_for_ts(all_metrics, ts):\n",
" \"\"\"\n",
" Get the metrics for the time series with ID ts and return it as pandas data frame.\n",
"\n",
" :param all_metrics: The table with all the metrics.\n",
" :param ts: The ID of a time series of interest.\n",
" :return: The pandas DataFrame with metrics for one time series.\n",
" \"\"\"\n",
" results_df = None\n",
" for ts_id, one_series in all_metrics.groupby(\"time_series_id\"):\n",
" if not ts_id.startswith(ts):\n",
" continue\n",
" iteration = ts_id.split(\"|\")[-1]\n",
" df = one_series[[\"metric_name\", \"metric\"]]\n",
" df.rename({\"metric\": iteration}, axis=1, inplace=True)\n",
" df.set_index(\"metric_name\", inplace=True)\n",
" if results_df is None:\n",
" results_df = df\n",
" else:\n",
" results_df = results_df.merge(\n",
" df, how=\"inner\", left_index=True, right_index=True\n",
" )\n",
" results_df.sort_index(axis=1, inplace=True)\n",
" return results_df\n",
"\n",
"\n",
"metrics_df = pd.read_csv(os.path.join(\"automl_backtest\", \"scores.csv\"))\n",
"ts_id = \"ts0\"\n",
"get_metrics_for_ts(metrics_df, ts_id)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Forecast vs actuals plots."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import IFrame\n",
"\n",
"IFrame(\"./automl_backtest/plots_fcst_vs_actual.pdf\", width=800, height=300)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# <font color='blue'>Backtest the best model</font> <a id=\"backtest_model\"></a>\n",
"\n",
"For model backtesting we will use the same parameters we used to backtest AutoML. All the models, we have obtained in the previous run were registered in our workspace. To identify the model, each was assigned a tag with the last trainig date."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_list = Model.list(ws, tags={\"experiment\": \"automl-backtesting\"})\n",
"model_data = {\"name\": [], \"last_training_date\": []}\n",
"for model in model_list:\n",
" if (\n",
" \"last_training_date\" not in model.tags\n",
" or \"model_uid\" not in model.tags\n",
" or model.tags[\"model_uid\"] != model_uid\n",
" ):\n",
" continue\n",
" model_data[\"name\"].append(model.name)\n",
" model_data[\"last_training_date\"].append(\n",
" pd.Timestamp(model.tags[\"last_training_date\"])\n",
" )\n",
"df_models = pd.DataFrame(model_data)\n",
"df_models.sort_values([\"last_training_date\"], inplace=True)\n",
"df_models.reset_index(inplace=True, drop=True)\n",
"df_models"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We will backtest the model trained on the most recet data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_name = df_models[\"name\"].iloc[-1]\n",
"model_name"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrain the models.\n",
"Assemble the pipeline, which will retrain the best model from AutoML run on historical data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_exp = Experiment(ws, \"model-backtesting\")\n",
"\n",
"pipeline = get_backtest_pipeline(\n",
" experiment=pipeline_exp,\n",
" dataset=train_data,\n",
" # The STANDARD_DS12_V2 has 4 vCPU per node, we will set 2 process per node to be safe.\n",
" process_per_node=2,\n",
" # The maximum number of nodes for our compute is 6.\n",
" node_count=6,\n",
" compute_target=compute_target,\n",
" automl_settings=automl_settings,\n",
" step_size=BACKTESTING_PERIOD,\n",
" step_number=NUMBER_OF_BACKTESTS,\n",
" model_name=model_name,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Launch the backtesting pipeline."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_run = pipeline_exp.submit(pipeline)\n",
"pipeline_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The metrics are stored in the pipeline output named \"score\". The next code will download the table with metrics."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics_output = pipeline_run.get_pipeline_output(\"results\")\n",
"metrics_output.download(\"backtest_metrics\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Again, we will copy the data files from the downloaded directory, but in this case we will call the folder \"model_backtest\"; it will contain the same files as the one for AutoML backtesting."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"copy_scoring_directory(\"model_backtest\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, we will display the metrics."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_metrics_df = pd.read_csv(os.path.join(\"model_backtest\", \"scores.csv\"))\n",
"get_metrics_for_ts(model_metrics_df, ts_id)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Forecast vs actuals plots."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import IFrame\n",
"\n",
"IFrame(\"./model_backtest/plots_fcst_vs_actual.pdf\", width=800, height=300)"
]
}
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"category": "tutorial",
"compute": [
"Remote"
],
"datasets": [
"None"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"Azure ML AutoML"
],
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-backtest-single-model
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,166 @@
from typing import Any, Dict, Optional

import os

import azureml.train.automl.runtime._hts.hts_runtime_utilities as hru
from azureml._restclient.jasmine_client import JasmineClient
from azureml.contrib.automl.pipeline.steps import utilities
from azureml.core import RunConfiguration
from azureml.core.compute import ComputeTarget
from azureml.core.experiment import Experiment
from azureml.data import LinkTabularOutputDatasetConfig, TabularDataset
from azureml.pipeline.core import Pipeline, PipelineData, PipelineParameter
from azureml.pipeline.steps import ParallelRunConfig, ParallelRunStep, PythonScriptStep
from azureml.train.automl.constants import Scenarios
from azureml.data.dataset_consumption_config import DatasetConsumptionConfig

PROJECT_FOLDER = "assets"
SETTINGS_FILE = "automl_settings.json"


def get_backtest_pipeline(
    experiment: Experiment,
    dataset: TabularDataset,
    process_per_node: int,
    node_count: int,
    compute_target: ComputeTarget,
    automl_settings: Dict[str, Any],
    step_size: int,
    step_number: int,
    model_name: Optional[str] = None,
    model_uid: Optional[str] = None,
) -> Pipeline:
    """
    Get the pipeline used for backtesting.

    :param experiment: The experiment used to run the pipeline.
    :param dataset: Tabular data set to be used for model training.
    :param process_per_node: The number of processes per node. Generally it should be the number of cores
                             on the node divided by two.
    :param node_count: The number of nodes to be used.
    :param compute_target: The compute target to be used to run the pipeline.
    :param automl_settings: The dictionary with automl settings.
    :param step_size: The number of periods to step back in backtesting.
    :param step_number: The number of backtesting iterations.
    :param model_name: The name of a model to be back tested.
    :param model_uid: The uid to mark models from this run of the experiment.
    :return: The pipeline to be used for model retraining.

    **Note:** The output will be uploaded in the pipeline output called 'results'.
    """
    jasmine_client = JasmineClient(
        service_context=experiment.workspace.service_context,
        experiment_name=experiment.name,
        experiment_id=experiment.id,
    )
    env = jasmine_client.get_curated_environment(
        scenario=Scenarios.AUTOML,
        enable_dnn=False,
        enable_gpu=False,
        compute=compute_target,
        compute_sku=experiment.workspace.compute_targets.get(
            compute_target.name
        ).vm_size,
    )
    data_results = PipelineData(
        name="results", datastore=None, pipeline_output_name="results"
    )

    ############################################################
    # Split the data set using a python script.
    ############################################################
    run_config = RunConfiguration()
    run_config.docker.use_docker = True
    run_config.environment = env

    split_data = PipelineData(name="split_data_output", datastore=None).as_dataset()
    split_step = PythonScriptStep(
        name="split_data_for_backtest",
        script_name="data_split.py",
        inputs=[dataset.as_named_input("training_data")],
        outputs=[split_data],
        source_directory=PROJECT_FOLDER,
        arguments=[
            "--step-size",
            step_size,
            "--step-number",
            step_number,
            "--time-column-name",
            automl_settings.get("time_column_name"),
            "--time-series-id-column-names",
            automl_settings.get("grain_column_names"),
            "--output-dir",
            split_data,
        ],
        runconfig=run_config,
        compute_target=compute_target,
        allow_reuse=False,
    )

    ############################################################
    # We will run the backtest in the parallel run step.
    ############################################################
    settings_path = os.path.join(PROJECT_FOLDER, SETTINGS_FILE)
    hru.dump_object_to_json(automl_settings, settings_path)
    mini_batch_size = PipelineParameter(name="batch_size_param", default_value=str(1))
    back_test_config = ParallelRunConfig(
        source_directory=PROJECT_FOLDER,
        entry_script="retrain_models.py",
        mini_batch_size=mini_batch_size,
        error_threshold=-1,
        output_action="append_row",
        append_row_file_name="outputs.txt",
        compute_target=compute_target,
        environment=env,
        process_count_per_node=process_per_node,
        run_invocation_timeout=3600,
        node_count=node_count,
    )
    forecasts = PipelineData(name="forecasts", datastore=None)
    if model_name:
        parallel_step_name = "{}-backtest".format(model_name.replace("_", "-"))
    else:
        parallel_step_name = "AutoML-backtest"

    prs_args = [
        "--target_column_name",
        automl_settings.get("label_column_name"),
        "--output-dir",
        forecasts,
    ]
    if model_name is not None:
        prs_args.append("--model-name")
        prs_args.append(model_name)
    if model_uid is not None:
        prs_args.append("--model-uid")
        prs_args.append(model_uid)
    backtest_prs = ParallelRunStep(
        name=parallel_step_name,
        parallel_run_config=back_test_config,
        arguments=prs_args,
        inputs=[split_data],
        output=forecasts,
        allow_reuse=False,
    )

    ############################################################
    # Then we collect the output and return it as the results output.
    ############################################################
    collection_step = PythonScriptStep(
        name="score",
        script_name="score.py",
        inputs=[forecasts.as_mount()],
        outputs=[data_results],
        source_directory=PROJECT_FOLDER,
        arguments=[
            "--forecasts",
            forecasts,
            "--output-dir",
            data_results,
        ],
        runconfig=run_config,
        compute_target=compute_target,
        allow_reuse=False,
    )
    # Build and return the pipeline.
    return Pipeline(
        workspace=experiment.workspace,
        steps=[split_step, backtest_prs, collection_step],
    )
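
# Pipeline shape, for orientation:
#   1. split_data_for_backtest (PythonScriptStep, data_split.py) -> split_data
#   2. AutoML-backtest / <model>-backtest (ParallelRunStep, retrain_models.py) -> forecasts
#   3. score (PythonScriptStep, score.py) -> pipeline output "results"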

View File

@@ -113,7 +113,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -139,18 +139,18 @@
 "ws = Workspace.from_config()\n",
 "\n",
 "# choose a name for the run history container in the workspace\n",
-"experiment_name = 'beer-remote-cpu'\n",
+"experiment_name = \"beer-remote-cpu\"\n",
 "\n",
 "experiment = Experiment(ws, experiment_name)\n",
 "\n",
 "output = {}\n",
-"output['Subscription ID'] = ws.subscription_id\n",
-"output['Workspace'] = ws.name\n",
-"output['Resource Group'] = ws.resource_group\n",
-"output['Location'] = ws.location\n",
-"output['Run History Name'] = experiment_name\n",
-"pd.set_option('display.max_colwidth', -1)\n",
-"outputDf = pd.DataFrame(data = output, index = [''])\n",
+"output[\"Subscription ID\"] = ws.subscription_id\n",
+"output[\"Workspace\"] = ws.name\n",
+"output[\"Resource Group\"] = ws.resource_group\n",
+"output[\"Location\"] = ws.location\n",
+"output[\"Run History Name\"] = experiment_name\n",
+"pd.set_option(\"display.max_colwidth\", -1)\n",
+"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
 "outputDf.T"
 ]
 },
@@ -185,10 +185,11 @@
 "# Verify that cluster does not exist already\n",
 "try:\n",
 "    compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
-"    print('Found existing cluster, use it.')\n",
+"    print(\"Found existing cluster, use it.\")\n",
 "except ComputeTargetException:\n",
-"    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
-"                                                           max_nodes=4)\n",
+"    compute_config = AmlCompute.provisioning_configuration(\n",
+"        vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
+"    )\n",
 "    compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
 "\n",
 "compute_target.wait_for_completion(show_output=True)"
@@ -245,17 +246,21 @@
 "plt.tight_layout()\n",
 "\n",
 "plt.subplot(2, 1, 1)\n",
-"plt.title('Beer Production By Year')\n",
-"df = pd.read_csv(\"Beer_no_valid_split_train.csv\", parse_dates=True, index_col= 'DATE').drop(columns='grain')\n",
-"test_df = pd.read_csv(\"Beer_no_valid_split_test.csv\", parse_dates=True, index_col= 'DATE').drop(columns='grain')\n",
+"plt.title(\"Beer Production By Year\")\n",
+"df = pd.read_csv(\n",
+"    \"Beer_no_valid_split_train.csv\", parse_dates=True, index_col=\"DATE\"\n",
+").drop(columns=\"grain\")\n",
+"test_df = pd.read_csv(\n",
+"    \"Beer_no_valid_split_test.csv\", parse_dates=True, index_col=\"DATE\"\n",
+").drop(columns=\"grain\")\n",
 "plt.plot(df)\n",
 "\n",
 "plt.subplot(2, 1, 2)\n",
-"plt.title('Beer Production By Month')\n",
+"plt.title(\"Beer Production By Month\")\n",
 "groups = df.groupby(df.index.month)\n",
 "months = concat([DataFrame(x[1].values) for x in groups], axis=1)\n",
 "months = DataFrame(months)\n",
-"months.columns = range(1,13)\n",
+"months.columns = range(1, 13)\n",
 "months.boxplot()\n",
 "\n",
 "plt.show()"
@@ -270,10 +275,10 @@
 },
 "outputs": [],
 "source": [
-"target_column_name = 'BeerProduction'\n",
-"time_column_name = 'DATE'\n",
+"target_column_name = \"BeerProduction\"\n",
+"time_column_name = \"DATE\"\n",
 "time_series_id_column_names = []\n",
-"freq = 'M' #Monthly data"
+"freq = \"M\"  # Monthly data"
 ]
 },
 {
@@ -301,14 +306,36 @@
 "test_df.to_csv(\"test.csv\")\n",
 "\n",
 "datastore = ws.get_default_datastore()\n",
-"datastore.upload_files(files = ['./train.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
-"datastore.upload_files(files = ['./valid.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
-"datastore.upload_files(files = ['./test.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
+"datastore.upload_files(\n",
+"    files=[\"./train.csv\"],\n",
+"    target_path=\"beer-dataset/tabular/\",\n",
+"    overwrite=True,\n",
+"    show_progress=True,\n",
+")\n",
+"datastore.upload_files(\n",
+"    files=[\"./valid.csv\"],\n",
+"    target_path=\"beer-dataset/tabular/\",\n",
+"    overwrite=True,\n",
+"    show_progress=True,\n",
+")\n",
+"datastore.upload_files(\n",
+"    files=[\"./test.csv\"],\n",
+"    target_path=\"beer-dataset/tabular/\",\n",
+"    overwrite=True,\n",
+"    show_progress=True,\n",
+")\n",
 "\n",
 "from azureml.core import Dataset\n",
-"train_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/train.csv')])\n",
-"valid_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/valid.csv')])\n",
-"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/test.csv')])"
+"\n",
+"train_dataset = Dataset.Tabular.from_delimited_files(\n",
+"    path=[(datastore, \"beer-dataset/tabular/train.csv\")]\n",
+")\n",
+"valid_dataset = Dataset.Tabular.from_delimited_files(\n",
+"    path=[(datastore, \"beer-dataset/tabular/valid.csv\")]\n",
+")\n",
+"test_dataset = Dataset.Tabular.from_delimited_files(\n",
+"    path=[(datastore, \"beer-dataset/tabular/test.csv\")]\n",
+")"
 ]
 },
 {
@@ -366,26 +393,29 @@
 "outputs": [],
 "source": [
 "from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
+"\n",
 "forecasting_parameters = ForecastingParameters(\n",
 "    time_column_name=time_column_name,\n",
 "    forecast_horizon=forecast_horizon,\n",
-"    freq='MS' # Set the forecast frequency to be monthly (start of the month)\n",
+"    freq=\"MS\",  # Set the forecast frequency to be monthly (start of the month)\n",
 ")\n",
 "\n",
 "# We will disable the enable_early_stopping flag to ensure the DNN model is recommended for demonstration purpose.\n",
-"automl_config = AutoMLConfig(task='forecasting',\n",
-"                             primary_metric='normalized_root_mean_squared_error',\n",
-"                             experiment_timeout_hours = 1,\n",
-"                             training_data=train_dataset,\n",
-"                             label_column_name=target_column_name,\n",
-"                             validation_data=valid_dataset, \n",
-"                             verbosity=logging.INFO,\n",
-"                             compute_target=compute_target,\n",
-"                             max_concurrent_iterations=4,\n",
-"                             max_cores_per_iteration=-1,\n",
-"                             enable_dnn=True,\n",
-"                             enable_early_stopping=False,\n",
-"                             forecasting_parameters=forecasting_parameters)"
+"automl_config = AutoMLConfig(\n",
+"    task=\"forecasting\",\n",
+"    primary_metric=\"normalized_root_mean_squared_error\",\n",
+"    experiment_timeout_hours=1,\n",
+"    training_data=train_dataset,\n",
+"    label_column_name=target_column_name,\n",
+"    validation_data=valid_dataset,\n",
+"    verbosity=logging.INFO,\n",
+"    compute_target=compute_target,\n",
+"    max_concurrent_iterations=4,\n",
+"    max_cores_per_iteration=-1,\n",
+"    enable_dnn=True,\n",
+"    enable_early_stopping=False,\n",
+"    forecasting_parameters=forecasting_parameters,\n",
+")"
 ]
 },
 {
@@ -407,7 +437,7 @@
 },
 "outputs": [],
 "source": [
-"remote_run = experiment.submit(automl_config, show_output= True)"
+"remote_run = experiment.submit(automl_config, show_output=True)"
 ]
 },
 {
@@ -455,6 +485,7 @@
 "outputs": [],
 "source": [
 "from helper import get_result_df\n",
+"\n",
 "summary_df = get_result_df(remote_run)\n",
 "summary_df"
 ]
@@ -470,11 +501,12 @@
 "source": [
 "from azureml.core.run import Run\n",
 "from azureml.widgets import RunDetails\n",
-"forecast_model = 'TCNForecaster'\n",
-"if not forecast_model in summary_df['run_id']:\n",
-"    forecast_model = 'ForecastTCN'\n",
-"    \n",
-"best_dnn_run_id = summary_df['run_id'][forecast_model]\n",
+"\n",
+"forecast_model = \"TCNForecaster\"\n",
+"if not forecast_model in summary_df[\"run_id\"]:\n",
+"    forecast_model = \"ForecastTCN\"\n",
+"\n",
+"best_dnn_run_id = summary_df[\"run_id\"][forecast_model]\n",
 "best_dnn_run = Run(experiment, best_dnn_run_id)"
 ]
 },
@@ -488,7 +520,7 @@
 "outputs": [],
 "source": [
 "best_dnn_run.parent\n",
-"RunDetails(best_dnn_run.parent).show() "
+"RunDetails(best_dnn_run.parent).show()"
 ]
 },
 {
@@ -501,7 +533,7 @@
 "outputs": [],
 "source": [
 "best_dnn_run\n",
-"RunDetails(best_dnn_run).show() "
+"RunDetails(best_dnn_run).show()"
 ]
 },
 {
@@ -536,7 +568,10 @@
 "outputs": [],
 "source": [
 "from azureml.core import Dataset\n",
-"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/test.csv')])\n",
+"\n",
+"test_dataset = Dataset.Tabular.from_delimited_files(\n",
+"    path=[(datastore, \"beer-dataset/tabular/test.csv\")]\n",
+")\n",
 "# preview the first 3 rows of the dataset\n",
 "test_dataset.take(5).to_pandas_dataframe()"
 ]
@@ -547,7 +582,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"compute_target = ws.compute_targets['beer-cluster']\n",
+"compute_target = ws.compute_targets[\"beer-cluster\"]\n",
 "test_experiment = Experiment(ws, experiment_name + \"_test\")"
 ]
 },
@@ -563,9 +598,9 @@
 "import os\n",
 "import shutil\n",
 "\n",
-"script_folder = os.path.join(os.getcwd(), 'inference')\n",
+"script_folder = os.path.join(os.getcwd(), \"inference\")\n",
 "os.makedirs(script_folder, exist_ok=True)\n",
-"shutil.copy('infer.py', script_folder)"
+"shutil.copy(\"infer.py\", script_folder)"
 ]
 },
 {
@@ -576,8 +611,18 @@
 "source": [
 "from helper import run_inference\n",
 "\n",
-"test_run = run_inference(test_experiment, compute_target, script_folder, best_dnn_run, test_dataset, valid_dataset, forecast_horizon,\n",
-"                         target_column_name, time_column_name, freq)"
+"test_run = run_inference(\n",
+"    test_experiment,\n",
+"    compute_target,\n",
+"    script_folder,\n",
+"    best_dnn_run,\n",
+"    test_dataset,\n",
+"    valid_dataset,\n",
+"    forecast_horizon,\n",
+"    target_column_name,\n",
+"    time_column_name,\n",
+"    freq,\n",
+")"
 ]
 },
 {
@@ -597,8 +642,19 @@
 "source": [
 "from helper import run_multiple_inferences\n",
 "\n",
-"summary_df = run_multiple_inferences(summary_df, experiment, test_experiment, compute_target, script_folder, test_dataset, \n",
-"                                     valid_dataset, forecast_horizon, target_column_name, time_column_name, freq)"
+"summary_df = run_multiple_inferences(\n",
+"    summary_df,\n",
+"    experiment,\n",
+"    test_experiment,\n",
+"    compute_target,\n",
+"    script_folder,\n",
+"    test_dataset,\n",
+"    valid_dataset,\n",
+"    forecast_horizon,\n",
+"    target_column_name,\n",
+"    time_column_name,\n",
+"    freq,\n",
+")"
 ]
 },
 {
@@ -618,7 +674,7 @@
 "    test_run = Run(test_experiment, test_run_id)\n",
 "    test_run.wait_for_completion()\n",
 "    test_score = test_run.get_metrics()[run_summary.primary_metric]\n",
-"    summary_df.loc[summary_df.run_id == run_id, 'Test Score'] = test_score\n",
+"    summary_df.loc[summary_df.run_id == run_id, \"Test Score\"] = test_score\n",
 "    print(\"Test Score: \", test_score)"
 ]
 },

View File

@@ -6,120 +6,158 @@ from azureml.core.run import Run
from azureml.automl.core.shared import constants from azureml.automl.core.shared import constants
def split_fraction_by_grain(df, fraction, time_column_name, def split_fraction_by_grain(df, fraction, time_column_name, grain_column_names=None):
grain_column_names=None):
if not grain_column_names: if not grain_column_names:
df['tmp_grain_column'] = 'grain' df["tmp_grain_column"] = "grain"
grain_column_names = ['tmp_grain_column'] grain_column_names = ["tmp_grain_column"]
"""Group df by grain and split on last n rows for each group.""" """Group df by grain and split on last n rows for each group."""
df_grouped = (df.sort_values(time_column_name) df_grouped = df.sort_values(time_column_name).groupby(
.groupby(grain_column_names, group_keys=False)) grain_column_names, group_keys=False
)
df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-int(len(dfg) * df_head = df_grouped.apply(
fraction)] if fraction > 0 else dfg) lambda dfg: dfg.iloc[: -int(len(dfg) * fraction)] if fraction > 0 else dfg
)
df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-int(len(dfg) * df_tail = df_grouped.apply(
fraction):] if fraction > 0 else dfg[:0]) lambda dfg: dfg.iloc[-int(len(dfg) * fraction) :] if fraction > 0 else dfg[:0]
)
if 'tmp_grain_column' in grain_column_names: if "tmp_grain_column" in grain_column_names:
for df2 in (df, df_head, df_tail): for df2 in (df, df_head, df_tail):
df2.drop('tmp_grain_column', axis=1, inplace=True) df2.drop("tmp_grain_column", axis=1, inplace=True)
grain_column_names.remove('tmp_grain_column') grain_column_names.remove("tmp_grain_column")
return df_head, df_tail return df_head, df_tail
def split_full_for_forecasting(df, time_column_name, def split_full_for_forecasting(
grain_column_names=None, test_split=0.2): df, time_column_name, grain_column_names=None, test_split=0.2
):
index_name = df.index.name index_name = df.index.name
# Assumes that there isn't already a column called tmpindex # Assumes that there isn't already a column called tmpindex
df['tmpindex'] = df.index df["tmpindex"] = df.index
train_df, test_df = split_fraction_by_grain( train_df, test_df = split_fraction_by_grain(
df, test_split, time_column_name, grain_column_names) df, test_split, time_column_name, grain_column_names
)
train_df = train_df.set_index('tmpindex') train_df = train_df.set_index("tmpindex")
train_df.index.name = index_name train_df.index.name = index_name
test_df = test_df.set_index('tmpindex') test_df = test_df.set_index("tmpindex")
test_df.index.name = index_name test_df.index.name = index_name
df.drop('tmpindex', axis=1, inplace=True) df.drop("tmpindex", axis=1, inplace=True)
return train_df, test_df return train_df, test_df
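For orientation, a minimal sketch of how the two split helpers above behave on a toy frame (the toy column names below are illustrative, not from the sample data):

    import pandas as pd

    # Two grains ('store' A and B) with ten daily observations each.
    toy = pd.DataFrame({
        "date": list(pd.date_range("2021-01-01", periods=10)) * 2,
        "store": ["A"] * 10 + ["B"] * 10,
        "y": range(20),
    })

    # Hold out the last 20% of each grain for testing.
    train_df, test_df = split_full_for_forecasting(
        toy, time_column_name="date", grain_column_names=["store"], test_split=0.2
    )
    print(len(train_df), len(test_df))  # 16 and 4: two rows held out per grain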
def get_result_df(remote_run): def get_result_df(remote_run):
children = list(remote_run.get_children(recursive=True)) children = list(remote_run.get_children(recursive=True))
summary_df = pd.DataFrame(index=['run_id', 'run_algorithm', summary_df = pd.DataFrame(
'primary_metric', 'Score']) index=["run_id", "run_algorithm", "primary_metric", "Score"]
)
goal_minimize = False goal_minimize = False
for run in children: for run in children:
if run.get_status().lower() == constants.RunState.COMPLETE_RUN \ if (
and 'run_algorithm' in run.properties and 'score' in run.properties: run.get_status().lower() == constants.RunState.COMPLETE_RUN
and "run_algorithm" in run.properties
and "score" in run.properties
):
# We only count in the completed child runs. # We only count in the completed child runs.
summary_df[run.id] = [run.id, run.properties['run_algorithm'], summary_df[run.id] = [
run.properties['primary_metric'], run.id,
float(run.properties['score'])] run.properties["run_algorithm"],
if ('goal' in run.properties): run.properties["primary_metric"],
goal_minimize = run.properties['goal'].split('_')[-1] == 'min' float(run.properties["score"]),
]
if "goal" in run.properties:
goal_minimize = run.properties["goal"].split("_")[-1] == "min"
summary_df = summary_df.T.sort_values( summary_df = summary_df.T.sort_values(
'Score', "Score", ascending=goal_minimize
ascending=goal_minimize).drop_duplicates(['run_algorithm']) ).drop_duplicates(["run_algorithm"])
summary_df = summary_df.set_index('run_algorithm') summary_df = summary_df.set_index("run_algorithm")
return summary_df return summary_df
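A quick usage note on get_result_df, assuming `remote_run` is a completed AutoML parent run: only completed child runs are counted, duplicates per algorithm are dropped, and the sort direction follows the run's 'goal' property so the best validation score comes first.

    summary_df = get_result_df(remote_run)
    # Indexed by run_algorithm; 'Score' holds the validation metric of the
    # best child run for each algorithm.
    print(summary_df[["run_id", "primary_metric", "Score"]])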
def run_inference(test_experiment, compute_target, script_folder, train_run, def run_inference(
test_dataset, lookback_dataset, max_horizon, test_experiment,
target_column_name, time_column_name, freq): compute_target,
model_base_name = 'model.pkl' script_folder,
if 'model_data_location' in train_run.properties: train_run,
model_location = train_run.properties['model_data_location'] test_dataset,
_, model_base_name = model_location.rsplit('/', 1) lookback_dataset,
train_run.download_file('outputs/{}'.format(model_base_name), 'inference/{}'.format(model_base_name)) max_horizon,
train_run.download_file('outputs/conda_env_v_1_0_0.yml', 'inference/condafile.yml') target_column_name,
time_column_name,
freq,
):
model_base_name = "model.pkl"
if "model_data_location" in train_run.properties:
model_location = train_run.properties["model_data_location"]
_, model_base_name = model_location.rsplit("/", 1)
train_run.download_file(
"outputs/{}".format(model_base_name), "inference/{}".format(model_base_name)
)
train_run.download_file("outputs/conda_env_v_1_0_0.yml", "inference/condafile.yml")
inference_env = Environment("myenv") inference_env = Environment("myenv")
inference_env.docker.enabled = True inference_env.docker.enabled = True
inference_env.python.conda_dependencies = CondaDependencies( inference_env.python.conda_dependencies = CondaDependencies(
conda_dependencies_file_path='inference/condafile.yml') conda_dependencies_file_path="inference/condafile.yml"
)
est = Estimator(source_directory=script_folder, est = Estimator(
entry_script='infer.py', source_directory=script_folder,
entry_script="infer.py",
script_params={ script_params={
'--max_horizon': max_horizon, "--max_horizon": max_horizon,
'--target_column_name': target_column_name, "--target_column_name": target_column_name,
'--time_column_name': time_column_name, "--time_column_name": time_column_name,
'--frequency': freq, "--frequency": freq,
'--model_path': model_base_name "--model_path": model_base_name,
}, },
inputs=[test_dataset.as_named_input('test_data'), inputs=[
lookback_dataset.as_named_input('lookback_data')], test_dataset.as_named_input("test_data"),
lookback_dataset.as_named_input("lookback_data"),
],
compute_target=compute_target, compute_target=compute_target,
environment_definition=inference_env) environment_definition=inference_env,
)
run = test_experiment.submit( run = test_experiment.submit(
est, tags={ est,
'training_run_id': train_run.id, tags={
'run_algorithm': train_run.properties['run_algorithm'], "training_run_id": train_run.id,
'valid_score': train_run.properties['score'], "run_algorithm": train_run.properties["run_algorithm"],
'primary_metric': train_run.properties['primary_metric'] "valid_score": train_run.properties["score"],
}) "primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags['run_algorithm']) run.log("run_algorithm", run.tags["run_algorithm"])
return run return run
def run_multiple_inferences(summary_df, train_experiment, test_experiment, def run_multiple_inferences(
compute_target, script_folder, test_dataset, summary_df,
lookback_dataset, max_horizon, target_column_name, train_experiment,
time_column_name, freq): test_experiment,
compute_target,
script_folder,
test_dataset,
lookback_dataset,
max_horizon,
target_column_name,
time_column_name,
freq,
):
for run_name, run_summary in summary_df.iterrows(): for run_name, run_summary in summary_df.iterrows():
print(run_name) print(run_name)
print(run_summary) print(run_summary)
@@ -127,12 +165,19 @@ def run_multiple_inferences(summary_df, train_experiment, test_experiment,
train_run = Run(train_experiment, run_id) train_run = Run(train_experiment, run_id)
test_run = run_inference( test_run = run_inference(
test_experiment, compute_target, script_folder, train_run, test_experiment,
test_dataset, lookback_dataset, max_horizon, target_column_name, compute_target,
time_column_name, freq) script_folder,
train_run,
test_dataset,
lookback_dataset,
max_horizon,
target_column_name,
time_column_name,
freq,
)
print(test_run) print(test_run)
summary_df.loc[summary_df.run_id == run_id, summary_df.loc[summary_df.run_id == run_id, "test_run_id"] = test_run.id
'test_run_id'] = test_run.id
return summary_df return summary_df

View File

@@ -19,9 +19,14 @@ except ImportError:
_torch_present = False _torch_present = False
def align_outputs(y_predicted, X_trans, X_test, y_test, def align_outputs(
predicted_column_name='predicted', y_predicted,
horizon_colname='horizon_origin'): X_trans,
X_test,
y_test,
predicted_column_name="predicted",
horizon_colname="horizon_origin",
):
""" """
Demonstrates how to get the output aligned to the inputs Demonstrates how to get the output aligned to the inputs
using pandas indexes. Helps understand what happened if using pandas indexes. Helps understand what happened if
@@ -33,9 +38,13 @@ def align_outputs(y_predicted, X_trans, X_test, y_test,
* model was asked to predict past max_horizon -> increase max horizon * model was asked to predict past max_horizon -> increase max horizon
* data at start of X_test was needed for lags -> provide previous periods * data at start of X_test was needed for lags -> provide previous periods
""" """
if (horizon_colname in X_trans): if horizon_colname in X_trans:
df_fcst = pd.DataFrame({predicted_column_name: y_predicted, df_fcst = pd.DataFrame(
horizon_colname: X_trans[horizon_colname]}) {
predicted_column_name: y_predicted,
horizon_colname: X_trans[horizon_colname],
}
)
else: else:
df_fcst = pd.DataFrame({predicted_column_name: y_predicted}) df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
@@ -48,20 +57,21 @@ def align_outputs(y_predicted, X_trans, X_test, y_test,
# X_test_full's index does not include origin, so reset for merge # X_test_full's index does not include origin, so reset for merge
df_fcst.reset_index(inplace=True) df_fcst.reset_index(inplace=True)
X_test_full = X_test_full.reset_index().drop(columns='index') X_test_full = X_test_full.reset_index().drop(columns="index")
together = df_fcst.merge(X_test_full, how='right') together = df_fcst.merge(X_test_full, how="right")
# drop rows where prediction or actuals are nan # drop rows where prediction or actuals are nan
# happens because of missing actuals # happens because of missing actuals
# or at edges of time due to lags/rolling windows # or at edges of time due to lags/rolling windows
clean = together[together[[target_column_name, clean = together[
predicted_column_name]].notnull().all(axis=1)] together[[target_column_name, predicted_column_name]].notnull().all(axis=1)
return (clean) ]
return clean
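Conceptually, the alignment above is plain pandas merging; a stripped-down sketch of the same idea on toy data (illustrative names, not the sample schema):

    import pandas as pd

    preds = pd.DataFrame({"predicted": [10.0, 11.0]}, index=[1, 2])
    actuals = pd.DataFrame({"y": [9.0, 12.0, 8.0]}, index=[0, 1, 2])

    # Right-merge on the shared index column, then drop rows where either
    # the prediction or the actual is missing.
    together = preds.reset_index().merge(actuals.reset_index(), how="right")
    clean = together[together[["y", "predicted"]].notnull().all(axis=1)]
    print(clean)  # only index 1 and 2 survive; index 0 has no prediction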
def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test, def do_rolling_forecast_with_lookback(
max_horizon, X_lookback, y_lookback, fitted_model, X_test, y_test, max_horizon, X_lookback, y_lookback, freq="D"
freq='D'): ):
""" """
Produce forecasts on a rolling origin over the given test set. Produce forecasts on a rolling origin over the given test set.
@@ -83,22 +93,28 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
horizon_time = origin_time + max_horizon * to_offset(freq) horizon_time = origin_time + max_horizon * to_offset(freq)
# Extract test data from an expanding window up to the horizon # Extract test data from an expanding window up to the horizon
expand_wind = (X[time_column_name] < horizon_time) expand_wind = X[time_column_name] < horizon_time
X_test_expand = X[expand_wind] X_test_expand = X[expand_wind]
y_query_expand = np.zeros(len(X_test_expand)).astype(np.float) y_query_expand = np.zeros(len(X_test_expand)).astype(np.float)
y_query_expand.fill(np.NaN) y_query_expand.fill(np.NaN)
if origin_time != X[time_column_name].min(): if origin_time != X[time_column_name].min():
# Set the context by including actuals up to the origin time # Set the context by including actuals up to the origin time
test_context_expand_wind = (X[time_column_name] < origin_time) test_context_expand_wind = X[time_column_name] < origin_time
context_expand_wind = (X_test_expand[time_column_name] < origin_time) context_expand_wind = X_test_expand[time_column_name] < origin_time
y_query_expand[context_expand_wind] = y[test_context_expand_wind] y_query_expand[context_expand_wind] = y[test_context_expand_wind]
# Print some debug info # Print some debug info
print("Horizon_time:", horizon_time, print(
" origin_time: ", origin_time, "Horizon_time:",
" max_horizon: ", max_horizon, horizon_time,
" freq: ", freq) " origin_time: ",
origin_time,
" max_horizon: ",
max_horizon,
" freq: ",
freq,
)
print("expand_wind: ", expand_wind) print("expand_wind: ", expand_wind)
print("y_query_expand") print("y_query_expand")
print(y_query_expand) print(y_query_expand)
@@ -124,9 +140,14 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
trans_tindex = X_trans.index.get_level_values(time_column_name) trans_tindex = X_trans.index.get_level_values(time_column_name)
trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time) trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
test_roll_wind = expand_wind & (X[time_column_name] >= origin_time) test_roll_wind = expand_wind & (X[time_column_name] >= origin_time)
df_list.append(align_outputs( df_list.append(
y_fcst[trans_roll_wind], X_trans[trans_roll_wind], align_outputs(
X[test_roll_wind], y[test_roll_wind])) y_fcst[trans_roll_wind],
X_trans[trans_roll_wind],
X[test_roll_wind],
y[test_roll_wind],
)
)
# Advance the origin time # Advance the origin time
origin_time = horizon_time origin_time = horizon_time
@@ -134,7 +155,7 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
return pd.concat(df_list, ignore_index=True) return pd.concat(df_list, ignore_index=True)
def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'): def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq="D"):
""" """
Produce forecasts on a rolling origin over the given test set. Produce forecasts on a rolling origin over the given test set.
@@ -153,23 +174,28 @@ def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
horizon_time = origin_time + max_horizon * to_offset(freq) horizon_time = origin_time + max_horizon * to_offset(freq)
# Extract test data from an expanding window up to the horizon # Extract test data from an expanding window up to the horizon
expand_wind = (X_test[time_column_name] < horizon_time) expand_wind = X_test[time_column_name] < horizon_time
X_test_expand = X_test[expand_wind] X_test_expand = X_test[expand_wind]
y_query_expand = np.zeros(len(X_test_expand)).astype(np.float) y_query_expand = np.zeros(len(X_test_expand)).astype(np.float)
y_query_expand.fill(np.NaN) y_query_expand.fill(np.NaN)
if origin_time != X_test[time_column_name].min(): if origin_time != X_test[time_column_name].min():
# Set the context by including actuals up to the origin time # Set the context by including actuals up to the origin time
test_context_expand_wind = (X_test[time_column_name] < origin_time) test_context_expand_wind = X_test[time_column_name] < origin_time
context_expand_wind = (X_test_expand[time_column_name] < origin_time) context_expand_wind = X_test_expand[time_column_name] < origin_time
y_query_expand[context_expand_wind] = y_test[ y_query_expand[context_expand_wind] = y_test[test_context_expand_wind]
test_context_expand_wind]
# Print some debug info # Print some debug info
print("Horizon_time:", horizon_time, print(
" origin_time: ", origin_time, "Horizon_time:",
" max_horizon: ", max_horizon, horizon_time,
" freq: ", freq) " origin_time: ",
origin_time,
" max_horizon: ",
max_horizon,
" freq: ",
freq,
)
print("expand_wind: ", expand_wind) print("expand_wind: ", expand_wind)
print("y_query_expand") print("y_query_expand")
print(y_query_expand) print(y_query_expand)
@@ -193,10 +219,14 @@ def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
trans_tindex = X_trans.index.get_level_values(time_column_name) trans_tindex = X_trans.index.get_level_values(time_column_name)
trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time) trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
test_roll_wind = expand_wind & (X_test[time_column_name] >= origin_time) test_roll_wind = expand_wind & (X_test[time_column_name] >= origin_time)
df_list.append(align_outputs(y_fcst[trans_roll_wind], df_list.append(
align_outputs(
y_fcst[trans_roll_wind],
X_trans[trans_roll_wind], X_trans[trans_roll_wind],
X_test[test_roll_wind], X_test[test_roll_wind],
y_test[test_roll_wind])) y_test[test_roll_wind],
)
)
# Advance the origin time # Advance the origin time
origin_time = horizon_time origin_time = horizon_time
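Stripped of the AutoML specifics, the rolling-origin loop that both forecast functions implement reduces to the sketch below (the dates and horizon are illustrative):

    import pandas as pd
    from pandas.tseries.frequencies import to_offset

    origin = pd.Timestamp("2021-01-01")  # start of the test period
    end = pd.Timestamp("2021-01-13")     # end of the test period
    max_horizon, freq = 4, "D"

    while origin < end:
        horizon = origin + max_horizon * to_offset(freq)
        # Forecast the window (origin, horizon], using actuals up to the
        # origin as context, then advance the origin past that window.
        print(f"forecast ({origin.date()}, {horizon.date()}]")
        origin = horizon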
@@ -230,20 +260,31 @@ def map_location_cuda(storage, loc):
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument( parser.add_argument(
'--max_horizon', type=int, dest='max_horizon', "--max_horizon",
default=10, help='Max Horizon for forecasting') type=int,
dest="max_horizon",
default=10,
help="Max Horizon for forecasting",
)
parser.add_argument( parser.add_argument(
'--target_column_name', type=str, dest='target_column_name', "--target_column_name",
help='Target Column Name') type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument( parser.add_argument(
'--time_column_name', type=str, dest='time_column_name', "--time_column_name", type=str, dest="time_column_name", help="Time Column Name"
help='Time Column Name') )
parser.add_argument( parser.add_argument(
'--frequency', type=str, dest='freq', "--frequency", type=str, dest="freq", help="Frequency of prediction"
help='Frequency of prediction') )
parser.add_argument( parser.add_argument(
'--model_path', type=str, dest='model_path', "--model_path",
default='model.pkl', help='Filename of model to be loaded') type=str,
dest="model_path",
default="model.pkl",
help="Filename of model to be loaded",
)
args = parser.parse_args() args = parser.parse_args()
max_horizon = args.max_horizon max_horizon = args.max_horizon
@@ -252,7 +293,7 @@ time_column_name = args.time_column_name
freq = args.freq freq = args.freq
model_path = args.model_path model_path = args.model_path
print('args passed are: ') print("args passed are: ")
print(max_horizon) print(max_horizon)
print(target_column_name) print(target_column_name)
print(time_column_name) print(time_column_name)
@@ -261,39 +302,41 @@ print(model_path)
run = Run.get_context() run = Run.get_context()
# get input dataset by name # get input dataset by name
test_dataset = run.input_datasets['test_data'] test_dataset = run.input_datasets["test_data"]
lookback_dataset = run.input_datasets['lookback_data'] lookback_dataset = run.input_datasets["lookback_data"]
grain_column_names = [] grain_column_names = []
df = test_dataset.to_pandas_dataframe() df = test_dataset.to_pandas_dataframe()
print('Read df') print("Read df")
print(df) print(df)
X_test_df = test_dataset.drop_columns(columns=[target_column_name]) X_test_df = test_dataset.drop_columns(columns=[target_column_name])
y_test_df = test_dataset.with_timestamp_columns( y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(
None).keep_columns(columns=[target_column_name]) columns=[target_column_name]
)
X_lookback_df = lookback_dataset.drop_columns(columns=[target_column_name]) X_lookback_df = lookback_dataset.drop_columns(columns=[target_column_name])
y_lookback_df = lookback_dataset.with_timestamp_columns( y_lookback_df = lookback_dataset.with_timestamp_columns(None).keep_columns(
None).keep_columns(columns=[target_column_name]) columns=[target_column_name]
)
_, ext = os.path.splitext(model_path) _, ext = os.path.splitext(model_path)
if ext == '.pt': if ext == ".pt":
# Load the fc-tcn torch model. # Load the fc-tcn torch model.
assert _torch_present assert _torch_present
if torch.cuda.is_available(): if torch.cuda.is_available():
map_location = map_location_cuda map_location = map_location_cuda
else: else:
map_location = 'cpu' map_location = "cpu"
with open(model_path, 'rb') as fh: with open(model_path, "rb") as fh:
fitted_model = torch.load(fh, map_location=map_location) fitted_model = torch.load(fh, map_location=map_location)
else: else:
# Load the sklearn pipeline. # Load the sklearn pipeline.
fitted_model = joblib.load(model_path) fitted_model = joblib.load(model_path)
if hasattr(fitted_model, 'get_lookback'): if hasattr(fitted_model, "get_lookback"):
lookback = fitted_model.get_lookback() lookback = fitted_model.get_lookback()
df_all = do_rolling_forecast_with_lookback( df_all = do_rolling_forecast_with_lookback(
fitted_model, fitted_model,
@@ -302,26 +345,28 @@ if hasattr(fitted_model, 'get_lookback'):
max_horizon, max_horizon,
X_lookback_df.to_pandas_dataframe()[-lookback:], X_lookback_df.to_pandas_dataframe()[-lookback:],
y_lookback_df.to_pandas_dataframe().values.T[0][-lookback:], y_lookback_df.to_pandas_dataframe().values.T[0][-lookback:],
freq) freq,
)
else: else:
df_all = do_rolling_forecast( df_all = do_rolling_forecast(
fitted_model, fitted_model,
X_test_df.to_pandas_dataframe(), X_test_df.to_pandas_dataframe(),
y_test_df.to_pandas_dataframe().values.T[0], y_test_df.to_pandas_dataframe().values.T[0],
max_horizon, max_horizon,
freq) freq,
)
print(df_all) print(df_all)
print("target values:::") print("target values:::")
print(df_all[target_column_name]) print(df_all[target_column_name])
print("predicted values:::") print("predicted values:::")
print(df_all['predicted']) print(df_all["predicted"])
# Use the AutoML scoring module # Use the AutoML scoring module
regression_metrics = list(constants.REGRESSION_SCALAR_SET) regression_metrics = list(constants.REGRESSION_SCALAR_SET)
y_test = np.array(df_all[target_column_name]) y_test = np.array(df_all[target_column_name])
y_pred = np.array(df_all['predicted']) y_pred = np.array(df_all["predicted"])
scores = scoring.score_regression(y_test, y_pred, regression_metrics) scores = scoring.score_regression(y_test, y_pred, regression_metrics)
print("scores:") print("scores:")
@@ -331,12 +376,11 @@ for key, value in scores.items():
run.log(key, value) run.log(key, value)
print("Simple forecasting model") print("Simple forecasting model")
rmse = np.sqrt(mean_squared_error( rmse = np.sqrt(mean_squared_error(df_all[target_column_name], df_all["predicted"]))
df_all[target_column_name], df_all['predicted']))
print("[Test Data] \nRoot Mean squared error: %.2f" % rmse) print("[Test Data] \nRoot Mean squared error: %.2f" % rmse)
mae = mean_absolute_error(df_all[target_column_name], df_all['predicted']) mae = mean_absolute_error(df_all[target_column_name], df_all["predicted"])
print('mean_absolute_error score: %.2f' % mae) print("mean_absolute_error score: %.2f" % mae)
print('MAPE: %.2f' % MAPE(df_all[target_column_name], df_all['predicted'])) print("MAPE: %.2f" % MAPE(df_all[target_column_name], df_all["predicted"]))
run.log('rmse', rmse) run.log("rmse", rmse)
run.log('mae', mae) run.log("mae", mae)

View File

@@ -64,22 +64,23 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"import azureml.core\n", "import json\n",
"import pandas as pd\n",
"import numpy as np\n",
"import logging\n", "import logging\n",
"\n",
"from azureml.core import Workspace, Experiment, Dataset\n",
"from azureml.train.automl import AutoMLConfig\n",
"from datetime import datetime\n", "from datetime import datetime\n",
"from azureml.automl.core.featurization import FeaturizationConfig" "\n",
"import azureml.core\n",
"import numpy as np\n",
"import pandas as pd\n",
"from azureml.automl.core.featurization import FeaturizationConfig\n",
"from azureml.core import Dataset, Experiment, Workspace\n",
"from azureml.train.automl import AutoMLConfig"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK." "This notebook is compatible with Azure ML SDK version 1.35.0 or later."
] ]
}, },
{ {
@@ -88,7 +89,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },
@@ -108,19 +108,19 @@
"ws = Workspace.from_config()\n", "ws = Workspace.from_config()\n",
"\n", "\n",
"# choose a name for the run history container in the workspace\n", "# choose a name for the run history container in the workspace\n",
"experiment_name = 'automl-bikeshareforecasting'\n", "experiment_name = \"automl-bikeshareforecasting\"\n",
"\n", "\n",
"experiment = Experiment(ws, experiment_name)\n", "experiment = Experiment(ws, experiment_name)\n",
"\n", "\n",
"output = {}\n", "output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n", "output[\"Subscription ID\"] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n", "output[\"Workspace\"] = ws.name\n",
"output['SKU'] = ws.sku\n", "output[\"SKU\"] = ws.sku\n",
"output['Resource Group'] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output['Location'] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output['Run History Name'] = experiment_name\n", "output[\"Run History Name\"] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"
] ]
}, },
@@ -153,10 +153,11 @@
"# Verify that cluster does not exist already\n", "# Verify that cluster does not exist already\n",
"try:\n", "try:\n",
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n", " compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
" print('Found existing cluster, use it.')\n", " print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n", "except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n", " compute_config = AmlCompute.provisioning_configuration(\n",
" max_nodes=4)\n", " vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
" )\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n", " compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n", "\n",
"compute_target.wait_for_completion(show_output=True)" "compute_target.wait_for_completion(show_output=True)"
@@ -178,7 +179,9 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"datastore = ws.get_default_datastore()\n", "datastore = ws.get_default_datastore()\n",
"datastore.upload_files(files = ['./bike-no.csv'], target_path = 'dataset/', overwrite = True,show_progress = True)" "datastore.upload_files(\n",
" files=[\"./bike-no.csv\"], target_path=\"dataset/\", overwrite=True, show_progress=True\n",
")"
] ]
}, },
{ {
@@ -198,8 +201,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"target_column_name = 'cnt'\n", "target_column_name = \"cnt\"\n",
"time_column_name = 'date'" "time_column_name = \"date\""
] ]
}, },
{ {
@@ -208,10 +211,12 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'dataset/bike-no.csv')]).with_timestamp_columns(fine_grain_timestamp=time_column_name) \n", "dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"dataset/bike-no.csv\")]\n",
").with_timestamp_columns(fine_grain_timestamp=time_column_name)\n",
"\n", "\n",
"# Drop the columns 'casual' and 'registered' as these columns are a breakdown of the total and therefore a leak.\n", "# Drop the columns 'casual' and 'registered' as these columns are a breakdown of the total and therefore a leak.\n",
"dataset = dataset.drop_columns(columns=['casual', 'registered'])\n", "dataset = dataset.drop_columns(columns=[\"casual\", \"registered\"])\n",
"\n", "\n",
"dataset.take(5).to_pandas_dataframe().reset_index(drop=True)" "dataset.take(5).to_pandas_dataframe().reset_index(drop=True)"
] ]
@@ -320,7 +325,7 @@
"source": [ "source": [
"featurization_config = FeaturizationConfig()\n", "featurization_config = FeaturizationConfig()\n",
"# Force the target column, to be integer type.\n", "# Force the target column, to be integer type.\n",
"featurization_config.add_prediction_transform_type('Integer')" "featurization_config.add_prediction_transform_type(\"Integer\")"
] ]
}, },
{ {
@@ -337,28 +342,31 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n", "from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n",
"forecasting_parameters = ForecastingParameters(\n", "forecasting_parameters = ForecastingParameters(\n",
" time_column_name=time_column_name,\n", " time_column_name=time_column_name,\n",
" forecast_horizon=forecast_horizon,\n", " forecast_horizon=forecast_horizon,\n",
" country_or_region_for_holidays='US', # set country_or_region will trigger holiday featurizer\n", " country_or_region_for_holidays=\"US\", # set country_or_region will trigger holiday featurizer\n",
" target_lags='auto', # use heuristic based lag setting\n", " target_lags=\"auto\", # use heuristic based lag setting\n",
" freq='D' # Set the forecast frequency to be daily\n", " freq=\"D\", # Set the forecast frequency to be daily\n",
")\n", ")\n",
"\n", "\n",
"automl_config = AutoMLConfig(task='forecasting', \n", "automl_config = AutoMLConfig(\n",
" primary_metric='normalized_root_mean_squared_error',\n", " task=\"forecasting\",\n",
" primary_metric=\"normalized_root_mean_squared_error\",\n",
" featurization=featurization_config,\n", " featurization=featurization_config,\n",
" blocked_models = ['ExtremeRandomTrees'], \n", " blocked_models=[\"ExtremeRandomTrees\"],\n",
" experiment_timeout_hours=0.3,\n", " experiment_timeout_hours=0.3,\n",
" training_data=train,\n", " training_data=train,\n",
" label_column_name=target_column_name,\n", " label_column_name=target_column_name,\n",
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" enable_early_stopping=True,\n", " enable_early_stopping=True,\n",
" n_cross_validations=3, \n", " n_cross_validations=3,\n",
" max_concurrent_iterations=4,\n", " max_concurrent_iterations=4,\n",
" max_cores_per_iteration=-1,\n", " max_cores_per_iteration=-1,\n",
" verbosity=logging.INFO,\n", " verbosity=logging.INFO,\n",
" forecasting_parameters=forecasting_parameters)" " forecasting_parameters=forecasting_parameters,\n",
")"
] ]
}, },
{ {
@@ -390,8 +398,8 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"### Retrieve the Best Model\n", "### Retrieve the Best Run details\n",
"Below we select the best model from all the training iterations using get_output method." "Below we retrieve the best Run object from among all the runs in the experiment."
] ]
}, },
{ {
@@ -400,8 +408,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"best_run, fitted_model = remote_run.get_output()\n", "best_run = remote_run.get_best_child()\n",
"fitted_model.steps" "best_run"
] ]
}, },
{ {
@@ -410,7 +418,7 @@
"source": [ "source": [
"## Featurization\n", "## Featurization\n",
"\n", "\n",
"You can access the engineered feature names generated in time-series featurization. Note that a number of named holiday periods are represented. We recommend that you have at least one year of data when using this feature to ensure that all yearly holidays are captured in the training featurization." "We can look at the engineered feature names generated in time-series featurization via. the JSON file named 'engineered_feature_names.json' under the run outputs. Note that a number of named holiday periods are represented. We recommend that you have at least one year of data when using this feature to ensure that all yearly holidays are captured in the training featurization."
] ]
}, },
{ {
@@ -419,7 +427,14 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"fitted_model.named_steps['timeseriestransformer'].get_engineered_feature_names()" "# Download the JSON file locally\n",
"best_run.download_file(\n",
" \"outputs/engineered_feature_names.json\", \"engineered_feature_names.json\"\n",
")\n",
"with open(\"engineered_feature_names.json\", \"r\") as f:\n",
" records = json.load(f)\n",
"\n",
"records"
] ]
}, },
{ {
@@ -443,10 +458,26 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"# Get the featurization summary as a list of JSON\n", "# Download the featurization summary JSON file locally\n",
"featurization_summary = fitted_model.named_steps['timeseriestransformer'].get_featurization_summary()\n", "best_run.download_file(\n",
"# View the featurization summary as a pandas dataframe\n", " \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
"pd.DataFrame.from_records(featurization_summary)" ")\n",
"\n",
"# Render the JSON as a pandas DataFrame\n",
"with open(\"featurization_summary.json\", \"r\") as f:\n",
" records = json.load(f)\n",
"fs = pd.DataFrame.from_records(records)\n",
"\n",
"# View a summary of the featurization\n",
"fs[\n",
" [\n",
" \"RawFeatureName\",\n",
" \"TypeDetected\",\n",
" \"Dropped\",\n",
" \"EngineeredFeatureCount\",\n",
" \"Transformations\",\n",
" ]\n",
"]"
] ]
}, },
{ {
@@ -491,9 +522,9 @@
"import os\n", "import os\n",
"import shutil\n", "import shutil\n",
"\n", "\n",
"script_folder = os.path.join(os.getcwd(), 'forecast')\n", "script_folder = os.path.join(os.getcwd(), \"forecast\")\n",
"os.makedirs(script_folder, exist_ok=True)\n", "os.makedirs(script_folder, exist_ok=True)\n",
"shutil.copy('forecasting_script.py', script_folder)" "shutil.copy(\"forecasting_script.py\", script_folder)"
] ]
}, },
{ {
@@ -511,7 +542,9 @@
"source": [ "source": [
"from run_forecast import run_rolling_forecast\n", "from run_forecast import run_rolling_forecast\n",
"\n", "\n",
"remote_run = run_rolling_forecast(test_experiment, compute_target, best_run, test, target_column_name)\n", "remote_run = run_rolling_forecast(\n",
" test_experiment, compute_target, best_run, test, target_column_name\n",
")\n",
"remote_run" "remote_run"
] ]
}, },
@@ -538,8 +571,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"remote_run.download_file('outputs/predictions.csv', 'predictions.csv')\n", "remote_run.download_file(\"outputs/predictions.csv\", \"predictions.csv\")\n",
"df_all = pd.read_csv('predictions.csv')" "df_all = pd.read_csv(\"predictions.csv\")"
] ]
}, },
{ {
@@ -556,18 +589,23 @@
"# use automl metrics module\n", "# use automl metrics module\n",
"scores = scoring.score_regression(\n", "scores = scoring.score_regression(\n",
" y_test=df_all[target_column_name],\n", " y_test=df_all[target_column_name],\n",
" y_pred=df_all['predicted'],\n", " y_pred=df_all[\"predicted\"],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n", " metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
")\n",
"\n", "\n",
"print(\"[Test data scores]\\n\")\n", "print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items(): \n", "for key, value in scores.items():\n",
" print('{}: {:.3f}'.format(key, value))\n", " print(\"{}: {:.3f}\".format(key, value))\n",
" \n", "\n",
"# Plot outputs\n", "# Plot outputs\n",
"%matplotlib inline\n", "%matplotlib inline\n",
"test_pred = plt.scatter(df_all[target_column_name], df_all['predicted'], color='b')\n", "test_pred = plt.scatter(df_all[target_column_name], df_all[\"predicted\"], color=\"b\")\n",
"test_test = plt.scatter(df_all[target_column_name], df_all[target_column_name], color='g')\n", "test_test = plt.scatter(\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n", " df_all[target_column_name], df_all[target_column_name], color=\"g\"\n",
")\n",
"plt.legend(\n",
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n",
"plt.show()" "plt.show()"
] ]
}, },
@@ -588,10 +626,18 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"from metrics_helper import MAPE, APE\n", "from metrics_helper import MAPE, APE\n",
"df_all.groupby('horizon_origin').apply(\n", "\n",
" lambda df: pd.Series({'MAPE': MAPE(df[target_column_name], df['predicted']),\n", "df_all.groupby(\"horizon_origin\").apply(\n",
" 'RMSE': np.sqrt(mean_squared_error(df[target_column_name], df['predicted'])),\n", " lambda df: pd.Series(\n",
" 'MAE': mean_absolute_error(df[target_column_name], df['predicted'])}))" " {\n",
" \"MAPE\": MAPE(df[target_column_name], df[\"predicted\"]),\n",
" \"RMSE\": np.sqrt(\n",
" mean_squared_error(df[target_column_name], df[\"predicted\"])\n",
" ),\n",
" \"MAE\": mean_absolute_error(df[target_column_name], df[\"predicted\"]),\n",
" }\n",
" )\n",
")"
] ]
}, },
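The `metrics_helper` module imported above is not part of this diff; under the standard definitions, MAPE and APE reduce to roughly the sketch below (the real helper may handle zero actuals differently):

    import numpy as np

    def APE(actual, pred):
        # Absolute percentage error per observation, in percent.
        actual = np.asarray(actual, dtype=float)
        pred = np.asarray(pred, dtype=float)
        return 100.0 * np.abs(actual - pred) / np.abs(actual)

    def MAPE(actual, pred):
        # Mean absolute percentage error over all observations.
        return np.mean(APE(actual, pred))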
{ {
@@ -607,15 +653,18 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"df_all_APE = df_all.assign(APE=APE(df_all[target_column_name], df_all['predicted']))\n", "df_all_APE = df_all.assign(APE=APE(df_all[target_column_name], df_all[\"predicted\"]))\n",
"APEs = [df_all_APE[df_all['horizon_origin'] == h].APE.values for h in range(1, forecast_horizon + 1)]\n", "APEs = [\n",
" df_all_APE[df_all[\"horizon_origin\"] == h].APE.values\n",
" for h in range(1, forecast_horizon + 1)\n",
"]\n",
"\n", "\n",
"%matplotlib inline\n", "%matplotlib inline\n",
"plt.boxplot(APEs)\n", "plt.boxplot(APEs)\n",
"plt.yscale('log')\n", "plt.yscale(\"log\")\n",
"plt.xlabel('horizon')\n", "plt.xlabel(\"horizon\")\n",
"plt.ylabel('APE (%)')\n", "plt.ylabel(\"APE (%)\")\n",
"plt.title('Absolute Percentage Errors by Forecast Horizon')\n", "plt.title(\"Absolute Percentage Errors by Forecast Horizon\")\n",
"\n", "\n",
"plt.show()" "plt.show()"
] ]
@@ -645,9 +694,9 @@
"friendly_name": "Forecasting BikeShare Demand", "friendly_name": "Forecasting BikeShare Demand",
"index_order": 1, "index_order": 1,
"kernelspec": { "kernelspec": {
"display_name": "Python 3.6", "display_name": "Python 3.6 - AzureML",
"language": "python", "language": "python",
"name": "python36" "name": "python3-azureml"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {

View File

@@ -4,11 +4,14 @@ from sklearn.externals import joblib
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument( parser.add_argument(
'--target_column_name', type=str, dest='target_column_name', "--target_column_name",
help='Target Column Name') type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument( parser.add_argument(
'--test_dataset', type=str, dest='test_dataset', "--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
help='Test Dataset') )
args = parser.parse_args() args = parser.parse_args()
target_column_name = args.target_column_name target_column_name = args.target_column_name
@@ -20,19 +23,30 @@ ws = run.experiment.workspace
# get the input dataset by id # get the input dataset by id
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id) test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)
X_test_df = test_dataset.drop_columns(columns=[target_column_name]).to_pandas_dataframe().reset_index(drop=True) X_test_df = (
y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(columns=[target_column_name]).to_pandas_dataframe() test_dataset.drop_columns(columns=[target_column_name])
.to_pandas_dataframe()
.reset_index(drop=True)
)
y_test_df = (
test_dataset.with_timestamp_columns(None)
.keep_columns(columns=[target_column_name])
.to_pandas_dataframe()
)
fitted_model = joblib.load('model.pkl') fitted_model = joblib.load("model.pkl")
y_pred, X_trans = fitted_model.rolling_evaluation(X_test_df, y_test_df.values) y_pred, X_trans = fitted_model.rolling_evaluation(X_test_df, y_test_df.values)
# Add predictions, actuals, and horizon relative to rolling origin to the test feature data # Add predictions, actuals, and horizon relative to rolling origin to the test feature data
assign_dict = {'horizon_origin': X_trans['horizon_origin'].values, 'predicted': y_pred, assign_dict = {
target_column_name: y_test_df[target_column_name].values} "horizon_origin": X_trans["horizon_origin"].values,
"predicted": y_pred,
target_column_name: y_test_df[target_column_name].values,
}
df_all = X_test_df.assign(**assign_dict) df_all = X_test_df.assign(**assign_dict)
file_name = 'outputs/predictions.csv' file_name = "outputs/predictions.csv"
export_csv = df_all.to_csv(file_name, header=True) export_csv = df_all.to_csv(file_name, header=True)
# Upload the predictions into artifacts # Upload the predictions into artifacts

View File

@@ -1,32 +1,40 @@
from azureml.core import ScriptRunConfig from azureml.core import ScriptRunConfig
def run_rolling_forecast(test_experiment, compute_target, train_run, def run_rolling_forecast(
test_dataset, target_column_name, test_experiment,
inference_folder='./forecast'): compute_target,
train_run.download_file('outputs/model.pkl', train_run,
inference_folder + '/model.pkl') test_dataset,
target_column_name,
inference_folder="./forecast",
):
train_run.download_file("outputs/model.pkl", inference_folder + "/model.pkl")
inference_env = train_run.get_environment() inference_env = train_run.get_environment()
config = ScriptRunConfig(source_directory=inference_folder, config = ScriptRunConfig(
script='forecasting_script.py', source_directory=inference_folder,
arguments=['--target_column_name', script="forecasting_script.py",
arguments=[
"--target_column_name",
target_column_name, target_column_name,
'--test_dataset', "--test_dataset",
test_dataset.as_named_input(test_dataset.name)], test_dataset.as_named_input(test_dataset.name),
],
compute_target=compute_target, compute_target=compute_target,
environment=inference_env) environment=inference_env,
)
run = test_experiment.submit(config, run = test_experiment.submit(
tags={'training_run_id': config,
train_run.id, tags={
'run_algorithm': "training_run_id": train_run.id,
train_run.properties['run_algorithm'], "run_algorithm": train_run.properties["run_algorithm"],
'valid_score': "valid_score": train_run.properties["score"],
train_run.properties['score'], "primary_metric": train_run.properties["primary_metric"],
'primary_metric': },
train_run.properties['primary_metric']}) )
run.log("run_algorithm", run.tags['run_algorithm']) run.log("run_algorithm", run.tags["run_algorithm"])
return run return run

View File

@@ -68,6 +68,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"import json\n",
"import logging\n", "import logging\n",
"\n", "\n",
"from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score\n", "from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score\n",
@@ -90,7 +91,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK." "This notebook is compatible with Azure ML SDK version 1.35.0 or later."
] ]
}, },
{ {
@@ -99,7 +100,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },
@@ -119,7 +119,7 @@
"ws = Workspace.from_config()\n", "ws = Workspace.from_config()\n",
"\n", "\n",
"# choose a name for the run history container in the workspace\n", "# choose a name for the run history container in the workspace\n",
"experiment_name = 'automl-forecasting-energydemand'\n", "experiment_name = \"automl-forecasting-energydemand\"\n",
"\n", "\n",
"# # project folder\n", "# # project folder\n",
"# project_folder = './sample_projects/automl-forecasting-energy-demand'\n", "# project_folder = './sample_projects/automl-forecasting-energy-demand'\n",
@@ -127,13 +127,13 @@
"experiment = Experiment(ws, experiment_name)\n", "experiment = Experiment(ws, experiment_name)\n",
"\n", "\n",
"output = {}\n", "output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n", "output[\"Subscription ID\"] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n", "output[\"Workspace\"] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output['Location'] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output['Run History Name'] = experiment_name\n", "output[\"Run History Name\"] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"
] ]
}, },
@@ -166,10 +166,11 @@
"# Verify that cluster does not exist already\n", "# Verify that cluster does not exist already\n",
"try:\n", "try:\n",
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n", " compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
" print('Found existing cluster, use it.')\n", " print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n", "except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n", " compute_config = AmlCompute.provisioning_configuration(\n",
" max_nodes=6)\n", " vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
" )\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n", " compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n", "\n",
"compute_target.wait_for_completion(show_output=True)" "compute_target.wait_for_completion(show_output=True)"
@@ -204,8 +205,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"target_column_name = 'demand'\n", "target_column_name = \"demand\"\n",
"time_column_name = 'timeStamp'" "time_column_name = \"timeStamp\""
] ]
}, },
{ {
@@ -214,7 +215,9 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"dataset = Dataset.Tabular.from_delimited_files(path = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/nyc_energy.csv\").with_timestamp_columns(fine_grain_timestamp=time_column_name) \n", "dataset = Dataset.Tabular.from_delimited_files(\n",
" path=\"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/nyc_energy.csv\"\n",
").with_timestamp_columns(fine_grain_timestamp=time_column_name)\n",
"dataset.take(5).to_pandas_dataframe().reset_index(drop=True)" "dataset.take(5).to_pandas_dataframe().reset_index(drop=True)"
] ]
}, },
@@ -343,23 +346,26 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n", "from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n",
"forecasting_parameters = ForecastingParameters(\n", "forecasting_parameters = ForecastingParameters(\n",
" time_column_name=time_column_name,\n", " time_column_name=time_column_name,\n",
" forecast_horizon=forecast_horizon,\n", " forecast_horizon=forecast_horizon,\n",
" freq='H' # Set the forecast frequency to be hourly\n", " freq=\"H\", # Set the forecast frequency to be hourly\n",
")\n", ")\n",
"\n", "\n",
"automl_config = AutoMLConfig(task='forecasting', \n", "automl_config = AutoMLConfig(\n",
" primary_metric='normalized_root_mean_squared_error',\n", " task=\"forecasting\",\n",
" blocked_models = ['ExtremeRandomTrees', 'AutoArima', 'Prophet'], \n", " primary_metric=\"normalized_root_mean_squared_error\",\n",
" blocked_models=[\"ExtremeRandomTrees\", \"AutoArima\", \"Prophet\"],\n",
" experiment_timeout_hours=0.3,\n", " experiment_timeout_hours=0.3,\n",
" training_data=train,\n", " training_data=train,\n",
" label_column_name=target_column_name,\n", " label_column_name=target_column_name,\n",
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" enable_early_stopping=True,\n", " enable_early_stopping=True,\n",
" n_cross_validations=3, \n", " n_cross_validations=3,\n",
" verbosity=logging.INFO,\n", " verbosity=logging.INFO,\n",
" forecasting_parameters=forecasting_parameters)" " forecasting_parameters=forecasting_parameters,\n",
")"
] ]
}, },
{ {
@@ -392,8 +398,8 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Retrieve the Best Model\n", "## Retrieve the Best Run details\n",
"Below we select the best model from all the training iterations using get_output method." "Below we retrieve the best Run object from among all the runs in the experiment."
] ]
}, },
{ {
@@ -402,8 +408,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"best_run, fitted_model = remote_run.get_output()\n", "best_run = remote_run.get_best_child()\n",
"fitted_model.steps" "best_run"
] ]
}, },
{ {
@@ -411,7 +417,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Featurization\n", "## Featurization\n",
"You can access the engineered feature names generated in time-series featurization." "We can look at the engineered feature names generated in time-series featurization via. the JSON file named 'engineered_feature_names.json' under the run outputs."
] ]
}, },
{ {
@@ -420,7 +426,14 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"fitted_model.named_steps['timeseriestransformer'].get_engineered_feature_names()" "# Download the JSON file locally\n",
"best_run.download_file(\n",
" \"outputs/engineered_feature_names.json\", \"engineered_feature_names.json\"\n",
")\n",
"with open(\"engineered_feature_names.json\", \"r\") as f:\n",
" records = json.load(f)\n",
"\n",
"records"
] ]
}, },
{ {
@@ -443,10 +456,26 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"# Get the featurization summary as a list of JSON\n", "# Download the featurization summary JSON file locally\n",
"featurization_summary = fitted_model.named_steps['timeseriestransformer'].get_featurization_summary()\n", "best_run.download_file(\n",
"# View the featurization summary as a pandas dataframe\n", " \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
"pd.DataFrame.from_records(featurization_summary)" ")\n",
"\n",
"# Render the JSON as a pandas DataFrame\n",
"with open(\"featurization_summary.json\", \"r\") as f:\n",
" records = json.load(f)\n",
"fs = pd.DataFrame.from_records(records)\n",
"\n",
"# View a summary of the featurization\n",
"fs[\n",
" [\n",
" \"RawFeatureName\",\n",
" \"TypeDetected\",\n",
" \"Dropped\",\n",
" \"EngineeredFeatureCount\",\n",
" \"Transformations\",\n",
" ]\n",
"]"
] ]
}, },
{ {
@@ -473,7 +502,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"### Retreiving forecasts from the model\n", "### Retrieving forecasts from the model\n",
"We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and expecuted on the remote compute." "We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and expecuted on the remote compute."
] ]
}, },
@@ -484,15 +513,18 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"from run_forecast import run_remote_inference\n", "from run_forecast import run_remote_inference\n",
"remote_run_infer = run_remote_inference(test_experiment=test_experiment,\n", "\n",
"remote_run_infer = run_remote_inference(\n",
" test_experiment=test_experiment,\n",
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" train_run=best_run,\n", " train_run=best_run,\n",
" test_dataset=test,\n", " test_dataset=test,\n",
" target_column_name=target_column_name)\n", " target_column_name=target_column_name,\n",
")\n",
"remote_run_infer.wait_for_completion(show_output=False)\n", "remote_run_infer.wait_for_completion(show_output=False)\n",
"\n", "\n",
"# download the inference output file to the local machine\n", "# download the inference output file to the local machine\n",
"remote_run_infer.download_file('outputs/predictions.csv', 'predictions.csv')" "remote_run_infer.download_file(\"outputs/predictions.csv\", \"predictions.csv\")"
] ]
}, },
{ {
@@ -510,7 +542,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# load forecast data frame\n", "# load forecast data frame\n",
"fcst_df = pd.read_csv('predictions.csv', parse_dates=[time_column_name])\n", "fcst_df = pd.read_csv(\"predictions.csv\", parse_dates=[time_column_name])\n",
"fcst_df.head()" "fcst_df.head()"
] ]
}, },
@@ -527,18 +559,23 @@
"# use automl metrics module\n", "# use automl metrics module\n",
"scores = scoring.score_regression(\n", "scores = scoring.score_regression(\n",
" y_test=fcst_df[target_column_name],\n", " y_test=fcst_df[target_column_name],\n",
" y_pred=fcst_df['predicted'],\n", " y_pred=fcst_df[\"predicted\"],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n", " metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
")\n",
"\n", "\n",
"print(\"[Test data scores]\\n\")\n", "print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items(): \n", "for key, value in scores.items():\n",
" print('{}: {:.3f}'.format(key, value))\n", " print(\"{}: {:.3f}\".format(key, value))\n",
" \n", "\n",
"# Plot outputs\n", "# Plot outputs\n",
"%matplotlib inline\n", "%matplotlib inline\n",
"test_pred = plt.scatter(fcst_df[target_column_name], fcst_df['predicted'], color='b')\n", "test_pred = plt.scatter(fcst_df[target_column_name], fcst_df[\"predicted\"], color=\"b\")\n",
"test_test = plt.scatter(fcst_df[target_column_name], fcst_df[target_column_name], color='g')\n", "test_test = plt.scatter(\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n", " fcst_df[target_column_name], fcst_df[target_column_name], color=\"g\"\n",
")\n",
"plt.legend(\n",
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n",
"plt.show()" "plt.show()"
] ]
}, },
@@ -567,21 +604,33 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"advanced_forecasting_parameters = ForecastingParameters(\n", "advanced_forecasting_parameters = ForecastingParameters(\n",
" time_column_name=time_column_name, forecast_horizon=forecast_horizon,\n", " time_column_name=time_column_name,\n",
" target_lags=12, target_rolling_window_size=4\n", " forecast_horizon=forecast_horizon,\n",
" target_lags=12,\n",
" target_rolling_window_size=4,\n",
")\n", ")\n",
"\n", "\n",
"automl_config = AutoMLConfig(task='forecasting', \n", "automl_config = AutoMLConfig(\n",
" primary_metric='normalized_root_mean_squared_error',\n", " task=\"forecasting\",\n",
" blocked_models = ['ElasticNet','ExtremeRandomTrees','GradientBoosting','XGBoostRegressor','ExtremeRandomTrees', 'AutoArima', 'Prophet'], #These models are blocked for tutorial purposes, remove this for real use cases. \n", " primary_metric=\"normalized_root_mean_squared_error\",\n",
" blocked_models=[\n",
" \"ElasticNet\",\n",
" \"ExtremeRandomTrees\",\n",
" \"GradientBoosting\",\n",
" \"XGBoostRegressor\",\n",
" \"ExtremeRandomTrees\",\n",
" \"AutoArima\",\n",
" \"Prophet\",\n",
" ], # These models are blocked for tutorial purposes, remove this for real use cases.\n",
" experiment_timeout_hours=0.3,\n", " experiment_timeout_hours=0.3,\n",
" training_data=train,\n", " training_data=train,\n",
" label_column_name=target_column_name,\n", " label_column_name=target_column_name,\n",
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" enable_early_stopping = True,\n", " enable_early_stopping=True,\n",
" n_cross_validations=3, \n", " n_cross_validations=3,\n",
" verbosity=logging.INFO,\n", " verbosity=logging.INFO,\n",
" forecasting_parameters=advanced_forecasting_parameters)" " forecasting_parameters=advanced_forecasting_parameters,\n",
")"
] ]
}, },
{ {
@@ -613,7 +662,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"### Retrieve the Best Model" "### Retrieve the Best Run details"
] ]
}, },
{ {
@@ -622,7 +671,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"best_run_lags, fitted_model_lags = advanced_remote_run.get_output()" "best_run_lags = remote_run.get_best_child()\n",
"best_run_lags"
] ]
}, },
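The new code retrieves the best run with `get_best_child()` instead of `get_output()`; a hedged note on the difference, based on the `AutoMLRun` API of this SDK generation: `get_best_child` returns the best child run without downloading and deserializing the model, while `get_output` also loads the fitted pipeline into the local session (which requires a compatible local environment).

```python
# Best child run only - no local model deserialization required:
best_run_lags = advanced_remote_run.get_best_child()

# Best run plus the fitted pipeline, loaded into the local Python session:
best_run_lags, fitted_model_lags = advanced_remote_run.get_output()
```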
{ {
@@ -640,16 +690,20 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"test_experiment_advanced = Experiment(ws, experiment_name + \"_inference_advanced\")\n", "test_experiment_advanced = Experiment(ws, experiment_name + \"_inference_advanced\")\n",
"advanced_remote_run_infer = run_remote_inference(test_experiment=test_experiment_advanced,\n", "advanced_remote_run_infer = run_remote_inference(\n",
" test_experiment=test_experiment_advanced,\n",
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" train_run=best_run_lags,\n", " train_run=best_run_lags,\n",
" test_dataset=test,\n", " test_dataset=test,\n",
" target_column_name=target_column_name,\n", " target_column_name=target_column_name,\n",
" inference_folder='./forecast_advanced')\n", " inference_folder=\"./forecast_advanced\",\n",
")\n",
"advanced_remote_run_infer.wait_for_completion(show_output=False)\n", "advanced_remote_run_infer.wait_for_completion(show_output=False)\n",
"\n", "\n",
"# download the inference output file to the local machine\n", "# download the inference output file to the local machine\n",
"advanced_remote_run_infer.download_file('outputs/predictions.csv', 'predictions_advanced.csv')" "advanced_remote_run_infer.download_file(\n",
" \"outputs/predictions.csv\", \"predictions_advanced.csv\"\n",
")"
] ]
}, },
{ {
@@ -658,7 +712,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"fcst_adv_df = pd.read_csv('predictions_advanced.csv', parse_dates=[time_column_name])\n", "fcst_adv_df = pd.read_csv(\"predictions_advanced.csv\", parse_dates=[time_column_name])\n",
"fcst_adv_df.head()" "fcst_adv_df.head()"
] ]
}, },
@@ -675,18 +729,25 @@
"# use automl metrics module\n", "# use automl metrics module\n",
"scores = scoring.score_regression(\n", "scores = scoring.score_regression(\n",
" y_test=fcst_adv_df[target_column_name],\n", " y_test=fcst_adv_df[target_column_name],\n",
" y_pred=fcst_adv_df['predicted'],\n", " y_pred=fcst_adv_df[\"predicted\"],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n", " metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
")\n",
"\n", "\n",
"print(\"[Test data scores]\\n\")\n", "print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items(): \n", "for key, value in scores.items():\n",
" print('{}: {:.3f}'.format(key, value))\n", " print(\"{}: {:.3f}\".format(key, value))\n",
" \n", "\n",
"# Plot outputs\n", "# Plot outputs\n",
"%matplotlib inline\n", "%matplotlib inline\n",
"test_pred = plt.scatter(fcst_adv_df[target_column_name], fcst_adv_df['predicted'], color='b')\n", "test_pred = plt.scatter(\n",
"test_test = plt.scatter(fcst_adv_df[target_column_name], fcst_adv_df[target_column_name], color='g')\n", " fcst_adv_df[target_column_name], fcst_adv_df[\"predicted\"], color=\"b\"\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n", ")\n",
"test_test = plt.scatter(\n",
" fcst_adv_df[target_column_name], fcst_adv_df[target_column_name], color=\"g\"\n",
")\n",
"plt.legend(\n",
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n",
"plt.show()" "plt.show()"
] ]
} }
@@ -702,9 +763,9 @@
"automated-machine-learning" "automated-machine-learning"
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python 3.6", "display_name": "Python 3.6 - AzureML",
"language": "python", "language": "python",
"name": "python36" "name": "python3-azureml"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {

View File

@@ -5,62 +5,20 @@ compute instance.
""" """
import argparse import argparse
import pandas as pd
import numpy as np
from azureml.core import Dataset, Run from azureml.core import Dataset, Run
from azureml.automl.core.shared.constants import TimeSeriesInternal
from sklearn.externals import joblib from sklearn.externals import joblib
from pandas.tseries.frequencies import to_offset from pandas.tseries.frequencies import to_offset
def align_outputs(y_predicted, X_trans, X_test, y_test, target_column_name,
predicted_column_name='predicted',
horizon_colname='horizon_origin'):
"""
Demonstrates how to get the output aligned to the inputs
using pandas indexes. Helps understand what happened if
the output's shape differs from the input shape, or if
the data got re-sorted by time and grain during forecasting.
Typical causes of misalignment are:
* we predicted some periods that were missing in actuals -> drop from eval
* model was asked to predict past max_horizon -> increase max horizon
* data at start of X_test was needed for lags -> provide previous periods
"""
if (horizon_colname in X_trans):
df_fcst = pd.DataFrame({predicted_column_name: y_predicted,
horizon_colname: X_trans[horizon_colname]})
else:
df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
# y and X outputs are aligned by forecast() function contract
df_fcst.index = X_trans.index
# align original X_test to y_test
X_test_full = X_test.copy()
X_test_full[target_column_name] = y_test
# X_test_full's index does not include origin, so reset for merge
df_fcst.reset_index(inplace=True)
X_test_full = X_test_full.reset_index().drop(columns='index')
together = df_fcst.merge(X_test_full, how='right')
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = together[together[[target_column_name,
predicted_column_name]].notnull().all(axis=1)]
return(clean)
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument( parser.add_argument(
'--target_column_name', type=str, dest='target_column_name', "--target_column_name",
help='Target Column Name') type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument( parser.add_argument(
'--test_dataset', type=str, dest='test_dataset', "--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
help='Test Dataset') )
args = parser.parse_args() args = parser.parse_args()
target_column_name = args.target_column_name target_column_name = args.target_column_name
@@ -76,14 +34,28 @@ X_test = test_dataset.to_pandas_dataframe().reset_index(drop=True)
y_test = X_test.pop(target_column_name).values y_test = X_test.pop(target_column_name).values
# generate forecast # generate forecast
fitted_model = joblib.load('model.pkl') fitted_model = joblib.load("model.pkl")
y_predictions, X_trans = fitted_model.forecast(X_test) # Default quantile values below give a 95% prediction interval (2.5th to 97.5th percentile)
quantiles = [0.025, 0.5, 0.975]
predicted_column_name = "predicted"
PI = "prediction_interval"
fitted_model.quantiles = quantiles
pred_quantiles = fitted_model.forecast_quantiles(X_test)
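# forecast_quantiles returns a DataFrame with the time and series id columns
# plus one column per requested quantile, keyed by the quantile value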
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
)
X_test[target_column_name] = y_test
X_test[PI] = pred_quantiles[PI]
X_test[predicted_column_name] = pred_quantiles[0.5]
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = X_test[
X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
# align output file_name = "outputs/predictions.csv"
df_all = align_outputs(y_predictions, X_trans, X_test, y_test, target_column_name) export_csv = clean.to_csv(file_name, header=True, index=False) # write without the row index
file_name = 'outputs/predictions.csv'
export_csv = df_all.to_csv(file_name, header=True, index=False) # added Index
# Upload the predictions into artifacts # Upload the predictions into artifacts
run.upload_file(name=file_name, path_or_stream=file_name) run.upload_file(name=file_name, path_or_stream=file_name)

View File

@@ -3,36 +3,47 @@ import shutil
from azureml.core import ScriptRunConfig from azureml.core import ScriptRunConfig
def run_remote_inference(test_experiment, compute_target, train_run, def run_remote_inference(
test_dataset, target_column_name, inference_folder='./forecast'): test_experiment,
compute_target,
train_run,
test_dataset,
target_column_name,
inference_folder="./forecast",
):
# Create local directory to copy the model.pkl and forecasting_script.py files into. # Create local directory to copy the model.pkl and forecasting_script.py files into.
# These files will be uploaded to and executed on the compute instance. # These files will be uploaded to and executed on the compute instance.
os.makedirs(inference_folder, exist_ok=True) os.makedirs(inference_folder, exist_ok=True)
shutil.copy('forecasting_script.py', inference_folder) shutil.copy("forecasting_script.py", inference_folder)
train_run.download_file('outputs/model.pkl', train_run.download_file(
os.path.join(inference_folder, 'model.pkl')) "outputs/model.pkl", os.path.join(inference_folder, "model.pkl")
)
inference_env = train_run.get_environment() inference_env = train_run.get_environment()
config = ScriptRunConfig(source_directory=inference_folder, config = ScriptRunConfig(
script='forecasting_script.py', source_directory=inference_folder,
arguments=['--target_column_name', script="forecasting_script.py",
arguments=[
"--target_column_name",
target_column_name, target_column_name,
'--test_dataset', "--test_dataset",
test_dataset.as_named_input(test_dataset.name)], test_dataset.as_named_input(test_dataset.name),
],
compute_target=compute_target, compute_target=compute_target,
environment=inference_env) environment=inference_env,
)
run = test_experiment.submit(config, run = test_experiment.submit(
tags={'training_run_id': config,
train_run.id, tags={
'run_algorithm': "training_run_id": train_run.id,
train_run.properties['run_algorithm'], "run_algorithm": train_run.properties["run_algorithm"],
'valid_score': "valid_score": train_run.properties["score"],
train_run.properties['score'], "primary_metric": train_run.properties["primary_metric"],
'primary_metric': },
train_run.properties['primary_metric']}) )
run.log("run_algorithm", run.tags['run_algorithm']) run.log("run_algorithm", run.tags["run_algorithm"])
return run return run

View File

@@ -85,7 +85,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK." "This notebook is compatible with Azure ML SDK version 1.35.0 or later."
] ]
}, },
{ {
@@ -94,7 +94,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },
@@ -111,19 +110,19 @@
"ws = Workspace.from_config()\n", "ws = Workspace.from_config()\n",
"\n", "\n",
"# choose a name for the run history container in the workspace\n", "# choose a name for the run history container in the workspace\n",
"experiment_name = 'automl-forecast-function-demo'\n", "experiment_name = \"automl-forecast-function-demo\"\n",
"\n", "\n",
"experiment = Experiment(ws, experiment_name)\n", "experiment = Experiment(ws, experiment_name)\n",
"\n", "\n",
"output = {}\n", "output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n", "output[\"Subscription ID\"] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n", "output[\"Workspace\"] = ws.name\n",
"output['SKU'] = ws.sku\n", "output[\"SKU\"] = ws.sku\n",
"output['Resource Group'] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output['Location'] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output['Run History Name'] = experiment_name\n", "output[\"Run History Name\"] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"
] ]
}, },
@@ -141,17 +140,20 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"TIME_COLUMN_NAME = 'date'\n", "TIME_COLUMN_NAME = \"date\"\n",
"TIME_SERIES_ID_COLUMN_NAME = 'time_series_id'\n", "TIME_SERIES_ID_COLUMN_NAME = \"time_series_id\"\n",
"TARGET_COLUMN_NAME = 'y'\n", "TARGET_COLUMN_NAME = \"y\"\n",
"\n", "\n",
"def get_timeseries(train_len: int,\n", "\n",
"def get_timeseries(\n",
" train_len: int,\n",
" test_len: int,\n", " test_len: int,\n",
" time_column_name: str,\n", " time_column_name: str,\n",
" target_column_name: str,\n", " target_column_name: str,\n",
" time_series_id_column_name: str,\n", " time_series_id_column_name: str,\n",
" time_series_number: int = 1,\n", " time_series_number: int = 1,\n",
" freq: str = 'H'):\n", " freq: str = \"H\",\n",
"):\n",
" \"\"\"\n", " \"\"\"\n",
" Return the time series of designed length.\n", " Return the time series of designed length.\n",
"\n", "\n",
@@ -174,14 +176,18 @@
" data_test = [] # type: List[pd.DataFrame]\n", " data_test = [] # type: List[pd.DataFrame]\n",
" data_length = train_len + test_len\n", " data_length = train_len + test_len\n",
" for i in range(time_series_number):\n", " for i in range(time_series_number):\n",
" X = pd.DataFrame({\n", " X = pd.DataFrame(\n",
" time_column_name: pd.date_range(start='2000-01-01',\n", " {\n",
" periods=data_length,\n", " time_column_name: pd.date_range(\n",
" freq=freq),\n", " start=\"2000-01-01\", periods=data_length, freq=freq\n",
" target_column_name: np.arange(data_length).astype(float) + np.random.rand(data_length) + i*5,\n", " ),\n",
" 'ext_predictor': np.asarray(range(42, 42 + data_length)),\n", " target_column_name: np.arange(data_length).astype(float)\n",
" time_series_id_column_name: np.repeat('ts{}'.format(i), data_length)\n", " + np.random.rand(data_length)\n",
" })\n", " + i * 5,\n",
" \"ext_predictor\": np.asarray(range(42, 42 + data_length)),\n",
" time_series_id_column_name: np.repeat(\"ts{}\".format(i), data_length),\n",
" }\n",
" )\n",
" data_train.append(X[:train_len])\n", " data_train.append(X[:train_len])\n",
" data_test.append(X[train_len:])\n", " data_test.append(X[train_len:])\n",
" X_train = pd.concat(data_train)\n", " X_train = pd.concat(data_train)\n",
@@ -190,14 +196,17 @@
" y_test = X_test.pop(target_column_name).values\n", " y_test = X_test.pop(target_column_name).values\n",
" return X_train, y_train, X_test, y_test\n", " return X_train, y_train, X_test, y_test\n",
"\n", "\n",
"\n",
"n_test_periods = 6\n", "n_test_periods = 6\n",
"n_train_periods = 30\n", "n_train_periods = 30\n",
"X_train, y_train, X_test, y_test = get_timeseries(train_len=n_train_periods,\n", "X_train, y_train, X_test, y_test = get_timeseries(\n",
" train_len=n_train_periods,\n",
" test_len=n_test_periods,\n", " test_len=n_test_periods,\n",
" time_column_name=TIME_COLUMN_NAME,\n", " time_column_name=TIME_COLUMN_NAME,\n",
" target_column_name=TARGET_COLUMN_NAME,\n", " target_column_name=TARGET_COLUMN_NAME,\n",
" time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n", " time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
" time_series_number=2)" " time_series_number=2,\n",
")"
] ]
}, },
{ {
@@ -224,11 +233,12 @@
"source": [ "source": [
"# plot the example time series\n", "# plot the example time series\n",
"import matplotlib.pyplot as plt\n", "import matplotlib.pyplot as plt\n",
"\n",
"whole_data = X_train.copy()\n", "whole_data = X_train.copy()\n",
"target_label = 'y'\n", "target_label = \"y\"\n",
"whole_data[target_label] = y_train\n", "whole_data[target_label] = y_train\n",
"for g in whole_data.groupby('time_series_id'): \n", "for g in whole_data.groupby(\"time_series_id\"):\n",
" plt.plot(g[1]['date'].values, g[1]['y'].values, label=g[0])\n", " plt.plot(g[1][\"date\"].values, g[1][\"y\"].values, label=g[0])\n",
"plt.legend()\n", "plt.legend()\n",
"plt.show()" "plt.show()"
] ]
@@ -250,12 +260,12 @@
"# We need to save thw artificial data and then upload them to default workspace datastore.\n", "# We need to save thw artificial data and then upload them to default workspace datastore.\n",
"DATA_PATH = \"fc_fn_data\"\n", "DATA_PATH = \"fc_fn_data\"\n",
"DATA_PATH_X = \"{}/data_train.csv\".format(DATA_PATH)\n", "DATA_PATH_X = \"{}/data_train.csv\".format(DATA_PATH)\n",
"if not os.path.isdir('data'):\n", "if not os.path.isdir(\"data\"):\n",
" os.mkdir('data')\n", " os.mkdir(\"data\")\n",
"pd.DataFrame(whole_data).to_csv(\"data/data_train.csv\", index=False)\n", "pd.DataFrame(whole_data).to_csv(\"data/data_train.csv\", index=False)\n",
"# Upload saved data to the default data store.\n", "# Upload saved data to the default data store.\n",
"ds = ws.get_default_datastore()\n", "ds = ws.get_default_datastore()\n",
"ds.upload(src_dir='./data', target_path=DATA_PATH, overwrite=True, show_progress=True)\n", "ds.upload(src_dir=\"./data\", target_path=DATA_PATH, overwrite=True, show_progress=True)\n",
"train_data = Dataset.Tabular.from_delimited_files(path=ds.path(DATA_PATH_X))" "train_data = Dataset.Tabular.from_delimited_files(path=ds.path(DATA_PATH_X))"
] ]
}, },
@@ -283,10 +293,11 @@
"# Verify that cluster does not exist already\n", "# Verify that cluster does not exist already\n",
"try:\n", "try:\n",
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n", " compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
" print('Found existing cluster, use it.')\n", " print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n", "except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n", " compute_config = AmlCompute.provisioning_configuration(\n",
" max_nodes=6)\n", " vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
" )\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n", " compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n", "\n",
"compute_target.wait_for_completion(show_output=True)" "compute_target.wait_for_completion(show_output=True)"
@@ -315,14 +326,15 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n", "from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"lags = [1,2,3]\n", "\n",
"lags = [1, 2, 3]\n",
"forecast_horizon = n_test_periods\n", "forecast_horizon = n_test_periods\n",
"forecasting_parameters = ForecastingParameters(\n", "forecasting_parameters = ForecastingParameters(\n",
" time_column_name=TIME_COLUMN_NAME,\n", " time_column_name=TIME_COLUMN_NAME,\n",
" forecast_horizon=forecast_horizon,\n", " forecast_horizon=forecast_horizon,\n",
" time_series_id_column_names=[ TIME_SERIES_ID_COLUMN_NAME ],\n", " time_series_id_column_names=[TIME_SERIES_ID_COLUMN_NAME],\n",
" target_lags=lags,\n", " target_lags=lags,\n",
" freq='H' # Set the forecast frequency to be hourly\n", " freq=\"H\", # Set the forecast frequency to be hourly\n",
")" ")"
] ]
}, },
@@ -344,19 +356,21 @@
"from azureml.train.automl import AutoMLConfig\n", "from azureml.train.automl import AutoMLConfig\n",
"\n", "\n",
"\n", "\n",
"automl_config = AutoMLConfig(task='forecasting',\n", "automl_config = AutoMLConfig(\n",
" debug_log='automl_forecasting_function.log',\n", " task=\"forecasting\",\n",
" primary_metric='normalized_root_mean_squared_error',\n", " debug_log=\"automl_forecasting_function.log\",\n",
" primary_metric=\"normalized_root_mean_squared_error\",\n",
" experiment_timeout_hours=0.25,\n", " experiment_timeout_hours=0.25,\n",
" enable_early_stopping=True,\n", " enable_early_stopping=True,\n",
" training_data=train_data,\n", " training_data=train_data,\n",
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" n_cross_validations=3,\n", " n_cross_validations=3,\n",
" verbosity = logging.INFO,\n", " verbosity=logging.INFO,\n",
" max_concurrent_iterations=4,\n", " max_concurrent_iterations=4,\n",
" max_cores_per_iteration=-1,\n", " max_cores_per_iteration=-1,\n",
" label_column_name=target_label,\n", " label_column_name=target_label,\n",
" forecasting_parameters=forecasting_parameters)\n", " forecasting_parameters=forecasting_parameters,\n",
")\n",
"\n", "\n",
"remote_run = experiment.submit(automl_config, show_output=False)" "remote_run = experiment.submit(automl_config, show_output=False)"
] ]
@@ -481,12 +495,12 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"# specify which quantiles you would like \n", "# specify which quantiles you would like\n",
"fitted_model.quantiles = [0.01, 0.5, 0.95]\n", "fitted_model.quantiles = [0.01, 0.5, 0.95]\n",
"# use forecast_quantiles function, not the forecast() one\n", "# use forecast_quantiles function, not the forecast() one\n",
"y_pred_quantiles = fitted_model.forecast_quantiles(X_test)\n", "y_pred_quantiles = fitted_model.forecast_quantiles(X_test)\n",
"\n", "\n",
"# quantile forecasts returned in a Dataframe along with the time and time series id columns \n", "# quantile forecasts returned in a Dataframe along with the time and time series id columns\n",
"y_pred_quantiles" "y_pred_quantiles"
] ]
}, },
@@ -534,14 +548,16 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"# generate the same kind of test data we trained on, \n", "# generate the same kind of test data we trained on,\n",
"# but now make the train set much longer, so that the test set will be in the future\n", "# but now make the train set much longer, so that the test set will be in the future\n",
"X_context, y_context, X_away, y_away = get_timeseries(train_len=42, # train data was 30 steps long\n", "X_context, y_context, X_away, y_away = get_timeseries(\n",
" train_len=42, # train data was 30 steps long\n",
" test_len=4,\n", " test_len=4,\n",
" time_column_name=TIME_COLUMN_NAME,\n", " time_column_name=TIME_COLUMN_NAME,\n",
" target_column_name=TARGET_COLUMN_NAME,\n", " target_column_name=TARGET_COLUMN_NAME,\n",
" time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n", " time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
" time_series_number=2)\n", " time_series_number=2,\n",
")\n",
"\n", "\n",
"# end of the data we trained on\n", "# end of the data we trained on\n",
"print(X_train.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].max())\n", "print(X_train.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].max())\n",
@@ -562,7 +578,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"try: \n", "try:\n",
" y_pred_away, xy_away = fitted_model.forecast(X_away)\n", " y_pred_away, xy_away = fitted_model.forecast(X_away)\n",
" xy_away\n", " xy_away\n",
"except Exception as e:\n", "except Exception as e:\n",
@@ -584,7 +600,9 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"def make_forecasting_query(fulldata, time_column_name, target_column_name, forecast_origin, horizon, lookback):\n", "def make_forecasting_query(\n",
" fulldata, time_column_name, target_column_name, forecast_origin, horizon, lookback\n",
"):\n",
"\n", "\n",
" \"\"\"\n", " \"\"\"\n",
" This function will take the full dataset, and create the query\n", " This function will take the full dataset, and create the query\n",
@@ -592,24 +610,24 @@
" forward for the next `horizon` horizons. Context from previous\n", " forward for the next `horizon` horizons. Context from previous\n",
" `lookback` periods will be included.\n", " `lookback` periods will be included.\n",
"\n", "\n",
" \n", "\n",
"\n", "\n",
" fulldata: pandas.DataFrame a time series dataset. Needs to contain X and y.\n", " fulldata: pandas.DataFrame a time series dataset. Needs to contain X and y.\n",
" time_column_name: string which column (must be in fulldata) is the time axis\n", " time_column_name: string which column (must be in fulldata) is the time axis\n",
" target_column_name: string which column (must be in fulldata) is to be forecast\n", " target_column_name: string which column (must be in fulldata) is to be forecast\n",
" forecast_origin: datetime type the last time we (pretend to) have target values \n", " forecast_origin: datetime type the last time we (pretend to) have target values\n",
" horizon: timedelta how far forward, in time units (not periods)\n", " horizon: timedelta how far forward, in time units (not periods)\n",
" lookback: timedelta how far back does the model look?\n", " lookback: timedelta how far back does the model look\n",
"\n", "\n",
" Example:\n", " Example:\n",
"\n", "\n",
"\n", "\n",
" ```\n", " ```\n",
"\n", "\n",
" forecast_origin = pd.to_datetime('2012-09-01') + pd.DateOffset(days=5) # forecast 5 days after end of training\n", " forecast_origin = pd.to_datetime(\"2012-09-01\") + pd.DateOffset(days=5) # forecast 5 days after end of training\n",
" print(forecast_origin)\n", " print(forecast_origin)\n",
"\n", "\n",
" X_query, y_query = make_forecasting_query(data, \n", " X_query, y_query = make_forecasting_query(data,\n",
" forecast_origin = forecast_origin,\n", " forecast_origin = forecast_origin,\n",
" horizon = pd.DateOffset(days=7), # 7 days into the future\n", " horizon = pd.DateOffset(days=7), # 7 days into the future\n",
" lookback = pd.DateOffset(days=1), # model has lag 1 period (day)\n", " lookback = pd.DateOffset(days=1), # model has lag 1 period (day)\n",
@@ -618,28 +636,30 @@
" ```\n", " ```\n",
" \"\"\"\n", " \"\"\"\n",
"\n", "\n",
" X_past = fulldata[ (fulldata[ time_column_name ] > forecast_origin - lookback) &\n", " X_past = fulldata[\n",
" (fulldata[ time_column_name ] <= forecast_origin)\n", " (fulldata[time_column_name] > forecast_origin - lookback)\n",
" & (fulldata[time_column_name] <= forecast_origin)\n",
" ]\n", " ]\n",
"\n", "\n",
" X_future = fulldata[ (fulldata[ time_column_name ] > forecast_origin) &\n", " X_future = fulldata[\n",
" (fulldata[ time_column_name ] <= forecast_origin + horizon)\n", " (fulldata[time_column_name] > forecast_origin)\n",
" & (fulldata[time_column_name] <= forecast_origin + horizon)\n",
" ]\n", " ]\n",
"\n", "\n",
" y_past = X_past.pop(target_column_name).values.astype(np.float)\n", " y_past = X_past.pop(target_column_name).values.astype(np.float)\n",
" y_future = X_future.pop(target_column_name).values.astype(np.float)\n", " y_future = X_future.pop(target_column_name).values.astype(np.float)\n",
"\n", "\n",
" # Now take y_future and turn it into question marks\n", " # Now take y_future and turn it into question marks\n",
" y_query = y_future.copy().astype(np.float) # because sometimes life hands you an int\n", " y_query = y_future.copy().astype(\n",
" np.float\n",
" ) # because sometimes life hands you an int\n",
" y_query.fill(np.NaN)\n", " y_query.fill(np.NaN)\n",
"\n", "\n",
"\n",
" print(\"X_past is \" + str(X_past.shape) + \" - shaped\")\n", " print(\"X_past is \" + str(X_past.shape) + \" - shaped\")\n",
" print(\"X_future is \" + str(X_future.shape) + \" - shaped\")\n", " print(\"X_future is \" + str(X_future.shape) + \" - shaped\")\n",
" print(\"y_past is \" + str(y_past.shape) + \" - shaped\")\n", " print(\"y_past is \" + str(y_past.shape) + \" - shaped\")\n",
" print(\"y_query is \" + str(y_query.shape) + \" - shaped\")\n", " print(\"y_query is \" + str(y_query.shape) + \" - shaped\")\n",
"\n", "\n",
"\n",
" X_pred = pd.concat([X_past, X_future])\n", " X_pred = pd.concat([X_past, X_future])\n",
" y_pred = np.concatenate([y_past, y_query])\n", " y_pred = np.concatenate([y_past, y_query])\n",
" return X_pred, y_pred" " return X_pred, y_pred"
@@ -658,8 +678,16 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(X_context.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(['min','max','count']))\n", "print(\n",
"print(X_away.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(['min','max','count']))\n", " X_context.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(\n",
" [\"min\", \"max\", \"count\"]\n",
" )\n",
")\n",
"print(\n",
" X_away.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(\n",
" [\"min\", \"max\", \"count\"]\n",
" )\n",
")\n",
"X_context.tail(5)" "X_context.tail(5)"
] ]
}, },
@@ -669,11 +697,11 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"# Since the length of the lookback is 3, \n", "# Since the length of the lookback is 3,\n",
"# we need to add 3 periods from the context to the request\n", "# we need to add 3 periods from the context to the request\n",
"# so that the model has the data it needs\n", "# so that the model has the data it needs\n",
"\n", "\n",
"# Put the X and y back together for a while. \n", "# Put the X and y back together for a while.\n",
"# They like each other and it makes them happy.\n", "# They like each other and it makes them happy.\n",
"X_context[TARGET_COLUMN_NAME] = y_context\n", "X_context[TARGET_COLUMN_NAME] = y_context\n",
"X_away[TARGET_COLUMN_NAME] = y_away\n", "X_away[TARGET_COLUMN_NAME] = y_away\n",
@@ -684,7 +712,7 @@
"# it is indeed the last point of the context\n", "# it is indeed the last point of the context\n",
"assert forecast_origin == X_context[TIME_COLUMN_NAME].max()\n", "assert forecast_origin == X_context[TIME_COLUMN_NAME].max()\n",
"print(\"Forecast origin: \" + str(forecast_origin))\n", "print(\"Forecast origin: \" + str(forecast_origin))\n",
" \n", "\n",
"# the model uses lags and rolling windows to look back in time\n", "# the model uses lags and rolling windows to look back in time\n",
"n_lookback_periods = max(lags)\n", "n_lookback_periods = max(lags)\n",
"lookback = pd.DateOffset(hours=n_lookback_periods)\n", "lookback = pd.DateOffset(hours=n_lookback_periods)\n",
@@ -692,8 +720,9 @@
"horizon = pd.DateOffset(hours=forecast_horizon)\n", "horizon = pd.DateOffset(hours=forecast_horizon)\n",
"\n", "\n",
"# now make the forecast query from context (refer to figure)\n", "# now make the forecast query from context (refer to figure)\n",
"X_pred, y_pred = make_forecasting_query(fulldata, TIME_COLUMN_NAME, TARGET_COLUMN_NAME,\n", "X_pred, y_pred = make_forecasting_query(\n",
" forecast_origin, horizon, lookback)\n", " fulldata, TIME_COLUMN_NAME, TARGET_COLUMN_NAME, forecast_origin, horizon, lookback\n",
")\n",
"\n", "\n",
"# show the forecast request aligned\n", "# show the forecast request aligned\n",
"X_show = X_pred.copy()\n", "X_show = X_pred.copy()\n",
@@ -720,7 +749,7 @@
"# show the forecast aligned\n", "# show the forecast aligned\n",
"X_show = xy_away.reset_index()\n", "X_show = xy_away.reset_index()\n",
"# without the generated features\n", "# without the generated features\n",
"X_show[['date', 'time_series_id', 'ext_predictor', '_automl_target_col']]\n", "X_show[[\"date\", \"time_series_id\", \"ext_predictor\", \"_automl_target_col\"]]\n",
"# prediction is in _automl_target_col" "# prediction is in _automl_target_col"
] ]
}, },
@@ -751,12 +780,14 @@
"source": [ "source": [
"# generate the same kind of test data we trained on, but with a single time-series and test period twice as long\n", "# generate the same kind of test data we trained on, but with a single time-series and test period twice as long\n",
"# as the forecast_horizon.\n", "# as the forecast_horizon.\n",
"_, _, X_test_long, y_test_long = get_timeseries(train_len=n_train_periods,\n", "_, _, X_test_long, y_test_long = get_timeseries(\n",
" test_len=forecast_horizon*2,\n", " train_len=n_train_periods,\n",
" test_len=forecast_horizon * 2,\n",
" time_column_name=TIME_COLUMN_NAME,\n", " time_column_name=TIME_COLUMN_NAME,\n",
" target_column_name=TARGET_COLUMN_NAME,\n", " target_column_name=TARGET_COLUMN_NAME,\n",
" time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n", " time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
" time_series_number=1)\n", " time_series_number=1,\n",
")\n",
"\n", "\n",
"print(X_test_long.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].min())\n", "print(X_test_long.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].min())\n",
"print(X_test_long.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].max())" "print(X_test_long.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].max())"
@@ -779,9 +810,11 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"# What forecast() function does in this case is equivalent to iterating it twice over the test set as the following. \n", "# What forecast() function does in this case is equivalent to iterating it twice over the test set as the following.\n",
"y_pred1, _ = fitted_model.forecast(X_test_long[:forecast_horizon])\n", "y_pred1, _ = fitted_model.forecast(X_test_long[:forecast_horizon])\n",
"y_pred_all, _ = fitted_model.forecast(X_test_long, np.concatenate((y_pred1, np.full(forecast_horizon, np.nan))))\n", "y_pred_all, _ = fitted_model.forecast(\n",
" X_test_long, np.concatenate((y_pred1, np.full(forecast_horizon, np.nan)))\n",
")\n",
"np.array_equal(y_pred_all, y_pred_long)" "np.array_equal(y_pred_all, y_pred_long)"
] ]
}, },
@@ -833,9 +866,9 @@
"friendly_name": "Forecasting away from training data", "friendly_name": "Forecasting away from training data",
"index_order": 3, "index_order": 3,
"kernelspec": { "kernelspec": {
"display_name": "Python 3.6", "display_name": "Python 3.6 - AzureML",
"language": "python", "language": "python",
"name": "python36" "name": "python3-azureml"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {

View File

@@ -0,0 +1,725 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.png)"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"# Automated Machine Learning\n",
"**Github DAU Forecasting**\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Data](#Data)\n",
"1. [Train](#Train)\n",
"1. [Evaluate](#Evaluate)"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"## Introduction\n",
"This notebook demonstrates demand forecasting for Github Daily Active Users Dataset using AutoML.\n",
"\n",
"AutoML highlights here include using Deep Learning forecasts, Arima, Prophet, Remote Execution and Remote Inferencing, and working with the `forecast` function. Please also look at the additional forecasting notebooks, which document lagging, rolling windows, forecast quantiles, other ways to use the forecast function, and forecaster deployment.\n",
"\n",
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
"\n",
"Notebook synopsis:\n",
"\n",
"1. Creating an Experiment in an existing Workspace\n",
"2. Configuration and remote run of AutoML for a time-series model exploring Regression learners, Arima, Prophet and DNNs\n",
"4. Evaluating the fitted model using a rolling test "
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"## Setup\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"import os\n",
"import azureml.core\n",
"import pandas as pd\n",
"import numpy as np\n",
"import logging\n",
"import warnings\n",
"\n",
"from pandas.tseries.frequencies import to_offset\n",
"\n",
"# Squash warning messages for cleaner output in the notebook\n",
"warnings.showwarning = lambda *args, **kwargs: None\n",
"\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.train.automl import AutoMLConfig\n",
"from matplotlib import pyplot as plt\n",
"from sklearn.metrics import mean_absolute_error, mean_squared_error\n",
"from azureml.train.estimator import Estimator"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook is compatible with Azure ML SDK version 1.35.0 or later."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"As part of the setup you have already created a <b>Workspace</b>. To run AutoML, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve, while a Run corresponds to a specific approach to the problem."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for the run history container in the workspace\n",
"experiment_name = \"github-remote-cpu\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"### Using AmlCompute\n",
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you use `AmlCompute` as your training compute resource.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your CPU cluster\n",
"cpu_cluster_name = \"github-cluster\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
" )\n",
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"## Data\n",
"Read Github DAU data from file, and preview data."
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"Let's set up what we know about the dataset. \n",
"\n",
"**Target column** is what we want to forecast.\n",
"\n",
"**Time column** is the time axis along which to predict.\n",
"\n",
"**Time series identifier columns** are identified by values of the columns listed `time_series_id_column_names`, for example \"store\" and \"item\" if your data has multiple time series of sales, one series for each combination of store and item sold.\n",
"\n",
"**Forecast frequency (freq)** This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information.\n",
"\n",
"This dataset has only one time series. Please see the [orange juice notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales) for an example of a multi-time series dataset."
]
},
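Since `freq` must be a pandas offset alias, it is easy to validate an alias with pandas itself before passing it to AutoML. A minimal sketch using only standard pandas (nothing here comes from this repo):

```python
import pandas as pd
from pandas.tseries.frequencies import to_offset

# Resolve aliases to offset objects; an invalid alias raises a ValueError.
print(to_offset("D"))  # <Day> - the daily frequency used in this notebook
print(to_offset("H"))  # <Hour>

# A regular daily index of the kind AutoML expects after frequency correction.
idx = pd.date_range(start="2017-06-04", periods=7, freq="D")
print(idx)
```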
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"import pandas as pd\n",
"from pandas import DataFrame\n",
"from pandas import Grouper\n",
"from pandas import concat\n",
"from pandas.plotting import register_matplotlib_converters\n",
"\n",
"register_matplotlib_converters()\n",
"plt.figure(figsize=(20, 10))\n",
"plt.tight_layout()\n",
"\n",
"plt.subplot(2, 1, 1)\n",
"plt.title(\"Github Daily Active User By Year\")\n",
"df = pd.read_csv(\"github_dau_2011-2018_train.csv\", parse_dates=True, index_col=\"date\")\n",
"test_df = pd.read_csv(\n",
" \"github_dau_2011-2018_test.csv\", parse_dates=True, index_col=\"date\"\n",
")\n",
"plt.plot(df)\n",
"\n",
"plt.subplot(2, 1, 2)\n",
"plt.title(\"Github Daily Active User By Month\")\n",
"groups = df.groupby(df.index.month)\n",
"months = concat([DataFrame(x[1].values) for x in groups], axis=1)\n",
"months = DataFrame(months)\n",
"months.columns = range(1, 49)\n",
"months.boxplot()\n",
"\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"target_column_name = \"count\"\n",
"time_column_name = \"date\"\n",
"time_series_id_column_names = []\n",
"freq = \"D\" # Daily data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Split Training data into Train and Validation set and Upload to Datastores"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"from helper import split_fraction_by_grain\n",
"from helper import split_full_for_forecasting\n",
"\n",
"train, valid = split_full_for_forecasting(df, time_column_name)\n",
"train.to_csv(\"train.csv\")\n",
"valid.to_csv(\"valid.csv\")\n",
"test_df.to_csv(\"test.csv\")\n",
"\n",
"datastore = ws.get_default_datastore()\n",
"datastore.upload_files(\n",
" files=[\"./train.csv\"],\n",
" target_path=\"github-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"datastore.upload_files(\n",
" files=[\"./valid.csv\"],\n",
" target_path=\"github-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"datastore.upload_files(\n",
" files=[\"./test.csv\"],\n",
" target_path=\"github-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"\n",
"from azureml.core import Dataset\n",
"\n",
"train_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"github-dataset/tabular/train.csv\")]\n",
")\n",
"valid_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"github-dataset/tabular/valid.csv\")]\n",
")\n",
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"github-dataset/tabular/test.csv\")]\n",
")"
]
},
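`split_full_for_forecasting` comes from the `helper.py` in this sample's folder; conceptually it makes a chronological cut rather than a random one, so the validation rows are strictly later than the training rows. A hedged sketch of the idea only (the split fraction and index handling here are assumptions, not the helper's real signature):

```python
import pandas as pd

def chronological_split(df: pd.DataFrame, time_column_name: str, fraction: float = 0.8):
    """Illustrative only: cut a time-ordered frame at a fraction of its length."""
    # Sort on the time axis, whether it is a regular column or the index.
    df = df.sort_values(time_column_name) if time_column_name in df.columns else df.sort_index()
    cut = int(len(df) * fraction)
    return df.iloc[:cut], df.iloc[cut:]
```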
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"### Setting forecaster maximum horizon \n",
"\n",
"The forecast horizon is the number of periods into the future that the model should predict. Here, we set the horizon to 12 periods (i.e. 12 months). Notice that this is much shorter than the number of months in the test set; we will need to use a rolling test to evaluate the performance on the whole test set. For more discussion of forecast horizons and guiding principles for setting them, please see the [energy demand notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand). "
]
},
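The rolling test itself runs remotely via `helper.py` and `infer.py` later in this notebook. For intuition, here is a hedged local sketch of the same idea, assuming a fitted model whose `forecast(X, y)` accepts a target vector with NaNs marking the periods to predict (the pattern used by the forecasting-function notebook earlier in this diff):

```python
import numpy as np

# Assumed to exist: fitted_model, X_test, y_test (sorted by date), forecast_horizon.
y_query = np.full(len(X_test), np.nan)
predictions = []
for origin in range(0, len(X_test), forecast_horizon):
    y_context = y_query.copy()
    y_context[:origin] = y_test[:origin]  # actuals observed up to the forecast origin
    y_pred, _ = fitted_model.forecast(X_test, y_context)
    predictions.append(y_pred[origin : origin + forecast_horizon])
y_rolling = np.concatenate(predictions)[: len(X_test)]
```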
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"forecast_horizon = 12"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"## Train\n",
"\n",
"Instantiate a AutoMLConfig object. This defines the settings and data used to run the experiment.\n",
"\n",
"|Property|Description|\n",
"|-|-|\n",
"|**task**|forecasting|\n",
"|**primary_metric**|This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>\n",
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
"|**training_data**|Input dataset, containing both features and label column.|\n",
"|**label_column_name**|The name of the label column.|\n",
"|**enable_dnn**|Enable Forecasting DNNs|\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n",
"forecasting_parameters = ForecastingParameters(\n",
" time_column_name=time_column_name,\n",
" forecast_horizon=forecast_horizon,\n",
" freq=\"D\", # Set the forecast frequency to be daily\n",
")\n",
"\n",
"# We will disable the enable_early_stopping flag to ensure the DNN model is recommended for demonstration purpose.\n",
"automl_config = AutoMLConfig(\n",
" task=\"forecasting\",\n",
" primary_metric=\"normalized_root_mean_squared_error\",\n",
" experiment_timeout_hours=1,\n",
" training_data=train_dataset,\n",
" label_column_name=target_column_name,\n",
" validation_data=valid_dataset,\n",
" verbosity=logging.INFO,\n",
" compute_target=compute_target,\n",
" max_concurrent_iterations=4,\n",
" max_cores_per_iteration=-1,\n",
" enable_dnn=True,\n",
" enable_early_stopping=False,\n",
" forecasting_parameters=forecasting_parameters,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"We will now run the experiment, starting with 10 iterations of model search. The experiment can be continued for more iterations if more accurate results are required. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"remote_run = experiment.submit(automl_config, show_output=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"# If you need to retrieve a run that already started, use the following code\n",
"# from azureml.train.automl.run import AutoMLRun\n",
"# remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"Displaying the run objects gives you links to the visual tools in the Azure Portal. Go try them!"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"### Retrieve the Best Model for Each Algorithm\n",
"Below we select the best pipeline from our iterations. The get_output method on automl_classifier returns the best run and the fitted model for the last fit invocation. There are overloads on get_output that allow you to retrieve the best run and fitted model for any logged metric or a particular iteration."
]
},
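A hedged sketch of those `get_output` overloads on the AutoML parent run (these keyword arguments exist on `AutoMLRun.get_output` in this SDK generation; loading a fitted DNN model locally additionally requires a compatible environment):

```python
# Best run and fitted model overall (by the primary metric):
best_run, fitted_model = remote_run.get_output()

# Best run and model for a specific logged metric:
run_m, model_m = remote_run.get_output(metric="normalized_root_mean_squared_error")

# Run and model from a particular iteration:
run_i, model_i = remote_run.get_output(iteration=3)
```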
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"from helper import get_result_df\n",
"\n",
"summary_df = get_result_df(remote_run)\n",
"summary_df"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"from azureml.core.run import Run\n",
"from azureml.widgets import RunDetails\n",
"\n",
"forecast_model = \"TCNForecaster\"\n",
"if not forecast_model in summary_df[\"run_id\"]:\n",
" forecast_model = \"ForecastTCN\"\n",
"\n",
"best_dnn_run_id = summary_df[\"run_id\"][forecast_model]\n",
"best_dnn_run = Run(experiment, best_dnn_run_id)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"best_dnn_run.parent\n",
"RunDetails(best_dnn_run.parent).show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"best_dnn_run\n",
"RunDetails(best_dnn_run).show()"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"## Evaluate on Test Data"
]
},
{
"cell_type": "markdown",
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"source": [
"We now use the best fitted model from the AutoML Run to make forecasts for the test set. \n",
"\n",
"We always score on the original dataset whose schema matches the training set schema."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"\n",
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"github-dataset/tabular/test.csv\")]\n",
")\n",
"# preview the first 3 rows of the dataset\n",
"test_dataset.take(5).to_pandas_dataframe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"compute_target = ws.compute_targets[\"github-cluster\"]\n",
"test_experiment = Experiment(ws, experiment_name + \"_test\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"import os\n",
"import shutil\n",
"\n",
"script_folder = os.path.join(os.getcwd(), \"inference\")\n",
"os.makedirs(script_folder, exist_ok=True)\n",
"shutil.copy(\"infer.py\", script_folder)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from helper import run_inference\n",
"\n",
"test_run = run_inference(\n",
" test_experiment,\n",
" compute_target,\n",
" script_folder,\n",
" best_dnn_run,\n",
" test_dataset,\n",
" valid_dataset,\n",
" forecast_horizon,\n",
" target_column_name,\n",
" time_column_name,\n",
" freq,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"RunDetails(test_run).show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from helper import run_multiple_inferences\n",
"\n",
"summary_df = run_multiple_inferences(\n",
" summary_df,\n",
" experiment,\n",
" test_experiment,\n",
" compute_target,\n",
" script_folder,\n",
" test_dataset,\n",
" valid_dataset,\n",
" forecast_horizon,\n",
" target_column_name,\n",
" time_column_name,\n",
" freq,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"for run_name, run_summary in summary_df.iterrows():\n",
" print(run_name)\n",
" print(run_summary)\n",
" run_id = run_summary.run_id\n",
" test_run_id = run_summary.test_run_id\n",
" test_run = Run(test_experiment, test_run_id)\n",
" test_run.wait_for_completion()\n",
" test_score = test_run.get_metrics()[run_summary.primary_metric]\n",
" summary_df.loc[summary_df.run_id == run_id, \"Test Score\"] = test_score\n",
" print(\"Test Score: \", test_score)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hideCode": false,
"hidePrompt": false
},
"outputs": [],
"source": [
"summary_df"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"hide_code_all_hidden": false,
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -0,0 +1,455 @@
date,count,day_of_week,month_of_year,holiday
2017-06-04,104663,6.0,5.0,0.0
2017-06-05,155824,0.0,5.0,0.0
2017-06-06,164908,1.0,5.0,0.0
2017-06-07,170309,2.0,5.0,0.0
2017-06-08,164256,3.0,5.0,0.0
2017-06-09,153406,4.0,5.0,0.0
2017-06-10,97024,5.0,5.0,0.0
2017-06-11,103442,6.0,5.0,0.0
2017-06-12,160768,0.0,5.0,0.0
2017-06-13,166288,1.0,5.0,0.0
2017-06-14,163819,2.0,5.0,0.0
2017-06-15,157593,3.0,5.0,0.0
2017-06-16,149259,4.0,5.0,0.0
2017-06-17,95579,5.0,5.0,0.0
2017-06-18,98723,6.0,5.0,0.0
2017-06-19,159076,0.0,5.0,0.0
2017-06-20,163340,1.0,5.0,0.0
2017-06-21,163344,2.0,5.0,0.0
2017-06-22,159528,3.0,5.0,0.0
2017-06-23,146563,4.0,5.0,0.0
2017-06-24,92631,5.0,5.0,0.0
2017-06-25,96549,6.0,5.0,0.0
2017-06-26,153249,0.0,5.0,0.0
2017-06-27,160357,1.0,5.0,0.0
2017-06-28,159941,2.0,5.0,0.0
2017-06-29,156781,3.0,5.0,0.0
2017-06-30,144709,4.0,5.0,0.0
2017-07-01,89101,5.0,6.0,0.0
2017-07-02,93046,6.0,6.0,0.0
2017-07-03,144113,0.0,6.0,0.0
2017-07-04,143061,1.0,6.0,1.0
2017-07-05,154603,2.0,6.0,0.0
2017-07-06,157200,3.0,6.0,0.0
2017-07-07,147213,4.0,6.0,0.0
2017-07-08,92348,5.0,6.0,0.0
2017-07-09,97018,6.0,6.0,0.0
2017-07-10,157192,0.0,6.0,0.0
2017-07-11,161819,1.0,6.0,0.0
2017-07-12,161998,2.0,6.0,0.0
2017-07-13,160280,3.0,6.0,0.0
2017-07-14,146818,4.0,6.0,0.0
2017-07-15,93041,5.0,6.0,0.0
2017-07-16,97505,6.0,6.0,0.0
2017-07-17,156167,0.0,6.0,0.0
2017-07-18,162855,1.0,6.0,0.0
2017-07-19,162519,2.0,6.0,0.0
2017-07-20,159941,3.0,6.0,0.0
2017-07-21,148460,4.0,6.0,0.0
2017-07-22,93431,5.0,6.0,0.0
2017-07-23,98553,6.0,6.0,0.0
2017-07-24,156202,0.0,6.0,0.0
2017-07-25,162503,1.0,6.0,0.0
2017-07-26,158479,2.0,6.0,0.0
2017-07-27,158192,3.0,6.0,0.0
2017-07-28,147108,4.0,6.0,0.0
2017-07-29,93799,5.0,6.0,0.0
2017-07-30,97920,6.0,6.0,0.0
2017-07-31,152197,0.0,6.0,0.0
2017-08-01,158477,1.0,7.0,0.0
2017-08-02,159089,2.0,7.0,0.0
2017-08-03,157182,3.0,7.0,0.0
2017-08-04,146345,4.0,7.0,0.0
2017-08-05,92534,5.0,7.0,0.0
2017-08-06,97128,6.0,7.0,0.0
2017-08-07,151359,0.0,7.0,0.0
2017-08-08,159895,1.0,7.0,0.0
2017-08-09,158329,2.0,7.0,0.0
2017-08-10,155468,3.0,7.0,0.0
2017-08-11,144914,4.0,7.0,0.0
2017-08-12,92258,5.0,7.0,0.0
2017-08-13,95933,6.0,7.0,0.0
2017-08-14,147706,0.0,7.0,0.0
2017-08-15,151115,1.0,7.0,0.0
2017-08-16,157640,2.0,7.0,0.0
2017-08-17,156600,3.0,7.0,0.0
2017-08-18,146980,4.0,7.0,0.0
2017-08-19,94592,5.0,7.0,0.0
2017-08-20,99320,6.0,7.0,0.0
2017-08-21,145727,0.0,7.0,0.0
2017-08-22,160260,1.0,7.0,0.0
2017-08-23,160440,2.0,7.0,0.0
2017-08-24,157830,3.0,7.0,0.0
2017-08-25,145822,4.0,7.0,0.0
2017-08-26,94706,5.0,7.0,0.0
2017-08-27,99047,6.0,7.0,0.0
2017-08-28,152112,0.0,7.0,0.0
2017-08-29,162440,1.0,7.0,0.0
2017-08-30,162902,2.0,7.0,0.0
2017-08-31,159498,3.0,7.0,0.0
2017-09-01,145689,4.0,8.0,0.0
2017-09-02,93589,5.0,8.0,0.0
2017-09-03,100058,6.0,8.0,0.0
2017-09-04,140865,0.0,8.0,1.0
2017-09-05,165715,1.0,8.0,0.0
2017-09-06,167463,2.0,8.0,0.0
2017-09-07,164811,3.0,8.0,0.0
2017-09-08,156157,4.0,8.0,0.0
2017-09-09,101358,5.0,8.0,0.0
2017-09-10,107915,6.0,8.0,0.0
2017-09-11,167845,0.0,8.0,0.0
2017-09-12,172756,1.0,8.0,0.0
2017-09-13,172851,2.0,8.0,0.0
2017-09-14,171675,3.0,8.0,0.0
2017-09-15,159266,4.0,8.0,0.0
2017-09-16,103547,5.0,8.0,0.0
2017-09-17,110964,6.0,8.0,0.0
2017-09-18,170976,0.0,8.0,0.0
2017-09-19,177864,1.0,8.0,0.0
2017-09-20,173567,2.0,8.0,0.0
2017-09-21,172017,3.0,8.0,0.0
2017-09-22,161357,4.0,8.0,0.0
2017-09-23,104681,5.0,8.0,0.0
2017-09-24,111711,6.0,8.0,0.0
2017-09-25,173517,0.0,8.0,0.0
2017-09-26,180049,1.0,8.0,0.0
2017-09-27,178307,2.0,8.0,0.0
2017-09-28,174157,3.0,8.0,0.0
2017-09-29,161707,4.0,8.0,0.0
2017-09-30,110536,5.0,8.0,0.0
2017-10-01,106505,6.0,9.0,0.0
2017-10-02,157565,0.0,9.0,0.0
2017-10-03,164764,1.0,9.0,0.0
2017-10-04,163383,2.0,9.0,0.0
2017-10-05,162847,3.0,9.0,0.0
2017-10-06,153575,4.0,9.0,0.0
2017-10-07,107472,5.0,9.0,0.0
2017-10-08,116127,6.0,9.0,0.0
2017-10-09,174457,0.0,9.0,1.0
2017-10-10,185217,1.0,9.0,0.0
2017-10-11,185120,2.0,9.0,0.0
2017-10-12,180844,3.0,9.0,0.0
2017-10-13,170178,4.0,9.0,0.0
2017-10-14,112754,5.0,9.0,0.0
2017-10-15,121251,6.0,9.0,0.0
2017-10-16,183906,0.0,9.0,0.0
2017-10-17,188945,1.0,9.0,0.0
2017-10-18,187297,2.0,9.0,0.0
2017-10-19,183867,3.0,9.0,0.0
2017-10-20,173021,4.0,9.0,0.0
2017-10-21,115851,5.0,9.0,0.0
2017-10-22,126088,6.0,9.0,0.0
2017-10-23,189452,0.0,9.0,0.0
2017-10-24,194412,1.0,9.0,0.0
2017-10-25,192293,2.0,9.0,0.0
2017-10-26,190163,3.0,9.0,0.0
2017-10-27,177053,4.0,9.0,0.0
2017-10-28,114934,5.0,9.0,0.0
2017-10-29,125289,6.0,9.0,0.0
2017-10-30,189245,0.0,9.0,0.0
2017-10-31,191480,1.0,9.0,0.0
2017-11-01,182281,2.0,10.0,0.0
2017-11-02,186351,3.0,10.0,0.0
2017-11-03,175422,4.0,10.0,0.0
2017-11-04,118160,5.0,10.0,0.0
2017-11-05,127602,6.0,10.0,0.0
2017-11-06,191067,0.0,10.0,0.0
2017-11-07,197083,1.0,10.0,0.0
2017-11-08,194333,2.0,10.0,0.0
2017-11-09,193914,3.0,10.0,0.0
2017-11-10,179933,4.0,10.0,1.0
2017-11-11,121346,5.0,10.0,0.0
2017-11-12,131900,6.0,10.0,0.0
2017-11-13,196969,0.0,10.0,0.0
2017-11-14,201949,1.0,10.0,0.0
2017-11-15,198424,2.0,10.0,0.0
2017-11-16,196902,3.0,10.0,0.0
2017-11-17,183893,4.0,10.0,0.0
2017-11-18,122767,5.0,10.0,0.0
2017-11-19,130890,6.0,10.0,0.0
2017-11-20,194515,0.0,10.0,0.0
2017-11-21,198601,1.0,10.0,0.0
2017-11-22,191041,2.0,10.0,0.0
2017-11-23,170321,3.0,10.0,1.0
2017-11-24,155623,4.0,10.0,0.0
2017-11-25,115759,5.0,10.0,0.0
2017-11-26,128771,6.0,10.0,0.0
2017-11-27,199419,0.0,10.0,0.0
2017-11-28,207253,1.0,10.0,0.0
2017-11-29,205406,2.0,10.0,0.0
2017-11-30,200674,3.0,10.0,0.0
2017-12-01,187017,4.0,11.0,0.0
2017-12-02,129735,5.0,11.0,0.0
2017-12-03,139120,6.0,11.0,0.0
2017-12-04,205505,0.0,11.0,0.0
2017-12-05,208218,1.0,11.0,0.0
2017-12-06,202480,2.0,11.0,0.0
2017-12-07,197822,3.0,11.0,0.0
2017-12-08,180686,4.0,11.0,0.0
2017-12-09,123667,5.0,11.0,0.0
2017-12-10,130987,6.0,11.0,0.0
2017-12-11,193901,0.0,11.0,0.0
2017-12-12,194997,1.0,11.0,0.0
2017-12-13,192063,2.0,11.0,0.0
2017-12-14,186496,3.0,11.0,0.0
2017-12-15,170812,4.0,11.0,0.0
2017-12-16,110474,5.0,11.0,0.0
2017-12-17,118165,6.0,11.0,0.0
2017-12-18,176843,0.0,11.0,0.0
2017-12-19,179550,1.0,11.0,0.0
2017-12-20,173506,2.0,11.0,0.0
2017-12-21,165910,3.0,11.0,0.0
2017-12-22,145886,4.0,11.0,0.0
2017-12-23,95246,5.0,11.0,0.0
2017-12-24,88781,6.0,11.0,0.0
2017-12-25,98189,0.0,11.0,1.0
2017-12-26,121383,1.0,11.0,0.0
2017-12-27,135300,2.0,11.0,0.0
2017-12-28,136827,3.0,11.0,0.0
2017-12-29,127700,4.0,11.0,0.0
2017-12-30,93014,5.0,11.0,0.0
2017-12-31,82878,6.0,11.0,0.0
2018-01-01,86419,0.0,0.0,1.0
2018-01-02,147428,1.0,0.0,0.0
2018-01-03,162193,2.0,0.0,0.0
2018-01-04,163784,3.0,0.0,0.0
2018-01-05,158606,4.0,0.0,0.0
2018-01-06,113467,5.0,0.0,0.0
2018-01-07,118313,6.0,0.0,0.0
2018-01-08,175623,0.0,0.0,0.0
2018-01-09,183880,1.0,0.0,0.0
2018-01-10,183945,2.0,0.0,0.0
2018-01-11,181769,3.0,0.0,0.0
2018-01-12,170552,4.0,0.0,0.0
2018-01-13,115707,5.0,0.0,0.0
2018-01-14,121191,6.0,0.0,0.0
2018-01-15,176127,0.0,0.0,1.0
2018-01-16,188032,1.0,0.0,0.0
2018-01-17,189871,2.0,0.0,0.0
2018-01-18,189348,3.0,0.0,0.0
2018-01-19,177456,4.0,0.0,0.0
2018-01-20,123321,5.0,0.0,0.0
2018-01-21,128306,6.0,0.0,0.0
2018-01-22,186132,0.0,0.0,0.0
2018-01-23,197618,1.0,0.0,0.0
2018-01-24,196402,2.0,0.0,0.0
2018-01-25,192722,3.0,0.0,0.0
2018-01-26,179415,4.0,0.0,0.0
2018-01-27,125769,5.0,0.0,0.0
2018-01-28,133306,6.0,0.0,0.0
2018-01-29,194151,0.0,0.0,0.0
2018-01-30,198680,1.0,0.0,0.0
2018-01-31,198652,2.0,0.0,0.0
2018-02-01,195472,3.0,1.0,0.0
2018-02-02,183173,4.0,1.0,0.0
2018-02-03,124276,5.0,1.0,0.0
2018-02-04,129054,6.0,1.0,0.0
2018-02-05,190024,0.0,1.0,0.0
2018-02-06,198658,1.0,1.0,0.0
2018-02-07,198272,2.0,1.0,0.0
2018-02-08,195339,3.0,1.0,0.0
2018-02-09,183086,4.0,1.0,0.0
2018-02-10,122536,5.0,1.0,0.0
2018-02-11,133033,6.0,1.0,0.0
2018-02-12,185386,0.0,1.0,0.0
2018-02-13,184789,1.0,1.0,0.0
2018-02-14,176089,2.0,1.0,0.0
2018-02-15,171317,3.0,1.0,0.0
2018-02-16,162693,4.0,1.0,0.0
2018-02-17,116342,5.0,1.0,0.0
2018-02-18,122466,6.0,1.0,0.0
2018-02-19,172364,0.0,1.0,1.0
2018-02-20,185896,1.0,1.0,0.0
2018-02-21,188166,2.0,1.0,0.0
2018-02-22,189427,3.0,1.0,0.0
2018-02-23,178732,4.0,1.0,0.0
2018-02-24,132664,5.0,1.0,0.0
2018-02-25,134008,6.0,1.0,0.0
2018-02-26,200075,0.0,1.0,0.0
2018-02-27,207996,1.0,1.0,0.0
2018-02-28,204416,2.0,1.0,0.0
2018-03-01,201320,3.0,2.0,0.0
2018-03-02,188205,4.0,2.0,0.0
2018-03-03,131162,5.0,2.0,0.0
2018-03-04,138320,6.0,2.0,0.0
2018-03-05,207326,0.0,2.0,0.0
2018-03-06,212462,1.0,2.0,0.0
2018-03-07,209357,2.0,2.0,0.0
2018-03-08,194876,3.0,2.0,0.0
2018-03-09,193761,4.0,2.0,0.0
2018-03-10,133449,5.0,2.0,0.0
2018-03-11,142258,6.0,2.0,0.0
2018-03-12,208753,0.0,2.0,0.0
2018-03-13,210602,1.0,2.0,0.0
2018-03-14,214236,2.0,2.0,0.0
2018-03-15,210761,3.0,2.0,0.0
2018-03-16,196619,4.0,2.0,0.0
2018-03-17,133056,5.0,2.0,0.0
2018-03-18,141335,6.0,2.0,0.0
2018-03-19,211580,0.0,2.0,0.0
2018-03-20,219051,1.0,2.0,0.0
2018-03-21,215435,2.0,2.0,0.0
2018-03-22,211961,3.0,2.0,0.0
2018-03-23,196009,4.0,2.0,0.0
2018-03-24,132390,5.0,2.0,0.0
2018-03-25,140021,6.0,2.0,0.0
2018-03-26,205273,0.0,2.0,0.0
2018-03-27,212686,1.0,2.0,0.0
2018-03-28,210683,2.0,2.0,0.0
2018-03-29,189044,3.0,2.0,0.0
2018-03-30,170256,4.0,2.0,0.0
2018-03-31,125999,5.0,2.0,0.0
2018-04-01,126749,6.0,3.0,0.0
2018-04-02,186546,0.0,3.0,0.0
2018-04-03,207905,1.0,3.0,0.0
2018-04-04,201528,2.0,3.0,0.0
2018-04-05,188580,3.0,3.0,0.0
2018-04-06,173714,4.0,3.0,0.0
2018-04-07,125723,5.0,3.0,0.0
2018-04-08,142545,6.0,3.0,0.0
2018-04-09,204767,0.0,3.0,0.0
2018-04-10,212048,1.0,3.0,0.0
2018-04-11,210517,2.0,3.0,0.0
2018-04-12,206924,3.0,3.0,0.0
2018-04-13,191679,4.0,3.0,0.0
2018-04-14,126394,5.0,3.0,0.0
2018-04-15,137279,6.0,3.0,0.0
2018-04-16,208085,0.0,3.0,0.0
2018-04-17,213273,1.0,3.0,0.0
2018-04-18,211580,2.0,3.0,0.0
2018-04-19,206037,3.0,3.0,0.0
2018-04-20,191211,4.0,3.0,0.0
2018-04-21,125564,5.0,3.0,0.0
2018-04-22,136469,6.0,3.0,0.0
2018-04-23,206288,0.0,3.0,0.0
2018-04-24,212115,1.0,3.0,0.0
2018-04-25,207948,2.0,3.0,0.0
2018-04-26,205759,3.0,3.0,0.0
2018-04-27,181330,4.0,3.0,0.0
2018-04-28,130046,5.0,3.0,0.0
2018-04-29,120802,6.0,3.0,0.0
2018-04-30,170390,0.0,3.0,0.0
2018-05-01,169054,1.0,4.0,0.0
2018-05-02,197891,2.0,4.0,0.0
2018-05-03,199820,3.0,4.0,0.0
2018-05-04,186783,4.0,4.0,0.0
2018-05-05,124420,5.0,4.0,0.0
2018-05-06,130666,6.0,4.0,0.0
2018-05-07,196014,0.0,4.0,0.0
2018-05-08,203058,1.0,4.0,0.0
2018-05-09,198582,2.0,4.0,0.0
2018-05-10,191321,3.0,4.0,0.0
2018-05-11,183639,4.0,4.0,0.0
2018-05-12,122023,5.0,4.0,0.0
2018-05-13,128775,6.0,4.0,0.0
2018-05-14,199104,0.0,4.0,0.0
2018-05-15,200658,1.0,4.0,0.0
2018-05-16,201541,2.0,4.0,0.0
2018-05-17,196886,3.0,4.0,0.0
2018-05-18,188597,4.0,4.0,0.0
2018-05-19,121392,5.0,4.0,0.0
2018-05-20,126981,6.0,4.0,0.0
2018-05-21,189291,0.0,4.0,0.0
2018-05-22,203038,1.0,4.0,0.0
2018-05-23,205330,2.0,4.0,0.0
2018-05-24,199208,3.0,4.0,0.0
2018-05-25,187768,4.0,4.0,0.0
2018-05-26,117635,5.0,4.0,0.0
2018-05-27,124352,6.0,4.0,0.0
2018-05-28,180398,0.0,4.0,1.0
2018-05-29,194170,1.0,4.0,0.0
2018-05-30,200281,2.0,4.0,0.0
2018-05-31,197244,3.0,4.0,0.0
2018-06-01,184037,4.0,5.0,0.0
2018-06-02,121135,5.0,5.0,0.0
2018-06-03,129389,6.0,5.0,0.0
2018-06-04,200331,0.0,5.0,0.0
2018-06-05,207735,1.0,5.0,0.0
2018-06-06,203354,2.0,5.0,0.0
2018-06-07,200520,3.0,5.0,0.0
2018-06-08,182038,4.0,5.0,0.0
2018-06-09,120164,5.0,5.0,0.0
2018-06-10,125256,6.0,5.0,0.0
2018-06-11,194786,0.0,5.0,0.0
2018-06-12,200815,1.0,5.0,0.0
2018-06-13,197740,2.0,5.0,0.0
2018-06-14,192294,3.0,5.0,0.0
2018-06-15,173587,4.0,5.0,0.0
2018-06-16,105955,5.0,5.0,0.0
2018-06-17,110780,6.0,5.0,0.0
2018-06-18,174582,0.0,5.0,0.0
2018-06-19,193310,1.0,5.0,0.0
2018-06-20,193062,2.0,5.0,0.0
2018-06-21,187986,3.0,5.0,0.0
2018-06-22,173606,4.0,5.0,0.0
2018-06-23,111795,5.0,5.0,0.0
2018-06-24,116134,6.0,5.0,0.0
2018-06-25,185919,0.0,5.0,0.0
2018-06-26,193142,1.0,5.0,0.0
2018-06-27,188114,2.0,5.0,0.0
2018-06-28,183737,3.0,5.0,0.0
2018-06-29,171496,4.0,5.0,0.0
2018-06-30,107210,5.0,5.0,0.0
2018-07-01,111053,6.0,6.0,0.0
2018-07-02,176198,0.0,6.0,0.0
2018-07-03,184040,1.0,6.0,0.0
2018-07-04,169783,2.0,6.0,1.0
2018-07-05,177996,3.0,6.0,0.0
2018-07-06,167378,4.0,6.0,0.0
2018-07-07,106401,5.0,6.0,0.0
2018-07-08,112327,6.0,6.0,0.0
2018-07-09,182835,0.0,6.0,0.0
2018-07-10,187694,1.0,6.0,0.0
2018-07-11,185762,2.0,6.0,0.0
2018-07-12,184099,3.0,6.0,0.0
2018-07-13,170860,4.0,6.0,0.0
2018-07-14,106799,5.0,6.0,0.0
2018-07-15,108475,6.0,6.0,0.0
2018-07-16,175704,0.0,6.0,0.0
2018-07-17,183596,1.0,6.0,0.0
2018-07-18,179897,2.0,6.0,0.0
2018-07-19,183373,3.0,6.0,0.0
2018-07-20,169626,4.0,6.0,0.0
2018-07-21,106785,5.0,6.0,0.0
2018-07-22,112387,6.0,6.0,0.0
2018-07-23,180572,0.0,6.0,0.0
2018-07-24,186943,1.0,6.0,0.0
2018-07-25,185744,2.0,6.0,0.0
2018-07-26,183117,3.0,6.0,0.0
2018-07-27,168526,4.0,6.0,0.0
2018-07-28,105936,5.0,6.0,0.0
2018-07-29,111708,6.0,6.0,0.0
2018-07-30,179950,0.0,6.0,0.0
2018-07-31,185930,1.0,6.0,0.0
2018-08-01,183366,2.0,7.0,0.0
2018-08-02,182412,3.0,7.0,0.0
2018-08-03,173429,4.0,7.0,0.0
2018-08-04,106108,5.0,7.0,0.0
2018-08-05,110059,6.0,7.0,0.0
2018-08-06,178355,0.0,7.0,0.0
2018-08-07,185518,1.0,7.0,0.0
2018-08-08,183204,2.0,7.0,0.0
2018-08-09,181276,3.0,7.0,0.0
2018-08-10,168297,4.0,7.0,0.0
2018-08-11,106488,5.0,7.0,0.0
2018-08-12,111786,6.0,7.0,0.0
2018-08-13,178620,0.0,7.0,0.0
2018-08-14,181922,1.0,7.0,0.0
2018-08-15,172198,2.0,7.0,0.0
2018-08-16,177367,3.0,7.0,0.0
2018-08-17,166550,4.0,7.0,0.0
2018-08-18,107011,5.0,7.0,0.0
2018-08-19,112299,6.0,7.0,0.0
2018-08-20,176718,0.0,7.0,0.0
2018-08-21,182562,1.0,7.0,0.0
2018-08-22,181484,2.0,7.0,0.0
2018-08-23,180317,3.0,7.0,0.0
2018-08-24,170197,4.0,7.0,0.0
2018-08-25,109383,5.0,7.0,0.0
2018-08-26,113373,6.0,7.0,0.0
2018-08-27,180142,0.0,7.0,0.0
2018-08-28,191628,1.0,7.0,0.0
2018-08-29,191149,2.0,7.0,0.0
2018-08-30,187503,3.0,7.0,0.0
2018-08-31,172280,4.0,7.0,0.0


@@ -0,0 +1,183 @@
import pandas as pd
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies
from azureml.train.estimator import Estimator
from azureml.core.run import Run
from azureml.automl.core.shared import constants
def split_fraction_by_grain(df, fraction, time_column_name, grain_column_names=None):
    """Group df by grain and split on the last n rows of each group."""
    if not grain_column_names:
        df["tmp_grain_column"] = "grain"
        grain_column_names = ["tmp_grain_column"]
df_grouped = df.sort_values(time_column_name).groupby(
grain_column_names, group_keys=False
)
df_head = df_grouped.apply(
lambda dfg: dfg.iloc[: -int(len(dfg) * fraction)] if fraction > 0 else dfg
)
df_tail = df_grouped.apply(
lambda dfg: dfg.iloc[-int(len(dfg) * fraction) :] if fraction > 0 else dfg[:0]
)
if "tmp_grain_column" in grain_column_names:
for df2 in (df, df_head, df_tail):
df2.drop("tmp_grain_column", axis=1, inplace=True)
grain_column_names.remove("tmp_grain_column")
return df_head, df_tail
def split_full_for_forecasting(
df, time_column_name, grain_column_names=None, test_split=0.2
):
index_name = df.index.name
# Assumes that there isn't already a column called tmpindex
df["tmpindex"] = df.index
train_df, test_df = split_fraction_by_grain(
df, test_split, time_column_name, grain_column_names
)
train_df = train_df.set_index("tmpindex")
train_df.index.name = index_name
test_df = test_df.set_index("tmpindex")
test_df.index.name = index_name
df.drop("tmpindex", axis=1, inplace=True)
return train_df, test_df
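# Hypothetical usage sketch (names are illustrative, not from the sample data):
# hold out the last 20% of rows of each series for testing, given a DataFrame
# `df` with a time column 'date' and a single grain column 'store_id'.
#
#   train_df, test_df = split_full_for_forecasting(
#       df, time_column_name="date", grain_column_names=["store_id"], test_split=0.2
#   )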
def get_result_df(remote_run):
children = list(remote_run.get_children(recursive=True))
summary_df = pd.DataFrame(
index=["run_id", "run_algorithm", "primary_metric", "Score"]
)
goal_minimize = False
for run in children:
if (
run.get_status().lower() == constants.RunState.COMPLETE_RUN
and "run_algorithm" in run.properties
and "score" in run.properties
):
# We only count in the completed child runs.
summary_df[run.id] = [
run.id,
run.properties["run_algorithm"],
run.properties["primary_metric"],
float(run.properties["score"]),
]
if "goal" in run.properties:
goal_minimize = run.properties["goal"].split("_")[-1] == "min"
summary_df = summary_df.T.sort_values(
"Score", ascending=goal_minimize
).drop_duplicates(["run_algorithm"])
summary_df = summary_df.set_index("run_algorithm")
return summary_df
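# Note: the returned frame is indexed by run_algorithm, keeps only the best-scoring
# completed run per algorithm, and carries run_id, primary_metric, and Score columns.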
def run_inference(
test_experiment,
compute_target,
script_folder,
train_run,
test_dataset,
lookback_dataset,
max_horizon,
target_column_name,
time_column_name,
freq,
):
model_base_name = "model.pkl"
if "model_data_location" in train_run.properties:
model_location = train_run.properties["model_data_location"]
_, model_base_name = model_location.rsplit("/", 1)
train_run.download_file(
"outputs/{}".format(model_base_name), "inference/{}".format(model_base_name)
)
train_run.download_file("outputs/conda_env_v_1_0_0.yml", "inference/condafile.yml")
inference_env = Environment("myenv")
inference_env.docker.enabled = True
inference_env.python.conda_dependencies = CondaDependencies(
conda_dependencies_file_path="inference/condafile.yml"
)
est = Estimator(
source_directory=script_folder,
entry_script="infer.py",
script_params={
"--max_horizon": max_horizon,
"--target_column_name": target_column_name,
"--time_column_name": time_column_name,
"--frequency": freq,
"--model_path": model_base_name,
},
inputs=[
test_dataset.as_named_input("test_data"),
lookback_dataset.as_named_input("lookback_data"),
],
compute_target=compute_target,
environment_definition=inference_env,
)
run = test_experiment.submit(
est,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags["run_algorithm"])
return run
def run_multiple_inferences(
summary_df,
train_experiment,
test_experiment,
compute_target,
script_folder,
test_dataset,
lookback_dataset,
max_horizon,
target_column_name,
time_column_name,
freq,
):
for run_name, run_summary in summary_df.iterrows():
print(run_name)
print(run_summary)
run_id = run_summary.run_id
train_run = Run(train_experiment, run_id)
test_run = run_inference(
test_experiment,
compute_target,
script_folder,
train_run,
test_dataset,
lookback_dataset,
max_horizon,
target_column_name,
time_column_name,
freq,
)
print(test_run)
summary_df.loc[summary_df.run_id == run_id, "test_run_id"] = test_run.id
return summary_df


@@ -0,0 +1,386 @@
import argparse
import os
import numpy as np
import pandas as pd
from pandas.tseries.frequencies import to_offset
from sklearn.externals import joblib
from sklearn.metrics import mean_absolute_error, mean_squared_error
from azureml.automl.runtime.shared.score import scoring, constants
from azureml.core import Run
try:
import torch
_torch_present = True
except ImportError:
_torch_present = False
def align_outputs(
y_predicted,
X_trans,
X_test,
y_test,
predicted_column_name="predicted",
horizon_colname="horizon_origin",
):
"""
Demonstrates how to get the output aligned to the inputs
using pandas indexes. Helps understand what happened if
the output's shape differs from the input shape, or if
the data got re-sorted by time and grain during forecasting.
Typical causes of misalignment are:
* we predicted some periods that were missing in actuals -> drop from eval
* model was asked to predict past max_horizon -> increase max horizon
* data at start of X_test was needed for lags -> provide previous periods
"""
if horizon_colname in X_trans:
df_fcst = pd.DataFrame(
{
predicted_column_name: y_predicted,
horizon_colname: X_trans[horizon_colname],
}
)
else:
df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
# y and X outputs are aligned by forecast() function contract
df_fcst.index = X_trans.index
# align original X_test to y_test
X_test_full = X_test.copy()
X_test_full[target_column_name] = y_test
# X_test_full's index does not include origin, so reset for merge
df_fcst.reset_index(inplace=True)
X_test_full = X_test_full.reset_index().drop(columns="index")
together = df_fcst.merge(X_test_full, how="right")
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = together[
together[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
return clean
def do_rolling_forecast_with_lookback(
fitted_model, X_test, y_test, max_horizon, X_lookback, y_lookback, freq="D"
):
"""
Produce forecasts on a rolling origin over the given test set.
Each iteration makes a forecast for the next 'max_horizon' periods
with respect to the current origin, then advances the origin by the
horizon time duration. The prediction context for each forecast is set so
that the forecaster uses the actual target values prior to the current
origin time for constructing lag features.
This function returns a concatenated DataFrame of rolling forecasts.
"""
print("Using lookback of size: ", y_lookback.size)
df_list = []
origin_time = X_test[time_column_name].min()
X = X_lookback.append(X_test)
y = np.concatenate((y_lookback, y_test), axis=0)
while origin_time <= X_test[time_column_name].max():
# Set the horizon time - end date of the forecast
horizon_time = origin_time + max_horizon * to_offset(freq)
# Extract test data from an expanding window up-to the horizon
expand_wind = X[time_column_name] < horizon_time
X_test_expand = X[expand_wind]
        y_query_expand = np.zeros(len(X_test_expand), dtype=float)
        y_query_expand.fill(np.nan)
if origin_time != X[time_column_name].min():
# Set the context by including actuals up-to the origin time
test_context_expand_wind = X[time_column_name] < origin_time
context_expand_wind = X_test_expand[time_column_name] < origin_time
y_query_expand[context_expand_wind] = y[test_context_expand_wind]
# Print some debug info
print(
"Horizon_time:",
horizon_time,
" origin_time: ",
origin_time,
" max_horizon: ",
max_horizon,
" freq: ",
freq,
)
print("expand_wind: ", expand_wind)
print("y_query_expand")
print(y_query_expand)
print("X_test")
print(X)
print("X_test_expand")
print(X_test_expand)
print("Type of X_test_expand: ", type(X_test_expand))
print("Type of y_query_expand: ", type(y_query_expand))
print("y_query_expand")
print(y_query_expand)
# Make a forecast out to the maximum horizon
# y_fcst, X_trans = y_query_expand, X_test_expand
y_fcst, X_trans = fitted_model.forecast(X_test_expand, y_query_expand)
print("y_fcst")
print(y_fcst)
# Align forecast with test set for dates within
# the current rolling window
trans_tindex = X_trans.index.get_level_values(time_column_name)
trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
test_roll_wind = expand_wind & (X[time_column_name] >= origin_time)
df_list.append(
align_outputs(
y_fcst[trans_roll_wind],
X_trans[trans_roll_wind],
X[test_roll_wind],
y[test_roll_wind],
)
)
# Advance the origin time
origin_time = horizon_time
return pd.concat(df_list, ignore_index=True)
def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq="D"):
"""
Produce forecasts on a rolling origin over the given test set.
Each iteration makes a forecast for the next 'max_horizon' periods
with respect to the current origin, then advances the origin by the
horizon time duration. The prediction context for each forecast is set so
that the forecaster uses the actual target values prior to the current
origin time for constructing lag features.
This function returns a concatenated DataFrame of rolling forecasts.
"""
df_list = []
origin_time = X_test[time_column_name].min()
while origin_time <= X_test[time_column_name].max():
# Set the horizon time - end date of the forecast
horizon_time = origin_time + max_horizon * to_offset(freq)
# Extract test data from an expanding window up-to the horizon
expand_wind = X_test[time_column_name] < horizon_time
X_test_expand = X_test[expand_wind]
        y_query_expand = np.zeros(len(X_test_expand), dtype=float)
        y_query_expand.fill(np.nan)
if origin_time != X_test[time_column_name].min():
# Set the context by including actuals up-to the origin time
test_context_expand_wind = X_test[time_column_name] < origin_time
context_expand_wind = X_test_expand[time_column_name] < origin_time
y_query_expand[context_expand_wind] = y_test[test_context_expand_wind]
# Print some debug info
print(
"Horizon_time:",
horizon_time,
" origin_time: ",
origin_time,
" max_horizon: ",
max_horizon,
" freq: ",
freq,
)
print("expand_wind: ", expand_wind)
print("y_query_expand")
print(y_query_expand)
print("X_test")
print(X_test)
print("X_test_expand")
print(X_test_expand)
print("Type of X_test_expand: ", type(X_test_expand))
print("Type of y_query_expand: ", type(y_query_expand))
print("y_query_expand")
print(y_query_expand)
# Make a forecast out to the maximum horizon
y_fcst, X_trans = fitted_model.forecast(X_test_expand, y_query_expand)
print("y_fcst")
print(y_fcst)
# Align forecast with test set for dates within the
# current rolling window
trans_tindex = X_trans.index.get_level_values(time_column_name)
trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
test_roll_wind = expand_wind & (X_test[time_column_name] >= origin_time)
df_list.append(
align_outputs(
y_fcst[trans_roll_wind],
X_trans[trans_roll_wind],
X_test[test_roll_wind],
y_test[test_roll_wind],
)
)
# Advance the origin time
origin_time = horizon_time
return pd.concat(df_list, ignore_index=True)
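# Worked example of the rolling origin (hypothetical dates): with freq="D" and
# max_horizon=7, an origin of 2018-08-01 forecasts 2018-08-01 through 2018-08-07;
# the origin then advances to 2018-08-08, and actuals before it become lag context.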
def APE(actual, pred):
"""
Calculate absolute percentage error.
Returns a vector of APE values with same length as actual/pred.
"""
return 100 * np.abs((actual - pred) / actual)
def MAPE(actual, pred):
"""
Calculate mean absolute percentage error.
Remove NA and values where actual is close to zero
"""
not_na = ~(np.isnan(actual) | np.isnan(pred))
not_zero = ~np.isclose(actual, 0.0)
actual_safe = actual[not_na & not_zero]
pred_safe = pred[not_na & not_zero]
return np.mean(APE(actual_safe, pred_safe))
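# Worked example (hypothetical values): for actual=[100, 200] and pred=[110, 190],
# APE is [10.0, 5.0] and MAPE is 7.5; rows with a NaN in either series, or with
# near-zero actuals, are dropped first.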
def map_location_cuda(storage, loc):
return storage.cuda()
parser = argparse.ArgumentParser()
parser.add_argument(
"--max_horizon",
type=int,
dest="max_horizon",
default=10,
help="Max Horizon for forecasting",
)
parser.add_argument(
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
"--time_column_name", type=str, dest="time_column_name", help="Time Column Name"
)
parser.add_argument(
"--frequency", type=str, dest="freq", help="Frequency of prediction"
)
parser.add_argument(
"--model_path",
type=str,
dest="model_path",
default="model.pkl",
help="Filename of model to be loaded",
)
args = parser.parse_args()
max_horizon = args.max_horizon
target_column_name = args.target_column_name
time_column_name = args.time_column_name
freq = args.freq
model_path = args.model_path
print("args passed are: ")
print(max_horizon)
print(target_column_name)
print(time_column_name)
print(freq)
print(model_path)
run = Run.get_context()
# get input dataset by name
test_dataset = run.input_datasets["test_data"]
lookback_dataset = run.input_datasets["lookback_data"]
grain_column_names = []
df = test_dataset.to_pandas_dataframe()
print("Read df")
print(df)
X_test_df = test_dataset.drop_columns(columns=[target_column_name])
y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(
columns=[target_column_name]
)
X_lookback_df = lookback_dataset.drop_columns(columns=[target_column_name])
y_lookback_df = lookback_dataset.with_timestamp_columns(None).keep_columns(
columns=[target_column_name]
)
_, ext = os.path.splitext(model_path)
if ext == ".pt":
# Load the fc-tcn torch model.
    assert _torch_present, "Torch must be installed to load a .pt model."
if torch.cuda.is_available():
map_location = map_location_cuda
else:
map_location = "cpu"
with open(model_path, "rb") as fh:
fitted_model = torch.load(fh, map_location=map_location)
else:
# Load the sklearn pipeline.
fitted_model = joblib.load(model_path)
if hasattr(fitted_model, "get_lookback"):
lookback = fitted_model.get_lookback()
df_all = do_rolling_forecast_with_lookback(
fitted_model,
X_test_df.to_pandas_dataframe(),
y_test_df.to_pandas_dataframe().values.T[0],
max_horizon,
X_lookback_df.to_pandas_dataframe()[-lookback:],
y_lookback_df.to_pandas_dataframe().values.T[0][-lookback:],
freq,
)
else:
df_all = do_rolling_forecast(
fitted_model,
X_test_df.to_pandas_dataframe(),
y_test_df.to_pandas_dataframe().values.T[0],
max_horizon,
freq,
)
print(df_all)
print("target values:::")
print(df_all[target_column_name])
print("predicted values:::")
print(df_all["predicted"])
# Use the AutoML scoring module
regression_metrics = list(constants.REGRESSION_SCALAR_SET)
y_test = np.array(df_all[target_column_name])
y_pred = np.array(df_all["predicted"])
scores = scoring.score_regression(y_test, y_pred, regression_metrics)
print("scores:")
print(scores)
for key, value in scores.items():
run.log(key, value)
print("Simple forecasting model")
rmse = np.sqrt(mean_squared_error(df_all[target_column_name], df_all["predicted"]))
print("[Test Data] \nRoot Mean squared error: %.2f" % rmse)
mae = mean_absolute_error(df_all[target_column_name], df_all["predicted"])
print("mean_absolute_error score: %.2f" % mae)
print("MAPE: %.2f" % MAPE(df_all[target_column_name], df_all["predicted"]))
run.log("rmse", rmse)
run.log("mae", mae)


@@ -0,0 +1,94 @@
---
page_type: sample
languages:
- python
products:
- azure-machine-learning
description: Tutorial showing how to solve complex machine learning time series forecasting problems at scale by using Azure Automated ML and the Hierarchical time series accelerator.
---
## Microsoft Solution Accelerator: Hierarchical Time Series Forecasting
In most applications, customers need to understand their forecasts at both a macro and a micro level of the business. Whether that means predicting sales of products at different geographic locations or understanding the expected workforce demand for different organizations at a company, the ability to train a machine learning model to intelligently forecast on hierarchical data is essential.
This business pattern is common across a wide variety of industries and is applicable to many real-world use cases. Below are some examples of where the hierarchical time series pattern is useful.
| Industry | Scenario |
|----------------|--------------------------------------------|
| *Restaurant Chain* | Building demand forecasting models across thousands of restaurants and several countries. |
| *Retail Organization* | Building workforce optimization models for thousands of stores. |
| *Retail Organization*| Price optimization models for hundreds of thousands of products available. |
### Technical Summary
A hierarchical time series is a structure in which the unique series are arranged into a hierarchy based on dimensions such as geography or product type. The table below shows an example of data whose unique attributes form a hierarchy. Our hierarchy is defined by the `product type`, such as headphones or tablets; the `product category`, which splits product types into accessories and devices; and the `region` the products are sold in. The table below demonstrates the first input of each unique series in the hierarchy.
![data-table](./media/data-table.png)
To further visualize this, the leaf levels of the hierarchy contain all the time series with unique combinations of attribute values. Each higher level in the hierarchy considers one fewer dimension for defining the time series and aggregates each set of `child nodes` from the lower level into a `parent node`.
![hierachy-sample](./media/hierarchy-sample-ms.PNG)
> **Note:** If no unique root level exists in the data, Automated Machine Learning will create a node `automl_top_level` for users to train or forecasts totals.
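To make the aggregation concrete, here is a minimal, hypothetical pandas sketch (the column names and values are illustrative, not taken from the sample data) that rolls leaf-level series up one level by dropping the most granular dimension and summing the child nodes:

```python
import pandas as pd

# Hypothetical leaf-level series: one row per date and unique attribute combination.
leaf = pd.DataFrame(
    {
        "date": pd.to_datetime(["2020-01-01"] * 4),
        "region": ["East", "East", "East", "East"],
        "product_category": ["Accessories", "Accessories", "Devices", "Devices"],
        "product_type": ["headphones", "cases", "tablets", "laptops"],
        "quantity": [10, 5, 4, 3],
    }
)

# One level up: drop the most granular dimension (product_type) and sum the child nodes.
parent = leaf.groupby(["date", "region", "product_category"], as_index=False)["quantity"].sum()
# -> Accessories: 15, Devices: 7 for East on 2020-01-01
print(parent)
```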
## Prerequisites
To use this solution accelerator, all you need is access to an [Azure subscription](https://azure.microsoft.com/free/) and an [Azure Machine Learning Workspace](https://docs.microsoft.com/azure/machine-learning/how-to-manage-workspace) that you'll create below.
A basic understanding of Azure Machine Learning and hierarchical time series concepts will be helpful for understanding the solution. The following resources can help introduce you to these concepts:
1. [Azure Machine Learning Overview](https://azure.microsoft.com/services/machine-learning/)
2. [Azure Machine Learning Tutorials](https://docs.microsoft.com/azure/machine-learning/tutorial-1st-experiment-sdk-setup)
3. [Azure Machine Learning Sample Notebooks on Github](https://github.com/Azure/azureml-examples/)
4. [Forecasting: Principles and Practice, Hierarchical time series](https://otexts.com/fpp2/hts.html)
## Getting started
### 1. Set up the Compute Instance
Please create a [Compute Instance](https://docs.microsoft.com/en-us/azure/machine-learning/concept-compute-instance#create) and clone the git repo to your workspace.
### 2. Run the Notebook
Once your environment is set up, go to JupyterLab and run the notebook `auto-ml-forecasting-hierarchical-timeseries.ipynb` on the Compute Instance you created. It runs through the steps outlined below sequentially. By the end, you'll know how to train, score, and make predictions using the hierarchical time series model pattern on Azure Machine Learning.
| Notebook | Description |
|----------------|--------------------------------------------|
| `auto-ml-forecasting-hierarchical-timeseries.ipynb`|Creates a pipeline to train machine learning models for the defined hierarchy and forecast at the desired hierarchy level using Automated ML. |
![Work Flow](./media/workflow.PNG)
## Key Concepts
### Automated Machine Learning
[Automated Machine Learning](https://docs.microsoft.com/azure/machine-learning/concept-automated-ml), also referred to as automated ML or AutoML, is the process of automating the time-consuming, iterative tasks of machine learning model development. It allows data scientists, analysts, and developers to build ML models with high scale, efficiency, and productivity, all while sustaining model quality.
### Pipelines
[Pipelines](https://docs.microsoft.com/azure/machine-learning/concept-ml-pipelines) allow you to create workflows in your machine learning projects. These workflows have a number of benefits including speed, simplicity, repeatability, and modularity.
### ParallelRunStep
[ParallelRunStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.parallel_run_step.parallelrunstep?view=azure-ml-py) enables the parallel training of models and is commonly used for batch inferencing. This [document](https://docs.microsoft.com/azure/machine-learning/how-to-use-parallel-run-step) walks through some of the key concepts around ParallelRunStep.
### Other Concepts
In addition to ParallelRunStep, Pipelines, and Automated Machine Learning, you'll also work with the following concepts: [workspace](https://docs.microsoft.com/azure/machine-learning/concept-workspace), [datasets](https://docs.microsoft.com/azure/machine-learning/concept-data#datasets), [compute targets](https://docs.microsoft.com/azure/machine-learning/concept-compute-target#train), [python script steps](https://docs.microsoft.com/python/api/azureml-pipeline-steps/azureml.pipeline.steps.python_script_step.pythonscriptstep?view=azure-ml-py), and [Azure Open Datasets](https://azure.microsoft.com/services/open-datasets/).
## Contributing
This project welcomes contributions and suggestions. To learn more visit the [contributing](CONTRIBUTING.md) section.
Most contributions require you to agree to a Contributor License Agreement (CLA)
declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
When you submit a pull request, a CLA bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.


@@ -0,0 +1,639 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Hierarchical Time Series - Automated ML\n",
"**_Generate hierarchical time series forecasts with Automated Machine Learning_**\n",
"\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For this notebook we are using a synthetic dataset portraying sales data to predict the the quantity of a vartiety of product skus across several states, stores, and product categories.\n",
"\n",
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prerequisites\n",
"You'll need to create a compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1.0 Set up workspace, datastore, experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003526897
}
},
"outputs": [],
"source": [
"import azureml.core\n",
"from azureml.core import Workspace, Datastore\n",
"import pandas as pd\n",
"\n",
"# Set up your workspace\n",
"ws = Workspace.from_config()\n",
"ws.get_details()\n",
"\n",
"# Set up your datastores\n",
"dstore = ws.get_default_datastore()\n",
"\n",
"output = {}\n",
"output[\"SDK version\"] = azureml.core.VERSION\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Default datastore name\"] = dstore.name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose an experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003540729
}
},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment = Experiment(ws, \"automl-hts\")\n",
"\n",
"print(\"Experiment name: \" + experiment.name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2.0 Data\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"nteract": {
"transient": {
"deleting": false
}
}
},
"source": [
"### Upload local csv files to datastore\n",
"You can upload your train and inference csv files to the default datastore in your workspace. \n",
"\n",
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore.datastore?view=azure-ml-py) documentation on how to access data from Datastore."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"datastore_path = \"hts-sample\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"datastore = ws.get_default_datastore()\n",
"datastore"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create the TabularDatasets \n",
"\n",
"Datasets in Azure Machine Learning are references to specific data in a Datastore. The data can be retrieved as a [TabularDatasets](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py). We will read in the data as a pandas DataFrame, upload to the data store and register them to your Workspace using ```register_pandas_dataframe``` so they can be called as an input into the training pipeline. We will use the inference dataset as part of the forecasting pipeline. The step need only be completed once."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007017296
}
},
"outputs": [],
"source": [
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
"\n",
"registered_train = TabularDatasetFactory.register_pandas_dataframe(\n",
" pd.read_csv(\"Data/hts-sample-train.csv\"),\n",
" target=(datastore, \"hts-sample\"),\n",
" name=\"hts-sales-train\",\n",
")\n",
"registered_inference = TabularDatasetFactory.register_pandas_dataframe(\n",
" pd.read_csv(\"Data/hts-sample-test.csv\"),\n",
" target=(datastore, \"hts-sample\"),\n",
" name=\"hts-sales-test\",\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 3.0 Build the training pipeline\n",
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose a compute target\n",
"\n",
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
"\n",
"\\*\\*Creation of AmlCompute takes approximately 5 minutes.**\n",
"\n",
"If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007037308
}
},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"\n",
"# Name your cluster\n",
"compute_name = \"hts-compute\"\n",
"\n",
"\n",
"if compute_name in ws.compute_targets:\n",
" compute_target = ws.compute_targets[compute_name]\n",
" if compute_target and type(compute_target) is AmlCompute:\n",
" print(\"Found compute target: \" + compute_name)\n",
"else:\n",
" print(\"Creating a new compute target...\")\n",
" provisioning_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_D16S_V3\", max_nodes=20\n",
" )\n",
" # Create the compute target\n",
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
"\n",
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
" # If no min node count is provided it will use the scale settings for the cluster\n",
" compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
" )\n",
"\n",
" # For a more detailed view of current cluster status, use the 'status' property\n",
" print(compute_target.status.serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up training parameters\n",
"\n",
"This dictionary defines the AutoML and hierarchy settings. For this forecasting task we need to define several settings inncluding the name of the time column, the maximum forecast horizon, the hierarchy definition, and the level of the hierarchy at which to train.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **task** | forecasting |\n",
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i> |\n",
"| **blocked_models** | Blocked models won't be used by AutoML. |\n",
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
"| **label_column_name** | The name of the label column. |\n",
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
"| **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **hierarchy_column_names** | The names of columns that define the hierarchical structure of the data from highest level to most granular. |\n",
"| **training_level** | The level of the hierarchy to be used for training models. |\n",
"| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
"| **time_series_id_column_name** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
"| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n",
"| **model_explainability** | Flag to disable explaining the best automated ML model at the end of all training iterations. The default is True and will block non-explainable models which may impact the forecast accuracy. For more information, see [Interpretability: model explanations in automated machine learning](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-machine-learning-interpretability-automl). |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007061544
}
},
"outputs": [],
"source": [
"from azureml.train.automl.runtime._hts.hts_parameters import HTSTrainParameters\n",
"\n",
"model_explainability = True\n",
"\n",
"engineered_explanations = False\n",
"# Define your hierarchy. Adjust the settings below based on your dataset.\n",
"hierarchy = [\"state\", \"store_id\", \"product_category\", \"SKU\"]\n",
"training_level = \"SKU\"\n",
"\n",
"# Set your forecast parameters. Adjust the settings below based on your dataset.\n",
"time_column_name = \"date\"\n",
"label_column_name = \"quantity\"\n",
"forecast_horizon = 7\n",
"\n",
"\n",
"automl_settings = {\n",
" \"task\": \"forecasting\",\n",
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
" \"label_column_name\": label_column_name,\n",
" \"time_column_name\": time_column_name,\n",
" \"forecast_horizon\": forecast_horizon,\n",
" \"hierarchy_column_names\": hierarchy,\n",
" \"hierarchy_training_level\": training_level,\n",
" \"track_child_runs\": False,\n",
" \"pipeline_fetch_max_batch_size\": 15,\n",
" \"model_explainability\": model_explainability,\n",
" # The following settings are specific to this sample and should be adjusted according to your own needs.\n",
" \"iteration_timeout_minutes\": 10,\n",
" \"iterations\": 10,\n",
" \"n_cross_validations\": 2,\n",
"}\n",
"\n",
"hts_parameters = HTSTrainParameters(\n",
" automl_settings=automl_settings,\n",
" hierarchy_column_names=hierarchy,\n",
" training_level=training_level,\n",
" enable_engineered_explanations=engineered_explanations,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up hierarchy training pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Parallel run step is leveraged to train the hierarchy. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The `process_count_per_node` is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
"\n",
"* **experiment:** The experiment used for training.\n",
"* **train_data:** The tabular dataset to be used as input to the training run.\n",
"* **node_count:** The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long.\n",
"* **process_count_per_node:** Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node or optimal performance.\n",
"* **train_pipeline_parameters:** The set of configuration parameters defined in the previous section. \n",
"\n",
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"\n",
"\n",
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
" experiment=experiment,\n",
" train_data=registered_train,\n",
" compute_target=compute_target,\n",
" node_count=2,\n",
" process_count_per_node=8,\n",
" train_pipeline_parameters=hts_parameters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit the pipeline to run\n",
"Next we submit our pipeline to run. The whole training pipeline takes about 1h using a Standard_D16_V3 VM with our current ParallelRunConfig setting."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run = experiment.submit(training_pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Check the run status, if training_run is in completed state, continue to forecasting. If training_run is in another state, check the portal for failures."
]
},
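{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a minimal programmatic check (using the SDK's standard `Run.get_status()`), the cell below simply prints the terminal state of the pipeline run:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 'Completed' means it is safe to continue to the forecasting steps below.\n",
"print(\"Training run status:\", training_run.get_status())"
]
},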
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### [Optional] Get the explanations\n",
"First we need to download the explanations to the local disk."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"if model_explainability:\n",
" expl_output = training_run.get_pipeline_output(\"explanations\")\n",
" expl_output.download(\"training_explanations\")\n",
"else:\n",
" print(\n",
" \"Model explanations are available only if model_explainability is set to True.\"\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The explanations are downloaded to the \"training_explanations/azureml\" directory."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"if model_explainability:\n",
" explanations_dirrectory = os.listdir(\n",
" os.path.join(\"training_explanations\", \"azureml\")\n",
" )\n",
" if len(explanations_dirrectory) > 1:\n",
" print(\n",
" \"Warning! The directory contains multiple explanations, only the first one will be displayed.\"\n",
" )\n",
" print(\"The explanations are located at {}.\".format(explanations_dirrectory[0]))\n",
" # Now we will list all the explanations.\n",
" explanation_path = os.path.join(\n",
" \"training_explanations\",\n",
" \"azureml\",\n",
" explanations_dirrectory[0],\n",
" \"training_explanations\",\n",
" )\n",
" print(\"Available explanations\")\n",
" print(\"==============================\")\n",
" print(\"\\n\".join(os.listdir(explanation_path)))\n",
"else:\n",
" print(\n",
" \"Model explanations are available only if model_explainability is set to True.\"\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"View the explanations on \"state\" level."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import display\n",
"\n",
"explanation_type = \"raw\"\n",
"level = \"state\"\n",
"\n",
"if model_explainability:\n",
" display(\n",
" pd.read_csv(\n",
" os.path.join(explanation_path, \"{}_explanations_{}.csv\").format(\n",
" explanation_type, level\n",
" )\n",
" )\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 5.0 Forecasting\n",
"For hierarchical forecasting we need to provide the HTSInferenceParameters object.\n",
"#### HTSInferenceParameters arguments\n",
"* **hierarchy_forecast_level:** The default level of the hierarchy to produce prediction/forecast on.\n",
"* **allocation_method:** \\[Optional] The disaggregation method to use if the hierarchy forecast level specified is below the define hierarchy training level. <br><i>(average historical proportions) 'average_historical_proportions'</i><br><i>(proportions of the historical averages) 'proportions_of_historical_average'</i>\n",
"\n",
"#### get_many_models_batch_inference_steps arguments\n",
"* **experiment:** The experiment used for inference run.\n",
"* **inference_data:** The data to use for inferencing. It should be the same schema as used for training.\n",
"* **compute_target:** The compute target that runs the inference pipeline.\n",
"* **node_count:** The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku).\n",
"* **process_count_per_node:** The number of processes per node.\n",
"* **train_run_id:** \\[Optional] The run id of the hierarchy training, by default it is the latest successful training hts run in the experiment.\n",
"* **train_experiment_name:** \\[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline.\n",
"* **process_count_per_node:** \\[Optional] The number of processes per node, by default it's 4."
]
},
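{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an illustrative, hand-computed sketch of the two disaggregation methods (not SDK output): suppose a forecast of 12 at the training level must be split between series A with history [6, 2] and series B with history [2, 2].\n",
"\n",
"- *Average historical proportions* averages the per-period shares: A's share is mean(6/8, 2/4) = 0.625, so A is allocated 0.625 * 12 = 7.5.\n",
"- *Proportions of the historical average* uses the ratio of the series means: mean(A) = 4 and mean(B) = 2, so A's share is 4/6 (about 0.667) and A is allocated about 8.\n",
"\n",
"The two methods can therefore produce different allocations from the same history."
]
},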
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.automl.runtime._hts.hts_parameters import HTSInferenceParameters\n",
"\n",
"inference_parameters = HTSInferenceParameters(\n",
" hierarchy_forecast_level=\"store_id\", # The setting is specific to this dataset and should be changed based on your dataset.\n",
" allocation_method=\"proportions_of_historical_average\",\n",
")\n",
"\n",
"steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
" experiment=experiment,\n",
" inference_data=registered_inference,\n",
" compute_target=compute_target,\n",
" inference_pipeline_parameters=inference_parameters,\n",
" node_count=2,\n",
" process_count_per_node=8,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"inference_pipeline = Pipeline(ws, steps=steps)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_run = experiment.submit(inference_pipeline)\n",
"inference_run.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Retrieve results\n",
"\n",
"Forecast results can be retrieved through the following code. The prediction results summary and the actual predictions are downloaded in forecast_results folder"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"forecasts = inference_run.get_pipeline_output(\"forecasts\")\n",
"forecasts.download(\"forecast_results\")"
]
},
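{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a minimal sketch for inspecting the download (the exact file names inside the folder depend on the run, so the layout here is an assumption), the results can be listed and previewed with pandas:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import glob\n",
"import os\n",
"\n",
"import pandas as pd\n",
"\n",
"# List everything that was downloaded and preview the first CSV found, if any.\n",
"downloaded = glob.glob(os.path.join(\"forecast_results\", \"**\", \"*\"), recursive=True)\n",
"print(\"\\n\".join(downloaded))\n",
"csv_files = [f for f in downloaded if f.endswith(\".csv\")]\n",
"if csv_files:\n",
"    print(pd.read_csv(csv_files[0]).head())"
]
},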
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Resbumit the Pipeline\n",
"\n",
"The inference pipeline can be submitted with different configurations."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_run = experiment.submit(\n",
" inference_pipeline, pipeline_parameters={\"hierarchy_forecast_level\": \"state\"}\n",
")\n",
"inference_run.wait_for_completion(show_output=False)"
]
}
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"categories": [
"how-to-use-azureml",
"automated-machine-learning"
],
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-hierarchical-timeseries
dependencies:
- pip:
- azureml-sdk


View File

@@ -0,0 +1,3 @@
dependencies:
- pip:
- azureml-contrib-automl-pipeline-steps

View File

@@ -0,0 +1,122 @@
---
page_type: sample
languages:
- python
products:
- azure-machine-learning
description: Tutorial showing how to solve complex machine learning time series forecasting problems at scale by using Azure Automated ML and the Many Models solution accelerator.
---
![Many Models Solution Accelerator Banner](images/mmsa.png)
# Many Models Solution Accelerator
<!--
Guidelines on README format: https://review.docs.microsoft.com/help/onboard/admin/samples/concepts/readme-template?branch=master
Guidance on onboarding samples to docs.microsoft.com/samples: https://review.docs.microsoft.com/help/onboard/admin/samples/process/onboarding?branch=master
Taxonomies for products and languages: https://review.docs.microsoft.com/new-hope/information-architecture/metadata/taxonomies?branch=master
-->
In the real world, many problems can be too complex to be solved by a single machine learning model. Whether that is predicting sales for each individual store, building a predictive maintenance model for hundreds of oil wells, or tailoring an experience to individual users, building a model for each instance can lead to improved results on many machine learning problems.
This pattern is very common across a wide variety of industries and applicable to many real-world use cases. Below are some examples we have seen where this pattern is being used.
- Energy and utility companies building predictive maintenance models for thousands of oil wells, hundreds of wind turbines, or hundreds of smart meters
- Retail organizations building workforce optimization models for thousands of stores, campaign promotion propensity models, and price optimization models for the hundreds of thousands of products they sell
- Restaurant chains building demand forecasting models across thousands of restaurants
- Banks and financial institutions building cash replenishment models for ATMs or building personalized models for individuals
- Enterprises building revenue forecasting models at each division level
- Document management companies building text analytics and legal document search models per state
Azure Machine Learning (AML) makes it easy to train, operate, and manage hundreds or even thousands of models. This repo will walk you through the end-to-end process of creating a many models solution from training to scoring to monitoring.
## Prerequisites
To use this solution accelerator, all you need is access to an [Azure subscription](https://azure.microsoft.com/free/) and an [Azure Machine Learning Workspace](https://docs.microsoft.com/azure/machine-learning/how-to-manage-workspace) that you'll create below.
While it's not required, a basic understanding of Azure Machine Learning will be helpful for understanding the solution. The following resources can help introduce you to AML:
1. [Azure Machine Learning Overview](https://azure.microsoft.com/services/machine-learning/)
2. [Azure Machine Learning Tutorials](https://docs.microsoft.com/azure/machine-learning/tutorial-1st-experiment-sdk-setup)
3. [Azure Machine Learning Sample Notebooks on Github](https://github.com/Azure/azureml-examples)
## Getting started
### 1. Deploy Resources
Start by deploying the resources to Azure. The button below will deploy Azure Machine Learning and its related resources:
<a href="https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2Fmicrosoft%2Fsolution-accelerator-many-models%2Fmaster%2Fazuredeploy.json" target="_blank">
<img src="http://azuredeploy.net/deploybutton.png"/>
</a>
### 2. Configure Development Environment
Next you'll need to configure your [development environment](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment) for Azure Machine Learning. We recommend using a [Compute Instance](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment#compute-instance) as it's the fastest way to get up and running.
### 3. Run Notebooks
Once your development environment is set up, run through the Jupyter Notebooks sequentially following the steps outlined. By the end, you'll know how to train, score, and make predictions using the many models pattern on Azure Machine Learning.
![Sequence of Notebooks](./images/mmsa-overview.png)
## Contents
In this repo, you'll train and score a forecasting model for each orange juice brand and for each store at a (simulated) grocery chain. By the end, you'll have forecasted sales by using up to 11,973 models to predict sales for the next few weeks.
The data used in this sample is simulated based on the [Dominick's Orange Juice Dataset](http://www.cs.unitn.it/~taufer/QMMA/L10-OJ-Data.html#(1)), sales data from a Chicago area grocery store.
<img src="images/Flow_map.png" width="1000">
### Using Automated ML to train the models:
The [`auto-ml-forecasting-many-models.ipynb`](./auto-ml-forecasting-many-models.ipynb) notebook is a guided solution accelerator that demonstrates the steps from data preparation to model training and forecasting with the trained models, as well as operationalizing the solution.
## How-to videos
Watch these how-to videos for a step-by-step walk-through of the many models solution accelerator to learn how to set up your models using Automated ML.
### Automated ML
[![Watch the video](https://media.giphy.com/media/dWUKfameudyNGRnp1t/giphy.gif)](https://channel9.msdn.com/Shows/Docs-AI/Building-Large-Scale-Machine-Learning-Forecasting-Models-using-Azure-Machine-Learnings-Automated-ML)
## Key concepts
### ParallelRunStep
[ParallelRunStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.parallel_run_step.parallelrunstep?view=azure-ml-py) enables the parallel training of models and is commonly used for batch inferencing. This [document](https://docs.microsoft.com/azure/machine-learning/how-to-use-parallel-run-step) walks through some of the key concepts around ParallelRunStep.
### Pipelines
[Pipelines](https://docs.microsoft.com/azure/machine-learning/concept-ml-pipelines) allow you to create workflows in your machine learning projects. These workflows have a number of benefits including speed, simplicity, repeatability, and modularity.
### Automated Machine Learning
[Automated Machine Learning](https://docs.microsoft.com/azure/machine-learning/concept-automated-ml), also referred to as automated ML or AutoML, is the process of automating the time-consuming, iterative tasks of machine learning model development. It allows data scientists, analysts, and developers to build ML models at high scale, efficiency, and productivity, all while sustaining model quality.
### Other Concepts
In addition to ParallelRunStep, Pipelines, and Automated Machine Learning, you'll also be working with the following concepts: [workspace](https://docs.microsoft.com/azure/machine-learning/concept-workspace), [datasets](https://docs.microsoft.com/azure/machine-learning/concept-data#datasets), [compute targets](https://docs.microsoft.com/azure/machine-learning/concept-compute-target#train), [python script steps](https://docs.microsoft.com/python/api/azureml-pipeline-steps/azureml.pipeline.steps.python_script_step.pythonscriptstep?view=azure-ml-py), and [Azure Open Datasets](https://azure.microsoft.com/services/open-datasets/).
## Contributing
This project welcomes contributions and suggestions. To learn more visit the [contributing](../../../CONTRIBUTING.md) section.
Most contributions require you to agree to a Contributor License Agreement (CLA)
declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
When you submit a pull request, a CLA bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

View File

@@ -0,0 +1,746 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Many Models - Automated ML\n",
"**_Generate many models time series forecasts with Automated Machine Learning_**\n",
"\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For this notebook we are using a synthetic dataset portraying sales data to predict the the quantity of a vartiety of product skus across several states, stores, and product categories.\n",
"\n",
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prerequisites\n",
"You'll need to create a compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1.0 Set up workspace, datastore, experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003526897
}
},
"outputs": [],
"source": [
"import azureml.core\n",
"from azureml.core import Workspace, Datastore\n",
"import pandas as pd\n",
"\n",
"# Set up your workspace\n",
"ws = Workspace.from_config()\n",
"ws.get_details()\n",
"\n",
"# Set up your datastores\n",
"dstore = ws.get_default_datastore()\n",
"\n",
"output = {}\n",
"output[\"SDK version\"] = azureml.core.VERSION\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Default datastore name\"] = dstore.name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose an experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003540729
}
},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment = Experiment(ws, \"automl-many-models\")\n",
"\n",
"print(\"Experiment name: \" + experiment.name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2.0 Data\n",
"\n",
"This notebook uses simulated orange juice sales data to walk you through the process of training many models on Azure Machine Learning using Automated ML. \n",
"\n",
"The time series data used in this example was simulated based on the University of Chicago's Dominick's Finer Foods dataset which featured two years of sales of 3 different orange juice brands for individual stores. The full simulated dataset includes 3,991 stores with 3 orange juice brands each thus allowing 11,973 models to be trained to showcase the power of the many models pattern.\n",
"\n",
" \n",
"In this notebook, two datasets will be created: one with all 11,973 files and one with only 10 files that can be used to quickly test and debug. For each dataset, you'll be walked through the process of:\n",
"\n",
"1. Registering the blob container as a Datastore to the Workspace\n",
"2. Registering a tabular dataset to the Workspace"
]
},
{
"cell_type": "markdown",
"metadata": {
"nteract": {
"transient": {
"deleting": false
}
}
},
"source": [
"### 2.1 Data Preparation\n",
"The OJ data is available in the public blob container. The data is split to be used for training and for inferencing. For the current dataset, the data was split on time column ('WeekStarting') before and after '1992-5-28' .\n",
"\n",
"The container has\n",
"<ol>\n",
" <li><b>'oj-data-tabular'</b> and <b>'oj-inference-tabular'</b> folders that contains training and inference data respectively for the 11,973 models. </li>\n",
" <li>It also has <b>'oj-data-small-tabular'</b> and <b>'oj-inference-small-tabular'</b> folders that has training and inference data for 10 models.</li>\n",
"</ol>\n",
"\n",
"To create the [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) needed for the ParallelRunStep, you first need to register the blob container to the workspace."
]
},
{
"cell_type": "markdown",
"metadata": {
"nteract": {
"transient": {
"deleting": false
}
}
},
"source": [
"<b> To use your own data, put your own data in a blobstore folder. As shown it can be one file or multiple files. We can then register datastore using that blob as shown below.\n",
" \n",
"<h3> How sample data in blob store looks like</h3>\n",
"\n",
"['oj-data-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)</b>\n",
"![image-4.png](mm-1.png)\n",
"\n",
"['oj-inference-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
"![image-3.png](mm-2.png)\n",
"\n",
"['oj-data-small-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
"\n",
"![image-5.png](mm-3.png)\n",
"\n",
"['oj-inference-small-tabular'](https://ms.portal.azure.com/#blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)\n",
"![image-6.png](mm-4.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### 2.2 Register the blob container as DataStore\n",
"\n",
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
"\n",
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.\n",
"\n",
"In this next step, we will be registering blob storage as datastore to the Workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Datastore\n",
"\n",
"# Please change the following to point to your own blob container and pass in account_key\n",
"blob_datastore_name = \"automl_many_models\"\n",
"container_name = \"automl-sample-notebook-data\"\n",
"account_name = \"automlsamplenotebookdata\"\n",
"\n",
"oj_datastore = Datastore.register_azure_blob_container(\n",
" workspace=ws,\n",
" datastore_name=blob_datastore_name,\n",
" container_name=container_name,\n",
" account_name=account_name,\n",
" create_if_not_exists=True,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### 2.3 Using tabular datasets \n",
"\n",
"Now that the datastore is available from the Workspace, [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) can be created. Datasets in Azure Machine Learning are references to specific data in a Datastore. We are using TabularDataset, so that users who have their data which can be in one or many files (*.parquet or *.csv) and have not split up data according to group columns needed for training, can do so using out of box support for 'partiion_by' feature of TabularDataset shown in section 5.0 below."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007017296
}
},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"\n",
"ds_name_small = \"oj-data-small-tabular\"\n",
"input_ds_small = Dataset.Tabular.from_delimited_files(\n",
" path=oj_datastore.path(ds_name_small + \"/\"), validate=False\n",
")\n",
"\n",
"inference_name_small = \"oj-inference-small-tabular\"\n",
"inference_ds_small = Dataset.Tabular.from_delimited_files(\n",
" path=oj_datastore.path(inference_name_small + \"/\"), validate=False\n",
")"
]
},
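{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of 'partition_by' (illustrative only; the target path, dataset name, and partition keys here are assumptions for this dataset, and running it writes a partitioned copy of the data to the datastore):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical example: partition the small training dataset by the\n",
"# grouping columns used later for training. Uncomment to run.\n",
"# partitioned_ds = input_ds_small.partition_by(\n",
"#     partition_keys=[\"Store\", \"Brand\"],\n",
"#     target=(dstore, \"oj_partitioned/\"),\n",
"#     name=\"oj_data_small_partitioned\",\n",
"# )"
]
},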
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 3.0 Build the training pipeline\n",
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose a compute target\n",
"\n",
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
"\n",
"\\*\\*Creation of AmlCompute takes approximately 5 minutes.**\n",
"\n",
"If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007037308
}
},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"\n",
"# Name your cluster\n",
"compute_name = \"mm-compute\"\n",
"\n",
"\n",
"if compute_name in ws.compute_targets:\n",
" compute_target = ws.compute_targets[compute_name]\n",
" if compute_target and type(compute_target) is AmlCompute:\n",
" print(\"Found compute target: \" + compute_name)\n",
"else:\n",
" print(\"Creating a new compute target...\")\n",
" provisioning_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_D16S_V3\", max_nodes=20\n",
" )\n",
" # Create the compute target\n",
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
"\n",
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
" # If no min node count is provided it will use the scale settings for the cluster\n",
" compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
" )\n",
"\n",
" # For a more detailed view of current cluster status, use the 'status' property\n",
" print(compute_target.status.serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up training parameters\n",
"\n",
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings inncluding the name of the time column, the maximum forecast horizon, and the partition column name definition.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **task** | forecasting |\n",
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i> |\n",
"| **blocked_models** | Blocked models won't be used by AutoML. |\n",
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
"| **label_column_name** | The name of the label column. |\n",
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
"| **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
"| **time_series_id_column_name** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
"| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n",
"| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007061544
}
},
"outputs": [],
"source": [
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
" ManyModelsTrainParameters,\n",
")\n",
"\n",
"partition_column_names = [\"Store\", \"Brand\"]\n",
"automl_settings = {\n",
" \"task\": \"forecasting\",\n",
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
" \"iteration_timeout_minutes\": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n",
" \"iterations\": 15,\n",
" \"experiment_timeout_hours\": 0.25,\n",
" \"label_column_name\": \"Quantity\",\n",
" \"n_cross_validations\": 3,\n",
" \"time_column_name\": \"WeekStarting\",\n",
" \"drop_column_names\": \"Revenue\",\n",
" \"max_horizon\": 6,\n",
" \"grain_column_names\": partition_column_names,\n",
" \"track_child_runs\": False,\n",
"}\n",
"\n",
"mm_paramters = ManyModelsTrainParameters(\n",
" automl_settings=automl_settings, partition_column_names=partition_column_names\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up many models pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **experiment** | The experiment used for training. |\n",
"| **train_data** | The file dataset to be used as input to the training run. |\n",
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. |\n",
"| **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node or optimal performance. |\n",
"| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n",
"\n",
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"\n",
"\n",
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
" experiment=experiment,\n",
" train_data=input_ds_small,\n",
" compute_target=compute_target,\n",
" node_count=2,\n",
" process_count_per_node=8,\n",
" run_invocation_timeout=920,\n",
" train_pipeline_parameters=mm_paramters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit the pipeline to run\n",
"Next we submit our pipeline to run. The whole training pipeline takes about 40m using a STANDARD_D16S_V3 VM with our current ParallelRunConfig setting."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run = experiment.submit(training_pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Check the run status, if training_run is in completed state, continue to forecasting. If training_run is in another state, check the portal for failures."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 5.0 Publish and schedule the train pipeline (Optional)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 5.1 Publish the pipeline\n",
"\n",
"Once you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipeline#publish-a-pipeline) for additional information on publishing and calling pipelines."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# published_pipeline = training_pipeline.publish(name = 'automl_train_many_models',\n",
"# description = 'train many models',\n",
"# version = '1',\n",
"# continue_on_step_failure = False)"
]
},
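{
"cell_type": "markdown",
"metadata": {},
"source": [
"Once published, the pipeline can be re-run later by id; a minimal sketch using the SDK's PublishedPipeline class (it assumes the publish cell above was uncommented and run, so `published_pipeline` exists):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# from azureml.pipeline.core import PublishedPipeline\n",
"\n",
"# # Look the pipeline up by id and submit it against the same experiment.\n",
"# fetched_pipeline = PublishedPipeline.get(ws, id=published_pipeline.id)\n",
"# pipeline_run = fetched_pipeline.submit(ws, experiment_name=experiment.name)"
]
},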
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 7.2 Schedule the pipeline\n",
"You can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain models every month or based on another trigger such as data drift."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# from azureml.pipeline.core import Schedule, ScheduleRecurrence\n",
"\n",
"# training_pipeline_id = published_pipeline.id\n",
"\n",
"# recurrence = ScheduleRecurrence(frequency=\"Month\", interval=1, start_time=\"2020-01-01T09:00:00\")\n",
"# recurring_schedule = Schedule.create(ws, name=\"automl_training_recurring_schedule\",\n",
"# description=\"Schedule Training Pipeline to run on the first day of every month\",\n",
"# pipeline_id=training_pipeline_id,\n",
"# experiment_name=experiment.name,\n",
"# recurrence=recurrence)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 6.0 Forecasting"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up output dataset for inference data\n",
"Output of inference can be represented as [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object and OutputFileDatasetConfig can be registered as a dataset. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data import OutputFileDatasetConfig\n",
"\n",
"output_inference_data_ds = OutputFileDatasetConfig(\n",
" name=\"many_models_inference_output\", destination=(dstore, \"oj/inference_data/\")\n",
").register_on_complete(name=\"oj_inference_data_ds\")"
]
},
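{
"cell_type": "markdown",
"metadata": {},
"source": [
"After an inference run completes, the registered output can be fetched later by name; a minimal sketch (this only works once a run has actually registered the dataset):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# from azureml.core import Dataset\n",
"\n",
"# # Retrieves the file dataset registered by register_on_complete above\n",
"# # and downloads its contents locally.\n",
"# registered_output = Dataset.get_by_name(ws, name=\"oj_inference_data_ds\")\n",
"# registered_output.download(target_path=\"registered_inference_output\")"
]
},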
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For many models we need to provide the ManyModelsInferenceParameters object.\n",
"\n",
"#### ManyModelsInferenceParameters arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **partition_column_names** | List of column names that identifies groups. |\n",
"| **target_column_name** | \\[Optional] Column name only if the inference dataset has the target. |\n",
"| **time_column_name** | \\[Optional] Column name only if it is timeseries. |\n",
"| **many_models_run_id** | \\[Optional] Many models run id where models were trained. |\n",
"\n",
"#### get_many_models_batch_inference_steps arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **experiment** | The experiment used for inference run. |\n",
"| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n",
"| **compute_target** The compute target that runs the inference pipeline.|\n",
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n",
"| **process_count_per_node** The number of processes per node.\n",
"| **train_run_id** | \\[Optional] The run id of the hierarchy training, by default it is the latest successful training many model run in the experiment. |\n",
"| **train_experiment_name** | \\[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n",
"| **process_count_per_node** | \\[Optional] The number of processes per node, by default it's 4. |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
" ManyModelsInferenceParameters,\n",
")\n",
"\n",
"mm_parameters = ManyModelsInferenceParameters(\n",
" partition_column_names=[\"Store\", \"Brand\"],\n",
" time_column_name=\"WeekStarting\",\n",
" target_column_name=\"Quantity\",\n",
")\n",
"\n",
"inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
" experiment=experiment,\n",
" inference_data=inference_ds_small,\n",
" node_count=2,\n",
" process_count_per_node=8,\n",
" compute_target=compute_target,\n",
" run_invocation_timeout=300,\n",
" output_datastore=output_inference_data_ds,\n",
" train_run_id=training_run.id,\n",
" train_experiment_name=training_run.experiment.name,\n",
" inference_pipeline_parameters=mm_parameters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"inference_pipeline = Pipeline(ws, steps=inference_steps)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_run = experiment.submit(inference_pipeline)\n",
"inference_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Retrieve results\n",
"\n",
"The forecasting pipeline forecasts the orange juice quantity for a Store by Brand. The pipeline returns one file with the predictions for each store and outputs the result to the forecasting_output Blob container. The details of the blob container is listed in 'forecasting_output.txt' under Outputs+logs. \n",
"\n",
"The following code snippet:\n",
"1. Downloads the contents of the output folder that is passed in the parallel run step \n",
"2. Reads the parallel_run_step.txt file that has the predictions as pandas dataframe and \n",
"3. Displays the top 10 rows of the predictions"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline\n",
"\n",
"forecasting_results_name = \"forecasting_results\"\n",
"forecasting_output_name = \"many_models_inference_output\"\n",
"forecast_file = get_output_from_mm_pipeline(\n",
" inference_run, forecasting_results_name, forecasting_output_name\n",
")\n",
"df = pd.read_csv(forecast_file, delimiter=\" \", header=None)\n",
"df.columns = [\n",
" \"Week Starting\",\n",
" \"Store\",\n",
" \"Brand\",\n",
" \"Quantity\",\n",
" \"Advert\",\n",
" \"Price\",\n",
" \"Revenue\",\n",
" \"Predicted\",\n",
"]\n",
"print(\n",
" \"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\"\n",
")\n",
"df.head(10)"
]
},
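{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check (an illustrative sketch, assuming the 'Quantity' column in the output holds the actuals for the scored rows), you can compare predictions to actuals per series:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Mean absolute error per Store/Brand series, computed from the output above.\n",
"errors = (\n",
"    df.assign(abs_error=(df[\"Quantity\"] - df[\"Predicted\"]).abs())\n",
"    .groupby([\"Store\", \"Brand\"])[\"abs_error\"]\n",
"    .mean()\n",
"    .sort_values(ascending=False)\n",
")\n",
"errors.head(10)"
]
},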
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 7.0 Publish and schedule the inference pipeline (Optional)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 7.1 Publish the pipeline\n",
"\n",
"Once you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipeline#publish-a-pipeline) for additional information on publishing and calling pipelines."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# published_pipeline_inf = inference_pipeline.publish(name = 'automl_forecast_many_models',\n",
"# description = 'forecast many models',\n",
"# version = '1',\n",
"# continue_on_step_failure = False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 7.2 Schedule the pipeline\n",
"You can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain or forecast models every month or based on another trigger such as data drift."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# from azureml.pipeline.core import Schedule, ScheduleRecurrence\n",
"\n",
"# forecasting_pipeline_id = published_pipeline.id\n",
"\n",
"# recurrence = ScheduleRecurrence(frequency=\"Month\", interval=1, start_time=\"2020-01-01T09:00:00\")\n",
"# recurring_schedule = Schedule.create(ws, name=\"automl_forecasting_recurring_schedule\",\n",
"# description=\"Schedule Forecasting Pipeline to run on the first day of every week\",\n",
"# pipeline_id=forecasting_pipeline_id,\n",
"# experiment_name=experiment.name,\n",
"# recurrence=recurrence)"
]
}
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"categories": [
"how-to-use-azureml",
"automated-machine-learning"
],
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-many-models
dependencies:
- pip:
- azureml-sdk


View File

@@ -0,0 +1,3 @@
dependencies:
- pip:
- azureml-contrib-automl-pipeline-steps

View File

@@ -58,21 +58,22 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"import azureml.core\n", "import json\n",
"import pandas as pd\n",
"import logging\n", "import logging\n",
"\n", "\n",
"from azureml.core.workspace import Workspace\n", "import azureml.core\n",
"import pandas as pd\n",
"from azureml.automl.core.featurization import FeaturizationConfig\n",
"from azureml.core.experiment import Experiment\n", "from azureml.core.experiment import Experiment\n",
"from azureml.train.automl import AutoMLConfig\n", "from azureml.core.workspace import Workspace\n",
"from azureml.automl.core.featurization import FeaturizationConfig" "from azureml.train.automl import AutoMLConfig"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK." "This notebook is compatible with Azure ML SDK version 1.35.0 or later."
] ]
}, },
{ {
@@ -81,7 +82,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.34.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },
@@ -101,19 +101,19 @@
"ws = Workspace.from_config()\n", "ws = Workspace.from_config()\n",
"\n", "\n",
"# choose a name for the run history container in the workspace\n", "# choose a name for the run history container in the workspace\n",
"experiment_name = 'automl-ojforecasting'\n", "experiment_name = \"automl-ojforecasting\"\n",
"\n", "\n",
"experiment = Experiment(ws, experiment_name)\n", "experiment = Experiment(ws, experiment_name)\n",
"\n", "\n",
"output = {}\n", "output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n", "output[\"Subscription ID\"] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n", "output[\"Workspace\"] = ws.name\n",
"output['SKU'] = ws.sku\n", "output[\"SKU\"] = ws.sku\n",
"output['Resource Group'] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output['Location'] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output['Run History Name'] = experiment_name\n", "output[\"Run History Name\"] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"
] ]
}, },
@@ -146,10 +146,11 @@
"# Verify that cluster does not exist already\n", "# Verify that cluster does not exist already\n",
"try:\n", "try:\n",
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n", " compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
" print('Found existing cluster, use it.')\n", " print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n", "except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D12_V2',\n", " compute_config = AmlCompute.provisioning_configuration(\n",
" max_nodes=6)\n", " vm_size=\"STANDARD_D12_V2\", max_nodes=6\n",
" )\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n", " compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n", "\n",
"compute_target.wait_for_completion(show_output=True)" "compute_target.wait_for_completion(show_output=True)"
@@ -169,11 +170,11 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"time_column_name = 'WeekStarting'\n", "time_column_name = \"WeekStarting\"\n",
"data = pd.read_csv(\"dominicks_OJ.csv\", parse_dates=[time_column_name])\n", "data = pd.read_csv(\"dominicks_OJ.csv\", parse_dates=[time_column_name])\n",
"\n", "\n",
"# Drop the columns 'logQuantity' as it is a leaky feature.\n", "# Drop the columns 'logQuantity' as it is a leaky feature.\n",
"data.drop('logQuantity', axis=1, inplace=True)\n", "data.drop(\"logQuantity\", axis=1, inplace=True)\n",
"\n", "\n",
"data.head()" "data.head()"
] ]
@@ -193,9 +194,9 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"time_series_id_column_names = ['Store', 'Brand']\n", "time_series_id_column_names = [\"Store\", \"Brand\"]\n",
"nseries = data.groupby(time_series_id_column_names).ngroups\n", "nseries = data.groupby(time_series_id_column_names).ngroups\n",
"print('Data contains {0} individual time-series.'.format(nseries))" "print(\"Data contains {0} individual time-series.\".format(nseries))"
] ]
}, },
{ {
@@ -214,7 +215,7 @@
"use_stores = [2, 5, 8]\n", "use_stores = [2, 5, 8]\n",
"data_subset = data[data.Store.isin(use_stores)]\n", "data_subset = data[data.Store.isin(use_stores)]\n",
"nseries = data_subset.groupby(time_series_id_column_names).ngroups\n", "nseries = data_subset.groupby(time_series_id_column_names).ngroups\n",
"print('Data subset contains {0} individual time-series.'.format(nseries))" "print(\"Data subset contains {0} individual time-series.\".format(nseries))"
] ]
}, },
{ {
@@ -233,14 +234,17 @@
"source": [ "source": [
"n_test_periods = 20\n", "n_test_periods = 20\n",
"\n", "\n",
"\n",
"def split_last_n_by_series_id(df, n):\n", "def split_last_n_by_series_id(df, n):\n",
" \"\"\"Group df by series identifiers and split on last n rows for each group.\"\"\"\n", " \"\"\"Group df by series identifiers and split on last n rows for each group.\"\"\"\n",
" df_grouped = (df.sort_values(time_column_name) # Sort by ascending time\n", " df_grouped = df.sort_values(time_column_name).groupby( # Sort by ascending time\n",
" .groupby(time_series_id_column_names, group_keys=False))\n", " time_series_id_column_names, group_keys=False\n",
" )\n",
" df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-n])\n", " df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-n])\n",
" df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-n:])\n", " df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-n:])\n",
" return df_head, df_tail\n", " return df_head, df_tail\n",
"\n", "\n",
"\n",
"train, test = split_last_n_by_series_id(data_subset, n_test_periods)" "train, test = split_last_n_by_series_id(data_subset, n_test_periods)"
] ]
}, },
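Editor's note: to make the split behavior concrete, here is a quick sanity check on a toy frame. This is a sketch that assumes the `time_column_name` and `time_series_id_column_names` values defined earlier in the notebook; the toy data itself is invented for illustration.

import pandas as pd

toy = pd.DataFrame(
    {
        "WeekStarting": list(pd.date_range("1990-06-14", periods=4, freq="W-THU")) * 2,
        "Store": [2] * 4 + [5] * 4,
        "Brand": ["tropicana"] * 8,
        "Quantity": range(8),
    }
)
head, tail = split_last_n_by_series_id(toy, 1)
# head keeps all but the last week of each (Store, Brand) series;
# tail keeps the final week of each series.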
@@ -258,18 +262,15 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"train.to_csv (r'./dominicks_OJ_train.csv', index = None, header=True)\n", "from azureml.data.dataset_factory import TabularDatasetFactory\n",
"test.to_csv (r'./dominicks_OJ_test.csv', index = None, header=True)" "\n",
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"datastore = ws.get_default_datastore()\n", "datastore = ws.get_default_datastore()\n",
"datastore.upload_files(files = ['./dominicks_OJ_train.csv', './dominicks_OJ_test.csv'], target_path = 'dataset/', overwrite = True,show_progress = True)" "train_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
" train, target=(datastore, \"dataset/\"), name=\"dominicks_OJ_train\"\n",
")\n",
"test_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
" test, target=(datastore, \"dataset/\"), name=\"dominicks_OJ_test\"\n",
")"
] ]
}, },
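Editor's note: `register_pandas_dataframe` both uploads the frames to the datastore and registers them as named datasets, so later sessions can fetch them by name instead of re-uploading. A minimal sketch (the latest registered version is returned by default):

from azureml.core import Dataset

train_dataset = Dataset.get_by_name(ws, name="dominicks_OJ_train")
test_dataset = Dataset.get_by_name(ws, name="dominicks_OJ_test")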
{ {
@@ -279,17 +280,6 @@
"### Create dataset for training" "### Create dataset for training"
] ]
}, },
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.dataset import Dataset\n",
"train_dataset = Dataset.Tabular.from_delimited_files(path=datastore.path('dataset/dominicks_OJ_train.csv'))\n",
"test_dataset = Dataset.Tabular.from_delimited_files(path=datastore.path('dataset/dominicks_OJ_test.csv'))"
]
},
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
@@ -323,7 +313,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"target_column_name = 'Quantity'" "target_column_name = \"Quantity\""
] ]
}, },
{ {
@@ -351,13 +341,17 @@
"source": [ "source": [
"featurization_config = FeaturizationConfig()\n", "featurization_config = FeaturizationConfig()\n",
"# Force the CPWVOL5 feature to be numeric type.\n", "# Force the CPWVOL5 feature to be numeric type.\n",
"featurization_config.add_column_purpose('CPWVOL5', 'Numeric')\n", "featurization_config.add_column_purpose(\"CPWVOL5\", \"Numeric\")\n",
"# Fill missing values in the target column, Quantity, with zeros.\n", "# Fill missing values in the target column, Quantity, with zeros.\n",
"featurization_config.add_transformer_params('Imputer', ['Quantity'], {\"strategy\": \"constant\", \"fill_value\": 0})\n", "featurization_config.add_transformer_params(\n",
" \"Imputer\", [\"Quantity\"], {\"strategy\": \"constant\", \"fill_value\": 0}\n",
")\n",
"# Fill missing values in the INCOME column with median value.\n", "# Fill missing values in the INCOME column with median value.\n",
"featurization_config.add_transformer_params('Imputer', ['INCOME'], {\"strategy\": \"median\"})\n", "featurization_config.add_transformer_params(\n",
" \"Imputer\", [\"INCOME\"], {\"strategy\": \"median\"}\n",
")\n",
"# Fill missing values in the Price column with forward fill (last value carried forward).\n", "# Fill missing values in the Price column with forward fill (last value carried forward).\n",
"featurization_config.add_transformer_params('Imputer', ['Price'], {\"strategy\": \"ffill\"})" "featurization_config.add_transformer_params(\"Imputer\", [\"Price\"], {\"strategy\": \"ffill\"})"
] ]
}, },
{ {
@@ -423,16 +417,18 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n", "from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n",
"forecasting_parameters = ForecastingParameters(\n", "forecasting_parameters = ForecastingParameters(\n",
" time_column_name=time_column_name,\n", " time_column_name=time_column_name,\n",
" forecast_horizon=n_test_periods,\n", " forecast_horizon=n_test_periods,\n",
" time_series_id_column_names=time_series_id_column_names,\n", " time_series_id_column_names=time_series_id_column_names,\n",
" freq='W-THU' # Set the forecast frequency to be weekly (start on each Thursday)\n", " freq=\"W-THU\", # Set the forecast frequency to be weekly (start on each Thursday)\n",
")\n", ")\n",
"\n", "\n",
"automl_config = AutoMLConfig(task='forecasting',\n", "automl_config = AutoMLConfig(\n",
" debug_log='automl_oj_sales_errors.log',\n", " task=\"forecasting\",\n",
" primary_metric='normalized_mean_absolute_error',\n", " debug_log=\"automl_oj_sales_errors.log\",\n",
" primary_metric=\"normalized_mean_absolute_error\",\n",
" experiment_timeout_hours=0.25,\n", " experiment_timeout_hours=0.25,\n",
" training_data=train_dataset,\n", " training_data=train_dataset,\n",
" label_column_name=target_column_name,\n", " label_column_name=target_column_name,\n",
@@ -442,7 +438,8 @@
" n_cross_validations=3,\n", " n_cross_validations=3,\n",
" verbosity=logging.INFO,\n", " verbosity=logging.INFO,\n",
" max_cores_per_iteration=-1,\n", " max_cores_per_iteration=-1,\n",
" forecasting_parameters=forecasting_parameters)" " forecasting_parameters=forecasting_parameters,\n",
")"
] ]
}, },
{ {
@@ -475,8 +472,8 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"### Retrieve the Best Model\n", "### Retrieve the Best Run details\n",
"Each run within an Experiment stores serialized (i.e. pickled) pipelines from the AutoML iterations. We can now retrieve the pipeline with the best performance on the validation dataset:" "Below we retrieve the best Run object from among all the runs in the experiment."
] ]
}, },
{ {
@@ -485,9 +482,9 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"best_run, fitted_model = remote_run.get_output()\n", "best_run = remote_run.get_best_child()\n",
"print(fitted_model.steps)\n", "model_name = best_run.properties[\"model_name\"]\n",
"model_name = best_run.properties['model_name']" "best_run"
] ]
}, },
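Editor's note: if the fitted pipeline itself is still needed locally (`get_output()` previously returned it), it can be pulled from the best run's artifacts. A sketch assuming the model is logged at `outputs/model.pkl`, the same path the remote inference helper below downloads:

import joblib

best_run.download_file("outputs/model.pkl", "model.pkl")
fitted_model = joblib.load("model.pkl")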
{ {
@@ -505,16 +502,26 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"custom_featurizer = fitted_model.named_steps['timeseriestransformer']" "# Download the featurization summary JSON file locally\n",
] "best_run.download_file(\n",
}, " \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
{ ")\n",
"cell_type": "code", "\n",
"execution_count": null, "# Render the JSON as a pandas DataFrame\n",
"metadata": {}, "with open(\"featurization_summary.json\", \"r\") as f:\n",
"outputs": [], " records = json.load(f)\n",
"source": [ "fs = pd.DataFrame.from_records(records)\n",
"custom_featurizer.get_featurization_summary()" "\n",
"# View a summary of the featurization\n",
"fs[\n",
" [\n",
" \"RawFeatureName\",\n",
" \"TypeDetected\",\n",
" \"Dropped\",\n",
" \"EngineeredFeatureCount\",\n",
" \"Transformations\",\n",
" ]\n",
"]"
] ]
}, },
{ {
@@ -541,7 +548,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"### Retreiving forecasts from the model\n", "### Retrieving forecasts from the model\n",
"We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and expecuted on the remote compute." "We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and expecuted on the remote compute."
] ]
}, },
@@ -559,15 +566,18 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"from run_forecast import run_remote_inference\n", "from run_forecast import run_remote_inference\n",
"remote_run_infer = run_remote_inference(test_experiment=test_experiment, \n", "\n",
"remote_run_infer = run_remote_inference(\n",
" test_experiment=test_experiment,\n",
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" train_run=best_run,\n", " train_run=best_run,\n",
" test_dataset=test_dataset,\n", " test_dataset=test_dataset,\n",
" target_column_name=target_column_name)\n", " target_column_name=target_column_name,\n",
")\n",
"remote_run_infer.wait_for_completion(show_output=False)\n", "remote_run_infer.wait_for_completion(show_output=False)\n",
"\n", "\n",
"# download the forecast file to the local machine\n", "# download the forecast file to the local machine\n",
"remote_run_infer.download_file('outputs/predictions.csv', 'predictions.csv')" "remote_run_infer.download_file(\"outputs/predictions.csv\", \"predictions.csv\")"
] ]
}, },
{ {
@@ -588,7 +598,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# load forecast data frame\n", "# load forecast data frame\n",
"fcst_df = pd.read_csv('predictions.csv', parse_dates=[time_column_name])\n", "fcst_df = pd.read_csv(\"predictions.csv\", parse_dates=[time_column_name])\n",
"fcst_df.head()" "fcst_df.head()"
] ]
}, },
@@ -605,18 +615,23 @@
"# use automl scoring module\n", "# use automl scoring module\n",
"scores = scoring.score_regression(\n", "scores = scoring.score_regression(\n",
" y_test=fcst_df[target_column_name],\n", " y_test=fcst_df[target_column_name],\n",
" y_pred=fcst_df['predicted'],\n", " y_pred=fcst_df[\"predicted\"],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n", " metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
")\n",
"\n", "\n",
"print(\"[Test data scores]\\n\")\n", "print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items(): \n", "for key, value in scores.items():\n",
" print('{}: {:.3f}'.format(key, value))\n", " print(\"{}: {:.3f}\".format(key, value))\n",
" \n", "\n",
"# Plot outputs\n", "# Plot outputs\n",
"%matplotlib inline\n", "%matplotlib inline\n",
"test_pred = plt.scatter(fcst_df[target_column_name], fcst_df['predicted'], color='b')\n", "test_pred = plt.scatter(fcst_df[target_column_name], fcst_df[\"predicted\"], color=\"b\")\n",
"test_test = plt.scatter(fcst_df[target_column_name], fcst_df[target_column_name], color='g')\n", "test_test = plt.scatter(\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n", " fcst_df[target_column_name], fcst_df[target_column_name], color=\"g\"\n",
")\n",
"plt.legend(\n",
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n",
"plt.show()" "plt.show()"
] ]
}, },
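Editor's note: for intuition about the primary metric used above, normalized_mean_absolute_error can be reproduced by hand. This sketch assumes AutoML's convention of scaling MAE by the range of the actuals:

import numpy as np

y_true = fcst_df[target_column_name].values
y_pred = fcst_df["predicted"].values
# MAE scaled by the range of the actuals (assumed normalization).
nmae = np.abs(y_true - y_pred).mean() / (y_true.max() - y_true.min())
print("normalized_mean_absolute_error (by hand): {:.3f}".format(nmae))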
@@ -640,9 +655,11 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"description = 'AutoML OJ forecaster'\n", "description = \"AutoML OJ forecaster\"\n",
"tags = None\n", "tags = None\n",
"model = remote_run.register_model(model_name = model_name, description = description, tags = tags)\n", "model = remote_run.register_model(\n",
" model_name=model_name, description=description, tags=tags\n",
")\n",
"\n", "\n",
"print(remote_run.model_id)" "print(remote_run.model_id)"
] ]
@@ -662,8 +679,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"script_file_name = 'score_fcast.py'\n", "script_file_name = \"score_fcast.py\"\n",
"best_run.download_file('outputs/scoring_file_v_1_0_0.py', script_file_name)" "best_run.download_file(\"outputs/scoring_file_v_1_0_0.py\", script_file_name)"
] ]
}, },
{ {
@@ -684,15 +701,18 @@
"from azureml.core.webservice import Webservice\n", "from azureml.core.webservice import Webservice\n",
"from azureml.core.model import Model\n", "from azureml.core.model import Model\n",
"\n", "\n",
"inference_config = InferenceConfig(environment = best_run.get_environment(), \n", "inference_config = InferenceConfig(\n",
" entry_script = script_file_name)\n", " environment=best_run.get_environment(), entry_script=script_file_name\n",
")\n",
"\n", "\n",
"aciconfig = AciWebservice.deploy_configuration(cpu_cores = 2, \n", "aciconfig = AciWebservice.deploy_configuration(\n",
" memory_gb = 4, \n", " cpu_cores=2,\n",
" tags = {'type': \"automl-forecasting\"},\n", " memory_gb=4,\n",
" description = \"Automl forecasting sample service\")\n", " tags={\"type\": \"automl-forecasting\"},\n",
" description=\"Automl forecasting sample service\",\n",
")\n",
"\n", "\n",
"aci_service_name = 'automl-oj-forecast-01'\n", "aci_service_name = \"automl-oj-forecast-01\"\n",
"print(aci_service_name)\n", "print(aci_service_name)\n",
"aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)\n", "aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)\n",
"aci_service.wait_for_deployment(True)\n", "aci_service.wait_for_deployment(True)\n",
@@ -722,20 +742,27 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"import json\n", "import json\n",
"\n",
"X_query = test.copy()\n", "X_query = test.copy()\n",
"X_query.pop(target_column_name)\n", "X_query.pop(target_column_name)\n",
"# We have to convert datetime to string, because Timestamps cannot be serialized to JSON.\n", "# We have to convert datetime to string, because Timestamps cannot be serialized to JSON.\n",
"X_query[time_column_name] = X_query[time_column_name].astype(str)\n", "X_query[time_column_name] = X_query[time_column_name].astype(str)\n",
"# The Service object accept the complex dictionary, which is internally converted to JSON string.\n", "# The Service object accept the complex dictionary, which is internally converted to JSON string.\n",
"# The section 'data' contains the data frame in the form of dictionary.\n", "# The section 'data' contains the data frame in the form of dictionary.\n",
"test_sample = json.dumps({\"data\": json.loads(X_query.to_json(orient=\"records\"))})\n", "sample_quantiles = [0.025, 0.975]\n",
"response = aci_service.run(input_data = test_sample)\n", "test_sample = json.dumps(\n",
" {\"data\": X_query.to_dict(orient=\"records\"), \"quantiles\": sample_quantiles}\n",
")\n",
"response = aci_service.run(input_data=test_sample)\n",
"# translate from networkese to datascientese\n", "# translate from networkese to datascientese\n",
"try: \n", "try:\n",
" res_dict = json.loads(response)\n", " res_dict = json.loads(response)\n",
" y_fcst_all = pd.DataFrame(res_dict['index'])\n", " y_fcst_all = pd.DataFrame(res_dict[\"index\"])\n",
" y_fcst_all[time_column_name] = pd.to_datetime(y_fcst_all[time_column_name], unit = 'ms')\n", " y_fcst_all[time_column_name] = pd.to_datetime(\n",
" y_fcst_all['forecast'] = res_dict['forecast'] \n", " y_fcst_all[time_column_name], unit=\"ms\"\n",
" )\n",
" y_fcst_all[\"forecast\"] = res_dict[\"forecast\"]\n",
" y_fcst_all[\"prediction_interval\"] = res_dict[\"prediction_interval\"]\n",
"except:\n", "except:\n",
" print(res_dict)" " print(res_dict)"
] ]
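Editor's note: assuming the service formats `prediction_interval` as strings of the form "[lo, hi]" (the same format the forecasting script above builds with `"[{}, {}]".format(...)`), the bounds can be split back into numeric columns:

bounds = y_fcst_all["prediction_interval"].str.strip("[]").str.split(",", expand=True)
y_fcst_all["pi_lower"] = bounds[0].astype(float)
y_fcst_all["pi_upper"] = bounds[1].astype(float)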
@@ -762,7 +789,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"serv = Webservice(ws, 'automl-oj-forecast-01')\n", "serv = Webservice(ws, \"automl-oj-forecast-01\")\n",
"serv.delete() # don't do it accidentally" "serv.delete() # don't do it accidentally"
] ]
} }
@@ -791,9 +818,9 @@
"friendly_name": "Forecasting orange juice sales with deployment", "friendly_name": "Forecasting orange juice sales with deployment",
"index_order": 1, "index_order": 1,
"kernelspec": { "kernelspec": {
"display_name": "Python 3.6", "display_name": "Python 3.6 - AzureML",
"language": "python", "language": "python",
"name": "python36" "name": "python3-azureml"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {


@@ -5,62 +5,20 @@ compute instance.
""" """
import argparse import argparse
import pandas as pd
import numpy as np
from azureml.core import Dataset, Run from azureml.core import Dataset, Run
from azureml.automl.core.shared.constants import TimeSeriesInternal
from sklearn.externals import joblib from sklearn.externals import joblib
from pandas.tseries.frequencies import to_offset from pandas.tseries.frequencies import to_offset
def align_outputs(y_predicted, X_trans, X_test, y_test, target_column_name,
predicted_column_name='predicted',
horizon_colname='horizon_origin'):
"""
Demonstrates how to get the output aligned to the inputs
using pandas indexes. Helps understand what happened if
the output's shape differs from the input shape, or if
the data got re-sorted by time and grain during forecasting.
Typical causes of misalignment are:
* we predicted some periods that were missing in actuals -> drop from eval
* model was asked to predict past max_horizon -> increase max horizon
* data at start of X_test was needed for lags -> provide previous periods
"""
if (horizon_colname in X_trans):
df_fcst = pd.DataFrame({predicted_column_name: y_predicted,
horizon_colname: X_trans[horizon_colname]})
else:
df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
# y and X outputs are aligned by forecast() function contract
df_fcst.index = X_trans.index
# align original X_test to y_test
X_test_full = X_test.copy()
X_test_full[target_column_name] = y_test
# X_test_full's index does not include origin, so reset for merge
df_fcst.reset_index(inplace=True)
X_test_full = X_test_full.reset_index().drop(columns='index')
together = df_fcst.merge(X_test_full, how='right')
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = together[together[[target_column_name,
predicted_column_name]].notnull().all(axis=1)]
return(clean)
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument( parser.add_argument(
'--target_column_name', type=str, dest='target_column_name', "--target_column_name",
help='Target Column Name') type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument( parser.add_argument(
'--test_dataset', type=str, dest='test_dataset', "--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
help='Test Dataset') )
args = parser.parse_args() args = parser.parse_args()
target_column_name = args.target_column_name target_column_name = args.target_column_name
@@ -76,14 +34,28 @@ X_test = test_dataset.to_pandas_dataframe().reset_index(drop=True)
y_test = X_test.pop(target_column_name).values y_test = X_test.pop(target_column_name).values
# generate forecast # generate forecast
fitted_model = joblib.load('model.pkl') fitted_model = joblib.load("model.pkl")
y_predictions, X_trans = fitted_model.forecast(X_test) # The default quantile values below give a 95% prediction interval around the median
quantiles = [0.025, 0.5, 0.975]
predicted_column_name = "predicted"
PI = "prediction_interval"
fitted_model.quantiles = quantiles
pred_quantiles = fitted_model.forecast_quantiles(X_test)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
)
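# Note: forecast_quantiles returns a data frame with one column per requested
# quantile (keyed by the float value); the 0.5 column serves as the point forecast.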
X_test[target_column_name] = y_test
X_test[PI] = pred_quantiles[PI]
X_test[predicted_column_name] = pred_quantiles[0.5]
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = X_test[
X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
# align output file_name = "outputs/predictions.csv"
df_all = align_outputs(y_predictions, X_trans, X_test, y_test, target_column_name) export_csv = clean.to_csv(file_name, header=True, index=False)  # write without the row index
file_name = 'outputs/predictions.csv'
export_csv = df_all.to_csv(file_name, header=True, index=False)  # write without the row index
# Upload the predictions into artifacts # Upload the predictions into artifacts
run.upload_file(name=file_name, path_or_stream=file_name) run.upload_file(name=file_name, path_or_stream=file_name)


@@ -3,36 +3,47 @@ import shutil
from azureml.core import ScriptRunConfig from azureml.core import ScriptRunConfig
def run_remote_inference(test_experiment, compute_target, train_run, def run_remote_inference(
test_dataset, target_column_name, inference_folder='./forecast'): test_experiment,
compute_target,
train_run,
test_dataset,
target_column_name,
inference_folder="./forecast",
):
# Create local directory to copy the model.pkl and forecasting_script.py files into. # Create local directory to copy the model.pkl and forecasting_script.py files into.
# These files will be uploaded to and executed on the compute instance. # These files will be uploaded to and executed on the compute instance.
os.makedirs(inference_folder, exist_ok=True) os.makedirs(inference_folder, exist_ok=True)
shutil.copy('forecasting_script.py', inference_folder) shutil.copy("forecasting_script.py", inference_folder)
train_run.download_file('outputs/model.pkl', train_run.download_file(
os.path.join(inference_folder, 'model.pkl')) "outputs/model.pkl", os.path.join(inference_folder, "model.pkl")
)
inference_env = train_run.get_environment() inference_env = train_run.get_environment()
config = ScriptRunConfig(source_directory=inference_folder, config = ScriptRunConfig(
script='forecasting_script.py', source_directory=inference_folder,
arguments=['--target_column_name', script="forecasting_script.py",
arguments=[
"--target_column_name",
target_column_name, target_column_name,
'--test_dataset', "--test_dataset",
test_dataset.as_named_input(test_dataset.name)], test_dataset.as_named_input(test_dataset.name),
],
compute_target=compute_target, compute_target=compute_target,
environment=inference_env) environment=inference_env,
)
run = test_experiment.submit(config, run = test_experiment.submit(
tags={'training_run_id': config,
train_run.id, tags={
'run_algorithm': "training_run_id": train_run.id,
train_run.properties['run_algorithm'], "run_algorithm": train_run.properties["run_algorithm"],
'valid_score': "valid_score": train_run.properties["score"],
train_run.properties['score'], "primary_metric": train_run.properties["primary_metric"],
'primary_metric': },
train_run.properties['primary_metric']}) )
run.log("run_algorithm", run.tags['run_algorithm']) run.log("run_algorithm", run.tags["run_algorithm"])
return run return run


@@ -56,16 +56,18 @@
"from statsmodels.graphics.tsaplots import plot_acf, plot_pacf\n", "from statsmodels.graphics.tsaplots import plot_acf, plot_pacf\n",
"import matplotlib.pyplot as plt\n", "import matplotlib.pyplot as plt\n",
"from pandas.plotting import register_matplotlib_converters\n", "from pandas.plotting import register_matplotlib_converters\n",
"\n",
"register_matplotlib_converters() # fixes the future warning issue\n", "register_matplotlib_converters() # fixes the future warning issue\n",
"\n", "\n",
"from helper_functions import unit_root_test_wrapper\n", "from helper_functions import unit_root_test_wrapper\n",
"from statsmodels.tools.sm_exceptions import InterpolationWarning\n", "from statsmodels.tools.sm_exceptions import InterpolationWarning\n",
"warnings.simplefilter('ignore', InterpolationWarning)\n", "\n",
"warnings.simplefilter(\"ignore\", InterpolationWarning)\n",
"\n", "\n",
"\n", "\n",
"# set printing options\n", "# set printing options\n",
"pd.set_option('display.max_columns', 500)\n", "pd.set_option(\"display.max_columns\", 500)\n",
"pd.set_option('display.width', 1000)" "pd.set_option(\"display.width\", 1000)"
] ]
}, },
{ {
@@ -75,15 +77,15 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# load data\n", "# load data\n",
"main_data_loc = 'data'\n", "main_data_loc = \"data\"\n",
"train_file_name = 'S4248SM144SCEN.csv'\n", "train_file_name = \"S4248SM144SCEN.csv\"\n",
"\n", "\n",
"TARGET_COLNAME = 'S4248SM144SCEN'\n", "TARGET_COLNAME = \"S4248SM144SCEN\"\n",
"TIME_COLNAME = 'observation_date'\n", "TIME_COLNAME = \"observation_date\"\n",
"COVID_PERIOD_START = '2020-03-01'\n", "COVID_PERIOD_START = \"2020-03-01\"\n",
"\n", "\n",
"df = pd.read_csv(os.path.join(main_data_loc, train_file_name))\n", "df = pd.read_csv(os.path.join(main_data_loc, train_file_name))\n",
"df[TIME_COLNAME] = pd.to_datetime(df[TIME_COLNAME], format='%Y-%m-%d')\n", "df[TIME_COLNAME] = pd.to_datetime(df[TIME_COLNAME], format=\"%Y-%m-%d\")\n",
"df.sort_values(by=TIME_COLNAME, inplace=True)\n", "df.sort_values(by=TIME_COLNAME, inplace=True)\n",
"df.set_index(TIME_COLNAME, inplace=True)\n", "df.set_index(TIME_COLNAME, inplace=True)\n",
"df.head(2)" "df.head(2)"
@@ -96,9 +98,9 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# plot the entire dataset\n", "# plot the entire dataset\n",
"fig, ax = plt.subplots(figsize=(6,2), dpi=180)\n", "fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
"ax.plot(df)\n", "ax.plot(df)\n",
"ax.title.set_text('Original Data Series')\n", "ax.title.set_text(\"Original Data Series\")\n",
"locs, labels = plt.xticks()\n", "locs, labels = plt.xticks()\n",
"plt.xticks(rotation=45)" "plt.xticks(rotation=45)"
] ]
@@ -117,9 +119,9 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# plot the entire dataset in first differences\n", "# plot the entire dataset in first differences\n",
"fig, ax = plt.subplots(figsize=(6,2), dpi=180)\n", "fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
"ax.plot(df.diff().dropna())\n", "ax.plot(df.diff().dropna())\n",
"ax.title.set_text('Data in first differences')\n", "ax.title.set_text(\"Data in first differences\")\n",
"locs, labels = plt.xticks()\n", "locs, labels = plt.xticks()\n",
"plt.xticks(rotation=45)" "plt.xticks(rotation=45)"
] ]
@@ -151,9 +153,9 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# plot the entire dataset in first differences\n", "# plot the entire dataset in first differences\n",
"fig, ax = plt.subplots(figsize=(6,2), dpi=180)\n", "fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
"ax.plot(df.diff().dropna())\n", "ax.plot(df.diff().dropna())\n",
"ax.title.set_text('Data in first differences')\n", "ax.title.set_text(\"Data in first differences\")\n",
"locs, labels = plt.xticks()\n", "locs, labels = plt.xticks()\n",
"plt.xticks(rotation=45)" "plt.xticks(rotation=45)"
] ]
@@ -175,9 +177,9 @@
"df = df[:COVID_PERIOD_START]\n", "df = df[:COVID_PERIOD_START]\n",
"\n", "\n",
"# plot the entire dataset in first differences\n", "# plot the entire dataset in first differences\n",
"fig, ax = plt.subplots(figsize=(6,2), dpi=180)\n", "fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
"ax.plot(df['2015-01-01':].diff().dropna())\n", "ax.plot(df[\"2015-01-01\":].diff().dropna())\n",
"ax.title.set_text('Data in first differences')\n", "ax.title.set_text(\"Data in first differences\")\n",
"locs, labels = plt.xticks()\n", "locs, labels = plt.xticks()\n",
"plt.xticks(rotation=45)" "plt.xticks(rotation=45)"
] ]
@@ -245,10 +247,10 @@
"source": [ "source": [
"# unit root tests\n", "# unit root tests\n",
"test = unit_root_test_wrapper(df[TARGET_COLNAME])\n", "test = unit_root_test_wrapper(df[TARGET_COLNAME])\n",
"print('---------------', '\\n')\n", "print(\"---------------\", \"\\n\")\n",
"print('Summary table', '\\n', test['summary'], '\\n')\n", "print(\"Summary table\", \"\\n\", test[\"summary\"], \"\\n\")\n",
"print('Is the {} series stationary?: {}'.format(TARGET_COLNAME, test['stationary']))\n", "print(\"Is the {} series stationary?: {}\".format(TARGET_COLNAME, test[\"stationary\"]))\n",
"print('---------------', '\\n')" "print(\"---------------\", \"\\n\")"
] ]
}, },
{ {
@@ -260,7 +262,7 @@
" <li> test_name is the name of the test.\n", " <li> test_name is the name of the test.\n",
" <ul> \n", " <ul> \n",
" <li> ADF: Augmented Dickey-Fuller test </li>\n", " <li> ADF: Augmented Dickey-Fuller test </li>\n",
" <li> KPSS: Kwiatkowski-Phillips\u00e2\u20ac\u201cSchmidt\u00e2\u20ac\u201cShin test </li>\n", " <li> KPSS: Kwiatkowski-PhillipsSchmidtShin test </li>\n",
" <li> PP: Phillips-Perron test\n", " <li> PP: Phillips-Perron test\n",
" <li> ADF GLS: Augmented Dickey-Fuller using generalized least squares method </li>\n", " <li> ADF GLS: Augmented Dickey-Fuller using generalized least squares method </li>\n",
" <li> AZ: Andrews-Zivot test </li>\n", " <li> AZ: Andrews-Zivot test </li>\n",
@@ -285,10 +287,10 @@
"source": [ "source": [
"# unit root tests\n", "# unit root tests\n",
"test = unit_root_test_wrapper(df[TARGET_COLNAME].diff().dropna())\n", "test = unit_root_test_wrapper(df[TARGET_COLNAME].diff().dropna())\n",
"print('---------------', '\\n')\n", "print(\"---------------\", \"\\n\")\n",
"print('Summary table', '\\n', test['summary'], '\\n')\n", "print(\"Summary table\", \"\\n\", test[\"summary\"], \"\\n\")\n",
"print('Is the {} series stationary?: {}'.format(TARGET_COLNAME, test['stationary']))\n", "print(\"Is the {} series stationary?: {}\".format(TARGET_COLNAME, test[\"stationary\"]))\n",
"print('---------------', '\\n')" "print(\"---------------\", \"\\n\")"
] ]
}, },
{ {
@@ -305,13 +307,13 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# plot original and stationary data\n", "# plot original and stationary data\n",
"fig = plt.figure(figsize=(10,10))\n", "fig = plt.figure(figsize=(10, 10))\n",
"ax1 = fig.add_subplot(211)\n", "ax1 = fig.add_subplot(211)\n",
"ax1.plot(df[TARGET_COLNAME], '-b')\n", "ax1.plot(df[TARGET_COLNAME], \"-b\")\n",
"ax2 = fig.add_subplot(212)\n", "ax2 = fig.add_subplot(212)\n",
"ax2.plot(df[TARGET_COLNAME].diff().dropna(), '-b')\n", "ax2.plot(df[TARGET_COLNAME].diff().dropna(), \"-b\")\n",
"ax1.title.set_text('Original data')\n", "ax1.title.set_text(\"Original data\")\n",
"ax2.title.set_text('Data in first differences')" "ax2.title.set_text(\"Data in first differences\")"
] ]
}, },
{ {
@@ -372,7 +374,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# Plot the ACF/PACF for the series in differences\n", "# Plot the ACF/PACF for the series in differences\n",
"fig, ax = plt.subplots(1,2,figsize=(10,5))\n", "fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n",
"plot_acf(df[TARGET_COLNAME].diff().dropna().values.squeeze(), ax=ax[0])\n", "plot_acf(df[TARGET_COLNAME].diff().dropna().values.squeeze(), ax=ax[0])\n",
"plot_pacf(df[TARGET_COLNAME].diff().dropna().values.squeeze(), ax=ax[1])\n", "plot_pacf(df[TARGET_COLNAME].diff().dropna().values.squeeze(), ax=ax[1])\n",
"plt.show()" "plt.show()"
@@ -470,9 +472,9 @@
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python 3.6", "display_name": "Python 3.6 - AzureML",
"language": "python", "language": "python",
"name": "python36" "name": "python3-azureml"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {


@@ -51,7 +51,7 @@
"from azureml.core.compute import AmlCompute\n", "from azureml.core.compute import AmlCompute\n",
"from azureml.core.compute import ComputeTarget\n", "from azureml.core.compute import ComputeTarget\n",
"import matplotlib.pyplot as plt\n", "import matplotlib.pyplot as plt\n",
"from helper_functions import (ts_train_test_split, compute_metrics)\n", "from helper_functions import ts_train_test_split, compute_metrics\n",
"\n", "\n",
"import azureml.core\n", "import azureml.core\n",
"from azureml.core.workspace import Workspace\n", "from azureml.core.workspace import Workspace\n",
@@ -61,8 +61,8 @@
"\n", "\n",
"# set printing options\n", "# set printing options\n",
"np.set_printoptions(precision=4, suppress=True, linewidth=100)\n", "np.set_printoptions(precision=4, suppress=True, linewidth=100)\n",
"pd.set_option('display.max_columns', 500)\n", "pd.set_option(\"display.max_columns\", 500)\n",
"pd.set_option('display.width', 1000)" "pd.set_option(\"display.width\", 1000)"
] ]
}, },
{ {
@@ -81,27 +81,32 @@
"source": [ "source": [
"ws = Workspace.from_config()\n", "ws = Workspace.from_config()\n",
"amlcompute_cluster_name = \"recipe-cluster\"\n", "amlcompute_cluster_name = \"recipe-cluster\"\n",
" \n", "\n",
"found = False\n", "found = False\n",
"# Check if this compute target already exists in the workspace.\n", "# Check if this compute target already exists in the workspace.\n",
"cts = ws.compute_targets\n", "cts = ws.compute_targets\n",
"if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'AmlCompute':\n", "if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == \"AmlCompute\":\n",
" found = True\n", " found = True\n",
" print('Found existing compute target.')\n", " print(\"Found existing compute target.\")\n",
" compute_target = cts[amlcompute_cluster_name]\n", " compute_target = cts[amlcompute_cluster_name]\n",
"\n", "\n",
"if not found:\n", "if not found:\n",
" print('Creating a new compute target...')\n", " print(\"Creating a new compute target...\")\n",
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\",\n", " provisioning_config = AmlCompute.provisioning_configuration(\n",
" max_nodes = 6)\n", " vm_size=\"STANDARD_D2_V2\", max_nodes=6\n",
" )\n",
"\n", "\n",
" # Create the cluster.\\n\",\n", " # Create the cluster.\\n\",\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config)\n", " compute_target = ComputeTarget.create(\n",
" ws, amlcompute_cluster_name, provisioning_config\n",
" )\n",
"\n", "\n",
"print('Checking cluster status...')\n", "print(\"Checking cluster status...\")\n",
"# Can poll for a minimum number of nodes and for a specific timeout.\n", "# Can poll for a minimum number of nodes and for a specific timeout.\n",
"# If no min_node_count is provided, it will use the scale settings for the cluster.\n", "# If no min_node_count is provided, it will use the scale settings for the cluster.\n",
"compute_target.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20)" "compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
")"
] ]
}, },
{ {
@@ -119,16 +124,18 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"main_data_loc = 'data'\n", "main_data_loc = \"data\"\n",
"train_file_name = 'S4248SM144SCEN.csv'\n", "train_file_name = \"S4248SM144SCEN.csv\"\n",
"\n", "\n",
"TARGET_COLNAME = \"S4248SM144SCEN\"\n", "TARGET_COLNAME = \"S4248SM144SCEN\"\n",
"TIME_COLNAME = \"observation_date\"\n", "TIME_COLNAME = \"observation_date\"\n",
"COVID_PERIOD_START = '2020-03-01' # start of the covid period. To be excluded from evaluation.\n", "COVID_PERIOD_START = (\n",
" \"2020-03-01\" # start of the covid period. To be excluded from evaluation.\n",
")\n",
"\n", "\n",
"# load data\n", "# load data\n",
"df = pd.read_csv(os.path.join(main_data_loc, train_file_name))\n", "df = pd.read_csv(os.path.join(main_data_loc, train_file_name))\n",
"df[TIME_COLNAME] = pd.to_datetime(df[TIME_COLNAME], format='%Y-%m-%d')\n", "df[TIME_COLNAME] = pd.to_datetime(df[TIME_COLNAME], format=\"%Y-%m-%d\")\n",
"df.sort_values(by=TIME_COLNAME, inplace=True)\n", "df.sort_values(by=TIME_COLNAME, inplace=True)\n",
"\n", "\n",
"# remove the Covid period\n", "# remove the Covid period\n",
@@ -202,24 +209,28 @@
"source": [ "source": [
"# choose a name for the run history container in the workspace\n", "# choose a name for the run history container in the workspace\n",
"if isinstance(TARGET_LAGS, list):\n", "if isinstance(TARGET_LAGS, list):\n",
" TARGET_LAGS_STR = '-'.join(map(str, TARGET_LAGS)) if (len(TARGET_LAGS) > 0) else None\n", " TARGET_LAGS_STR = (\n",
" \"-\".join(map(str, TARGET_LAGS)) if (len(TARGET_LAGS) > 0) else None\n",
" )\n",
"else:\n", "else:\n",
" TARGET_LAGS_STR = TARGET_LAGS\n", " TARGET_LAGS_STR = TARGET_LAGS\n",
"\n", "\n",
"experiment_desc = 'diff-{}_lags-{}_STL-{}'.format(DIFFERENCE_SERIES, TARGET_LAGS_STR, STL_TYPE)\n", "experiment_desc = \"diff-{}_lags-{}_STL-{}\".format(\n",
"experiment_name = 'alcohol_{}'.format(experiment_desc)\n", " DIFFERENCE_SERIES, TARGET_LAGS_STR, STL_TYPE\n",
")\n",
"experiment_name = \"alcohol_{}\".format(experiment_desc)\n",
"experiment = Experiment(ws, experiment_name)\n", "experiment = Experiment(ws, experiment_name)\n",
"\n", "\n",
"output = {}\n", "output = {}\n",
"output['SDK version'] = azureml.core.VERSION\n", "output[\"SDK version\"] = azureml.core.VERSION\n",
"output['Subscription ID'] = ws.subscription_id\n", "output[\"Subscription ID\"] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n", "output[\"Workspace\"] = ws.name\n",
"output['SKU'] = ws.sku\n", "output[\"SKU\"] = ws.sku\n",
"output['Resource Group'] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output['Location'] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output['Run History Name'] = experiment_name\n", "output[\"Run History Name\"] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n", "pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"print(outputDf.T)" "print(outputDf.T)"
] ]
}, },
@@ -230,9 +241,9 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# create output directory\n", "# create output directory\n",
"output_dir = 'experiment_output/{}'.format(experiment_desc)\n", "output_dir = \"experiment_output/{}\".format(experiment_desc)\n",
"if not os.path.exists(output_dir):\n", "if not os.path.exists(output_dir):\n",
" os.makedirs(output_dir) " " os.makedirs(output_dir)"
] ]
}, },
{ {
@@ -255,17 +266,21 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# split the data into train and test set\n", "# split the data into train and test set\n",
"if DIFFERENCE_SERIES: \n", "if DIFFERENCE_SERIES:\n",
" # generate train/inference sets using data in first differences\n", " # generate train/inference sets using data in first differences\n",
" df_train, df_test = ts_train_test_split(df_input=df_delta,\n", " df_train, df_test = ts_train_test_split(\n",
" df_input=df_delta,\n",
" n=FORECAST_HORIZON,\n", " n=FORECAST_HORIZON,\n",
" time_colname=TIME_COLNAME,\n", " time_colname=TIME_COLNAME,\n",
" ts_id_colnames=TIME_SERIES_ID_COLNAMES)\n", " ts_id_colnames=TIME_SERIES_ID_COLNAMES,\n",
" )\n",
"else:\n", "else:\n",
" df_train, df_test = ts_train_test_split(df_input=df,\n", " df_train, df_test = ts_train_test_split(\n",
" df_input=df,\n",
" n=FORECAST_HORIZON,\n", " n=FORECAST_HORIZON,\n",
" time_colname=TIME_COLNAME,\n", " time_colname=TIME_COLNAME,\n",
" ts_id_colnames=TIME_SERIES_ID_COLNAMES)" " ts_id_colnames=TIME_SERIES_ID_COLNAMES,\n",
" )"
] ]
}, },
{ {
@@ -286,12 +301,27 @@
"df_test.to_csv(\"test.csv\", index=False)\n", "df_test.to_csv(\"test.csv\", index=False)\n",
"\n", "\n",
"datastore = ws.get_default_datastore()\n", "datastore = ws.get_default_datastore()\n",
"datastore.upload_files(files = ['./train.csv'], target_path = 'uni-recipe-dataset/tabular/', overwrite = True,show_progress = True)\n", "datastore.upload_files(\n",
"datastore.upload_files(files = ['./test.csv'], target_path = 'uni-recipe-dataset/tabular/', overwrite = True,show_progress = True)\n", " files=[\"./train.csv\"],\n",
" target_path=\"uni-recipe-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"datastore.upload_files(\n",
" files=[\"./test.csv\"],\n",
" target_path=\"uni-recipe-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"\n", "\n",
"from azureml.core import Dataset\n", "from azureml.core import Dataset\n",
"train_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'uni-recipe-dataset/tabular/train.csv')])\n", "\n",
"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'uni-recipe-dataset/tabular/test.csv')])\n", "train_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"uni-recipe-dataset/tabular/train.csv\")]\n",
")\n",
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"uni-recipe-dataset/tabular/test.csv\")]\n",
")\n",
"\n", "\n",
"# print the first 5 rows of the Dataset\n", "# print the first 5 rows of the Dataset\n",
"train_dataset.to_pandas_dataframe().reset_index(drop=True).head(5)" "train_dataset.to_pandas_dataframe().reset_index(drop=True).head(5)"
@@ -311,17 +341,18 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"time_series_settings = {\n", "time_series_settings = {\n",
" 'time_column_name': TIME_COLNAME,\n", " \"time_column_name\": TIME_COLNAME,\n",
" 'forecast_horizon': FORECAST_HORIZON,\n", " \"forecast_horizon\": FORECAST_HORIZON,\n",
" 'target_lags': TARGET_LAGS,\n", " \"target_lags\": TARGET_LAGS,\n",
" 'use_stl': STL_TYPE,\n", " \"use_stl\": STL_TYPE,\n",
" 'blocked_models': BLOCKED_MODELS,\n", " \"blocked_models\": BLOCKED_MODELS,\n",
" 'time_series_id_column_names': TIME_SERIES_ID_COLNAMES\n", " \"time_series_id_column_names\": TIME_SERIES_ID_COLNAMES,\n",
"}\n", "}\n",
"\n", "\n",
"automl_config = AutoMLConfig(task='forecasting',\n", "automl_config = AutoMLConfig(\n",
" debug_log='sample_experiment.log',\n", " task=\"forecasting\",\n",
" primary_metric='normalized_root_mean_squared_error',\n", " debug_log=\"sample_experiment.log\",\n",
" primary_metric=\"normalized_root_mean_squared_error\",\n",
" experiment_timeout_minutes=20,\n", " experiment_timeout_minutes=20,\n",
" iteration_timeout_minutes=5,\n", " iteration_timeout_minutes=5,\n",
" enable_early_stopping=True,\n", " enable_early_stopping=True,\n",
@@ -331,7 +362,8 @@
" verbosity=logging.INFO,\n", " verbosity=logging.INFO,\n",
" max_cores_per_iteration=-1,\n", " max_cores_per_iteration=-1,\n",
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" **time_series_settings)" " **time_series_settings,\n",
")"
] ]
}, },
{ {
@@ -355,8 +387,8 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"### Retrieve the best model\n", "### Retrieve the Best Run details\n",
"Below we select the best model from all the training iterations using get_output method." "Below we retrieve the best Run object from among all the runs in the experiment."
] ]
}, },
{ {
@@ -365,8 +397,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"best_run, fitted_model = remote_run.get_output()\n", "best_run = remote_run.get_best_child()\n",
"fitted_model.steps" "best_run"
] ]
}, },
{ {
@@ -404,14 +436,17 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"from run_forecast import run_remote_inference\n", "from run_forecast import run_remote_inference\n",
"remote_run = run_remote_inference(test_experiment=test_experiment, \n", "\n",
"remote_run = run_remote_inference(\n",
" test_experiment=test_experiment,\n",
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" train_run=best_run,\n", " train_run=best_run,\n",
" test_dataset=test_dataset,\n", " test_dataset=test_dataset,\n",
" target_column_name=TARGET_COLNAME)\n", " target_column_name=TARGET_COLNAME,\n",
")\n",
"remote_run.wait_for_completion(show_output=False)\n", "remote_run.wait_for_completion(show_output=False)\n",
"\n", "\n",
"remote_run.download_file('outputs/predictions.csv', f'{output_dir}/predictions.csv')" "remote_run.download_file(\"outputs/predictions.csv\", f\"{output_dir}/predictions.csv\")"
] ]
}, },
{ {
@@ -428,7 +463,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"X_trans = pd.read_csv(f'{output_dir}/predictions.csv', parse_dates=[TIME_COLNAME])\n", "X_trans = pd.read_csv(f\"{output_dir}/predictions.csv\", parse_dates=[TIME_COLNAME])\n",
"X_trans.head()" "X_trans.head()"
] ]
}, },
@@ -440,15 +475,15 @@
"source": [ "source": [
"# convert forecast in differences to levels\n", "# convert forecast in differences to levels\n",
"def convert_fcst_diff_to_levels(fcst, yt, df_orig):\n", "def convert_fcst_diff_to_levels(fcst, yt, df_orig):\n",
" \"\"\" Convert forecast from first differences to levels. \"\"\"\n", " \"\"\"Convert forecast from first differences to levels.\"\"\"\n",
" fcst = fcst.reset_index(drop=False, inplace=False)\n", " fcst = fcst.reset_index(drop=False, inplace=False)\n",
" fcst['predicted_level'] = fcst['predicted'].cumsum()\n", " fcst[\"predicted_level\"] = fcst[\"predicted\"].cumsum()\n",
" fcst['predicted_level'] = fcst['predicted_level'].astype(float) + float(yt)\n", " fcst[\"predicted_level\"] = fcst[\"predicted_level\"].astype(float) + float(yt)\n",
" # merge actuals\n", " # merge actuals\n",
" out = pd.merge(fcst,\n", " out = pd.merge(\n",
" df_orig[[TIME_COLNAME, TARGET_COLNAME]], \n", " fcst, df_orig[[TIME_COLNAME, TARGET_COLNAME]], on=[TIME_COLNAME], how=\"inner\"\n",
" on=[TIME_COLNAME], how='inner')\n", " )\n",
" out.rename(columns={TARGET_COLNAME: 'actual_level'}, inplace=True)\n", " out.rename(columns={TARGET_COLNAME: \"actual_level\"}, inplace=True)\n",
" return out" " return out"
] ]
}, },
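Editor's note: the function encodes the identity y(T+h) = y(T) + the sum of the first h predicted differences. A tiny numeric check of that cumsum logic, with invented values:

import pandas as pd

y_T = 100.0  # last observed level at the forecast origin
fcst = pd.DataFrame({"predicted": [2.0, -1.0, 3.0]})  # forecasted first differences
levels = fcst["predicted"].cumsum() + y_T
print(levels.tolist())  # [102.0, 101.0, 104.0]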
@@ -458,16 +493,16 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"if DIFFERENCE_SERIES: \n", "if DIFFERENCE_SERIES:\n",
" # convert forecast in differences to the levels\n", " # convert forecast in differences to the levels\n",
" INFORMATION_SET_DATE = max(df_train[TIME_COLNAME])\n", " INFORMATION_SET_DATE = max(df_train[TIME_COLNAME])\n",
" YT = df.query('{} == @INFORMATION_SET_DATE'.format(TIME_COLNAME))[TARGET_COLNAME]\n", " YT = df.query(\"{} == @INFORMATION_SET_DATE\".format(TIME_COLNAME))[TARGET_COLNAME]\n",
"\n", "\n",
" fcst_df = convert_fcst_diff_to_levels(fcst=X_trans, yt=YT, df_orig=df)\n", " fcst_df = convert_fcst_diff_to_levels(fcst=X_trans, yt=YT, df_orig=df)\n",
"else:\n", "else:\n",
" fcst_df = X_trans.copy()\n", " fcst_df = X_trans.copy()\n",
" fcst_df['actual_level'] = y_test\n", " fcst_df[\"actual_level\"] = y_test\n",
" fcst_df['predicted_level'] = y_predictions\n", " fcst_df[\"predicted_level\"] = y_predictions\n",
"\n", "\n",
"del X_trans" "del X_trans"
] ]
@@ -486,13 +521,11 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# compute metrics\n", "# compute metrics\n",
"metrics_df = compute_metrics(fcst_df=fcst_df,\n", "metrics_df = compute_metrics(fcst_df=fcst_df, metric_name=None, ts_id_colnames=None)\n",
" metric_name=None,\n",
" ts_id_colnames=None)\n",
"# save output\n", "# save output\n",
"metrics_file_name = '{}_metrics.csv'.format(experiment_name)\n", "metrics_file_name = \"{}_metrics.csv\".format(experiment_name)\n",
"fcst_file_name = '{}_forecst.csv'.format(experiment_name)\n", "fcst_file_name = \"{}_forecst.csv\".format(experiment_name)\n",
"plot_file_name = '{}_plot.pdf'.format(experiment_name)\n", "plot_file_name = \"{}_plot.pdf\".format(experiment_name)\n",
"\n", "\n",
"metrics_df.to_csv(os.path.join(output_dir, metrics_file_name), index=True)\n", "metrics_df.to_csv(os.path.join(output_dir, metrics_file_name), index=True)\n",
"fcst_df.to_csv(os.path.join(output_dir, fcst_file_name), index=True)" "fcst_df.to_csv(os.path.join(output_dir, fcst_file_name), index=True)"
@@ -517,9 +550,9 @@
"\n", "\n",
"# generate and save plots\n", "# generate and save plots\n",
"fig, ax = plt.subplots(dpi=180)\n", "fig, ax = plt.subplots(dpi=180)\n",
"ax.plot(plot_df[TARGET_COLNAME], '-g', label='Historical')\n", "ax.plot(plot_df[TARGET_COLNAME], \"-g\", label=\"Historical\")\n",
"ax.plot(fcst_df['actual_level'], '-b', label='Actual')\n", "ax.plot(fcst_df[\"actual_level\"], \"-b\", label=\"Actual\")\n",
"ax.plot(fcst_df['predicted_level'], '-r', label='Forecast')\n", "ax.plot(fcst_df[\"predicted_level\"], \"-r\", label=\"Forecast\")\n",
"ax.legend()\n", "ax.legend()\n",
"ax.set_title(\"Forecast vs Actuals\")\n", "ax.set_title(\"Forecast vs Actuals\")\n",
"ax.set_xlabel(TIME_COLNAME)\n", "ax.set_xlabel(TIME_COLNAME)\n",
@@ -538,9 +571,9 @@
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python 3.6", "display_name": "Python 3.6 - AzureML",
"language": "python", "language": "python",
"name": "python36" "name": "python3-azureml"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {


@@ -11,11 +11,14 @@ from sklearn.externals import joblib
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument( parser.add_argument(
'--target_column_name', type=str, dest='target_column_name', "--target_column_name",
help='Target Column Name') type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument( parser.add_argument(
'--test_dataset', type=str, dest='test_dataset', "--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
help='Test Dataset') )
args = parser.parse_args() args = parser.parse_args()
target_column_name = args.target_column_name target_column_name = args.target_column_name
@@ -27,20 +30,41 @@ ws = run.experiment.workspace
# get the input dataset by id # get the input dataset by id
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id) test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)
X_test_df = test_dataset.drop_columns(columns=[target_column_name]).to_pandas_dataframe().reset_index(drop=True) X_test = (
y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(columns=[target_column_name]).to_pandas_dataframe() test_dataset.drop_columns(columns=[target_column_name])
.to_pandas_dataframe()
.reset_index(drop=True)
)
y_test_df = (
test_dataset.with_timestamp_columns(None)
.keep_columns(columns=[target_column_name])
.to_pandas_dataframe()
)
# generate forecast # generate forecast
fitted_model = joblib.load('model.pkl') fitted_model = joblib.load("model.pkl")
y_pred, X_trans = fitted_model.forecast(X_test_df) # The default quantile values below give a 95% prediction interval around the median
quantiles = [0.025, 0.5, 0.975]
predicted_column_name = "predicted"
PI = "prediction_interval"
fitted_model.quantiles = quantiles
pred_quantiles = fitted_model.forecast_quantiles(X_test)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
)
X_test[target_column_name] = y_test_df[target_column_name]
X_test[PI] = pred_quantiles[PI]
X_test[predicted_column_name] = pred_quantiles[0.5]
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = X_test[
X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
clean.rename(columns={target_column_name: "actual"}, inplace=True)
# rename target column file_name = "outputs/predictions.csv"
X_trans.reset_index(drop=False, inplace=True) export_csv = clean.to_csv(file_name, header=True, index=False)  # write without the row index
X_trans.rename(columns={TimeSeriesInternal.DUMMY_TARGET_COLUMN: 'predicted'}, inplace=True)
X_trans['actual'] = y_test_df[target_column_name].values
file_name = 'outputs/predictions.csv'
export_csv = X_trans.to_csv(file_name, header=True, index=False) # added Index
# Upload the predictions into artifacts # Upload the predictions into artifacts
run.upload_file(name=file_name, path_or_stream=file_name) run.upload_file(name=file_name, path_or_stream=file_name)


@@ -15,22 +15,25 @@ def adf_test(series, **kw):
:param series: series to test :param series: series to test
:return: dictionary of results :return: dictionary of results
""" """
if 'lags' in kw.keys(): if "lags" in kw.keys():
msg = 'Lag order of {} detected. Running the ADF test...'.format(str(kw['lags'])) msg = "Lag order of {} detected. Running the ADF test...".format(
str(kw["lags"])
)
print(msg) print(msg)
statistic, pval, critval, resstore = stattools.adfuller(series, statistic, pval, critval, resstore = stattools.adfuller(
maxlag=kw['lags'], series, maxlag=kw["lags"], autolag=kw["autolag"], store=kw["store"]
autolag=kw['autolag'], )
store=kw['store'])
else: else:
statistic, pval, critval, resstore = stattools.adfuller(series, statistic, pval, critval, resstore = stattools.adfuller(
autolag=kw['IC'], series, autolag=kw["IC"], store=kw["store"]
store=kw['store']) )
output = {'statistic': statistic, output = {
'pval': pval, "statistic": statistic,
'critical': critval, "pval": pval,
'resstore': resstore} "critical": critval,
"resstore": resstore,
}
return output return output
@@ -41,22 +44,23 @@ def kpss_test(series, **kw):
:param series: series to test :param series: series to test
:return: dictionary of results :return: dictionary of results
""" """
if kw['store']: if kw["store"]:
statistic, p_value, critical_values, rstore = stattools.kpss(series, statistic, p_value, critical_values, rstore = stattools.kpss(
regression=kw['reg_type'], series, regression=kw["reg_type"], lags=kw["lags"], store=kw["store"]
lags=kw['lags'], )
store=kw['store'])
else: else:
statistic, p_value, lags, critical_values = stattools.kpss(series, statistic, p_value, lags, critical_values = stattools.kpss(
regression=kw['reg_type'], series, regression=kw["reg_type"], lags=kw["lags"]
lags=kw['lags']) )
output = {'statistic': statistic, output = {
'pval': p_value, "statistic": statistic,
'critical': critical_values, "pval": p_value,
'lags': rstore.lags if kw['store'] else lags} "critical": critical_values,
"lags": rstore.lags if kw["store"] else lags,
}
if kw['store']: if kw["store"]:
output.update({'resstore': rstore}) output.update({"resstore": rstore})
return output return output
@@ -75,9 +79,9 @@ def format_test_output(test_name, test_res, H0_unit_root=True):
If test failed (test_res is None), return empty dictionary. If test failed (test_res is None), return empty dictionary.
""" """
# Check if the test failed by trying to extract the test statistic # Check if the test failed by trying to extract the test statistic
if test_name in ('ADF', 'KPSS'): if test_name in ("ADF", "KPSS"):
try: try:
test_res['statistic'] test_res["statistic"]
except BaseException: except BaseException:
test_res = None test_res = None
else: else:
@@ -90,32 +94,32 @@ def format_test_output(test_name, test_res, H0_unit_root=True):
return {} return {}
# extract necessary information # extract necessary information
if test_name in ('ADF', 'KPSS'): if test_name in ("ADF", "KPSS"):
statistic = test_res['statistic'] statistic = test_res["statistic"]
crit_val = test_res['critical']['5%'] crit_val = test_res["critical"]["5%"]
p_val = test_res['pval'] p_val = test_res["pval"]
lags = test_res['resstore'].usedlag if test_name == 'ADF' else test_res['lags'] lags = test_res["resstore"].usedlag if test_name == "ADF" else test_res["lags"]
else: else:
statistic = test_res.stat statistic = test_res.stat
crit_val = test_res.critical_values['5%'] crit_val = test_res.critical_values["5%"]
p_val = test_res.pvalue p_val = test_res.pvalue
lags = test_res.lags lags = test_res.lags
if H0_unit_root: if H0_unit_root:
H0 = 'The process is non-stationary' H0 = "The process is non-stationary"
stationary = "yes" if p_val < 0.05 else "not" stationary = "yes" if p_val < 0.05 else "not"
else: else:
H0 = 'The process is stationary' H0 = "The process is stationary"
stationary = "yes" if p_val > 0.05 else "not" stationary = "yes" if p_val > 0.05 else "not"
out = { out = {
'test_name': test_name, "test_name": test_name,
'statistic': statistic, "statistic": statistic,
'crit_val': crit_val, "crit_val": crit_val,
'p_val': p_val, "p_val": p_val,
'lags': int(lags), "lags": int(lags),
'stationary': stationary, "stationary": stationary,
'Null Hypothesis': H0 "Null Hypothesis": H0,
} }
return out return out
@@ -136,22 +140,15 @@ def unit_root_test_wrapper(series, lags=None):
:return: dictionary of summary table for all tests and final decision on stationary vs nonstationary :return: dictionary of summary table for all tests and final decision on stationary vs nonstationary
""" """
# setting for ADF and KPSS tests # setting for ADF and KPSS tests
adf_settings = { adf_settings = {"IC": "AIC", "store": True}
'IC': 'AIC',
'store': True
}
kpss_settings = { kpss_settings = {"reg_type": "c", "lags": "auto", "store": True}
'reg_type': 'c',
'lags': 'auto',
'store': True
}
arch_test_settings = {} # settings for PP, ADF GLS and ZA tests arch_test_settings = {} # settings for PP, ADF GLS and ZA tests
if lags is not None: if lags is not None:
adf_settings.update({'lags': lags, 'autolag': None}) adf_settings.update({"lags": lags, "autolag": None})
kpss_settings.update({'lags': lags})  # key must be 'lags' (kpss_test reads kw["lags"]) kpss_settings.update({"lags": lags})  # key must be "lags" (kpss_test reads kw["lags"])
arch_test_settings = {'lags': lags} arch_test_settings = {"lags": lags}
# Run individual tests # Run individual tests
adf = adf_test(series, **adf_settings) # ADF test adf = adf_test(series, **adf_settings) # ADF test
kpss = kpss_test(series, **kpss_settings) # KPSS test kpss = kpss_test(series, **kpss_settings) # KPSS test
@@ -160,14 +157,26 @@ def unit_root_test_wrapper(series, lags=None):
za = unitroot.ZivotAndrews(series, **arch_test_settings) # Zivot-Andrews test za = unitroot.ZivotAndrews(series, **arch_test_settings) # Zivot-Andrews test
# generate output table # generate output table
adf_dict = format_test_output(test_name='ADF', test_res=adf, H0_unit_root=True) adf_dict = format_test_output(test_name="ADF", test_res=adf, H0_unit_root=True)
kpss_dict = format_test_output(test_name='KPSS', test_res=kpss, H0_unit_root=False) kpss_dict = format_test_output(test_name="KPSS", test_res=kpss, H0_unit_root=False)
pp_dict = format_test_output(test_name='Philips Perron', test_res=pp, H0_unit_root=True) pp_dict = format_test_output(
adfgls_dict = format_test_output(test_name='ADF GLS', test_res=adfgls, H0_unit_root=True) test_name="Philips Perron", test_res=pp, H0_unit_root=True
za_dict = format_test_output(test_name='Zivot-Andrews', test_res=za, H0_unit_root=True) )
adfgls_dict = format_test_output(
test_name="ADF GLS", test_res=adfgls, H0_unit_root=True
)
za_dict = format_test_output(
test_name="Zivot-Andrews", test_res=za, H0_unit_root=True
)
test_dict = {'ADF': adf_dict, 'KPSS': kpss_dict, 'PP': pp_dict, 'ADF GLS': adfgls_dict, 'ZA': za_dict} test_dict = {
test_sum = pd.DataFrame.from_dict(test_dict, orient='index').reset_index(drop=True) "ADF": adf_dict,
"KPSS": kpss_dict,
"PP": pp_dict,
"ADF GLS": adfgls_dict,
"ZA": za_dict,
}
test_sum = pd.DataFrame.from_dict(test_dict, orient="index").reset_index(drop=True)
# decision based on the majority rule # decision based on the majority rule
if test_sum.shape[0] > 0: if test_sum.shape[0] > 0:
@@ -176,9 +185,9 @@ def unit_root_test_wrapper(series, lags=None):
ratio = 1 # all tests fail, assume the series is stationary ratio = 1 # all tests fail, assume the series is stationary
# Majority rule. If the ratio is exactly 0.5, assume the series in non-stationary. # Majority rule. If the ratio is exactly 0.5, assume the series in non-stationary.
stationary = 'YES' if (ratio > 0.5) else 'NO' stationary = "YES" if (ratio > 0.5) else "NO"
out = {'summary': test_sum, 'stationary': stationary} out = {"summary": test_sum, "stationary": stationary}
return out return out
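
A minimal usage sketch (not part of the diff), assuming the helper tests above (adf_test, kpss_test, etc.) are in scope: run the whole battery on a synthetic random walk, which the majority vote should flag as non-stationary.

import numpy as np
import pandas as pd

rng = np.random.default_rng(42)
random_walk = pd.Series(rng.normal(size=500).cumsum())  # non-stationary by construction

result = unit_root_test_wrapper(random_walk)
print(result["stationary"])  # expected: "NO"
print(result["summary"])  # one row per test: statistic, 5% critical value, p-value, lags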
@@ -196,10 +205,12 @@ def ts_train_test_split(df_input, n, time_colname, ts_id_colnames=None):
        ts_id_colnames = []
    ts_id_colnames_original = ts_id_colnames.copy()
    if len(ts_id_colnames) == 0:
        ts_id_colnames = ["Grain"]
        df_input[ts_id_colnames[0]] = "dummy"
    # Sort by ascending time
    df_grouped = df_input.sort_values(time_colname).groupby(
        ts_id_colnames, group_keys=False
    )
    df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-n])
    df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-n:])
    # drop group column name if it was not originally provided
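
A usage sketch (not from the diff), assuming the function returns the train/test frames built above in that order; here the last two observations of each series are held out.

import pandas as pd

df = pd.DataFrame(
    {
        "date": list(pd.date_range("2021-01-01", periods=4)) * 2,
        "series_id": ["a"] * 4 + ["b"] * 4,
        "y": range(8),
    }
)
train, test = ts_train_test_split(df, n=2, time_colname="date", ts_id_colnames=["series_id"])
print(len(train), len(test))  # 4 4: two points per series go to the test set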
@@ -221,30 +232,32 @@ def compute_metrics(fcst_df, metric_name=None, ts_id_colnames=None):
    if ts_id_colnames is None:
        ts_id_colnames = []
    if len(ts_id_colnames) == 0:
        ts_id_colnames = ["TS_ID"]
        fcst_df[ts_id_colnames[0]] = "dummy"
    metrics_list = []
    for grain, df in fcst_df.groupby(ts_id_colnames):
        try:
            scores = scoring.score_regression(
                y_test=df["actual_level"],
                y_pred=df["predicted_level"],
                metrics=list(constants.Metric.SCALAR_REGRESSION_SET),
            )
        except BaseException:
            msg = "{}: metrics calculation failed.".format(grain)
            print(msg)
            scores = {}
        one_grain_metrics_df = pd.DataFrame(
            list(scores.items()), columns=["metric_name", "metric"]
        ).sort_values(["metric_name"])
        one_grain_metrics_df.reset_index(inplace=True, drop=True)
        if len(ts_id_colnames) < 2:
            one_grain_metrics_df["grain"] = ts_id_colnames[0]
        else:
            one_grain_metrics_df["grain"] = "|".join(list(grain))
        metrics_list.append(one_grain_metrics_df)
    # collect into a data frame
    grain_metrics = pd.concat(metrics_list)
    if metric_name is not None:
        grain_metrics = grain_metrics.query("metric_name == @metric_name")
    return grain_metrics
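
A usage sketch (not from the diff) with a toy forecast frame; the column names match those the function reads, and the metric name is assumed to be a member of SCALAR_REGRESSION_SET.

import pandas as pd

fcst_df = pd.DataFrame(
    {
        "TS_ID": ["a", "a", "b", "b"],
        "actual_level": [10.0, 12.0, 5.0, 7.0],
        "predicted_level": [11.0, 12.5, 4.5, 7.5],
    }
)
per_series_mape = compute_metrics(
    fcst_df,
    metric_name="mean_absolute_percentage_error",  # assumed member of SCALAR_REGRESSION_SET
    ts_id_colnames=["TS_ID"],
)
print(per_series_mape)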


@@ -3,36 +3,47 @@ import shutil
from azureml.core import ScriptRunConfig


def run_remote_inference(
    test_experiment,
    compute_target,
    train_run,
    test_dataset,
    target_column_name,
    inference_folder="./forecast",
):
    # Create local directory to copy the model.pkl and forecasting_script.py files into.
    # These files will be uploaded to and executed on the compute instance.
    os.makedirs(inference_folder, exist_ok=True)
    shutil.copy("forecasting_script.py", inference_folder)
    train_run.download_file(
        "outputs/model.pkl", os.path.join(inference_folder, "model.pkl")
    )
    inference_env = train_run.get_environment()
    config = ScriptRunConfig(
        source_directory=inference_folder,
        script="forecasting_script.py",
        arguments=[
            "--target_column_name",
            target_column_name,
            "--test_dataset",
            test_dataset.as_named_input(test_dataset.name),
        ],
        compute_target=compute_target,
        environment=inference_env,
    )
    run = test_experiment.submit(
        config,
        tags={
            "training_run_id": train_run.id,
            "run_algorithm": train_run.properties["run_algorithm"],
            "valid_score": train_run.properties["score"],
            "primary_metric": train_run.properties["primary_metric"],
        },
    )
    run.log("run_algorithm", run.tags["run_algorithm"])
    return run
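
A hypothetical call (not from the diff), assuming a completed AutoML training run, a registered test dataset, and an experiment and compute target already in hand; all names here are placeholders.

remote_run = run_remote_inference(
    test_experiment=test_experiment,  # azureml.core.Experiment used for scoring runs
    compute_target=compute_target,  # AmlCompute cluster
    train_run=best_run,  # best child run from the AutoML experiment
    test_dataset=test_dataset,  # registered TabularDataset with the test rows
    target_column_name="demand",  # hypothetical target column
)
remote_run.wait_for_completion(show_output=False)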


@@ -0,0 +1,18 @@
---
page_type: sample
languages:
- python
products:
- azure-machine-learning
description: Notebook showing how to use Azure Machine Learning pipelines to do Batch Predictions with an Image Classification model trained using AutoML.
---
# Batch Scoring with an Image Classification Model
- Dataset: Toy dataset with images of products found in a fridge
- **[Jupyter Notebook](auto-ml-image-classification-multiclass-batch-scoring.ipynb)**
- register an Image Classification Multi-Class model already trained using AutoML
- create an Inference Dataset
- provision compute targets and create a Batch Scoring script
- use ParallelRunStep to do batch scoring
- build, run, and publish a pipeline
- enable a REST endpoint for the pipeline


@@ -0,0 +1,950 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License.\n",
"\n",
"# Batch Predictions for an Image Classification model trained using AutoML\n",
"In this notebook, we go over how you can use [Azure Machine Learning pipelines](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-pipeline-batch-scoring-classification) to run a batch scoring image classification job.\n",
"\n",
"**Please note:** For this notebook you can use an existing image classification model trained using AutoML for Images or use the simple model training we included below for convenience. For detailed instructions on how to train an image classification model with AutoML, please refer to the official [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models) and to the [image classification multiclass notebook](https://github.com/Azure/azureml-examples/blob/main/python-sdk/tutorials/automl-with-azureml/image-classification-multiclass/auto-ml-image-classification-multiclass.ipynb)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Important:** This feature is currently in public preview. This preview version is provided without a service-level agreement. Certain features might not be supported or might have constrained capabilities. For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/en-us/support/legal/preview-supplemental-terms/)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Environment Setup\n",
"Please follow the [\"Setup a new conda environment\"](https://github.com/Azure/azureml-examples/tree/main/python-sdk/tutorials/automl-with-azureml#3-setup-a-new-conda-environment) instructions to get started."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"\n",
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK.\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK.\")\n",
"assert (\n",
" azureml.core.VERSION >= \"1.35\"\n",
"), \"Please upgrade the Azure ML SDK by running '!pip install --upgrade azureml-sdk' then restart the kernel.\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## You will perform the following tasks:\n",
"\n",
"* Register a Model already trained using AutoML for Image Classification.\n",
"* Create an Inference Dataset.\n",
"* Provision compute targets and create a Batch Scoring script.\n",
"* Use ParallelRunStep to do batch scoring.\n",
"* Build, run, and publish a pipeline.\n",
"* Enable a REST endpoint for the pipeline."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Workspace setup\n",
"\n",
"An [Azure ML Workspace](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#workspace) is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an Azure ML Workspace coordinates storage, databases, and compute resources providing added functionality for machine learning experimentation, deployment, inference, and the monitoring of deployed models.\n",
"\n",
"Create an Azure ML Workspace within your Azure subscription or load an existing workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.workspace import Workspace\n",
"\n",
"ws = Workspace.from_config()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Workspace default datastore is used to store inference input images and outputs"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def_data_store = ws.get_default_datastore()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Compute target setup\n",
"You will need to provide a [Compute Target](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#computes) that will be used for your AutoML model training. AutoML models for image tasks require [GPU SKUs](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes-gpu) such as the ones from the NC, NCv2, NCv3, ND, NDv2 and NCasT4 series. We recommend using the NCsv3-series (with v100 GPUs) for faster training. Using a compute target with a multi-GPU VM SKU will leverage the multiple GPUs to speed up training. Additionally, setting up a compute target with multiple nodes will allow for faster model training by leveraging parallelism, when tuning hyperparameters for your model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
"\n",
"cluster_name = \"gpu-cluster-nc6\"\n",
"\n",
"try:\n",
" compute_target = ws.compute_targets[cluster_name]\n",
" print(\"Found existing compute target.\")\n",
"except KeyError:\n",
" print(\"Creating a new compute target...\")\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"Standard_NC6\",\n",
" idle_seconds_before_scaledown=600,\n",
" min_nodes=0,\n",
" max_nodes=4,\n",
" )\n",
" compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n",
"# Can poll for a minimum number of nodes and for a specific timeout.\n",
"# If no min_node_count is provided, it will use the scale settings for the cluster.\n",
"compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train an Image Classification model\n",
"\n",
"In this section we will do a quick model train to use for the batch scoring. For a datailed example on how to train an image classification model, please refer to the official [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models) or to the [image classification multiclass notebook](https://github.com/Azure/azureml-examples/blob/main/python-sdk/tutorials/automl-with-azureml/image-classification-multiclass/auto-ml-image-classification-multiclass.ipynb). If you already have a model trained in the same workspace, you can skip to section [\"Create data objects\"](#Create-data-objects)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Experiment Setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment_name = \"automl-image-batchscoring\"\n",
"experiment = Experiment(ws, name=experiment_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Download dataset with input Training Data\n",
"\n",
"All images in this notebook are hosted in [this repository](https://github.com/microsoft/computervision-recipes) and are made available under the [MIT license](https://github.com/microsoft/computervision-recipes/blob/master/LICENSE)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import urllib\n",
"from zipfile import ZipFile\n",
"\n",
"# download data\n",
"download_url = \"https://cvbp-secondary.z19.web.core.windows.net/datasets/image_classification/fridgeObjects.zip\"\n",
"data_file = \"./fridgeObjects.zip\"\n",
"urllib.request.urlretrieve(download_url, filename=data_file)\n",
"\n",
"# extract files\n",
"with ZipFile(data_file, \"r\") as zip:\n",
" print(\"extracting files...\")\n",
" zip.extractall()\n",
" print(\"done\")\n",
"# delete zip file\n",
"os.remove(data_file)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Convert the downloaded data to JSONL"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import os\n",
"\n",
"src = \"./fridgeObjects/\"\n",
"train_validation_ratio = 5\n",
"\n",
"# Retrieving default datastore that got automatically created when we setup a workspace\n",
"workspaceblobstore = ws.get_default_datastore().name\n",
"\n",
"# Path to the training and validation files\n",
"train_annotations_file = os.path.join(src, \"train_annotations.jsonl\")\n",
"validation_annotations_file = os.path.join(src, \"validation_annotations.jsonl\")\n",
"\n",
"# sample json line dictionary\n",
"json_line_sample = {\n",
" \"image_url\": \"AmlDatastore://\"\n",
" + workspaceblobstore\n",
" + \"/\"\n",
" + os.path.basename(os.path.dirname(src)),\n",
" \"label\": \"\",\n",
"}\n",
"\n",
"index = 0\n",
"# Scan each sub directary and generate jsonl line\n",
"with open(train_annotations_file, \"w\") as train_f:\n",
" with open(validation_annotations_file, \"w\") as validation_f:\n",
" for className in os.listdir(src):\n",
" subDir = src + className\n",
" if not os.path.isdir(subDir):\n",
" continue\n",
" # Scan each sub directary\n",
" print(\"Parsing \" + subDir)\n",
" for image in os.listdir(subDir):\n",
" json_line = dict(json_line_sample)\n",
" json_line[\"image_url\"] += f\"/{className}/{image}\"\n",
" json_line[\"label\"] = className\n",
"\n",
" if index % train_validation_ratio == 0:\n",
" # validation annotation\n",
" validation_f.write(json.dumps(json_line) + \"\\n\")\n",
" else:\n",
" # train annotation\n",
" train_f.write(json.dumps(json_line) + \"\\n\")\n",
" index += 1"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Upload the JSONL file and images to Datastore"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Retrieving default datastore that got automatically created when we setup a workspace\n",
"ds = ws.get_default_datastore()\n",
"ds.upload(src_dir=\"./fridgeObjects\", target_path=\"fridgeObjects\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Create and register datasets in workspace"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"from azureml.data import DataType\n",
"\n",
"# get existing training dataset\n",
"training_dataset_name = \"fridgeObjectsTrainingDataset\"\n",
"if training_dataset_name in ws.datasets:\n",
" training_dataset = ws.datasets.get(training_dataset_name)\n",
" print(\"Found the training dataset\", training_dataset_name)\n",
"else:\n",
" # create training dataset\n",
" training_dataset = Dataset.Tabular.from_json_lines_files(\n",
" path=ds.path(\"fridgeObjects/train_annotations.jsonl\"),\n",
" set_column_types={\"image_url\": DataType.to_stream(ds.workspace)},\n",
" )\n",
" training_dataset = training_dataset.register(\n",
" workspace=ws, name=training_dataset_name\n",
" )\n",
"# get existing validation dataset\n",
"validation_dataset_name = \"fridgeObjectsValidationDataset\"\n",
"if validation_dataset_name in ws.datasets:\n",
" validation_dataset = ws.datasets.get(validation_dataset_name)\n",
" print(\"Found the validation dataset\", validation_dataset_name)\n",
"else:\n",
" # create validation dataset\n",
" validation_dataset = Dataset.Tabular.from_json_lines_files(\n",
" path=ds.path(\"fridgeObjects/validation_annotations.jsonl\"),\n",
" set_column_types={\"image_url\": DataType.to_stream(ds.workspace)},\n",
" )\n",
" validation_dataset = validation_dataset.register(\n",
" workspace=ws, name=validation_dataset_name\n",
" )\n",
"print(\"Training dataset name: \" + training_dataset.name)\n",
"print(\"Validation dataset name: \" + validation_dataset.name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Submit training 1 training run with default hyperparameters"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.shared.constants import ImageTask\n",
"from azureml.train.automl import AutoMLImageConfig\n",
"from azureml.train.hyperdrive import GridParameterSampling, choice\n",
"\n",
"image_config_vit = AutoMLImageConfig(\n",
" task=ImageTask.IMAGE_CLASSIFICATION,\n",
" compute_target=compute_target,\n",
" training_data=training_dataset,\n",
" validation_data=validation_dataset,\n",
" hyperparameter_sampling=GridParameterSampling({\"model_name\": choice(\"vitb16r224\")}),\n",
" iterations=1,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run = experiment.submit(image_config_vit)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run.wait_for_completion(wait_post_processing=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create data objects\n",
"\n",
"When building pipelines, `Dataset` objects are used for reading data from workspace datastores, and `PipelineData` objects are used for transferring intermediate data between pipeline steps.\n",
"\n",
"This batch scoring example only uses one pipeline step, but in use-cases with multiple steps, the typical flow will include:\n",
"\n",
"1. Using `Dataset` objects as inputs to fetch raw data, performing some transformations, then output a `PipelineData` object. \n",
"1. Use the previous step's `PipelineData` **output object** as an **input object**, repeated for subsequent steps.\n",
"\n",
"For this scenario you create `Dataset` objects corresponding to the datastore directories for the input images. You also create a `PipelineData` object for the batch scoring output data. An object reference in the `outputs` array becomes available as an **input** for a subsequent pipeline step, for scenarios where there is more than one step. In this case we are just going to build a single step pipeline.\n",
"\n",
"It is assumed that an image classification training run was already performed in this workspace and the files are already in the datastore. If this is not the case, please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models) to know how to train an image classification model with AutoML.\n",
"\n",
"All images in this notebook are hosted in [this repository](https://github.com/microsoft/computervision-recipes) and are made available under the [MIT license](https://github.com/microsoft/computervision-recipes/blob/master/LICENSE)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.dataset import Dataset\n",
"from azureml.pipeline.core import PipelineData\n",
"\n",
"input_images = Dataset.File.from_files((def_data_store, \"fridgeObjects/**/*.jpg\"))\n",
"\n",
"output_dir = PipelineData(name=\"scores\", datastore=def_data_store)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Next, we need to register the input datasets for batch scoring with the workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"input_images = input_images.register(\n",
" workspace=ws, name=\"fridgeObjects_scoring_images\", create_new_version=True\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Retrieve the environment and metrics from the training run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.experiment import Experiment\n",
"from azureml.core import Run\n",
"\n",
"experiment_name = \"automl-image-batchscoring\"\n",
"# If your model was not trained with this notebook, replace the id below\n",
"# with the run id of the child training run (i.e., the one ending with HD_0)\n",
"training_run_id = automl_image_run.id + \"_HD_0\"\n",
"exp = Experiment(ws, experiment_name)\n",
"training_run = Run(exp, training_run_id)\n",
"\n",
"# The below will give only the requested metric\n",
"metrics = training_run.get_metrics(\"accuracy\")\n",
"best_metric = max(metrics[\"accuracy\"])\n",
"print(\"best_metric:\", best_metric)\n",
"\n",
"# Retrieve the training environment\n",
"env = training_run.get_environment()\n",
"print(env)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Register model with metric and environment tags\n",
"\n",
"Now you register the model to your workspace, which allows you to easily retrieve it in the pipeline process. In the `register()` static function, the `model_name` parameter is the key you use to locate your model throughout the SDK.\n",
"Tag the model with the metrics and the environment used to train the model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.model import Model\n",
"\n",
"tags = dict()\n",
"tags[\"accuracy\"] = best_metric\n",
"tags[\"env_name\"] = env.name\n",
"tags[\"env_version\"] = env.version\n",
"\n",
"model_name = \"fridgeObjectsClassifier\"\n",
"model = training_run.register_model(\n",
" model_name=model_name, model_path=\"train_artifacts\", tags=tags\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# List the models from the workspace\n",
"models = Model.list(ws, name=model_name, latest=True)\n",
"print(model.name)\n",
"print(model.tags)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Write a scoring script"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To do the scoring, you create a batch scoring script `batch_scoring.py`, and write it to the scripts folder in current directory. The script takes a minibatch of input images, applies the classification model, and outputs the predictions to a results file.\n",
"\n",
"The script `batch_scoring.py` takes the following parameters, which get passed from the `ParallelRunStep` that you create later:\n",
"\n",
"- `--model_name`: the name of the model being used\n",
"\n",
"While creating the batch scoring script, refer to the scoring scripts generated under the outputs folder of the Automl training runs. This will help to identify the right model settings to be used in the batch scoring script init method while loading the model.\n",
"Note: The batch scoring script we generate in the subsequent step is different from the scoring script generated by the training runs in the below screenshot. We refer to it just to identify the right model settings to be used in the batch scoring script.\n",
"\n",
"![Training run outputs](ui_outputs.PNG \"Training run outputs\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# View the batch scoring script. Use the model settings as appropriate for your model.\n",
"with open(\"./scripts/batch_scoring.py\", \"r\") as f:\n",
" print(f.read())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Build and run the pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create the parallel-run configuration to wrap the inference script\n",
"Create the pipeline run configuration specifying the script, environment configuration, and parameters. Specify the compute target you already attached to your workspace as the target of execution of the script. This will set the run configuration of the ParallelRunStep we will define next.\n",
"\n",
"Refer this [site](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/machine-learning-pipelines/parallel-run) for more details on ParallelRunStep of Azure Machine Learning Pipelines."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.steps import ParallelRunConfig\n",
"\n",
"parallel_run_config = ParallelRunConfig(\n",
" environment=env,\n",
" entry_script=\"batch_scoring.py\",\n",
" source_directory=\"scripts\",\n",
" output_action=\"append_row\",\n",
" append_row_file_name=\"parallel_run_step.txt\",\n",
" mini_batch_size=\"20\", # Num files to process in one call\n",
" error_threshold=1,\n",
" compute_target=compute_target,\n",
" process_count_per_node=2,\n",
" node_count=1,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create the pipeline step\n",
"\n",
"A pipeline step is an object that encapsulates everything you need for running a pipeline including:\n",
"\n",
"* environment and dependency settings\n",
"* the compute resource to run the pipeline on\n",
"* input and output data, and any custom parameters\n",
"* reference to a script to run during the step\n",
"\n",
"There are multiple classes that inherit from the parent class [`PipelineStep`](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/?view=azure-ml-py) to assist with building a step using certain frameworks and stacks. In this example, you use the [`ParallelRunStep`](https://docs.microsoft.com/en-us/python/api/azureml-contrib-pipeline-steps/azureml.contrib.pipeline.steps.parallelrunstep?view=azure-ml-py) class to define your step logic using a scoring script. `ParallelRunStep` executes the script in a distributed fashion.\n",
"\n",
"The pipelines infrastructure uses the `ArgumentParser` class to pass parameters into pipeline steps. For example, in the code below the first argument `--model_name` is given the property identifier `model_name`. In the `main()` function, this property is accessed using `Model.get_model_path(args.model_name)`.\n",
"\n",
"Note: The pipeline in this tutorial only has one step and writes the output to a file, but for multi-step pipelines, you also use `ArgumentParser` to define a directory to write output data for input to subsequent steps. See the [notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb) for an example of passing data between multiple pipeline steps using the `ArgumentParser` design pattern."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.steps import ParallelRunStep\n",
"from datetime import datetime\n",
"\n",
"parallel_step_name = \"batchscoring-\" + datetime.now().strftime(\"%Y%m%d%H%M\")\n",
"\n",
"arguments = [\"--model_name\", model_name]\n",
"\n",
"# Specify inference batch_size, otherwise uses default value. (This is different from the mini_batch_size above)\n",
"# NOTE: Large batch sizes may result in OOM errors.\n",
"# arguments = arguments + [\"--batch_size\", \"20\"]\n",
"\n",
"batch_score_step = ParallelRunStep(\n",
" name=parallel_step_name,\n",
" inputs=[input_images.as_named_input(\"input_images\")],\n",
" output=output_dir,\n",
" arguments=arguments,\n",
" parallel_run_config=parallel_run_config,\n",
" allow_reuse=False,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For a list of all classes for different step types, see the [steps package](https://docs.microsoft.com/python/api/azureml-pipeline-steps/azureml.pipeline.steps?view=azure-ml-py)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Run the pipeline\n",
"\n",
"Now you run the pipeline. First create a `Pipeline` object with your workspace reference and the pipeline step you created. The `steps` parameter is an array of steps, and in this case, there is only one step for batch scoring. To build pipelines with multiple steps, you place the steps in order in this array.\n",
"\n",
"Next use the `Experiment.submit()` function to submit the pipeline for execution. You also specify the custom parameter `param_batch_size`. The `wait_for_completion` function will output logs during the pipeline build process, which allows you to see current progress.\n",
"\n",
"Note: The first pipeline run takes roughly **15 minutes**, as all dependencies must be downloaded, a Docker image is created, and the Python environment is provisioned/created. Running it again takes significantly less time as those resources are reused. However, total run time depends on the workload of your scripts and processes running in each pipeline step."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"from azureml.pipeline.core import Pipeline\n",
"\n",
"pipeline = Pipeline(workspace=ws, steps=[batch_score_step])\n",
"pipeline_run = Experiment(ws, \"batch_scoring_automl_image\").submit(pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# This will output information of the pipeline run, including the link to the details page of portal.\n",
"pipeline_run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Wait the run for completion and show output log to console\n",
"pipeline_run.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Download and review output"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import tempfile\n",
"import os\n",
"\n",
"batch_run = pipeline_run.find_step_run(batch_score_step.name)[0]\n",
"batch_output = batch_run.get_output_data(output_dir.name)\n",
"\n",
"target_dir = tempfile.mkdtemp()\n",
"batch_output.download(local_path=target_dir)\n",
"result_file = os.path.join(\n",
" target_dir, batch_output.path_on_datastore, parallel_run_config.append_row_file_name\n",
")\n",
"result_file\n",
"\n",
"# Print the first five lines of the output\n",
"with open(result_file) as f:\n",
" for x in range(5):\n",
" print(next(f))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Choose a random file for visualization"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import random\n",
"import json\n",
"\n",
"with open(result_file, \"r\") as f:\n",
" contents = f.readlines()\n",
"rand_file = contents[random.randrange(len(contents))]\n",
"prediction = json.loads(rand_file)\n",
"print(prediction[\"filename\"])\n",
"print(prediction[\"probs\"])\n",
"print(prediction[\"labels\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Download the image file from the datastore\n",
"path = (\n",
" \"fridgeObjects\"\n",
" + \"/\"\n",
" + prediction[\"filename\"].split(\"/\")[-2]\n",
" + \"/\"\n",
" + prediction[\"filename\"].split(\"/\")[-1]\n",
")\n",
"path_on_datastore = def_data_store.path(path)\n",
"single_image_ds = Dataset.File.from_files(path=path_on_datastore, validate=False)\n",
"image = single_image_ds.download()[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib.image as mpimg\n",
"from PIL import Image\n",
"import numpy as np\n",
"import json\n",
"\n",
"IMAGE_SIZE = (18, 12)\n",
"plt.figure(figsize=IMAGE_SIZE)\n",
"img_np = mpimg.imread(image)\n",
"img = Image.fromarray(img_np.astype(\"uint8\"), \"RGB\")\n",
"x, y = img.size\n",
"\n",
"fig, ax = plt.subplots(1, figsize=(15, 15))\n",
"# Display the image\n",
"ax.imshow(img_np)\n",
"\n",
"label_index = np.argmax(prediction[\"probs\"])\n",
"label = prediction[\"labels\"][label_index]\n",
"conf_score = prediction[\"probs\"][label_index]\n",
"\n",
"display_text = \"{} ({})\".format(label, round(conf_score, 3))\n",
"print(display_text)\n",
"\n",
"color = \"red\"\n",
"plt.text(30, 30, display_text, color=color, fontsize=30)\n",
"\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Publish and run from REST endpoint"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Run the following code to publish the pipeline to your workspace. In your workspace in the portal, you can see metadata for the pipeline including run history and durations. You can also run the pipeline manually from the portal.\n",
"\n",
"Additionally, publishing the pipeline enables a REST endpoint to rerun the pipeline from any HTTP library on any platform."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"published_pipeline = pipeline_run.publish_pipeline(\n",
" name=\"automl-image-batch-scoring\",\n",
" description=\"Batch scoring using Automl for Image\",\n",
" version=\"1.0\",\n",
")\n",
"\n",
"published_pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To run the pipeline from the REST endpoint, you first need an OAuth2 Bearer-type authentication header. This example uses interactive authentication for illustration purposes, but for most production scenarios requiring automated or headless authentication, use service principal authentication as [described in this notebook](https://aka.ms/pl-restep-auth).\n",
"\n",
"Service principal authentication involves creating an **App Registration** in **Azure Active Directory**, generating a client secret, and then granting your service principal **role access** to your machine learning workspace. You then use the [`ServicePrincipalAuthentication`](https://docs.microsoft.com/python/api/azureml-core/azureml.core.authentication.serviceprincipalauthentication?view=azure-ml-py) class to manage your auth flow.\n",
"\n",
"Both `InteractiveLoginAuthentication` and `ServicePrincipalAuthentication` inherit from `AbstractAuthentication`, and in both cases you use the `get_authentication_header()` function in the same way to fetch the header."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.authentication import InteractiveLoginAuthentication\n",
"\n",
"interactive_auth = InteractiveLoginAuthentication()\n",
"auth_header = interactive_auth.get_authentication_header()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Get the REST url from the `endpoint` property of the published pipeline object. You can also find the REST url in your workspace in the portal. Build an HTTP POST request to the endpoint, specifying your authentication header. Additionally, add a JSON payload object with the experiment name and the batch size parameter. As a reminder, the `process_count_per_node` is passed through to `ParallelRunStep` because you defined it is defined as a `PipelineParameter` object in the step configuration.\n",
"\n",
"Make the request to trigger the run. Access the `Id` key from the response dictionary to get the value of the run id."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"\n",
"rest_endpoint = published_pipeline.endpoint\n",
"response = requests.post(\n",
" rest_endpoint,\n",
" headers=auth_header,\n",
" json={\n",
" \"ExperimentName\": \"batch_scoring\",\n",
" \"ParameterAssignments\": {\"process_count_per_node\": 2},\n",
" },\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"try:\n",
" response.raise_for_status()\n",
"except Exception:\n",
" raise Exception(\n",
" \"Received bad response from the endpoint: {}\\n\"\n",
" \"Response Code: {}\\n\"\n",
" \"Headers: {}\\n\"\n",
" \"Content: {}\".format(\n",
" rest_endpoint, response.status_code, response.headers, response.content\n",
" )\n",
" )\n",
"run_id = response.json().get(\"Id\")\n",
"print(\"Submitted pipeline run: \", run_id)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Use the run id to monitor the status of the new run. This will take another 10-15 min to run and will look similar to the previous pipeline run, so if you don't need to see another pipeline run, you can skip watching the full output."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core.run import PipelineRun\n",
"\n",
"published_pipeline_run = PipelineRun(ws.experiments[\"batch_scoring\"], run_id)\n",
"published_pipeline_run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Wait the run for completion and show output log to console\n",
"published_pipeline_run.wait_for_completion(show_output=True)"
]
}
],
"metadata": {
"authors": [
{
"name": [
"sanpil",
"trmccorm",
"pansav"
]
}
],
"categories": [
"tutorials"
],
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
},
"metadata": {
"interpreter": {
"hash": "0f25b6eb4724eea488a4edd67dd290abce7d142c09986fc811384b5aebc0585a"
}
},
"msauthor": "trbye"
},
"nbformat": 4,
"nbformat_minor": 4
}


@@ -0,0 +1,69 @@
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.
import os
import argparse
import json
from azureml.core.model import Model
from azureml.automl.core.shared import logging_utilities

try:
    from azureml.automl.dnn.vision.common.logging_utils import get_logger
    from azureml.automl.dnn.vision.common.model_export_utils import (
        load_model,
        run_inference_batch,
    )
    from azureml.automl.dnn.vision.classification.inference.score import (
        _score_with_model,
    )
    from azureml.automl.dnn.vision.common.utils import _set_logging_parameters
except ImportError:
    from azureml.contrib.automl.dnn.vision.common.logging_utils import get_logger
    from azureml.contrib.automl.dnn.vision.common.model_export_utils import (
        load_model,
        run_inference_batch,
    )
    from azureml.contrib.automl.dnn.vision.classification.inference.score import (
        _score_with_model,
    )
    from azureml.contrib.automl.dnn.vision.common.utils import _set_logging_parameters

TASK_TYPE = "image-classification"
logger = get_logger("azureml.automl.core.scoring_script_images")


def init():
    global model
    global batch_size

    # Set up logging
    _set_logging_parameters(TASK_TYPE, {})

    parser = argparse.ArgumentParser(
        description="Retrieve model_name and batch_size from arguments."
    )
    parser.add_argument("--model_name", dest="model_name", required=True)
    parser.add_argument("--batch_size", dest="batch_size", type=int, required=False)
    args, _ = parser.parse_known_args()
    batch_size = args.batch_size

    model_path = os.path.join(Model.get_model_path(args.model_name), "model.pt")
    print(model_path)
    try:
        logger.info("Loading model from path: {}.".format(model_path))
        model_settings = {}
        model = load_model(TASK_TYPE, model_path, **model_settings)
        logger.info("Loading successful.")
    except Exception as e:
        logging_utilities.log_traceback(e, logger)
        raise


def run(mini_batch):
    logger.info("Running inference.")
    result = run_inference_batch(model, mini_batch, _score_with_model, batch_size)
    logger.info("Finished inferencing.")
    return result
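
For orientation (not part of the file): ParallelRunStep calls init() once per worker process and then calls run() repeatedly with mini-batches of file paths. A rough local smoke test could therefore look like the sketch below; the paths are hypothetical, and Model.get_model_path() still needs a resolvable registered model (e.g., an Azure ML run context).

# Hypothetical local check; in the pipeline, sys.argv is populated by ParallelRunStep.
import sys

sys.argv = ["batch_scoring.py", "--model_name", "fridgeObjectsClassifier"]
init()
predictions = run(["./fridgeObjects/can/1.jpg", "./fridgeObjects/can/2.jpg"])
print(predictions)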

Binary file not shown (new image, 258 KiB).

@@ -0,0 +1,15 @@
---
page_type: sample
languages:
- python
products:
- azure-machine-learning
description: Notebook showing how to use AutoML for training an Image Classification Multi-Class model. We will use a small dataset to train the model, demonstrate how you can tune hyperparameters of the model to optimize model performance and deploy the model to use in inference scenarios.
---
# Image Classification Multi-Class using AutoML for Images
- Dataset: Toy dataset with images of products found in a fridge
- **[Jupyter Notebook](auto-ml-image-classification-multiclass.ipynb)**
- train an Image Classification Multi-Class model using AutoML
- tune hyperparameters of the model to optimize model performance
- deploy the model to use in inference scenarios


@@ -0,0 +1,744 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License.\n",
"\n",
"# Training an Image Classification Multi-Class model using AutoML\n",
"In this notebook, we go over how you can use AutoML for training an Image Classification Multi-Class model. We will use a small dataset to train the model, demonstrate how you can tune hyperparameters of the model to optimize model performance and deploy the model to use in inference scenarios. For detailed information please refer to the [documentation of AutoML for Images](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![img](example_image_classification_multiclass_predictions.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Important:** This feature is currently in public preview. This preview version is provided without a service-level agreement. Certain features might not be supported or might have constrained capabilities. For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/en-us/support/legal/preview-supplemental-terms/)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Environment Setup\n",
"Please follow the [\"Setup a new conda environment\"](https://github.com/Azure/azureml-examples/tree/main/python-sdk/tutorials/automl-with-azureml#3-setup-a-new-conda-environment) instructions to get started."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"\n",
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK.\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK.\")\n",
"assert (\n",
" azureml.core.VERSION >= \"1.35\"\n",
"), \"Please upgrade the Azure ML SDK by running '!pip install --upgrade azureml-sdk' then restart the kernel.\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Workspace setup\n",
"In order to train and deploy models in Azure ML, you will first need to set up a workspace.\n",
"\n",
"An [Azure ML Workspace](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#workspace) is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an Azure ML Workspace coordinates storage, databases, and compute resources providing added functionality for machine learning experimentation, deployment, inference, and the monitoring of deployed models.\n",
"\n",
"Create an Azure ML Workspace within your Azure subscription or load an existing workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.workspace import Workspace\n",
"\n",
"ws = Workspace.from_config()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Compute target setup\n",
"You will need to provide a [Compute Target](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#computes) that will be used for your AutoML model training. AutoML models for image tasks require [GPU SKUs](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes-gpu) such as the ones from the NC, NCv2, NCv3, ND, NDv2 and NCasT4 series. We recommend using the NCsv3-series (with v100 GPUs) for faster training. Using a compute target with a multi-GPU VM SKU will leverage the multiple GPUs to speed up training. Additionally, setting up a compute target with multiple nodes will allow for faster model training by leveraging parallelism, when tuning hyperparameters for your model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
"\n",
"cluster_name = \"gpu-cluster-nc6\"\n",
"\n",
"try:\n",
" compute_target = ws.compute_targets[cluster_name]\n",
" print(\"Found existing compute target.\")\n",
"except KeyError:\n",
" print(\"Creating a new compute target...\")\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"Standard_NC6\",\n",
" idle_seconds_before_scaledown=600,\n",
" min_nodes=0,\n",
" max_nodes=4,\n",
" )\n",
" compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n",
"# Can poll for a minimum number of nodes and for a specific timeout.\n",
"# If no min_node_count is provided, it will use the scale settings for the cluster.\n",
"compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Experiment Setup\n",
"Create an [Experiment](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#experiments) in your workspace to track your model training runs"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment_name = \"automl-image-multiclass\"\n",
"experiment = Experiment(ws, name=experiment_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Dataset with input Training Data\n",
"\n",
"In order to generate models for computer vision, you will need to bring in labeled image data as input for model training in the form of an [AzureML Tabular Dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset). You can either use a dataset that you have exported from a [Data Labeling](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-label-data) project, or create a new Tabular Dataset with your labeled training data."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In this notebook, we use a toy dataset called Fridge Objects, which consists of 134 images of 4 classes of beverage container {can, carton, milk bottle, water bottle} photos taken on different backgrounds.\n",
"\n",
"All images in this notebook are hosted in [this repository](https://github.com/microsoft/computervision-recipes) and are made available under the [MIT license](https://github.com/microsoft/computervision-recipes/blob/master/LICENSE).\n",
"\n",
"We first download and unzip the data locally."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import urllib\n",
"from zipfile import ZipFile\n",
"\n",
"# download data\n",
"download_url = \"https://cvbp-secondary.z19.web.core.windows.net/datasets/image_classification/fridgeObjects.zip\"\n",
"data_file = \"./fridgeObjects.zip\"\n",
"urllib.request.urlretrieve(download_url, filename=data_file)\n",
"\n",
"# extract files\n",
"with ZipFile(data_file, \"r\") as zip:\n",
" print(\"extracting files...\")\n",
" zip.extractall()\n",
" print(\"done\")\n",
"# delete zip file\n",
"os.remove(data_file)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This is a sample image from this dataset:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import Image\n",
"\n",
"sample_image = \"./fridgeObjects/milk_bottle/99.jpg\"\n",
"Image(filename=sample_image)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Convert the downloaded data to JSONL\n",
"In this example, the fridge object dataset is stored in a directory. There are four different folders inside:\n",
"\n",
"- /water_bottle\n",
"- /milk_bottle\n",
"- /carton\n",
"- /can\n",
"\n",
"This is the most common data format for multiclass image classification. Each folder title corresponds to the image label for the images contained inside.\n",
"\n",
"In order to use this data to create an [AzureML Tabular Dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset), we first need to convert it to the required JSONL format. Please refer to the [documentation on how to prepare datasets](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-prepare-datasets-for-automl-images).\n",
"\n",
"The following script is creating two .jsonl files (one for training and one for validation) in the parent folder of the dataset. The train / validation ratio corresponds to 20% of the data going into the validation file."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import os\n",
"\n",
"src = \"./fridgeObjects/\"\n",
"train_validation_ratio = 5\n",
"\n",
"# Retrieving default datastore that got automatically created when we setup a workspace\n",
"workspaceblobstore = ws.get_default_datastore().name\n",
"\n",
"# Path to the training and validation files\n",
"train_annotations_file = os.path.join(src, \"train_annotations.jsonl\")\n",
"validation_annotations_file = os.path.join(src, \"validation_annotations.jsonl\")\n",
"\n",
"# sample json line dictionary\n",
"json_line_sample = {\n",
" \"image_url\": \"AmlDatastore://\"\n",
" + workspaceblobstore\n",
" + \"/\"\n",
" + os.path.basename(os.path.dirname(src)),\n",
" \"label\": \"\",\n",
"}\n",
"\n",
"index = 0\n",
"# Scan each sub directary and generate jsonl line\n",
"with open(train_annotations_file, \"w\") as train_f:\n",
" with open(validation_annotations_file, \"w\") as validation_f:\n",
" for className in os.listdir(src):\n",
" subDir = src + className\n",
" if not os.path.isdir(subDir):\n",
" continue\n",
" # Scan each sub directary\n",
" print(\"Parsing \" + subDir)\n",
" for image in os.listdir(subDir):\n",
" json_line = dict(json_line_sample)\n",
" json_line[\"image_url\"] += f\"/{className}/{image}\"\n",
" json_line[\"label\"] = className\n",
"\n",
" if index % train_validation_ratio == 0:\n",
" # validation annotation\n",
" validation_f.write(json.dumps(json_line) + \"\\n\")\n",
" else:\n",
" # train annotation\n",
" train_f.write(json.dumps(json_line) + \"\\n\")\n",
" index += 1"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Upload the JSONL file and images to Datastore\n",
"In order to use the data for training in Azure ML, we upload it to our Azure ML Workspace via a [Datastore](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#datasets-and-datastores). The datastore provides a mechanism for you to upload/download data and interact with it from your remote compute targets. It is an abstraction over Azure Storage."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Retrieving default datastore that got automatically created when we setup a workspace\n",
"ds = ws.get_default_datastore()\n",
"ds.upload(src_dir=\"./fridgeObjects\", target_path=\"fridgeObjects\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, we need to create an [AzureML Tabular Dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset) from the data we uploaded to the Datastore. We create one dataset for training and one for validation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"from azureml.data import DataType\n",
"\n",
"# get existing training dataset\n",
"training_dataset_name = \"fridgeObjectsTrainingDataset\"\n",
"if training_dataset_name in ws.datasets:\n",
" training_dataset = ws.datasets.get(training_dataset_name)\n",
" print(\"Found the training dataset\", training_dataset_name)\n",
"else:\n",
" # create training dataset\n",
" training_dataset = Dataset.Tabular.from_json_lines_files(\n",
" path=ds.path(\"fridgeObjects/train_annotations.jsonl\"),\n",
" set_column_types={\"image_url\": DataType.to_stream(ds.workspace)},\n",
" )\n",
" training_dataset = training_dataset.register(\n",
" workspace=ws, name=training_dataset_name\n",
" )\n",
"# get existing validation dataset\n",
"validation_dataset_name = \"fridgeObjectsValidationDataset\"\n",
"if validation_dataset_name in ws.datasets:\n",
" validation_dataset = ws.datasets.get(validation_dataset_name)\n",
" print(\"Found the validation dataset\", validation_dataset_name)\n",
"else:\n",
" # create validation dataset\n",
" validation_dataset = Dataset.Tabular.from_json_lines_files(\n",
" path=ds.path(\"fridgeObjects/validation_annotations.jsonl\"),\n",
" set_column_types={\"image_url\": DataType.to_stream(ds.workspace)},\n",
" )\n",
" validation_dataset = validation_dataset.register(\n",
" workspace=ws, name=validation_dataset_name\n",
" )\n",
"print(\"Training dataset name: \" + training_dataset.name)\n",
"print(\"Validation dataset name: \" + validation_dataset.name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Validation dataset is optional. If no validation dataset is specified, by default 20% of your training data will be used for validation. You can control the percentage using the `split_ratio` argument - please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#model-agnostic-hyperparameters) for more details.\n",
"\n",
"This is what the training dataset looks like:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_dataset.to_pandas_dataframe()"
]
},
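{
"cell_type": "markdown",
"metadata": {},
"source": [
"As noted above, when no validation dataset is passed, the `split_ratio` argument controls the automatic split. Below is a minimal sketch of such a configuration; `AutoMLImageConfig` itself is covered in detail in the next section, and `split_ratio=0.2` reproduces the default 20% hold-out:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.shared.constants import ImageTask\n",
"from azureml.train.automl import AutoMLImageConfig\n",
"from azureml.train.hyperdrive import GridParameterSampling, choice\n",
"\n",
"# A sketch only: no validation_data is passed, so AutoML holds out a\n",
"# fraction of the training data controlled by split_ratio (0.2 = 20%).\n",
"image_config_auto_split = AutoMLImageConfig(\n",
"    task=ImageTask.IMAGE_CLASSIFICATION,\n",
"    compute_target=compute_target,\n",
"    training_data=training_dataset,\n",
"    split_ratio=0.2,\n",
"    hyperparameter_sampling=GridParameterSampling({\"model_name\": choice(\"vitb16r224\")}),\n",
"    iterations=1,\n",
")"
]
},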
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Configuring your AutoML run for image tasks\n",
"AutoML allows you to easily train models for Image Classification, Object Detection & Instance Segmentation on your image data. You can control the model algorithm to be used, specify hyperparameter values for your model as well as perform a sweep across the hyperparameter space to generate an optimal model. Parameters for configuring your AutoML Image run are specified using the `AutoMLImageConfig` - please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#configure-your-experiment-settings) for the details on the parameters that can be used and their values."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"When using AutoML for image tasks, you need to specify the model algorithms using the `model_name` parameter. You can either specify a single model or choose to sweep over multiple models. Please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#configure-model-algorithms-and-hyperparameters) for the list of supported model algorithms.\n",
"\n",
"### Using default hyperparameter values for the specified algorithm\n",
"Before doing a large sweep to search for the optimal models and hyperparameters, we recommend trying the default values for a given model to get a first baseline. Next, you can explore multiple hyperparameters for the same model before sweeping over multiple models and their parameters. This allows an iterative approach, as with multiple models and multiple hyperparameters for each (as we showcase in the next section), the search space grows exponentially, and you need more iterations to find optimal configurations.\n",
"\n",
"If you wish to use the default hyperparameter values for a given algorithm (say `vitb16r224`), you can specify the config for your AutoML Image runs as follows:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.shared.constants import ImageTask\n",
"from azureml.train.automl import AutoMLImageConfig\n",
"from azureml.train.hyperdrive import GridParameterSampling, choice\n",
"\n",
"image_config_vit = AutoMLImageConfig(\n",
" task=ImageTask.IMAGE_CLASSIFICATION,\n",
" compute_target=compute_target,\n",
" training_data=training_dataset,\n",
" validation_data=validation_dataset,\n",
" hyperparameter_sampling=GridParameterSampling({\"model_name\": choice(\"vitb16r224\")}),\n",
" iterations=1,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Submitting an AutoML run for Computer Vision tasks\n",
"Once you've created the config settings for your run, you can submit an AutoML run using the config in order to train a vision model using your training dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run = experiment.submit(image_config_vit)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run.wait_for_completion(wait_post_processing=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Hyperparameter sweeping for your AutoML models for computer vision tasks\n",
"In this example, we use the AutoMLImageConfig to train an Image Classification model using the following model algorithms: `seresnext`, `resnet50`, `vitb16r224`, and `vits16r224`.\n",
"\n",
"When using AutoML for Images, you can perform a hyperparameter sweep over a defined parameter space to find the optimal model. In this example, we sweep over the hyperparameters for each algorithm, choosing from a range of values for learning_rate, number_of_epochs, layers_to_freeze, etc., to generate a model with the optimal 'accuracy'. If hyperparameter values are not specified, then default values are used for the specified algorithm.\n",
"\n",
"We use Random Sampling to pick samples from this parameter space and try a total of 10 iterations with these different samples, running 2 iterations at a time on our compute target, which has been previously set up using 4 nodes. Please note that the more parameters the space has, the more iterations you need to find optimal models.\n",
"\n",
"We leverage the Bandit early termination policy which will terminate poor performing configs (those that are not within 20% slack of the best performing config), thus significantly saving compute resources.\n",
"\n",
"For more details on model and hyperparameter sweeping, please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.shared.constants import ImageTask\n",
"from azureml.train.automl import AutoMLImageConfig\n",
"from azureml.train.hyperdrive import BanditPolicy, RandomParameterSampling\n",
"from azureml.train.hyperdrive import choice, uniform\n",
"\n",
"parameter_space = {\n",
" \"learning_rate\": uniform(0.001, 0.01),\n",
" \"model\": choice(\n",
" {\n",
" \"model_name\": choice(\"vitb16r224\", \"vits16r224\"),\n",
" \"number_of_epochs\": choice(15, 30),\n",
" },\n",
" {\n",
" \"model_name\": choice(\"seresnext\", \"resnest50\"),\n",
" \"layers_to_freeze\": choice(0, 2),\n",
" },\n",
" ),\n",
"}\n",
"\n",
"tuning_settings = {\n",
" \"iterations\": 10,\n",
" \"max_concurrent_iterations\": 2,\n",
" \"hyperparameter_sampling\": RandomParameterSampling(parameter_space),\n",
" \"early_termination_policy\": BanditPolicy(\n",
" evaluation_interval=2, slack_factor=0.2, delay_evaluation=6\n",
" ),\n",
"}\n",
"\n",
"automl_image_config = AutoMLImageConfig(\n",
" task=ImageTask.IMAGE_CLASSIFICATION,\n",
" compute_target=compute_target,\n",
" training_data=training_dataset,\n",
" validation_data=validation_dataset,\n",
" **tuning_settings,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run = experiment.submit(automl_image_config)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run.wait_for_completion(wait_post_processing=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"When doing a hyperparameter sweep, it can be useful to visualize the different configurations that were tried using the HyperDrive UI. You can navigate to this UI by going to the 'Child runs' tab in the UI of the main `automl_image_run` from above, which is the HyperDrive parent run. Then you can go into the 'Child runs' tab of this HyperDrive parent run. Alternatively, here below you can see directly the HyperDrive parent run and navigate to its 'Child runs' tab:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Run\n",
"\n",
"hyperdrive_run = Run(experiment=experiment, run_id=automl_image_run.id + \"_HD\")\n",
"hyperdrive_run"
]
},
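{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can also list the sampled configurations programmatically. The sketch below uses the generic `Run` API (`get_children` and `get_metrics` from `azureml.core`); each child of the HyperDrive parent run corresponds to one sampled configuration:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Print the status and logged metrics of each sampled configuration\n",
"for child_run in hyperdrive_run.get_children():\n",
"    print(child_run.id, child_run.status)\n",
"    print(child_run.get_metrics())"
]
},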
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Register the optimal vision model from the AutoML run\n",
"Once the run completes, we can register the model that was created from the best run (configuration that resulted in the best primary metric)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Register the model from the best run\n",
"\n",
"best_child_run = automl_image_run.get_best_child()\n",
"model_name = best_child_run.properties[\"model_name\"]\n",
"model = best_child_run.register_model(\n",
" model_name=model_name, model_path=\"outputs/model.pt\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Deploy model as a web service\n",
"Once you have your trained model, you can deploy the model on Azure. You can deploy your trained model as a web service on Azure Container Instances ([ACI](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-deploy-azure-container-instance)) or Azure Kubernetes Service ([AKS](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-deploy-azure-kubernetes-service)). Please note that ACI only supports small models under 1 GB in size. For testing larger models or for the high-scale production stage, we recommend using AKS.\n",
"In this tutorial, we will deploy the model as a web service in AKS."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You will need to first create an AKS compute cluster or use an existing AKS cluster. You can use either GPU or CPU VM SKUs for your deployment cluster"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AksCompute\n",
"from azureml.exceptions import ComputeTargetException\n",
"\n",
"# Choose a name for your cluster\n",
"aks_name = \"aks-cpu-mc\"\n",
"# Check to see if the cluster already exists\n",
"try:\n",
" aks_target = ComputeTarget(workspace=ws, name=aks_name)\n",
" print(\"Found existing compute target\")\n",
"except ComputeTargetException:\n",
" print(\"Creating a new compute target...\")\n",
" # Provision AKS cluster with a CPU machine\n",
" prov_config = AksCompute.provisioning_configuration(vm_size=\"STANDARD_D3_V2\")\n",
" # Create the cluster\n",
" aks_target = ComputeTarget.create(\n",
" workspace=ws, name=aks_name, provisioning_configuration=prov_config\n",
" )\n",
" aks_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Next, you will need to define the [inference configuration](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#update-inference-configuration), that describes how to set up the web-service containing your model. You can use the scoring script and the environment from the training run in your inference config.\n",
"\n",
"<b>Note:</b> To change the model's settings, open the downloaded scoring script and modify the model_settings variable <i>before</i> deploying the model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.model import InferenceConfig\n",
"\n",
"best_child_run.download_file(\n",
" \"outputs/scoring_file_v_1_0_0.py\", output_file_path=\"score.py\"\n",
")\n",
"environment = best_child_run.get_environment()\n",
"inference_config = InferenceConfig(entry_script=\"score.py\", environment=environment)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can then deploy the model as an AKS web service."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Deploy the model from the best run as an AKS web service\n",
"from azureml.core.webservice import AksWebservice\n",
"from azureml.core.model import Model\n",
"\n",
"aks_config = AksWebservice.deploy_configuration(\n",
" autoscale_enabled=True, cpu_cores=1, memory_gb=5, enable_app_insights=True\n",
")\n",
"\n",
"aks_service = Model.deploy(\n",
" ws,\n",
" models=[model],\n",
" inference_config=inference_config,\n",
" deployment_config=aks_config,\n",
" deployment_target=aks_target,\n",
" name=\"automl-image-test-cpu-mc\",\n",
" overwrite=True,\n",
")\n",
"aks_service.wait_for_deployment(show_output=True)\n",
"print(aks_service.state)"
]
},
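{
"cell_type": "markdown",
"metadata": {},
"source": [
"If the deployment fails or the service ends up in an unhealthy state, the container logs are usually the quickest way to diagnose the issue; `get_logs()` is part of the `Webservice` API:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Retrieve and print the logs of the deployed web service\n",
"print(aks_service.get_logs())"
]
},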
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test the web service\n",
"Finally, let's test our deployed web service to predict new images. You can pass in any image. In this case, we'll use a random image from the dataset and pass it to the scoring URI."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"\n",
"# URL for the web service\n",
"scoring_uri = aks_service.scoring_uri\n",
"\n",
"# If the service is authenticated, set the key or token\n",
"key, _ = aks_service.get_keys()\n",
"\n",
"sample_image = \"./test_image.jpg\"\n",
"\n",
"# Load image data\n",
"data = open(sample_image, \"rb\").read()\n",
"\n",
"# Set the content type\n",
"headers = {\"Content-Type\": \"application/octet-stream\"}\n",
"\n",
"# If authentication is enabled, set the authorization header\n",
"headers[\"Authorization\"] = f\"Bearer {key}\"\n",
"\n",
"# Make the request and display the response\n",
"resp = requests.post(scoring_uri, data, headers=headers)\n",
"print(resp.text)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Visualize predictions\n",
"Now that we have scored a test image, we can visualize the prediction for this image"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib.image as mpimg\n",
"from PIL import Image\n",
"import numpy as np\n",
"import json\n",
"\n",
"IMAGE_SIZE = (18, 12)\n",
"plt.figure(figsize=IMAGE_SIZE)\n",
"img_np = mpimg.imread(sample_image)\n",
"img = Image.fromarray(img_np.astype(\"uint8\"), \"RGB\")\n",
"x, y = img.size\n",
"\n",
"fig, ax = plt.subplots(1, figsize=(15, 15))\n",
"# Display the image\n",
"ax.imshow(img_np)\n",
"\n",
"prediction = json.loads(resp.text)\n",
"label_index = np.argmax(prediction[\"probs\"])\n",
"label = prediction[\"labels\"][label_index]\n",
"conf_score = prediction[\"probs\"][label_index]\n",
"\n",
"display_text = \"{} ({})\".format(label, round(conf_score, 3))\n",
"print(display_text)\n",
"\n",
"color = \"red\"\n",
"plt.text(30, 30, display_text, color=color, fontsize=30)\n",
"\n",
"plt.show()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.10"
},
"nteract": {
"version": "nteract-front-end@1.0.0"
}
},
"nbformat": 4,
"nbformat_minor": 2
}


View File

@@ -0,0 +1,15 @@
---
page_type: sample
languages:
- python
products:
- azure-machine-learning
description: Notebook showing how to use AutoML for training an Image Classification Multi-Label model. We will use a small dataset to train the model, demonstrate how you can tune hyperparameters of the model to optimize model performance, and deploy the model for use in inference scenarios.
---
# Image Classification Multi-Label using AutoML for Images
- Dataset: Toy dataset with images of products found in a fridge
- **[Jupyter Notebook](auto-ml-image-classification-multilabel.ipynb)**
- train an Image Classification Multi-Label model using AutoML
- tune hyperparameters of the model to optimize model performance
- deploy the model to use in inference scenarios

View File

@@ -0,0 +1,742 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License.\n",
"\n",
"# Training an Image Classification Multi-Label model using AutoML\n",
"In this notebook, we go over how you can use AutoML for training an Image Classification Multi-Label model. We will use a small dataset to train the model, demonstrate how you can tune hyperparameters of the model to optimize model performance and deploy the model to use in inference scenarios. For detailed information please refer to the [documentation of AutoML for Images](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![img](example_image_classification_multilabel_predictions.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Important:** This feature is currently in public preview. This preview version is provided without a service-level agreement. Certain features might not be supported or might have constrained capabilities. For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/en-us/support/legal/preview-supplemental-terms/)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Environment Setup\n",
"Please follow the [\"Setup a new conda environment\"](https://github.com/Azure/azureml-examples/tree/main/python-sdk/tutorials/automl-with-azureml#3-setup-a-new-conda-environment) instructions to get started."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"\n",
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK.\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK.\")\n",
"assert (\n",
" azureml.core.VERSION >= \"1.35\"\n",
"), \"Please upgrade the Azure ML SDK by running '!pip install --upgrade azureml-sdk' then restart the kernel.\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Workspace setup\n",
"In order to train and deploy models in Azure ML, you will first need to set up a workspace.\n",
"\n",
"An [Azure ML Workspace](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#workspace) is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an Azure ML Workspace coordinates storage, databases, and compute resources providing added functionality for machine learning experimentation, deployment, inference, and the monitoring of deployed models.\n",
"\n",
"Create an Azure ML Workspace within your Azure subscription or load an existing workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.workspace import Workspace\n",
"\n",
"ws = Workspace.from_config()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Compute target setup\n",
"You will need to provide a [Compute Target](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#computes) that will be used for your AutoML model training. AutoML models for image tasks require [GPU SKUs](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes-gpu) such as the ones from the NC, NCv2, NCv3, ND, NDv2 and NCasT4 series. We recommend using the NCsv3-series (with v100 GPUs) for faster training. Using a compute target with a multi-GPU VM SKU will leverage the multiple GPUs to speed up training. Additionally, setting up a compute target with multiple nodes will allow for faster model training by leveraging parallelism, when tuning hyperparameters for your model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
"\n",
"cluster_name = \"gpu-cluster-nc6\"\n",
"\n",
"try:\n",
" compute_target = ws.compute_targets[cluster_name]\n",
" print(\"Found existing compute target.\")\n",
"except KeyError:\n",
" print(\"Creating a new compute target...\")\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"Standard_NC6\",\n",
" idle_seconds_before_scaledown=600,\n",
" min_nodes=0,\n",
" max_nodes=4,\n",
" )\n",
" compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n",
"# Can poll for a minimum number of nodes and for a specific timeout.\n",
"# If no min_node_count is provided, it will use the scale settings for the cluster.\n",
"compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Experiment Setup\n",
"Create an [Experiment](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#experiments) in your workspace to track your model training runs"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment_name = \"automl-image-classification-multilabel\"\n",
"experiment = Experiment(ws, name=experiment_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Dataset with input Training Data\n",
"\n",
"In order to generate models for computer vision, you will need to bring in labeled image data as input for model training in the form of an [AzureML Tabular Dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset). You can either use a dataset that you have exported from a [Data Labeling](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-label-data) project, or create a new Tabular Dataset with your labeled training data."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In this notebook, we use a toy dataset called Fridge Objects, which consists of 128 images of 4 labels of beverage container {can, carton, milk bottle, water bottle} photos taken on different backgrounds. It also includes a labels file in .csv format. This is one of the most common data formats for Image Classification Multi-Label: one csv file that contains the mapping of labels to a folder of images.\n",
"\n",
"All images in this notebook are hosted in [this repository](https://github.com/microsoft/computervision-recipes) and are made available under the [MIT license](https://github.com/microsoft/computervision-recipes/blob/master/LICENSE).\n",
"\n",
"We first download and unzip the data locally."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import urllib\n",
"from zipfile import ZipFile\n",
"\n",
"# download data\n",
"download_url = \"https://cvbp-secondary.z19.web.core.windows.net/datasets/image_classification/multilabelFridgeObjects.zip\"\n",
"data_file = \"./multilabelFridgeObjects.zip\"\n",
"urllib.request.urlretrieve(download_url, filename=data_file)\n",
"\n",
"# extract files\n",
"with ZipFile(data_file, \"r\") as zip:\n",
" print(\"extracting files...\")\n",
" zip.extractall()\n",
" print(\"done\")\n",
"# delete zip file\n",
"os.remove(data_file)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This is a sample image from this dataset:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import Image\n",
"\n",
"sample_image = \"./multilabelFridgeObjects/images/56.jpg\"\n",
"Image(filename=sample_image)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Convert the downloaded data to JSONL\n",
"In this example, the fridge object dataset is annotated in the CSV file, where each image corresponds to a line. It defines a mapping of the filename to the labels. Since this is a multi-label classification problem, each image can be associated to multiple labels. In order to use this data to create an [AzureML Tabular Dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset), we first need to convert it to the required JSONL format. Please refer to the [documentation on how to prepare datasets](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-prepare-datasets-for-automl-images).\n",
"\n",
"The following script is creating two .jsonl files (one for training and one for validation) in the parent folder of the dataset. The train / validation ratio corresponds to 20% of the data going into the validation file."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import os\n",
"\n",
"src = \"./multilabelFridgeObjects\"\n",
"train_validation_ratio = 5\n",
"\n",
"# Retrieving default datastore that got automatically created when we setup a workspace\n",
"workspaceblobstore = ws.get_default_datastore().name\n",
"\n",
"# Path to the labels file.\n",
"labelFile = os.path.join(src, \"labels.csv\")\n",
"\n",
"# Path to the training and validation files\n",
"train_annotations_file = os.path.join(src, \"train_annotations.jsonl\")\n",
"validation_annotations_file = os.path.join(src, \"validation_annotations.jsonl\")\n",
"\n",
"# sample json line dictionary\n",
"json_line_sample = {\n",
" \"image_url\": \"AmlDatastore://\" + workspaceblobstore + \"/multilabelFridgeObjects\",\n",
" \"label\": [],\n",
"}\n",
"\n",
"# Read each annotation and convert it to jsonl line\n",
"with open(train_annotations_file, \"w\") as train_f:\n",
" with open(validation_annotations_file, \"w\") as validation_f:\n",
" with open(labelFile, \"r\") as labels:\n",
" for i, line in enumerate(labels):\n",
" # Skipping the title line and any empty lines.\n",
" if i == 0 or len(line.strip()) == 0:\n",
" continue\n",
" line_split = line.strip().split(\",\")\n",
" if len(line_split) != 2:\n",
" print(\"Skipping the invalid line: {}\".format(line))\n",
" continue\n",
" json_line = dict(json_line_sample)\n",
" json_line[\"image_url\"] += f\"/images/{line_split[0]}\"\n",
" json_line[\"label\"] = line_split[1].strip().split(\" \")\n",
"\n",
" if i % train_validation_ratio == 0:\n",
" # validation annotation\n",
" validation_f.write(json.dumps(json_line) + \"\\n\")\n",
" else:\n",
" # train annotation\n",
" train_f.write(json.dumps(json_line) + \"\\n\")"
]
},
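{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check, you can read back the first training annotation written by the cell above and confirm the expected JSONL structure (an `image_url` pointing at the datastore plus a list of labels):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Preview the first converted training annotation\n",
"with open(train_annotations_file) as f:\n",
"    print(f.readline().strip())"
]
},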
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Upload the JSONL file and images to Datastore\n",
"In order to use the data for training in Azure ML, we upload it to our Azure ML Workspace via a [Datastore](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#datasets-and-datastores). The datastore provides a mechanism for you to upload/download data and interact with it from your remote compute targets. It is an abstraction over Azure Storage."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Retrieving default datastore that got automatically created when we setup a workspace\n",
"ds = ws.get_default_datastore()\n",
"ds.upload(src_dir=\"./multilabelFridgeObjects\", target_path=\"multilabelFridgeObjects\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, we need to create an [AzureML Tabular Dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset) from the data we uploaded to the Datastore. We create one dataset for training and one for validation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"from azureml.data import DataType\n",
"\n",
"# get existing training dataset\n",
"training_dataset_name = \"multilabelFridgeObjectsTrainingDataset\"\n",
"if training_dataset_name in ws.datasets:\n",
" training_dataset = ws.datasets.get(training_dataset_name)\n",
" print(\"Found the training dataset\", training_dataset_name)\n",
"else:\n",
" # create training dataset\n",
" training_dataset = Dataset.Tabular.from_json_lines_files(\n",
" path=ds.path(\"multilabelFridgeObjects/train_annotations.jsonl\"),\n",
" set_column_types={\"image_url\": DataType.to_stream(ds.workspace)},\n",
" )\n",
" training_dataset = training_dataset.register(\n",
" workspace=ws, name=training_dataset_name\n",
" )\n",
"# get existing validation dataset\n",
"validation_dataset_name = \"multilabelFridgeObjectsValidationDataset\"\n",
"if validation_dataset_name in ws.datasets:\n",
" validation_dataset = ws.datasets.get(validation_dataset_name)\n",
" print(\"Found the validation dataset\", validation_dataset_name)\n",
"else:\n",
" # create validation dataset\n",
" validation_dataset = Dataset.Tabular.from_json_lines_files(\n",
" path=ds.path(\"multilabelFridgeObjects/validation_annotations.jsonl\"),\n",
" set_column_types={\"image_url\": DataType.to_stream(ds.workspace)},\n",
" )\n",
" validation_dataset = validation_dataset.register(\n",
" workspace=ws, name=validation_dataset_name\n",
" )\n",
"print(\"Training dataset name: \" + training_dataset.name)\n",
"print(\"Validation dataset name: \" + validation_dataset.name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Validation dataset is optional. If no validation dataset is specified, by default 20% of your training data will be used for validation. You can control the percentage using the `split_ratio` argument - please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#model-agnostic-hyperparameters) for more details.\n",
"\n",
"This is what the training dataset looks like:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_dataset.to_pandas_dataframe()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Configuring your AutoML run for image tasks\n",
"AutoML allows you to easily train models for Image Classification, Object Detection & Instance Segmentation on your image data. You can control the model algorithm to be used, specify hyperparameter values for your model as well as perform a sweep across the hyperparameter space to generate an optimal model. Parameters for configuring your AutoML Image run are specified using the `AutoMLImageConfig` - please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#configure-your-experiment-settings) for the details on the parameters that can be used and their values."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"When using AutoML for image tasks, you need to specify the model algorithms using the `model_name` parameter. You can either specify a single model or choose to sweep over multiple models. Please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#configure-model-algorithms-and-hyperparameters) for the list of supported model algorithms.\n",
"\n",
"### Using default hyperparameter values for the specified algorithm\n",
"Before doing a large sweep to search for the optimal models and hyperparameters, we recommend trying the default values for a given model to get a first baseline. Next, you can explore multiple hyperparameters for the same model before sweeping over multiple models and their parameters. This allows an iterative approach, as with multiple models and multiple hyperparameters for each (as we showcase in the next section), the search space grows exponentially, and you need more iterations to find optimal configurations.\n",
"\n",
"If you wish to use the default hyperparameter values for a given algorithm (say `vitb16r224`), you can specify the config for your AutoML Image runs as follows:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.shared.constants import ImageTask\n",
"from azureml.train.automl import AutoMLImageConfig\n",
"from azureml.train.hyperdrive import GridParameterSampling, choice\n",
"\n",
"image_config_vit = AutoMLImageConfig(\n",
" task=ImageTask.IMAGE_CLASSIFICATION_MULTILABEL,\n",
" compute_target=compute_target,\n",
" training_data=training_dataset,\n",
" validation_data=validation_dataset,\n",
" hyperparameter_sampling=GridParameterSampling({\"model_name\": choice(\"vitb16r224\")}),\n",
" iterations=1,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Submitting an AutoML run for Computer Vision tasks\n",
"Once you've created the config settings for your run, you can submit an AutoML run using the config in order to train a vision model using your training dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run = experiment.submit(image_config_vit)"
]
},
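{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optionally, you can monitor the submitted run from within the notebook using the `RunDetails` widget; this assumes the `azureml-widgets` package is installed in your environment:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"\n",
"# Show live status, child runs and metrics for the submitted run\n",
"RunDetails(automl_image_run).show()"
]
},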
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run.wait_for_completion(wait_post_processing=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Hyperparameter sweeping for your AutoML models for computer vision tasks\n",
"In this example, we use the AutoMLImageConfig to train an Image Classification model using the `vitb16r224` and `seresnext` model algorithms.\n",
"\n",
"When using AutoML for Images, you can perform a hyperparameter sweep over a defined parameter space to find the optimal model. In this example, we sweep over the hyperparameters for each algorithm, choosing from a range of values for learning_rate, grad_accumulation_step, valid_resize_size, etc., to generate a model with the optimal 'accuracy'. If hyperparameter values are not specified, then default values are used for the specified algorithm.\n",
"\n",
"We use Random Sampling to pick samples from this parameter space and try a total of 10 iterations with these different samples, running 2 iterations at a time on our compute target, which has been previously set up using 4 nodes. Please note that the more parameters the space has, the more iterations you need to find optimal models.\n",
"\n",
"We leverage the Bandit early termination policy which will terminate poor performing configs (those that are not within 20% slack of the best performing config), thus significantly saving compute resources.\n",
"\n",
"For more details on model and hyperparameter sweeping, please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.shared.constants import ImageTask\n",
"from azureml.train.automl import AutoMLImageConfig\n",
"from azureml.train.hyperdrive import BanditPolicy, RandomParameterSampling\n",
"from azureml.train.hyperdrive import choice, uniform\n",
"\n",
"parameter_space = {\n",
" \"learning_rate\": uniform(0.005, 0.05),\n",
" \"model\": choice(\n",
" {\n",
" \"model_name\": choice(\"vitb16r224\"),\n",
" \"number_of_epochs\": choice(15, 30),\n",
" \"grad_accumulation_step\": choice(1, 2),\n",
" },\n",
" {\n",
" \"model_name\": choice(\"seresnext\"),\n",
" # model-specific, valid_resize_size should be larger or equal than valid_crop_size\n",
" \"valid_resize_size\": choice(288, 320, 352),\n",
" \"valid_crop_size\": choice(224, 256), # model-specific\n",
" \"train_crop_size\": choice(224, 256), # model-specific\n",
" },\n",
" ),\n",
"}\n",
"\n",
"tuning_settings = {\n",
" \"iterations\": 10,\n",
" \"max_concurrent_iterations\": 2,\n",
" \"hyperparameter_sampling\": RandomParameterSampling(parameter_space),\n",
" \"early_termination_policy\": BanditPolicy(\n",
" evaluation_interval=2, slack_factor=0.2, delay_evaluation=6\n",
" ),\n",
"}\n",
"\n",
"automl_image_config = AutoMLImageConfig(\n",
" task=ImageTask.IMAGE_CLASSIFICATION_MULTILABEL,\n",
" compute_target=compute_target,\n",
" training_data=training_dataset,\n",
" validation_data=validation_dataset,\n",
" **tuning_settings,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run = experiment.submit(automl_image_config)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run.wait_for_completion(wait_post_processing=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"When doing a hyperparameter sweep, it can be useful to visualize the different configurations that were tried using the HyperDrive UI. You can navigate to this UI by going to the 'Child runs' tab in the UI of the main `automl_image_run` from above, which is the HyperDrive parent run. Then you can go into the 'Child runs' tab of this HyperDrive parent run. Alternatively, here below you can see directly the HyperDrive parent run and navigate to its 'Child runs' tab:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Run\n",
"\n",
"hyperdrive_run = Run(experiment=experiment, run_id=automl_image_run.id + \"_HD\")\n",
"hyperdrive_run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Register the optimal vision model from the AutoML run\n",
"Once the run completes, we can register the model that was created from the best run (configuration that resulted in the best primary metric)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Register the model from the best run\n",
"\n",
"best_child_run = automl_image_run.get_best_child()\n",
"model_name = best_child_run.properties[\"model_name\"]\n",
"model = best_child_run.register_model(\n",
" model_name=model_name, model_path=\"outputs/model.pt\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Deploy model as a web service\n",
"Once you have your trained model, you can deploy the model on Azure. You can deploy your trained model as a web service on Azure Container Instances ([ACI](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-deploy-azure-container-instance)) or Azure Kubernetes Service ([AKS](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-deploy-azure-kubernetes-service)). Please note that ACI only supports small models under 1 GB in size. For testing larger models or for the high-scale production stage, we recommend using AKS.\n",
"In this tutorial, we will deploy the model as a web service in AKS."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You will need to first create an AKS compute cluster or use an existing AKS cluster. You can use either GPU or CPU VM SKUs for your deployment cluster"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AksCompute\n",
"from azureml.exceptions import ComputeTargetException\n",
"\n",
"# Choose a name for your cluster\n",
"aks_name = \"aks-cpu-ml\"\n",
"# Check to see if the cluster already exists\n",
"try:\n",
" aks_target = ComputeTarget(workspace=ws, name=aks_name)\n",
" print(\"Found existing compute target\")\n",
"except ComputeTargetException:\n",
" print(\"Creating a new compute target...\")\n",
" # Provision AKS cluster with a CPU machine\n",
" prov_config = AksCompute.provisioning_configuration(vm_size=\"STANDARD_D3_V2\")\n",
" # Create the cluster\n",
" aks_target = ComputeTarget.create(\n",
" workspace=ws, name=aks_name, provisioning_configuration=prov_config\n",
" )\n",
" aks_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Next, you will need to define the [inference configuration](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#update-inference-configuration), that describes how to set up the web-service containing your model. You can use the scoring script and the environment from the training run in your inference config.\n",
"\n",
"<b>Note:</b> To change the model's settings, open the downloaded scoring script and modify the model_settings variable <i>before</i> deploying the model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.model import InferenceConfig\n",
"\n",
"best_child_run.download_file(\n",
" \"outputs/scoring_file_v_1_0_0.py\", output_file_path=\"score.py\"\n",
")\n",
"environment = best_child_run.get_environment()\n",
"inference_config = InferenceConfig(entry_script=\"score.py\", environment=environment)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can then deploy the model as an AKS web service."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Deploy the model from the best run as an AKS web service\n",
"from azureml.core.webservice import AksWebservice\n",
"from azureml.core.model import Model\n",
"\n",
"aks_config = AksWebservice.deploy_configuration(\n",
" autoscale_enabled=True, cpu_cores=1, memory_gb=5, enable_app_insights=True\n",
")\n",
"\n",
"aks_service = Model.deploy(\n",
" ws,\n",
" models=[model],\n",
" inference_config=inference_config,\n",
" deployment_config=aks_config,\n",
" deployment_target=aks_target,\n",
" name=\"automl-image-test-cpu-ml\",\n",
" overwrite=True,\n",
")\n",
"aks_service.wait_for_deployment(show_output=True)\n",
"print(aks_service.state)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test the web service\n",
"Finally, let's test our deployed web service to predict new images. You can pass in any image. In this case, we'll use a random image from the dataset and pass it to the scoring URI."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"from IPython.display import Image\n",
"\n",
"# URL for the web service\n",
"scoring_uri = aks_service.scoring_uri\n",
"\n",
"# If the service is authenticated, set the key or token\n",
"key, _ = aks_service.get_keys()\n",
"\n",
"sample_image = \"./test_image.jpg\"\n",
"\n",
"# Load image data\n",
"data = open(sample_image, \"rb\").read()\n",
"\n",
"# Set the content type\n",
"headers = {\"Content-Type\": \"application/octet-stream\"}\n",
"\n",
"# If authentication is enabled, set the authorization header\n",
"headers[\"Authorization\"] = f\"Bearer {key}\"\n",
"\n",
"# Make the request and display the response\n",
"resp = requests.post(scoring_uri, data, headers=headers)\n",
"print(resp.text)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Visualize predictions\n",
"Now that we have scored a test image, we can visualize the predictions for this image"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib.image as mpimg\n",
"from PIL import Image\n",
"import json\n",
"\n",
"IMAGE_SIZE = (18, 12)\n",
"plt.figure(figsize=IMAGE_SIZE)\n",
"img_np = mpimg.imread(sample_image)\n",
"img = Image.fromarray(img_np.astype(\"uint8\"), \"RGB\")\n",
"x, y = img.size\n",
"\n",
"fig, ax = plt.subplots(1, figsize=(15, 15))\n",
"# Display the image\n",
"ax.imshow(img_np)\n",
"\n",
"prediction = json.loads(resp.text)\n",
"score_threshold = 0.5\n",
"\n",
"label_offset_x = 30\n",
"label_offset_y = 30\n",
"for index, score in enumerate(prediction[\"probs\"]):\n",
" if score > score_threshold:\n",
" label = prediction[\"labels\"][index]\n",
" display_text = \"{} ({})\".format(label, round(score, 3))\n",
" print(display_text)\n",
"\n",
" color = \"red\"\n",
" plt.text(label_offset_x, label_offset_y, display_text, color=color, fontsize=30)\n",
" label_offset_y += 30\n",
"plt.show()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.10"
},
"nteract": {
"version": "nteract-front-end@1.0.0"
}
},
"nbformat": 4,
"nbformat_minor": 2
}


View File

@@ -0,0 +1,15 @@
---
page_type: sample
languages:
- python
products:
- azure-machine-learning
description: Notebook showing how to use AutoML for training an Instance Segmentation model. We will use a small dataset to train the model, demonstrate how you can tune hyperparameters of the model to optimize model performance, and deploy the model for use in inference scenarios.
---
# Instance Segmentation using AutoML for Images
- Dataset: Toy dataset with images of products found in a fridge
- **[Jupyter Notebook](auto-ml-image-instance-segmentation.ipynb)**
- train an Instance Segmentation model using AutoML
- tune hyperparameters of the model to optimize model performance
- deploy the model to use in inference scenarios

View File

@@ -0,0 +1,769 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License.\n",
"\n",
"# Training an Instance Segmentation model using AutoML\n",
"In this notebook, we go over how you can use AutoML for training an Instance Segmentation model. We will use a small dataset to train the model, demonstrate how you can tune hyperparameters of the model to optimize model performance and deploy the model to use in inference scenarios. For detailed information please refer to the [documentation of AutoML for Images](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![img](example_instance_segmentation_predictions.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Important:** This feature is currently in public preview. This preview version is provided without a service-level agreement. Certain features might not be supported or might have constrained capabilities. For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/en-us/support/legal/preview-supplemental-terms/)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Environment Setup\n",
"Please follow the [\"Setup a new conda environment\"](https://github.com/Azure/azureml-examples/tree/main/python-sdk/tutorials/automl-with-azureml#3-setup-a-new-conda-environment) instructions to get started."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"\n",
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK.\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK.\")\n",
"assert (\n",
" azureml.core.VERSION >= \"1.35\"\n",
"), \"Please upgrade the Azure ML SDK by running '!pip install --upgrade azureml-sdk' then restart the kernel.\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Additional environment setup\n",
"You will need to install these additional packages below to run this notebook:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install \"scikit-image==0.17.2\" \"simplification==0.5.1\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Workspace setup\n",
"In order to train and deploy models in Azure ML, you will first need to set up a workspace.\n",
"\n",
"An [Azure ML Workspace](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#workspace) is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an Azure ML Workspace coordinates storage, databases, and compute resources providing added functionality for machine learning experimentation, deployment, inference, and the monitoring of deployed models.\n",
"\n",
"Create an Azure ML Workspace within your Azure subscription or load an existing workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.workspace import Workspace\n",
"\n",
"ws = Workspace.from_config()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Compute target setup\n",
"You will need to provide a [Compute Target](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#computes) that will be used for your AutoML model training. AutoML models for image tasks require [GPU SKUs](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes-gpu) such as the ones from the NC, NCv2, NCv3, ND, NDv2 and NCasT4 series. We recommend using the NCsv3-series (with v100 GPUs) for faster training. Using a compute target with a multi-GPU VM SKU will leverage the multiple GPUs to speed up training. Additionally, setting up a compute target with multiple nodes will allow for faster model training by leveraging parallelism, when tuning hyperparameters for your model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
"\n",
"cluster_name = \"gpu-cluster-nc6\"\n",
"\n",
"try:\n",
" compute_target = ws.compute_targets[cluster_name]\n",
" print(\"Found existing compute target.\")\n",
"except KeyError:\n",
" print(\"Creating a new compute target...\")\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"Standard_NC6\",\n",
" idle_seconds_before_scaledown=600,\n",
" min_nodes=0,\n",
" max_nodes=4,\n",
" )\n",
" compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n",
"# Can poll for a minimum number of nodes and for a specific timeout.\n",
"# If no min_node_count is provided, it will use the scale settings for the cluster.\n",
"compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Experiment Setup\n",
"Create an [Experiment](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#experiments) in your workspace to track your model training runs"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment_name = \"automl-image-instance-segmentation\"\n",
"experiment = Experiment(ws, name=experiment_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Dataset with input Training Data\n",
"\n",
"In order to generate models for computer vision, you will need to bring in labeled image data as input for model training in the form of an [AzureML Tabular Dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset). You can either use a dataset that you have exported from a [Data Labeling](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-label-data) project, or create a new Tabular Dataset with your labeled training data."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In this notebook, we use a toy dataset called Fridge Objects, which includes 128 images of 4 classes of beverage container {can, carton, milk bottle, water bottle} photos taken on different backgrounds.\n",
"\n",
"All images in this notebook are hosted in [this repository](https://github.com/microsoft/computervision-recipes) and are made available under the [MIT license](https://github.com/microsoft/computervision-recipes/blob/master/LICENSE).\n",
"\n",
"We first download and unzip the data locally."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import urllib\n",
"from zipfile import ZipFile\n",
"\n",
"# download data\n",
"download_url = \"https://cvbp-secondary.z19.web.core.windows.net/datasets/object_detection/odFridgeObjectsMask.zip\"\n",
"data_file = \"./odFridgeObjectsMask.zip\"\n",
"urllib.request.urlretrieve(download_url, filename=data_file)\n",
"\n",
"# extract files\n",
"with ZipFile(data_file, \"r\") as zip:\n",
" print(\"extracting files...\")\n",
" zip.extractall()\n",
" print(\"done\")\n",
"# delete zip file\n",
"os.remove(data_file)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This is a sample image from this dataset:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import Image\n",
"\n",
"Image(filename=\"./odFridgeObjectsMask/images/31.jpg\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Convert the downloaded data to JSONL\n",
"In this example, the fridge object dataset is annotated in Pascal VOC format, where each image corresponds to an xml file. Each xml file contains information on where its corresponding image file is located and also contains information about the bounding boxes and the object labels. In order to use this data to create an [AzureML Tabular Dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset), we first need to convert it to the required JSONL format. Please refer to the [documentation on how to prepare datasets](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-prepare-datasets-for-automl-images).\n",
"\n",
"The following script is creating two .jsonl files (one for training and one for validation) in the parent folder of the dataset. The train / validation ratio corresponds to 20% of the data going into the validation file."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# The jsonl_converter below relies on scikit-image and simplification.\n",
"# If you don't have them installed, install them before converting data by runing this cell.\n",
"%pip install \"scikit-image==0.17.2\" \"simplification==0.5.1\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from jsonl_converter import convert_mask_in_VOC_to_jsonl\n",
"\n",
"data_path = \"./odFridgeObjectsMask/\"\n",
"convert_mask_in_VOC_to_jsonl(data_path, ws)"
]
},
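{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check (a minimal sketch, assuming the converter wrote the two annotation files described above), you can count the generated annotations and inspect the structure of one record:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import os\n",
"\n",
"# Count the generated annotations and inspect the keys of one record\n",
"for file_name in [\"train_annotations.jsonl\", \"validation_annotations.jsonl\"]:\n",
"    file_path = os.path.join(data_path, file_name)\n",
"    with open(file_path) as f:\n",
"        lines = f.readlines()\n",
"    print(file_name, \"-\", len(lines), \"annotations\")\n",
"print(\"Keys in one annotation:\", list(json.loads(lines[0]).keys()))"
]
},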
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Upload the JSONL file and images to Datastore\n",
"In order to use the data for training in Azure ML, we upload it to our Azure ML Workspace via a [Datastore](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#datasets-and-datastores). The datastore provides a mechanism for you to upload/download data and interact with it from your remote compute targets. It is an abstraction over Azure Storage."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Retrieving default datastore that got automatically created when we setup a workspace\n",
"ds = ws.get_default_datastore()\n",
"ds.upload(src_dir=\"./odFridgeObjectsMask\", target_path=\"odFridgeObjectsMask\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, we need to create an [AzureML Tabular Dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset) from the data we uploaded to the Datastore. We create one dataset for training and one for validation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"from azureml.data import DataType\n",
"\n",
"# get existing training dataset\n",
"training_dataset_name = \"odFridgeObjectsMaskTrainingDataset\"\n",
"if training_dataset_name in ws.datasets:\n",
" training_dataset = ws.datasets.get(training_dataset_name)\n",
" print(\"Found the training dataset\", training_dataset_name)\n",
"else:\n",
" # create training dataset\n",
" training_dataset = Dataset.Tabular.from_json_lines_files(\n",
" path=ds.path(\"odFridgeObjectsMask/train_annotations.jsonl\"),\n",
" set_column_types={\"image_url\": DataType.to_stream(ds.workspace)},\n",
" )\n",
" training_dataset = training_dataset.register(\n",
" workspace=ws, name=training_dataset_name\n",
" )\n",
"# get existing validation dataset\n",
"validation_dataset_name = \"odFridgeObjectsMaskValidationDataset\"\n",
"if validation_dataset_name in ws.datasets:\n",
" validation_dataset = ws.datasets.get(validation_dataset_name)\n",
" print(\"Found the validation dataset\", validation_dataset_name)\n",
"else:\n",
" # create validation dataset\n",
" validation_dataset = Dataset.Tabular.from_json_lines_files(\n",
" path=ds.path(\"odFridgeObjectsMask/validation_annotations.jsonl\"),\n",
" set_column_types={\"image_url\": DataType.to_stream(ds.workspace)},\n",
" )\n",
" validation_dataset = validation_dataset.register(\n",
" workspace=ws, name=validation_dataset_name\n",
" )\n",
"print(\"Training dataset name: \" + training_dataset.name)\n",
"print(\"Validation dataset name: \" + validation_dataset.name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Validation dataset is optional. If no validation dataset is specified, by default 20% of your training data will be used for validation. You can control the percentage using the `split_ratio` argument - please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#model-agnostic-hyperparameters) for more details.\n",
"\n",
"This is what the training dataset looks like:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_dataset.to_pandas_dataframe()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Configuring your AutoML run for image tasks\n",
"AutoML allows you to easily train models for Image Classification, Object Detection & Instance Segmentation on your image data. You can control the model algorithm to be used, specify hyperparameter values for your model as well as perform a sweep across the hyperparameter space to generate an optimal model. Parameters for configuring your AutoML Image run are specified using the `AutoMLImageConfig` - please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#configure-your-experiment-settings) for the details on the parameters that can be used and their values."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"When using AutoML for image tasks, you need to specify the model algorithms using the `model_name` parameter. You can either specify a single model or choose to sweep over multiple models. Please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#configure-model-algorithms-and-hyperparameters) for the list of supported model algorithms."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Using default hyperparameter values for the specified algorithm\n",
"Before doing a large sweep to search for the optimal models and hyperparameters, we recommend trying the default values for a given model to get a first baseline. Next, you can explore multiple hyperparameters for the same model before sweeping over multiple models and their parameters. This allows an iterative approach, as with multiple models and multiple hyperparameters for each (as we showcase in the next section), the search space grows exponentially, and you need more iterations to find optimal configurations.\n",
"\n",
"If you wish to use the default hyperparameter values for a given algorithm (say `maskrcnn`), you can specify the config for your AutoML Image runs as follows:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.shared.constants import ImageTask\n",
"from azureml.train.automl import AutoMLImageConfig\n",
"from azureml.train.hyperdrive import GridParameterSampling, choice\n",
"\n",
"image_config_maskrcnn = AutoMLImageConfig(\n",
" task=ImageTask.IMAGE_INSTANCE_SEGMENTATION,\n",
" compute_target=compute_target,\n",
" training_data=training_dataset,\n",
" validation_data=validation_dataset,\n",
" hyperparameter_sampling=GridParameterSampling(\n",
" {\"model_name\": choice(\"maskrcnn_resnet50_fpn\")}\n",
" ),\n",
" iterations=1,\n",
")"
]
},
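{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you skipped creating a separate validation dataset, here is a minimal sketch of the same config without `validation_data`, relying on the automatic split mentioned earlier. It assumes `split_ratio` is honored as a model-agnostic hyperparameter, as described in the documentation linked above - verify the parameter name against your SDK version:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: no validation_data; hold out 20% of the training data instead,\n",
"# assuming split_ratio is accepted as a model-agnostic hyperparameter.\n",
"image_config_auto_split = AutoMLImageConfig(\n",
"    task=ImageTask.IMAGE_INSTANCE_SEGMENTATION,\n",
"    compute_target=compute_target,\n",
"    training_data=training_dataset,\n",
"    hyperparameter_sampling=GridParameterSampling(\n",
"        {\"model_name\": choice(\"maskrcnn_resnet50_fpn\"), \"split_ratio\": choice(0.2)}\n",
"    ),\n",
"    iterations=1,\n",
")"
]
},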
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Submitting an AutoML run for Computer Vision tasks\n",
"Once you've created the config settings for your run, you can submit an AutoML run using the config in order to train a vision model using your training dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run = experiment.submit(image_config_maskrcnn)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run.wait_for_completion(wait_post_processing=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Hyperparameter sweeping for your AutoML models for computer vision tasks\n",
"In this example, we use the AutoMLImageConfig to train an Instance Segmentation model using `maskrcnn_resnet50_fpn` which is pretrained on COCO, a large-scale object detection, segmentation, and captioning dataset that contains over 200K labeled images with over 80 label categories.\n",
"\n",
"When using AutoML for Images, you can perform a hyperparameter sweep over a defined parameter space to find the optimal model. In this example, we sweep over the hyperparameters for each algorithm, choosing from a range of values for learning_rate, optimizer, etc., to generate a model with the optimal 'accuracy'. If hyperparameter values are not specified, then default values are used for the specified algorithm.\n",
"\n",
"We use Random Sampling to pick samples from this parameter space and try a total of 10 iterations with these different samples, running 2 iterations at a time on our compute target, which has been previously set up using 4 nodes. Please note that the more parameters the space has, the more iterations you need to find optimal models.\n",
"\n",
"We leverage the Bandit early termination policy which will terminate poor performing configs (those that are not within 20% slack of the best performing config), thus significantly saving compute resources.\n",
"\n",
"For more details on model and hyperparameter sweeping, please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.shared.constants import ImageTask\n",
"from azureml.train.automl import AutoMLImageConfig\n",
"from azureml.train.hyperdrive import BanditPolicy, RandomParameterSampling\n",
"from azureml.train.hyperdrive import choice, uniform\n",
"\n",
"parameter_space = {\n",
" \"model_name\": choice(\"maskrcnn_resnet50_fpn\"),\n",
" \"learning_rate\": uniform(0.0001, 0.001),\n",
" #'warmup_cosine_lr_warmup_epochs': choice(0, 3),\n",
" \"optimizer\": choice(\"sgd\", \"adam\", \"adamw\"),\n",
" \"min_size\": choice(600, 800),\n",
"}\n",
"\n",
"tuning_settings = {\n",
" \"iterations\": 10,\n",
" \"max_concurrent_iterations\": 2,\n",
" \"hyperparameter_sampling\": RandomParameterSampling(parameter_space),\n",
" \"early_termination_policy\": BanditPolicy(\n",
" evaluation_interval=2, slack_factor=0.2, delay_evaluation=6\n",
" ),\n",
"}\n",
"\n",
"automl_image_config = AutoMLImageConfig(\n",
" task=ImageTask.IMAGE_INSTANCE_SEGMENTATION,\n",
" compute_target=compute_target,\n",
" training_data=training_dataset,\n",
" validation_data=validation_dataset,\n",
" **tuning_settings,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run = experiment.submit(automl_image_config)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_image_run.wait_for_completion(wait_post_processing=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"When doing a hyperparameter sweep, it can be useful to visualize the different configurations that were tried using the HyperDrive UI. You can navigate to this UI by going to the 'Child runs' tab in the UI of the main `automl_image_run` from above, which is the HyperDrive parent run. Then you can go into the 'Child runs' tab of this HyperDrive parent run. Alternatively, here below you can see directly the HyperDrive parent run and navigate to its 'Child runs' tab:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Run\n",
"\n",
"hyperdrive_run = Run(experiment=experiment, run_id=automl_image_run.id + \"_HD\")\n",
"hyperdrive_run"
]
},
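{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can also enumerate the sweep's child runs programmatically instead of using the UI. A minimal sketch using the standard `Run` API from azureml-core (`get_children` and `get_metrics`):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Print each sampled configuration's run id and its logged metrics\n",
"for child_run in hyperdrive_run.get_children():\n",
"    print(child_run.id)\n",
"    print(child_run.get_metrics())"
]
},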
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Register the optimal vision model from the AutoML run\n",
"Once the run completes, we can register the model that was created from the best run (configuration that resulted in the best primary metric)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Register the model from the best run\n",
"\n",
"best_child_run = automl_image_run.get_best_child()\n",
"model_name = best_child_run.properties[\"model_name\"]\n",
"model = best_child_run.register_model(\n",
" model_name=model_name, model_path=\"outputs/model.pt\"\n",
")"
]
},
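{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check that the registration succeeded (`name` and `version` are standard attributes of the registered `Model`):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Confirm the registered model's identity\n",
"print(\"Registered model:\", model.name, \"version:\", model.version)"
]
},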
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Deploy model as a web service\n",
"Once you have your trained model, you can deploy the model on Azure. You can deploy your trained model as a web service on Azure Container Instances ([ACI](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-deploy-azure-container-instance)) or Azure Kubernetes Service ([AKS](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-deploy-azure-kubernetes-service)). Please note that ACI only supports small models under 1 GB in size. For testing larger models or for the high-scale production stage, we recommend using AKS.\n",
"In this tutorial, we will deploy the model as a web service in AKS."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You will need to first create an AKS compute cluster or use an existing AKS cluster. You can use either GPU or CPU VM SKUs for your deployment cluster"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AksCompute\n",
"from azureml.exceptions import ComputeTargetException\n",
"\n",
"# Choose a name for your cluster\n",
"aks_name = \"aks-cpu-is\"\n",
"# Check to see if the cluster already exists\n",
"try:\n",
" aks_target = ComputeTarget(workspace=ws, name=aks_name)\n",
" print(\"Found existing compute target\")\n",
"except ComputeTargetException:\n",
" print(\"Creating a new compute target...\")\n",
" # Provision AKS cluster with a CPU machine\n",
" prov_config = AksCompute.provisioning_configuration(vm_size=\"STANDARD_D3_V2\")\n",
" # Create the cluster\n",
" aks_target = ComputeTarget.create(\n",
" workspace=ws, name=aks_name, provisioning_configuration=prov_config\n",
" )\n",
" aks_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Next, you will need to define the [inference configuration](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#update-inference-configuration), that describes how to set up the web-service containing your model. You can use the scoring script and the environment from the training run in your inference config.\n",
"\n",
"<b>Note:</b> To change the model's settings, open the downloaded scoring script and modify the model_settings variable <i>before</i> deploying the model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.model import InferenceConfig\n",
"\n",
"best_child_run.download_file(\n",
" \"outputs/scoring_file_v_1_0_0.py\", output_file_path=\"score.py\"\n",
")\n",
"environment = best_child_run.get_environment()\n",
"inference_config = InferenceConfig(entry_script=\"score.py\", environment=environment)"
]
},
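{
"cell_type": "markdown",
"metadata": {},
"source": [
"As the note above mentions, the downloaded scoring script defines a `model_settings` variable. A minimal sketch to locate it in `score.py` before deploying (inspection only; edit the file by hand to change the settings):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Print the lines of the downloaded scoring script that mention model_settings\n",
"with open(\"score.py\") as f:\n",
"    for line_no, line in enumerate(f, start=1):\n",
"        if \"model_settings\" in line:\n",
"            print(line_no, line.rstrip())"
]
},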
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can then deploy the model as an AKS web service."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Deploy the model from the best run as an AKS web service\n",
"from azureml.core.webservice import AksWebservice\n",
"from azureml.core.model import Model\n",
"\n",
"aks_config = AksWebservice.deploy_configuration(\n",
" autoscale_enabled=True, cpu_cores=1, memory_gb=5, enable_app_insights=True\n",
")\n",
"\n",
"aks_service = Model.deploy(\n",
" ws,\n",
" models=[model],\n",
" inference_config=inference_config,\n",
" deployment_config=aks_config,\n",
" deployment_target=aks_target,\n",
" name=\"automl-image-test-cpu-is\",\n",
" overwrite=True,\n",
")\n",
"aks_service.wait_for_deployment(show_output=True)\n",
"print(aks_service.state)"
]
},
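{
"cell_type": "markdown",
"metadata": {},
"source": [
"If the deployment does not reach a `Healthy` state, the service logs are the first place to look. A sketch using the standard `Webservice.get_logs` API:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Inspect the service logs when the deployment is not healthy\n",
"if aks_service.state != \"Healthy\":\n",
"    print(aks_service.get_logs())"
]
},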
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test the web service\n",
"Finally, let's test our deployed web service to predict new images. You can pass in any image. In this case, we'll use a random image from the dataset and pass it to the scoring URI."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"\n",
"# URL for the web service\n",
"scoring_uri = aks_service.scoring_uri\n",
"\n",
"# If the service is authenticated, set the key or token\n",
"key, _ = aks_service.get_keys()\n",
"\n",
"sample_image = \"./test_image.jpg\"\n",
"\n",
"# Load image data\n",
"data = open(sample_image, \"rb\").read()\n",
"\n",
"# Set the content type\n",
"headers = {\"Content-Type\": \"application/octet-stream\"}\n",
"\n",
"# If authentication is enabled, set the authorization header\n",
"headers[\"Authorization\"] = f\"Bearer {key}\"\n",
"\n",
"# Make the request and display the response\n",
"resp = requests.post(scoring_uri, data, headers=headers)\n",
"print(resp.text)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Visualize predictions\n",
"Now that we have scored a test image, we can visualize the predictions for this image"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib.image as mpimg\n",
"import matplotlib.patches as patches\n",
"from matplotlib.lines import Line2D\n",
"from PIL import Image\n",
"import numpy as np\n",
"import json\n",
"\n",
"IMAGE_SIZE = (18, 12)\n",
"plt.figure(figsize=IMAGE_SIZE)\n",
"img_np = mpimg.imread(sample_image)\n",
"img = Image.fromarray(img_np.astype(\"uint8\"), \"RGB\")\n",
"x, y = img.size\n",
"\n",
"fig, ax = plt.subplots(1, figsize=(15, 15))\n",
"# Display the image\n",
"ax.imshow(img_np)\n",
"\n",
"# draw box and label for each detection\n",
"detections = json.loads(resp.text)\n",
"for detect in detections[\"boxes\"]:\n",
" label = detect[\"label\"]\n",
" box = detect[\"box\"]\n",
" polygon = detect[\"polygon\"]\n",
" conf_score = detect[\"score\"]\n",
" if conf_score > 0.6:\n",
" ymin, xmin, ymax, xmax = (\n",
" box[\"topY\"],\n",
" box[\"topX\"],\n",
" box[\"bottomY\"],\n",
" box[\"bottomX\"],\n",
" )\n",
" topleft_x, topleft_y = x * xmin, y * ymin\n",
" width, height = x * (xmax - xmin), y * (ymax - ymin)\n",
" print(\n",
" \"{}: [{}, {}, {}, {}], {}\".format(\n",
" detect[\"label\"],\n",
" round(topleft_x, 3),\n",
" round(topleft_y, 3),\n",
" round(width, 3),\n",
" round(height, 3),\n",
" round(conf_score, 3),\n",
" )\n",
" )\n",
"\n",
" color = np.random.rand(3) #'red'\n",
" rect = patches.Rectangle(\n",
" (topleft_x, topleft_y),\n",
" width,\n",
" height,\n",
" linewidth=2,\n",
" edgecolor=color,\n",
" facecolor=\"none\",\n",
" )\n",
"\n",
" ax.add_patch(rect)\n",
" plt.text(topleft_x, topleft_y - 10, label, color=color, fontsize=20)\n",
"\n",
" polygon_np = np.array(polygon[0])\n",
" polygon_np = polygon_np.reshape(-1, 2)\n",
" polygon_np[:, 0] *= x\n",
" polygon_np[:, 1] *= y\n",
" poly = patches.Polygon(polygon_np, True, facecolor=color, alpha=0.4)\n",
" ax.add_patch(poly)\n",
" poly_line = Line2D(\n",
" polygon_np[:, 0],\n",
" polygon_np[:, 1],\n",
" linewidth=2,\n",
" marker=\"o\",\n",
" markersize=8,\n",
" markerfacecolor=color,\n",
" )\n",
" ax.add_line(poly_line)\n",
"plt.show()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.6 - AzureML",
"language": "python",
"name": "python3-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.10"
},
"nteract": {
"version": "nteract-front-end@1.0.0"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -0,0 +1,213 @@
import argparse
import os
import json
import numpy as np
import PIL.Image as Image
import xml.etree.ElementTree as ET
from simplification.cutil import simplify_coords
from skimage import measure


def convert_mask_to_polygon(
mask,
max_polygon_points=100,
score_threshold=0.5,
max_refinement_iterations=25,
edge_safety_padding=1,
):
"""Convert a numpy mask to a polygon outline in normalized coordinates.
:param mask: Pixel mask, where each pixel has an object (float) score in [0, 1], in size ([1, height, width])
:type: mask: <class 'numpy.array'>
:param max_polygon_points: Maximum number of (x, y) coordinate pairs in polygon
:type: max_polygon_points: Int
:param score_threshold: Score cutoff for considering a pixel as in object.
:type: score_threshold: Float
:param max_refinement_iterations: Maximum number of times to refine the polygon
trying to reduce the number of pixels to meet max polygon points.
:type: max_refinement_iterations: Int
:param edge_safety_padding: Number of pixels to pad the mask with
:type edge_safety_padding: Int
:return: normalized polygon coordinates
:rtype: list of list
"""
# Convert to numpy bitmask
mask = mask[0]
mask_array = np.array((mask > score_threshold), dtype=np.uint8)
image_shape = mask_array.shape
# Pad the mask to avoid errors at the edge of the mask
embedded_mask = np.zeros(
(
image_shape[0] + 2 * edge_safety_padding,
image_shape[1] + 2 * edge_safety_padding,
),
dtype=np.uint8,
)
embedded_mask[
edge_safety_padding : image_shape[0] + edge_safety_padding,
edge_safety_padding : image_shape[1] + edge_safety_padding,
] = mask_array
# Find Image Contours
contours = measure.find_contours(embedded_mask, 0.5)
simplified_contours = []
for contour in contours:
# Iteratively reduce polygon points, if necessary
if max_polygon_points is not None:
simplify_factor = 0
while (
len(contour) > max_polygon_points
and simplify_factor < max_refinement_iterations
):
contour = simplify_coords(contour, simplify_factor)
simplify_factor += 1
# Convert to [x, y, x, y, ....] coordinates and correct for padding
unwrapped_contour = [0] * (2 * len(contour))
unwrapped_contour[::2] = np.ceil(contour[:, 1]) - edge_safety_padding
unwrapped_contour[1::2] = np.ceil(contour[:, 0]) - edge_safety_padding
simplified_contours.append(unwrapped_contour)
return _normalize_contour(simplified_contours, image_shape)


def _normalize_contour(contours, image_shape):
height, width = image_shape[0], image_shape[1]
for contour in contours:
contour[::2] = [x * 1.0 / width for x in contour[::2]]
contour[1::2] = [y * 1.0 / height for y in contour[1::2]]
return contours


def binarise_mask(mask_fname):
    """Split a color-encoded instance mask into a set of per-object binary masks."""
mask = Image.open(mask_fname)
mask = np.array(mask)
# instances are encoded as different colors
obj_ids = np.unique(mask)
# first id is the background, so remove it
obj_ids = obj_ids[1:]
# split the color-encoded mask into a set of binary masks
binary_masks = mask == obj_ids[:, None, None]
return binary_masks


def parsing_mask(mask_fname):
# For this particular dataset, initially each mask was merged (based on binary mask of each object)
# in the order of the bounding boxes described in the corresponding PASCAL VOC annotation file.
# Therefore, we have to extract each binary mask which is in the order of objects in the annotation file.
# https://github.com/microsoft/computervision-recipes/blob/master/utils_cv/detection/dataset.py
binary_masks = binarise_mask(mask_fname)
polygons = []
for bi_mask in binary_masks:
if len(bi_mask.shape) == 2:
bi_mask = bi_mask[np.newaxis, :]
polygon = convert_mask_to_polygon(bi_mask)
polygons.append(polygon)
return polygons


def convert_mask_in_VOC_to_jsonl(base_dir, workspace):
src = base_dir
train_validation_ratio = 5
    # Retrieve the default datastore that was automatically created when we set up the workspace
workspaceblobstore = workspace.get_default_datastore().name
# Path to the annotations
annotations_folder = os.path.join(src, "annotations")
mask_folder = os.path.join(src, "segmentation-masks")
# Path to the training and validation files
train_annotations_file = os.path.join(src, "train_annotations.jsonl")
validation_annotations_file = os.path.join(src, "validation_annotations.jsonl")
# sample json line dictionary
json_line_sample = {
"image_url": "AmlDatastore://"
+ workspaceblobstore
+ "/"
+ os.path.basename(os.path.dirname(src))
+ "/"
+ "images",
"image_details": {"format": None, "width": None, "height": None},
"label": [],
}
# Read each annotation and convert it to jsonl line
with open(train_annotations_file, "w") as train_f:
with open(validation_annotations_file, "w") as validation_f:
for i, filename in enumerate(os.listdir(annotations_folder)):
if filename.endswith(".xml"):
print("Parsing " + os.path.join(src, filename))
root = ET.parse(
os.path.join(annotations_folder, filename)
).getroot()
width = int(root.find("size/width").text)
height = int(root.find("size/height").text)
# convert mask into polygon
mask_fname = os.path.join(mask_folder, filename[:-4] + ".png")
polygons = parsing_mask(mask_fname)
labels = []
                    for index, obj in enumerate(root.findall("object")):
                        name = obj.find("name").text
                        isCrowd = int(obj.find("difficult").text)
labels.append(
{
"label": name,
"bbox": "null",
"isCrowd": isCrowd,
"polygon": polygons[index],
}
)
# build the jsonl file
image_filename = root.find("filename").text
_, file_extension = os.path.splitext(image_filename)
json_line = dict(json_line_sample)
json_line["image_url"] = (
json_line["image_url"] + "/" + image_filename
)
json_line["image_details"]["format"] = file_extension[1:]
json_line["image_details"]["width"] = width
json_line["image_details"]["height"] = height
json_line["label"] = labels
if i % train_validation_ratio == 0:
# validation annotation
validation_f.write(json.dumps(json_line) + "\n")
else:
# train annotation
train_f.write(json.dumps(json_line) + "\n")
else:
print("Skipping unknown file: {}".format(filename))
if __name__ == "__main__":
parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument(
"--data_path",
type=str,
help="the directory contains images, annotations, and masks",
)
args, remaining_args = parser.parse_known_args()
data_path = args.data_path
convert_mask_in_VOC_to_jsonl(data_path)

Binary file not shown.

(Image file added; 156 KiB; not rendered in this diff view)

Some files were not shown because too many files have changed in this diff.