Compare commits

...

4 Commits

Author SHA1 Message Date
amlrelsa-ms
dd494e9cac update samples from Release-117 as a part of SDK release 2021-12-13 16:57:22 +00:00
Harneet Virk
352adb7487 Merge pull request #1629 from Azure/release_update/Release-116
Update samples from Release as a part of SDK release 1.36.0
2021-11-08 09:48:25 -08:00
amlrelsa-ms
aebe34b4e8 update samples from Release-116 as a part of SDK release 2021-11-08 16:09:41 +00:00
Harneet Virk
c7e1241e20 Merge pull request #1612 from Azure/release_update/Release-115
Update samples from Release-115 as a part of SDK release
2021-10-11 12:01:59 -07:00
82 changed files with 4668 additions and 1187 deletions

View File

@@ -103,7 +103,7 @@
"source": [
"import azureml.core\n",
"\n",
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

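These version-bump hunks update the pinned SDK string that each notebook prints. A minimal sketch of comparing the installed SDK against the notebook's target version programmatically (assuming the packaging library, which pip itself depends on, is available):

from packaging.version import Version

import azureml.core

NOTEBOOK_SDK_VERSION = "1.37.0"  # the version string pinned in this diff
if Version(azureml.core.VERSION) < Version(NOTEBOOK_SDK_VERSION):
    print("Consider upgrading: this notebook targets", NOTEBOOK_SDK_VERSION)
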
View File

@@ -6,4 +6,4 @@ dependencies:
- fairlearn>=0.6.2
- joblib
- liac-arff
- raiwidgets~=0.11.0
- raiwidgets~=0.15.0

View File

@@ -6,4 +6,4 @@ dependencies:
- fairlearn>=0.6.2
- joblib
- liac-arff
- raiwidgets~=0.11.0
- raiwidgets~=0.15.0

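The raiwidgets pin above uses pip's compatible-release operator: ~=0.15.0 permits any 0.15.x patch release but excludes 0.16.0. A quick sketch verifying that with the packaging library (illustration only):

from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=0.15.0")  # equivalent to >=0.15.0, <0.16.0
print("0.15.3" in spec)  # True: patch upgrades are allowed
print("0.16.0" in spec)  # False: the minor version is capped
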
View File

@@ -4,7 +4,6 @@ dependencies:
# Currently Azure ML only supports 3.5.2 and later.
- pip==21.1.2
- python>=3.5.2,<3.8
- nb_conda
- boto3==1.15.18
- matplotlib==2.1.0
- numpy==1.18.5
@@ -22,9 +21,9 @@ dependencies:
- pip:
# Required packages for AzureML execution, history, and data preparation.
- azureml-widgets~=1.35.0
- azureml-widgets~=1.37.0
- pytorch-transformers==1.0.0
- spacy==2.1.8
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
- -r https://automlresources-prod.azureedge.net/validated-requirements/1.35.0/validated_win32_requirements.txt [--no-deps]
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.37.0/validated_win32_requirements.txt [--no-deps]
- arch==4.14

View File

@@ -22,9 +22,9 @@ dependencies:
- pip:
# Required packages for AzureML execution, history, and data preparation.
- azureml-widgets~=1.35.0
- azureml-widgets~=1.37.0
- pytorch-transformers==1.0.0
- spacy==2.1.8
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
- -r https://automlresources-prod.azureedge.net/validated-requirements/1.35.0/validated_linux_requirements.txt [--no-deps]
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.37.0/validated_linux_requirements.txt [--no-deps]
- arch==4.14

View File

@@ -23,9 +23,9 @@ dependencies:
- pip:
# Required packages for AzureML execution, history, and data preparation.
- azureml-widgets~=1.35.0
- azureml-widgets~=1.37.0
- pytorch-transformers==1.0.0
- spacy==2.1.8
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
- -r https://automlresources-prod.azureedge.net/validated-requirements/1.35.0/validated_darwin_requirements.txt [--no-deps]
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.37.0/validated_darwin_requirements.txt [--no-deps]
- arch==4.14

View File

@@ -77,6 +77,7 @@
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import logging\n",
"\n",
"from matplotlib import pyplot as plt\n",
@@ -104,7 +105,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -410,7 +411,8 @@
"metadata": {},
"outputs": [],
"source": [
"best_run_customized, fitted_model_customized = remote_run.get_output()"
"# Retrieve the best Run object\n",
"best_run = remote_run.get_best_child()"
]
},
{
@@ -419,7 +421,7 @@
"source": [
"## Transparency\n",
"\n",
"View updated featurization summary"
"View featurization summary for the best model - to study how different features were transformed. This is stored as a JSON file in the outputs directory for the run."
]
},
{
@@ -428,36 +430,14 @@
"metadata": {},
"outputs": [],
"source": [
"custom_featurizer = fitted_model_customized.named_steps['datatransformer']\n",
"df = custom_featurizer.get_featurization_summary()\n",
"pd.DataFrame(data=df)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Set `is_user_friendly=False` to get a more detailed summary for the transforms being applied."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df = custom_featurizer.get_featurization_summary(is_user_friendly=False)\n",
"pd.DataFrame(data=df)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df = custom_featurizer.get_stats_feature_type_summary()\n",
"pd.DataFrame(data=df)"
"# Download the featuurization summary JSON file locally\n",
"best_run.download_file(\"outputs/featurization_summary.json\", \"featurization_summary.json\")\n",
"\n",
"# Render the JSON as a pandas DataFrame\n",
"with open(\"featurization_summary.json\", \"r\") as f:\n",
" records = json.load(f)\n",
"\n",
"pd.DataFrame.from_records(records)"
]
},
{
@@ -499,7 +479,7 @@
"model_explainability_run.wait_for_completion()\n",
"\n",
"# Get the best run object\n",
"best_run, fitted_model = remote_run.get_output()"
"best_run = remote_run.get_best_child()"
]
},
{
@@ -629,7 +609,16 @@
"\n",
"### Retrieve the Best Model\n",
"\n",
"Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*."
"Below we select the best pipeline from our iterations. The `get_best_child` method returns the Run object for the best model based on the default primary metric. There are additional flags that can be passed to the method if we want to retrieve the best Run based on any of the other supported metrics, or if we are just interested in the best run among the ONNX compatible runs. As always, you can execute `remote_run.get_best_child??` in a new cell to view the source or docs for the function."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"remote_run.get_best_child??"
]
},
{
@@ -649,7 +638,7 @@
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = remote_run.get_output()"
"best_run = remote_run.get_best_child()"
]
},
{

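The hunks above replace get_output() with get_best_child() throughout this notebook. A hedged sketch of the overloads the new markdown refers to, assuming get_best_child accepts metric and onnx_compatible keyword arguments:

# Best child run by the default primary metric, as used in the diff above.
best_run = remote_run.get_best_child()

# Assumed overloads: select by another supported metric, or consider
# only the ONNX-compatible child runs.
best_by_metric = remote_run.get_best_child(metric="AUC_weighted")
best_onnx = remote_run.get_best_child(onnx_compatible=True)
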
View File

@@ -93,7 +93,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -63,6 +63,7 @@
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import logging\n",
"import os\n",
"import shutil\n",
@@ -96,7 +97,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -340,8 +341,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"You can test the model locally to get a feel of the input/output. When the model contains BERT, this step will require pytorch and pytorch-transformers installed in your local environment. The exact versions of these packages can be found in the **automl_env.yml** file located in the local copy of your MachineLearningNotebooks folder here:\n",
"MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/automl_env.yml"
"For local inferencing, you can load the model locally via. the method `remote_run.get_output()`. For more information on the arguments expected by this method, you can run `remote_run.get_output??`.\n",
"Note that when the model contains BERT, this step will require pytorch and pytorch-transformers installed in your local environment. The exact versions of these packages can be found in the **automl_env.yml** file located in the local copy of your MachineLearningNotebooks folder here:\n",
"MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/automl_env.yml\n"
]
},
{
@@ -350,7 +352,8 @@
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = automl_run.get_output()"
"# Retrieve the best Run object\n",
"best_run = automl_run.get_best_child()"
]
},
{
@@ -366,10 +369,15 @@
"metadata": {},
"outputs": [],
"source": [
"text_transformations_used = []\n",
"for column_group in fitted_model.named_steps['datatransformer'].get_featurization_summary():\n",
" text_transformations_used.extend(column_group['Transformations'])\n",
"text_transformations_used"
"# Download the featuurization summary JSON file locally\n",
"best_run.download_file(\"outputs/featurization_summary.json\", \"featurization_summary.json\")\n",
"\n",
"# Render the JSON as a pandas DataFrame\n",
"with open(\"featurization_summary.json\", \"r\") as f:\n",
" records = json.load(f)\n",
"\n",
"featurization_summary = pd.DataFrame.from_records(records)\n",
"featurization_summary['Transformations'].tolist()"
]
},
{

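The rewritten cells above split responsibilities: get_best_child() fetches the best Run object for artifacts and metrics, while get_output() is reserved for loading the fitted model locally. A brief sketch under those assumptions, reusing the notebook's automl_run variable (X_test is a placeholder DataFrame you supply):

# Run object for metrics and artifacts; no local model dependencies needed.
best_run = automl_run.get_best_child()

# Fitted model for local inference; requires the training-time packages
# (e.g. pytorch and pytorch-transformers when the model contains BERT).
_, fitted_model = automl_run.get_output()
predictions = fitted_model.predict(X_test)  # X_test: your input DataFrame
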
View File

@@ -81,7 +81,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -92,7 +92,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -91,7 +91,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

Binary file not shown (image, 22 KiB)

View File

@@ -0,0 +1,167 @@
from typing import Any, Dict, Optional, List
import argparse
import json
import os
import re

import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

from azureml.automl.core.shared import constants
from azureml.automl.core.shared.types import GrainType
from azureml.automl.runtime.shared.score import scoring

GRAIN = "time_series_id"
BACKTEST_ITER = "backtest_iteration"
ACTUALS = "actual_level"
PREDICTIONS = "predicted_level"
ALL_GRAINS = "all_sets"

FORECASTS_FILE = "forecast.csv"
SCORES_FILE = "scores.csv"
PLOTS_FILE = "plots_fcst_vs_actual.pdf"
RE_INVALID_SYMBOLS = re.compile("[: ]")


def _compute_metrics(df: pd.DataFrame, metrics: List[str]):
    """
    Compute metrics for one data frame.

    :param df: The data frame which contains actual_level and predicted_level columns.
    :return: The data frame with two columns - metric_name and metric.
    """
    scores = scoring.score_regression(
        y_test=df[ACTUALS], y_pred=df[PREDICTIONS], metrics=metrics
    )
    metrics_df = pd.DataFrame(list(scores.items()), columns=["metric_name", "metric"])
    metrics_df.sort_values(["metric_name"], inplace=True)
    metrics_df.reset_index(drop=True, inplace=True)
    return metrics_df


def _format_grain_name(grain: GrainType) -> str:
    """
    Convert grain name to string.

    :param grain: the grain name.
    :return: the string representation of the given grain.
    """
    if not isinstance(grain, tuple) and not isinstance(grain, list):
        return str(grain)
    grain = list(map(str, grain))
    return "|".join(grain)


def compute_all_metrics(
    fcst_df: pd.DataFrame,
    ts_id_colnames: List[str],
    metric_names: Optional[List[str]] = None,
):
    """
    Calculate metrics per grain.

    :param fcst_df: forecast data frame. Must contain 2 columns: 'actual_level' and 'predicted_level'
    :param metric_names: (optional) the list of metric names to return
    :param ts_id_colnames: (optional) list of grain column names
    :return: the data frame of per-grain metrics, plus an overall row marked "all_sets"
    """
    if not metric_names:
        metric_names = list(constants.Metric.SCALAR_REGRESSION_SET)
    if ts_id_colnames is None:
        ts_id_colnames = []
    metrics_list = []
    if ts_id_colnames:
        for grain, df in fcst_df.groupby(ts_id_colnames):
            one_grain_metrics_df = _compute_metrics(df, metric_names)
            one_grain_metrics_df[GRAIN] = _format_grain_name(grain)
            metrics_list.append(one_grain_metrics_df)
    # overall metrics
    one_grain_metrics_df = _compute_metrics(fcst_df, metric_names)
    one_grain_metrics_df[GRAIN] = ALL_GRAINS
    metrics_list.append(one_grain_metrics_df)
    # collect into a data frame
    return pd.concat(metrics_list)


def _draw_one_plot(
    df: pd.DataFrame,
    time_column_name: str,
    grain_column_names: List[str],
    pdf: PdfPages,
) -> None:
    """
    Draw the single plot.

    :param df: The data frame with the data to build plot.
    :param time_column_name: The name of a time column.
    :param grain_column_names: The name of grain columns.
    :param pdf: The pdf backend used to render the plot.
    """
    fig, _ = plt.subplots(figsize=(20, 10))
    df = df.set_index(time_column_name)
    plt.plot(df[[ACTUALS, PREDICTIONS]])
    plt.xticks(rotation=45)
    iteration = df[BACKTEST_ITER].iloc[0]
    if grain_column_names:
        grain_name = [df[grain].iloc[0] for grain in grain_column_names]
        plt.title(f"Time series ID: {_format_grain_name(grain_name)} {iteration}")
    plt.legend(["actual", "forecast"])
    plt.close(fig)
    pdf.savefig(fig)


def calculate_scores_and_build_plots(
    input_dir: str, output_dir: str, automl_settings: Dict[str, Any]
):
    os.makedirs(output_dir, exist_ok=True)
    grains = automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
    time_column_name = automl_settings.get(constants.TimeSeries.TIME_COLUMN_NAME)
    if grains is None:
        grains = []
    if isinstance(grains, str):
        grains = [grains]
    while BACKTEST_ITER in grains:
        grains.remove(BACKTEST_ITER)
    dfs = []
    for fle in os.listdir(input_dir):
        file_path = os.path.join(input_dir, fle)
        if os.path.isfile(file_path) and file_path.endswith(".csv"):
            df_iter = pd.read_csv(file_path, parse_dates=[time_column_name])
            for _, iteration in df_iter.groupby(BACKTEST_ITER):
                dfs.append(iteration)
    forecast_df = pd.concat(dfs, sort=False, ignore_index=True)
    # To make sure plots are in order, sort the predictions by grain and iteration.
    ts_index = grains + [BACKTEST_ITER]
    forecast_df.sort_values(by=ts_index, inplace=True)
    pdf = PdfPages(os.path.join(output_dir, PLOTS_FILE))
    for _, one_forecast in forecast_df.groupby(ts_index):
        _draw_one_plot(one_forecast, time_column_name, grains, pdf)
    pdf.close()
    forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False)
    metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER])
    metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False)


if __name__ == "__main__":
    args = {"forecasts": "--forecasts", "scores_out": "--output-dir"}
    parser = argparse.ArgumentParser("Parsing input arguments.")
    for argname, arg in args.items():
        parser.add_argument(arg, dest=argname, required=True)
    parsed_args, _ = parser.parse_known_args()
    input_dir = parsed_args.forecasts
    output_dir = parsed_args.scores_out
    with open(
        os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
        )
    ) as json_file:
        automl_settings = json.load(json_file)
    calculate_scores_and_build_plots(input_dir, output_dir, automl_settings)

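Besides the CLI entry point above, the helper can be driven directly, mirroring the call made later in the backtesting notebook (directory names are placeholders; the settings keys match the constants.TimeSeries lookups in the script):

from assets.score import calculate_scores_and_build_plots

automl_settings = {
    "time_column_name": "date",
    "grain_column_names": ["time_series_id"],
}
calculate_scores_and_build_plots(
    "forecasting_results", "backtest_scores", automl_settings
)
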
View File

@@ -0,0 +1,725 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Many Models with Backtesting - Automated ML\n",
"**_Backtest many models time series forecasts with Automated Machine Learning_**\n",
"\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For this notebook we are using a synthetic dataset to demonstrate the back testing in many model scenario. This allows us to check historical performance of AutoML on a historical data. To do that we step back on the backtesting period by the data set several times and split the data to train and test sets. Then these data sets are used for training and evaluation of model.<br>\n",
"\n",
"Thus, it is a quick way of evaluating AutoML as if it was in production. Here, we do not test historical performance of a particular model, for this see the [notebook](../forecasting-backtest-single-model/auto-ml-forecasting-backtest-single-model.ipynb). Instead, the best model for every backtest iteration can be different since AutoML chooses the best model for a given training set.\n",
"![Backtesting](Backtesting.png)\n",
"\n",
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prerequisites\n",
"You'll need to create a compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1.0 Set up workspace, datastore, experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003526897
}
},
"outputs": [],
"source": [
"import os\n",
"\n",
"import azureml.core\n",
"from azureml.core import Workspace, Datastore\n",
"import numpy as np\n",
"import pandas as pd\n",
"\n",
"from pandas.tseries.frequencies import to_offset\n",
"\n",
"# Set up your workspace\n",
"ws = Workspace.from_config()\n",
"ws.get_details()\n",
"\n",
"# Set up your datastores\n",
"dstore = ws.get_default_datastore()\n",
"\n",
"output = {}\n",
"output[\"SDK version\"] = azureml.core.VERSION\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Default datastore name\"] = dstore.name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook is compatible with Azure ML SDK version 1.35.1 or later."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose an experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613003540729
}
},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment = Experiment(ws, \"automl-many-models-backtest\")\n",
"\n",
"print(\"Experiment name: \" + experiment.name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2.0 Data\n",
"\n",
"#### 2.1 Data generation\n",
"For this notebook we will generate the artificial data set with two [time series IDs](https://docs.microsoft.com/en-us/python/api/azureml-automl-core/azureml.automl.core.forecasting_parameters.forecastingparameters?view=azure-ml-py). Then we will generate backtest folds and will upload it to the default BLOB storage and create a [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# simulate data: 2 grains - 700\n",
"TIME_COLNAME = \"date\"\n",
"TARGET_COLNAME = \"value\"\n",
"TIME_SERIES_ID_COLNAME = \"ts_id\"\n",
"\n",
"sample_size = 700\n",
"# Set the random seed for reproducibility of results.\n",
"np.random.seed(20)\n",
"X1 = pd.DataFrame(\n",
" {\n",
" TIME_COLNAME: pd.date_range(start=\"2018-01-01\", periods=sample_size),\n",
" TARGET_COLNAME: np.random.normal(loc=100, scale=20, size=sample_size),\n",
" TIME_SERIES_ID_COLNAME: \"ts_A\",\n",
" }\n",
")\n",
"X2 = pd.DataFrame(\n",
" {\n",
" TIME_COLNAME: pd.date_range(start=\"2018-01-01\", periods=sample_size),\n",
" TARGET_COLNAME: np.random.normal(loc=100, scale=20, size=sample_size),\n",
" TIME_SERIES_ID_COLNAME: \"ts_B\",\n",
" }\n",
")\n",
"\n",
"X = pd.concat([X1, X2], ignore_index=True, sort=False)\n",
"print(\"Simulated dataset contains {} rows \\n\".format(X.shape[0]))\n",
"X.head()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we will generate 8 backtesting folds with backtesting period of 7 days and with the same forecasting horizon. We will add the column \"backtest_iteration\", which will identify the backtesting period by the last training date."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"offset_type = \"7D\"\n",
"NUMBER_OF_BACKTESTS = 8 # number of train/test sets to generate\n",
"\n",
"dfs_train = []\n",
"dfs_test = []\n",
"for ts_id, df_one in X.groupby(TIME_SERIES_ID_COLNAME):\n",
"\n",
" data_end = df_one[TIME_COLNAME].max()\n",
"\n",
" for i in range(NUMBER_OF_BACKTESTS):\n",
" train_cutoff_date = data_end - to_offset(offset_type)\n",
" df_one = df_one.copy()\n",
" df_one[\"backtest_iteration\"] = \"iteration_\" + str(train_cutoff_date)\n",
" train = df_one[df_one[TIME_COLNAME] <= train_cutoff_date]\n",
" test = df_one[\n",
" (df_one[TIME_COLNAME] > train_cutoff_date)\n",
" & (df_one[TIME_COLNAME] <= data_end)\n",
" ]\n",
" data_end = train[TIME_COLNAME].max()\n",
" dfs_train.append(train)\n",
" dfs_test.append(test)\n",
"\n",
"X_train = pd.concat(dfs_train, sort=False, ignore_index=True)\n",
"X_test = pd.concat(dfs_test, sort=False, ignore_index=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### 2.2 Create the Tabular Data Set.\n",
"\n",
"A Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.\n",
"\n",
"Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.\n",
"\n",
"In this next step, we will upload the data and create a TabularDataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
"\n",
"ds = ws.get_default_datastore()\n",
"# Upload saved data to the default data store.\n",
"train_data = TabularDatasetFactory.register_pandas_dataframe(\n",
" X_train, target=(ds, \"data_mm\"), name=\"data_train\"\n",
")\n",
"test_data = TabularDatasetFactory.register_pandas_dataframe(\n",
" X_test, target=(ds, \"data_mm\"), name=\"data_test\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 3.0 Build the training pipeline\n",
"Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Choose a compute target\n",
"\n",
"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
"\n",
"\\*\\*Creation of AmlCompute takes approximately 5 minutes.**\n",
"\n",
"If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007037308
}
},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"\n",
"# Name your cluster\n",
"compute_name = \"backtest-mm\"\n",
"\n",
"\n",
"if compute_name in ws.compute_targets:\n",
" compute_target = ws.compute_targets[compute_name]\n",
" if compute_target and type(compute_target) is AmlCompute:\n",
" print(\"Found compute target: \" + compute_name)\n",
"else:\n",
" print(\"Creating a new compute target...\")\n",
" provisioning_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
" )\n",
" # Create the compute target\n",
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
"\n",
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
" # If no min node count is provided it will use the scale settings for the cluster\n",
" compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
" )\n",
"\n",
" # For a more detailed view of current cluster status, use the 'status' property\n",
" print(compute_target.status.serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up training parameters\n",
"\n",
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition. Please note, that in this case we are setting grain_column_names to be the time series ID column plus iteration, because we want to train a separate model for each time series and iteration.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **task** | forecasting |\n",
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>normalized_root_mean_squared_error</i><br><i>normalized_mean_absolute_error</i> |\n",
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
"| **label_column_name** | The name of the label column. |\n",
"| **max_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **grain_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
"| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613007061544
}
},
"outputs": [],
"source": [
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
" ManyModelsTrainParameters,\n",
")\n",
"\n",
"partition_column_names = [TIME_SERIES_ID_COLNAME, \"backtest_iteration\"]\n",
"automl_settings = {\n",
" \"task\": \"forecasting\",\n",
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
" \"iteration_timeout_minutes\": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n",
" \"iterations\": 15,\n",
" \"experiment_timeout_hours\": 0.25, # This also needs to be changed based on the dataset. For larger data set this number needs to be bigger.\n",
" \"label_column_name\": TARGET_COLNAME,\n",
" \"n_cross_validations\": 3,\n",
" \"time_column_name\": TIME_COLNAME,\n",
" \"max_horizon\": 6,\n",
" \"grain_column_names\": partition_column_names,\n",
" \"track_child_runs\": False,\n",
"}\n",
"\n",
"mm_paramters = ManyModelsTrainParameters(\n",
" automl_settings=automl_settings, partition_column_names=partition_column_names\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up many models pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **experiment** | The experiment used for training. |\n",
"| **train_data** | The file dataset to be used as input to the training run. |\n",
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. |\n",
"| **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node or optimal performance. |\n",
"| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n",
"\n",
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"\n",
"\n",
"training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(\n",
" experiment=experiment,\n",
" train_data=train_data,\n",
" compute_target=compute_target,\n",
" node_count=2,\n",
" process_count_per_node=2,\n",
" run_invocation_timeout=920,\n",
" train_pipeline_parameters=mm_paramters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"training_pipeline = Pipeline(ws, steps=training_pipeline_steps)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit the pipeline to run\n",
"Next we submit our pipeline to run. The whole training pipeline takes about 20 minutes using a STANDARD_DS12_V2 VM with our current ParallelRunConfig setting."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run = experiment.submit(training_pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Check the run status, if training_run is in completed state, continue to next section. Otherwise, check the portal for failures."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 4.0 Backtesting\n",
"Now that we selected the best AutoML model for each backtest fold, we will use these models to generate the forecasts and compare with the actuals."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set up output dataset for inference data\n",
"Output of inference can be represented as [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object and OutputFileDatasetConfig can be registered as a dataset. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data import OutputFileDatasetConfig\n",
"\n",
"output_inference_data_ds = OutputFileDatasetConfig(\n",
" name=\"many_models_inference_output\",\n",
" destination=(dstore, \"backtesting/inference_data/\"),\n",
").register_on_complete(name=\"backtesting_data_ds\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For many models we need to provide the ManyModelsInferenceParameters object.\n",
"\n",
"#### ManyModelsInferenceParameters arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **partition_column_names** | List of column names that identifies groups. |\n",
"| **target_column_name** | \\[Optional\\] Column name only if the inference dataset has the target. |\n",
"| **time_column_name** | Column name only if it is timeseries. |\n",
"| **many_models_run_id** | \\[Optional\\] Many models pipeline run id where models were trained. |\n",
"\n",
"#### get_many_models_batch_inference_steps arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **experiment** | The experiment used for inference run. |\n",
"| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n",
"| **compute_target** | The compute target that runs the inference pipeline.|\n",
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n",
"| **process_count_per_node** | The number of processes per node.\n",
"| **train_run_id** | \\[Optional\\] The run id of the hierarchy training, by default it is the latest successful training many model run in the experiment. |\n",
"| **train_experiment_name** | \\[Optional\\] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n",
"| **process_count_per_node** | \\[Optional\\] The number of processes per node, by default it's 4. |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
" ManyModelsInferenceParameters,\n",
")\n",
"\n",
"mm_parameters = ManyModelsInferenceParameters(\n",
" partition_column_names=partition_column_names,\n",
" time_column_name=TIME_COLNAME,\n",
" target_column_name=TARGET_COLNAME,\n",
")\n",
"\n",
"inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
" experiment=experiment,\n",
" inference_data=test_data,\n",
" node_count=2,\n",
" process_count_per_node=2,\n",
" compute_target=compute_target,\n",
" run_invocation_timeout=300,\n",
" output_datastore=output_inference_data_ds,\n",
" train_run_id=training_run.id,\n",
" train_experiment_name=training_run.experiment.name,\n",
" inference_pipeline_parameters=mm_parameters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline\n",
"\n",
"inference_pipeline = Pipeline(ws, steps=inference_steps)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inference_run = experiment.submit(inference_pipeline)\n",
"inference_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 5.0 Retrieve results and calculate metrics\n",
"\n",
"The pipeline returns one file with the predictions for each times series ID and outputs the result to the forecasting_output Blob container. The details of the blob container is listed in 'forecasting_output.txt' under Outputs+logs. \n",
"\n",
"The next code snippet does the following:\n",
"1. Downloads the contents of the output folder that is passed in the parallel run step \n",
"2. Reads the parallel_run_step.txt file that has the predictions as pandas dataframe \n",
"3. Saves the table in csv format and \n",
"4. Displays the top 10 rows of the predictions"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline\n",
"\n",
"forecasting_results_name = \"forecasting_results\"\n",
"forecasting_output_name = \"many_models_inference_output\"\n",
"forecast_file = get_output_from_mm_pipeline(\n",
" inference_run, forecasting_results_name, forecasting_output_name\n",
")\n",
"df = pd.read_csv(forecast_file, delimiter=\" \", header=None, parse_dates=[0])\n",
"df.columns = list(X_train.columns) + [\"predicted_level\"]\n",
"print(\n",
" \"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\"\n",
")\n",
"# Save the scv file with header to read it in the next step.\n",
"df.rename(columns={TARGET_COLNAME: \"actual_level\"}, inplace=True)\n",
"df.to_csv(os.path.join(forecasting_results_name, \"forecast.csv\"), index=False)\n",
"df.head(10)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## View metrics\n",
"We will read in the obtained results and run the helper script, which will generate metrics and create the plots of predicted versus actual values."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from assets.score import calculate_scores_and_build_plots\n",
"\n",
"backtesting_results = \"backtesting_mm_results\"\n",
"os.makedirs(backtesting_results, exist_ok=True)\n",
"calculate_scores_and_build_plots(\n",
" forecasting_results_name, backtesting_results, automl_settings\n",
")\n",
"pd.DataFrame({\"File\": os.listdir(backtesting_results)})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The directory contains a set of files with results:\n",
"- forecast.csv contains forecasts for all backtest iterations. The backtest_iteration column contains iteration identifier with the last training date as a suffix\n",
"- scores.csv contains all metrics. If data set contains several time series, the metrics are given for all combinations of time series id and iterations, as well as scores for all iterations and time series ids, which are marked as \"all_sets\"\n",
"- plots_fcst_vs_actual.pdf contains the predictions vs forecast plots for each iteration and, eash time series is saved as separate plot.\n",
"\n",
"For demonstration purposes we will display the table of metrics for one of the time series with ID \"ts0\". We will create the utility function, which will build the table with metrics."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def get_metrics_for_ts(all_metrics, ts):\n",
" \"\"\"\n",
" Get the metrics for the time series with ID ts and return it as pandas data frame.\n",
"\n",
" :param all_metrics: The table with all the metrics.\n",
" :param ts: The ID of a time series of interest.\n",
" :return: The pandas DataFrame with metrics for one time series.\n",
" \"\"\"\n",
" results_df = None\n",
" for ts_id, one_series in all_metrics.groupby(\"time_series_id\"):\n",
" if not ts_id.startswith(ts):\n",
" continue\n",
" iteration = ts_id.split(\"|\")[-1]\n",
" df = one_series[[\"metric_name\", \"metric\"]]\n",
" df.rename({\"metric\": iteration}, axis=1, inplace=True)\n",
" df.set_index(\"metric_name\", inplace=True)\n",
" if results_df is None:\n",
" results_df = df\n",
" else:\n",
" results_df = results_df.merge(\n",
" df, how=\"inner\", left_index=True, right_index=True\n",
" )\n",
" results_df.sort_index(axis=1, inplace=True)\n",
" return results_df\n",
"\n",
"\n",
"metrics_df = pd.read_csv(os.path.join(backtesting_results, \"scores.csv\"))\n",
"ts = \"ts_A\"\n",
"get_metrics_for_ts(metrics_df, ts)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Forecast vs actuals plots."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import IFrame\n",
"\n",
"IFrame(\"./backtesting_mm_results/plots_fcst_vs_actual.pdf\", width=800, height=300)"
]
}
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"categories": [
"how-to-use-azureml",
"automated-machine-learning"
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

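As a sanity check on the scores.csv produced by the notebook above, the per-iteration error can be recomputed directly from forecast.csv with scikit-learn (an illustrative substitute for the AzureML scoring utilities; the column names follow the scoring helper script):

import pandas as pd
from sklearn.metrics import mean_absolute_error

fcst = pd.read_csv("backtesting_mm_results/forecast.csv")
for iteration, df in fcst.groupby("backtest_iteration"):
    mae = mean_absolute_error(df["actual_level"], df["predicted_level"])
    print(f"{iteration}: MAE={mae:.3f}")
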
View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-backtest-many-models
dependencies:
- pip:
- azureml-sdk

Binary file not shown (image, 22 KiB)

View File

@@ -0,0 +1,45 @@
import argparse
import os

import pandas as pd

import azureml.train.automl.runtime._hts.hts_runtime_utilities as hru
from azureml.core import Run
from azureml.core.dataset import Dataset

# Parse the arguments.
args = {
    "step_size": "--step-size",
    "step_number": "--step-number",
    "time_column_name": "--time-column-name",
    "time_series_id_column_names": "--time-series-id-column-names",
    "out_dir": "--output-dir",
}

parser = argparse.ArgumentParser("Parsing input arguments.")
for argname, arg in args.items():
    parser.add_argument(arg, dest=argname, required=True)
parsed_args, _ = parser.parse_known_args()
step_number = int(parsed_args.step_number)
step_size = int(parsed_args.step_size)

# Create the working directory to store the temporary csv files.
working_dir = parsed_args.out_dir
os.makedirs(working_dir, exist_ok=True)

# Set input and output
script_run = Run.get_context()
input_dataset = script_run.input_datasets["training_data"]
X_train = input_dataset.to_pandas_dataframe()

# Split the data.
for i in range(step_number):
    file_name = os.path.join(working_dir, "backtest_{}.csv".format(i))
    if parsed_args.time_series_id_column_names:
        dfs = []
        for _, one_series in X_train.groupby([parsed_args.time_series_id_column_names]):
            one_series = one_series.sort_values(
                by=[parsed_args.time_column_name], inplace=False
            )
            dfs.append(one_series.iloc[: len(one_series) - step_size * i])
        pd.concat(dfs, sort=False, ignore_index=True).to_csv(file_name, index=False)
    else:
        X_train.sort_values(by=[parsed_args.time_column_name], inplace=True)
        X_train.iloc[: len(X_train) - step_size * i].to_csv(file_name, index=False)

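A minimal, AzureML-free illustration of the splitting rule above: fold i keeps everything except the last step_size * i rows of each time-sorted series.

import pandas as pd

df = pd.DataFrame({"date": pd.date_range("2021-01-01", periods=6), "y": range(6)})
step_size, step_number = 2, 3
for i in range(step_number):
    fold = df.sort_values("date").iloc[: len(df) - step_size * i]
    print(f"backtest_{i}: {len(fold)} rows, last date {fold['date'].max().date()}")
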
View File

@@ -0,0 +1,173 @@
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""The batch script needed for back testing of models using PRS."""
import argparse
import json
import logging
import os
import pickle
import re

import pandas as pd

from azureml.core.experiment import Experiment
from azureml.core.model import Model
from azureml.core.run import Run
from azureml.automl.core.shared import constants
from azureml.automl.runtime.shared.score import scoring
from azureml.train.automl import AutoMLConfig

RE_INVALID_SYMBOLS = re.compile(r"[:\s]")

model_name = None
target_column_name = None
current_step_run = None
output_dir = None
logger = logging.getLogger(__name__)


def _get_automl_settings():
    with open(
        os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
        )
    ) as json_file:
        return json.load(json_file)


def init():
    global model_name
    global target_column_name
    global output_dir
    global automl_settings
    global model_uid

    logger.info("Initialization of the run.")
    parser = argparse.ArgumentParser("Parsing input arguments.")
    parser.add_argument("--output-dir", dest="out", required=True)
    parser.add_argument("--model-name", dest="model", default=None)
    parser.add_argument("--model-uid", dest="model_uid", default=None)
    parsed_args, _ = parser.parse_known_args()
    model_name = parsed_args.model
    automl_settings = _get_automl_settings()
    target_column_name = automl_settings.get("label_column_name")
    output_dir = parsed_args.out
    model_uid = parsed_args.model_uid
    os.makedirs(output_dir, exist_ok=True)
    os.environ["AUTOML_IGNORE_PACKAGE_VERSION_INCOMPATIBILITIES".lower()] = "True"


def get_run():
    global current_step_run
    if current_step_run is None:
        current_step_run = Run.get_context()
    return current_step_run


def run_backtest(data_input_name: str, file_name: str, experiment: Experiment):
    """Re-train the model and return metrics."""
    data_input = pd.read_csv(
        data_input_name,
        parse_dates=[automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]],
    )
    print(data_input.head())
    if not automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES):
        # There are no grains.
        data_input.sort_values(
            [automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]], inplace=True
        )
        X_train = data_input.iloc[: -automl_settings["max_horizon"]]
        y_train = X_train.pop(target_column_name).values
        X_test = data_input.iloc[-automl_settings["max_horizon"] :]
        y_test = X_test.pop(target_column_name).values
    else:
        # The data contain grains.
        dfs_train = []
        dfs_test = []
        for _, one_series in data_input.groupby(
            automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
        ):
            one_series.sort_values(
                [automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]], inplace=True
            )
            dfs_train.append(one_series.iloc[: -automl_settings["max_horizon"]])
            dfs_test.append(one_series.iloc[-automl_settings["max_horizon"] :])
        X_train = pd.concat(dfs_train, sort=False, ignore_index=True)
        y_train = X_train.pop(target_column_name).values
        X_test = pd.concat(dfs_test, sort=False, ignore_index=True)
        y_test = X_test.pop(target_column_name).values
    last_training_date = str(
        X_train[automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]].max()
    )
    if file_name:
        # If a file name is provided, we will load the model and retrain it on backtest data.
        with open(file_name, "rb") as fp:
            fitted_model = pickle.load(fp)
        fitted_model.fit(X_train, y_train)
    else:
        # We will run the experiment and select the best model.
        X_train[target_column_name] = y_train
        automl_config = AutoMLConfig(training_data=X_train, **automl_settings)
        automl_run = current_step_run.submit_child(automl_config, show_output=True)
        best_run, fitted_model = automl_run.get_output()
        # As we have generated models, we need to register them for future use.
        description = "Backtest model example"
        tags = {"last_training_date": last_training_date, "experiment": experiment.name}
        if model_uid:
            tags["model_uid"] = model_uid
        automl_run.register_model(
            model_name=best_run.properties["model_name"],
            description=description,
            tags=tags,
        )
        print(f"The model {best_run.properties['model_name']} was registered.")
    _, x_pred = fitted_model.forecast(X_test)
    x_pred.reset_index(inplace=True, drop=False)
    columns = [automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]]
    if automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES):
        # We know that fitted_model.grain_column_names is a list.
        columns.extend(fitted_model.grain_column_names)
    columns.append(constants.TimeSeriesInternal.DUMMY_TARGET_COLUMN)
    # Remove featurized columns.
    x_pred = x_pred[columns]
    x_pred.rename(
        {constants.TimeSeriesInternal.DUMMY_TARGET_COLUMN: "predicted_level"},
        axis=1,
        inplace=True,
    )
    x_pred["actual_level"] = y_test
    x_pred["backtest_iteration"] = f"iteration_{last_training_date}"
    date_safe = RE_INVALID_SYMBOLS.sub("_", last_training_date)
    x_pred.to_csv(os.path.join(output_dir, f"iteration_{date_safe}.csv"), index=False)
    return x_pred


def run(input_files):
    """Run the script."""
    logger.info("Running mini batch.")
    ws = get_run().experiment.workspace
    file_name = None
    if model_name:
        models = Model.list(ws, name=model_name)
        cloud_model = None
        if models:
            for one_mod in models:
                if cloud_model is None or one_mod.version > cloud_model.version:
                    logger.info(
                        "Using existing model from the workspace. Model version: {}".format(
                            one_mod.version
                        )
                    )
                    cloud_model = one_mod
            file_name = cloud_model.download(exist_ok=True)
    forecasts = []
    logger.info("Running backtest.")
    for input_file in input_files:
        forecasts.append(run_backtest(input_file, file_name, get_run().experiment))
    return pd.concat(forecasts)

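The script above follows the ParallelRunStep contract: the framework imports the module, calls init() once per worker process, then calls run(mini_batch) with successive lists of input file paths and concatenates the returned DataFrames into the step output. A toy stand-in for that driver loop (illustration only, not AzureML code):

import pandas as pd

def init():
    # One-time, per-worker setup (argument parsing, model download, ...).
    print("worker initialized")

def run(mini_batch):
    # Return one DataFrame per mini batch; the driver concatenates them.
    return pd.DataFrame({"input_file": mini_batch})

init()
print(pd.concat([run(["backtest_0.csv"]), run(["backtest_1.csv"])], ignore_index=True))
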
View File

@@ -0,0 +1,167 @@
from typing import Any, Dict, Optional, List
import argparse
import json
import os
import re

import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

from azureml.automl.core.shared import constants
from azureml.automl.core.shared.types import GrainType
from azureml.automl.runtime.shared.score import scoring

GRAIN = "time_series_id"
BACKTEST_ITER = "backtest_iteration"
ACTUALS = "actual_level"
PREDICTIONS = "predicted_level"
ALL_GRAINS = "all_sets"

FORECASTS_FILE = "forecast.csv"
SCORES_FILE = "scores.csv"
PLOTS_FILE = "plots_fcst_vs_actual.pdf"
RE_INVALID_SYMBOLS = re.compile("[: ]")


def _compute_metrics(df: pd.DataFrame, metrics: List[str]):
    """
    Compute metrics for one data frame.

    :param df: The data frame which contains actual_level and predicted_level columns.
    :return: The data frame with two columns - metric_name and metric.
    """
    scores = scoring.score_regression(
        y_test=df[ACTUALS], y_pred=df[PREDICTIONS], metrics=metrics
    )
    metrics_df = pd.DataFrame(list(scores.items()), columns=["metric_name", "metric"])
    metrics_df.sort_values(["metric_name"], inplace=True)
    metrics_df.reset_index(drop=True, inplace=True)
    return metrics_df


def _format_grain_name(grain: GrainType) -> str:
    """
    Convert grain name to string.

    :param grain: the grain name.
    :return: the string representation of the given grain.
    """
    if not isinstance(grain, tuple) and not isinstance(grain, list):
        return str(grain)
    grain = list(map(str, grain))
    return "|".join(grain)


def compute_all_metrics(
    fcst_df: pd.DataFrame,
    ts_id_colnames: List[str],
    metric_names: Optional[List[str]] = None,
):
    """
    Calculate metrics per grain.

    :param fcst_df: forecast data frame. Must contain 2 columns: 'actual_level' and 'predicted_level'
    :param metric_names: (optional) the list of metric names to return
    :param ts_id_colnames: (optional) list of grain column names
    :return: the data frame of per-grain metrics, plus an overall row marked "all_sets"
    """
    if not metric_names:
        metric_names = list(constants.Metric.SCALAR_REGRESSION_SET)
    if ts_id_colnames is None:
        ts_id_colnames = []
    metrics_list = []
    if ts_id_colnames:
        for grain, df in fcst_df.groupby(ts_id_colnames):
            one_grain_metrics_df = _compute_metrics(df, metric_names)
            one_grain_metrics_df[GRAIN] = _format_grain_name(grain)
            metrics_list.append(one_grain_metrics_df)
    # overall metrics
    one_grain_metrics_df = _compute_metrics(fcst_df, metric_names)
    one_grain_metrics_df[GRAIN] = ALL_GRAINS
    metrics_list.append(one_grain_metrics_df)
    # collect into a data frame
    return pd.concat(metrics_list)


def _draw_one_plot(
    df: pd.DataFrame,
    time_column_name: str,
    grain_column_names: List[str],
    pdf: PdfPages,
) -> None:
    """
    Draw the single plot.

    :param df: The data frame with the data to build plot.
    :param time_column_name: The name of a time column.
    :param grain_column_names: The name of grain columns.
    :param pdf: The pdf backend used to render the plot.
    """
    fig, _ = plt.subplots(figsize=(20, 10))
    df = df.set_index(time_column_name)
    plt.plot(df[[ACTUALS, PREDICTIONS]])
    plt.xticks(rotation=45)
    iteration = df[BACKTEST_ITER].iloc[0]
    if grain_column_names:
        grain_name = [df[grain].iloc[0] for grain in grain_column_names]
        plt.title(f"Time series ID: {_format_grain_name(grain_name)} {iteration}")
    plt.legend(["actual", "forecast"])
    plt.close(fig)
    pdf.savefig(fig)


def calculate_scores_and_build_plots(
    input_dir: str, output_dir: str, automl_settings: Dict[str, Any]
):
    os.makedirs(output_dir, exist_ok=True)
    grains = automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
    time_column_name = automl_settings.get(constants.TimeSeries.TIME_COLUMN_NAME)
    if grains is None:
        grains = []
    if isinstance(grains, str):
        grains = [grains]
    while BACKTEST_ITER in grains:
        grains.remove(BACKTEST_ITER)
    dfs = []
    for fle in os.listdir(input_dir):
        file_path = os.path.join(input_dir, fle)
        if os.path.isfile(file_path) and file_path.endswith(".csv"):
            df_iter = pd.read_csv(file_path, parse_dates=[time_column_name])
            for _, iteration in df_iter.groupby(BACKTEST_ITER):
                dfs.append(iteration)
    forecast_df = pd.concat(dfs, sort=False, ignore_index=True)
    # To make sure plots are in order, sort the predictions by grain and iteration.
    ts_index = grains + [BACKTEST_ITER]
    forecast_df.sort_values(by=ts_index, inplace=True)
    pdf = PdfPages(os.path.join(output_dir, PLOTS_FILE))
    for _, one_forecast in forecast_df.groupby(ts_index):
        _draw_one_plot(one_forecast, time_column_name, grains, pdf)
    pdf.close()
    forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False)
    metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER])
    metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False)


if __name__ == "__main__":
    args = {"forecasts": "--forecasts", "scores_out": "--output-dir"}
    parser = argparse.ArgumentParser("Parsing input arguments.")
    for argname, arg in args.items():
        parser.add_argument(arg, dest=argname, required=True)
    parsed_args, _ = parser.parse_known_args()
    input_dir = parsed_args.forecasts
    output_dir = parsed_args.scores_out
    with open(
        os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "automl_settings.json"
        )
    ) as json_file:
        automl_settings = json.load(json_file)
    calculate_scores_and_build_plots(input_dir, output_dir, automl_settings)

View File

@@ -0,0 +1,719 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License.\n",
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/automl-forecasting-function.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated MachineLearning\n",
"_**The model backtesting**_\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"2. [Setup](#Setup)\n",
"3. [Data](#Data)\n",
"4. [Prepare remote compute and data.](#prepare_remote)\n",
"5. [Create the configuration for AutoML backtesting](#train)\n",
"6. [Backtest AutoML](#backtest_automl)\n",
"7. [View metrics](#Metrics)\n",
"8. [Backtest the best model](#backtest_model)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"Model backtesting is used to evaluate its performance on historical data. To do that we step back on the backtesting period by the data set several times and split the data to train and test sets. Then these data sets are used for training and evaluation of model.<br>\n",
"This notebook is intended to demonstrate backtesting on a single model, this is the best solution for small data sets with a few or one time series in it. For scenarios where we would like to choose the best AutoML model for every backtest iteration, please see [AutoML Forecasting Backtest Many Models Example](../forecasting-backtest-many-models/auto-ml-forecasting-backtest-many-models.ipynb) notebook.\n",
"![Backtesting](Backtesting.png)\n",
"This notebook demonstrates two ways of backtesting:\n",
"- AutoML backtesting: we will train separate AutoML models for historical data\n",
"- Model backtesting: from the first run we will select the best model trained on the most recent data, retrain it on the past data and evaluate."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import numpy as np\n",
"import pandas as pd\n",
"import shutil\n",
"\n",
"import azureml.core\n",
"from azureml.core import Experiment, Model, Workspace"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook is compatible with Azure ML SDK version 1.35.1 or later."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As part of the setup you have already created a <b>Workspace</b>."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"output = {}\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"SKU\"] = ws.sku\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Data\n",
"For the demonstration purposes we will simulate one year of daily data. To do this we need to specify the following parameters: time column name, time series ID column names and label column name. Our intention is to forecast for two weeks ahead."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"TIME_COLUMN_NAME = \"date\"\n",
"TIME_SERIES_ID_COLUMN_NAMES = \"time_series_id\"\n",
"LABEL_COLUMN_NAME = \"y\"\n",
"FORECAST_HORIZON = 14\n",
"FREQUENCY = \"D\"\n",
"\n",
"\n",
"def simulate_timeseries_data(\n",
" train_len: int,\n",
" test_len: int,\n",
" time_column_name: str,\n",
" target_column_name: str,\n",
" time_series_id_column_name: str,\n",
" time_series_number: int = 1,\n",
" freq: str = \"H\",\n",
"):\n",
" \"\"\"\n",
" Return the time series of designed length.\n",
"\n",
" :param train_len: The length of training data (one series).\n",
" :type train_len: int\n",
" :param test_len: The length of testing data (one series).\n",
" :type test_len: int\n",
" :param time_column_name: The desired name of a time column.\n",
" :type time_column_name: str\n",
" :param time_series_number: The number of time series in the data set.\n",
" :type time_series_number: int\n",
" :param freq: The frequency string representing pandas offset.\n",
" see https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html\n",
" :type freq: str\n",
" :returns: the tuple of train and test data sets.\n",
" :rtype: tuple\n",
"\n",
" \"\"\"\n",
" data_train = [] # type: List[pd.DataFrame]\n",
" data_test = [] # type: List[pd.DataFrame]\n",
" data_length = train_len + test_len\n",
" for i in range(time_series_number):\n",
" X = pd.DataFrame(\n",
" {\n",
" time_column_name: pd.date_range(\n",
" start=\"2000-01-01\", periods=data_length, freq=freq\n",
" ),\n",
" target_column_name: np.arange(data_length).astype(float)\n",
" + np.random.rand(data_length)\n",
" + i * 5,\n",
" \"ext_predictor\": np.asarray(range(42, 42 + data_length)),\n",
" time_series_id_column_name: np.repeat(\"ts{}\".format(i), data_length),\n",
" }\n",
" )\n",
" data_train.append(X[:train_len])\n",
" data_test.append(X[train_len:])\n",
" train = pd.concat(data_train)\n",
" label_train = train.pop(target_column_name).values\n",
" test = pd.concat(data_test)\n",
" label_test = test.pop(target_column_name).values\n",
" return train, label_train, test, label_test\n",
"\n",
"\n",
"n_test_periods = FORECAST_HORIZON\n",
"n_train_periods = 365\n",
"X_train, y_train, X_test, y_test = simulate_timeseries_data(\n",
" train_len=n_train_periods,\n",
" test_len=n_test_periods,\n",
" time_column_name=TIME_COLUMN_NAME,\n",
" target_column_name=LABEL_COLUMN_NAME,\n",
" time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAMES,\n",
" time_series_number=2,\n",
" freq=FREQUENCY,\n",
")\n",
"X_train[LABEL_COLUMN_NAME] = y_train"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's see what the training data looks like."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_train.tail()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prepare remote compute and data. <a id=\"prepare_remote\"></a>\n",
"The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace), is paired with the storage account, which contains the default data store. We will use it to upload the artificial data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
"\n",
"ds = ws.get_default_datastore()\n",
"# Upload saved data to the default data store.\n",
"train_data = TabularDatasetFactory.register_pandas_dataframe(\n",
" X_train, target=(ds, \"data\"), name=\"data_backtest\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You will need to create a compute target for backtesting. In this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute), you create AmlCompute as your training compute resource.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your CPU cluster\n",
"amlcompute_cluster_name = \"backtest-cluster\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
" )\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create the configuration for AutoML backtesting <a id=\"train\"></a>\n",
"\n",
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **task** | forecasting |\n",
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>normalized_root_mean_squared_error</i><br><i>normalized_mean_absolute_error</i> |\n",
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
"| **label_column_name** | The name of the label column. |\n",
"| **max_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **grain_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_settings = {\n",
" \"task\": \"forecasting\",\n",
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
" \"iteration_timeout_minutes\": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n",
" \"iterations\": 15,\n",
" \"experiment_timeout_hours\": 1, # This also needs to be changed based on the dataset. For larger data set this number needs to be bigger.\n",
" \"label_column_name\": LABEL_COLUMN_NAME,\n",
" \"n_cross_validations\": 3,\n",
" \"time_column_name\": TIME_COLUMN_NAME,\n",
" \"max_horizon\": FORECAST_HORIZON,\n",
" \"track_child_runs\": False,\n",
" \"grain_column_names\": TIME_SERIES_ID_COLUMN_NAMES,\n",
"}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Backtest AutoML <a id=\"backtest_automl\"></a>\n",
"First we set backtesting parameters: we will step back by 30 days and will make 5 such steps; for each step we will forecast for next two weeks."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# The number of periods to step back on each backtest iteration.\n",
"BACKTESTING_PERIOD = 30\n",
"# The number of times we will back test the model.\n",
"NUMBER_OF_BACKTESTS = 5"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To train AutoML on backtesting folds we will use the [Azure Machine Learning pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/concept-ml-pipelines). It will generate backtest folds, then train model for each of them and calculate the accuracy metrics. To run pipeline, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve (here, it is a forecasting), while a Run corresponds to a specific approach to the problem."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from uuid import uuid1\n",
"\n",
"from pipeline_helper import get_backtest_pipeline\n",
"\n",
"pipeline_exp = Experiment(ws, \"automl-backtesting\")\n",
"\n",
"# We will create the unique identifier to mark our models.\n",
"model_uid = str(uuid1())\n",
"\n",
"pipeline = get_backtest_pipeline(\n",
" experiment=pipeline_exp,\n",
" dataset=train_data,\n",
" # The STANDARD_DS12_V2 has 4 vCPU per node, we will set 2 process per node to be safe.\n",
" process_per_node=2,\n",
" # The maximum number of nodes for our compute is 6.\n",
" node_count=6,\n",
" compute_target=compute_target,\n",
" automl_settings=automl_settings,\n",
" step_size=BACKTESTING_PERIOD,\n",
" step_number=NUMBER_OF_BACKTESTS,\n",
" model_uid=model_uid,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Run the pipeline and wait for results."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_run = pipeline_exp.submit(pipeline)\n",
"pipeline_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"After the run is complete, we can download the results. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics_output = pipeline_run.get_pipeline_output(\"results\")\n",
"metrics_output.download(\"backtest_metrics\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## View metrics<a id=\"Metrics\"></a>\n",
"To distinguish these metrics from the model backtest, which we will obtain in the next section, we will move the directory with metrics out of the backtest_metrics and will remove the parent folder. We will create the utility function for that."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def copy_scoring_directory(new_name):\n",
" scores_path = os.path.join(\"backtest_metrics\", \"azureml\")\n",
" directory_list = [os.path.join(scores_path, d) for d in os.listdir(scores_path)]\n",
" latest_file = max(directory_list, key=os.path.getctime)\n",
" print(\n",
" f\"The output directory {latest_file} was created on {pd.Timestamp(os.path.getctime(latest_file), unit='s')} GMT.\"\n",
" )\n",
" shutil.move(os.path.join(latest_file, \"results\"), new_name)\n",
" shutil.rmtree(\"backtest_metrics\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Move the directory and list its contents."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"copy_scoring_directory(\"automl_backtest\")\n",
"pd.DataFrame({\"File\": os.listdir(\"automl_backtest\")})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The directory contains a set of files with results:\n",
"- forecast.csv contains forecasts for all backtest iterations. The backtest_iteration column contains iteration identifier with the last training date as a suffix\n",
"- scores.csv contains all metrics. If data set contains several time series, the metrics are given for all combinations of time series id and iterations, as well as scores for all iterations and time series id are marked as \"all_sets\"\n",
"- plots_fcst_vs_actual.pdf contains the predictions vs forecast plots for each iteration and time series.\n",
"\n",
"For demonstration purposes we will display the table of metrics for one of the time series with ID \"ts0\". Again, we will create the utility function, which will be re used in model backtesting."
]
},
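  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As a quick sanity check (an illustration, not required for the rest of the notebook), we can load forecast.csv and list the backtest iterations it contains."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Peek at the combined forecasts and their iteration identifiers.\n",
    "backtest_forecasts = pd.read_csv(os.path.join(\"automl_backtest\", \"forecast.csv\"))\n",
    "backtest_forecasts[\"backtest_iteration\"].unique()"
   ]
  },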
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def get_metrics_for_ts(all_metrics, ts):\n",
" \"\"\"\n",
" Get the metrics for the time series with ID ts and return it as pandas data frame.\n",
"\n",
" :param all_metrics: The table with all the metrics.\n",
" :param ts: The ID of a time series of interest.\n",
" :return: The pandas DataFrame with metrics for one time series.\n",
" \"\"\"\n",
" results_df = None\n",
" for ts_id, one_series in all_metrics.groupby(\"time_series_id\"):\n",
" if not ts_id.startswith(ts):\n",
" continue\n",
" iteration = ts_id.split(\"|\")[-1]\n",
" df = one_series[[\"metric_name\", \"metric\"]]\n",
" df.rename({\"metric\": iteration}, axis=1, inplace=True)\n",
" df.set_index(\"metric_name\", inplace=True)\n",
" if results_df is None:\n",
" results_df = df\n",
" else:\n",
" results_df = results_df.merge(\n",
" df, how=\"inner\", left_index=True, right_index=True\n",
" )\n",
" results_df.sort_index(axis=1, inplace=True)\n",
" return results_df\n",
"\n",
"\n",
"metrics_df = pd.read_csv(os.path.join(\"automl_backtest\", \"scores.csv\"))\n",
"ts_id = \"ts0\"\n",
"get_metrics_for_ts(metrics_df, ts_id)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Forecast vs actuals plots."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import IFrame\n",
"\n",
"IFrame(\"./automl_backtest/plots_fcst_vs_actual.pdf\", width=800, height=300)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# <font color='blue'>Backtest the best model</font> <a id=\"backtest_model\"></a>\n",
"\n",
"For model backtesting we will use the same parameters we used to backtest AutoML. All the models, we have obtained in the previous run were registered in our workspace. To identify the model, each was assigned a tag with the last trainig date."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_list = Model.list(ws, tags={\"experiment\": \"automl-backtesting\"})\n",
"model_data = {\"name\": [], \"last_training_date\": []}\n",
"for model in model_list:\n",
" if (\n",
" \"last_training_date\" not in model.tags\n",
" or \"model_uid\" not in model.tags\n",
" or model.tags[\"model_uid\"] != model_uid\n",
" ):\n",
" continue\n",
" model_data[\"name\"].append(model.name)\n",
" model_data[\"last_training_date\"].append(\n",
" pd.Timestamp(model.tags[\"last_training_date\"])\n",
" )\n",
"df_models = pd.DataFrame(model_data)\n",
"df_models.sort_values([\"last_training_date\"], inplace=True)\n",
"df_models.reset_index(inplace=True, drop=True)\n",
"df_models"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We will backtest the model trained on the most recet data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_name = df_models[\"name\"].iloc[-1]\n",
"model_name"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrain the models.\n",
"Assemble the pipeline, which will retrain the best model from AutoML run on historical data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_exp = Experiment(ws, \"model-backtesting\")\n",
"\n",
"pipeline = get_backtest_pipeline(\n",
" experiment=pipeline_exp,\n",
" dataset=train_data,\n",
" # The STANDARD_DS12_V2 has 4 vCPU per node, we will set 2 process per node to be safe.\n",
" process_per_node=2,\n",
" # The maximum number of nodes for our compute is 6.\n",
" node_count=6,\n",
" compute_target=compute_target,\n",
" automl_settings=automl_settings,\n",
" step_size=BACKTESTING_PERIOD,\n",
" step_number=NUMBER_OF_BACKTESTS,\n",
" model_name=model_name,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Launch the backtesting pipeline."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_run = pipeline_exp.submit(pipeline)\n",
"pipeline_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The metrics are stored in the pipeline output named \"score\". The next code will download the table with metrics."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics_output = pipeline_run.get_pipeline_output(\"results\")\n",
"metrics_output.download(\"backtest_metrics\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Again, we will copy the data files from the downloaded directory, but in this case we will call the folder \"model_backtest\"; it will contain the same files as the one for AutoML backtesting."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"copy_scoring_directory(\"model_backtest\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, we will display the metrics."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_metrics_df = pd.read_csv(os.path.join(\"model_backtest\", \"scores.csv\"))\n",
"get_metrics_for_ts(model_metrics_df, ts_id)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Forecast vs actuals plots."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import IFrame\n",
"\n",
"IFrame(\"./model_backtest/plots_fcst_vs_actual.pdf\", width=800, height=300)"
]
}
],
"metadata": {
"authors": [
{
"name": "jialiu"
}
],
"category": "tutorial",
"compute": [
"Remote"
],
"datasets": [
"None"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"Azure ML AutoML"
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-backtest-single-model
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,166 @@
from typing import Any, Dict, Optional
import os
import azureml.train.automl.runtime._hts.hts_runtime_utilities as hru
from azureml._restclient.jasmine_client import JasmineClient
from azureml.contrib.automl.pipeline.steps import utilities
from azureml.core import RunConfiguration
from azureml.core.compute import ComputeTarget
from azureml.core.experiment import Experiment
from azureml.data import LinkTabularOutputDatasetConfig, TabularDataset
from azureml.pipeline.core import Pipeline, PipelineData, PipelineParameter
from azureml.pipeline.steps import ParallelRunConfig, ParallelRunStep, PythonScriptStep
from azureml.train.automl.constants import Scenarios
from azureml.data.dataset_consumption_config import DatasetConsumptionConfig

PROJECT_FOLDER = "assets"
SETTINGS_FILE = "automl_settings.json"


def get_backtest_pipeline(
experiment: Experiment,
dataset: TabularDataset,
process_per_node: int,
node_count: int,
compute_target: ComputeTarget,
automl_settings: Dict[str, Any],
step_size: int,
step_number: int,
model_name: Optional[str] = None,
model_uid: Optional[str] = None,
) -> Pipeline:
"""
:param experiment: The experiment used to run the pipeline.
:param dataset: Tabular data set to be used for model training.
:param process_per_node: The number of processes per node. Generally it should be the number of cores
on the node divided by two.
:param node_count: The number of nodes to be used.
:param compute_target: The compute target to be used to run the pipeline.
:param model_name: The name of a model to be back tested.
:param automl_settings: The dictionary with automl settings.
:param step_size: The number of periods to step back in backtesting.
:param step_number: The number of backtesting iterations.
:param model_uid: The uid to mark models from this run of the experiment.
:return: The pipeline to be used for model retraining.
    **Note:** The output will be uploaded in the pipeline output
    called 'results'.
"""
jasmine_client = JasmineClient(
service_context=experiment.workspace.service_context,
experiment_name=experiment.name,
experiment_id=experiment.id,
)
env = jasmine_client.get_curated_environment(
scenario=Scenarios.AUTOML,
enable_dnn=False,
enable_gpu=False,
compute=compute_target,
compute_sku=experiment.workspace.compute_targets.get(
compute_target.name
).vm_size,
)
data_results = PipelineData(
name="results", datastore=None, pipeline_output_name="results"
)
############################################################
# Split the data set using python script.
############################################################
run_config = RunConfiguration()
run_config.docker.use_docker = True
run_config.environment = env
split_data = PipelineData(name="split_data_output", datastore=None).as_dataset()
split_step = PythonScriptStep(
name="split_data_for_backtest",
script_name="data_split.py",
inputs=[dataset.as_named_input("training_data")],
outputs=[split_data],
source_directory=PROJECT_FOLDER,
arguments=[
"--step-size",
step_size,
"--step-number",
step_number,
"--time-column-name",
automl_settings.get("time_column_name"),
"--time-series-id-column-names",
automl_settings.get("grain_column_names"),
"--output-dir",
split_data,
],
runconfig=run_config,
compute_target=compute_target,
allow_reuse=False,
)
############################################################
    # We will run the backtest in a parallel run step.
############################################################
settings_path = os.path.join(PROJECT_FOLDER, SETTINGS_FILE)
hru.dump_object_to_json(automl_settings, settings_path)
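    # A mini batch size of 1 means each parallel invocation of
    # retrain_models.py processes a single backtest fold.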
mini_batch_size = PipelineParameter(name="batch_size_param", default_value=str(1))
back_test_config = ParallelRunConfig(
source_directory=PROJECT_FOLDER,
entry_script="retrain_models.py",
mini_batch_size=mini_batch_size,
error_threshold=-1,
output_action="append_row",
append_row_file_name="outputs.txt",
compute_target=compute_target,
environment=env,
process_count_per_node=process_per_node,
run_invocation_timeout=3600,
node_count=node_count,
)
forecasts = PipelineData(name="forecasts", datastore=None)
if model_name:
parallel_step_name = "{}-backtest".format(model_name.replace("_", "-"))
else:
parallel_step_name = "AutoML-backtest"
prs_args = [
"--target_column_name",
automl_settings.get("label_column_name"),
"--output-dir",
forecasts,
]
if model_name is not None:
prs_args.append("--model-name")
prs_args.append(model_name)
if model_uid is not None:
prs_args.append("--model-uid")
prs_args.append(model_uid)
backtest_prs = ParallelRunStep(
name=parallel_step_name,
parallel_run_config=back_test_config,
arguments=prs_args,
inputs=[split_data],
output=forecasts,
allow_reuse=False,
)
############################################################
# Then we collect the output and return it as scores output.
############################################################
collection_step = PythonScriptStep(
name="score",
script_name="score.py",
inputs=[forecasts.as_mount()],
outputs=[data_results],
source_directory=PROJECT_FOLDER,
arguments=[
"--forecasts",
forecasts,
"--output-dir",
data_results,
],
runconfig=run_config,
compute_target=compute_target,
allow_reuse=False,
)
# Build and return the pipeline.
return Pipeline(
workspace=experiment.workspace,
steps=[split_step, backtest_prs, collection_step],
)

View File

@@ -113,7 +113,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -139,18 +139,18 @@
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for the run history container in the workspace\n",
"experiment_name = 'beer-remote-cpu'\n",
"experiment_name = \"beer-remote-cpu\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -185,10 +185,11 @@
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
" print('Found existing cluster, use it.')\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
" max_nodes=4)\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
" )\n",
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
@@ -245,17 +246,21 @@
"plt.tight_layout()\n",
"\n",
"plt.subplot(2, 1, 1)\n",
"plt.title('Beer Production By Year')\n",
"df = pd.read_csv(\"Beer_no_valid_split_train.csv\", parse_dates=True, index_col= 'DATE').drop(columns='grain')\n",
"test_df = pd.read_csv(\"Beer_no_valid_split_test.csv\", parse_dates=True, index_col= 'DATE').drop(columns='grain')\n",
"plt.title(\"Beer Production By Year\")\n",
"df = pd.read_csv(\n",
" \"Beer_no_valid_split_train.csv\", parse_dates=True, index_col=\"DATE\"\n",
").drop(columns=\"grain\")\n",
"test_df = pd.read_csv(\n",
" \"Beer_no_valid_split_test.csv\", parse_dates=True, index_col=\"DATE\"\n",
").drop(columns=\"grain\")\n",
"plt.plot(df)\n",
"\n",
"plt.subplot(2, 1, 2)\n",
"plt.title('Beer Production By Month')\n",
"plt.title(\"Beer Production By Month\")\n",
"groups = df.groupby(df.index.month)\n",
"months = concat([DataFrame(x[1].values) for x in groups], axis=1)\n",
"months = DataFrame(months)\n",
"months.columns = range(1,13)\n",
"months.columns = range(1, 13)\n",
"months.boxplot()\n",
"\n",
"plt.show()"
@@ -270,10 +275,10 @@
},
"outputs": [],
"source": [
"target_column_name = 'BeerProduction'\n",
"time_column_name = 'DATE'\n",
"target_column_name = \"BeerProduction\"\n",
"time_column_name = \"DATE\"\n",
"time_series_id_column_names = []\n",
"freq = 'M' #Monthly data"
"freq = \"M\" # Monthly data"
]
},
{
@@ -301,14 +306,36 @@
"test_df.to_csv(\"test.csv\")\n",
"\n",
"datastore = ws.get_default_datastore()\n",
"datastore.upload_files(files = ['./train.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
"datastore.upload_files(files = ['./valid.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
"datastore.upload_files(files = ['./test.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
"datastore.upload_files(\n",
" files=[\"./train.csv\"],\n",
" target_path=\"beer-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"datastore.upload_files(\n",
" files=[\"./valid.csv\"],\n",
" target_path=\"beer-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"datastore.upload_files(\n",
" files=[\"./test.csv\"],\n",
" target_path=\"beer-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"\n",
"from azureml.core import Dataset\n",
"train_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/train.csv')])\n",
"valid_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/valid.csv')])\n",
"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/test.csv')])"
"\n",
"train_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"beer-dataset/tabular/train.csv\")]\n",
")\n",
"valid_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"beer-dataset/tabular/valid.csv\")]\n",
")\n",
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"beer-dataset/tabular/test.csv\")]\n",
")"
]
},
{
@@ -366,26 +393,29 @@
"outputs": [],
"source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n",
"forecasting_parameters = ForecastingParameters(\n",
" time_column_name=time_column_name,\n",
" forecast_horizon=forecast_horizon,\n",
" freq='MS' # Set the forecast frequency to be monthly (start of the month)\n",
" freq=\"MS\", # Set the forecast frequency to be monthly (start of the month)\n",
")\n",
"\n",
"# We will disable the enable_early_stopping flag to ensure the DNN model is recommended for demonstration purpose.\n",
"automl_config = AutoMLConfig(task='forecasting',\n",
" primary_metric='normalized_root_mean_squared_error',\n",
" experiment_timeout_hours = 1,\n",
"automl_config = AutoMLConfig(\n",
" task=\"forecasting\",\n",
" primary_metric=\"normalized_root_mean_squared_error\",\n",
" experiment_timeout_hours=1,\n",
" training_data=train_dataset,\n",
" label_column_name=target_column_name,\n",
" validation_data=valid_dataset, \n",
" validation_data=valid_dataset,\n",
" verbosity=logging.INFO,\n",
" compute_target=compute_target,\n",
" max_concurrent_iterations=4,\n",
" max_cores_per_iteration=-1,\n",
" enable_dnn=True,\n",
" enable_early_stopping=False,\n",
" forecasting_parameters=forecasting_parameters)"
" forecasting_parameters=forecasting_parameters,\n",
")"
]
},
{
@@ -407,7 +437,7 @@
},
"outputs": [],
"source": [
"remote_run = experiment.submit(automl_config, show_output= True)"
"remote_run = experiment.submit(automl_config, show_output=True)"
]
},
{
@@ -455,6 +485,7 @@
"outputs": [],
"source": [
"from helper import get_result_df\n",
"\n",
"summary_df = get_result_df(remote_run)\n",
"summary_df"
]
@@ -470,11 +501,12 @@
"source": [
"from azureml.core.run import Run\n",
"from azureml.widgets import RunDetails\n",
"forecast_model = 'TCNForecaster'\n",
"if not forecast_model in summary_df['run_id']:\n",
" forecast_model = 'ForecastTCN'\n",
" \n",
"best_dnn_run_id = summary_df['run_id'][forecast_model]\n",
"\n",
"forecast_model = \"TCNForecaster\"\n",
"if not forecast_model in summary_df[\"run_id\"]:\n",
" forecast_model = \"ForecastTCN\"\n",
"\n",
"best_dnn_run_id = summary_df[\"run_id\"][forecast_model]\n",
"best_dnn_run = Run(experiment, best_dnn_run_id)"
]
},
@@ -488,7 +520,7 @@
"outputs": [],
"source": [
"best_dnn_run.parent\n",
"RunDetails(best_dnn_run.parent).show() "
"RunDetails(best_dnn_run.parent).show()"
]
},
{
@@ -501,7 +533,7 @@
"outputs": [],
"source": [
"best_dnn_run\n",
"RunDetails(best_dnn_run).show() "
"RunDetails(best_dnn_run).show()"
]
},
{
@@ -536,7 +568,10 @@
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/test.csv')])\n",
"\n",
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"beer-dataset/tabular/test.csv\")]\n",
")\n",
"# preview the first 3 rows of the dataset\n",
"test_dataset.take(5).to_pandas_dataframe()"
]
@@ -547,7 +582,7 @@
"metadata": {},
"outputs": [],
"source": [
"compute_target = ws.compute_targets['beer-cluster']\n",
"compute_target = ws.compute_targets[\"beer-cluster\"]\n",
"test_experiment = Experiment(ws, experiment_name + \"_test\")"
]
},
@@ -563,9 +598,9 @@
"import os\n",
"import shutil\n",
"\n",
"script_folder = os.path.join(os.getcwd(), 'inference')\n",
"script_folder = os.path.join(os.getcwd(), \"inference\")\n",
"os.makedirs(script_folder, exist_ok=True)\n",
"shutil.copy('infer.py', script_folder)"
"shutil.copy(\"infer.py\", script_folder)"
]
},
{
@@ -576,8 +611,18 @@
"source": [
"from helper import run_inference\n",
"\n",
"test_run = run_inference(test_experiment, compute_target, script_folder, best_dnn_run, test_dataset, valid_dataset, forecast_horizon,\n",
" target_column_name, time_column_name, freq)"
"test_run = run_inference(\n",
" test_experiment,\n",
" compute_target,\n",
" script_folder,\n",
" best_dnn_run,\n",
" test_dataset,\n",
" valid_dataset,\n",
" forecast_horizon,\n",
" target_column_name,\n",
" time_column_name,\n",
" freq,\n",
")"
]
},
{
@@ -597,8 +642,19 @@
"source": [
"from helper import run_multiple_inferences\n",
"\n",
"summary_df = run_multiple_inferences(summary_df, experiment, test_experiment, compute_target, script_folder, test_dataset, \n",
" valid_dataset, forecast_horizon, target_column_name, time_column_name, freq)"
"summary_df = run_multiple_inferences(\n",
" summary_df,\n",
" experiment,\n",
" test_experiment,\n",
" compute_target,\n",
" script_folder,\n",
" test_dataset,\n",
" valid_dataset,\n",
" forecast_horizon,\n",
" target_column_name,\n",
" time_column_name,\n",
" freq,\n",
")"
]
},
{
@@ -618,7 +674,7 @@
" test_run = Run(test_experiment, test_run_id)\n",
" test_run.wait_for_completion()\n",
" test_score = test_run.get_metrics()[run_summary.primary_metric]\n",
" summary_df.loc[summary_df.run_id == run_id, 'Test Score'] = test_score\n",
" summary_df.loc[summary_df.run_id == run_id, \"Test Score\"] = test_score\n",
" print(\"Test Score: \", test_score)"
]
},

View File

@@ -6,120 +6,158 @@ from azureml.core.run import Run
from azureml.automl.core.shared import constants
def split_fraction_by_grain(df, fraction, time_column_name,
grain_column_names=None):
def split_fraction_by_grain(df, fraction, time_column_name, grain_column_names=None):
if not grain_column_names:
df['tmp_grain_column'] = 'grain'
grain_column_names = ['tmp_grain_column']
df["tmp_grain_column"] = "grain"
grain_column_names = ["tmp_grain_column"]
"""Group df by grain and split on last n rows for each group."""
df_grouped = (df.sort_values(time_column_name)
.groupby(grain_column_names, group_keys=False))
df_grouped = df.sort_values(time_column_name).groupby(
grain_column_names, group_keys=False
)
df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-int(len(dfg) *
fraction)] if fraction > 0 else dfg)
df_head = df_grouped.apply(
lambda dfg: dfg.iloc[: -int(len(dfg) * fraction)] if fraction > 0 else dfg
)
df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-int(len(dfg) *
fraction):] if fraction > 0 else dfg[:0])
df_tail = df_grouped.apply(
lambda dfg: dfg.iloc[-int(len(dfg) * fraction) :] if fraction > 0 else dfg[:0]
)
if 'tmp_grain_column' in grain_column_names:
if "tmp_grain_column" in grain_column_names:
for df2 in (df, df_head, df_tail):
df2.drop('tmp_grain_column', axis=1, inplace=True)
df2.drop("tmp_grain_column", axis=1, inplace=True)
grain_column_names.remove('tmp_grain_column')
grain_column_names.remove("tmp_grain_column")
return df_head, df_tail
def split_full_for_forecasting(df, time_column_name,
grain_column_names=None, test_split=0.2):
def split_full_for_forecasting(
df, time_column_name, grain_column_names=None, test_split=0.2
):
index_name = df.index.name
# Assumes that there isn't already a column called tmpindex
df['tmpindex'] = df.index
df["tmpindex"] = df.index
train_df, test_df = split_fraction_by_grain(
df, test_split, time_column_name, grain_column_names)
df, test_split, time_column_name, grain_column_names
)
train_df = train_df.set_index('tmpindex')
train_df = train_df.set_index("tmpindex")
train_df.index.name = index_name
test_df = test_df.set_index('tmpindex')
test_df = test_df.set_index("tmpindex")
test_df.index.name = index_name
df.drop('tmpindex', axis=1, inplace=True)
df.drop("tmpindex", axis=1, inplace=True)
return train_df, test_df
def get_result_df(remote_run):
children = list(remote_run.get_children(recursive=True))
summary_df = pd.DataFrame(index=['run_id', 'run_algorithm',
'primary_metric', 'Score'])
summary_df = pd.DataFrame(
index=["run_id", "run_algorithm", "primary_metric", "Score"]
)
goal_minimize = False
for run in children:
if run.get_status().lower() == constants.RunState.COMPLETE_RUN \
and 'run_algorithm' in run.properties and 'score' in run.properties:
if (
run.get_status().lower() == constants.RunState.COMPLETE_RUN
and "run_algorithm" in run.properties
and "score" in run.properties
):
# We only count in the completed child runs.
summary_df[run.id] = [run.id, run.properties['run_algorithm'],
run.properties['primary_metric'],
float(run.properties['score'])]
if ('goal' in run.properties):
goal_minimize = run.properties['goal'].split('_')[-1] == 'min'
summary_df[run.id] = [
run.id,
run.properties["run_algorithm"],
run.properties["primary_metric"],
float(run.properties["score"]),
]
if "goal" in run.properties:
goal_minimize = run.properties["goal"].split("_")[-1] == "min"
summary_df = summary_df.T.sort_values(
'Score',
ascending=goal_minimize).drop_duplicates(['run_algorithm'])
summary_df = summary_df.set_index('run_algorithm')
"Score", ascending=goal_minimize
).drop_duplicates(["run_algorithm"])
summary_df = summary_df.set_index("run_algorithm")
return summary_df
def run_inference(test_experiment, compute_target, script_folder, train_run,
test_dataset, lookback_dataset, max_horizon,
target_column_name, time_column_name, freq):
model_base_name = 'model.pkl'
if 'model_data_location' in train_run.properties:
model_location = train_run.properties['model_data_location']
_, model_base_name = model_location.rsplit('/', 1)
train_run.download_file('outputs/{}'.format(model_base_name), 'inference/{}'.format(model_base_name))
train_run.download_file('outputs/conda_env_v_1_0_0.yml', 'inference/condafile.yml')
def run_inference(
test_experiment,
compute_target,
script_folder,
train_run,
test_dataset,
lookback_dataset,
max_horizon,
target_column_name,
time_column_name,
freq,
):
model_base_name = "model.pkl"
if "model_data_location" in train_run.properties:
model_location = train_run.properties["model_data_location"]
_, model_base_name = model_location.rsplit("/", 1)
train_run.download_file(
"outputs/{}".format(model_base_name), "inference/{}".format(model_base_name)
)
train_run.download_file("outputs/conda_env_v_1_0_0.yml", "inference/condafile.yml")
inference_env = Environment("myenv")
inference_env.docker.enabled = True
inference_env.python.conda_dependencies = CondaDependencies(
conda_dependencies_file_path='inference/condafile.yml')
conda_dependencies_file_path="inference/condafile.yml"
)
est = Estimator(source_directory=script_folder,
entry_script='infer.py',
est = Estimator(
source_directory=script_folder,
entry_script="infer.py",
script_params={
'--max_horizon': max_horizon,
'--target_column_name': target_column_name,
'--time_column_name': time_column_name,
'--frequency': freq,
'--model_path': model_base_name
"--max_horizon": max_horizon,
"--target_column_name": target_column_name,
"--time_column_name": time_column_name,
"--frequency": freq,
"--model_path": model_base_name,
},
inputs=[test_dataset.as_named_input('test_data'),
lookback_dataset.as_named_input('lookback_data')],
inputs=[
test_dataset.as_named_input("test_data"),
lookback_dataset.as_named_input("lookback_data"),
],
compute_target=compute_target,
environment_definition=inference_env)
environment_definition=inference_env,
)
run = test_experiment.submit(
est, tags={
'training_run_id': train_run.id,
'run_algorithm': train_run.properties['run_algorithm'],
'valid_score': train_run.properties['score'],
'primary_metric': train_run.properties['primary_metric']
})
est,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags['run_algorithm'])
run.log("run_algorithm", run.tags["run_algorithm"])
return run
def run_multiple_inferences(summary_df, train_experiment, test_experiment,
compute_target, script_folder, test_dataset,
lookback_dataset, max_horizon, target_column_name,
time_column_name, freq):
def run_multiple_inferences(
summary_df,
train_experiment,
test_experiment,
compute_target,
script_folder,
test_dataset,
lookback_dataset,
max_horizon,
target_column_name,
time_column_name,
freq,
):
for run_name, run_summary in summary_df.iterrows():
print(run_name)
print(run_summary)
@@ -127,12 +165,19 @@ def run_multiple_inferences(summary_df, train_experiment, test_experiment,
train_run = Run(train_experiment, run_id)
test_run = run_inference(
test_experiment, compute_target, script_folder, train_run,
test_dataset, lookback_dataset, max_horizon, target_column_name,
time_column_name, freq)
test_experiment,
compute_target,
script_folder,
train_run,
test_dataset,
lookback_dataset,
max_horizon,
target_column_name,
time_column_name,
freq,
)
print(test_run)
summary_df.loc[summary_df.run_id == run_id,
'test_run_id'] = test_run.id
summary_df.loc[summary_df.run_id == run_id, "test_run_id"] = test_run.id
return summary_df

View File

@@ -19,9 +19,14 @@ except ImportError:
_torch_present = False
def align_outputs(y_predicted, X_trans, X_test, y_test,
predicted_column_name='predicted',
horizon_colname='horizon_origin'):
def align_outputs(
y_predicted,
X_trans,
X_test,
y_test,
predicted_column_name="predicted",
horizon_colname="horizon_origin",
):
"""
Demonstrates how to get the output aligned to the inputs
using pandas indexes. Helps understand what happened if
@@ -33,9 +38,13 @@ def align_outputs(y_predicted, X_trans, X_test, y_test,
* model was asked to predict past max_horizon -> increase max horizon
* data at start of X_test was needed for lags -> provide previous periods
"""
if (horizon_colname in X_trans):
df_fcst = pd.DataFrame({predicted_column_name: y_predicted,
horizon_colname: X_trans[horizon_colname]})
if horizon_colname in X_trans:
df_fcst = pd.DataFrame(
{
predicted_column_name: y_predicted,
horizon_colname: X_trans[horizon_colname],
}
)
else:
df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
@@ -48,20 +57,21 @@ def align_outputs(y_predicted, X_trans, X_test, y_test,
# X_test_full's index does not include origin, so reset for merge
df_fcst.reset_index(inplace=True)
X_test_full = X_test_full.reset_index().drop(columns='index')
together = df_fcst.merge(X_test_full, how='right')
X_test_full = X_test_full.reset_index().drop(columns="index")
together = df_fcst.merge(X_test_full, how="right")
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = together[together[[target_column_name,
predicted_column_name]].notnull().all(axis=1)]
return (clean)
clean = together[
together[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
return clean
def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
max_horizon, X_lookback, y_lookback,
freq='D'):
def do_rolling_forecast_with_lookback(
fitted_model, X_test, y_test, max_horizon, X_lookback, y_lookback, freq="D"
):
"""
Produce forecasts on a rolling origin over the given test set.
@@ -83,22 +93,28 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
horizon_time = origin_time + max_horizon * to_offset(freq)
# Extract test data from an expanding window up-to the horizon
expand_wind = (X[time_column_name] < horizon_time)
expand_wind = X[time_column_name] < horizon_time
X_test_expand = X[expand_wind]
y_query_expand = np.zeros(len(X_test_expand)).astype(np.float)
y_query_expand.fill(np.NaN)
if origin_time != X[time_column_name].min():
# Set the context by including actuals up-to the origin time
test_context_expand_wind = (X[time_column_name] < origin_time)
context_expand_wind = (X_test_expand[time_column_name] < origin_time)
test_context_expand_wind = X[time_column_name] < origin_time
context_expand_wind = X_test_expand[time_column_name] < origin_time
y_query_expand[context_expand_wind] = y[test_context_expand_wind]
# Print some debug info
print("Horizon_time:", horizon_time,
" origin_time: ", origin_time,
" max_horizon: ", max_horizon,
" freq: ", freq)
print(
"Horizon_time:",
horizon_time,
" origin_time: ",
origin_time,
" max_horizon: ",
max_horizon,
" freq: ",
freq,
)
print("expand_wind: ", expand_wind)
print("y_query_expand")
print(y_query_expand)
@@ -124,9 +140,14 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
trans_tindex = X_trans.index.get_level_values(time_column_name)
trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
test_roll_wind = expand_wind & (X[time_column_name] >= origin_time)
df_list.append(align_outputs(
y_fcst[trans_roll_wind], X_trans[trans_roll_wind],
X[test_roll_wind], y[test_roll_wind]))
df_list.append(
align_outputs(
y_fcst[trans_roll_wind],
X_trans[trans_roll_wind],
X[test_roll_wind],
y[test_roll_wind],
)
)
# Advance the origin time
origin_time = horizon_time
@@ -134,7 +155,7 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
return pd.concat(df_list, ignore_index=True)
def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq="D"):
"""
Produce forecasts on a rolling origin over the given test set.
@@ -153,23 +174,28 @@ def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
horizon_time = origin_time + max_horizon * to_offset(freq)
# Extract test data from an expanding window up-to the horizon
expand_wind = (X_test[time_column_name] < horizon_time)
expand_wind = X_test[time_column_name] < horizon_time
X_test_expand = X_test[expand_wind]
y_query_expand = np.zeros(len(X_test_expand)).astype(np.float)
y_query_expand.fill(np.NaN)
if origin_time != X_test[time_column_name].min():
# Set the context by including actuals up-to the origin time
test_context_expand_wind = (X_test[time_column_name] < origin_time)
context_expand_wind = (X_test_expand[time_column_name] < origin_time)
y_query_expand[context_expand_wind] = y_test[
test_context_expand_wind]
test_context_expand_wind = X_test[time_column_name] < origin_time
context_expand_wind = X_test_expand[time_column_name] < origin_time
y_query_expand[context_expand_wind] = y_test[test_context_expand_wind]
# Print some debug info
print("Horizon_time:", horizon_time,
" origin_time: ", origin_time,
" max_horizon: ", max_horizon,
" freq: ", freq)
print(
"Horizon_time:",
horizon_time,
" origin_time: ",
origin_time,
" max_horizon: ",
max_horizon,
" freq: ",
freq,
)
print("expand_wind: ", expand_wind)
print("y_query_expand")
print(y_query_expand)
@@ -193,10 +219,14 @@ def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
trans_tindex = X_trans.index.get_level_values(time_column_name)
trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
test_roll_wind = expand_wind & (X_test[time_column_name] >= origin_time)
df_list.append(align_outputs(y_fcst[trans_roll_wind],
df_list.append(
align_outputs(
y_fcst[trans_roll_wind],
X_trans[trans_roll_wind],
X_test[test_roll_wind],
y_test[test_roll_wind]))
y_test[test_roll_wind],
)
)
# Advance the origin time
origin_time = horizon_time
@@ -230,20 +260,31 @@ def map_location_cuda(storage, loc):
parser = argparse.ArgumentParser()
parser.add_argument(
'--max_horizon', type=int, dest='max_horizon',
default=10, help='Max Horizon for forecasting')
"--max_horizon",
type=int,
dest="max_horizon",
default=10,
help="Max Horizon for forecasting",
)
parser.add_argument(
'--target_column_name', type=str, dest='target_column_name',
help='Target Column Name')
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
'--time_column_name', type=str, dest='time_column_name',
help='Time Column Name')
"--time_column_name", type=str, dest="time_column_name", help="Time Column Name"
)
parser.add_argument(
'--frequency', type=str, dest='freq',
help='Frequency of prediction')
"--frequency", type=str, dest="freq", help="Frequency of prediction"
)
parser.add_argument(
'--model_path', type=str, dest='model_path',
default='model.pkl', help='Filename of model to be loaded')
"--model_path",
type=str,
dest="model_path",
default="model.pkl",
help="Filename of model to be loaded",
)
args = parser.parse_args()
max_horizon = args.max_horizon
@@ -252,7 +293,7 @@ time_column_name = args.time_column_name
freq = args.freq
model_path = args.model_path
print('args passed are: ')
print("args passed are: ")
print(max_horizon)
print(target_column_name)
print(time_column_name)
@@ -261,39 +302,41 @@ print(model_path)
run = Run.get_context()
# get input dataset by name
test_dataset = run.input_datasets['test_data']
lookback_dataset = run.input_datasets['lookback_data']
test_dataset = run.input_datasets["test_data"]
lookback_dataset = run.input_datasets["lookback_data"]
grain_column_names = []
df = test_dataset.to_pandas_dataframe()
print('Read df')
print("Read df")
print(df)
X_test_df = test_dataset.drop_columns(columns=[target_column_name])
y_test_df = test_dataset.with_timestamp_columns(
None).keep_columns(columns=[target_column_name])
y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(
columns=[target_column_name]
)
X_lookback_df = lookback_dataset.drop_columns(columns=[target_column_name])
y_lookback_df = lookback_dataset.with_timestamp_columns(
None).keep_columns(columns=[target_column_name])
y_lookback_df = lookback_dataset.with_timestamp_columns(None).keep_columns(
columns=[target_column_name]
)
_, ext = os.path.splitext(model_path)
if ext == '.pt':
if ext == ".pt":
# Load the fc-tcn torch model.
assert _torch_present
if torch.cuda.is_available():
map_location = map_location_cuda
else:
map_location = 'cpu'
with open(model_path, 'rb') as fh:
map_location = "cpu"
with open(model_path, "rb") as fh:
fitted_model = torch.load(fh, map_location=map_location)
else:
# Load the sklearn pipeline.
fitted_model = joblib.load(model_path)
if hasattr(fitted_model, 'get_lookback'):
if hasattr(fitted_model, "get_lookback"):
lookback = fitted_model.get_lookback()
df_all = do_rolling_forecast_with_lookback(
fitted_model,
@@ -302,26 +345,28 @@ if hasattr(fitted_model, 'get_lookback'):
max_horizon,
X_lookback_df.to_pandas_dataframe()[-lookback:],
y_lookback_df.to_pandas_dataframe().values.T[0][-lookback:],
freq)
freq,
)
else:
df_all = do_rolling_forecast(
fitted_model,
X_test_df.to_pandas_dataframe(),
y_test_df.to_pandas_dataframe().values.T[0],
max_horizon,
freq)
freq,
)
print(df_all)
print("target values:::")
print(df_all[target_column_name])
print("predicted values:::")
print(df_all['predicted'])
print(df_all["predicted"])
# Use the AutoML scoring module
regression_metrics = list(constants.REGRESSION_SCALAR_SET)
y_test = np.array(df_all[target_column_name])
y_pred = np.array(df_all['predicted'])
y_pred = np.array(df_all["predicted"])
scores = scoring.score_regression(y_test, y_pred, regression_metrics)
print("scores:")
@@ -331,12 +376,11 @@ for key, value in scores.items():
run.log(key, value)
print("Simple forecasting model")
rmse = np.sqrt(mean_squared_error(
df_all[target_column_name], df_all['predicted']))
rmse = np.sqrt(mean_squared_error(df_all[target_column_name], df_all["predicted"]))
print("[Test Data] \nRoot Mean squared error: %.2f" % rmse)
mae = mean_absolute_error(df_all[target_column_name], df_all['predicted'])
print('mean_absolute_error score: %.2f' % mae)
print('MAPE: %.2f' % MAPE(df_all[target_column_name], df_all['predicted']))
mae = mean_absolute_error(df_all[target_column_name], df_all["predicted"])
print("mean_absolute_error score: %.2f" % mae)
print("MAPE: %.2f" % MAPE(df_all[target_column_name], df_all["predicted"]))
run.log('rmse', rmse)
run.log('mae', mae)
run.log("rmse", rmse)
run.log("mae", mae)

View File

@@ -88,7 +88,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -108,19 +108,19 @@
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for the run history container in the workspace\n",
"experiment_name = 'automl-bikeshareforecasting'\n",
"experiment_name = \"automl-bikeshareforecasting\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['SKU'] = ws.sku\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"SKU\"] = ws.sku\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -153,10 +153,11 @@
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
" print('Found existing cluster, use it.')\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
" max_nodes=4)\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
" )\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
@@ -178,7 +179,9 @@
"outputs": [],
"source": [
"datastore = ws.get_default_datastore()\n",
"datastore.upload_files(files = ['./bike-no.csv'], target_path = 'dataset/', overwrite = True,show_progress = True)"
"datastore.upload_files(\n",
" files=[\"./bike-no.csv\"], target_path=\"dataset/\", overwrite=True, show_progress=True\n",
")"
]
},
{
@@ -198,8 +201,8 @@
"metadata": {},
"outputs": [],
"source": [
"target_column_name = 'cnt'\n",
"time_column_name = 'date'"
"target_column_name = \"cnt\"\n",
"time_column_name = \"date\""
]
},
{
@@ -208,10 +211,12 @@
"metadata": {},
"outputs": [],
"source": [
"dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'dataset/bike-no.csv')]).with_timestamp_columns(fine_grain_timestamp=time_column_name) \n",
"dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"dataset/bike-no.csv\")]\n",
").with_timestamp_columns(fine_grain_timestamp=time_column_name)\n",
"\n",
"# Drop the columns 'casual' and 'registered' as these columns are a breakdown of the total and therefore a leak.\n",
"dataset = dataset.drop_columns(columns=['casual', 'registered'])\n",
"dataset = dataset.drop_columns(columns=[\"casual\", \"registered\"])\n",
"\n",
"dataset.take(5).to_pandas_dataframe().reset_index(drop=True)"
]
@@ -320,7 +325,7 @@
"source": [
"featurization_config = FeaturizationConfig()\n",
"# Force the target column, to be integer type.\n",
"featurization_config.add_prediction_transform_type('Integer')"
"featurization_config.add_prediction_transform_type(\"Integer\")"
]
},
{
@@ -337,28 +342,31 @@
"outputs": [],
"source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n",
"forecasting_parameters = ForecastingParameters(\n",
" time_column_name=time_column_name,\n",
" forecast_horizon=forecast_horizon,\n",
" country_or_region_for_holidays='US', # set country_or_region will trigger holiday featurizer\n",
" target_lags='auto', # use heuristic based lag setting\n",
" freq='D' # Set the forecast frequency to be daily\n",
" country_or_region_for_holidays=\"US\", # set country_or_region will trigger holiday featurizer\n",
" target_lags=\"auto\", # use heuristic based lag setting\n",
" freq=\"D\", # Set the forecast frequency to be daily\n",
")\n",
"\n",
"automl_config = AutoMLConfig(task='forecasting', \n",
" primary_metric='normalized_root_mean_squared_error',\n",
"automl_config = AutoMLConfig(\n",
" task=\"forecasting\",\n",
" primary_metric=\"normalized_root_mean_squared_error\",\n",
" featurization=featurization_config,\n",
" blocked_models = ['ExtremeRandomTrees'], \n",
" blocked_models=[\"ExtremeRandomTrees\"],\n",
" experiment_timeout_hours=0.3,\n",
" training_data=train,\n",
" label_column_name=target_column_name,\n",
" compute_target=compute_target,\n",
" enable_early_stopping=True,\n",
" n_cross_validations=3, \n",
" n_cross_validations=3,\n",
" max_concurrent_iterations=4,\n",
" max_cores_per_iteration=-1,\n",
" verbosity=logging.INFO,\n",
" forecasting_parameters=forecasting_parameters)"
" forecasting_parameters=forecasting_parameters,\n",
")"
]
},
{
@@ -419,7 +427,7 @@
"metadata": {},
"outputs": [],
"source": [
"fitted_model.named_steps['timeseriestransformer'].get_engineered_feature_names()"
"fitted_model.named_steps[\"timeseriestransformer\"].get_engineered_feature_names()"
]
},
{
@@ -444,7 +452,9 @@
"outputs": [],
"source": [
"# Get the featurization summary as a list of JSON\n",
"featurization_summary = fitted_model.named_steps['timeseriestransformer'].get_featurization_summary()\n",
"featurization_summary = fitted_model.named_steps[\n",
" \"timeseriestransformer\"\n",
"].get_featurization_summary()\n",
"# View the featurization summary as a pandas dataframe\n",
"pd.DataFrame.from_records(featurization_summary)"
]
@@ -491,9 +501,9 @@
"import os\n",
"import shutil\n",
"\n",
"script_folder = os.path.join(os.getcwd(), 'forecast')\n",
"script_folder = os.path.join(os.getcwd(), \"forecast\")\n",
"os.makedirs(script_folder, exist_ok=True)\n",
"shutil.copy('forecasting_script.py', script_folder)"
"shutil.copy(\"forecasting_script.py\", script_folder)"
]
},
{
@@ -511,7 +521,9 @@
"source": [
"from run_forecast import run_rolling_forecast\n",
"\n",
"remote_run = run_rolling_forecast(test_experiment, compute_target, best_run, test, target_column_name)\n",
"remote_run = run_rolling_forecast(\n",
" test_experiment, compute_target, best_run, test, target_column_name\n",
")\n",
"remote_run"
]
},
@@ -538,8 +550,8 @@
"metadata": {},
"outputs": [],
"source": [
"remote_run.download_file('outputs/predictions.csv', 'predictions.csv')\n",
"df_all = pd.read_csv('predictions.csv')"
"remote_run.download_file(\"outputs/predictions.csv\", \"predictions.csv\")\n",
"df_all = pd.read_csv(\"predictions.csv\")"
]
},
{
@@ -556,18 +568,23 @@
"# use automl metrics module\n",
"scores = scoring.score_regression(\n",
" y_test=df_all[target_column_name],\n",
" y_pred=df_all['predicted'],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
" y_pred=df_all[\"predicted\"],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
")\n",
"\n",
"print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items(): \n",
" print('{}: {:.3f}'.format(key, value))\n",
" \n",
"for key, value in scores.items():\n",
" print(\"{}: {:.3f}\".format(key, value))\n",
"\n",
"# Plot outputs\n",
"%matplotlib inline\n",
"test_pred = plt.scatter(df_all[target_column_name], df_all['predicted'], color='b')\n",
"test_test = plt.scatter(df_all[target_column_name], df_all[target_column_name], color='g')\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
"test_pred = plt.scatter(df_all[target_column_name], df_all[\"predicted\"], color=\"b\")\n",
"test_test = plt.scatter(\n",
" df_all[target_column_name], df_all[target_column_name], color=\"g\"\n",
")\n",
"plt.legend(\n",
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n",
"plt.show()"
]
},
@@ -588,10 +605,18 @@
"outputs": [],
"source": [
"from metrics_helper import MAPE, APE\n",
"df_all.groupby('horizon_origin').apply(\n",
" lambda df: pd.Series({'MAPE': MAPE(df[target_column_name], df['predicted']),\n",
" 'RMSE': np.sqrt(mean_squared_error(df[target_column_name], df['predicted'])),\n",
" 'MAE': mean_absolute_error(df[target_column_name], df['predicted'])}))"
"\n",
"df_all.groupby(\"horizon_origin\").apply(\n",
" lambda df: pd.Series(\n",
" {\n",
" \"MAPE\": MAPE(df[target_column_name], df[\"predicted\"]),\n",
" \"RMSE\": np.sqrt(\n",
" mean_squared_error(df[target_column_name], df[\"predicted\"])\n",
" ),\n",
" \"MAE\": mean_absolute_error(df[target_column_name], df[\"predicted\"]),\n",
" }\n",
" )\n",
")"
]
},
{
@@ -607,15 +632,18 @@
"metadata": {},
"outputs": [],
"source": [
"df_all_APE = df_all.assign(APE=APE(df_all[target_column_name], df_all['predicted']))\n",
"APEs = [df_all_APE[df_all['horizon_origin'] == h].APE.values for h in range(1, forecast_horizon + 1)]\n",
"df_all_APE = df_all.assign(APE=APE(df_all[target_column_name], df_all[\"predicted\"]))\n",
"APEs = [\n",
" df_all_APE[df_all[\"horizon_origin\"] == h].APE.values\n",
" for h in range(1, forecast_horizon + 1)\n",
"]\n",
"\n",
"%matplotlib inline\n",
"plt.boxplot(APEs)\n",
"plt.yscale('log')\n",
"plt.xlabel('horizon')\n",
"plt.ylabel('APE (%)')\n",
"plt.title('Absolute Percentage Errors by Forecast Horizon')\n",
"plt.yscale(\"log\")\n",
"plt.xlabel(\"horizon\")\n",
"plt.ylabel(\"APE (%)\")\n",
"plt.title(\"Absolute Percentage Errors by Forecast Horizon\")\n",
"\n",
"plt.show()"
]
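The MAPE and APE helpers imported from metrics_helper above are not shown in this diff. A minimal sketch of what they plausibly compute, assuming the conventional percentage-error definitions and strictly nonzero actuals:

import numpy as np

def APE(actual, pred):
    # Absolute percentage error of each forecast, in percent.
    # Assumes no zero actuals; zeros would need separate handling.
    actual = np.asarray(actual, dtype=float)
    pred = np.asarray(pred, dtype=float)
    return 100.0 * np.abs(actual - pred) / np.abs(actual)

def MAPE(actual, pred):
    # Mean absolute percentage error over all forecasts.
    return np.mean(APE(actual, pred))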

View File

@@ -4,11 +4,14 @@ from sklearn.externals import joblib
parser = argparse.ArgumentParser()
parser.add_argument(
'--target_column_name', type=str, dest='target_column_name',
help='Target Column Name')
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
'--test_dataset', type=str, dest='test_dataset',
help='Test Dataset')
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
)
args = parser.parse_args()
target_column_name = args.target_column_name
@@ -20,19 +23,30 @@ ws = run.experiment.workspace
# get the input dataset by id
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)
X_test_df = test_dataset.drop_columns(columns=[target_column_name]).to_pandas_dataframe().reset_index(drop=True)
y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(columns=[target_column_name]).to_pandas_dataframe()
X_test_df = (
test_dataset.drop_columns(columns=[target_column_name])
.to_pandas_dataframe()
.reset_index(drop=True)
)
y_test_df = (
test_dataset.with_timestamp_columns(None)
.keep_columns(columns=[target_column_name])
.to_pandas_dataframe()
)
fitted_model = joblib.load('model.pkl')
fitted_model = joblib.load("model.pkl")
y_pred, X_trans = fitted_model.rolling_evaluation(X_test_df, y_test_df.values)
# Add predictions, actuals, and horizon relative to rolling origin to the test feature data
assign_dict = {'horizon_origin': X_trans['horizon_origin'].values, 'predicted': y_pred,
target_column_name: y_test_df[target_column_name].values}
assign_dict = {
"horizon_origin": X_trans["horizon_origin"].values,
"predicted": y_pred,
target_column_name: y_test_df[target_column_name].values,
}
df_all = X_test_df.assign(**assign_dict)
file_name = 'outputs/predictions.csv'
file_name = "outputs/predictions.csv"
export_csv = df_all.to_csv(file_name, header=True)
# Upload the predictions into artifacts

View File

@@ -1,32 +1,40 @@
from azureml.core import ScriptRunConfig
def run_rolling_forecast(test_experiment, compute_target, train_run,
test_dataset, target_column_name,
inference_folder='./forecast'):
train_run.download_file('outputs/model.pkl',
inference_folder + '/model.pkl')
def run_rolling_forecast(
test_experiment,
compute_target,
train_run,
test_dataset,
target_column_name,
inference_folder="./forecast",
):
train_run.download_file("outputs/model.pkl", inference_folder + "/model.pkl")
inference_env = train_run.get_environment()
config = ScriptRunConfig(source_directory=inference_folder,
script='forecasting_script.py',
arguments=['--target_column_name',
config = ScriptRunConfig(
source_directory=inference_folder,
script="forecasting_script.py",
arguments=[
"--target_column_name",
target_column_name,
'--test_dataset',
test_dataset.as_named_input(test_dataset.name)],
"--test_dataset",
test_dataset.as_named_input(test_dataset.name),
],
compute_target=compute_target,
environment=inference_env)
environment=inference_env,
)
run = test_experiment.submit(config,
tags={'training_run_id':
train_run.id,
'run_algorithm':
train_run.properties['run_algorithm'],
'valid_score':
train_run.properties['score'],
'primary_metric':
train_run.properties['primary_metric']})
run = test_experiment.submit(
config,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags['run_algorithm'])
run.log("run_algorithm", run.tags["run_algorithm"])
return run

View File

@@ -99,7 +99,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -119,7 +119,7 @@
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for the run history container in the workspace\n",
"experiment_name = 'automl-forecasting-energydemand'\n",
"experiment_name = \"automl-forecasting-energydemand\"\n",
"\n",
"# # project folder\n",
"# project_folder = './sample_projects/automl-forecasting-energy-demand'\n",
@@ -127,13 +127,13 @@
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -166,10 +166,11 @@
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
" print('Found existing cluster, use it.')\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
" max_nodes=6)\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
" )\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
@@ -204,8 +205,8 @@
"metadata": {},
"outputs": [],
"source": [
"target_column_name = 'demand'\n",
"time_column_name = 'timeStamp'"
"target_column_name = \"demand\"\n",
"time_column_name = \"timeStamp\""
]
},
{
@@ -214,7 +215,9 @@
"metadata": {},
"outputs": [],
"source": [
"dataset = Dataset.Tabular.from_delimited_files(path = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/nyc_energy.csv\").with_timestamp_columns(fine_grain_timestamp=time_column_name) \n",
"dataset = Dataset.Tabular.from_delimited_files(\n",
" path=\"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/nyc_energy.csv\"\n",
").with_timestamp_columns(fine_grain_timestamp=time_column_name)\n",
"dataset.take(5).to_pandas_dataframe().reset_index(drop=True)"
]
},
@@ -343,23 +346,26 @@
"outputs": [],
"source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n",
"forecasting_parameters = ForecastingParameters(\n",
" time_column_name=time_column_name,\n",
" forecast_horizon=forecast_horizon,\n",
" freq='H' # Set the forecast frequency to be hourly\n",
" freq=\"H\", # Set the forecast frequency to be hourly\n",
")\n",
"\n",
"automl_config = AutoMLConfig(task='forecasting', \n",
" primary_metric='normalized_root_mean_squared_error',\n",
" blocked_models = ['ExtremeRandomTrees', 'AutoArima', 'Prophet'], \n",
"automl_config = AutoMLConfig(\n",
" task=\"forecasting\",\n",
" primary_metric=\"normalized_root_mean_squared_error\",\n",
" blocked_models=[\"ExtremeRandomTrees\", \"AutoArima\", \"Prophet\"],\n",
" experiment_timeout_hours=0.3,\n",
" training_data=train,\n",
" label_column_name=target_column_name,\n",
" compute_target=compute_target,\n",
" enable_early_stopping=True,\n",
" n_cross_validations=3, \n",
" n_cross_validations=3,\n",
" verbosity=logging.INFO,\n",
" forecasting_parameters=forecasting_parameters)"
" forecasting_parameters=forecasting_parameters,\n",
")"
]
},
{
@@ -420,7 +426,7 @@
"metadata": {},
"outputs": [],
"source": [
"fitted_model.named_steps['timeseriestransformer'].get_engineered_feature_names()"
"fitted_model.named_steps[\"timeseriestransformer\"].get_engineered_feature_names()"
]
},
{
@@ -444,7 +450,9 @@
"outputs": [],
"source": [
"# Get the featurization summary as a list of JSON\n",
"featurization_summary = fitted_model.named_steps['timeseriestransformer'].get_featurization_summary()\n",
"featurization_summary = fitted_model.named_steps[\n",
" \"timeseriestransformer\"\n",
"].get_featurization_summary()\n",
"# View the featurization summary as a pandas dataframe\n",
"pd.DataFrame.from_records(featurization_summary)"
]
@@ -484,15 +492,18 @@
"outputs": [],
"source": [
"from run_forecast import run_remote_inference\n",
"remote_run_infer = run_remote_inference(test_experiment=test_experiment,\n",
"\n",
"remote_run_infer = run_remote_inference(\n",
" test_experiment=test_experiment,\n",
" compute_target=compute_target,\n",
" train_run=best_run,\n",
" test_dataset=test,\n",
" target_column_name=target_column_name)\n",
" target_column_name=target_column_name,\n",
")\n",
"remote_run_infer.wait_for_completion(show_output=False)\n",
"\n",
"# download the inference output file to the local machine\n",
"remote_run_infer.download_file('outputs/predictions.csv', 'predictions.csv')"
"remote_run_infer.download_file(\"outputs/predictions.csv\", \"predictions.csv\")"
]
},
{
@@ -510,7 +521,7 @@
"outputs": [],
"source": [
"# load forecast data frame\n",
"fcst_df = pd.read_csv('predictions.csv', parse_dates=[time_column_name])\n",
"fcst_df = pd.read_csv(\"predictions.csv\", parse_dates=[time_column_name])\n",
"fcst_df.head()"
]
},
@@ -527,18 +538,23 @@
"# use automl metrics module\n",
"scores = scoring.score_regression(\n",
" y_test=fcst_df[target_column_name],\n",
" y_pred=fcst_df['predicted'],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
" y_pred=fcst_df[\"predicted\"],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
")\n",
"\n",
"print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items(): \n",
" print('{}: {:.3f}'.format(key, value))\n",
" \n",
"for key, value in scores.items():\n",
" print(\"{}: {:.3f}\".format(key, value))\n",
"\n",
"# Plot outputs\n",
"%matplotlib inline\n",
"test_pred = plt.scatter(fcst_df[target_column_name], fcst_df['predicted'], color='b')\n",
"test_test = plt.scatter(fcst_df[target_column_name], fcst_df[target_column_name], color='g')\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
"test_pred = plt.scatter(fcst_df[target_column_name], fcst_df[\"predicted\"], color=\"b\")\n",
"test_test = plt.scatter(\n",
" fcst_df[target_column_name], fcst_df[target_column_name], color=\"g\"\n",
")\n",
"plt.legend(\n",
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n",
"plt.show()"
]
},
@@ -567,21 +583,33 @@
"outputs": [],
"source": [
"advanced_forecasting_parameters = ForecastingParameters(\n",
" time_column_name=time_column_name, forecast_horizon=forecast_horizon,\n",
" target_lags=12, target_rolling_window_size=4\n",
" time_column_name=time_column_name,\n",
" forecast_horizon=forecast_horizon,\n",
" target_lags=12,\n",
" target_rolling_window_size=4,\n",
")\n",
"\n",
"automl_config = AutoMLConfig(task='forecasting', \n",
" primary_metric='normalized_root_mean_squared_error',\n",
" blocked_models = ['ElasticNet','ExtremeRandomTrees','GradientBoosting','XGBoostRegressor','ExtremeRandomTrees', 'AutoArima', 'Prophet'], #These models are blocked for tutorial purposes, remove this for real use cases. \n",
"automl_config = AutoMLConfig(\n",
" task=\"forecasting\",\n",
" primary_metric=\"normalized_root_mean_squared_error\",\n",
" blocked_models=[\n",
" \"ElasticNet\",\n",
" \"ExtremeRandomTrees\",\n",
" \"GradientBoosting\",\n",
" \"XGBoostRegressor\",\n",
" \"ExtremeRandomTrees\",\n",
" \"AutoArima\",\n",
" \"Prophet\",\n",
" ], # These models are blocked for tutorial purposes, remove this for real use cases.\n",
" experiment_timeout_hours=0.3,\n",
" training_data=train,\n",
" label_column_name=target_column_name,\n",
" compute_target=compute_target,\n",
" enable_early_stopping = True,\n",
" n_cross_validations=3, \n",
" enable_early_stopping=True,\n",
" n_cross_validations=3,\n",
" verbosity=logging.INFO,\n",
" forecasting_parameters=advanced_forecasting_parameters)"
" forecasting_parameters=advanced_forecasting_parameters,\n",
")"
]
},
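To make the advanced settings above concrete: target_lags=12 feeds the model the target value from 12 periods back, and target_rolling_window_size=4 adds statistics computed over a window of 4 prior values. A hedged pandas sketch of the kind of features this induces (feature names are hypothetical; AutoML's internal featurizer may differ):

import pandas as pd

y = pd.Series(range(20), name="demand", dtype=float)
features = pd.DataFrame({
    "demand": y,
    "demand_lag12": y.shift(12),                       # lag of 12 periods
    "demand_rollmean4": y.shift(1).rolling(4).mean(),  # mean over the 4 prior values
})
print(features.tail())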
{
@@ -640,16 +668,20 @@
"outputs": [],
"source": [
"test_experiment_advanced = Experiment(ws, experiment_name + \"_inference_advanced\")\n",
"advanced_remote_run_infer = run_remote_inference(test_experiment=test_experiment_advanced,\n",
"advanced_remote_run_infer = run_remote_inference(\n",
" test_experiment=test_experiment_advanced,\n",
" compute_target=compute_target,\n",
" train_run=best_run_lags,\n",
" test_dataset=test,\n",
" target_column_name=target_column_name,\n",
" inference_folder='./forecast_advanced')\n",
" inference_folder=\"./forecast_advanced\",\n",
")\n",
"advanced_remote_run_infer.wait_for_completion(show_output=False)\n",
"\n",
"# download the inference output file to the local machine\n",
"advanced_remote_run_infer.download_file('outputs/predictions.csv', 'predictions_advanced.csv')"
"advanced_remote_run_infer.download_file(\n",
" \"outputs/predictions.csv\", \"predictions_advanced.csv\"\n",
")"
]
},
{
@@ -658,7 +690,7 @@
"metadata": {},
"outputs": [],
"source": [
"fcst_adv_df = pd.read_csv('predictions_advanced.csv', parse_dates=[time_column_name])\n",
"fcst_adv_df = pd.read_csv(\"predictions_advanced.csv\", parse_dates=[time_column_name])\n",
"fcst_adv_df.head()"
]
},
@@ -675,18 +707,25 @@
"# use automl metrics module\n",
"scores = scoring.score_regression(\n",
" y_test=fcst_adv_df[target_column_name],\n",
" y_pred=fcst_adv_df['predicted'],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
" y_pred=fcst_adv_df[\"predicted\"],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
")\n",
"\n",
"print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items(): \n",
" print('{}: {:.3f}'.format(key, value))\n",
" \n",
"for key, value in scores.items():\n",
" print(\"{}: {:.3f}\".format(key, value))\n",
"\n",
"# Plot outputs\n",
"%matplotlib inline\n",
"test_pred = plt.scatter(fcst_adv_df[target_column_name], fcst_adv_df['predicted'], color='b')\n",
"test_test = plt.scatter(fcst_adv_df[target_column_name], fcst_adv_df[target_column_name], color='g')\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
"test_pred = plt.scatter(\n",
" fcst_adv_df[target_column_name], fcst_adv_df[\"predicted\"], color=\"b\"\n",
")\n",
"test_test = plt.scatter(\n",
" fcst_adv_df[target_column_name], fcst_adv_df[target_column_name], color=\"g\"\n",
")\n",
"plt.legend(\n",
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n",
"plt.show()"
]
}

View File

@@ -11,11 +11,14 @@ from pandas.tseries.frequencies import to_offset
parser = argparse.ArgumentParser()
parser.add_argument(
'--target_column_name', type=str, dest='target_column_name',
help='Target Column Name')
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
'--test_dataset', type=str, dest='test_dataset',
help='Test Dataset')
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
)
args = parser.parse_args()
target_column_name = args.target_column_name
@@ -31,25 +34,27 @@ X_test = test_dataset.to_pandas_dataframe().reset_index(drop=True)
y_test = X_test.pop(target_column_name).values
# generate forecast
fitted_model = joblib.load('model.pkl')
fitted_model = joblib.load("model.pkl")
# We have default quantile values set as below (95th percentile)
quantiles = [0.025, 0.5, 0.975]
predicted_column_name = 'predicted'
PI = 'prediction_interval'
predicted_column_name = "predicted"
PI = "prediction_interval"
fitted_model.quantiles = quantiles
pred_quantiles = fitted_model.forecast_quantiles(X_test)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(lambda x: '[{}, {}]'.format(x[0],
x[1]), axis=1)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
)
X_test[target_column_name] = y_test
X_test[PI] = pred_quantiles[PI]
X_test[predicted_column_name] = pred_quantiles[0.5]
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = X_test[X_test[[target_column_name,
predicted_column_name]].notnull().all(axis=1)]
clean = X_test[
X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
file_name = 'outputs/predictions.csv'
file_name = "outputs/predictions.csv"
export_csv = clean.to_csv(file_name, header=True, index=False) # added Index
# Upload the predictions into artifacts
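Downstream of this script, the prediction_interval column is a string of the form "[lo, hi]", which happens to be valid JSON. A hedged sketch of recovering the bounds and checking empirical interval coverage; the "demand" target name is borrowed from the energy notebook above, so substitute your own:

import json

import pandas as pd

pred = pd.read_csv("predictions.csv")
# Parse each "[lo, hi]" string back into a pair of floats.
bounds = pred["prediction_interval"].apply(json.loads)
pred[["pi_lower", "pi_upper"]] = pd.DataFrame(bounds.tolist(), index=pred.index)
# Fraction of actuals falling inside the nominal 95% interval.
inside = (pred["demand"] >= pred["pi_lower"]) & (pred["demand"] <= pred["pi_upper"])
print("Empirical 95% interval coverage: {:.1%}".format(inside.mean()))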

View File

@@ -3,36 +3,47 @@ import shutil
from azureml.core import ScriptRunConfig
def run_remote_inference(test_experiment, compute_target, train_run,
test_dataset, target_column_name, inference_folder='./forecast'):
def run_remote_inference(
test_experiment,
compute_target,
train_run,
test_dataset,
target_column_name,
inference_folder="./forecast",
):
# Create local directory to copy the model.pkl and forecasting_script.py files into.
# These files will be uploaded to and executed on the compute instance.
os.makedirs(inference_folder, exist_ok=True)
shutil.copy('forecasting_script.py', inference_folder)
shutil.copy("forecasting_script.py", inference_folder)
train_run.download_file('outputs/model.pkl',
os.path.join(inference_folder, 'model.pkl'))
train_run.download_file(
"outputs/model.pkl", os.path.join(inference_folder, "model.pkl")
)
inference_env = train_run.get_environment()
config = ScriptRunConfig(source_directory=inference_folder,
script='forecasting_script.py',
arguments=['--target_column_name',
config = ScriptRunConfig(
source_directory=inference_folder,
script="forecasting_script.py",
arguments=[
"--target_column_name",
target_column_name,
'--test_dataset',
test_dataset.as_named_input(test_dataset.name)],
"--test_dataset",
test_dataset.as_named_input(test_dataset.name),
],
compute_target=compute_target,
environment=inference_env)
environment=inference_env,
)
run = test_experiment.submit(config,
tags={'training_run_id':
train_run.id,
'run_algorithm':
train_run.properties['run_algorithm'],
'valid_score':
train_run.properties['score'],
'primary_metric':
train_run.properties['primary_metric']})
run = test_experiment.submit(
config,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags['run_algorithm'])
run.log("run_algorithm", run.tags["run_algorithm"])
return run

View File

@@ -94,7 +94,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -111,19 +111,19 @@
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for the run history container in the workspace\n",
"experiment_name = 'automl-forecast-function-demo'\n",
"experiment_name = \"automl-forecast-function-demo\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['SKU'] = ws.sku\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"SKU\"] = ws.sku\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -141,17 +141,20 @@
"metadata": {},
"outputs": [],
"source": [
"TIME_COLUMN_NAME = 'date'\n",
"TIME_SERIES_ID_COLUMN_NAME = 'time_series_id'\n",
"TARGET_COLUMN_NAME = 'y'\n",
"TIME_COLUMN_NAME = \"date\"\n",
"TIME_SERIES_ID_COLUMN_NAME = \"time_series_id\"\n",
"TARGET_COLUMN_NAME = \"y\"\n",
"\n",
"def get_timeseries(train_len: int,\n",
"\n",
"def get_timeseries(\n",
" train_len: int,\n",
" test_len: int,\n",
" time_column_name: str,\n",
" target_column_name: str,\n",
" time_series_id_column_name: str,\n",
" time_series_number: int = 1,\n",
" freq: str = 'H'):\n",
" freq: str = \"H\",\n",
"):\n",
" \"\"\"\n",
" Return the time series of designed length.\n",
"\n",
@@ -174,14 +177,18 @@
" data_test = [] # type: List[pd.DataFrame]\n",
" data_length = train_len + test_len\n",
" for i in range(time_series_number):\n",
" X = pd.DataFrame({\n",
" time_column_name: pd.date_range(start='2000-01-01',\n",
" periods=data_length,\n",
" freq=freq),\n",
" target_column_name: np.arange(data_length).astype(float) + np.random.rand(data_length) + i*5,\n",
" 'ext_predictor': np.asarray(range(42, 42 + data_length)),\n",
" time_series_id_column_name: np.repeat('ts{}'.format(i), data_length)\n",
" })\n",
" X = pd.DataFrame(\n",
" {\n",
" time_column_name: pd.date_range(\n",
" start=\"2000-01-01\", periods=data_length, freq=freq\n",
" ),\n",
" target_column_name: np.arange(data_length).astype(float)\n",
" + np.random.rand(data_length)\n",
" + i * 5,\n",
" \"ext_predictor\": np.asarray(range(42, 42 + data_length)),\n",
" time_series_id_column_name: np.repeat(\"ts{}\".format(i), data_length),\n",
" }\n",
" )\n",
" data_train.append(X[:train_len])\n",
" data_test.append(X[train_len:])\n",
" X_train = pd.concat(data_train)\n",
@@ -190,14 +197,17 @@
" y_test = X_test.pop(target_column_name).values\n",
" return X_train, y_train, X_test, y_test\n",
"\n",
"\n",
"n_test_periods = 6\n",
"n_train_periods = 30\n",
"X_train, y_train, X_test, y_test = get_timeseries(train_len=n_train_periods,\n",
"X_train, y_train, X_test, y_test = get_timeseries(\n",
" train_len=n_train_periods,\n",
" test_len=n_test_periods,\n",
" time_column_name=TIME_COLUMN_NAME,\n",
" target_column_name=TARGET_COLUMN_NAME,\n",
" time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
" time_series_number=2)"
" time_series_number=2,\n",
")"
]
},
{
@@ -224,11 +234,12 @@
"source": [
"# plot the example time series\n",
"import matplotlib.pyplot as plt\n",
"\n",
"whole_data = X_train.copy()\n",
"target_label = 'y'\n",
"target_label = \"y\"\n",
"whole_data[target_label] = y_train\n",
"for g in whole_data.groupby('time_series_id'): \n",
" plt.plot(g[1]['date'].values, g[1]['y'].values, label=g[0])\n",
"for g in whole_data.groupby(\"time_series_id\"):\n",
" plt.plot(g[1][\"date\"].values, g[1][\"y\"].values, label=g[0])\n",
"plt.legend()\n",
"plt.show()"
]
@@ -250,12 +261,12 @@
"# We need to save thw artificial data and then upload them to default workspace datastore.\n",
"DATA_PATH = \"fc_fn_data\"\n",
"DATA_PATH_X = \"{}/data_train.csv\".format(DATA_PATH)\n",
"if not os.path.isdir('data'):\n",
" os.mkdir('data')\n",
"if not os.path.isdir(\"data\"):\n",
" os.mkdir(\"data\")\n",
"pd.DataFrame(whole_data).to_csv(\"data/data_train.csv\", index=False)\n",
"# Upload saved data to the default data store.\n",
"ds = ws.get_default_datastore()\n",
"ds.upload(src_dir='./data', target_path=DATA_PATH, overwrite=True, show_progress=True)\n",
"ds.upload(src_dir=\"./data\", target_path=DATA_PATH, overwrite=True, show_progress=True)\n",
"train_data = Dataset.Tabular.from_delimited_files(path=ds.path(DATA_PATH_X))"
]
},
@@ -283,10 +294,11 @@
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
" print('Found existing cluster, use it.')\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
" max_nodes=6)\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
" )\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
@@ -315,14 +327,15 @@
"outputs": [],
"source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"lags = [1,2,3]\n",
"\n",
"lags = [1, 2, 3]\n",
"forecast_horizon = n_test_periods\n",
"forecasting_parameters = ForecastingParameters(\n",
" time_column_name=TIME_COLUMN_NAME,\n",
" forecast_horizon=forecast_horizon,\n",
" time_series_id_column_names=[ TIME_SERIES_ID_COLUMN_NAME ],\n",
" time_series_id_column_names=[TIME_SERIES_ID_COLUMN_NAME],\n",
" target_lags=lags,\n",
" freq='H' # Set the forecast frequency to be hourly\n",
" freq=\"H\", # Set the forecast frequency to be hourly\n",
")"
]
},
@@ -344,19 +357,21 @@
"from azureml.train.automl import AutoMLConfig\n",
"\n",
"\n",
"automl_config = AutoMLConfig(task='forecasting',\n",
" debug_log='automl_forecasting_function.log',\n",
" primary_metric='normalized_root_mean_squared_error',\n",
"automl_config = AutoMLConfig(\n",
" task=\"forecasting\",\n",
" debug_log=\"automl_forecasting_function.log\",\n",
" primary_metric=\"normalized_root_mean_squared_error\",\n",
" experiment_timeout_hours=0.25,\n",
" enable_early_stopping=True,\n",
" training_data=train_data,\n",
" compute_target=compute_target,\n",
" n_cross_validations=3,\n",
" verbosity = logging.INFO,\n",
" verbosity=logging.INFO,\n",
" max_concurrent_iterations=4,\n",
" max_cores_per_iteration=-1,\n",
" label_column_name=target_label,\n",
" forecasting_parameters=forecasting_parameters)\n",
" forecasting_parameters=forecasting_parameters,\n",
")\n",
"\n",
"remote_run = experiment.submit(automl_config, show_output=False)"
]
@@ -481,12 +496,12 @@
"metadata": {},
"outputs": [],
"source": [
"# specify which quantiles you would like \n",
"# specify which quantiles you would like\n",
"fitted_model.quantiles = [0.01, 0.5, 0.95]\n",
"# use forecast_quantiles function, not the forecast() one\n",
"y_pred_quantiles = fitted_model.forecast_quantiles(X_test)\n",
"\n",
"# quantile forecasts returned in a Dataframe along with the time and time series id columns \n",
"# quantile forecasts returned in a Dataframe along with the time and time series id columns\n",
"y_pred_quantiles"
]
},
@@ -534,14 +549,16 @@
"metadata": {},
"outputs": [],
"source": [
"# generate the same kind of test data we trained on, \n",
"# generate the same kind of test data we trained on,\n",
"# but now make the train set much longer, so that the test set will be in the future\n",
"X_context, y_context, X_away, y_away = get_timeseries(train_len=42, # train data was 30 steps long\n",
"X_context, y_context, X_away, y_away = get_timeseries(\n",
" train_len=42, # train data was 30 steps long\n",
" test_len=4,\n",
" time_column_name=TIME_COLUMN_NAME,\n",
" target_column_name=TARGET_COLUMN_NAME,\n",
" time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
" time_series_number=2)\n",
" time_series_number=2,\n",
")\n",
"\n",
"# end of the data we trained on\n",
"print(X_train.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].max())\n",
@@ -562,7 +579,7 @@
"metadata": {},
"outputs": [],
"source": [
"try: \n",
"try:\n",
" y_pred_away, xy_away = fitted_model.forecast(X_away)\n",
" xy_away\n",
"except Exception as e:\n",
@@ -584,7 +601,9 @@
"metadata": {},
"outputs": [],
"source": [
"def make_forecasting_query(fulldata, time_column_name, target_column_name, forecast_origin, horizon, lookback):\n",
"def make_forecasting_query(\n",
" fulldata, time_column_name, target_column_name, forecast_origin, horizon, lookback\n",
"):\n",
"\n",
" \"\"\"\n",
" This function will take the full dataset, and create the query\n",
@@ -592,24 +611,24 @@
" forward for the next `horizon` horizons. Context from previous\n",
" `lookback` periods will be included.\n",
"\n",
" \n",
"\n",
"\n",
" fulldata: pandas.DataFrame a time series dataset. Needs to contain X and y.\n",
" time_column_name: string which column (must be in fulldata) is the time axis\n",
" target_column_name: string which column (must be in fulldata) is to be forecast\n",
" forecast_origin: datetime type the last time we (pretend to) have target values \n",
" forecast_origin: datetime type the last time we (pretend to) have target values\n",
" horizon: timedelta how far forward, in time units (not periods)\n",
" lookback: timedelta how far back does the model look?\n",
" lookback: timedelta how far back does the model look\n",
"\n",
" Example:\n",
"\n",
"\n",
" ```\n",
"\n",
" forecast_origin = pd.to_datetime('2012-09-01') + pd.DateOffset(days=5) # forecast 5 days after end of training\n",
" forecast_origin = pd.to_datetime(\"2012-09-01\") + pd.DateOffset(days=5) # forecast 5 days after end of training\n",
" print(forecast_origin)\n",
"\n",
" X_query, y_query = make_forecasting_query(data, \n",
" X_query, y_query = make_forecasting_query(data,\n",
" forecast_origin = forecast_origin,\n",
" horizon = pd.DateOffset(days=7), # 7 days into the future\n",
" lookback = pd.DateOffset(days=1), # model has lag 1 period (day)\n",
@@ -618,28 +637,30 @@
" ```\n",
" \"\"\"\n",
"\n",
" X_past = fulldata[ (fulldata[ time_column_name ] > forecast_origin - lookback) &\n",
" (fulldata[ time_column_name ] <= forecast_origin)\n",
" X_past = fulldata[\n",
" (fulldata[time_column_name] > forecast_origin - lookback)\n",
" & (fulldata[time_column_name] <= forecast_origin)\n",
" ]\n",
"\n",
" X_future = fulldata[ (fulldata[ time_column_name ] > forecast_origin) &\n",
" (fulldata[ time_column_name ] <= forecast_origin + horizon)\n",
" X_future = fulldata[\n",
" (fulldata[time_column_name] > forecast_origin)\n",
" & (fulldata[time_column_name] <= forecast_origin + horizon)\n",
" ]\n",
"\n",
" y_past = X_past.pop(target_column_name).values.astype(np.float)\n",
" y_future = X_future.pop(target_column_name).values.astype(np.float)\n",
"\n",
" # Now take y_future and turn it into question marks\n",
" y_query = y_future.copy().astype(np.float) # because sometimes life hands you an int\n",
" y_query = y_future.copy().astype(\n",
" np.float\n",
" ) # because sometimes life hands you an int\n",
" y_query.fill(np.NaN)\n",
"\n",
"\n",
" print(\"X_past is \" + str(X_past.shape) + \" - shaped\")\n",
" print(\"X_future is \" + str(X_future.shape) + \" - shaped\")\n",
" print(\"y_past is \" + str(y_past.shape) + \" - shaped\")\n",
" print(\"y_query is \" + str(y_query.shape) + \" - shaped\")\n",
"\n",
"\n",
" X_pred = pd.concat([X_past, X_future])\n",
" y_pred = np.concatenate([y_past, y_query])\n",
" return X_pred, y_pred"
@@ -658,8 +679,16 @@
"metadata": {},
"outputs": [],
"source": [
"print(X_context.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(['min','max','count']))\n",
"print(X_away.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(['min','max','count']))\n",
"print(\n",
" X_context.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(\n",
" [\"min\", \"max\", \"count\"]\n",
" )\n",
")\n",
"print(\n",
" X_away.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(\n",
" [\"min\", \"max\", \"count\"]\n",
" )\n",
")\n",
"X_context.tail(5)"
]
},
@@ -669,11 +698,11 @@
"metadata": {},
"outputs": [],
"source": [
"# Since the length of the lookback is 3, \n",
"# Since the length of the lookback is 3,\n",
"# we need to add 3 periods from the context to the request\n",
"# so that the model has the data it needs\n",
"\n",
"# Put the X and y back together for a while. \n",
"# Put the X and y back together for a while.\n",
"# They like each other and it makes them happy.\n",
"X_context[TARGET_COLUMN_NAME] = y_context\n",
"X_away[TARGET_COLUMN_NAME] = y_away\n",
@@ -684,7 +713,7 @@
"# it is indeed the last point of the context\n",
"assert forecast_origin == X_context[TIME_COLUMN_NAME].max()\n",
"print(\"Forecast origin: \" + str(forecast_origin))\n",
" \n",
"\n",
"# the model uses lags and rolling windows to look back in time\n",
"n_lookback_periods = max(lags)\n",
"lookback = pd.DateOffset(hours=n_lookback_periods)\n",
@@ -692,8 +721,9 @@
"horizon = pd.DateOffset(hours=forecast_horizon)\n",
"\n",
"# now make the forecast query from context (refer to figure)\n",
"X_pred, y_pred = make_forecasting_query(fulldata, TIME_COLUMN_NAME, TARGET_COLUMN_NAME,\n",
" forecast_origin, horizon, lookback)\n",
"X_pred, y_pred = make_forecasting_query(\n",
" fulldata, TIME_COLUMN_NAME, TARGET_COLUMN_NAME, forecast_origin, horizon, lookback\n",
")\n",
"\n",
"# show the forecast request aligned\n",
"X_show = X_pred.copy()\n",
@@ -720,7 +750,7 @@
"# show the forecast aligned\n",
"X_show = xy_away.reset_index()\n",
"# without the generated features\n",
"X_show[['date', 'time_series_id', 'ext_predictor', '_automl_target_col']]\n",
"X_show[[\"date\", \"time_series_id\", \"ext_predictor\", \"_automl_target_col\"]]\n",
"# prediction is in _automl_target_col"
]
},
@@ -751,12 +781,14 @@
"source": [
"# generate the same kind of test data we trained on, but with a single time-series and test period twice as long\n",
"# as the forecast_horizon.\n",
"_, _, X_test_long, y_test_long = get_timeseries(train_len=n_train_periods,\n",
" test_len=forecast_horizon*2,\n",
"_, _, X_test_long, y_test_long = get_timeseries(\n",
" train_len=n_train_periods,\n",
" test_len=forecast_horizon * 2,\n",
" time_column_name=TIME_COLUMN_NAME,\n",
" target_column_name=TARGET_COLUMN_NAME,\n",
" time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
" time_series_number=1)\n",
" time_series_number=1,\n",
")\n",
"\n",
"print(X_test_long.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].min())\n",
"print(X_test_long.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].max())"
@@ -779,9 +811,11 @@
"metadata": {},
"outputs": [],
"source": [
"# What forecast() function does in this case is equivalent to iterating it twice over the test set as the following. \n",
"# What forecast() function does in this case is equivalent to iterating it twice over the test set as the following.\n",
"y_pred1, _ = fitted_model.forecast(X_test_long[:forecast_horizon])\n",
"y_pred_all, _ = fitted_model.forecast(X_test_long, np.concatenate((y_pred1, np.full(forecast_horizon, np.nan))))\n",
"y_pred_all, _ = fitted_model.forecast(\n",
" X_test_long, np.concatenate((y_pred1, np.full(forecast_horizon, np.nan)))\n",
")\n",
"np.array_equal(y_pred_all, y_pred_long)"
]
},
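For test sets spanning k forecast windows rather than two, the same pattern generalizes: each window's lag features are fed from the previous windows' forecasts. A sketch under the assumptions that the test length is a multiple of forecast_horizon and that an all-NaN query target behaves like the omitted argument on the first window:

import numpy as np

h = forecast_horizon
y_known = np.empty(0)
for i in range(len(X_test_long) // h):
    X_chunk = X_test_long[: (i + 1) * h]
    # NaNs mark the values to forecast; known values come from earlier windows.
    y_query = np.concatenate((y_known, np.full(h, np.nan)))
    y_known, _ = fitted_model.forecast(X_chunk, y_query)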

View File

@@ -72,14 +72,14 @@
"dstore = ws.get_default_datastore()\n",
"\n",
"output = {}\n",
"output['SDK version'] = azureml.core.VERSION\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Default datastore name'] = dstore.name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"SDK version\"] = azureml.core.VERSION\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Default datastore name\"] = dstore.name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -102,9 +102,9 @@
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment = Experiment(ws, 'automl-hts')\n",
"experiment = Experiment(ws, \"automl-hts\")\n",
"\n",
"print('Experiment name: ' + experiment.name)"
"print(\"Experiment name: \" + experiment.name)"
]
},
{
@@ -150,35 +150,13 @@
"datastore"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"gather": {
"logged": 1613005886349
},
"jupyter": {
"outputs_hidden": false,
"source_hidden": false
},
"nteract": {
"transient": {
"deleting": false
}
}
},
"outputs": [],
"source": [
"datastore.upload(src_dir='./Data/', target_path=datastore_path, overwrite=True, show_progress=True) "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create the TabularDatasets \n",
"\n",
"Datasets in Azure Machine Learning are references to specific data in a Datastore. The data can be retrieved as a [TabularDatasets](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py)."
"Datasets in Azure Machine Learning are references to specific data in a Datastore. The data can be retrieved as a [TabularDatasets](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py). We will read in the data as a pandas DataFrame, upload to the data store and register them to your Workspace using ```register_pandas_dataframe``` so they can be called as an input into the training pipeline. We will use the inference dataset as part of the forecasting pipeline. The step need only be completed once."
]
},
{
@@ -191,27 +169,18 @@
},
"outputs": [],
"source": [
"from azureml.core.dataset import Dataset\n",
"train_ds = Dataset.Tabular.from_delimited_files(path=datastore.path(\"hts-sample/hts-sample-train.csv\"), validate=False) \n",
"inference_ds = Dataset.Tabular.from_delimited_files(path=datastore.path(\"hts-sample/hts-sample-test.csv\"), validate=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Register the TabularDatasets to the Workspace \n",
"Finally, register the dataset to your Workspace so it can be called as an input into the training pipeline in the next notebook. We will use the inference dataset as part of the forecasting pipeline. The step need only be completed once."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"registered_train = train_ds.register(ws, \"hts-sales-train\")\n",
"registered_inference = inference_ds.register(ws, \"hts-sales-test\")"
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
"\n",
"registered_train = TabularDatasetFactory.register_pandas_dataframe(\n",
" pd.read_csv(\"Data/hts-sample-train.csv\"),\n",
" target=(datastore, \"hts-sample\"),\n",
" name=\"hts-sales-train\",\n",
")\n",
"registered_inference = TabularDatasetFactory.register_pandas_dataframe(\n",
" pd.read_csv(\"Data/hts-sample-test.csv\"),\n",
" target=(datastore, \"hts-sample\"),\n",
" name=\"hts-sales-test\",\n",
")"
]
},
{
@@ -256,19 +225,20 @@
"if compute_name in ws.compute_targets:\n",
" compute_target = ws.compute_targets[compute_name]\n",
" if compute_target and type(compute_target) is AmlCompute:\n",
" print('Found compute target: ' + compute_name)\n",
" print(\"Found compute target: \" + compute_name)\n",
"else:\n",
" print('Creating a new compute target...')\n",
" provisioning_config = AmlCompute.provisioning_configuration(vm_size= \"STANDARD_D16S_V3\",\n",
" max_nodes=20)\n",
" print(\"Creating a new compute target...\")\n",
" provisioning_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_D16S_V3\", max_nodes=20\n",
" )\n",
" # Create the compute target\n",
" compute_target = ComputeTarget.create(\n",
" ws, compute_name, provisioning_config)\n",
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
"\n",
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
" # If no min node count is provided it will use the scale settings for the cluster\n",
" compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
" )\n",
"\n",
" # For a more detailed view of current cluster status, use the 'status' property\n",
" print(compute_target.status.serialize())"
@@ -330,8 +300,8 @@
"\n",
"\n",
"automl_settings = {\n",
" \"task\" : \"forecasting\",\n",
" \"primary_metric\" : \"normalized_root_mean_squared_error\",\n",
" \"task\": \"forecasting\",\n",
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
" \"label_column_name\": label_column_name,\n",
" \"time_column_name\": time_column_name,\n",
" \"forecast_horizon\": forecast_horizon,\n",
@@ -341,17 +311,17 @@
" \"pipeline_fetch_max_batch_size\": 15,\n",
" \"model_explainability\": model_explainability,\n",
" # The following settings are specific to this sample and should be adjusted according to your own needs.\n",
" \"iteration_timeout_minutes\" : 10,\n",
" \"iterations\" : 10,\n",
" \"n_cross_validations\": 2\n",
" \"iteration_timeout_minutes\": 10,\n",
" \"iterations\": 10,\n",
" \"n_cross_validations\": 2,\n",
"}\n",
"\n",
"hts_parameters = HTSTrainParameters(\n",
" automl_settings=automl_settings,\n",
" hierarchy_column_names=hierarchy,\n",
" training_level=training_level,\n",
" enable_engineered_explanations=engineered_explanations\n",
")\n"
" enable_engineered_explanations=engineered_explanations,\n",
")"
]
},
{
@@ -457,7 +427,9 @@
" expl_output = training_run.get_pipeline_output(\"explanations\")\n",
" expl_output.download(\"training_explanations\")\n",
"else:\n",
" print(\"Model explanations are available only if model_explainability is set to True.\")"
" print(\n",
" \"Model explanations are available only if model_explainability is set to True.\"\n",
" )"
]
},
{
@@ -476,17 +448,28 @@
"import os\n",
"\n",
"if model_explainability:\n",
" explanations_dirrectory = os.listdir(os.path.join('training_explanations', 'azureml'))\n",
" explanations_dirrectory = os.listdir(\n",
" os.path.join(\"training_explanations\", \"azureml\")\n",
" )\n",
" if len(explanations_dirrectory) > 1:\n",
" print(\"Warning! The directory contains multiple explanations, only the first one will be displayed.\")\n",
" print('The explanations are located at {}.'.format(explanations_dirrectory[0]))\n",
" print(\n",
" \"Warning! The directory contains multiple explanations, only the first one will be displayed.\"\n",
" )\n",
" print(\"The explanations are located at {}.\".format(explanations_dirrectory[0]))\n",
" # Now we will list all the explanations.\n",
" explanation_path = os.path.join('training_explanations', 'azureml', explanations_dirrectory[0], 'training_explanations')\n",
" explanation_path = os.path.join(\n",
" \"training_explanations\",\n",
" \"azureml\",\n",
" explanations_dirrectory[0],\n",
" \"training_explanations\",\n",
" )\n",
" print(\"Available explanations\")\n",
" print(\"==============================\")\n",
" print(\"\\n\".join(os.listdir(explanation_path)))\n",
"else:\n",
" print(\"Model explanations are available only if model_explainability is set to True.\")"
" print(\n",
" \"Model explanations are available only if model_explainability is set to True.\"\n",
" )"
]
},
{
@@ -504,11 +487,17 @@
"source": [
"from IPython.display import display\n",
"\n",
"explanation_type = 'raw'\n",
"level = 'state'\n",
"explanation_type = \"raw\"\n",
"level = \"state\"\n",
"\n",
"if model_explainability:\n",
" display(pd.read_csv(os.path.join(explanation_path, \"{}_explanations_{}.csv\").format(explanation_type, level)))"
" display(\n",
" pd.read_csv(\n",
" os.path.join(explanation_path, \"{}_explanations_{}.csv\").format(\n",
" explanation_type, level\n",
" )\n",
" )\n",
" )"
]
},
{
@@ -542,7 +531,7 @@
"\n",
"inference_parameters = HTSInferenceParameters(\n",
" hierarchy_forecast_level=\"store_id\", # The setting is specific to this dataset and should be changed based on your dataset.\n",
" allocation_method=\"proportions_of_historical_average\"\n",
" allocation_method=\"proportions_of_historical_average\",\n",
")\n",
"\n",
"steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
@@ -551,7 +540,7 @@
" compute_target=compute_target,\n",
" inference_pipeline_parameters=inference_parameters,\n",
" node_count=2,\n",
" process_count_per_node=8\n",
" process_count_per_node=8,\n",
")"
]
},
@@ -610,7 +599,9 @@
"metadata": {},
"outputs": [],
"source": [
"inference_run = experiment.submit(inference_pipeline, pipeline_parameters={\"hierarchy_forecast_level\": \"state\"})\n",
"inference_run = experiment.submit(\n",
" inference_pipeline, pipeline_parameters={\"hierarchy_forecast_level\": \"state\"}\n",
")\n",
"inference_run.wait_for_completion(show_output=False)"
]
}
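The proportions_of_historical_average allocation used above distributes a forecast made at one level of the hierarchy down to lower levels in proportion to each node's share of the historical average. A toy illustration of the idea (column names are hypothetical, not the sample's schema):

import pandas as pd

history = pd.DataFrame({
    "store_id": ["A", "A", "B", "B"],
    "quantity": [30.0, 10.0, 40.0, 20.0],
})
# Each store's share of the historical average.
shares = history.groupby("store_id")["quantity"].mean()
shares /= shares.sum()
# Disaggregate a state-level forecast of 90 units: A gets 36, B gets 54.
print(shares * 90.0)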

View File

@@ -30,7 +30,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"For this notebook we are using a synthetic dataset portraying sales data to predict the the quantity of a vartiety of product skus across several states, stores, and product categories.\n",
"For this notebook we are using a synthetic dataset portraying sales data to predict the quantity of a vartiety of product SKUs across several states, stores, and product categories.\n",
"\n",
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
]
@@ -72,14 +72,14 @@
"dstore = ws.get_default_datastore()\n",
"\n",
"output = {}\n",
"output['SDK version'] = azureml.core.VERSION\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Default datastore name'] = dstore.name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"SDK version\"] = azureml.core.VERSION\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Default datastore name\"] = dstore.name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -102,9 +102,9 @@
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment = Experiment(ws, 'automl-many-models')\n",
"experiment = Experiment(ws, \"automl-many-models\")\n",
"\n",
"print('Experiment name: ' + experiment.name)"
"print(\"Experiment name: \" + experiment.name)"
]
},
{
@@ -200,11 +200,13 @@
"container_name = \"automl-sample-notebook-data\"\n",
"account_name = \"automlsamplenotebookdata\"\n",
"\n",
"oj_datastore = Datastore.register_azure_blob_container(workspace=ws, \n",
" datastore_name=blob_datastore_name, \n",
"oj_datastore = Datastore.register_azure_blob_container(\n",
" workspace=ws,\n",
" datastore_name=blob_datastore_name,\n",
" container_name=container_name,\n",
" account_name=account_name,\n",
" create_if_not_exists=True) "
" create_if_not_exists=True,\n",
")"
]
},
{
@@ -228,11 +230,15 @@
"source": [
"from azureml.core import Dataset\n",
"\n",
"ds_name_small = 'oj-data-small-tabular'\n",
"input_ds_small = Dataset.Tabular.from_delimited_files(path=oj_datastore.path(ds_name_small + '/'), validate=False)\n",
"ds_name_small = \"oj-data-small-tabular\"\n",
"input_ds_small = Dataset.Tabular.from_delimited_files(\n",
" path=oj_datastore.path(ds_name_small + \"/\"), validate=False\n",
")\n",
"\n",
"inference_name_small = 'oj-inference-small-tabular'\n",
"inference_ds_small = Dataset.Tabular.from_delimited_files(path=oj_datastore.path(inference_name_small + '/'), validate=False)"
"inference_name_small = \"oj-inference-small-tabular\"\n",
"inference_ds_small = Dataset.Tabular.from_delimited_files(\n",
" path=oj_datastore.path(inference_name_small + \"/\"), validate=False\n",
")"
]
},
{
@@ -277,19 +283,20 @@
"if compute_name in ws.compute_targets:\n",
" compute_target = ws.compute_targets[compute_name]\n",
" if compute_target and type(compute_target) is AmlCompute:\n",
" print('Found compute target: ' + compute_name)\n",
" print(\"Found compute target: \" + compute_name)\n",
"else:\n",
" print('Creating a new compute target...')\n",
" provisioning_config = AmlCompute.provisioning_configuration(vm_size= \"STANDARD_D16S_V3\",\n",
" max_nodes=20)\n",
" print(\"Creating a new compute target...\")\n",
" provisioning_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_D16S_V3\", max_nodes=20\n",
" )\n",
" # Create the compute target\n",
" compute_target = ComputeTarget.create(\n",
" ws, compute_name, provisioning_config)\n",
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
"\n",
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
" # If no min node count is provided it will use the scale settings for the cluster\n",
" compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
" )\n",
"\n",
" # For a more detailed view of current cluster status, use the 'status' property\n",
" print(compute_target.status.serialize())"
@@ -301,7 +308,7 @@
"source": [
"### Set up training parameters\n",
"\n",
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings inncluding the name of the time column, the maximum forecast horizon, and the partition column name definition.\n",
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition.\n",
"\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
@@ -333,25 +340,29 @@
},
"outputs": [],
"source": [
"from azureml.train.automl.runtime._many_models.many_models_parameters import ManyModelsTrainParameters\n",
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
" ManyModelsTrainParameters,\n",
")\n",
"\n",
"partition_column_names = ['Store', 'Brand']\n",
"partition_column_names = [\"Store\", \"Brand\"]\n",
"automl_settings = {\n",
" \"task\" : 'forecasting',\n",
" \"primary_metric\" : 'normalized_root_mean_squared_error',\n",
" \"iteration_timeout_minutes\" : 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n",
" \"iterations\" : 15,\n",
" \"experiment_timeout_hours\" : 0.25,\n",
" \"label_column_name\" : 'Quantity',\n",
" \"n_cross_validations\" : 3,\n",
" \"time_column_name\": 'WeekStarting',\n",
" \"drop_column_names\": 'Revenue',\n",
" \"max_horizon\" : 6,\n",
" \"task\": \"forecasting\",\n",
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
" \"iteration_timeout_minutes\": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n",
" \"iterations\": 15,\n",
" \"experiment_timeout_hours\": 0.25,\n",
" \"label_column_name\": \"Quantity\",\n",
" \"n_cross_validations\": 3,\n",
" \"time_column_name\": \"WeekStarting\",\n",
" \"drop_column_names\": \"Revenue\",\n",
" \"max_horizon\": 6,\n",
" \"grain_column_names\": partition_column_names,\n",
" \"track_child_runs\": False,\n",
"}\n",
"\n",
"mm_paramters = ManyModelsTrainParameters(automl_settings=automl_settings, partition_column_names=partition_column_names)"
"mm_paramters = ManyModelsTrainParameters(\n",
" automl_settings=automl_settings, partition_column_names=partition_column_names\n",
")"
]
},
{
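For orientation (not part of this PR's diff): once mm_paramters is built, it is typically handed to the many-models training step builder. A minimal sketch, assuming the AutoMLPipelineBuilder.get_many_models_train_steps API from azureml-contrib-automl-pipeline-steps; the argument names and values below are illustrative, not verbatim from this repository:

from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder

# Illustrative wiring of the parameters object defined above; the exact
# signature may vary across SDK versions.
train_steps = AutoMLPipelineBuilder.get_many_models_train_steps(
    experiment=experiment,                   # assumes an Experiment object
    train_data=input_ds_small,               # the partitioned training dataset
    compute_target=compute_target,
    node_count=2,
    process_count_per_node=8,
    train_pipeline_parameters=mm_paramters,  # the ManyModelsTrainParameters above
)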
@@ -485,14 +496,14 @@
"outputs": [],
"source": [
"# from azureml.pipeline.core import Schedule, ScheduleRecurrence\n",
" \n",
"\n",
"# training_pipeline_id = published_pipeline.id\n",
"\n",
"# recurrence = ScheduleRecurrence(frequency=\"Month\", interval=1, start_time=\"2020-01-01T09:00:00\")\n",
"# recurring_schedule = Schedule.create(ws, name=\"automl_training_recurring_schedule\", \n",
"# recurring_schedule = Schedule.create(ws, name=\"automl_training_recurring_schedule\",\n",
"# description=\"Schedule Training Pipeline to run on the first day of every month\",\n",
"# pipeline_id=training_pipeline_id, \n",
"# experiment_name=experiment.name, \n",
"# pipeline_id=training_pipeline_id,\n",
"# experiment_name=experiment.name,\n",
"# recurrence=recurrence)"
]
},
@@ -518,7 +529,10 @@
"outputs": [],
"source": [
"from azureml.data import OutputFileDatasetConfig\n",
"output_inference_data_ds = OutputFileDatasetConfig(name='many_models_inference_output', destination=(dstore, 'oj/inference_data/')).register_on_complete(name='oj_inference_data_ds')"
"\n",
"output_inference_data_ds = OutputFileDatasetConfig(\n",
" name=\"many_models_inference_output\", destination=(dstore, \"oj/inference_data/\")\n",
").register_on_complete(name=\"oj_inference_data_ds\")"
]
},
{
@@ -540,12 +554,12 @@
"| :--------------- | :------------------- |\n",
"| **experiment** | The experiment used for inference run. |\n",
"| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n",
"| **compute_target** The compute target that runs the inference pipeline.|\n",
"| **compute_target** | The compute target that runs the inference pipeline.|\n",
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n",
"| **process_count_per_node** The number of processes per node.\n",
"| **train_run_id** | \\[Optional] The run id of the hierarchy training, by default it is the latest successful training many model run in the experiment. |\n",
"| **train_experiment_name** | \\[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n",
"| **process_count_per_node** | \\[Optional] The number of processes per node, by default it's 4. |"
"| **process_count_per_node** | The number of processes per node.\n",
"| **train_run_id** | \\[Optional\\] The run id of the hierarchy training, by default it is the latest successful training many model run in the experiment. |\n",
"| **train_experiment_name** | \\[Optional\\] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n",
"| **process_count_per_node** | \\[Optional\\] The number of processes per node, by default it's 4. |"
]
},
{
@@ -555,12 +569,14 @@
"outputs": [],
"source": [
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
"from azureml.train.automl.runtime._many_models.many_models_parameters import ManyModelsInferenceParameters\n",
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
" ManyModelsInferenceParameters,\n",
")\n",
"\n",
"mm_parameters = ManyModelsInferenceParameters(\n",
" partition_column_names=['Store', 'Brand'],\n",
" partition_column_names=[\"Store\", \"Brand\"],\n",
" time_column_name=\"WeekStarting\",\n",
" target_column_name=\"Quantity\"\n",
" target_column_name=\"Quantity\",\n",
")\n",
"\n",
"inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
@@ -622,10 +638,23 @@
"\n",
"forecasting_results_name = \"forecasting_results\"\n",
"forecasting_output_name = \"many_models_inference_output\"\n",
"forecast_file = get_output_from_mm_pipeline(inference_run, forecasting_results_name, forecasting_output_name)\n",
"forecast_file = get_output_from_mm_pipeline(\n",
" inference_run, forecasting_results_name, forecasting_output_name\n",
")\n",
"df = pd.read_csv(forecast_file, delimiter=\" \", header=None)\n",
"df.columns = [\"Week Starting\", \"Store\", \"Brand\", \"Quantity\", \"Advert\", \"Price\" , \"Revenue\", \"Predicted\" ]\n",
"print(\"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\")\n",
"df.columns = [\n",
" \"Week Starting\",\n",
" \"Store\",\n",
" \"Brand\",\n",
" \"Quantity\",\n",
" \"Advert\",\n",
" \"Price\",\n",
" \"Revenue\",\n",
" \"Predicted\",\n",
"]\n",
"print(\n",
" \"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\"\n",
")\n",
"df.head(10)"
]
},
@@ -672,14 +701,14 @@
"outputs": [],
"source": [
"# from azureml.pipeline.core import Schedule, ScheduleRecurrence\n",
" \n",
"\n",
"# forecasting_pipeline_id = published_pipeline.id\n",
"\n",
"# recurrence = ScheduleRecurrence(frequency=\"Month\", interval=1, start_time=\"2020-01-01T09:00:00\")\n",
"# recurring_schedule = Schedule.create(ws, name=\"automl_forecasting_recurring_schedule\", \n",
"# recurring_schedule = Schedule.create(ws, name=\"automl_forecasting_recurring_schedule\",\n",
"# description=\"Schedule Forecasting Pipeline to run on the first day of every week\",\n",
"# pipeline_id=forecasting_pipeline_id, \n",
"# experiment_name=experiment.name, \n",
"# pipeline_id=forecasting_pipeline_id,\n",
"# experiment_name=experiment.name,\n",
"# recurrence=recurrence)"
]
}

View File

@@ -81,7 +81,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -101,19 +101,19 @@
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for the run history container in the workspace\n",
"experiment_name = 'automl-ojforecasting'\n",
"experiment_name = \"automl-ojforecasting\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['SKU'] = ws.sku\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"SKU\"] = ws.sku\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -146,10 +146,11 @@
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
" print('Found existing cluster, use it.')\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D12_V2',\n",
" max_nodes=6)\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_D12_V2\", max_nodes=6\n",
" )\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
@@ -169,11 +170,11 @@
"metadata": {},
"outputs": [],
"source": [
"time_column_name = 'WeekStarting'\n",
"time_column_name = \"WeekStarting\"\n",
"data = pd.read_csv(\"dominicks_OJ.csv\", parse_dates=[time_column_name])\n",
"\n",
"# Drop the columns 'logQuantity' as it is a leaky feature.\n",
"data.drop('logQuantity', axis=1, inplace=True)\n",
"data.drop(\"logQuantity\", axis=1, inplace=True)\n",
"\n",
"data.head()"
]
@@ -193,9 +194,9 @@
"metadata": {},
"outputs": [],
"source": [
"time_series_id_column_names = ['Store', 'Brand']\n",
"time_series_id_column_names = [\"Store\", \"Brand\"]\n",
"nseries = data.groupby(time_series_id_column_names).ngroups\n",
"print('Data contains {0} individual time-series.'.format(nseries))"
"print(\"Data contains {0} individual time-series.\".format(nseries))"
]
},
{
@@ -214,7 +215,7 @@
"use_stores = [2, 5, 8]\n",
"data_subset = data[data.Store.isin(use_stores)]\n",
"nseries = data_subset.groupby(time_series_id_column_names).ngroups\n",
"print('Data subset contains {0} individual time-series.'.format(nseries))"
"print(\"Data subset contains {0} individual time-series.\".format(nseries))"
]
},
{
@@ -233,14 +234,17 @@
"source": [
"n_test_periods = 20\n",
"\n",
"\n",
"def split_last_n_by_series_id(df, n):\n",
" \"\"\"Group df by series identifiers and split on last n rows for each group.\"\"\"\n",
" df_grouped = (df.sort_values(time_column_name) # Sort by ascending time\n",
" .groupby(time_series_id_column_names, group_keys=False))\n",
" df_grouped = df.sort_values(time_column_name).groupby( # Sort by ascending time\n",
" time_series_id_column_names, group_keys=False\n",
" )\n",
" df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-n])\n",
" df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-n:])\n",
" return df_head, df_tail\n",
"\n",
"\n",
"train, test = split_last_n_by_series_id(data_subset, n_test_periods)"
]
},
@@ -258,18 +262,15 @@
"metadata": {},
"outputs": [],
"source": [
"train.to_csv (r'./dominicks_OJ_train.csv', index = None, header=True)\n",
"test.to_csv (r'./dominicks_OJ_test.csv', index = None, header=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
"\n",
"datastore = ws.get_default_datastore()\n",
"datastore.upload_files(files = ['./dominicks_OJ_train.csv', './dominicks_OJ_test.csv'], target_path = 'dataset/', overwrite = True,show_progress = True)"
"train_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
" train, target=(datastore, \"dataset/\"), name=\"dominicks_OJ_train\"\n",
")\n",
"test_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
" test, target=(datastore, \"dataset/\"), name=\"dominicks_OJ_test\"\n",
")"
]
},
{
@@ -279,17 +280,6 @@
"### Create dataset for training"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.dataset import Dataset\n",
"train_dataset = Dataset.Tabular.from_delimited_files(path=datastore.path('dataset/dominicks_OJ_train.csv'))\n",
"test_dataset = Dataset.Tabular.from_delimited_files(path=datastore.path('dataset/dominicks_OJ_test.csv'))"
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -323,7 +313,7 @@
"metadata": {},
"outputs": [],
"source": [
"target_column_name = 'Quantity'"
"target_column_name = \"Quantity\""
]
},
{
@@ -351,13 +341,17 @@
"source": [
"featurization_config = FeaturizationConfig()\n",
"# Force the CPWVOL5 feature to be numeric type.\n",
"featurization_config.add_column_purpose('CPWVOL5', 'Numeric')\n",
"featurization_config.add_column_purpose(\"CPWVOL5\", \"Numeric\")\n",
"# Fill missing values in the target column, Quantity, with zeros.\n",
"featurization_config.add_transformer_params('Imputer', ['Quantity'], {\"strategy\": \"constant\", \"fill_value\": 0})\n",
"featurization_config.add_transformer_params(\n",
" \"Imputer\", [\"Quantity\"], {\"strategy\": \"constant\", \"fill_value\": 0}\n",
")\n",
"# Fill missing values in the INCOME column with median value.\n",
"featurization_config.add_transformer_params('Imputer', ['INCOME'], {\"strategy\": \"median\"})\n",
"featurization_config.add_transformer_params(\n",
" \"Imputer\", [\"INCOME\"], {\"strategy\": \"median\"}\n",
")\n",
"# Fill missing values in the Price column with forward fill (last value carried forward).\n",
"featurization_config.add_transformer_params('Imputer', ['Price'], {\"strategy\": \"ffill\"})"
"featurization_config.add_transformer_params(\"Imputer\", [\"Price\"], {\"strategy\": \"ffill\"})"
]
},
{
@@ -423,16 +417,18 @@
"outputs": [],
"source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n",
"forecasting_parameters = ForecastingParameters(\n",
" time_column_name=time_column_name,\n",
" forecast_horizon=n_test_periods,\n",
" time_series_id_column_names=time_series_id_column_names,\n",
" freq='W-THU' # Set the forecast frequency to be weekly (start on each Thursday)\n",
" freq=\"W-THU\", # Set the forecast frequency to be weekly (start on each Thursday)\n",
")\n",
"\n",
"automl_config = AutoMLConfig(task='forecasting',\n",
" debug_log='automl_oj_sales_errors.log',\n",
" primary_metric='normalized_mean_absolute_error',\n",
"automl_config = AutoMLConfig(\n",
" task=\"forecasting\",\n",
" debug_log=\"automl_oj_sales_errors.log\",\n",
" primary_metric=\"normalized_mean_absolute_error\",\n",
" experiment_timeout_hours=0.25,\n",
" training_data=train_dataset,\n",
" label_column_name=target_column_name,\n",
@@ -442,7 +438,8 @@
" n_cross_validations=3,\n",
" verbosity=logging.INFO,\n",
" max_cores_per_iteration=-1,\n",
" forecasting_parameters=forecasting_parameters)"
" forecasting_parameters=forecasting_parameters,\n",
")"
]
},
{
@@ -487,7 +484,7 @@
"source": [
"best_run, fitted_model = remote_run.get_output()\n",
"print(fitted_model.steps)\n",
"model_name = best_run.properties['model_name']"
"model_name = best_run.properties[\"model_name\"]"
]
},
{
@@ -505,7 +502,7 @@
"metadata": {},
"outputs": [],
"source": [
"custom_featurizer = fitted_model.named_steps['timeseriestransformer']"
"custom_featurizer = fitted_model.named_steps[\"timeseriestransformer\"]"
]
},
{
@@ -559,15 +556,18 @@
"outputs": [],
"source": [
"from run_forecast import run_remote_inference\n",
"remote_run_infer = run_remote_inference(test_experiment=test_experiment, \n",
"\n",
"remote_run_infer = run_remote_inference(\n",
" test_experiment=test_experiment,\n",
" compute_target=compute_target,\n",
" train_run=best_run,\n",
" test_dataset=test_dataset,\n",
" target_column_name=target_column_name)\n",
" target_column_name=target_column_name,\n",
")\n",
"remote_run_infer.wait_for_completion(show_output=False)\n",
"\n",
"# download the forecast file to the local machine\n",
"remote_run_infer.download_file('outputs/predictions.csv', 'predictions.csv')"
"remote_run_infer.download_file(\"outputs/predictions.csv\", \"predictions.csv\")"
]
},
{
@@ -588,7 +588,7 @@
"outputs": [],
"source": [
"# load forecast data frame\n",
"fcst_df = pd.read_csv('predictions.csv', parse_dates=[time_column_name])\n",
"fcst_df = pd.read_csv(\"predictions.csv\", parse_dates=[time_column_name])\n",
"fcst_df.head()"
]
},
@@ -605,18 +605,23 @@
"# use automl scoring module\n",
"scores = scoring.score_regression(\n",
" y_test=fcst_df[target_column_name],\n",
" y_pred=fcst_df['predicted'],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
" y_pred=fcst_df[\"predicted\"],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
")\n",
"\n",
"print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items(): \n",
" print('{}: {:.3f}'.format(key, value))\n",
" \n",
"for key, value in scores.items():\n",
" print(\"{}: {:.3f}\".format(key, value))\n",
"\n",
"# Plot outputs\n",
"%matplotlib inline\n",
"test_pred = plt.scatter(fcst_df[target_column_name], fcst_df['predicted'], color='b')\n",
"test_test = plt.scatter(fcst_df[target_column_name], fcst_df[target_column_name], color='g')\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
"test_pred = plt.scatter(fcst_df[target_column_name], fcst_df[\"predicted\"], color=\"b\")\n",
"test_test = plt.scatter(\n",
" fcst_df[target_column_name], fcst_df[target_column_name], color=\"g\"\n",
")\n",
"plt.legend(\n",
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n",
"plt.show()"
]
},
@@ -640,9 +645,11 @@
"metadata": {},
"outputs": [],
"source": [
"description = 'AutoML OJ forecaster'\n",
"description = \"AutoML OJ forecaster\"\n",
"tags = None\n",
"model = remote_run.register_model(model_name = model_name, description = description, tags = tags)\n",
"model = remote_run.register_model(\n",
" model_name=model_name, description=description, tags=tags\n",
")\n",
"\n",
"print(remote_run.model_id)"
]
@@ -662,8 +669,8 @@
"metadata": {},
"outputs": [],
"source": [
"script_file_name = 'score_fcast.py'\n",
"best_run.download_file('outputs/scoring_file_v_1_0_0.py', script_file_name)"
"script_file_name = \"score_fcast.py\"\n",
"best_run.download_file(\"outputs/scoring_file_v_1_0_0.py\", script_file_name)"
]
},
{
@@ -684,15 +691,18 @@
"from azureml.core.webservice import Webservice\n",
"from azureml.core.model import Model\n",
"\n",
"inference_config = InferenceConfig(environment = best_run.get_environment(), \n",
" entry_script = script_file_name)\n",
"inference_config = InferenceConfig(\n",
" environment=best_run.get_environment(), entry_script=script_file_name\n",
")\n",
"\n",
"aciconfig = AciWebservice.deploy_configuration(cpu_cores = 2, \n",
" memory_gb = 4, \n",
" tags = {'type': \"automl-forecasting\"},\n",
" description = \"Automl forecasting sample service\")\n",
"aciconfig = AciWebservice.deploy_configuration(\n",
" cpu_cores=2,\n",
" memory_gb=4,\n",
" tags={\"type\": \"automl-forecasting\"},\n",
" description=\"Automl forecasting sample service\",\n",
")\n",
"\n",
"aci_service_name = 'automl-oj-forecast-01'\n",
"aci_service_name = \"automl-oj-forecast-01\"\n",
"print(aci_service_name)\n",
"aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)\n",
"aci_service.wait_for_deployment(True)\n",
@@ -722,22 +732,27 @@
"outputs": [],
"source": [
"import json\n",
"\n",
"X_query = test.copy()\n",
"X_query.pop(target_column_name)\n",
"# We have to convert datetime to string, because Timestamps cannot be serialized to JSON.\n",
"X_query[time_column_name] = X_query[time_column_name].astype(str)\n",
"# The Service object accept the complex dictionary, which is internally converted to JSON string.\n",
"# The section 'data' contains the data frame in the form of dictionary.\n",
"sample_quantiles=[0.025,0.975]\n",
"test_sample = json.dumps({'data': X_query.to_dict(orient='records'), 'quantiles': sample_quantiles})\n",
"response = aci_service.run(input_data = test_sample)\n",
"sample_quantiles = [0.025, 0.975]\n",
"test_sample = json.dumps(\n",
" {\"data\": X_query.to_dict(orient=\"records\"), \"quantiles\": sample_quantiles}\n",
")\n",
"response = aci_service.run(input_data=test_sample)\n",
"# translate from networkese to datascientese\n",
"try: \n",
"try:\n",
" res_dict = json.loads(response)\n",
" y_fcst_all = pd.DataFrame(res_dict['index'])\n",
" y_fcst_all[time_column_name] = pd.to_datetime(y_fcst_all[time_column_name], unit = 'ms')\n",
" y_fcst_all['forecast'] = res_dict['forecast']\n",
" y_fcst_all['prediction_interval'] = res_dict['prediction_interval']\n",
" y_fcst_all = pd.DataFrame(res_dict[\"index\"])\n",
" y_fcst_all[time_column_name] = pd.to_datetime(\n",
" y_fcst_all[time_column_name], unit=\"ms\"\n",
" )\n",
" y_fcst_all[\"forecast\"] = res_dict[\"forecast\"]\n",
" y_fcst_all[\"prediction_interval\"] = res_dict[\"prediction_interval\"]\n",
"except:\n",
" print(res_dict)"
]
@@ -764,7 +779,7 @@
"metadata": {},
"outputs": [],
"source": [
"serv = Webservice(ws, 'automl-oj-forecast-01')\n",
"serv = Webservice(ws, \"automl-oj-forecast-01\")\n",
"serv.delete() # don't do it accidentally"
]
}

View File

@@ -11,11 +11,14 @@ from pandas.tseries.frequencies import to_offset
parser = argparse.ArgumentParser()
parser.add_argument(
'--target_column_name', type=str, dest='target_column_name',
help='Target Column Name')
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
'--test_dataset', type=str, dest='test_dataset',
help='Test Dataset')
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
)
args = parser.parse_args()
target_column_name = args.target_column_name
@@ -31,25 +34,27 @@ X_test = test_dataset.to_pandas_dataframe().reset_index(drop=True)
y_test = X_test.pop(target_column_name).values
# generate forecast
fitted_model = joblib.load('model.pkl')
fitted_model = joblib.load("model.pkl")
# Default quantile values: 0.5 is the point forecast; 0.025 and 0.975 bound a 95% prediction interval
quantiles = [0.025, 0.5, 0.975]
predicted_column_name = 'predicted'
PI = 'prediction_interval'
predicted_column_name = "predicted"
PI = "prediction_interval"
fitted_model.quantiles = quantiles
pred_quantiles = fitted_model.forecast_quantiles(X_test)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(lambda x: '[{}, {}]'.format(x[0],
x[1]), axis=1)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
)
X_test[target_column_name] = y_test
X_test[PI] = pred_quantiles[PI]
X_test[predicted_column_name] = pred_quantiles[0.5]
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = X_test[X_test[[target_column_name,
predicted_column_name]].notnull().all(axis=1)]
clean = X_test[
X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
file_name = 'outputs/predictions.csv'
file_name = "outputs/predictions.csv"
export_csv = clean.to_csv(file_name, header=True, index=False) # added Index
# Upload the predictions into artifacts
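For reference, a tiny standalone illustration of the prediction_interval string construction above, with invented numbers (.iloc is used here for explicit positional access):

import pandas as pd

pred_quantiles = pd.DataFrame({0.025: [9.1], 0.5: [10.0], 0.975: [11.2]})
pi = pred_quantiles[[0.025, 0.975]].apply(
    lambda x: "[{}, {}]".format(x.iloc[0], x.iloc[1]), axis=1
)
print(pi.iloc[0])  # [9.1, 11.2]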

View File

@@ -3,36 +3,47 @@ import shutil
from azureml.core import ScriptRunConfig
def run_remote_inference(test_experiment, compute_target, train_run,
test_dataset, target_column_name, inference_folder='./forecast'):
def run_remote_inference(
test_experiment,
compute_target,
train_run,
test_dataset,
target_column_name,
inference_folder="./forecast",
):
# Create local directory to copy the model.pkl and forecasting_script.py files into.
# These files will be uploaded to and executed on the compute instance.
os.makedirs(inference_folder, exist_ok=True)
shutil.copy('forecasting_script.py', inference_folder)
shutil.copy("forecasting_script.py", inference_folder)
train_run.download_file('outputs/model.pkl',
os.path.join(inference_folder, 'model.pkl'))
train_run.download_file(
"outputs/model.pkl", os.path.join(inference_folder, "model.pkl")
)
inference_env = train_run.get_environment()
config = ScriptRunConfig(source_directory=inference_folder,
script='forecasting_script.py',
arguments=['--target_column_name',
config = ScriptRunConfig(
source_directory=inference_folder,
script="forecasting_script.py",
arguments=[
"--target_column_name",
target_column_name,
'--test_dataset',
test_dataset.as_named_input(test_dataset.name)],
"--test_dataset",
test_dataset.as_named_input(test_dataset.name),
],
compute_target=compute_target,
environment=inference_env)
environment=inference_env,
)
run = test_experiment.submit(config,
tags={'training_run_id':
train_run.id,
'run_algorithm':
train_run.properties['run_algorithm'],
'valid_score':
train_run.properties['score'],
'primary_metric':
train_run.properties['primary_metric']})
run = test_experiment.submit(
config,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags['run_algorithm'])
run.log("run_algorithm", run.tags["run_algorithm"])
return run

View File

@@ -56,16 +56,18 @@
"from statsmodels.graphics.tsaplots import plot_acf, plot_pacf\n",
"import matplotlib.pyplot as plt\n",
"from pandas.plotting import register_matplotlib_converters\n",
"\n",
"register_matplotlib_converters() # fixes the future warning issue\n",
"\n",
"from helper_functions import unit_root_test_wrapper\n",
"from statsmodels.tools.sm_exceptions import InterpolationWarning\n",
"warnings.simplefilter('ignore', InterpolationWarning)\n",
"\n",
"warnings.simplefilter(\"ignore\", InterpolationWarning)\n",
"\n",
"\n",
"# set printing options\n",
"pd.set_option('display.max_columns', 500)\n",
"pd.set_option('display.width', 1000)"
"pd.set_option(\"display.max_columns\", 500)\n",
"pd.set_option(\"display.width\", 1000)"
]
},
{
@@ -75,15 +77,15 @@
"outputs": [],
"source": [
"# load data\n",
"main_data_loc = 'data'\n",
"train_file_name = 'S4248SM144SCEN.csv'\n",
"main_data_loc = \"data\"\n",
"train_file_name = \"S4248SM144SCEN.csv\"\n",
"\n",
"TARGET_COLNAME = 'S4248SM144SCEN'\n",
"TIME_COLNAME = 'observation_date'\n",
"COVID_PERIOD_START = '2020-03-01'\n",
"TARGET_COLNAME = \"S4248SM144SCEN\"\n",
"TIME_COLNAME = \"observation_date\"\n",
"COVID_PERIOD_START = \"2020-03-01\"\n",
"\n",
"df = pd.read_csv(os.path.join(main_data_loc, train_file_name))\n",
"df[TIME_COLNAME] = pd.to_datetime(df[TIME_COLNAME], format='%Y-%m-%d')\n",
"df[TIME_COLNAME] = pd.to_datetime(df[TIME_COLNAME], format=\"%Y-%m-%d\")\n",
"df.sort_values(by=TIME_COLNAME, inplace=True)\n",
"df.set_index(TIME_COLNAME, inplace=True)\n",
"df.head(2)"
@@ -96,9 +98,9 @@
"outputs": [],
"source": [
"# plot the entire dataset\n",
"fig, ax = plt.subplots(figsize=(6,2), dpi=180)\n",
"fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
"ax.plot(df)\n",
"ax.title.set_text('Original Data Series')\n",
"ax.title.set_text(\"Original Data Series\")\n",
"locs, labels = plt.xticks()\n",
"plt.xticks(rotation=45)"
]
@@ -117,9 +119,9 @@
"outputs": [],
"source": [
"# plot the entire dataset in first differences\n",
"fig, ax = plt.subplots(figsize=(6,2), dpi=180)\n",
"fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
"ax.plot(df.diff().dropna())\n",
"ax.title.set_text('Data in first differences')\n",
"ax.title.set_text(\"Data in first differences\")\n",
"locs, labels = plt.xticks()\n",
"plt.xticks(rotation=45)"
]
@@ -151,9 +153,9 @@
"outputs": [],
"source": [
"# plot the entire dataset in first differences\n",
"fig, ax = plt.subplots(figsize=(6,2), dpi=180)\n",
"fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
"ax.plot(df.diff().dropna())\n",
"ax.title.set_text('Data in first differences')\n",
"ax.title.set_text(\"Data in first differences\")\n",
"locs, labels = plt.xticks()\n",
"plt.xticks(rotation=45)"
]
@@ -175,9 +177,9 @@
"df = df[:COVID_PERIOD_START]\n",
"\n",
"# plot the entire dataset in first differences\n",
"fig, ax = plt.subplots(figsize=(6,2), dpi=180)\n",
"ax.plot(df['2015-01-01':].diff().dropna())\n",
"ax.title.set_text('Data in first differences')\n",
"fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
"ax.plot(df[\"2015-01-01\":].diff().dropna())\n",
"ax.title.set_text(\"Data in first differences\")\n",
"locs, labels = plt.xticks()\n",
"plt.xticks(rotation=45)"
]
@@ -245,10 +247,10 @@
"source": [
"# unit root tests\n",
"test = unit_root_test_wrapper(df[TARGET_COLNAME])\n",
"print('---------------', '\\n')\n",
"print('Summary table', '\\n', test['summary'], '\\n')\n",
"print('Is the {} series stationary?: {}'.format(TARGET_COLNAME, test['stationary']))\n",
"print('---------------', '\\n')"
"print(\"---------------\", \"\\n\")\n",
"print(\"Summary table\", \"\\n\", test[\"summary\"], \"\\n\")\n",
"print(\"Is the {} series stationary?: {}\".format(TARGET_COLNAME, test[\"stationary\"]))\n",
"print(\"---------------\", \"\\n\")"
]
},
{
@@ -285,10 +287,10 @@
"source": [
"# unit root tests\n",
"test = unit_root_test_wrapper(df[TARGET_COLNAME].diff().dropna())\n",
"print('---------------', '\\n')\n",
"print('Summary table', '\\n', test['summary'], '\\n')\n",
"print('Is the {} series stationary?: {}'.format(TARGET_COLNAME, test['stationary']))\n",
"print('---------------', '\\n')"
"print(\"---------------\", \"\\n\")\n",
"print(\"Summary table\", \"\\n\", test[\"summary\"], \"\\n\")\n",
"print(\"Is the {} series stationary?: {}\".format(TARGET_COLNAME, test[\"stationary\"]))\n",
"print(\"---------------\", \"\\n\")"
]
},
{
@@ -305,13 +307,13 @@
"outputs": [],
"source": [
"# plot original and stationary data\n",
"fig = plt.figure(figsize=(10,10))\n",
"fig = plt.figure(figsize=(10, 10))\n",
"ax1 = fig.add_subplot(211)\n",
"ax1.plot(df[TARGET_COLNAME], '-b')\n",
"ax1.plot(df[TARGET_COLNAME], \"-b\")\n",
"ax2 = fig.add_subplot(212)\n",
"ax2.plot(df[TARGET_COLNAME].diff().dropna(), '-b')\n",
"ax1.title.set_text('Original data')\n",
"ax2.title.set_text('Data in first differences')"
"ax2.plot(df[TARGET_COLNAME].diff().dropna(), \"-b\")\n",
"ax1.title.set_text(\"Original data\")\n",
"ax2.title.set_text(\"Data in first differences\")"
]
},
{
@@ -372,7 +374,7 @@
"outputs": [],
"source": [
"# Plot the ACF/PACF for the series in differences\n",
"fig, ax = plt.subplots(1,2,figsize=(10,5))\n",
"fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n",
"plot_acf(df[TARGET_COLNAME].diff().dropna().values.squeeze(), ax=ax[0])\n",
"plot_pacf(df[TARGET_COLNAME].diff().dropna().values.squeeze(), ax=ax[1])\n",
"plt.show()"

View File

@@ -51,7 +51,7 @@
"from azureml.core.compute import AmlCompute\n",
"from azureml.core.compute import ComputeTarget\n",
"import matplotlib.pyplot as plt\n",
"from helper_functions import (ts_train_test_split, compute_metrics)\n",
"from helper_functions import ts_train_test_split, compute_metrics\n",
"\n",
"import azureml.core\n",
"from azureml.core.workspace import Workspace\n",
@@ -61,8 +61,8 @@
"\n",
"# set printing options\n",
"np.set_printoptions(precision=4, suppress=True, linewidth=100)\n",
"pd.set_option('display.max_columns', 500)\n",
"pd.set_option('display.width', 1000)"
"pd.set_option(\"display.max_columns\", 500)\n",
"pd.set_option(\"display.width\", 1000)"
]
},
{
@@ -81,27 +81,32 @@
"source": [
"ws = Workspace.from_config()\n",
"amlcompute_cluster_name = \"recipe-cluster\"\n",
" \n",
"\n",
"found = False\n",
"# Check if this compute target already exists in the workspace.\n",
"cts = ws.compute_targets\n",
"if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'AmlCompute':\n",
"if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == \"AmlCompute\":\n",
" found = True\n",
" print('Found existing compute target.')\n",
" print(\"Found existing compute target.\")\n",
" compute_target = cts[amlcompute_cluster_name]\n",
"\n",
"if not found:\n",
" print('Creating a new compute target...')\n",
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\",\n",
" max_nodes = 6)\n",
" print(\"Creating a new compute target...\")\n",
" provisioning_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_D2_V2\", max_nodes=6\n",
" )\n",
"\n",
" # Create the cluster.\\n\",\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config)\n",
" compute_target = ComputeTarget.create(\n",
" ws, amlcompute_cluster_name, provisioning_config\n",
" )\n",
"\n",
"print('Checking cluster status...')\n",
"print(\"Checking cluster status...\")\n",
"# Can poll for a minimum number of nodes and for a specific timeout.\n",
"# If no min_node_count is provided, it will use the scale settings for the cluster.\n",
"compute_target.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20)"
"compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
")"
]
},
{
@@ -119,16 +124,18 @@
"metadata": {},
"outputs": [],
"source": [
"main_data_loc = 'data'\n",
"train_file_name = 'S4248SM144SCEN.csv'\n",
"main_data_loc = \"data\"\n",
"train_file_name = \"S4248SM144SCEN.csv\"\n",
"\n",
"TARGET_COLNAME = \"S4248SM144SCEN\"\n",
"TIME_COLNAME = \"observation_date\"\n",
"COVID_PERIOD_START = '2020-03-01' # start of the covid period. To be excluded from evaluation.\n",
"COVID_PERIOD_START = (\n",
" \"2020-03-01\" # start of the covid period. To be excluded from evaluation.\n",
")\n",
"\n",
"# load data\n",
"df = pd.read_csv(os.path.join(main_data_loc, train_file_name))\n",
"df[TIME_COLNAME] = pd.to_datetime(df[TIME_COLNAME], format='%Y-%m-%d')\n",
"df[TIME_COLNAME] = pd.to_datetime(df[TIME_COLNAME], format=\"%Y-%m-%d\")\n",
"df.sort_values(by=TIME_COLNAME, inplace=True)\n",
"\n",
"# remove the Covid period\n",
@@ -202,24 +209,28 @@
"source": [
"# choose a name for the run history container in the workspace\n",
"if isinstance(TARGET_LAGS, list):\n",
" TARGET_LAGS_STR = '-'.join(map(str, TARGET_LAGS)) if (len(TARGET_LAGS) > 0) else None\n",
" TARGET_LAGS_STR = (\n",
" \"-\".join(map(str, TARGET_LAGS)) if (len(TARGET_LAGS) > 0) else None\n",
" )\n",
"else:\n",
" TARGET_LAGS_STR = TARGET_LAGS\n",
"\n",
"experiment_desc = 'diff-{}_lags-{}_STL-{}'.format(DIFFERENCE_SERIES, TARGET_LAGS_STR, STL_TYPE)\n",
"experiment_name = 'alcohol_{}'.format(experiment_desc)\n",
"experiment_desc = \"diff-{}_lags-{}_STL-{}\".format(\n",
" DIFFERENCE_SERIES, TARGET_LAGS_STR, STL_TYPE\n",
")\n",
"experiment_name = \"alcohol_{}\".format(experiment_desc)\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['SDK version'] = azureml.core.VERSION\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['SKU'] = ws.sku\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"SDK version\"] = azureml.core.VERSION\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"SKU\"] = ws.sku\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"print(outputDf.T)"
]
},
@@ -230,9 +241,9 @@
"outputs": [],
"source": [
"# create output directory\n",
"output_dir = 'experiment_output/{}'.format(experiment_desc)\n",
"output_dir = \"experiment_output/{}\".format(experiment_desc)\n",
"if not os.path.exists(output_dir):\n",
" os.makedirs(output_dir) "
" os.makedirs(output_dir)"
]
},
{
@@ -255,17 +266,21 @@
"outputs": [],
"source": [
"# split the data into train and test set\n",
"if DIFFERENCE_SERIES: \n",
"if DIFFERENCE_SERIES:\n",
" # generate train/inference sets using data in first differences\n",
" df_train, df_test = ts_train_test_split(df_input=df_delta,\n",
" df_train, df_test = ts_train_test_split(\n",
" df_input=df_delta,\n",
" n=FORECAST_HORIZON,\n",
" time_colname=TIME_COLNAME,\n",
" ts_id_colnames=TIME_SERIES_ID_COLNAMES)\n",
" ts_id_colnames=TIME_SERIES_ID_COLNAMES,\n",
" )\n",
"else:\n",
" df_train, df_test = ts_train_test_split(df_input=df,\n",
" df_train, df_test = ts_train_test_split(\n",
" df_input=df,\n",
" n=FORECAST_HORIZON,\n",
" time_colname=TIME_COLNAME,\n",
" ts_id_colnames=TIME_SERIES_ID_COLNAMES)"
" ts_id_colnames=TIME_SERIES_ID_COLNAMES,\n",
" )"
]
},
{
@@ -286,12 +301,27 @@
"df_test.to_csv(\"test.csv\", index=False)\n",
"\n",
"datastore = ws.get_default_datastore()\n",
"datastore.upload_files(files = ['./train.csv'], target_path = 'uni-recipe-dataset/tabular/', overwrite = True,show_progress = True)\n",
"datastore.upload_files(files = ['./test.csv'], target_path = 'uni-recipe-dataset/tabular/', overwrite = True,show_progress = True)\n",
"datastore.upload_files(\n",
" files=[\"./train.csv\"],\n",
" target_path=\"uni-recipe-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"datastore.upload_files(\n",
" files=[\"./test.csv\"],\n",
" target_path=\"uni-recipe-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"\n",
"from azureml.core import Dataset\n",
"train_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'uni-recipe-dataset/tabular/train.csv')])\n",
"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'uni-recipe-dataset/tabular/test.csv')])\n",
"\n",
"train_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"uni-recipe-dataset/tabular/train.csv\")]\n",
")\n",
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"uni-recipe-dataset/tabular/test.csv\")]\n",
")\n",
"\n",
"# print the first 5 rows of the Dataset\n",
"train_dataset.to_pandas_dataframe().reset_index(drop=True).head(5)"
@@ -311,17 +341,18 @@
"outputs": [],
"source": [
"time_series_settings = {\n",
" 'time_column_name': TIME_COLNAME,\n",
" 'forecast_horizon': FORECAST_HORIZON,\n",
" 'target_lags': TARGET_LAGS,\n",
" 'use_stl': STL_TYPE,\n",
" 'blocked_models': BLOCKED_MODELS,\n",
" 'time_series_id_column_names': TIME_SERIES_ID_COLNAMES\n",
" \"time_column_name\": TIME_COLNAME,\n",
" \"forecast_horizon\": FORECAST_HORIZON,\n",
" \"target_lags\": TARGET_LAGS,\n",
" \"use_stl\": STL_TYPE,\n",
" \"blocked_models\": BLOCKED_MODELS,\n",
" \"time_series_id_column_names\": TIME_SERIES_ID_COLNAMES,\n",
"}\n",
"\n",
"automl_config = AutoMLConfig(task='forecasting',\n",
" debug_log='sample_experiment.log',\n",
" primary_metric='normalized_root_mean_squared_error',\n",
"automl_config = AutoMLConfig(\n",
" task=\"forecasting\",\n",
" debug_log=\"sample_experiment.log\",\n",
" primary_metric=\"normalized_root_mean_squared_error\",\n",
" experiment_timeout_minutes=20,\n",
" iteration_timeout_minutes=5,\n",
" enable_early_stopping=True,\n",
@@ -331,7 +362,8 @@
" verbosity=logging.INFO,\n",
" max_cores_per_iteration=-1,\n",
" compute_target=compute_target,\n",
" **time_series_settings)"
" **time_series_settings,\n",
")"
]
},
{
@@ -404,14 +436,17 @@
"outputs": [],
"source": [
"from run_forecast import run_remote_inference\n",
"remote_run = run_remote_inference(test_experiment=test_experiment, \n",
"\n",
"remote_run = run_remote_inference(\n",
" test_experiment=test_experiment,\n",
" compute_target=compute_target,\n",
" train_run=best_run,\n",
" test_dataset=test_dataset,\n",
" target_column_name=TARGET_COLNAME)\n",
" target_column_name=TARGET_COLNAME,\n",
")\n",
"remote_run.wait_for_completion(show_output=False)\n",
"\n",
"remote_run.download_file('outputs/predictions.csv', f'{output_dir}/predictions.csv')"
"remote_run.download_file(\"outputs/predictions.csv\", f\"{output_dir}/predictions.csv\")"
]
},
{
@@ -428,7 +463,7 @@
"metadata": {},
"outputs": [],
"source": [
"X_trans = pd.read_csv(f'{output_dir}/predictions.csv', parse_dates=[TIME_COLNAME])\n",
"X_trans = pd.read_csv(f\"{output_dir}/predictions.csv\", parse_dates=[TIME_COLNAME])\n",
"X_trans.head()"
]
},
@@ -440,15 +475,15 @@
"source": [
"# convert forecast in differences to levels\n",
"def convert_fcst_diff_to_levels(fcst, yt, df_orig):\n",
" \"\"\" Convert forecast from first differences to levels. \"\"\"\n",
" \"\"\"Convert forecast from first differences to levels.\"\"\"\n",
" fcst = fcst.reset_index(drop=False, inplace=False)\n",
" fcst['predicted_level'] = fcst['predicted'].cumsum()\n",
" fcst['predicted_level'] = fcst['predicted_level'].astype(float) + float(yt)\n",
" fcst[\"predicted_level\"] = fcst[\"predicted\"].cumsum()\n",
" fcst[\"predicted_level\"] = fcst[\"predicted_level\"].astype(float) + float(yt)\n",
" # merge actuals\n",
" out = pd.merge(fcst,\n",
" df_orig[[TIME_COLNAME, TARGET_COLNAME]], \n",
" on=[TIME_COLNAME], how='inner')\n",
" out.rename(columns={TARGET_COLNAME: 'actual_level'}, inplace=True)\n",
" out = pd.merge(\n",
" fcst, df_orig[[TIME_COLNAME, TARGET_COLNAME]], on=[TIME_COLNAME], how=\"inner\"\n",
" )\n",
" out.rename(columns={TARGET_COLNAME: \"actual_level\"}, inplace=True)\n",
" return out"
]
},
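To see why the cumulative sum recovers levels, a tiny self-contained check with invented numbers (not from this PR): if the last observed level is 100 and the differenced forecasts are [2, -1, 3], the level forecasts are 100 + cumsum = [102, 101, 104].

import pandas as pd

deltas = pd.Series([2.0, -1.0, 3.0])  # forecasts of first differences
levels = deltas.cumsum() + 100.0      # add back the last observed level
print(levels.tolist())                # [102.0, 101.0, 104.0]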
@@ -458,16 +493,16 @@
"metadata": {},
"outputs": [],
"source": [
"if DIFFERENCE_SERIES: \n",
"if DIFFERENCE_SERIES:\n",
" # convert forecast in differences to the levels\n",
" INFORMATION_SET_DATE = max(df_train[TIME_COLNAME])\n",
" YT = df.query('{} == @INFORMATION_SET_DATE'.format(TIME_COLNAME))[TARGET_COLNAME]\n",
" YT = df.query(\"{} == @INFORMATION_SET_DATE\".format(TIME_COLNAME))[TARGET_COLNAME]\n",
"\n",
" fcst_df = convert_fcst_diff_to_levels(fcst=X_trans, yt=YT, df_orig=df)\n",
"else:\n",
" fcst_df = X_trans.copy()\n",
" fcst_df['actual_level'] = y_test\n",
" fcst_df['predicted_level'] = y_predictions\n",
" fcst_df[\"actual_level\"] = y_test\n",
" fcst_df[\"predicted_level\"] = y_predictions\n",
"\n",
"del X_trans"
]
@@ -486,13 +521,11 @@
"outputs": [],
"source": [
"# compute metrics\n",
"metrics_df = compute_metrics(fcst_df=fcst_df,\n",
" metric_name=None,\n",
" ts_id_colnames=None)\n",
"metrics_df = compute_metrics(fcst_df=fcst_df, metric_name=None, ts_id_colnames=None)\n",
"# save output\n",
"metrics_file_name = '{}_metrics.csv'.format(experiment_name)\n",
"fcst_file_name = '{}_forecst.csv'.format(experiment_name)\n",
"plot_file_name = '{}_plot.pdf'.format(experiment_name)\n",
"metrics_file_name = \"{}_metrics.csv\".format(experiment_name)\n",
"fcst_file_name = \"{}_forecst.csv\".format(experiment_name)\n",
"plot_file_name = \"{}_plot.pdf\".format(experiment_name)\n",
"\n",
"metrics_df.to_csv(os.path.join(output_dir, metrics_file_name), index=True)\n",
"fcst_df.to_csv(os.path.join(output_dir, fcst_file_name), index=True)"
@@ -517,9 +550,9 @@
"\n",
"# generate and save plots\n",
"fig, ax = plt.subplots(dpi=180)\n",
"ax.plot(plot_df[TARGET_COLNAME], '-g', label='Historical')\n",
"ax.plot(fcst_df['actual_level'], '-b', label='Actual')\n",
"ax.plot(fcst_df['predicted_level'], '-r', label='Forecast')\n",
"ax.plot(plot_df[TARGET_COLNAME], \"-g\", label=\"Historical\")\n",
"ax.plot(fcst_df[\"actual_level\"], \"-b\", label=\"Actual\")\n",
"ax.plot(fcst_df[\"predicted_level\"], \"-r\", label=\"Forecast\")\n",
"ax.legend()\n",
"ax.set_title(\"Forecast vs Actuals\")\n",
"ax.set_xlabel(TIME_COLNAME)\n",

View File

@@ -11,11 +11,14 @@ from sklearn.externals import joblib
parser = argparse.ArgumentParser()
parser.add_argument(
'--target_column_name', type=str, dest='target_column_name',
help='Target Column Name')
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
'--test_dataset', type=str, dest='test_dataset',
help='Test Dataset')
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
)
args = parser.parse_args()
target_column_name = args.target_column_name
@@ -27,30 +30,40 @@ ws = run.experiment.workspace
# get the input dataset by id
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)
X_test = test_dataset.drop_columns(columns=[target_column_name]).to_pandas_dataframe().reset_index(drop=True)
y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(columns=[target_column_name]).to_pandas_dataframe()
X_test = (
test_dataset.drop_columns(columns=[target_column_name])
.to_pandas_dataframe()
.reset_index(drop=True)
)
y_test_df = (
test_dataset.with_timestamp_columns(None)
.keep_columns(columns=[target_column_name])
.to_pandas_dataframe()
)
# generate forecast
fitted_model = joblib.load('model.pkl')
fitted_model = joblib.load("model.pkl")
# Default quantile values: 0.5 is the point forecast; 0.025 and 0.975 bound a 95% prediction interval
quantiles = [0.025, 0.5, 0.975]
predicted_column_name = 'predicted'
PI = 'prediction_interval'
predicted_column_name = "predicted"
PI = "prediction_interval"
fitted_model.quantiles = quantiles
pred_quantiles = fitted_model.forecast_quantiles(X_test)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(lambda x: '[{}, {}]'.format(x[0],
x[1]), axis=1)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
)
X_test[target_column_name] = y_test_df[target_column_name]
X_test[PI] = pred_quantiles[PI]
X_test[predicted_column_name] = pred_quantiles[0.5]
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = X_test[X_test[[target_column_name,
predicted_column_name]].notnull().all(axis=1)]
clean.rename(columns={target_column_name: 'actual'}, inplace=True)
clean = X_test[
X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
clean.rename(columns={target_column_name: "actual"}, inplace=True)
file_name = 'outputs/predictions.csv'
file_name = "outputs/predictions.csv"
export_csv = clean.to_csv(file_name, header=True, index=False) # added Index
# Upload the predictions into artifacts

View File

@@ -15,22 +15,25 @@ def adf_test(series, **kw):
:param series: series to test
:return: dictionary of results
"""
if 'lags' in kw.keys():
msg = 'Lag order of {} detected. Running the ADF test...'.format(str(kw['lags']))
if "lags" in kw.keys():
msg = "Lag order of {} detected. Running the ADF test...".format(
str(kw["lags"])
)
print(msg)
statistic, pval, critval, resstore = stattools.adfuller(series,
maxlag=kw['lags'],
autolag=kw['autolag'],
store=kw['store'])
statistic, pval, critval, resstore = stattools.adfuller(
series, maxlag=kw["lags"], autolag=kw["autolag"], store=kw["store"]
)
else:
statistic, pval, critval, resstore = stattools.adfuller(series,
autolag=kw['IC'],
store=kw['store'])
statistic, pval, critval, resstore = stattools.adfuller(
series, autolag=kw["IC"], store=kw["store"]
)
output = {'statistic': statistic,
'pval': pval,
'critical': critval,
'resstore': resstore}
output = {
"statistic": statistic,
"pval": pval,
"critical": critval,
"resstore": resstore,
}
return output
@@ -41,22 +44,23 @@ def kpss_test(series, **kw):
:param series: series to test
:return: dictionary of results
"""
if kw['store']:
statistic, p_value, critical_values, rstore = stattools.kpss(series,
regression=kw['reg_type'],
lags=kw['lags'],
store=kw['store'])
if kw["store"]:
statistic, p_value, critical_values, rstore = stattools.kpss(
series, regression=kw["reg_type"], lags=kw["lags"], store=kw["store"]
)
else:
statistic, p_value, lags, critical_values = stattools.kpss(series,
regression=kw['reg_type'],
lags=kw['lags'])
output = {'statistic': statistic,
'pval': p_value,
'critical': critical_values,
'lags': rstore.lags if kw['store'] else lags}
statistic, p_value, lags, critical_values = stattools.kpss(
series, regression=kw["reg_type"], lags=kw["lags"]
)
output = {
"statistic": statistic,
"pval": p_value,
"critical": critical_values,
"lags": rstore.lags if kw["store"] else lags,
}
if kw['store']:
output.update({'resstore': rstore})
if kw["store"]:
output.update({"resstore": rstore})
return output
@@ -75,9 +79,9 @@ def format_test_output(test_name, test_res, H0_unit_root=True):
If test failed (test_res is None), return empty dictionary.
"""
# Check if the test failed by trying to extract the test statistic
if test_name in ('ADF', 'KPSS'):
if test_name in ("ADF", "KPSS"):
try:
test_res['statistic']
test_res["statistic"]
except BaseException:
test_res = None
else:
@@ -90,32 +94,32 @@ def format_test_output(test_name, test_res, H0_unit_root=True):
return {}
# extract necessary information
if test_name in ('ADF', 'KPSS'):
statistic = test_res['statistic']
crit_val = test_res['critical']['5%']
p_val = test_res['pval']
lags = test_res['resstore'].usedlag if test_name == 'ADF' else test_res['lags']
if test_name in ("ADF", "KPSS"):
statistic = test_res["statistic"]
crit_val = test_res["critical"]["5%"]
p_val = test_res["pval"]
lags = test_res["resstore"].usedlag if test_name == "ADF" else test_res["lags"]
else:
statistic = test_res.stat
crit_val = test_res.critical_values['5%']
crit_val = test_res.critical_values["5%"]
p_val = test_res.pvalue
lags = test_res.lags
if H0_unit_root:
H0 = 'The process is non-stationary'
H0 = "The process is non-stationary"
stationary = "yes" if p_val < 0.05 else "not"
else:
H0 = 'The process is stationary'
H0 = "The process is stationary"
stationary = "yes" if p_val > 0.05 else "not"
out = {
'test_name': test_name,
'statistic': statistic,
'crit_val': crit_val,
'p_val': p_val,
'lags': int(lags),
'stationary': stationary,
'Null Hypothesis': H0
"test_name": test_name,
"statistic": statistic,
"crit_val": crit_val,
"p_val": p_val,
"lags": int(lags),
"stationary": stationary,
"Null Hypothesis": H0,
}
return out
@@ -136,22 +140,15 @@ def unit_root_test_wrapper(series, lags=None):
:return: dictionary with the summary table for all tests and the final decision (stationary vs non-stationary)
"""
# setting for ADF and KPSS tests
adf_settings = {
'IC': 'AIC',
'store': True
}
adf_settings = {"IC": "AIC", "store": True}
kpss_settings = {
'reg_type': 'c',
'lags': 'auto',
'store': True
}
kpss_settings = {"reg_type": "c", "lags": "auto", "store": True}
arch_test_settings = {} # settings for PP, ADF GLS and ZA tests
if lags is not None:
adf_settings.update({'lags': lags, 'autolag': None})
kpss_settings.update({'lags:': lags})
arch_test_settings = {'lags': lags}
adf_settings.update({"lags": lags, "autolag": None})
kpss_settings.update({"lags:": lags})
arch_test_settings = {"lags": lags}
# Run individual tests
adf = adf_test(series, **adf_settings) # ADF test
kpss = kpss_test(series, **kpss_settings) # KPSS test
@@ -160,14 +157,26 @@ def unit_root_test_wrapper(series, lags=None):
za = unitroot.ZivotAndrews(series, **arch_test_settings) # Zivot-Andrews test
# generate output table
adf_dict = format_test_output(test_name='ADF', test_res=adf, H0_unit_root=True)
kpss_dict = format_test_output(test_name='KPSS', test_res=kpss, H0_unit_root=False)
pp_dict = format_test_output(test_name='Philips Perron', test_res=pp, H0_unit_root=True)
adfgls_dict = format_test_output(test_name='ADF GLS', test_res=adfgls, H0_unit_root=True)
za_dict = format_test_output(test_name='Zivot-Andrews', test_res=za, H0_unit_root=True)
adf_dict = format_test_output(test_name="ADF", test_res=adf, H0_unit_root=True)
kpss_dict = format_test_output(test_name="KPSS", test_res=kpss, H0_unit_root=False)
pp_dict = format_test_output(
test_name="Philips Perron", test_res=pp, H0_unit_root=True
)
adfgls_dict = format_test_output(
test_name="ADF GLS", test_res=adfgls, H0_unit_root=True
)
za_dict = format_test_output(
test_name="Zivot-Andrews", test_res=za, H0_unit_root=True
)
test_dict = {'ADF': adf_dict, 'KPSS': kpss_dict, 'PP': pp_dict, 'ADF GLS': adfgls_dict, 'ZA': za_dict}
test_sum = pd.DataFrame.from_dict(test_dict, orient='index').reset_index(drop=True)
test_dict = {
"ADF": adf_dict,
"KPSS": kpss_dict,
"PP": pp_dict,
"ADF GLS": adfgls_dict,
"ZA": za_dict,
}
test_sum = pd.DataFrame.from_dict(test_dict, orient="index").reset_index(drop=True)
# decision based on the majority rule
if test_sum.shape[0] > 0:
@@ -176,9 +185,9 @@ def unit_root_test_wrapper(series, lags=None):
ratio = 1 # all tests fail, assume the series is stationary
# Majority rule. If the ratio is exactly 0.5, assume the series is non-stationary.
stationary = 'YES' if (ratio > 0.5) else 'NO'
stationary = "YES" if (ratio > 0.5) else "NO"
out = {'summary': test_sum, 'stationary': stationary}
out = {"summary": test_sum, "stationary": stationary}
return out
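As a quick sanity check of the majority vote above, a minimal sketch with invented votes mirroring the ratio logic (illustrative only):

votes = ["yes", "yes", "not", "yes", "not"]   # per-test stationarity verdicts
ratio = votes.count("yes") / len(votes)       # 0.6
stationary = "YES" if ratio > 0.5 else "NO"   # a 0.5 tie falls to "NO"
print(stationary)                             # YES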
@@ -196,10 +205,12 @@ def ts_train_test_split(df_input, n, time_colname, ts_id_colnames=None):
ts_id_colnames = []
ts_id_colnames_original = ts_id_colnames.copy()
if len(ts_id_colnames) == 0:
ts_id_colnames = ['Grain']
df_input[ts_id_colnames[0]] = 'dummy'
ts_id_colnames = ["Grain"]
df_input[ts_id_colnames[0]] = "dummy"
# Sort by ascending time
df_grouped = (df_input.sort_values(time_colname).groupby(ts_id_colnames, group_keys=False))
df_grouped = df_input.sort_values(time_colname).groupby(
ts_id_colnames, group_keys=False
)
df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-n])
df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-n:])
# drop group column name if it was not originally provided
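The hunk ends here; for intuition, a minimal standalone sketch of the same per-series head/tail split on invented data:

import pandas as pd

df = pd.DataFrame(
    {"Grain": ["a"] * 4 + ["b"] * 4, "t": list(range(4)) * 2, "y": range(8)}
)
grouped = df.sort_values("t").groupby("Grain", group_keys=False)
df_head = grouped.apply(lambda g: g.iloc[:-2])  # all but the last 2 rows per series
df_tail = grouped.apply(lambda g: g.iloc[-2:])  # the last 2 rows per series
print(len(df_head), len(df_tail))  # 4 4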
@@ -221,30 +232,32 @@ def compute_metrics(fcst_df, metric_name=None, ts_id_colnames=None):
if ts_id_colnames is None:
ts_id_colnames = []
if len(ts_id_colnames) == 0:
ts_id_colnames = ['TS_ID']
fcst_df[ts_id_colnames[0]] = 'dummy'
ts_id_colnames = ["TS_ID"]
fcst_df[ts_id_colnames[0]] = "dummy"
metrics_list = []
for grain, df in fcst_df.groupby(ts_id_colnames):
try:
scores = scoring.score_regression(
y_test=df['actual_level'],
y_pred=df['predicted_level'],
metrics=list(constants.Metric.SCALAR_REGRESSION_SET))
y_test=df["actual_level"],
y_pred=df["predicted_level"],
metrics=list(constants.Metric.SCALAR_REGRESSION_SET),
)
except BaseException:
msg = '{}: metrics calculation failed.'.format(grain)
msg = "{}: metrics calculation failed.".format(grain)
print(msg)
scores = {}
one_grain_metrics_df = pd.DataFrame(list(scores.items()), columns=['metric_name', 'metric']).\
sort_values(['metric_name'])
one_grain_metrics_df = pd.DataFrame(
list(scores.items()), columns=["metric_name", "metric"]
).sort_values(["metric_name"])
one_grain_metrics_df.reset_index(inplace=True, drop=True)
if len(ts_id_colnames) < 2:
one_grain_metrics_df['grain'] = ts_id_colnames[0]
one_grain_metrics_df["grain"] = ts_id_colnames[0]
else:
one_grain_metrics_df['grain'] = "|".join(list(grain))
one_grain_metrics_df["grain"] = "|".join(list(grain))
metrics_list.append(one_grain_metrics_df)
# collect into a data frame
grain_metrics = pd.concat(metrics_list)
if metric_name is not None:
grain_metrics = grain_metrics.query('metric_name == @metric_name')
grain_metrics = grain_metrics.query("metric_name == @metric_name")
return grain_metrics

View File

@@ -3,36 +3,47 @@ import shutil
from azureml.core import ScriptRunConfig
def run_remote_inference(test_experiment, compute_target, train_run,
test_dataset, target_column_name, inference_folder='./forecast'):
def run_remote_inference(
test_experiment,
compute_target,
train_run,
test_dataset,
target_column_name,
inference_folder="./forecast",
):
# Create local directory to copy the model.pkl and forecasting_script.py files into.
# These files will be uploaded to and executed on the compute instance.
os.makedirs(inference_folder, exist_ok=True)
shutil.copy('forecasting_script.py', inference_folder)
shutil.copy("forecasting_script.py", inference_folder)
train_run.download_file('outputs/model.pkl',
os.path.join(inference_folder, 'model.pkl'))
train_run.download_file(
"outputs/model.pkl", os.path.join(inference_folder, "model.pkl")
)
inference_env = train_run.get_environment()
config = ScriptRunConfig(source_directory=inference_folder,
script='forecasting_script.py',
arguments=['--target_column_name',
config = ScriptRunConfig(
source_directory=inference_folder,
script="forecasting_script.py",
arguments=[
"--target_column_name",
target_column_name,
'--test_dataset',
test_dataset.as_named_input(test_dataset.name)],
"--test_dataset",
test_dataset.as_named_input(test_dataset.name),
],
compute_target=compute_target,
environment=inference_env)
environment=inference_env,
)
run = test_experiment.submit(config,
tags={'training_run_id':
train_run.id,
'run_algorithm':
train_run.properties['run_algorithm'],
'valid_score':
train_run.properties['score'],
'primary_metric':
train_run.properties['primary_metric']})
run = test_experiment.submit(
config,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags['run_algorithm'])
run.log("run_algorithm", run.tags["run_algorithm"])
return run

View File

@@ -96,7 +96,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -68,6 +68,7 @@
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import logging\n",
"\n",
"from matplotlib import pyplot as plt\n",
@@ -95,7 +96,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -339,16 +340,8 @@
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = remote_run.get_output()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run_customized, fitted_model_customized = remote_run.get_output()"
"# Retrieve the best Run object\n",
"best_run = remote_run.get_best_child()"
]
},
{
@@ -357,7 +350,7 @@
"source": [
"## Transparency\n",
"\n",
"View updated featurization summary"
"View featurization summary for the best model - to study how different features were transformed. This is stored as a JSON file in the outputs directory for the run."
]
},
{
@@ -366,41 +359,14 @@
"metadata": {},
"outputs": [],
"source": [
"custom_featurizer = fitted_model_customized.named_steps['datatransformer']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"custom_featurizer.get_featurization_summary()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"is_user_friendly=False allows for more detailed summary for transforms being applied"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"custom_featurizer.get_featurization_summary(is_user_friendly=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"custom_featurizer.get_stats_feature_type_summary()"
"# Download the featuurization summary JSON file locally\n",
"best_run.download_file(\"outputs/featurization_summary.json\", \"featurization_summary.json\")\n",
"\n",
"# Render the JSON as a pandas DataFrame\n",
"with open(\"featurization_summary.json\", \"r\") as f:\n",
" records = json.load(f)\n",
"\n",
"pd.DataFrame.from_records(records)"
]
},
{

View File

@@ -92,7 +92,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -106,7 +106,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -11,4 +11,4 @@ dependencies:
- matplotlib
- azureml-dataset-runtime
- ipywidgets
- raiwidgets~=0.10.0
- raiwidgets~=0.15.0

View File

@@ -10,4 +10,4 @@ dependencies:
- ipython
- matplotlib
- ipywidgets
- raiwidgets~=0.10.0
- raiwidgets~=0.15.0

View File

@@ -324,13 +324,15 @@
"outputs": [],
"source": [
"from azureml.core.conda_dependencies import CondaDependencies \n",
"import sys\n",
"\n",
"# azureml-defaults is required to host the model as a web service.\n",
"azureml_pip_packages = [\n",
" 'azureml-defaults', 'azureml-core', 'azureml-telemetry',\n",
" 'azureml-interpret'\n",
"]\n",
" \n",
"\n",
"python_version = '{0}.{1}'.format(sys.version_info[0], sys.version_info[1])\n",
"\n",
"# Note: this is to pin the scikit-learn and pandas versions to be same as notebook.\n",
"# In production scenario user would choose their dependencies\n",
@@ -354,7 +356,9 @@
"# the submitted job is run in. Note the remote environment(s) needs to be similar to the local\n",
"# environment, otherwise if a model is trained or deployed in a different environment this can\n",
"# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
"myenv = CondaDependencies.create(pip_packages=['pyyaml', sklearn_dep, pandas_dep] + azureml_pip_packages)\n",
"myenv = CondaDependencies.create(\n",
" python_version=python_version,\n",
" pip_packages=['pyyaml', sklearn_dep, pandas_dep] + azureml_pip_packages)\n",
"\n",
"with open(\"myenv.yml\",\"w\") as f:\n",
" f.write(myenv.serialize_to_string())\n",

View File

@@ -10,4 +10,4 @@ dependencies:
- ipython
- matplotlib
- ipywidgets
- raiwidgets~=0.10.0
- raiwidgets~=0.15.0

View File

@@ -251,6 +251,7 @@
"from azureml.core.runconfig import RunConfiguration\n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"from azureml.core.runconfig import DEFAULT_CPU_IMAGE\n",
"import sys\n",
"\n",
"# Create a new runconfig object\n",
"run_config = RunConfiguration()\n",
@@ -268,7 +269,7 @@
" 'azureml-defaults', 'azureml-telemetry', 'azureml-interpret'\n",
"]\n",
" \n",
"\n",
"python_version = '{0}.{1}'.format(sys.version_info[0], sys.version_info[1])\n",
"\n",
"# Note: this is to pin the scikit-learn version to be same as notebook.\n",
"# In production scenario user would choose their dependencies\n",
@@ -293,7 +294,10 @@
"# environment, otherwise if a model is trained or deployed in a different environment this can\n",
"# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
"azureml_pip_packages.extend(['pyyaml', sklearn_dep, pandas_dep])\n",
"run_config.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=azureml_pip_packages)\n",
"run_config.environment.python.conda_dependencies = CondaDependencies.create(\n",
" python_version=python_version,\n",
" pip_packages=azureml_pip_packages)\n",
"\n",
"# Now submit a run on AmlCompute\n",
"from azureml.core.script_run_config import ScriptRunConfig\n",
"\n",
@@ -453,7 +457,7 @@
"# environment, otherwise if a model is trained or deployed in a different environment this can\n",
"# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
"azureml_pip_packages.extend(['pyyaml', sklearn_dep, pandas_dep])\n",
"myenv = CondaDependencies.create(pip_packages=azureml_pip_packages)\n",
"myenv = CondaDependencies.create(python_version=python_version, pip_packages=azureml_pip_packages)\n",
"\n",
"with open(\"myenv.yml\",\"w\") as f:\n",
" f.write(myenv.serialize_to_string())\n",

View File

@@ -12,4 +12,4 @@ dependencies:
- azureml-dataset-runtime
- azureml-core
- ipywidgets
- raiwidgets~=0.10.0
- raiwidgets~=0.15.0

View File

@@ -63,6 +63,8 @@
"outputs": [],
"source": [
"import os\n",
"import requests\n",
"import tempfile\n",
"import azureml.core\n",
"from azureml.core import Workspace, Experiment, Datastore\n",
"from azureml.widgets import RunDetails\n",
@@ -158,9 +160,14 @@
"metadata": {},
"outputs": [],
"source": [
"# download data file from remote\n",
"response = requests.get(\"https://dprepdata.blob.core.windows.net/demo/Titanic.csv\")\n",
"titanic_file = os.path.join(tempfile.mkdtemp(), \"Titanic.csv\")\n",
"with open(titanic_file, \"w\") as f:\n",
" f.write(response.content.decode(\"utf-8\"))\n",
"# get_default_datastore() gets the default Azure Blob Store associated with your workspace.\n",
"# Here we are reusing the def_blob_store object we obtained earlier\n",
"def_blob_store.upload_files([\"./20news.pkl\"], target_path=\"20newsgroups\", overwrite=True)\n",
"def_blob_store.upload_files([titanic_file], target_path=\"titanic\", overwrite=True)\n",
"print(\"Upload call completed\")"
]
},
@@ -286,7 +293,7 @@
"- [**AzureBatchStep**](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.azurebatch_step.azurebatchstep?view=azure-ml-py): Creates a step for submitting jobs to Azure Batch\n",
"- [**EstimatorStep**](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.estimator_step.estimatorstep?view=azure-ml-py): Adds a step to run Estimator in a Pipeline.\n",
"- [**MpiStep**](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.mpi_step.mpistep?view=azure-ml-py): Adds a step to run a MPI job in a Pipeline.\n",
"- [**AutoMLStep**](https://docs.microsoft.com/en-us/python/api/azureml-train-automl/azureml.train.automl.automlstep?view=azure-ml-py): Creates a AutoML step in a Pipeline.\n",
"- [**AutoMLStep**](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.automlstep?view=azure-ml-py): Creates a AutoML step in a Pipeline.\n",
"\n",
"The following code will create a PythonScriptStep to be executed in the Azure Machine Learning Compute we created above using train.py, one of the files already made available in the `source_directory`.\n",
"\n",

View File

@@ -120,8 +120,10 @@
"metadata": {},
"outputs": [],
"source": [
"# Uploading data to the datastore\n",
"data_path = def_blob_store.upload_files([\"./20news.pkl\"], target_path=\"20newsgroups\", overwrite=True)"
"# Specify a public dataset path\n",
"data_path = \"https://dprepdata.blob.core.windows.net/demo/Titanic.csv\"\n",
"# Or uploading data to the datastore\n",
"# data_path = def_blob_store.upload_files([\"./your_data.pkl\"], target_path=\"your_path\", overwrite=True)"
]
},
{
@@ -400,11 +402,11 @@
"source": [
"try:\n",
" response.raise_for_status()\n",
"except Exception: \n",
"except Exception as ex: \n",
" raise Exception('Received bad response from the endpoint: {}\\n'\n",
" 'Response Code: {}\\n'\n",
" 'Headers: {}\\n'\n",
" 'Content: {}'.format(rest_endpoint, response.status_code, response.headers, response.content))\n",
" 'Content: {}'.format(rest_endpoint1, response.status_code, response.headers, response.content)) from ex\n",
"\n",
"run_id = response.json().get('Id')\n",
"print('Submitted pipeline run: ', run_id)"

View File

@@ -875,7 +875,12 @@
"\n",
"def populate_environ():\n",
" parser = argparse.ArgumentParser(description='Process arguments passed to script')\n",
"\n",
" # The AZUREML_SCRIPT_DIRECTORY_NAME argument will be filled in if the DatabricksStep\n",
" # was run using a local source_directory and python_script_name\n",
" parser.add_argument('--AZUREML_SCRIPT_DIRECTORY_NAME')\n",
"\n",
" # Remaining arguments are filled in for all databricks jobs and can be used to build the run context\n",
" parser.add_argument('--AZUREML_RUN_TOKEN')\n",
" parser.add_argument('--AZUREML_RUN_TOKEN_EXPIRY')\n",
" parser.add_argument('--AZUREML_RUN_ID')\n",
@@ -884,9 +889,10 @@
" parser.add_argument('--AZUREML_ARM_WORKSPACE_NAME')\n",
" parser.add_argument('--AZUREML_ARM_PROJECT_NAME')\n",
" parser.add_argument('--AZUREML_SERVICE_ENDPOINT')\n",
" parser.add_argument('--AZUREML_WORKSPACE_ID')\n",
" parser.add_argument('--AZUREML_EXPERIMENT_ID')\n",
"\n",
" args = parser.parse_args()\n",
" os.environ['AZUREML_SCRIPT_DIRECTORY_NAME'] = args.AZUREML_SCRIPT_DIRECTORY_NAME\n",
" (args, extra_args) = parser.parse_known_args()\n",
" os.environ['AZUREML_RUN_TOKEN'] = args.AZUREML_RUN_TOKEN\n",
" os.environ['AZUREML_RUN_TOKEN_EXPIRY'] = args.AZUREML_RUN_TOKEN_EXPIRY\n",
" os.environ['AZUREML_RUN_ID'] = args.AZUREML_RUN_ID\n",
@@ -895,10 +901,12 @@
" os.environ['AZUREML_ARM_WORKSPACE_NAME'] = args.AZUREML_ARM_WORKSPACE_NAME\n",
" os.environ['AZUREML_ARM_PROJECT_NAME'] = args.AZUREML_ARM_PROJECT_NAME\n",
" os.environ['AZUREML_SERVICE_ENDPOINT'] = args.AZUREML_SERVICE_ENDPOINT\n",
" os.environ['AZUREML_WORKSPACE_ID'] = args.AZUREML_WORKSPACE_ID\n",
" os.environ['AZUREML_EXPERIMENT_ID'] = args.AZUREML_EXPERIMENT_ID\n",
"\n",
"populate_environ()\n",
"run = Run.get_context(allow_offline=False)\n",
"print(run._run_dto[\"parent_run_id\"])\n",
"print(run.parent.id)\n",
"```"
]
},
@@ -947,7 +955,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.2"
"version": "3.7.9"
},
"order_index": 5,
"star_tag": [

View File

@@ -213,7 +213,7 @@
"blob_input_data = DataReference(\n",
" datastore=def_blob_store,\n",
" data_reference_name=\"test_data\",\n",
" path_on_datastore=\"20newsgroups/20news.pkl\")\n",
" path_on_datastore=\"titanic/Titanic.csv\")\n",
"print(\"DataReference object created\")"
]
},
@@ -382,7 +382,7 @@
"from azureml.pipeline.core import PipelineParameter\n",
"from azureml.data.datapath import DataPath, DataPathComputeBinding\n",
"\n",
"datapath = DataPath(datastore=def_blob_store, path_on_datastore='20newsgroups/20news.pkl')\n",
"datapath = DataPath(datastore=def_blob_store, path_on_datastore='titanic/Titanic.csv')\n",
"datapath_param = PipelineParameter(name=\"compare_data\", default_value=datapath)\n",
"data_parameter1 = (datapath_param, DataPathComputeBinding(mode='mount'))"
]

View File

@@ -42,9 +42,7 @@
"Advantages of running your notebook as a step in pipeline:\n",
"1. Run your notebook like a python script without converting into .py files, leveraging complete end to end experience of Azure Machine Learning Pipelines.\n",
"2. Use pipeline intermediate data to and from the notebook along with other steps in pipeline.\n",
"3. Parameterize your notebook with [Pipeline Parameters](./aml-pipelines-publish-and-run-using-rest-endpoint.ipynb).\n",
"\n",
"Try some more [quick start notebooks](https://github.com/microsoft/recommenders/tree/master/notebooks/00_quick_start) with `NotebookRunnerStep`."
"3. Parameterize your notebook with [Pipeline Parameters](./aml-pipelines-publish-and-run-using-rest-endpoint.ipynb).\n"
]
},
{
@@ -61,6 +59,8 @@
"outputs": [],
"source": [
"import os\n",
"import requests\n",
"import tempfile\n",
"\n",
"import azureml.core\n",
"\n",
@@ -114,7 +114,12 @@
"metadata": {},
"outputs": [],
"source": [
"Datastore.get(ws, \"workspaceblobstore\").upload_files([\"./20news.pkl\"], target_path=\"20newsgroups\", overwrite=True)\n",
"# download data file from remote\n",
"response = requests.get(\"https://dprepdata.blob.core.windows.net/demo/Titanic.csv\")\n",
"titanic_file = os.path.join(tempfile.mkdtemp(), \"Titanic.csv\")\n",
"with open(titanic_file, \"w\") as f:\n",
" f.write(response.content.decode(\"utf-8\"))\n",
"Datastore.get(ws, \"workspaceblobstore\").upload_files([titanic_file], target_path=\"titanic\", overwrite=True)\n",
"print(\"Upload call completed\")"
]
},
@@ -227,7 +232,7 @@
"input_data = DataReference(\n",
" datastore=Datastore.get(ws, \"workspaceblobstore\"),\n",
" data_reference_name=\"blob_test_data\",\n",
" path_on_datastore=\"20newsgroups/20news.pkl\")\n",
" path_on_datastore=\"titanic/Titanic.csv\")\n",
"\n",
"output_data = PipelineData(name=\"processed_data\",\n",
" datastore=Datastore.get(ws, \"workspaceblobstore\"))"

View File

@@ -20,7 +20,7 @@ if not (args.output_extract is None):
os.makedirs(args.output_extract, exist_ok=True)
print("%s created" % args.output_extract)
with open(os.path.join(args.input_extract, '20news.pkl'), 'rb') as f:
with open(os.path.join(args.input_extract, 'Titanic.csv'), 'rb') as f:
content = f.read()
with open(os.path.join(args.output_extract, '20news.pkl'), 'wb') as fw:
with open(os.path.join(args.output_extract, 'Titanic.csv'), 'wb') as fw:
fw.write(content)

View File

@@ -21,7 +21,7 @@ if not (args.output_train is None):
os.makedirs(args.output_train, exist_ok=True)
print("%s created" % args.output_train)
with open(os.path.join(args.input_data, '20news.pkl'), 'rb') as f:
with open(os.path.join(args.input_data), 'rb') as f:
content = f.read()
with open(os.path.join(args.output_train, '20news.pkl'), 'wb') as fw:
with open(os.path.join(args.output_train, 'Titanic.csv'), 'wb') as fw:
fw.write(content)

View File

@@ -7,7 +7,7 @@ from azureml.core import Run
def get_dict(dict_str):
pairs = dict_str.strip("{}").split("\;")
pairs = dict_str.strip("{}").split(r'\;')
new_dict = {}
for pair in pairs:
key, value = pair.strip().split(":")
@@ -31,14 +31,14 @@ parser.add_argument("--columns", type=str, help="rename column pattern")
args = parser.parse_args()
print("Argument 1(columns to keep): %s" % str(args.useful_columns.strip("[]").split("\;")))
print("Argument 2(columns renaming mapping): %s" % str(args.columns.strip("{}").split("\;")))
print("Argument 1(columns to keep): %s" % str(args.useful_columns.strip("[]").split(r'\;')))
print("Argument 2(columns renaming mapping): %s" % str(args.columns.strip("{}").split(r'\;')))
print("Argument 3(output cleansed taxi data path): %s" % args.output_cleanse)
# These functions ensure that null data is removed from the dataset,
# which will help increase machine learning model accuracy.
useful_columns = [s.strip().strip("'") for s in args.useful_columns.strip("[]").split("\;")]
useful_columns = [s.strip().strip("'") for s in args.useful_columns.strip("[]").split(r'\;')]
columns = get_dict(args.columns)
new_df = (raw_data.to_pandas_dataframe()
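
The change above swaps `"\;"` for the raw string `r'\;'`. The two produce the same two-character string (`\;` is not a recognized escape, so Python keeps the backslash), but the non-raw spelling is a deprecated invalid escape sequence that warns on newer Python versions. A small illustration with hypothetical column mappings:

```python
# r'\;' and the escaped form denote the same two characters.
assert r'\;' == '\\;'

# Splitting a '\;'-delimited argument string, as get_dict does above.
pairs = r"{vendor:vendor_id\;rate:rate_code}".strip("{}").split(r'\;')
print(pairs)  # ['vendor:vendor_id', 'rate:rate_code']
```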

View File

@@ -29,14 +29,14 @@ print("Argument (output filtered taxi data path): %s" % args.output_filter)
combined_df = combined_df.astype({"pickup_longitude": 'float64', "pickup_latitude": 'float64',
"dropoff_longitude": 'float64', "dropoff_latitude": 'float64'})
latlong_filtered_df = combined_df[(combined_df.pickup_longitude <= -73.72) &
(combined_df.pickup_longitude >= -74.09) &
(combined_df.pickup_latitude <= 40.88) &
(combined_df.pickup_latitude >= 40.53) &
(combined_df.dropoff_longitude <= -73.72) &
(combined_df.dropoff_longitude >= -74.72) &
(combined_df.dropoff_latitude <= 40.88) &
(combined_df.dropoff_latitude >= 40.53)]
latlong_filtered_df = combined_df[(combined_df.pickup_longitude <= -73.72)
& (combined_df.pickup_longitude >= -74.09)
& (combined_df.pickup_latitude <= 40.88)
& (combined_df.pickup_latitude >= 40.53)
& (combined_df.dropoff_longitude <= -73.72)
& (combined_df.dropoff_longitude >= -74.72)
& (combined_df.dropoff_latitude <= 40.88)
& (combined_df.dropoff_latitude >= 40.53)]
latlong_filtered_df.reset_index(inplace=True, drop=True)

View File

@@ -1,6 +1,6 @@
import argparse
import os
import azureml.core
# import azureml.core
from azureml.core import Run
from sklearn.model_selection import train_test_split
@@ -32,7 +32,7 @@ output_split_train, output_split_test = train_test_split(transformed_df, test_si
output_split_train.reset_index(inplace=True, drop=True)
output_split_test.reset_index(inplace=True, drop=True)
if not (args.output_split_train is None and
args.output_split_test is None):
if not (args.output_split_train
is None and args.output_split_test is None):
write_output(output_split_train, args.output_split_train)
write_output(output_split_test, args.output_split_test)

View File

@@ -0,0 +1,49 @@
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.
import os
def init():
print("Init")
# For partition per folder/column jobs, ParallelRunStep passes an optional positional parameter `mini_batch_context`
# to the `run` function in the user's entry script, which contains information about the mini_batch.
def run(mini_batch, mini_batch_context):
print(f"run method start: {__file__}, run({mini_batch}, {mini_batch_context})")
# `partition_key_value` is a dict that corresponds to the mini_batch, the keys of the dict are those specified
# in `partition_keys` in ParallelRunConfig.
print(f"partition_key_value = {mini_batch_context.partition_key_value}")
# `dataset` is the dataset object that corresponds to the mini_batch, which is a subset of the input dataset
# filtered by condition specified in `partition_key_value`.
print(f"dataset = {mini_batch_context.dataset}")
print(f"file_count_of_mini_batch = {len(mini_batch)}")
file_name_list = []
file_size_list = []
total_file_size_of_mini_batch = 0
for file_path in mini_batch:
file_name_list.append(os.path.basename(file_path))
file_size = os.path.getsize(file_path)
file_size_list.append(file_size)
total_file_size_of_mini_batch += file_size
print(f"total_file_size_of_mini_batch = {total_file_size_of_mini_batch}")
file_size_ratio_list = [file_size * 1.0 / total_file_size_of_mini_batch for file_size in file_size_list]
# If `output_action` is set to `append_row` in ParallelRunConfig for FileDataset input (as in this sample
# notebook), the return value of the `run` method is expected to be a list/tuple of the same length as the
# input parameter `mini_batch`, and each element in the list/tuple forms a row in the result file by
# calling the Python builtin `str` function.
# If you want to control the output format, format and return str values as in this example.
return [
",".join([str(x) for x in fields])
for fields in zip(
file_name_list,
file_size_list,
file_size_ratio_list,
[mini_batch_context.partition_key_value["user"]] * len(mini_batch),
[mini_batch_context.partition_key_value["genres"]] * len(mini_batch),
[total_file_size_of_mini_batch] * len(mini_batch),
)
]
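
Each returned string becomes one line in the `append_row` output file. A toy reconstruction of the row format produced by `run()` above, with hypothetical values:

```python
# One file of 2048 bytes in a mini-batch for user2/piano.
file_name_list = ["piano.wav"]
file_size_list = [2048]
file_size_ratio_list = [1.0]

rows = [
    ",".join(str(x) for x in fields)
    for fields in zip(file_name_list, file_size_list, file_size_ratio_list,
                      ["user2"], ["piano"], [2048])
]
print(rows)  # ['piano.wav,2048,1.0,user2,piano,2048']
```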

View File

@@ -0,0 +1,17 @@
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.
import os
def init():
print("Init")
def run(mini_batch):
print(f'run method start: {__file__}, run({mini_batch})')
total_income = mini_batch["INCOME"].sum()
print(f'total_income = {total_income}')
mini_batch["total_income"] = total_income
return mini_batch
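
A quick local illustration of what this `run` method does to one mini-batch, using a toy DataFrame in place of a real store/brand partition:

```python
import pandas as pd

# Toy mini-batch standing in for one store/brand partition of the OJ sales data.
mini_batch = pd.DataFrame({
    "store": [2, 2],
    "brand": ["tropicana", "tropicana"],
    "INCOME": [10.55, 10.92],
})

total_income = mini_batch["INCOME"].sum()
mini_batch["total_income"] = total_income
print(mini_batch)  # every row of the partition carries the same total_income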

View File

@@ -32,6 +32,7 @@ To run a Batch Inference job, you will need to gather some configuration data.
- **node_count**: number of compute nodes to use.
- **process_count_per_node**: number of processes per node (optional, default value is 1).
- **mini_batch_size**: the approximate amount of input data passed to each run() invocation. For FileDataset input, this is the number of files the user script can process in one run() call. For TabularDataset input, it is the approximate size of data the user script can process in one run() call. E.g. 1024, 1024KB, 10MB, 1GB (optional, default value 10 files for FileDataset and 1MB for TabularDataset.)
- **partition_keys**: the keys used to partition the input data into mini-batches passed to each run() invocation; see the sketch after this list. This parameter is mutually exclusive with `mini_batch_size`, and it requires the input datasets to have a `partition_keys` attribute whose value is a superset of the value of this parameter. Each run() call processes the part of the data that has identical values on the specified `partition_keys`. You can follow the examples in [file-dataset-partition-per-folder.ipynb](./file-dataset-partition-per-folder.ipynb) and [tabular-dataset-partition-per-column.ipynb](./tabular-dataset-partition-per-column.ipynb) to see how to create such datasets.
- **logging_level**: log verbosity. Values in increasing verbosity are: 'WARNING', 'INFO', 'DEBUG' (optional, default value is 'INFO').
- **run_invocation_timeout**: run method invocation timeout period in seconds (optional, default value is 60).
- **environment**: The environment definition. This field configures the Python environment. It can be configured to use an existing Python environment or to set up a temp environment for the experiment. The definition is also responsible for setting the required application dependencies.
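
A minimal sketch of wiring `partition_keys` into a ParallelRunConfig; the names below are placeholders, and the two notebooks linked above contain complete, runnable versions:

```python
from azureml.pipeline.steps import ParallelRunConfig

# 'batch_env' and 'compute_target' are assumed to be defined as in the linked notebooks.
parallel_run_config = ParallelRunConfig(
    source_directory="Code",
    entry_script="total_income.py",
    partition_keys=["store", "brand"],  # mutually exclusive with mini_batch_size
    error_threshold=5,
    output_action="append_row",
    environment=batch_env,
    compute_target=compute_target,
    node_count=2,
)
```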
@@ -121,6 +122,8 @@ pipeline_run.wait_for_completion(show_output=True)
- [file-dataset-image-inference-mnist.ipynb](./file-dataset-image-inference-mnist.ipynb) demonstrates how to run batch inference on an MNIST dataset using FileDataset.
- [tabular-dataset-inference-iris.ipynb](./tabular-dataset-inference-iris.ipynb) demonstrates how to run batch inference on an IRIS dataset using TabularDataset.
- [pipeline-style-transfer.ipynb](../pipeline-style-transfer/pipeline-style-transfer-parallel-run.ipynb) demonstrates using ParallelRunStep in multi-step pipeline and using output from one step as input to ParallelRunStep.
- [file-dataset-partition-per-folder.ipynb](./file-dataset-partition-per-folder.ipynb) demonstrates how to run batch inference on file data by treating files inside each leaf folder as a mini-batch.
- [tabular-dataset-partition-per-column.ipynb](./tabular-dataset-partition-per-column.ipynb) demonstrates how to run batch inference on tabular data by treating rows with identical value on specified columns as a mini-batch.
# Troubleshooting guide

View File

@@ -0,0 +1,404 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-partition-per-folder.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Using Azure Machine Learning Pipelines for Batch Inference for files input partitioned by folder structure\n",
"\n",
"In this notebook, we will demonstrate how to make predictions on large quantities of data asynchronously using the ML pipelines with Azure Machine Learning. Batch inference (or batch scoring) provides cost-effective inference, with unparalleled throughput for asynchronous applications. Batch prediction pipelines can scale to perform inference on terabytes of production data. Batch prediction is optimized for high throughput, fire-and-forget predictions for a large collection of data.\n",
"\n",
"> **Tip**\n",
"If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction.\n",
"\n",
"This example will create a sample dataset with nested folder structure, where the folder name corresponds to the attribute of the files inside it. The Batch Inference job would split the files inside the dataset according to their attributes, so that all files with identical value on the specified attribute will form up a single mini-batch to be processed.\n",
"\n",
"The outline of this notebook is as follows:\n",
"\n",
"- Create a dataset with nested folder structure and `partition_format` to interpret the folder structure into the attributes of files inside.\n",
"- Do batch inference on each mini-batch defined by the folder structure.\n",
"\n",
"## Prerequisites\n",
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the configuration Notebook located at https://github.com/Azure/MachineLearningNotebooks first. This sets you up with a working config file that has information on your workspace, subscription id, etc. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Connect to workspace"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.workspace import Workspace\n",
"ws = Workspace.from_config()\n",
"print('Workspace name: ' + ws.name, \n",
" 'Azure region: ' + ws.location, \n",
" 'Subscription id: ' + ws.subscription_id, \n",
" 'Resource group: ' + ws.resource_group, sep = '\\n')\n",
"\n",
"datastore = ws.get_default_datastore()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"print(azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Upload local test data to datastore\n",
"The destination folder in the datastore is structured so that the name of each folder layer corresponds to a property of all the files inside the foler."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"\n",
"datastore.upload('test_files/disco', 'dataset_partition_test/user1/winter', overwrite=True, show_progress=False)\n",
"datastore.upload('test_files/orchestra', 'dataset_partition_test/user1/fall', overwrite=True, show_progress=False)\n",
"datastore.upload('test_files/piano', 'dataset_partition_test/user2/summer', overwrite=True, show_progress=False)\n",
"datastore.upload('test_files/spirituality', 'dataset_partition_test/user3/fall', overwrite=True, show_progress=False)\n",
"datastore.upload('test_files/piano', 'dataset_partition_test/user4/spring', overwrite=True, show_progress=False)\n",
"datastore.upload('test_files/piano', 'dataset_partition_test/user4/fall', overwrite=True, show_progress=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create partitioned file dataset\n",
"Create a file dataset partitioned by 'user', 'season', and 'genres', each corresponds to a folder layer specified in `partition_format`. You can get a partition of data by specifying the value of one or more partition keys. E.g., by specifying `user=user1 and genres=piano`, you can get all the file that matches `dataset_partition_test/user1/*/piano.wav`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"partitioned_file_dataset = Dataset.File.from_files(path=(datastore, 'dataset_partition_test/*/*/*.wav'),\n",
" partition_format=\"dataset_partition_test/{user}/{season}/{genres}.wav\",\n",
" validate=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"partitioned_file_dataset.partition_keys"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create or Attach existing compute resource"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
"\n",
"# choose a name for your cluster\n",
"compute_name = os.environ.get(\"AML_COMPUTE_CLUSTER_NAME\", \"cpu-cluster\")\n",
"compute_min_nodes = os.environ.get(\"AML_COMPUTE_CLUSTER_MIN_NODES\", 0)\n",
"compute_max_nodes = os.environ.get(\"AML_COMPUTE_CLUSTER_MAX_NODES\", 2)\n",
"\n",
"# This example uses CPU VM. For using GPU VM, set SKU to STANDARD_NC6\n",
"vm_size = os.environ.get(\"AML_COMPUTE_CLUSTER_SKU\", \"STANDARD_D2_V2\")\n",
"\n",
"\n",
"if compute_name in ws.compute_targets:\n",
" compute_target = ws.compute_targets[compute_name]\n",
" if compute_target and type(compute_target) is AmlCompute:\n",
" print('found compute target. just use it. ' + compute_name)\n",
"else:\n",
" print('creating a new compute target...')\n",
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = vm_size,\n",
" min_nodes = compute_min_nodes, \n",
" max_nodes = compute_max_nodes)\n",
"\n",
" # create the cluster\n",
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
" \n",
" # can poll for a minimum number of nodes and for a specific timeout. \n",
" # if no min node count is provided it will use the scale settings for the cluster\n",
" compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
" \n",
" # For a more detailed view of current AmlCompute status, use get_status()\n",
" print(compute_target.get_status().serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Intermediate/Output Data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline, PipelineData\n",
"\n",
"output_dir = PipelineData(name=\"file_dataset_inferences\", datastore=datastore)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Calculate total file size of each mini-batch partitioned by dataset partition key(s)\n",
"The script is to sum up the total size of files in each mini-batch."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"scripts_folder = \"Code\"\n",
"script_file = \"total_file_size.py\"\n",
"\n",
"# peek at contents\n",
"with open(os.path.join(scripts_folder, script_file)) as inference_file:\n",
" print(inference_file.read())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Build and run the batch inference pipeline\n",
"### Specify the environment to run the script\n",
"You would need to specify the required private azureml packages in dependencies. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Environment\n",
"from azureml.core.runconfig import CondaDependencies, DEFAULT_CPU_IMAGE\n",
"\n",
"batch_conda_deps = CondaDependencies.create(pip_packages=[\"azureml-core\", \"azureml-dataset-runtime[fuse]\"])\n",
"batch_env = Environment(name=\"batch_environment\")\n",
"batch_env.python.conda_dependencies = batch_conda_deps\n",
"batch_env.docker.base_image = DEFAULT_CPU_IMAGE"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create the configuration to wrap the inference script\n",
"The parameter `partition_keys` is a list containing a subset of the dataset partition keys, specifying how is the input dataset partitioned. Each and every possible combination of values of partition_keys will form up a mini-batch. E.g., by specifying `partition_keys=['user', 'genres']` will result in 5 mini-batches, i.e. `user=halit && genres=disco`, `user=halit && genres=orchestra`, `user=chunyu && genres=piano`, `user=kin && genres=spirituality` and `user=ramandeep && genres=piano`"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.steps import ParallelRunStep, ParallelRunConfig\n",
"\n",
"# In a real-world scenario, you'll want to shape your process per node and nodes to fit your problem domain.\n",
"parallel_run_config = ParallelRunConfig(\n",
" source_directory=scripts_folder,\n",
" entry_script=script_file, # the user script to run against each input\n",
" partition_keys=['user', 'genres'],\n",
" error_threshold=5,\n",
" output_action='append_row',\n",
" append_row_file_name=\"file_size_outputs.txt\",\n",
" environment=batch_env,\n",
" compute_target=compute_target, \n",
" node_count=2,\n",
" run_invocation_timeout=600\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create the pipeline step"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"parallel_run_step = ParallelRunStep(\n",
" name='summarize-file-size',\n",
" inputs=[partitioned_file_dataset.as_named_input(\"partitioned_file_input\")],\n",
" output=output_dir,\n",
" parallel_run_config=parallel_run_config,\n",
" allow_reuse=False\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Run the pipeline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"from azureml.pipeline.core import Pipeline\n",
"\n",
"pipeline = Pipeline(workspace=ws, steps=[parallel_run_step])\n",
"\n",
"pipeline_run = Experiment(ws, 'file-dataset-partition').submit(pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_run.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## View the prediction results\n",
"In the total_file_size.py file above you can see that the ResultList with the filename and the prediction result gets returned. These are written to the DataStore specified in the PipelineData object as the output data, which in this case is called inferences. This containers the outputs from all of the worker nodes used in the compute cluster. You can download this data to view the results ... below just filters to the first 10 rows"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import tempfile\n",
"\n",
"batch_run = pipeline_run.find_step_run(parallel_run_step.name)[0]\n",
"batch_output = batch_run.get_output_data(output_dir.name)\n",
"\n",
"target_dir = tempfile.mkdtemp()\n",
"batch_output.download(local_path=target_dir)\n",
"result_file = os.path.join(target_dir, batch_output.path_on_datastore, parallel_run_config.append_row_file_name)\n",
"\n",
"df = pd.read_csv(result_file, delimiter=\",\", header=None)\n",
"df.columns = [\"File Name\", \"File Size\", \"Ratio of Size in Partition\", \"user\", \"genres\", \"Total File Size of Partition\"]\n",
"print(\"Prediction has\", df.shape[0], \"rows\")\n",
"df.head(10)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"authors": [
{
"name": "pansav"
},
{
"name": "tracych"
},
{
"name": "migu"
}
],
"category": "Other notebooks",
"compute": [
"AML Compute"
],
"datasets": [
"None"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"None"
],
"friendly_name": "Batch inferencing file data partitioned by folder using ParallelRunStep",
"index_order": 1,
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -0,0 +1,7 @@
name: file-dataset-partition-per-folder
dependencies:
- pip:
- azureml-sdk
- azureml-pipeline-steps
- azureml-widgets
- pandas

View File

@@ -0,0 +1,427 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-partition-per-column.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Using Azure Machine Learning Pipelines for Batch Inference for tabular input partitioned by column value\n",
"\n",
"In this notebook, we will demonstrate how to make predictions on large quantities of data asynchronously using the ML pipelines with Azure Machine Learning. Batch inference (or batch scoring) provides cost-effective inference, with unparalleled throughput for asynchronous applications. Batch prediction pipelines can scale to perform inference on terabytes of production data. Batch prediction is optimized for high throughput, fire-and-forget predictions for a large collection of data.\n",
"\n",
"> **Tip**\n",
"If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction.\n",
"\n",
"This example will create a partitioned tabular dataset by splitting the rows in a large csv file by its value on specified column. Each partition will form up a mini-batch in the parallel processing procedure.\n",
"\n",
"The outline of this notebook is as follows:\n",
"\n",
"- Create a tabular dataset partitioned by value on specified column.\n",
"- Do batch inference on the dataset with each mini-batch corresponds to one partition.\n",
"\n",
"## Prerequisites\n",
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the configuration Notebook located at https://github.com/Azure/MachineLearningNotebooks first. This sets you up with a working config file that has information on your workspace, subscription id, etc. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Connect to workspace"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.workspace import Workspace\n",
"ws = Workspace.from_config()\n",
"print('Workspace name: ' + ws.name, \n",
" 'Azure region: ' + ws.location, \n",
" 'Subscription id: ' + ws.subscription_id, \n",
" 'Resource group: ' + ws.resource_group, sep = '\\n')\n",
"\n",
"datastore = ws.get_default_datastore()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"print(azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Download OJ sales data from opendataset url"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"\n",
"oj_sales_path = \"./oj.csv\"\n",
"r = requests.get(\"http://www.cs.unitn.it/~taufer/Data/oj.csv\")\n",
"open(oj_sales_path, \"wb\").write(r.content)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Upload OJ sales data to datastore"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"datastore.upload_files([oj_sales_path], \".\", \"oj_sales_data\", overwrite=True, show_progress=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create tabular dataset\n",
"Create normal tabular dataset"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"\n",
"dataset = Dataset.Tabular.from_delimited_files(path=(datastore, 'oj_sales_data/*.csv'))\n",
"print(dataset.to_pandas_dataframe())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Partition the tabular dataset\n",
"Partition the dataset by column 'store' and 'brand'. You can get a partition of data by specifying the value of one or more partition keys. E.g., by specifying `store=1000 and brand='tropicana'`, you can get all the rows that matches this condition in the dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"partitioned_dataset = dataset.partition_by(partition_keys=['store', 'brand'], target=(datastore, \"partition_by_key_res\"), name=\"partitioned_oj_data\")\n",
"partitioned_dataset.partition_keys"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create or Attach existing compute resource"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
"\n",
"# choose a name for your cluster\n",
"compute_name = os.environ.get(\"AML_COMPUTE_CLUSTER_NAME\", \"cpu-cluster\")\n",
"compute_min_nodes = os.environ.get(\"AML_COMPUTE_CLUSTER_MIN_NODES\", 0)\n",
"compute_max_nodes = os.environ.get(\"AML_COMPUTE_CLUSTER_MAX_NODES\", 2)\n",
"\n",
"# This example uses CPU VM. For using GPU VM, set SKU to STANDARD_NC6\n",
"vm_size = os.environ.get(\"AML_COMPUTE_CLUSTER_SKU\", \"STANDARD_D2_V2\")\n",
"\n",
"\n",
"if compute_name in ws.compute_targets:\n",
" compute_target = ws.compute_targets[compute_name]\n",
" if compute_target and type(compute_target) is AmlCompute:\n",
" print('found compute target. just use it. ' + compute_name)\n",
"else:\n",
" print('creating a new compute target...')\n",
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = vm_size,\n",
" min_nodes = compute_min_nodes, \n",
" max_nodes = compute_max_nodes)\n",
"\n",
" # create the cluster\n",
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
" \n",
" # can poll for a minimum number of nodes and for a specific timeout. \n",
" # if no min node count is provided it will use the scale settings for the cluster\n",
" compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
" \n",
" # For a more detailed view of current AmlCompute status, use get_status()\n",
" print(compute_target.get_status().serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Intermediate/Output Data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline, PipelineData\n",
"\n",
"output_dir = PipelineData(name=\"inferences\", datastore=datastore)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Calculate total revenue of each mini-batch partitioned by dataset partition key(s)\n",
"The script sum up the total revenue of a mini-batch."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"scripts_folder = \"Code\"\n",
"script_file = \"total_income.py\"\n",
"\n",
"# peek at contents\n",
"with open(os.path.join(scripts_folder, script_file)) as inference_file:\n",
" print(inference_file.read())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Build and run the batch inference pipeline\n",
"### Specify the environment to run the script\n",
"You would need to specify the required private azureml packages in dependencies. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Environment\n",
"from azureml.core.runconfig import CondaDependencies, DEFAULT_CPU_IMAGE\n",
"\n",
"batch_conda_deps = CondaDependencies.create(pip_packages=[\"azureml-core\", \"azureml-dataset-runtime[fuse,pandas]\"])\n",
"batch_env = Environment(name=\"batch_environment\")\n",
"batch_env.python.conda_dependencies = batch_conda_deps\n",
"batch_env.docker.base_image = DEFAULT_CPU_IMAGE"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create the configuration to wrap the inference script\n",
"The parameter `partition_keys` is a list containing a subset of the dataset partition keys, specifying how is the input dataset partitioned. Each and every possible combination of values of partition_keys will form up a mini-batch. E.g., by specifying `partition_keys=['store', 'brand']` will result in mini-batches like `store=1000 && brand=tropicana`, `store=1000 && brand=dominicks`, `store=1001 && brand=dominicks`, ..."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.steps import ParallelRunStep, ParallelRunConfig\n",
"\n",
"# In a real-world scenario, you'll want to shape your process per node and nodes to fit your problem domain.\n",
"parallel_run_config = ParallelRunConfig(\n",
" source_directory=scripts_folder,\n",
" entry_script=script_file, # the user script to run against each input\n",
" partition_keys=['store', 'brand'],\n",
" error_threshold=5,\n",
" output_action='append_row',\n",
" append_row_file_name=\"revenue_outputs.txt\",\n",
" environment=batch_env,\n",
" compute_target=compute_target, \n",
" node_count=2,\n",
" run_invocation_timeout=600\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create the pipeline step"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"parallel_run_step = ParallelRunStep(\n",
" name='summarize-revenue',\n",
" inputs=[partitioned_dataset.as_named_input(\"partitioned_tabular_input\")],\n",
" output=output_dir,\n",
" parallel_run_config=parallel_run_config,\n",
" allow_reuse=False\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Run the pipeline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"from azureml.pipeline.core import Pipeline\n",
"\n",
"pipeline = Pipeline(workspace=ws, steps=[parallel_run_step])\n",
"\n",
"pipeline_run = Experiment(ws, 'tabular-dataset-partition').submit(pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_run.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## View the prediction results\n",
"In the total_income.py file above you can see that the ResultList with the filename and the prediction result gets returned. These are written to the DataStore specified in the PipelineData object as the output data, which in this case is called inferences. This containers the outputs from all of the worker nodes used in the compute cluster. You can download this data to view the results ... below just filters to the first 10 rows"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import tempfile\n",
"\n",
"batch_run = pipeline_run.find_step_run(parallel_run_step.name)[0]\n",
"batch_output = batch_run.get_output_data(output_dir.name)\n",
"\n",
"target_dir = tempfile.mkdtemp()\n",
"batch_output.download(local_path=target_dir)\n",
"result_file = os.path.join(target_dir, batch_output.path_on_datastore, parallel_run_config.append_row_file_name)\n",
"\n",
"df = pd.read_csv(result_file, delimiter=\" \", header=None)\n",
"\n",
"df.columns = [\"week\", \"logmove\", \"feat\", \"price\", \"AGE60\", \"EDUC\", \"ETHNIC\", \"INCOME\", \"HHLARGE\", \"WORKWOM\", \"HVAL150\", \"SSTRDIST\", \"SSTRVOL\", \"CPDIST5\", \"CPWVOL5\", \"store\", \"brand\", \"total_income\"]\n",
"print(\"Prediction has \", df.shape[0], \" rows\")\n",
"df.head(10)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"authors": [
{
"name": "pansav"
},
{
"name": "tracych"
},
{
"name": "migu"
}
],
"category": "Other notebooks",
"compute": [
"AML Compute"
],
"datasets": [
"OJ Sales Data"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"None"
],
"friendly_name": "Batch inferencing OJ Sales Data partitioned by column using ParallelRunStep",
"index_order": 1,
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -0,0 +1,7 @@
name: tabular-dataset-partition-per-column
dependencies:
- pip:
- azureml-sdk
- azureml-pipeline-steps
- azureml-widgets
- pandas

View File

@@ -95,7 +95,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -8,5 +8,5 @@ dependencies:
- matplotlib
- azureml-dataset-runtime
- ipywidgets
- raiwidgets~=0.11.0
- raiwidgets~=0.15.0
- liac-arff

View File

@@ -100,7 +100,7 @@
"\n",
"# Check core SDK version number\n",
"\n",
"print(\"This notebook was created using SDK version 1.35.0, you are currently running version\", azureml.core.VERSION)"
"print(\"This notebook was created using SDK version 1.37.0, you are currently running version\", azureml.core.VERSION)"
]
},
{

View File

@@ -28,6 +28,7 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an
| [Classification of credit card fraudulent transactions using Automated ML](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/experimental/classification-credit-card-fraud-local-managed/auto-ml-classification-credit-card-fraud-local-managed.ipynb) | Classification | Creditcard | AML Compute | None | None | AutomatedML |
| [Automated ML run with featurization and model explainability.](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb) | Regression | MachineData | AML | ACI | None | featurization, explainability, remote_run, AutomatedML |
| [Automated ML run with featurization and model explainability.](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/responsible-ai/auto-ml-regression-responsibleai/auto-ml-regression-responsibleai.ipynb) | Regression | MachineData | AML | ACI | None | featurization, explainability, remote_run, AutomatedML |
| [auto-ml-forecasting-backtest-single-model](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-backtest-single-model/auto-ml-forecasting-backtest-single-model.ipynb) | | None | Remote | None | Azure ML AutoML | |
| :star:[Azure Machine Learning Pipeline with DataTranferStep](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-data-transfer.ipynb) | Demonstrates the use of DataTranferStep | Custom | ADF | None | Azure ML | None |
| [Getting Started with Azure Machine Learning Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb) | Getting Started notebook for ANML Pipelines | Custom | AML Compute | None | Azure ML | None |
| [Azure Machine Learning Pipeline with AzureBatchStep](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-azurebatch-to-run-a-windows-executable.ipynb) | Demonstrates the use of AzureBatchStep | Custom | Azure Batch | None | Azure ML | None |
@@ -106,6 +107,7 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an
| [azure-ml-with-nvidia-rapids](https://github.com/Azure/MachineLearningNotebooks/blob/master//contrib/RAPIDS/azure-ml-with-nvidia-rapids.ipynb) | | | | | | |
| [auto-ml-continuous-retraining](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb) | | | | | | |
| [auto-ml-regression-model-proxy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/experimental/regression-model-proxy/auto-ml-regression-model-proxy.ipynb) | | | | | | |
| [auto-ml-forecasting-backtest-many-models](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-backtest-many-models/auto-ml-forecasting-backtest-many-models.ipynb) | | | | | | |
| [auto-ml-forecasting-beer-remote](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb) | | | | | | |
| [auto-ml-forecasting-energy-demand](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb) | | | | | | |
| [auto-ml-forecasting-hierarchical-timeseries](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.ipynb) | | | | | | |

View File

@@ -102,7 +102,7 @@
"source": [
"import azureml.core\n",
"\n",
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -213,10 +213,7 @@
"* You do not have permission to create a resource group if it's non-existing.\n",
"* You are not a subscription owner or contributor and no Azure ML workspaces have ever been created in this subscription\n",
"\n",
"If workspace creation fails, please work with your IT admin to provide you with the appropriate permissions or to provision the required resources.\n",
"\n",
"**Note**: A Basic workspace is created by default. If you would like to create an Enterprise workspace, please specify sku = 'enterprise'.\n",
"Please visit our [pricing page](https://azure.microsoft.com/en-us/pricing/details/machine-learning/) for more details on our Enterprise edition.\n"
"If workspace creation fails, please work with your IT admin to provide you with the appropriate permissions or to provision the required resources.\n"
]
},
{
@@ -237,7 +234,6 @@
" resource_group = resource_group, \n",
" location = workspace_region,\n",
" create_resource_group = True,\n",
" sku = 'basic',\n",
" exist_ok = True)\n",
"ws.get_details()\n",
"\n",

View File

@@ -145,7 +145,7 @@
"source": [
"from sklearn.linear_model import Ridge\n",
"from sklearn.metrics import mean_squared_error\n",
"from sklearn.externals import joblib\n",
"import joblib\n",
"import math\n",
"\n",
"alphas = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\n",

View File

@@ -156,7 +156,7 @@
"\n",
"### Create scoring script\n",
"\n",
"Create the scoring script, called score.py, used by the web service call to show how to use the model.\n",
"Create the scoring script, called score_encrypted.py, used by the web service call to show how to use the model.\n",
"\n",
"You must include two required functions into the scoring script:\n",
"* The `init()` function, which typically loads the model into a global object. This function is run only once when the Docker container is started. \n",
@@ -171,7 +171,7 @@
"metadata": {},
"outputs": [],
"source": [
"%%writefile score.py\n",
"%%writefile score_encrypted.py\n",
"import json\n",
"import os\n",
"import pickle\n",
@@ -252,7 +252,7 @@
"\n",
"1. Create environment object containing dependencies needed by the model using the environment file (`myenv.yml`)\n",
"1. Create inference configuration necessary to deploy the model as a web service using:\n",
" * The scoring file (`score.py`)\n",
" * The scoring file (`score_encrypted.py`)\n",
" * envrionment object created in previous step\n",
"1. Deploy the model to the ACI container.\n",
"1. Get the web service HTTP endpoint."
@@ -283,7 +283,7 @@
"model = Model(ws, 'sklearn_mnist')\n",
"\n",
"myenv = Environment.get(workspace=ws, name=\"tutorial-encryption-env\")\n",
"inference_config = InferenceConfig(entry_script=\"score.py\", environment=myenv)\n",
"inference_config = InferenceConfig(entry_script=\"score_encrypted.py\", environment=myenv)\n",
"\n",
"service_name = 'sklearn-mnist-svc-' + str(uuid.uuid4())[:4]\n",
"service = Model.deploy(workspace=ws, \n",