mirror of https://github.com/Azure/MachineLearningNotebooks.git
Merge pull request #1629 from Azure/release_update/Release-116
Update samples from Release as a part of SDK release 1.36.0
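Every sample notebook carries the same version-check cell, so the bump from 1.35.0 to 1.36.0 recurs throughout the diff below. As a rough sketch of what that cell does at runtime (assuming only that the azureml-core package is installed):

    import azureml.core

    # The notebook records the SDK version it was authored against and prints
    # the version actually installed, so any mismatch is visible up front.
    print("This notebook was created using version 1.36.0 of the Azure ML SDK")
    print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")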
@@ -103,7 +103,7 @@
 "source": [
 "import azureml.core\n",
 "\n",
-"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.36.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -6,4 +6,4 @@ dependencies:
 - fairlearn>=0.6.2
 - joblib
 - liac-arff
-- raiwidgets~=0.11.0
+- raiwidgets~=0.13.0

@@ -6,4 +6,4 @@ dependencies:
 - fairlearn>=0.6.2
 - joblib
 - liac-arff
-- raiwidgets~=0.11.0
+- raiwidgets~=0.13.0
@@ -22,9 +22,9 @@ dependencies:
 
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.35.0
+  - azureml-widgets~=1.36.0
   - pytorch-transformers==1.0.0
   - spacy==2.1.8
   - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlresources-prod.azureedge.net/validated-requirements/1.35.0/validated_win32_requirements.txt [--no-deps]
+  - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.36.0/validated_win32_requirements.txt [--no-deps]
   - arch==4.14

@@ -22,9 +22,9 @@ dependencies:
 
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.35.0
+  - azureml-widgets~=1.36.0
   - pytorch-transformers==1.0.0
   - spacy==2.1.8
   - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlresources-prod.azureedge.net/validated-requirements/1.35.0/validated_linux_requirements.txt [--no-deps]
+  - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.36.0/validated_linux_requirements.txt [--no-deps]
   - arch==4.14

@@ -23,9 +23,9 @@ dependencies:
 
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.35.0
+  - azureml-widgets~=1.36.0
   - pytorch-transformers==1.0.0
   - spacy==2.1.8
   - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlresources-prod.azureedge.net/validated-requirements/1.35.0/validated_darwin_requirements.txt [--no-deps]
+  - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.36.0/validated_darwin_requirements.txt [--no-deps]
   - arch==4.14
@@ -104,7 +104,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.36.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -93,7 +93,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.36.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -96,7 +96,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.36.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -81,7 +81,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.36.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -92,7 +92,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.36.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -91,7 +91,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.36.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -113,7 +113,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.36.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -139,18 +139,18 @@
 "ws = Workspace.from_config()\n",
 "\n",
 "# choose a name for the run history container in the workspace\n",
-"experiment_name = 'beer-remote-cpu'\n",
+"experiment_name = \"beer-remote-cpu\"\n",
 "\n",
 "experiment = Experiment(ws, experiment_name)\n",
 "\n",
 "output = {}\n",
-"output['Subscription ID'] = ws.subscription_id\n",
-"output['Workspace'] = ws.name\n",
-"output['Resource Group'] = ws.resource_group\n",
-"output['Location'] = ws.location\n",
-"output['Run History Name'] = experiment_name\n",
-"pd.set_option('display.max_colwidth', -1)\n",
-"outputDf = pd.DataFrame(data = output, index = [''])\n",
+"output[\"Subscription ID\"] = ws.subscription_id\n",
+"output[\"Workspace\"] = ws.name\n",
+"output[\"Resource Group\"] = ws.resource_group\n",
+"output[\"Location\"] = ws.location\n",
+"output[\"Run History Name\"] = experiment_name\n",
+"pd.set_option(\"display.max_colwidth\", -1)\n",
+"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
 "outputDf.T"
 ]
 },
@@ -185,10 +185,11 @@
 "# Verify that cluster does not exist already\n",
 "try:\n",
 "    compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
-"    print('Found existing cluster, use it.')\n",
+"    print(\"Found existing cluster, use it.\")\n",
 "except ComputeTargetException:\n",
-"    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
-"                                                           max_nodes=4)\n",
+"    compute_config = AmlCompute.provisioning_configuration(\n",
+"        vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
+"    )\n",
 "    compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
 "\n",
 "compute_target.wait_for_completion(show_output=True)"
@@ -245,17 +246,21 @@
 "plt.tight_layout()\n",
 "\n",
 "plt.subplot(2, 1, 1)\n",
-"plt.title('Beer Production By Year')\n",
-"df = pd.read_csv(\"Beer_no_valid_split_train.csv\", parse_dates=True, index_col= 'DATE').drop(columns='grain')\n",
-"test_df = pd.read_csv(\"Beer_no_valid_split_test.csv\", parse_dates=True, index_col= 'DATE').drop(columns='grain')\n",
+"plt.title(\"Beer Production By Year\")\n",
+"df = pd.read_csv(\n",
+"    \"Beer_no_valid_split_train.csv\", parse_dates=True, index_col=\"DATE\"\n",
+").drop(columns=\"grain\")\n",
+"test_df = pd.read_csv(\n",
+"    \"Beer_no_valid_split_test.csv\", parse_dates=True, index_col=\"DATE\"\n",
+").drop(columns=\"grain\")\n",
 "plt.plot(df)\n",
 "\n",
 "plt.subplot(2, 1, 2)\n",
-"plt.title('Beer Production By Month')\n",
+"plt.title(\"Beer Production By Month\")\n",
 "groups = df.groupby(df.index.month)\n",
 "months = concat([DataFrame(x[1].values) for x in groups], axis=1)\n",
 "months = DataFrame(months)\n",
-"months.columns = range(1,13)\n",
+"months.columns = range(1, 13)\n",
 "months.boxplot()\n",
 "\n",
 "plt.show()"
@@ -270,10 +275,10 @@
 },
 "outputs": [],
 "source": [
-"target_column_name = 'BeerProduction'\n",
-"time_column_name = 'DATE'\n",
+"target_column_name = \"BeerProduction\"\n",
+"time_column_name = \"DATE\"\n",
 "time_series_id_column_names = []\n",
-"freq = 'M' #Monthly data"
+"freq = \"M\"  # Monthly data"
 ]
 },
 {
@@ -301,14 +306,36 @@
 "test_df.to_csv(\"test.csv\")\n",
 "\n",
 "datastore = ws.get_default_datastore()\n",
-"datastore.upload_files(files = ['./train.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
-"datastore.upload_files(files = ['./valid.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
-"datastore.upload_files(files = ['./test.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)\n",
+"datastore.upload_files(\n",
+"    files=[\"./train.csv\"],\n",
+"    target_path=\"beer-dataset/tabular/\",\n",
+"    overwrite=True,\n",
+"    show_progress=True,\n",
+")\n",
+"datastore.upload_files(\n",
+"    files=[\"./valid.csv\"],\n",
+"    target_path=\"beer-dataset/tabular/\",\n",
+"    overwrite=True,\n",
+"    show_progress=True,\n",
+")\n",
+"datastore.upload_files(\n",
+"    files=[\"./test.csv\"],\n",
+"    target_path=\"beer-dataset/tabular/\",\n",
+"    overwrite=True,\n",
+"    show_progress=True,\n",
+")\n",
 "\n",
 "from azureml.core import Dataset\n",
-"train_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/train.csv')])\n",
-"valid_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/valid.csv')])\n",
-"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/test.csv')])"
+"\n",
+"train_dataset = Dataset.Tabular.from_delimited_files(\n",
+"    path=[(datastore, \"beer-dataset/tabular/train.csv\")]\n",
+")\n",
+"valid_dataset = Dataset.Tabular.from_delimited_files(\n",
+"    path=[(datastore, \"beer-dataset/tabular/valid.csv\")]\n",
+")\n",
+"test_dataset = Dataset.Tabular.from_delimited_files(\n",
+"    path=[(datastore, \"beer-dataset/tabular/test.csv\")]\n",
+")"
 ]
 },
 {
@@ -366,26 +393,29 @@
 "outputs": [],
 "source": [
 "from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
 "\n",
 "forecasting_parameters = ForecastingParameters(\n",
 "    time_column_name=time_column_name,\n",
 "    forecast_horizon=forecast_horizon,\n",
-"    freq='MS' # Set the forecast frequency to be monthly (start of the month)\n",
+"    freq=\"MS\",  # Set the forecast frequency to be monthly (start of the month)\n",
 ")\n",
 "\n",
 "# We will disable the enable_early_stopping flag to ensure the DNN model is recommended for demonstration purpose.\n",
-"automl_config = AutoMLConfig(task='forecasting',\n",
-"                             primary_metric='normalized_root_mean_squared_error',\n",
-"                             experiment_timeout_hours = 1,\n",
+"automl_config = AutoMLConfig(\n",
+"    task=\"forecasting\",\n",
+"    primary_metric=\"normalized_root_mean_squared_error\",\n",
+"    experiment_timeout_hours=1,\n",
 "    training_data=train_dataset,\n",
 "    label_column_name=target_column_name,\n",
-"    validation_data=valid_dataset, \n",
+"    validation_data=valid_dataset,\n",
 "    verbosity=logging.INFO,\n",
 "    compute_target=compute_target,\n",
 "    max_concurrent_iterations=4,\n",
 "    max_cores_per_iteration=-1,\n",
 "    enable_dnn=True,\n",
 "    enable_early_stopping=False,\n",
-"    forecasting_parameters=forecasting_parameters)"
+"    forecasting_parameters=forecasting_parameters,\n",
+")"
 ]
 },
 {
@@ -407,7 +437,7 @@
 },
 "outputs": [],
 "source": [
-"remote_run = experiment.submit(automl_config, show_output= True)"
+"remote_run = experiment.submit(automl_config, show_output=True)"
 ]
 },
 {
@@ -455,6 +485,7 @@
 "outputs": [],
 "source": [
 "from helper import get_result_df\n",
+"\n",
 "summary_df = get_result_df(remote_run)\n",
 "summary_df"
 ]
@@ -470,11 +501,12 @@
 "source": [
 "from azureml.core.run import Run\n",
 "from azureml.widgets import RunDetails\n",
-"forecast_model = 'TCNForecaster'\n",
-"if not forecast_model in summary_df['run_id']:\n",
-"    forecast_model = 'ForecastTCN'\n",
-"    \n",
-"best_dnn_run_id = summary_df['run_id'][forecast_model]\n",
 "\n",
+"forecast_model = \"TCNForecaster\"\n",
+"if not forecast_model in summary_df[\"run_id\"]:\n",
+"    forecast_model = \"ForecastTCN\"\n",
+"\n",
+"best_dnn_run_id = summary_df[\"run_id\"][forecast_model]\n",
 "best_dnn_run = Run(experiment, best_dnn_run_id)"
 ]
 },
@@ -488,7 +520,7 @@
 "outputs": [],
 "source": [
 "best_dnn_run.parent\n",
-"RunDetails(best_dnn_run.parent).show() "
+"RunDetails(best_dnn_run.parent).show()"
 ]
 },
 {
@@ -501,7 +533,7 @@
 "outputs": [],
 "source": [
 "best_dnn_run\n",
-"RunDetails(best_dnn_run).show() "
+"RunDetails(best_dnn_run).show()"
 ]
 },
 {
@@ -536,7 +568,10 @@
 "outputs": [],
 "source": [
 "from azureml.core import Dataset\n",
-"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/test.csv')])\n",
+"\n",
+"test_dataset = Dataset.Tabular.from_delimited_files(\n",
+"    path=[(datastore, \"beer-dataset/tabular/test.csv\")]\n",
+")\n",
 "# preview the first 3 rows of the dataset\n",
 "test_dataset.take(5).to_pandas_dataframe()"
 ]
@@ -547,7 +582,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"compute_target = ws.compute_targets['beer-cluster']\n",
+"compute_target = ws.compute_targets[\"beer-cluster\"]\n",
 "test_experiment = Experiment(ws, experiment_name + \"_test\")"
 ]
 },
@@ -563,9 +598,9 @@
 "import os\n",
 "import shutil\n",
 "\n",
-"script_folder = os.path.join(os.getcwd(), 'inference')\n",
+"script_folder = os.path.join(os.getcwd(), \"inference\")\n",
 "os.makedirs(script_folder, exist_ok=True)\n",
-"shutil.copy('infer.py', script_folder)"
+"shutil.copy(\"infer.py\", script_folder)"
 ]
 },
 {
@@ -576,8 +611,18 @@
 "source": [
 "from helper import run_inference\n",
 "\n",
-"test_run = run_inference(test_experiment, compute_target, script_folder, best_dnn_run, test_dataset, valid_dataset, forecast_horizon,\n",
-"                         target_column_name, time_column_name, freq)"
+"test_run = run_inference(\n",
+"    test_experiment,\n",
+"    compute_target,\n",
+"    script_folder,\n",
+"    best_dnn_run,\n",
+"    test_dataset,\n",
+"    valid_dataset,\n",
+"    forecast_horizon,\n",
+"    target_column_name,\n",
+"    time_column_name,\n",
+"    freq,\n",
+")"
 ]
 },
 {
@@ -597,8 +642,19 @@
 "source": [
 "from helper import run_multiple_inferences\n",
 "\n",
-"summary_df = run_multiple_inferences(summary_df, experiment, test_experiment, compute_target, script_folder, test_dataset,\n",
-"                                     valid_dataset, forecast_horizon, target_column_name, time_column_name, freq)"
+"summary_df = run_multiple_inferences(\n",
+"    summary_df,\n",
+"    experiment,\n",
+"    test_experiment,\n",
+"    compute_target,\n",
+"    script_folder,\n",
+"    test_dataset,\n",
+"    valid_dataset,\n",
+"    forecast_horizon,\n",
+"    target_column_name,\n",
+"    time_column_name,\n",
+"    freq,\n",
+")"
 ]
 },
 {
@@ -618,7 +674,7 @@
 "    test_run = Run(test_experiment, test_run_id)\n",
 "    test_run.wait_for_completion()\n",
 "    test_score = test_run.get_metrics()[run_summary.primary_metric]\n",
-"    summary_df.loc[summary_df.run_id == run_id, 'Test Score'] = test_score\n",
+"    summary_df.loc[summary_df.run_id == run_id, \"Test Score\"] = test_score\n",
 "    print(\"Test Score: \", test_score)"
 ]
 },
@@ -6,120 +6,158 @@ from azureml.core.run import Run
 from azureml.automl.core.shared import constants
 
 
-def split_fraction_by_grain(df, fraction, time_column_name,
-                            grain_column_names=None):
+def split_fraction_by_grain(df, fraction, time_column_name, grain_column_names=None):
     if not grain_column_names:
-        df['tmp_grain_column'] = 'grain'
-        grain_column_names = ['tmp_grain_column']
+        df["tmp_grain_column"] = "grain"
+        grain_column_names = ["tmp_grain_column"]
 
     """Group df by grain and split on last n rows for each group."""
-    df_grouped = (df.sort_values(time_column_name)
-                  .groupby(grain_column_names, group_keys=False))
+    df_grouped = df.sort_values(time_column_name).groupby(
+        grain_column_names, group_keys=False
+    )
 
-    df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-int(len(dfg) *
-                               fraction)] if fraction > 0 else dfg)
+    df_head = df_grouped.apply(
+        lambda dfg: dfg.iloc[: -int(len(dfg) * fraction)] if fraction > 0 else dfg
+    )
 
-    df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-int(len(dfg) *
-                               fraction):] if fraction > 0 else dfg[:0])
+    df_tail = df_grouped.apply(
+        lambda dfg: dfg.iloc[-int(len(dfg) * fraction) :] if fraction > 0 else dfg[:0]
+    )
 
-    if 'tmp_grain_column' in grain_column_names:
+    if "tmp_grain_column" in grain_column_names:
         for df2 in (df, df_head, df_tail):
-            df2.drop('tmp_grain_column', axis=1, inplace=True)
+            df2.drop("tmp_grain_column", axis=1, inplace=True)
 
-        grain_column_names.remove('tmp_grain_column')
+        grain_column_names.remove("tmp_grain_column")
 
     return df_head, df_tail
 
 
-def split_full_for_forecasting(df, time_column_name,
-                               grain_column_names=None, test_split=0.2):
+def split_full_for_forecasting(
+    df, time_column_name, grain_column_names=None, test_split=0.2
+):
     index_name = df.index.name
 
     # Assumes that there isn't already a column called tmpindex
 
-    df['tmpindex'] = df.index
+    df["tmpindex"] = df.index
 
     train_df, test_df = split_fraction_by_grain(
-        df, test_split, time_column_name, grain_column_names)
+        df, test_split, time_column_name, grain_column_names
+    )
 
-    train_df = train_df.set_index('tmpindex')
+    train_df = train_df.set_index("tmpindex")
     train_df.index.name = index_name
 
-    test_df = test_df.set_index('tmpindex')
+    test_df = test_df.set_index("tmpindex")
     test_df.index.name = index_name
 
-    df.drop('tmpindex', axis=1, inplace=True)
+    df.drop("tmpindex", axis=1, inplace=True)
 
     return train_df, test_df
 
 
 def get_result_df(remote_run):
     children = list(remote_run.get_children(recursive=True))
-    summary_df = pd.DataFrame(index=['run_id', 'run_algorithm',
-                                     'primary_metric', 'Score'])
+    summary_df = pd.DataFrame(
+        index=["run_id", "run_algorithm", "primary_metric", "Score"]
+    )
     goal_minimize = False
     for run in children:
-        if run.get_status().lower() == constants.RunState.COMPLETE_RUN \
-           and 'run_algorithm' in run.properties and 'score' in run.properties:
+        if (
+            run.get_status().lower() == constants.RunState.COMPLETE_RUN
+            and "run_algorithm" in run.properties
+            and "score" in run.properties
+        ):
             # We only count in the completed child runs.
-            summary_df[run.id] = [run.id, run.properties['run_algorithm'],
-                                  run.properties['primary_metric'],
-                                  float(run.properties['score'])]
-            if ('goal' in run.properties):
-                goal_minimize = run.properties['goal'].split('_')[-1] == 'min'
+            summary_df[run.id] = [
+                run.id,
+                run.properties["run_algorithm"],
+                run.properties["primary_metric"],
+                float(run.properties["score"]),
+            ]
+            if "goal" in run.properties:
+                goal_minimize = run.properties["goal"].split("_")[-1] == "min"
 
     summary_df = summary_df.T.sort_values(
-        'Score',
-        ascending=goal_minimize).drop_duplicates(['run_algorithm'])
-    summary_df = summary_df.set_index('run_algorithm')
+        "Score", ascending=goal_minimize
+    ).drop_duplicates(["run_algorithm"])
+    summary_df = summary_df.set_index("run_algorithm")
    return summary_df
 
 
-def run_inference(test_experiment, compute_target, script_folder, train_run,
-                  test_dataset, lookback_dataset, max_horizon,
-                  target_column_name, time_column_name, freq):
-    model_base_name = 'model.pkl'
-    if 'model_data_location' in train_run.properties:
-        model_location = train_run.properties['model_data_location']
-        _, model_base_name = model_location.rsplit('/', 1)
-        train_run.download_file('outputs/{}'.format(model_base_name), 'inference/{}'.format(model_base_name))
-    train_run.download_file('outputs/conda_env_v_1_0_0.yml', 'inference/condafile.yml')
+def run_inference(
+    test_experiment,
+    compute_target,
+    script_folder,
+    train_run,
+    test_dataset,
+    lookback_dataset,
+    max_horizon,
+    target_column_name,
+    time_column_name,
+    freq,
+):
+    model_base_name = "model.pkl"
+    if "model_data_location" in train_run.properties:
+        model_location = train_run.properties["model_data_location"]
+        _, model_base_name = model_location.rsplit("/", 1)
+        train_run.download_file(
+            "outputs/{}".format(model_base_name), "inference/{}".format(model_base_name)
+        )
+    train_run.download_file("outputs/conda_env_v_1_0_0.yml", "inference/condafile.yml")
 
     inference_env = Environment("myenv")
     inference_env.docker.enabled = True
     inference_env.python.conda_dependencies = CondaDependencies(
-        conda_dependencies_file_path='inference/condafile.yml')
+        conda_dependencies_file_path="inference/condafile.yml"
+    )
 
-    est = Estimator(source_directory=script_folder,
-                    entry_script='infer.py',
+    est = Estimator(
+        source_directory=script_folder,
+        entry_script="infer.py",
         script_params={
-            '--max_horizon': max_horizon,
-            '--target_column_name': target_column_name,
-            '--time_column_name': time_column_name,
-            '--frequency': freq,
-            '--model_path': model_base_name
+            "--max_horizon": max_horizon,
+            "--target_column_name": target_column_name,
+            "--time_column_name": time_column_name,
+            "--frequency": freq,
+            "--model_path": model_base_name,
        },
-        inputs=[test_dataset.as_named_input('test_data'),
-                lookback_dataset.as_named_input('lookback_data')],
+        inputs=[
+            test_dataset.as_named_input("test_data"),
+            lookback_dataset.as_named_input("lookback_data"),
+        ],
         compute_target=compute_target,
-        environment_definition=inference_env)
+        environment_definition=inference_env,
+    )
 
     run = test_experiment.submit(
-        est, tags={
-            'training_run_id': train_run.id,
-            'run_algorithm': train_run.properties['run_algorithm'],
-            'valid_score': train_run.properties['score'],
-            'primary_metric': train_run.properties['primary_metric']
-        })
+        est,
+        tags={
+            "training_run_id": train_run.id,
+            "run_algorithm": train_run.properties["run_algorithm"],
+            "valid_score": train_run.properties["score"],
+            "primary_metric": train_run.properties["primary_metric"],
+        },
+    )
 
-    run.log("run_algorithm", run.tags['run_algorithm'])
+    run.log("run_algorithm", run.tags["run_algorithm"])
     return run
 
 
-def run_multiple_inferences(summary_df, train_experiment, test_experiment,
-                            compute_target, script_folder, test_dataset,
-                            lookback_dataset, max_horizon, target_column_name,
-                            time_column_name, freq):
+def run_multiple_inferences(
+    summary_df,
+    train_experiment,
+    test_experiment,
+    compute_target,
+    script_folder,
+    test_dataset,
+    lookback_dataset,
+    max_horizon,
+    target_column_name,
+    time_column_name,
+    freq,
+):
     for run_name, run_summary in summary_df.iterrows():
         print(run_name)
         print(run_summary)

@@ -127,12 +165,19 @@ def run_multiple_inferences(summary_df, train_experiment, test_experiment,
         train_run = Run(train_experiment, run_id)
 
         test_run = run_inference(
-            test_experiment, compute_target, script_folder, train_run,
-            test_dataset, lookback_dataset, max_horizon, target_column_name,
-            time_column_name, freq)
+            test_experiment,
+            compute_target,
+            script_folder,
+            train_run,
+            test_dataset,
+            lookback_dataset,
+            max_horizon,
+            target_column_name,
+            time_column_name,
+            freq,
+        )
 
         print(test_run)
-        summary_df.loc[summary_df.run_id == run_id,
-                       'test_run_id'] = test_run.id
+        summary_df.loc[summary_df.run_id == run_id, "test_run_id"] = test_run.id
 
     return summary_df
@@ -19,9 +19,14 @@ except ImportError:
     _torch_present = False
 
 
-def align_outputs(y_predicted, X_trans, X_test, y_test,
-                  predicted_column_name='predicted',
-                  horizon_colname='horizon_origin'):
+def align_outputs(
+    y_predicted,
+    X_trans,
+    X_test,
+    y_test,
+    predicted_column_name="predicted",
+    horizon_colname="horizon_origin",
+):
     """
     Demonstrates how to get the output aligned to the inputs
     using pandas indexes. Helps understand what happened if
@@ -33,9 +38,13 @@ def align_outputs(y_predicted, X_trans, X_test, y_test,
     * model was asked to predict past max_horizon -> increase max horizon
     * data at start of X_test was needed for lags -> provide previous periods
     """
-    if (horizon_colname in X_trans):
-        df_fcst = pd.DataFrame({predicted_column_name: y_predicted,
-                                horizon_colname: X_trans[horizon_colname]})
+    if horizon_colname in X_trans:
+        df_fcst = pd.DataFrame(
+            {
+                predicted_column_name: y_predicted,
+                horizon_colname: X_trans[horizon_colname],
+            }
+        )
     else:
         df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
 
@@ -48,20 +57,21 @@ def align_outputs(y_predicted, X_trans, X_test, y_test,
 
     # X_test_full's index does not include origin, so reset for merge
     df_fcst.reset_index(inplace=True)
-    X_test_full = X_test_full.reset_index().drop(columns='index')
-    together = df_fcst.merge(X_test_full, how='right')
+    X_test_full = X_test_full.reset_index().drop(columns="index")
+    together = df_fcst.merge(X_test_full, how="right")
 
     # drop rows where prediction or actuals are nan
     # happens because of missing actuals
     # or at edges of time due to lags/rolling windows
-    clean = together[together[[target_column_name,
-                               predicted_column_name]].notnull().all(axis=1)]
-    return (clean)
+    clean = together[
+        together[[target_column_name, predicted_column_name]].notnull().all(axis=1)
+    ]
+    return clean
 
 
-def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
-                                      max_horizon, X_lookback, y_lookback,
-                                      freq='D'):
+def do_rolling_forecast_with_lookback(
+    fitted_model, X_test, y_test, max_horizon, X_lookback, y_lookback, freq="D"
+):
     """
     Produce forecasts on a rolling origin over the given test set.
 
@@ -83,22 +93,28 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
         horizon_time = origin_time + max_horizon * to_offset(freq)
 
         # Extract test data from an expanding window up-to the horizon
-        expand_wind = (X[time_column_name] < horizon_time)
+        expand_wind = X[time_column_name] < horizon_time
         X_test_expand = X[expand_wind]
         y_query_expand = np.zeros(len(X_test_expand)).astype(np.float)
         y_query_expand.fill(np.NaN)
 
         if origin_time != X[time_column_name].min():
             # Set the context by including actuals up-to the origin time
-            test_context_expand_wind = (X[time_column_name] < origin_time)
-            context_expand_wind = (X_test_expand[time_column_name] < origin_time)
+            test_context_expand_wind = X[time_column_name] < origin_time
+            context_expand_wind = X_test_expand[time_column_name] < origin_time
             y_query_expand[context_expand_wind] = y[test_context_expand_wind]
 
         # Print some debug info
-        print("Horizon_time:", horizon_time,
-              " origin_time: ", origin_time,
-              " max_horizon: ", max_horizon,
-              " freq: ", freq)
+        print(
+            "Horizon_time:",
+            horizon_time,
+            " origin_time: ",
+            origin_time,
+            " max_horizon: ",
+            max_horizon,
+            " freq: ",
+            freq,
+        )
         print("expand_wind: ", expand_wind)
         print("y_query_expand")
         print(y_query_expand)
@@ -124,9 +140,14 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
         trans_tindex = X_trans.index.get_level_values(time_column_name)
         trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
         test_roll_wind = expand_wind & (X[time_column_name] >= origin_time)
-        df_list.append(align_outputs(
-            y_fcst[trans_roll_wind], X_trans[trans_roll_wind],
-            X[test_roll_wind], y[test_roll_wind]))
+        df_list.append(
+            align_outputs(
+                y_fcst[trans_roll_wind],
+                X_trans[trans_roll_wind],
+                X[test_roll_wind],
+                y[test_roll_wind],
+            )
+        )
 
         # Advance the origin time
         origin_time = horizon_time
@@ -134,7 +155,7 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
     return pd.concat(df_list, ignore_index=True)
 
 
-def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
+def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq="D"):
     """
     Produce forecasts on a rolling origin over the given test set.
 
@@ -153,23 +174,28 @@ def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
         horizon_time = origin_time + max_horizon * to_offset(freq)
 
         # Extract test data from an expanding window up-to the horizon
-        expand_wind = (X_test[time_column_name] < horizon_time)
+        expand_wind = X_test[time_column_name] < horizon_time
         X_test_expand = X_test[expand_wind]
         y_query_expand = np.zeros(len(X_test_expand)).astype(np.float)
         y_query_expand.fill(np.NaN)
 
         if origin_time != X_test[time_column_name].min():
             # Set the context by including actuals up-to the origin time
-            test_context_expand_wind = (X_test[time_column_name] < origin_time)
-            context_expand_wind = (X_test_expand[time_column_name] < origin_time)
-            y_query_expand[context_expand_wind] = y_test[
-                test_context_expand_wind]
+            test_context_expand_wind = X_test[time_column_name] < origin_time
+            context_expand_wind = X_test_expand[time_column_name] < origin_time
+            y_query_expand[context_expand_wind] = y_test[test_context_expand_wind]
 
         # Print some debug info
-        print("Horizon_time:", horizon_time,
-              " origin_time: ", origin_time,
-              " max_horizon: ", max_horizon,
-              " freq: ", freq)
+        print(
+            "Horizon_time:",
+            horizon_time,
+            " origin_time: ",
+            origin_time,
+            " max_horizon: ",
+            max_horizon,
+            " freq: ",
+            freq,
+        )
         print("expand_wind: ", expand_wind)
         print("y_query_expand")
         print(y_query_expand)
@@ -193,10 +219,14 @@ def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
         trans_tindex = X_trans.index.get_level_values(time_column_name)
         trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
         test_roll_wind = expand_wind & (X_test[time_column_name] >= origin_time)
-        df_list.append(align_outputs(y_fcst[trans_roll_wind],
-                                     X_trans[trans_roll_wind],
-                                     X_test[test_roll_wind],
-                                     y_test[test_roll_wind]))
+        df_list.append(
+            align_outputs(
+                y_fcst[trans_roll_wind],
+                X_trans[trans_roll_wind],
+                X_test[test_roll_wind],
+                y_test[test_roll_wind],
+            )
+        )
 
         # Advance the origin time
         origin_time = horizon_time
@@ -230,20 +260,31 @@ def map_location_cuda(storage, loc):
 
 parser = argparse.ArgumentParser()
 parser.add_argument(
-    '--max_horizon', type=int, dest='max_horizon',
-    default=10, help='Max Horizon for forecasting')
+    "--max_horizon",
+    type=int,
+    dest="max_horizon",
+    default=10,
+    help="Max Horizon for forecasting",
+)
 parser.add_argument(
-    '--target_column_name', type=str, dest='target_column_name',
-    help='Target Column Name')
+    "--target_column_name",
+    type=str,
+    dest="target_column_name",
+    help="Target Column Name",
+)
 parser.add_argument(
-    '--time_column_name', type=str, dest='time_column_name',
-    help='Time Column Name')
+    "--time_column_name", type=str, dest="time_column_name", help="Time Column Name"
+)
 parser.add_argument(
-    '--frequency', type=str, dest='freq',
-    help='Frequency of prediction')
+    "--frequency", type=str, dest="freq", help="Frequency of prediction"
+)
 parser.add_argument(
-    '--model_path', type=str, dest='model_path',
-    default='model.pkl', help='Filename of model to be loaded')
+    "--model_path",
+    type=str,
+    dest="model_path",
+    default="model.pkl",
+    help="Filename of model to be loaded",
+)
 
 args = parser.parse_args()
 max_horizon = args.max_horizon
@@ -252,7 +293,7 @@ time_column_name = args.time_column_name
 freq = args.freq
 model_path = args.model_path
 
-print('args passed are: ')
+print("args passed are: ")
 print(max_horizon)
 print(target_column_name)
 print(time_column_name)
@@ -261,39 +302,41 @@ print(model_path)
 
 run = Run.get_context()
 # get input dataset by name
-test_dataset = run.input_datasets['test_data']
-lookback_dataset = run.input_datasets['lookback_data']
+test_dataset = run.input_datasets["test_data"]
+lookback_dataset = run.input_datasets["lookback_data"]
 
 grain_column_names = []
 
 df = test_dataset.to_pandas_dataframe()
 
-print('Read df')
+print("Read df")
 print(df)
 
 X_test_df = test_dataset.drop_columns(columns=[target_column_name])
-y_test_df = test_dataset.with_timestamp_columns(
-    None).keep_columns(columns=[target_column_name])
+y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(
+    columns=[target_column_name]
+)
 
 X_lookback_df = lookback_dataset.drop_columns(columns=[target_column_name])
-y_lookback_df = lookback_dataset.with_timestamp_columns(
-    None).keep_columns(columns=[target_column_name])
+y_lookback_df = lookback_dataset.with_timestamp_columns(None).keep_columns(
+    columns=[target_column_name]
+)
 
 _, ext = os.path.splitext(model_path)
-if ext == '.pt':
+if ext == ".pt":
     # Load the fc-tcn torch model.
     assert _torch_present
     if torch.cuda.is_available():
         map_location = map_location_cuda
     else:
-        map_location = 'cpu'
-    with open(model_path, 'rb') as fh:
+        map_location = "cpu"
+    with open(model_path, "rb") as fh:
         fitted_model = torch.load(fh, map_location=map_location)
 else:
     # Load the sklearn pipeline.
     fitted_model = joblib.load(model_path)
 
-if hasattr(fitted_model, 'get_lookback'):
+if hasattr(fitted_model, "get_lookback"):
     lookback = fitted_model.get_lookback()
     df_all = do_rolling_forecast_with_lookback(
         fitted_model,
@@ -302,26 +345,28 @@ if hasattr(fitted_model, 'get_lookback'):
         max_horizon,
         X_lookback_df.to_pandas_dataframe()[-lookback:],
         y_lookback_df.to_pandas_dataframe().values.T[0][-lookback:],
-        freq)
+        freq,
+    )
 else:
     df_all = do_rolling_forecast(
         fitted_model,
         X_test_df.to_pandas_dataframe(),
         y_test_df.to_pandas_dataframe().values.T[0],
         max_horizon,
-        freq)
+        freq,
+    )
 
 print(df_all)
 
 print("target values:::")
 print(df_all[target_column_name])
 print("predicted values:::")
-print(df_all['predicted'])
+print(df_all["predicted"])
 
 # Use the AutoML scoring module
 regression_metrics = list(constants.REGRESSION_SCALAR_SET)
 y_test = np.array(df_all[target_column_name])
-y_pred = np.array(df_all['predicted'])
+y_pred = np.array(df_all["predicted"])
 scores = scoring.score_regression(y_test, y_pred, regression_metrics)
 
 print("scores:")
@@ -331,12 +376,11 @@ for key, value in scores.items():
     run.log(key, value)
 
 print("Simple forecasting model")
-rmse = np.sqrt(mean_squared_error(
-    df_all[target_column_name], df_all['predicted']))
+rmse = np.sqrt(mean_squared_error(df_all[target_column_name], df_all["predicted"]))
 print("[Test Data] \nRoot Mean squared error: %.2f" % rmse)
-mae = mean_absolute_error(df_all[target_column_name], df_all['predicted'])
-print('mean_absolute_error score: %.2f' % mae)
-print('MAPE: %.2f' % MAPE(df_all[target_column_name], df_all['predicted']))
+mae = mean_absolute_error(df_all[target_column_name], df_all["predicted"])
+print("mean_absolute_error score: %.2f" % mae)
+print("MAPE: %.2f" % MAPE(df_all[target_column_name], df_all["predicted"]))
 
-run.log('rmse', rmse)
-run.log('mae', mae)
+run.log("rmse", rmse)
+run.log("mae", mae)
@@ -88,7 +88,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.36.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -108,19 +108,19 @@
 "ws = Workspace.from_config()\n",
 "\n",
 "# choose a name for the run history container in the workspace\n",
-"experiment_name = 'automl-bikeshareforecasting'\n",
+"experiment_name = \"automl-bikeshareforecasting\"\n",
 "\n",
 "experiment = Experiment(ws, experiment_name)\n",
 "\n",
 "output = {}\n",
-"output['Subscription ID'] = ws.subscription_id\n",
-"output['Workspace'] = ws.name\n",
-"output['SKU'] = ws.sku\n",
-"output['Resource Group'] = ws.resource_group\n",
-"output['Location'] = ws.location\n",
-"output['Run History Name'] = experiment_name\n",
-"pd.set_option('display.max_colwidth', -1)\n",
-"outputDf = pd.DataFrame(data = output, index = [''])\n",
+"output[\"Subscription ID\"] = ws.subscription_id\n",
+"output[\"Workspace\"] = ws.name\n",
+"output[\"SKU\"] = ws.sku\n",
+"output[\"Resource Group\"] = ws.resource_group\n",
+"output[\"Location\"] = ws.location\n",
+"output[\"Run History Name\"] = experiment_name\n",
+"pd.set_option(\"display.max_colwidth\", -1)\n",
+"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
 "outputDf.T"
 ]
 },
@@ -153,10 +153,11 @@
 "# Verify that cluster does not exist already\n",
 "try:\n",
 "    compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
-"    print('Found existing cluster, use it.')\n",
+"    print(\"Found existing cluster, use it.\")\n",
 "except ComputeTargetException:\n",
-"    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
-"                                                           max_nodes=4)\n",
+"    compute_config = AmlCompute.provisioning_configuration(\n",
+"        vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
+"    )\n",
 "    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
 "\n",
 "compute_target.wait_for_completion(show_output=True)"
@@ -178,7 +179,9 @@
 "outputs": [],
 "source": [
 "datastore = ws.get_default_datastore()\n",
-"datastore.upload_files(files = ['./bike-no.csv'], target_path = 'dataset/', overwrite = True,show_progress = True)"
+"datastore.upload_files(\n",
+"    files=[\"./bike-no.csv\"], target_path=\"dataset/\", overwrite=True, show_progress=True\n",
+")"
 ]
 },
 {
@@ -198,8 +201,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"target_column_name = 'cnt'\n",
-"time_column_name = 'date'"
+"target_column_name = \"cnt\"\n",
+"time_column_name = \"date\""
 ]
 },
 {
@@ -208,10 +211,12 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'dataset/bike-no.csv')]).with_timestamp_columns(fine_grain_timestamp=time_column_name) \n",
+"dataset = Dataset.Tabular.from_delimited_files(\n",
+"    path=[(datastore, \"dataset/bike-no.csv\")]\n",
+").with_timestamp_columns(fine_grain_timestamp=time_column_name)\n",
 "\n",
 "# Drop the columns 'casual' and 'registered' as these columns are a breakdown of the total and therefore a leak.\n",
-"dataset = dataset.drop_columns(columns=['casual', 'registered'])\n",
+"dataset = dataset.drop_columns(columns=[\"casual\", \"registered\"])\n",
 "\n",
 "dataset.take(5).to_pandas_dataframe().reset_index(drop=True)"
 ]
@@ -320,7 +325,7 @@
 "source": [
 "featurization_config = FeaturizationConfig()\n",
 "# Force the target column, to be integer type.\n",
-"featurization_config.add_prediction_transform_type('Integer')"
+"featurization_config.add_prediction_transform_type(\"Integer\")"
 ]
 },
 {
@@ -337,28 +342,31 @@
 "outputs": [],
 "source": [
 "from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
 "\n",
 "forecasting_parameters = ForecastingParameters(\n",
 "    time_column_name=time_column_name,\n",
 "    forecast_horizon=forecast_horizon,\n",
-"    country_or_region_for_holidays='US', # set country_or_region will trigger holiday featurizer\n",
-"    target_lags='auto', # use heuristic based lag setting\n",
-"    freq='D' # Set the forecast frequency to be daily\n",
+"    country_or_region_for_holidays=\"US\",  # set country_or_region will trigger holiday featurizer\n",
+"    target_lags=\"auto\",  # use heuristic based lag setting\n",
+"    freq=\"D\",  # Set the forecast frequency to be daily\n",
 ")\n",
 "\n",
-"automl_config = AutoMLConfig(task='forecasting', \n",
-"                             primary_metric='normalized_root_mean_squared_error',\n",
+"automl_config = AutoMLConfig(\n",
+"    task=\"forecasting\",\n",
+"    primary_metric=\"normalized_root_mean_squared_error\",\n",
 "    featurization=featurization_config,\n",
-"    blocked_models = ['ExtremeRandomTrees'], \n",
+"    blocked_models=[\"ExtremeRandomTrees\"],\n",
 "    experiment_timeout_hours=0.3,\n",
 "    training_data=train,\n",
 "    label_column_name=target_column_name,\n",
 "    compute_target=compute_target,\n",
 "    enable_early_stopping=True,\n",
-"    n_cross_validations=3, \n",
+"    n_cross_validations=3,\n",
 "    max_concurrent_iterations=4,\n",
 "    max_cores_per_iteration=-1,\n",
 "    verbosity=logging.INFO,\n",
-"    forecasting_parameters=forecasting_parameters)"
+"    forecasting_parameters=forecasting_parameters,\n",
+")"
 ]
 },
 {
@@ -419,7 +427,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"fitted_model.named_steps['timeseriestransformer'].get_engineered_feature_names()"
+"fitted_model.named_steps[\"timeseriestransformer\"].get_engineered_feature_names()"
 ]
 },
 {
@@ -444,7 +452,9 @@
 "outputs": [],
 "source": [
 "# Get the featurization summary as a list of JSON\n",
-"featurization_summary = fitted_model.named_steps['timeseriestransformer'].get_featurization_summary()\n",
+"featurization_summary = fitted_model.named_steps[\n",
+"    \"timeseriestransformer\"\n",
+"].get_featurization_summary()\n",
 "# View the featurization summary as a pandas dataframe\n",
 "pd.DataFrame.from_records(featurization_summary)"
 ]
@@ -491,9 +501,9 @@
 "import os\n",
 "import shutil\n",
 "\n",
-"script_folder = os.path.join(os.getcwd(), 'forecast')\n",
+"script_folder = os.path.join(os.getcwd(), \"forecast\")\n",
 "os.makedirs(script_folder, exist_ok=True)\n",
-"shutil.copy('forecasting_script.py', script_folder)"
+"shutil.copy(\"forecasting_script.py\", script_folder)"
 ]
 },
 {
@@ -511,7 +521,9 @@
 "source": [
 "from run_forecast import run_rolling_forecast\n",
 "\n",
-"remote_run = run_rolling_forecast(test_experiment, compute_target, best_run, test, target_column_name)\n",
+"remote_run = run_rolling_forecast(\n",
+"    test_experiment, compute_target, best_run, test, target_column_name\n",
+")\n",
 "remote_run"
 ]
 },
@@ -538,8 +550,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"remote_run.download_file('outputs/predictions.csv', 'predictions.csv')\n",
-"df_all = pd.read_csv('predictions.csv')"
+"remote_run.download_file(\"outputs/predictions.csv\", \"predictions.csv\")\n",
+"df_all = pd.read_csv(\"predictions.csv\")"
 ]
 },
 {
@@ -556,18 +568,23 @@
 "# use automl metrics module\n",
 "scores = scoring.score_regression(\n",
 "    y_test=df_all[target_column_name],\n",
-"    y_pred=df_all['predicted'],\n",
-"    metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
+"    y_pred=df_all[\"predicted\"],\n",
+"    metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
+")\n",
 "\n",
 "print(\"[Test data scores]\\n\")\n",
-"for key, value in scores.items(): \n",
-"    print('{}: {:.3f}'.format(key, value))\n",
-"    \n",
+"for key, value in scores.items():\n",
+"    print(\"{}: {:.3f}\".format(key, value))\n",
+"\n",
 "# Plot outputs\n",
 "%matplotlib inline\n",
-"test_pred = plt.scatter(df_all[target_column_name], df_all['predicted'], color='b')\n",
-"test_test = plt.scatter(df_all[target_column_name], df_all[target_column_name], color='g')\n",
-"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
+"test_pred = plt.scatter(df_all[target_column_name], df_all[\"predicted\"], color=\"b\")\n",
+"test_test = plt.scatter(\n",
+"    df_all[target_column_name], df_all[target_column_name], color=\"g\"\n",
+")\n",
+"plt.legend(\n",
+"    (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
+")\n",
 "plt.show()"
 ]
 },
@@ -588,10 +605,18 @@
 "outputs": [],
 "source": [
 "from metrics_helper import MAPE, APE\n",
-"df_all.groupby('horizon_origin').apply(\n",
-"    lambda df: pd.Series({'MAPE': MAPE(df[target_column_name], df['predicted']),\n",
-"                          'RMSE': np.sqrt(mean_squared_error(df[target_column_name], df['predicted'])),\n",
-"                          'MAE': mean_absolute_error(df[target_column_name], df['predicted'])}))"
+"\n",
+"df_all.groupby(\"horizon_origin\").apply(\n",
+"    lambda df: pd.Series(\n",
+"        {\n",
+"            \"MAPE\": MAPE(df[target_column_name], df[\"predicted\"]),\n",
+"            \"RMSE\": np.sqrt(\n",
+"                mean_squared_error(df[target_column_name], df[\"predicted\"])\n",
+"            ),\n",
+"            \"MAE\": mean_absolute_error(df[target_column_name], df[\"predicted\"]),\n",
+"        }\n",
+"    )\n",
+")"
 ]
 },
 {
@@ -607,15 +632,18 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"df_all_APE = df_all.assign(APE=APE(df_all[target_column_name], df_all['predicted']))\n",
-"APEs = [df_all_APE[df_all['horizon_origin'] == h].APE.values for h in range(1, forecast_horizon + 1)]\n",
+"df_all_APE = df_all.assign(APE=APE(df_all[target_column_name], df_all[\"predicted\"]))\n",
+"APEs = [\n",
+"    df_all_APE[df_all[\"horizon_origin\"] == h].APE.values\n",
+"    for h in range(1, forecast_horizon + 1)\n",
+"]\n",
 "\n",
 "%matplotlib inline\n",
 "plt.boxplot(APEs)\n",
-"plt.yscale('log')\n",
-"plt.xlabel('horizon')\n",
-"plt.ylabel('APE (%)')\n",
-"plt.title('Absolute Percentage Errors by Forecast Horizon')\n",
+"plt.yscale(\"log\")\n",
+"plt.xlabel(\"horizon\")\n",
+"plt.ylabel(\"APE (%)\")\n",
+"plt.title(\"Absolute Percentage Errors by Forecast Horizon\")\n",
 "\n",
 "plt.show()"
 ]
@@ -4,11 +4,14 @@ from sklearn.externals import joblib
 
 parser = argparse.ArgumentParser()
 parser.add_argument(
-    '--target_column_name', type=str, dest='target_column_name',
-    help='Target Column Name')
+    "--target_column_name",
+    type=str,
+    dest="target_column_name",
+    help="Target Column Name",
+)
 parser.add_argument(
-    '--test_dataset', type=str, dest='test_dataset',
-    help='Test Dataset')
+    "--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
+)
 
 args = parser.parse_args()
 target_column_name = args.target_column_name
@@ -20,19 +23,30 @@ ws = run.experiment.workspace
 # get the input dataset by id
 test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)
 
-X_test_df = test_dataset.drop_columns(columns=[target_column_name]).to_pandas_dataframe().reset_index(drop=True)
-y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(columns=[target_column_name]).to_pandas_dataframe()
+X_test_df = (
+    test_dataset.drop_columns(columns=[target_column_name])
+    .to_pandas_dataframe()
+    .reset_index(drop=True)
+)
+y_test_df = (
+    test_dataset.with_timestamp_columns(None)
+    .keep_columns(columns=[target_column_name])
+    .to_pandas_dataframe()
+)
 
-fitted_model = joblib.load('model.pkl')
+fitted_model = joblib.load("model.pkl")
 
 y_pred, X_trans = fitted_model.rolling_evaluation(X_test_df, y_test_df.values)
 
 # Add predictions, actuals, and horizon relative to rolling origin to the test feature data
-assign_dict = {'horizon_origin': X_trans['horizon_origin'].values, 'predicted': y_pred,
-               target_column_name: y_test_df[target_column_name].values}
+assign_dict = {
+    "horizon_origin": X_trans["horizon_origin"].values,
+    "predicted": y_pred,
+    target_column_name: y_test_df[target_column_name].values,
+}
 df_all = X_test_df.assign(**assign_dict)
 
-file_name = 'outputs/predictions.csv'
+file_name = "outputs/predictions.csv"
 export_csv = df_all.to_csv(file_name, header=True)
 
 # Upload the predictions into artifacts
@@ -1,32 +1,40 @@
 from azureml.core import ScriptRunConfig
 
 
-def run_rolling_forecast(test_experiment, compute_target, train_run,
-                         test_dataset, target_column_name,
-                         inference_folder='./forecast'):
-    train_run.download_file('outputs/model.pkl',
-                            inference_folder + '/model.pkl')
+def run_rolling_forecast(
+    test_experiment,
+    compute_target,
+    train_run,
+    test_dataset,
+    target_column_name,
+    inference_folder="./forecast",
+):
+    train_run.download_file("outputs/model.pkl", inference_folder + "/model.pkl")
 
     inference_env = train_run.get_environment()
 
-    config = ScriptRunConfig(source_directory=inference_folder,
-                             script='forecasting_script.py',
-                             arguments=['--target_column_name',
+    config = ScriptRunConfig(
+        source_directory=inference_folder,
+        script="forecasting_script.py",
+        arguments=[
+            "--target_column_name",
             target_column_name,
-            '--test_dataset',
-            test_dataset.as_named_input(test_dataset.name)],
+            "--test_dataset",
+            test_dataset.as_named_input(test_dataset.name),
+        ],
         compute_target=compute_target,
-        environment=inference_env)
+        environment=inference_env,
+    )
 
-    run = test_experiment.submit(config,
-                                 tags={'training_run_id':
-                                       train_run.id,
-                                       'run_algorithm':
-                                       train_run.properties['run_algorithm'],
-                                       'valid_score':
-                                       train_run.properties['score'],
-                                       'primary_metric':
-                                       train_run.properties['primary_metric']})
+    run = test_experiment.submit(
+        config,
+        tags={
+            "training_run_id": train_run.id,
+            "run_algorithm": train_run.properties["run_algorithm"],
+            "valid_score": train_run.properties["score"],
+            "primary_metric": train_run.properties["primary_metric"],
+        },
+    )
 
-    run.log("run_algorithm", run.tags['run_algorithm'])
+    run.log("run_algorithm", run.tags["run_algorithm"])
     return run
@@ -99,7 +99,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.36.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -119,7 +119,7 @@
 "ws = Workspace.from_config()\n",
 "\n",
 "# choose a name for the run history container in the workspace\n",
-"experiment_name = 'automl-forecasting-energydemand'\n",
+"experiment_name = \"automl-forecasting-energydemand\"\n",
 "\n",
 "# # project folder\n",
 "# project_folder = './sample_projects/automl-forecasting-energy-demand'\n",
@@ -127,13 +127,13 @@
 "experiment = Experiment(ws, experiment_name)\n",
 "\n",
 "output = {}\n",
-"output['Subscription ID'] = ws.subscription_id\n",
-"output['Workspace'] = ws.name\n",
-"output['Resource Group'] = ws.resource_group\n",
-"output['Location'] = ws.location\n",
-"output['Run History Name'] = experiment_name\n",
-"pd.set_option('display.max_colwidth', -1)\n",
-"outputDf = pd.DataFrame(data = output, index = [''])\n",
+"output[\"Subscription ID\"] = ws.subscription_id\n",
+"output[\"Workspace\"] = ws.name\n",
+"output[\"Resource Group\"] = ws.resource_group\n",
+"output[\"Location\"] = ws.location\n",
+"output[\"Run History Name\"] = experiment_name\n",
+"pd.set_option(\"display.max_colwidth\", -1)\n",
+"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
 "outputDf.T"
 ]
 },
@@ -166,10 +166,11 @@
|
||||
"# Verify that cluster does not exist already\n",
|
||||
"try:\n",
|
||||
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
||||
" print('Found existing cluster, use it.')\n",
|
||||
" print(\"Found existing cluster, use it.\")\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
|
||||
" max_nodes=6)\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
|
||||
" )\n",
|
||||
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
||||
"\n",
|
||||
"compute_target.wait_for_completion(show_output=True)"
|
||||
@@ -204,8 +205,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"target_column_name = 'demand'\n",
|
||||
"time_column_name = 'timeStamp'"
|
||||
"target_column_name = \"demand\"\n",
|
||||
"time_column_name = \"timeStamp\""
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -214,7 +215,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"dataset = Dataset.Tabular.from_delimited_files(path = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/nyc_energy.csv\").with_timestamp_columns(fine_grain_timestamp=time_column_name) \n",
|
||||
"dataset = Dataset.Tabular.from_delimited_files(\n",
|
||||
" path=\"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/nyc_energy.csv\"\n",
|
||||
").with_timestamp_columns(fine_grain_timestamp=time_column_name)\n",
|
||||
"dataset.take(5).to_pandas_dataframe().reset_index(drop=True)"
|
||||
]
|
||||
},
|
||||
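Because the cell above registers timeStamp as the dataset's fine-grain timestamp, the TabularDataset time filters become available for the train/test split. A hedged sketch of such a split; the cutoff date is illustrative, not taken from this diff.

# Sketch: time-based split using the timestamp column registered above.
from datetime import datetime

cutoff = datetime(2017, 8, 8, 5)  # illustrative cutoff
train = dataset.time_before(cutoff, include_boundary=True)
test = dataset.time_after(cutoff, include_boundary=False)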
@@ -343,23 +346,26 @@
"outputs": [],
"source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n",
"forecasting_parameters = ForecastingParameters(\n",
"    time_column_name=time_column_name,\n",
"    forecast_horizon=forecast_horizon,\n",
"    freq='H' # Set the forecast frequency to be hourly\n",
"    freq=\"H\",  # Set the forecast frequency to be hourly\n",
")\n",
"\n",
"automl_config = AutoMLConfig(task='forecasting', \n",
"                             primary_metric='normalized_root_mean_squared_error',\n",
"                             blocked_models = ['ExtremeRandomTrees', 'AutoArima', 'Prophet'], \n",
"automl_config = AutoMLConfig(\n",
"    task=\"forecasting\",\n",
"    primary_metric=\"normalized_root_mean_squared_error\",\n",
"    blocked_models=[\"ExtremeRandomTrees\", \"AutoArima\", \"Prophet\"],\n",
"    experiment_timeout_hours=0.3,\n",
"    training_data=train,\n",
"    label_column_name=target_column_name,\n",
"    compute_target=compute_target,\n",
"    enable_early_stopping=True,\n",
"    n_cross_validations=3, \n",
"    n_cross_validations=3,\n",
"    verbosity=logging.INFO,\n",
"    forecasting_parameters=forecasting_parameters)"
"    forecasting_parameters=forecasting_parameters,\n",
")"
]
},
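Downstream of this configuration, the notebook submits the experiment and extracts the winning model; a short sketch of those steps, assuming the experiment object from the setup cells above.

# Sketch: submit the AutoMLConfig above and retrieve the best run and
# its fitted pipeline once training completes.
remote_run = experiment.submit(automl_config, show_output=False)
remote_run.wait_for_completion()
best_run, fitted_model = remote_run.get_output()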
{
@@ -420,7 +426,7 @@
"metadata": {},
"outputs": [],
"source": [
"fitted_model.named_steps['timeseriestransformer'].get_engineered_feature_names()"
"fitted_model.named_steps[\"timeseriestransformer\"].get_engineered_feature_names()"
]
},
{
@@ -444,7 +450,9 @@
"outputs": [],
"source": [
"# Get the featurization summary as a list of JSON\n",
"featurization_summary = fitted_model.named_steps['timeseriestransformer'].get_featurization_summary()\n",
"featurization_summary = fitted_model.named_steps[\n",
"    \"timeseriestransformer\"\n",
"].get_featurization_summary()\n",
"# View the featurization summary as a pandas dataframe\n",
"pd.DataFrame.from_records(featurization_summary)"
]
@@ -484,15 +492,18 @@
"outputs": [],
"source": [
"from run_forecast import run_remote_inference\n",
"remote_run_infer = run_remote_inference(test_experiment=test_experiment,\n",
"\n",
"remote_run_infer = run_remote_inference(\n",
"    test_experiment=test_experiment,\n",
"    compute_target=compute_target,\n",
"    train_run=best_run,\n",
"    test_dataset=test,\n",
"    target_column_name=target_column_name)\n",
"    target_column_name=target_column_name,\n",
")\n",
"remote_run_infer.wait_for_completion(show_output=False)\n",
"\n",
"# download the inference output file to the local machine\n",
"remote_run_infer.download_file('outputs/predictions.csv', 'predictions.csv')"
"remote_run_infer.download_file(\"outputs/predictions.csv\", \"predictions.csv\")"
]
},
{
@@ -510,7 +521,7 @@
"outputs": [],
"source": [
"# load forecast data frame\n",
"fcst_df = pd.read_csv('predictions.csv', parse_dates=[time_column_name])\n",
"fcst_df = pd.read_csv(\"predictions.csv\", parse_dates=[time_column_name])\n",
"fcst_df.head()"
]
},
@@ -527,18 +538,23 @@
"# use automl metrics module\n",
"scores = scoring.score_regression(\n",
"    y_test=fcst_df[target_column_name],\n",
"    y_pred=fcst_df['predicted'],\n",
"    metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
"    y_pred=fcst_df[\"predicted\"],\n",
"    metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
")\n",
"\n",
"print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items(): \n",
"    print('{}: {:.3f}'.format(key, value))\n",
"    \n",
"for key, value in scores.items():\n",
"    print(\"{}: {:.3f}\".format(key, value))\n",
"\n",
"# Plot outputs\n",
"%matplotlib inline\n",
"test_pred = plt.scatter(fcst_df[target_column_name], fcst_df['predicted'], color='b')\n",
"test_test = plt.scatter(fcst_df[target_column_name], fcst_df[target_column_name], color='g')\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
"test_pred = plt.scatter(fcst_df[target_column_name], fcst_df[\"predicted\"], color=\"b\")\n",
"test_test = plt.scatter(\n",
"    fcst_df[target_column_name], fcst_df[target_column_name], color=\"g\"\n",
")\n",
"plt.legend(\n",
"    (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n",
"plt.show()"
]
},
@@ -567,21 +583,33 @@
"outputs": [],
"source": [
"advanced_forecasting_parameters = ForecastingParameters(\n",
"    time_column_name=time_column_name, forecast_horizon=forecast_horizon,\n",
"    target_lags=12, target_rolling_window_size=4\n",
"    time_column_name=time_column_name,\n",
"    forecast_horizon=forecast_horizon,\n",
"    target_lags=12,\n",
"    target_rolling_window_size=4,\n",
")\n",
"\n",
"automl_config = AutoMLConfig(task='forecasting', \n",
"                             primary_metric='normalized_root_mean_squared_error',\n",
"                             blocked_models = ['ElasticNet','ExtremeRandomTrees','GradientBoosting','XGBoostRegressor','ExtremeRandomTrees', 'AutoArima', 'Prophet'], #These models are blocked for tutorial purposes, remove this for real use cases. \n",
"automl_config = AutoMLConfig(\n",
"    task=\"forecasting\",\n",
"    primary_metric=\"normalized_root_mean_squared_error\",\n",
"    blocked_models=[\n",
"        \"ElasticNet\",\n",
"        \"ExtremeRandomTrees\",\n",
"        \"GradientBoosting\",\n",
"        \"XGBoostRegressor\",\n",
"        \"ExtremeRandomTrees\",\n",
"        \"AutoArima\",\n",
"        \"Prophet\",\n",
"    ],  # These models are blocked for tutorial purposes, remove this for real use cases.\n",
"    experiment_timeout_hours=0.3,\n",
"    training_data=train,\n",
"    label_column_name=target_column_name,\n",
"    compute_target=compute_target,\n",
"    enable_early_stopping = True,\n",
"    n_cross_validations=3, \n",
"    enable_early_stopping=True,\n",
"    n_cross_validations=3,\n",
"    verbosity=logging.INFO,\n",
"    forecasting_parameters=advanced_forecasting_parameters)"
"    forecasting_parameters=advanced_forecasting_parameters,\n",
")"
]
},
{
@@ -640,16 +668,20 @@
"outputs": [],
"source": [
"test_experiment_advanced = Experiment(ws, experiment_name + \"_inference_advanced\")\n",
"advanced_remote_run_infer = run_remote_inference(test_experiment=test_experiment_advanced,\n",
"advanced_remote_run_infer = run_remote_inference(\n",
"    test_experiment=test_experiment_advanced,\n",
"    compute_target=compute_target,\n",
"    train_run=best_run_lags,\n",
"    test_dataset=test,\n",
"    target_column_name=target_column_name,\n",
"    inference_folder='./forecast_advanced')\n",
"    inference_folder=\"./forecast_advanced\",\n",
")\n",
"advanced_remote_run_infer.wait_for_completion(show_output=False)\n",
"\n",
"# download the inference output file to the local machine\n",
"advanced_remote_run_infer.download_file('outputs/predictions.csv', 'predictions_advanced.csv')"
"advanced_remote_run_infer.download_file(\n",
"    \"outputs/predictions.csv\", \"predictions_advanced.csv\"\n",
")"
]
},
{
@@ -658,7 +690,7 @@
"metadata": {},
"outputs": [],
"source": [
"fcst_adv_df = pd.read_csv('predictions_advanced.csv', parse_dates=[time_column_name])\n",
"fcst_adv_df = pd.read_csv(\"predictions_advanced.csv\", parse_dates=[time_column_name])\n",
"fcst_adv_df.head()"
]
},
@@ -675,18 +707,25 @@
"# use automl metrics module\n",
"scores = scoring.score_regression(\n",
"    y_test=fcst_adv_df[target_column_name],\n",
"    y_pred=fcst_adv_df['predicted'],\n",
"    metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
"    y_pred=fcst_adv_df[\"predicted\"],\n",
"    metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
")\n",
"\n",
"print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items(): \n",
"    print('{}: {:.3f}'.format(key, value))\n",
"    \n",
"for key, value in scores.items():\n",
"    print(\"{}: {:.3f}\".format(key, value))\n",
"\n",
"# Plot outputs\n",
"%matplotlib inline\n",
"test_pred = plt.scatter(fcst_adv_df[target_column_name], fcst_adv_df['predicted'], color='b')\n",
"test_test = plt.scatter(fcst_adv_df[target_column_name], fcst_adv_df[target_column_name], color='g')\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
"test_pred = plt.scatter(\n",
"    fcst_adv_df[target_column_name], fcst_adv_df[\"predicted\"], color=\"b\"\n",
")\n",
"test_test = plt.scatter(\n",
"    fcst_adv_df[target_column_name], fcst_adv_df[target_column_name], color=\"g\"\n",
")\n",
"plt.legend(\n",
"    (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n",
"plt.show()"
]
}

@@ -11,11 +11,14 @@ from pandas.tseries.frequencies import to_offset

parser = argparse.ArgumentParser()
parser.add_argument(
    '--target_column_name', type=str, dest='target_column_name',
    help='Target Column Name')
    "--target_column_name",
    type=str,
    dest="target_column_name",
    help="Target Column Name",
)
parser.add_argument(
    '--test_dataset', type=str, dest='test_dataset',
    help='Test Dataset')
    "--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
)

args = parser.parse_args()
target_column_name = args.target_column_name
@@ -31,25 +34,27 @@ X_test = test_dataset.to_pandas_dataframe().reset_index(drop=True)
y_test = X_test.pop(target_column_name).values

# generate forecast
fitted_model = joblib.load('model.pkl')
fitted_model = joblib.load("model.pkl")
# We have default quantile values set as below (95th percentile)
quantiles = [0.025, 0.5, 0.975]
predicted_column_name = 'predicted'
PI = 'prediction_interval'
predicted_column_name = "predicted"
PI = "prediction_interval"
fitted_model.quantiles = quantiles
pred_quantiles = fitted_model.forecast_quantiles(X_test)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(lambda x: '[{}, {}]'.format(x[0],
                                                                                                        x[1]), axis=1)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
    lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
)
X_test[target_column_name] = y_test
X_test[PI] = pred_quantiles[PI]
X_test[predicted_column_name] = pred_quantiles[0.5]
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = X_test[X_test[[target_column_name,
                       predicted_column_name]].notnull().all(axis=1)]
clean = X_test[
    X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]

file_name = 'outputs/predictions.csv'
file_name = "outputs/predictions.csv"
export_csv = clean.to_csv(file_name, header=True, index=False)  # added Index

# Upload the predictions into artifacts

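The quantile handling in forecasting_script.py is the interesting part of this hunk: forecast_quantiles returns one column per requested quantile level, and the script folds the outer two into a string-valued prediction interval while keeping the median as the point forecast. A self-contained pandas sketch of the same transformation on made-up numbers:

import pandas as pd

# Stand-in for the forecast_quantiles output: one column per quantile level.
quantiles = [0.025, 0.5, 0.975]
pred_quantiles = pd.DataFrame(
    {0.025: [9.1, 9.8], 0.5: [10.0, 10.7], 0.975: [11.2, 11.9]}
)

# Same idea as the script: the outer quantiles become an interval string,
# the median stays as the point forecast.
pred_quantiles["prediction_interval"] = pred_quantiles[
    [min(quantiles), max(quantiles)]
].apply(lambda x: "[{}, {}]".format(x[0], x[1]), axis=1)
point_forecast = pred_quantiles[0.5]
print(pred_quantiles)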
@@ -3,36 +3,47 @@ import shutil
from azureml.core import ScriptRunConfig


def run_remote_inference(test_experiment, compute_target, train_run,
                         test_dataset, target_column_name, inference_folder='./forecast'):
def run_remote_inference(
    test_experiment,
    compute_target,
    train_run,
    test_dataset,
    target_column_name,
    inference_folder="./forecast",
):
    # Create local directory to copy the model.pkl and forecasting_script.py files into.
    # These files will be uploaded to and executed on the compute instance.
    os.makedirs(inference_folder, exist_ok=True)
    shutil.copy('forecasting_script.py', inference_folder)
    shutil.copy("forecasting_script.py", inference_folder)

    train_run.download_file('outputs/model.pkl',
                            os.path.join(inference_folder, 'model.pkl'))
    train_run.download_file(
        "outputs/model.pkl", os.path.join(inference_folder, "model.pkl")
    )

    inference_env = train_run.get_environment()

    config = ScriptRunConfig(source_directory=inference_folder,
                             script='forecasting_script.py',
                             arguments=['--target_column_name',
    config = ScriptRunConfig(
        source_directory=inference_folder,
        script="forecasting_script.py",
        arguments=[
            "--target_column_name",
            target_column_name,
            '--test_dataset',
            test_dataset.as_named_input(test_dataset.name)],
            "--test_dataset",
            test_dataset.as_named_input(test_dataset.name),
        ],
        compute_target=compute_target,
        environment=inference_env)
        environment=inference_env,
    )

    run = test_experiment.submit(config,
                                 tags={'training_run_id':
                                       train_run.id,
                                       'run_algorithm':
                                       train_run.properties['run_algorithm'],
                                       'valid_score':
                                       train_run.properties['score'],
                                       'primary_metric':
                                       train_run.properties['primary_metric']})
    run = test_experiment.submit(
        config,
        tags={
            "training_run_id": train_run.id,
            "run_algorithm": train_run.properties["run_algorithm"],
            "valid_score": train_run.properties["score"],
            "primary_metric": train_run.properties["primary_metric"],
        },
    )

    run.log("run_algorithm", run.tags['run_algorithm'])
    run.log("run_algorithm", run.tags["run_algorithm"])
    return run

@@ -94,7 +94,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.36.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -111,19 +111,19 @@
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for the run history container in the workspace\n",
"experiment_name = 'automl-forecast-function-demo'\n",
"experiment_name = \"automl-forecast-function-demo\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['SKU'] = ws.sku\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"SKU\"] = ws.sku\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -141,17 +141,20 @@
"metadata": {},
"outputs": [],
"source": [
"TIME_COLUMN_NAME = 'date'\n",
"TIME_SERIES_ID_COLUMN_NAME = 'time_series_id'\n",
"TARGET_COLUMN_NAME = 'y'\n",
"TIME_COLUMN_NAME = \"date\"\n",
"TIME_SERIES_ID_COLUMN_NAME = \"time_series_id\"\n",
"TARGET_COLUMN_NAME = \"y\"\n",
"\n",
"def get_timeseries(train_len: int,\n",
"\n",
"def get_timeseries(\n",
"    train_len: int,\n",
"    test_len: int,\n",
"    time_column_name: str,\n",
"    target_column_name: str,\n",
"    time_series_id_column_name: str,\n",
"    time_series_number: int = 1,\n",
"    freq: str = 'H'):\n",
"    freq: str = \"H\",\n",
"):\n",
"    \"\"\"\n",
"    Return the time series of the desired length.\n",
"\n",
@@ -174,14 +177,18 @@
"    data_test = []  # type: List[pd.DataFrame]\n",
"    data_length = train_len + test_len\n",
"    for i in range(time_series_number):\n",
"        X = pd.DataFrame({\n",
"            time_column_name: pd.date_range(start='2000-01-01',\n",
"                                            periods=data_length,\n",
"                                            freq=freq),\n",
"            target_column_name: np.arange(data_length).astype(float) + np.random.rand(data_length) + i*5,\n",
"            'ext_predictor': np.asarray(range(42, 42 + data_length)),\n",
"            time_series_id_column_name: np.repeat('ts{}'.format(i), data_length)\n",
"        })\n",
"        X = pd.DataFrame(\n",
"            {\n",
"                time_column_name: pd.date_range(\n",
"                    start=\"2000-01-01\", periods=data_length, freq=freq\n",
"                ),\n",
"                target_column_name: np.arange(data_length).astype(float)\n",
"                + np.random.rand(data_length)\n",
"                + i * 5,\n",
"                \"ext_predictor\": np.asarray(range(42, 42 + data_length)),\n",
"                time_series_id_column_name: np.repeat(\"ts{}\".format(i), data_length),\n",
"            }\n",
"        )\n",
"        data_train.append(X[:train_len])\n",
"        data_test.append(X[train_len:])\n",
"    X_train = pd.concat(data_train)\n",
@@ -190,14 +197,17 @@
"    y_test = X_test.pop(target_column_name).values\n",
"    return X_train, y_train, X_test, y_test\n",
"\n",
"\n",
"n_test_periods = 6\n",
"n_train_periods = 30\n",
"X_train, y_train, X_test, y_test = get_timeseries(train_len=n_train_periods,\n",
"X_train, y_train, X_test, y_test = get_timeseries(\n",
"    train_len=n_train_periods,\n",
"    test_len=n_test_periods,\n",
"    time_column_name=TIME_COLUMN_NAME,\n",
"    target_column_name=TARGET_COLUMN_NAME,\n",
"    time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
"    time_series_number=2)"
"    time_series_number=2,\n",
")"
]
},
{
@@ -224,11 +234,12 @@
"source": [
"# plot the example time series\n",
"import matplotlib.pyplot as plt\n",
"\n",
"whole_data = X_train.copy()\n",
"target_label = 'y'\n",
"target_label = \"y\"\n",
"whole_data[target_label] = y_train\n",
"for g in whole_data.groupby('time_series_id'): \n",
"    plt.plot(g[1]['date'].values, g[1]['y'].values, label=g[0])\n",
"for g in whole_data.groupby(\"time_series_id\"):\n",
"    plt.plot(g[1][\"date\"].values, g[1][\"y\"].values, label=g[0])\n",
"plt.legend()\n",
"plt.show()"
]
@@ -250,12 +261,12 @@
"# We need to save the artificial data and then upload it to the default workspace datastore.\n",
"DATA_PATH = \"fc_fn_data\"\n",
"DATA_PATH_X = \"{}/data_train.csv\".format(DATA_PATH)\n",
"if not os.path.isdir('data'):\n",
"    os.mkdir('data')\n",
"if not os.path.isdir(\"data\"):\n",
"    os.mkdir(\"data\")\n",
"pd.DataFrame(whole_data).to_csv(\"data/data_train.csv\", index=False)\n",
"# Upload saved data to the default data store.\n",
"ds = ws.get_default_datastore()\n",
"ds.upload(src_dir='./data', target_path=DATA_PATH, overwrite=True, show_progress=True)\n",
"ds.upload(src_dir=\"./data\", target_path=DATA_PATH, overwrite=True, show_progress=True)\n",
"train_data = Dataset.Tabular.from_delimited_files(path=ds.path(DATA_PATH_X))"
]
},
@@ -283,10 +294,11 @@
"# Verify that cluster does not exist already\n",
"try:\n",
"    compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
"    print('Found existing cluster, use it.')\n",
"    print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
"    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
"                                                           max_nodes=6)\n",
"    compute_config = AmlCompute.provisioning_configuration(\n",
"        vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
"    )\n",
"    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
@@ -315,14 +327,15 @@
"outputs": [],
"source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"lags = [1,2,3]\n",
"\n",
"lags = [1, 2, 3]\n",
"forecast_horizon = n_test_periods\n",
"forecasting_parameters = ForecastingParameters(\n",
"    time_column_name=TIME_COLUMN_NAME,\n",
"    forecast_horizon=forecast_horizon,\n",
"    time_series_id_column_names=[ TIME_SERIES_ID_COLUMN_NAME ],\n",
"    time_series_id_column_names=[TIME_SERIES_ID_COLUMN_NAME],\n",
"    target_lags=lags,\n",
"    freq='H' # Set the forecast frequency to be hourly\n",
"    freq=\"H\",  # Set the forecast frequency to be hourly\n",
")"
]
},
@@ -344,19 +357,21 @@
"from azureml.train.automl import AutoMLConfig\n",
"\n",
"\n",
"automl_config = AutoMLConfig(task='forecasting',\n",
"                             debug_log='automl_forecasting_function.log',\n",
"                             primary_metric='normalized_root_mean_squared_error',\n",
"automl_config = AutoMLConfig(\n",
"    task=\"forecasting\",\n",
"    debug_log=\"automl_forecasting_function.log\",\n",
"    primary_metric=\"normalized_root_mean_squared_error\",\n",
"    experiment_timeout_hours=0.25,\n",
"    enable_early_stopping=True,\n",
"    training_data=train_data,\n",
"    compute_target=compute_target,\n",
"    n_cross_validations=3,\n",
"    verbosity = logging.INFO,\n",
"    verbosity=logging.INFO,\n",
"    max_concurrent_iterations=4,\n",
"    max_cores_per_iteration=-1,\n",
"    label_column_name=target_label,\n",
"    forecasting_parameters=forecasting_parameters)\n",
"    forecasting_parameters=forecasting_parameters,\n",
")\n",
"\n",
"remote_run = experiment.submit(automl_config, show_output=False)"
]
@@ -481,12 +496,12 @@
"metadata": {},
"outputs": [],
"source": [
"# specify which quantiles you would like \n",
"# specify which quantiles you would like\n",
"fitted_model.quantiles = [0.01, 0.5, 0.95]\n",
"# use forecast_quantiles function, not the forecast() one\n",
"y_pred_quantiles = fitted_model.forecast_quantiles(X_test)\n",
"\n",
"# quantile forecasts returned in a Dataframe along with the time and time series id columns \n",
"# quantile forecasts returned in a Dataframe along with the time and time series id columns\n",
"y_pred_quantiles"
]
},
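A quick way to sanity-check the cell above is to measure how often the actuals land inside the implied interval; a hedged sketch, assuming y_test holds the matching actuals and that the returned DataFrame names its columns by quantile level, as the cell suggests:

import numpy as np

lower = y_pred_quantiles[0.01].values
upper = y_pred_quantiles[0.95].values
coverage = np.mean((y_test >= lower) & (y_test <= upper))
print("Empirical coverage of the [0.01, 0.95] band: {:.1%}".format(coverage))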
@@ -534,14 +549,16 @@
"metadata": {},
"outputs": [],
"source": [
"# generate the same kind of test data we trained on, \n",
"# generate the same kind of test data we trained on,\n",
"# but now make the train set much longer, so that the test set will be in the future\n",
"X_context, y_context, X_away, y_away = get_timeseries(train_len=42, # train data was 30 steps long\n",
"X_context, y_context, X_away, y_away = get_timeseries(\n",
"    train_len=42,  # train data was 30 steps long\n",
"    test_len=4,\n",
"    time_column_name=TIME_COLUMN_NAME,\n",
"    target_column_name=TARGET_COLUMN_NAME,\n",
"    time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
"    time_series_number=2)\n",
"    time_series_number=2,\n",
")\n",
"\n",
"# end of the data we trained on\n",
"print(X_train.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].max())\n",
@@ -562,7 +579,7 @@
"metadata": {},
"outputs": [],
"source": [
"try: \n",
"try:\n",
"    y_pred_away, xy_away = fitted_model.forecast(X_away)\n",
"    xy_away\n",
"except Exception as e:\n",
@@ -584,7 +601,9 @@
"metadata": {},
"outputs": [],
"source": [
"def make_forecasting_query(fulldata, time_column_name, target_column_name, forecast_origin, horizon, lookback):\n",
"def make_forecasting_query(\n",
"    fulldata, time_column_name, target_column_name, forecast_origin, horizon, lookback\n",
"):\n",
"\n",
"    \"\"\"\n",
"    This function will take the full dataset, and create the query\n",
@@ -592,24 +611,24 @@
"    forward for the next `horizon` horizons. Context from previous\n",
"    `lookback` periods will be included.\n",
"\n",
"    \n",
"\n",
"\n",
"    fulldata: pandas.DataFrame a time series dataset. Needs to contain X and y.\n",
"    time_column_name: string which column (must be in fulldata) is the time axis\n",
"    target_column_name: string which column (must be in fulldata) is to be forecast\n",
"    forecast_origin: datetime type the last time we (pretend to) have target values \n",
"    forecast_origin: datetime type the last time we (pretend to) have target values\n",
"    horizon: timedelta how far forward, in time units (not periods)\n",
"    lookback: timedelta how far back does the model look?\n",
"    lookback: timedelta how far back does the model look\n",
"\n",
"    Example:\n",
"\n",
"\n",
"    ```\n",
"\n",
"    forecast_origin = pd.to_datetime('2012-09-01') + pd.DateOffset(days=5) # forecast 5 days after end of training\n",
"    forecast_origin = pd.to_datetime(\"2012-09-01\") + pd.DateOffset(days=5)  # forecast 5 days after end of training\n",
"    print(forecast_origin)\n",
"\n",
"    X_query, y_query = make_forecasting_query(data, \n",
"    X_query, y_query = make_forecasting_query(data,\n",
"                       forecast_origin = forecast_origin,\n",
"                       horizon = pd.DateOffset(days=7),  # 7 days into the future\n",
"                       lookback = pd.DateOffset(days=1),  # model has lag 1 period (day)\n",
@@ -618,28 +637,30 @@
"    ```\n",
"    \"\"\"\n",
"\n",
"    X_past = fulldata[ (fulldata[ time_column_name ] > forecast_origin - lookback) &\n",
"                       (fulldata[ time_column_name ] <= forecast_origin)\n",
"    X_past = fulldata[\n",
"        (fulldata[time_column_name] > forecast_origin - lookback)\n",
"        & (fulldata[time_column_name] <= forecast_origin)\n",
"    ]\n",
"\n",
"    X_future = fulldata[ (fulldata[ time_column_name ] > forecast_origin) &\n",
"                         (fulldata[ time_column_name ] <= forecast_origin + horizon)\n",
"    X_future = fulldata[\n",
"        (fulldata[time_column_name] > forecast_origin)\n",
"        & (fulldata[time_column_name] <= forecast_origin + horizon)\n",
"    ]\n",
"\n",
"    y_past = X_past.pop(target_column_name).values.astype(np.float)\n",
"    y_future = X_future.pop(target_column_name).values.astype(np.float)\n",
"\n",
"    # Now take y_future and turn it into question marks\n",
"    y_query = y_future.copy().astype(np.float)  # because sometimes life hands you an int\n",
"    y_query = y_future.copy().astype(\n",
"        np.float\n",
"    )  # because sometimes life hands you an int\n",
"    y_query.fill(np.NaN)\n",
"\n",
"\n",
"    print(\"X_past is \" + str(X_past.shape) + \" - shaped\")\n",
"    print(\"X_future is \" + str(X_future.shape) + \" - shaped\")\n",
"    print(\"y_past is \" + str(y_past.shape) + \" - shaped\")\n",
"    print(\"y_query is \" + str(y_query.shape) + \" - shaped\")\n",
"\n",
"\n",
"    X_pred = pd.concat([X_past, X_future])\n",
"    y_pred = np.concatenate([y_past, y_query])\n",
"    return X_pred, y_pred"
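To see make_forecasting_query in action outside the notebook flow, a toy example with a ten-point hourly series; the final four rows play the role of the unknown future, so the returned y comes back with real values for the lookback rows and NaN for the horizon rows:

import numpy as np
import pandas as pd

# Toy series: 10 hourly points; pretend only the first 6 are known.
toy = pd.DataFrame(
    {
        "date": pd.date_range("2000-01-01", periods=10, freq="H"),
        "y": np.arange(10, dtype=float),
    }
)
origin = toy["date"].iloc[5]  # last timestamp with known actuals

X_q, y_q = make_forecasting_query(
    toy, "date", "y", origin, pd.DateOffset(hours=4), pd.DateOffset(hours=3)
)
print(y_q)  # [3. 4. 5. nan nan nan nan]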
@@ -658,8 +679,16 @@
"metadata": {},
"outputs": [],
"source": [
"print(X_context.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(['min','max','count']))\n",
"print(X_away.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(['min','max','count']))\n",
"print(\n",
"    X_context.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(\n",
"        [\"min\", \"max\", \"count\"]\n",
"    )\n",
")\n",
"print(\n",
"    X_away.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(\n",
"        [\"min\", \"max\", \"count\"]\n",
"    )\n",
")\n",
"X_context.tail(5)"
]
},
@@ -669,11 +698,11 @@
"metadata": {},
"outputs": [],
"source": [
"# Since the length of the lookback is 3, \n",
"# Since the length of the lookback is 3,\n",
"# we need to add 3 periods from the context to the request\n",
"# so that the model has the data it needs\n",
"\n",
"# Put the X and y back together for a while. \n",
"# Put the X and y back together for a while.\n",
"# They like each other and it makes them happy.\n",
"X_context[TARGET_COLUMN_NAME] = y_context\n",
"X_away[TARGET_COLUMN_NAME] = y_away\n",
@@ -684,7 +713,7 @@
"# it is indeed the last point of the context\n",
"assert forecast_origin == X_context[TIME_COLUMN_NAME].max()\n",
"print(\"Forecast origin: \" + str(forecast_origin))\n",
"    \n",
"\n",
"# the model uses lags and rolling windows to look back in time\n",
"n_lookback_periods = max(lags)\n",
"lookback = pd.DateOffset(hours=n_lookback_periods)\n",
@@ -692,8 +721,9 @@
"horizon = pd.DateOffset(hours=forecast_horizon)\n",
"\n",
"# now make the forecast query from context (refer to figure)\n",
"X_pred, y_pred = make_forecasting_query(fulldata, TIME_COLUMN_NAME, TARGET_COLUMN_NAME,\n",
"                                        forecast_origin, horizon, lookback)\n",
"X_pred, y_pred = make_forecasting_query(\n",
"    fulldata, TIME_COLUMN_NAME, TARGET_COLUMN_NAME, forecast_origin, horizon, lookback\n",
")\n",
"\n",
"# show the forecast request aligned\n",
"X_show = X_pred.copy()\n",
@@ -720,7 +750,7 @@
"# show the forecast aligned\n",
"X_show = xy_away.reset_index()\n",
"# without the generated features\n",
"X_show[['date', 'time_series_id', 'ext_predictor', '_automl_target_col']]\n",
"X_show[[\"date\", \"time_series_id\", \"ext_predictor\", \"_automl_target_col\"]]\n",
"# prediction is in _automl_target_col"
]
},
@@ -751,12 +781,14 @@
"source": [
"# generate the same kind of test data we trained on, but with a single time-series and test period twice as long\n",
"# as the forecast_horizon.\n",
"_, _, X_test_long, y_test_long = get_timeseries(train_len=n_train_periods,\n",
"                                                test_len=forecast_horizon*2,\n",
"_, _, X_test_long, y_test_long = get_timeseries(\n",
"    train_len=n_train_periods,\n",
"    test_len=forecast_horizon * 2,\n",
"    time_column_name=TIME_COLUMN_NAME,\n",
"    target_column_name=TARGET_COLUMN_NAME,\n",
"    time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,\n",
"    time_series_number=1)\n",
"    time_series_number=1,\n",
")\n",
"\n",
"print(X_test_long.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].min())\n",
"print(X_test_long.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].max())"
@@ -779,9 +811,11 @@
"metadata": {},
"outputs": [],
"source": [
"# What forecast() function does in this case is equivalent to iterating it twice over the test set as the following. \n",
"# What forecast() function does in this case is equivalent to iterating it twice over the test set as the following.\n",
"y_pred1, _ = fitted_model.forecast(X_test_long[:forecast_horizon])\n",
"y_pred_all, _ = fitted_model.forecast(X_test_long, np.concatenate((y_pred1, np.full(forecast_horizon, np.nan))))\n",
"y_pred_all, _ = fitted_model.forecast(\n",
"    X_test_long, np.concatenate((y_pred1, np.full(forecast_horizon, np.nan)))\n",
")\n",
"np.array_equal(y_pred_all, y_pred_long)"
]
},

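The equivalence asserted in the cell above generalizes: a test window of k horizons can be forecast by k chained calls, each feeding its predictions back as known context via the y vector, with NaN marking the values still to be predicted. A hedged sketch of that loop, assuming an all-NaN context vector is accepted the same way the NaN tail is in the cell:

import numpy as np

# Sketch: iterate forecast() one horizon at a time over the long test set.
n = len(X_test_long)
y_known = np.full(n, np.nan)  # NaN = not yet predicted
for start in range(0, n, forecast_horizon):
    stop = min(start + forecast_horizon, n)
    y_chunk, _ = fitted_model.forecast(X_test_long[:stop], y_known[:stop])
    y_known[start:stop] = y_chunk[start:stop]
# y_known should now match the single long forecast() call above.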
@@ -72,14 +72,14 @@
"dstore = ws.get_default_datastore()\n",
"\n",
"output = {}\n",
"output['SDK version'] = azureml.core.VERSION\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Default datastore name'] = dstore.name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"SDK version\"] = azureml.core.VERSION\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Default datastore name\"] = dstore.name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -102,9 +102,9 @@
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment = Experiment(ws, 'automl-hts')\n",
"experiment = Experiment(ws, \"automl-hts\")\n",
"\n",
"print('Experiment name: ' + experiment.name)"
"print(\"Experiment name: \" + experiment.name)"
]
},
{
@@ -169,7 +169,9 @@
},
"outputs": [],
"source": [
"datastore.upload(src_dir='./Data/', target_path=datastore_path, overwrite=True, show_progress=True) "
"datastore.upload(\n",
"    src_dir=\"./Data/\", target_path=datastore_path, overwrite=True, show_progress=True\n",
")"
]
},
{
@@ -192,8 +194,13 @@
"outputs": [],
"source": [
"from azureml.core.dataset import Dataset\n",
"train_ds = Dataset.Tabular.from_delimited_files(path=datastore.path(\"hts-sample/hts-sample-train.csv\"), validate=False) \n",
"inference_ds = Dataset.Tabular.from_delimited_files(path=datastore.path(\"hts-sample/hts-sample-test.csv\"), validate=False)"
"\n",
"train_ds = Dataset.Tabular.from_delimited_files(\n",
"    path=datastore.path(\"hts-sample/hts-sample-train.csv\"), validate=False\n",
")\n",
"inference_ds = Dataset.Tabular.from_delimited_files(\n",
"    path=datastore.path(\"hts-sample/hts-sample-test.csv\"), validate=False\n",
")"
]
},
{
@@ -256,19 +263,20 @@
"if compute_name in ws.compute_targets:\n",
"    compute_target = ws.compute_targets[compute_name]\n",
"    if compute_target and type(compute_target) is AmlCompute:\n",
"        print('Found compute target: ' + compute_name)\n",
"        print(\"Found compute target: \" + compute_name)\n",
"else:\n",
"    print('Creating a new compute target...')\n",
"    provisioning_config = AmlCompute.provisioning_configuration(vm_size= \"STANDARD_D16S_V3\",\n",
"                                                                max_nodes=20)\n",
"    print(\"Creating a new compute target...\")\n",
"    provisioning_config = AmlCompute.provisioning_configuration(\n",
"        vm_size=\"STANDARD_D16S_V3\", max_nodes=20\n",
"    )\n",
"    # Create the compute target\n",
"    compute_target = ComputeTarget.create(\n",
"        ws, compute_name, provisioning_config)\n",
"    compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
"\n",
"    # Can poll for a minimum number of nodes and for a specific timeout.\n",
"    # If no min node count is provided it will use the scale settings for the cluster\n",
"    compute_target.wait_for_completion(\n",
"        show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
"        show_output=True, min_node_count=None, timeout_in_minutes=20\n",
"    )\n",
"\n",
"    # For a more detailed view of current cluster status, use the 'status' property\n",
"    print(compute_target.status.serialize())"
@@ -330,8 +338,8 @@
"\n",
"\n",
"automl_settings = {\n",
"    \"task\" : \"forecasting\",\n",
"    \"primary_metric\" : \"normalized_root_mean_squared_error\",\n",
"    \"task\": \"forecasting\",\n",
"    \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
"    \"label_column_name\": label_column_name,\n",
"    \"time_column_name\": time_column_name,\n",
"    \"forecast_horizon\": forecast_horizon,\n",
@@ -341,17 +349,17 @@
"    \"pipeline_fetch_max_batch_size\": 15,\n",
"    \"model_explainability\": model_explainability,\n",
"    # The following settings are specific to this sample and should be adjusted according to your own needs.\n",
"    \"iteration_timeout_minutes\" : 10,\n",
"    \"iterations\" : 10,\n",
"    \"n_cross_validations\": 2\n",
"    \"iteration_timeout_minutes\": 10,\n",
"    \"iterations\": 10,\n",
"    \"n_cross_validations\": 2,\n",
"}\n",
"\n",
"hts_parameters = HTSTrainParameters(\n",
"    automl_settings=automl_settings,\n",
"    hierarchy_column_names=hierarchy,\n",
"    training_level=training_level,\n",
"    enable_engineered_explanations=engineered_explanations\n",
")\n"
"    enable_engineered_explanations=engineered_explanations,\n",
")"
]
},
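To make the hierarchy settings above concrete: hierarchy_column_names defines the nesting (for example state above store_id, the levels this sample uses later), and training_level selects the level whose aggregated series get their own models. A small pandas illustration of what aggregating up to a training level means; the column and value names are invented for the example:

import pandas as pd

# Toy hierarchical sales data: state -> store_id.
sales = pd.DataFrame(
    {
        "state": ["CA", "CA", "CA", "CA"],
        "store_id": ["s1", "s1", "s2", "s2"],
        "date": pd.to_datetime(["2020-01-01", "2020-01-02"] * 2),
        "quantity": [3.0, 4.0, 5.0, 6.0],
    }
)

# Training at the "state" level sums the child store series; HTS later
# disaggregates forecasts back down using the chosen allocation method.
state_level = sales.groupby(["state", "date"], as_index=False)["quantity"].sum()
print(state_level)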
{
@@ -457,7 +465,9 @@
"    expl_output = training_run.get_pipeline_output(\"explanations\")\n",
"    expl_output.download(\"training_explanations\")\n",
"else:\n",
"    print(\"Model explanations are available only if model_explainability is set to True.\")"
"    print(\n",
"        \"Model explanations are available only if model_explainability is set to True.\"\n",
"    )"
]
},
{
@@ -476,17 +486,28 @@
"import os\n",
"\n",
"if model_explainability:\n",
"    explanations_dirrectory = os.listdir(os.path.join('training_explanations', 'azureml'))\n",
"    explanations_dirrectory = os.listdir(\n",
"        os.path.join(\"training_explanations\", \"azureml\")\n",
"    )\n",
"    if len(explanations_dirrectory) > 1:\n",
"        print(\"Warning! The directory contains multiple explanations, only the first one will be displayed.\")\n",
"    print('The explanations are located at {}.'.format(explanations_dirrectory[0]))\n",
"        print(\n",
"            \"Warning! The directory contains multiple explanations, only the first one will be displayed.\"\n",
"        )\n",
"    print(\"The explanations are located at {}.\".format(explanations_dirrectory[0]))\n",
"    # Now we will list all the explanations.\n",
"    explanation_path = os.path.join('training_explanations', 'azureml', explanations_dirrectory[0], 'training_explanations')\n",
"    explanation_path = os.path.join(\n",
"        \"training_explanations\",\n",
"        \"azureml\",\n",
"        explanations_dirrectory[0],\n",
"        \"training_explanations\",\n",
"    )\n",
"    print(\"Available explanations\")\n",
"    print(\"==============================\")\n",
"    print(\"\\n\".join(os.listdir(explanation_path)))\n",
"else:\n",
"    print(\"Model explanations are available only if model_explainability is set to True.\")"
"    print(\n",
"        \"Model explanations are available only if model_explainability is set to True.\"\n",
"    )"
]
},
{
@@ -504,11 +525,17 @@
"source": [
"from IPython.display import display\n",
"\n",
"explanation_type = 'raw'\n",
"level = 'state'\n",
"explanation_type = \"raw\"\n",
"level = \"state\"\n",
"\n",
"if model_explainability:\n",
"    display(pd.read_csv(os.path.join(explanation_path, \"{}_explanations_{}.csv\").format(explanation_type, level)))"
"    display(\n",
"        pd.read_csv(\n",
"            os.path.join(explanation_path, \"{}_explanations_{}.csv\").format(\n",
"                explanation_type, level\n",
"            )\n",
"        )\n",
"    )"
]
},
{
@@ -542,7 +569,7 @@
"\n",
"inference_parameters = HTSInferenceParameters(\n",
"    hierarchy_forecast_level=\"store_id\",  # The setting is specific to this dataset and should be changed based on your dataset.\n",
"    allocation_method=\"proportions_of_historical_average\"\n",
"    allocation_method=\"proportions_of_historical_average\",\n",
")\n",
"\n",
"steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
@@ -551,7 +578,7 @@
"    compute_target=compute_target,\n",
"    inference_pipeline_parameters=inference_parameters,\n",
"    node_count=2,\n",
"    process_count_per_node=8\n",
"    process_count_per_node=8,\n",
")"
]
},
@@ -610,7 +637,9 @@
"metadata": {},
"outputs": [],
"source": [
"inference_run = experiment.submit(inference_pipeline, pipeline_parameters={\"hierarchy_forecast_level\": \"state\"})\n",
"inference_run = experiment.submit(\n",
"    inference_pipeline, pipeline_parameters={\"hierarchy_forecast_level\": \"state\"}\n",
")\n",
"inference_run.wait_for_completion(show_output=False)"
]
}

@@ -72,14 +72,14 @@
"dstore = ws.get_default_datastore()\n",
"\n",
"output = {}\n",
"output['SDK version'] = azureml.core.VERSION\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Default datastore name'] = dstore.name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"output[\"SDK version\"] = azureml.core.VERSION\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Default datastore name\"] = dstore.name\n",
"pd.set_option(\"display.max_colwidth\", -1)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
@@ -102,9 +102,9 @@
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment = Experiment(ws, 'automl-many-models')\n",
"experiment = Experiment(ws, \"automl-many-models\")\n",
"\n",
"print('Experiment name: ' + experiment.name)"
"print(\"Experiment name: \" + experiment.name)"
]
},
{
@@ -200,11 +200,13 @@
"container_name = \"automl-sample-notebook-data\"\n",
"account_name = \"automlsamplenotebookdata\"\n",
"\n",
"oj_datastore = Datastore.register_azure_blob_container(workspace=ws, \n",
"                                                       datastore_name=blob_datastore_name, \n",
"oj_datastore = Datastore.register_azure_blob_container(\n",
"    workspace=ws,\n",
"    datastore_name=blob_datastore_name,\n",
"    container_name=container_name,\n",
"    account_name=account_name,\n",
"    create_if_not_exists=True) "
"    create_if_not_exists=True,\n",
")"
]
},
{
@@ -228,11 +230,15 @@
"source": [
"from azureml.core import Dataset\n",
"\n",
"ds_name_small = 'oj-data-small-tabular'\n",
"input_ds_small = Dataset.Tabular.from_delimited_files(path=oj_datastore.path(ds_name_small + '/'), validate=False)\n",
"ds_name_small = \"oj-data-small-tabular\"\n",
"input_ds_small = Dataset.Tabular.from_delimited_files(\n",
"    path=oj_datastore.path(ds_name_small + \"/\"), validate=False\n",
")\n",
"\n",
"inference_name_small = 'oj-inference-small-tabular'\n",
"inference_ds_small = Dataset.Tabular.from_delimited_files(path=oj_datastore.path(inference_name_small + '/'), validate=False)"
"inference_name_small = \"oj-inference-small-tabular\"\n",
"inference_ds_small = Dataset.Tabular.from_delimited_files(\n",
"    path=oj_datastore.path(inference_name_small + \"/\"), validate=False\n",
")"
]
},
{
@@ -277,19 +283,20 @@
"if compute_name in ws.compute_targets:\n",
"    compute_target = ws.compute_targets[compute_name]\n",
"    if compute_target and type(compute_target) is AmlCompute:\n",
"        print('Found compute target: ' + compute_name)\n",
"        print(\"Found compute target: \" + compute_name)\n",
"else:\n",
"    print('Creating a new compute target...')\n",
"    provisioning_config = AmlCompute.provisioning_configuration(vm_size= \"STANDARD_D16S_V3\",\n",
"                                                                max_nodes=20)\n",
"    print(\"Creating a new compute target...\")\n",
"    provisioning_config = AmlCompute.provisioning_configuration(\n",
"        vm_size=\"STANDARD_D16S_V3\", max_nodes=20\n",
"    )\n",
"    # Create the compute target\n",
"    compute_target = ComputeTarget.create(\n",
"        ws, compute_name, provisioning_config)\n",
"    compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
"\n",
"    # Can poll for a minimum number of nodes and for a specific timeout.\n",
"    # If no min node count is provided it will use the scale settings for the cluster\n",
"    compute_target.wait_for_completion(\n",
"        show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
"        show_output=True, min_node_count=None, timeout_in_minutes=20\n",
"    )\n",
"\n",
"    # For a more detailed view of current cluster status, use the 'status' property\n",
"    print(compute_target.status.serialize())"
@@ -333,25 +340,29 @@
},
"outputs": [],
"source": [
"from azureml.train.automl.runtime._many_models.many_models_parameters import ManyModelsTrainParameters\n",
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
"    ManyModelsTrainParameters,\n",
")\n",
"\n",
"partition_column_names = ['Store', 'Brand']\n",
"partition_column_names = [\"Store\", \"Brand\"]\n",
"automl_settings = {\n",
"    \"task\" : 'forecasting',\n",
"    \"primary_metric\" : 'normalized_root_mean_squared_error',\n",
"    \"iteration_timeout_minutes\" : 10, # This needs to be changed based on the dataset. We ask customers to explore how long training takes before setting this value\n",
"    \"iterations\" : 15,\n",
"    \"experiment_timeout_hours\" : 0.25,\n",
"    \"label_column_name\" : 'Quantity',\n",
"    \"n_cross_validations\" : 3,\n",
"    \"time_column_name\": 'WeekStarting',\n",
"    \"drop_column_names\": 'Revenue',\n",
"    \"max_horizon\" : 6,\n",
"    \"task\": \"forecasting\",\n",
"    \"primary_metric\": \"normalized_root_mean_squared_error\",\n",
"    \"iteration_timeout_minutes\": 10,  # This needs to be changed based on the dataset. We ask customers to explore how long training takes before setting this value\n",
"    \"iterations\": 15,\n",
"    \"experiment_timeout_hours\": 0.25,\n",
"    \"label_column_name\": \"Quantity\",\n",
"    \"n_cross_validations\": 3,\n",
"    \"time_column_name\": \"WeekStarting\",\n",
"    \"drop_column_names\": \"Revenue\",\n",
"    \"max_horizon\": 6,\n",
"    \"grain_column_names\": partition_column_names,\n",
"    \"track_child_runs\": False,\n",
"}\n",
"\n",
"mm_paramters = ManyModelsTrainParameters(automl_settings=automl_settings, partition_column_names=partition_column_names)"
"mm_paramters = ManyModelsTrainParameters(\n",
"    automl_settings=automl_settings, partition_column_names=partition_column_names\n",
")"
]
},
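The many-models setup above trains one independent AutoML forecaster per unique combination of partition_column_names. A pandas sketch of how the partitions fall out of the orange-juice data; the values are illustrative:

import pandas as pd

oj = pd.DataFrame(
    {
        "Store": [2, 2, 5, 5],
        "Brand": ["dominicks", "tropicana", "dominicks", "tropicana"],
        "Quantity": [100, 80, 120, 90],
    }
)

# One model is trained per (Store, Brand) group - four models for this toy frame.
for key, group in oj.groupby(["Store", "Brand"]):
    print(key, "->", len(group), "rows")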
{
|
||||
@@ -485,14 +496,14 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# from azureml.pipeline.core import Schedule, ScheduleRecurrence\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"# training_pipeline_id = published_pipeline.id\n",
|
||||
"\n",
|
||||
"# recurrence = ScheduleRecurrence(frequency=\"Month\", interval=1, start_time=\"2020-01-01T09:00:00\")\n",
|
||||
"# recurring_schedule = Schedule.create(ws, name=\"automl_training_recurring_schedule\", \n",
|
||||
"# recurring_schedule = Schedule.create(ws, name=\"automl_training_recurring_schedule\",\n",
|
||||
"# description=\"Schedule Training Pipeline to run on the first day of every month\",\n",
|
||||
"# pipeline_id=training_pipeline_id, \n",
|
||||
"# experiment_name=experiment.name, \n",
|
||||
"# pipeline_id=training_pipeline_id,\n",
|
||||
"# experiment_name=experiment.name,\n",
|
||||
"# recurrence=recurrence)"
|
||||
]
|
||||
},
|
||||
@@ -518,7 +529,10 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.data import OutputFileDatasetConfig\n",
|
||||
"output_inference_data_ds = OutputFileDatasetConfig(name='many_models_inference_output', destination=(dstore, 'oj/inference_data/')).register_on_complete(name='oj_inference_data_ds')"
|
||||
"\n",
|
||||
"output_inference_data_ds = OutputFileDatasetConfig(\n",
|
||||
" name=\"many_models_inference_output\", destination=(dstore, \"oj/inference_data/\")\n",
|
||||
").register_on_complete(name=\"oj_inference_data_ds\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -555,12 +569,14 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder\n",
|
||||
"from azureml.train.automl.runtime._many_models.many_models_parameters import ManyModelsInferenceParameters\n",
|
||||
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
|
||||
" ManyModelsInferenceParameters,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"mm_parameters = ManyModelsInferenceParameters(\n",
|
||||
" partition_column_names=['Store', 'Brand'],\n",
|
||||
" partition_column_names=[\"Store\", \"Brand\"],\n",
|
||||
" time_column_name=\"WeekStarting\",\n",
|
||||
" target_column_name=\"Quantity\"\n",
|
||||
" target_column_name=\"Quantity\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
|
||||
@@ -622,10 +638,23 @@
|
||||
"\n",
|
||||
"forecasting_results_name = \"forecasting_results\"\n",
|
||||
"forecasting_output_name = \"many_models_inference_output\"\n",
|
||||
"forecast_file = get_output_from_mm_pipeline(inference_run, forecasting_results_name, forecasting_output_name)\n",
|
||||
"forecast_file = get_output_from_mm_pipeline(\n",
|
||||
" inference_run, forecasting_results_name, forecasting_output_name\n",
|
||||
")\n",
|
||||
"df = pd.read_csv(forecast_file, delimiter=\" \", header=None)\n",
|
||||
"df.columns = [\"Week Starting\", \"Store\", \"Brand\", \"Quantity\", \"Advert\", \"Price\" , \"Revenue\", \"Predicted\" ]\n",
|
||||
"print(\"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\")\n",
|
||||
"df.columns = [\n",
|
||||
" \"Week Starting\",\n",
|
||||
" \"Store\",\n",
|
||||
" \"Brand\",\n",
|
||||
" \"Quantity\",\n",
|
||||
" \"Advert\",\n",
|
||||
" \"Price\",\n",
|
||||
" \"Revenue\",\n",
|
||||
" \"Predicted\",\n",
|
||||
"]\n",
|
||||
"print(\n",
|
||||
" \"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\"\n",
|
||||
")\n",
|
||||
"df.head(10)"
|
||||
]
|
||||
},
|
||||
@@ -672,14 +701,14 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# from azureml.pipeline.core import Schedule, ScheduleRecurrence\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"# forecasting_pipeline_id = published_pipeline.id\n",
|
||||
"\n",
|
||||
"# recurrence = ScheduleRecurrence(frequency=\"Month\", interval=1, start_time=\"2020-01-01T09:00:00\")\n",
|
||||
"# recurring_schedule = Schedule.create(ws, name=\"automl_forecasting_recurring_schedule\", \n",
|
||||
"# recurring_schedule = Schedule.create(ws, name=\"automl_forecasting_recurring_schedule\",\n",
|
||||
"# description=\"Schedule Forecasting Pipeline to run on the first day of every week\",\n",
|
||||
"# pipeline_id=forecasting_pipeline_id, \n",
|
||||
"# experiment_name=experiment.name, \n",
|
||||
"# pipeline_id=forecasting_pipeline_id,\n",
|
||||
"# experiment_name=experiment.name,\n",
|
||||
"# recurrence=recurrence)"
|
||||
]
|
||||
}
|
||||
|
||||
@@ -81,7 +81,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
|
||||
"print(\"This notebook was created using version 1.36.0 of the Azure ML SDK\")\n",
|
||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||
]
|
||||
},
|
||||
@@ -101,19 +101,19 @@
|
||||
"ws = Workspace.from_config()\n",
|
||||
"\n",
|
||||
"# choose a name for the run history container in the workspace\n",
|
||||
"experiment_name = 'automl-ojforecasting'\n",
|
||||
"experiment_name = \"automl-ojforecasting\"\n",
|
||||
"\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output['Subscription ID'] = ws.subscription_id\n",
|
||||
"output['Workspace'] = ws.name\n",
|
||||
"output['SKU'] = ws.sku\n",
|
||||
"output['Resource Group'] = ws.resource_group\n",
|
||||
"output['Location'] = ws.location\n",
|
||||
"output['Run History Name'] = experiment_name\n",
|
||||
"pd.set_option('display.max_colwidth', -1)\n",
|
||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||
"output[\"Workspace\"] = ws.name\n",
|
||||
"output[\"SKU\"] = ws.sku\n",
|
||||
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||
"output[\"Location\"] = ws.location\n",
|
||||
"output[\"Run History Name\"] = experiment_name\n",
|
||||
"pd.set_option(\"display.max_colwidth\", -1)\n",
|
||||
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
@@ -146,10 +146,11 @@
|
||||
"# Verify that cluster does not exist already\n",
|
||||
"try:\n",
|
||||
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
|
||||
" print('Found existing cluster, use it.')\n",
|
||||
" print(\"Found existing cluster, use it.\")\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D12_V2',\n",
|
||||
" max_nodes=6)\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(\n",
|
||||
" vm_size=\"STANDARD_D12_V2\", max_nodes=6\n",
|
||||
" )\n",
|
||||
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
|
||||
"\n",
|
||||
"compute_target.wait_for_completion(show_output=True)"
|
||||
@@ -169,11 +170,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"time_column_name = 'WeekStarting'\n",
|
||||
"time_column_name = \"WeekStarting\"\n",
|
||||
"data = pd.read_csv(\"dominicks_OJ.csv\", parse_dates=[time_column_name])\n",
|
||||
"\n",
|
||||
"# Drop the columns 'logQuantity' as it is a leaky feature.\n",
|
||||
"data.drop('logQuantity', axis=1, inplace=True)\n",
|
||||
"data.drop(\"logQuantity\", axis=1, inplace=True)\n",
|
||||
"\n",
|
||||
"data.head()"
|
||||
]
|
||||
@@ -193,9 +194,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"time_series_id_column_names = ['Store', 'Brand']\n",
|
||||
"time_series_id_column_names = [\"Store\", \"Brand\"]\n",
|
||||
"nseries = data.groupby(time_series_id_column_names).ngroups\n",
|
||||
"print('Data contains {0} individual time-series.'.format(nseries))"
|
||||
"print(\"Data contains {0} individual time-series.\".format(nseries))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -214,7 +215,7 @@
|
||||
"use_stores = [2, 5, 8]\n",
|
||||
"data_subset = data[data.Store.isin(use_stores)]\n",
|
||||
"nseries = data_subset.groupby(time_series_id_column_names).ngroups\n",
|
||||
"print('Data subset contains {0} individual time-series.'.format(nseries))"
|
||||
"print(\"Data subset contains {0} individual time-series.\".format(nseries))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -233,14 +234,17 @@
|
||||
"source": [
|
||||
"n_test_periods = 20\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def split_last_n_by_series_id(df, n):\n",
|
||||
" \"\"\"Group df by series identifiers and split on last n rows for each group.\"\"\"\n",
|
||||
" df_grouped = (df.sort_values(time_column_name) # Sort by ascending time\n",
|
||||
" .groupby(time_series_id_column_names, group_keys=False))\n",
|
||||
" df_grouped = df.sort_values(time_column_name).groupby( # Sort by ascending time\n",
|
||||
" time_series_id_column_names, group_keys=False\n",
|
||||
" )\n",
|
||||
" df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-n])\n",
|
||||
" df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-n:])\n",
|
||||
" return df_head, df_tail\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"train, test = split_last_n_by_series_id(data_subset, n_test_periods)"
|
||||
]
|
||||
},
|
||||
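A toy check of the helper above, with made-up values: for n=2, the last two rows of each (Store, Brand) group land in the tail frame.

```python
# Toy data: two series of three weekly observations each.
toy = pd.DataFrame(
    {
        "Store": [2, 2, 2, 5, 5, 5],
        "Brand": ["dominicks"] * 6,
        "WeekStarting": list(pd.date_range("1990-06-14", periods=3, freq="W-THU")) * 2,
        "Quantity": [10, 11, 12, 20, 21, 22],
    }
)
head, tail = split_last_n_by_series_id(toy, 2)
print(tail["Quantity"].tolist())  # [11, 12, 21, 22]
```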
@@ -258,8 +262,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"train.to_csv (r'./dominicks_OJ_train.csv', index = None, header=True)\n",
|
||||
"test.to_csv (r'./dominicks_OJ_test.csv', index = None, header=True)"
|
||||
"train.to_csv(r\"./dominicks_OJ_train.csv\", index=None, header=True)\n",
|
||||
"test.to_csv(r\"./dominicks_OJ_test.csv\", index=None, header=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -269,7 +273,12 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"datastore = ws.get_default_datastore()\n",
|
||||
"datastore.upload_files(files = ['./dominicks_OJ_train.csv', './dominicks_OJ_test.csv'], target_path = 'dataset/', overwrite = True,show_progress = True)"
|
||||
"datastore.upload_files(\n",
|
||||
" files=[\"./dominicks_OJ_train.csv\", \"./dominicks_OJ_test.csv\"],\n",
|
||||
" target_path=\"dataset/\",\n",
|
||||
" overwrite=True,\n",
|
||||
" show_progress=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -286,8 +295,13 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.dataset import Dataset\n",
|
||||
"train_dataset = Dataset.Tabular.from_delimited_files(path=datastore.path('dataset/dominicks_OJ_train.csv'))\n",
|
||||
"test_dataset = Dataset.Tabular.from_delimited_files(path=datastore.path('dataset/dominicks_OJ_test.csv'))"
|
||||
"\n",
|
||||
"train_dataset = Dataset.Tabular.from_delimited_files(\n",
|
||||
" path=datastore.path(\"dataset/dominicks_OJ_train.csv\")\n",
|
||||
")\n",
|
||||
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
|
||||
" path=datastore.path(\"dataset/dominicks_OJ_test.csv\")\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -323,7 +337,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"target_column_name = 'Quantity'"
|
||||
"target_column_name = \"Quantity\""
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -351,13 +365,17 @@
|
||||
"source": [
|
||||
"featurization_config = FeaturizationConfig()\n",
|
||||
"# Force the CPWVOL5 feature to be numeric type.\n",
|
||||
"featurization_config.add_column_purpose('CPWVOL5', 'Numeric')\n",
|
||||
"featurization_config.add_column_purpose(\"CPWVOL5\", \"Numeric\")\n",
|
||||
"# Fill missing values in the target column, Quantity, with zeros.\n",
|
||||
"featurization_config.add_transformer_params('Imputer', ['Quantity'], {\"strategy\": \"constant\", \"fill_value\": 0})\n",
|
||||
"featurization_config.add_transformer_params(\n",
|
||||
" \"Imputer\", [\"Quantity\"], {\"strategy\": \"constant\", \"fill_value\": 0}\n",
|
||||
")\n",
|
||||
"# Fill missing values in the INCOME column with median value.\n",
|
||||
"featurization_config.add_transformer_params('Imputer', ['INCOME'], {\"strategy\": \"median\"})\n",
|
||||
"featurization_config.add_transformer_params(\n",
|
||||
" \"Imputer\", [\"INCOME\"], {\"strategy\": \"median\"}\n",
|
||||
")\n",
|
||||
"# Fill missing values in the Price column with forward fill (last value carried forward).\n",
|
||||
"featurization_config.add_transformer_params('Imputer', ['Price'], {\"strategy\": \"ffill\"})"
|
||||
"featurization_config.add_transformer_params(\"Imputer\", [\"Price\"], {\"strategy\": \"ffill\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
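The same FeaturizationConfig API extends to other columns; a hypothetical example (the column choice and strategy here are illustrative, not part of this notebook):

```python
# Hypothetical: impute a categorical column with its most frequent value.
featurization_config.add_transformer_params(
    "Imputer", ["Advert"], {"strategy": "most_frequent"}
)
```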
@@ -423,16 +441,18 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
|
||||
"\n",
|
||||
"forecasting_parameters = ForecastingParameters(\n",
|
||||
" time_column_name=time_column_name,\n",
|
||||
" forecast_horizon=n_test_periods,\n",
|
||||
" time_series_id_column_names=time_series_id_column_names,\n",
|
||||
" freq='W-THU' # Set the forecast frequency to be weekly (start on each Thursday)\n",
|
||||
" freq=\"W-THU\", # Set the forecast frequency to be weekly (start on each Thursday)\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"automl_config = AutoMLConfig(task='forecasting',\n",
|
||||
" debug_log='automl_oj_sales_errors.log',\n",
|
||||
" primary_metric='normalized_mean_absolute_error',\n",
|
||||
"automl_config = AutoMLConfig(\n",
|
||||
" task=\"forecasting\",\n",
|
||||
" debug_log=\"automl_oj_sales_errors.log\",\n",
|
||||
" primary_metric=\"normalized_mean_absolute_error\",\n",
|
||||
" experiment_timeout_hours=0.25,\n",
|
||||
" training_data=train_dataset,\n",
|
||||
" label_column_name=target_column_name,\n",
|
||||
@@ -442,7 +462,8 @@
|
||||
" n_cross_validations=3,\n",
|
||||
" verbosity=logging.INFO,\n",
|
||||
" max_cores_per_iteration=-1,\n",
|
||||
" forecasting_parameters=forecasting_parameters)"
|
||||
" forecasting_parameters=forecasting_parameters,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
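Before training, it is worth confirming that the freq alias matches the spacing actually present in the data; a quick sketch using pandas:

```python
# The inferred spacing of any single series should match freq="W-THU".
_, one_series = next(iter(train.groupby(["Store", "Brand"])))
print(pd.infer_freq(one_series["WeekStarting"].sort_values()))  # expect "W-THU"
```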
@@ -487,7 +508,7 @@
|
||||
"source": [
|
||||
"best_run, fitted_model = remote_run.get_output()\n",
|
||||
"print(fitted_model.steps)\n",
|
||||
"model_name = best_run.properties['model_name']"
|
||||
"model_name = best_run.properties[\"model_name\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -505,7 +526,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"custom_featurizer = fitted_model.named_steps['timeseriestransformer']"
|
||||
"custom_featurizer = fitted_model.named_steps[\"timeseriestransformer\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
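The transformer can also report what it did per input column; a sketch, assuming the get_featurization_summary helper exposed on AutoML featurizers:

```python
# One record per raw column: imputed, dropped, engineered, etc.
featurization_summary = custom_featurizer.get_featurization_summary()
pd.DataFrame.from_records(featurization_summary)
```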
@@ -559,15 +580,18 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from run_forecast import run_remote_inference\n",
|
||||
"remote_run_infer = run_remote_inference(test_experiment=test_experiment, \n",
|
||||
"\n",
|
||||
"remote_run_infer = run_remote_inference(\n",
|
||||
" test_experiment=test_experiment,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" train_run=best_run,\n",
|
||||
" test_dataset=test_dataset,\n",
|
||||
" target_column_name=target_column_name)\n",
|
||||
" target_column_name=target_column_name,\n",
|
||||
")\n",
|
||||
"remote_run_infer.wait_for_completion(show_output=False)\n",
|
||||
"\n",
|
||||
"# download the forecast file to the local machine\n",
|
||||
"remote_run_infer.download_file('outputs/predictions.csv', 'predictions.csv')"
|
||||
"remote_run_infer.download_file(\"outputs/predictions.csv\", \"predictions.csv\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -588,7 +612,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# load forecast data frame\n",
|
||||
"fcst_df = pd.read_csv('predictions.csv', parse_dates=[time_column_name])\n",
|
||||
"fcst_df = pd.read_csv(\"predictions.csv\", parse_dates=[time_column_name])\n",
|
||||
"fcst_df.head()"
|
||||
]
|
||||
},
|
||||
@@ -605,18 +629,23 @@
|
||||
"# use automl scoring module\n",
|
||||
"scores = scoring.score_regression(\n",
|
||||
" y_test=fcst_df[target_column_name],\n",
|
||||
" y_pred=fcst_df['predicted'],\n",
|
||||
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
|
||||
" y_pred=fcst_df[\"predicted\"],\n",
|
||||
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(\"[Test data scores]\\n\")\n",
|
||||
"for key, value in scores.items(): \n",
|
||||
" print('{}: {:.3f}'.format(key, value))\n",
|
||||
" \n",
|
||||
"for key, value in scores.items():\n",
|
||||
" print(\"{}: {:.3f}\".format(key, value))\n",
|
||||
"\n",
|
||||
"# Plot outputs\n",
|
||||
"%matplotlib inline\n",
|
||||
"test_pred = plt.scatter(fcst_df[target_column_name], fcst_df['predicted'], color='b')\n",
|
||||
"test_test = plt.scatter(fcst_df[target_column_name], fcst_df[target_column_name], color='g')\n",
|
||||
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
|
||||
"test_pred = plt.scatter(fcst_df[target_column_name], fcst_df[\"predicted\"], color=\"b\")\n",
|
||||
"test_test = plt.scatter(\n",
|
||||
" fcst_df[target_column_name], fcst_df[target_column_name], color=\"g\"\n",
|
||||
")\n",
|
||||
"plt.legend(\n",
|
||||
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
|
||||
")\n",
|
||||
"plt.show()"
|
||||
]
|
||||
},
|
||||
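As a cross-check on the scoring module, one of the reported metrics (mean absolute error) is easy to recompute by hand from the same frame:

```python
# Manual mean absolute error; should match the scored value above.
import numpy as np

mae = np.mean(np.abs(fcst_df[target_column_name] - fcst_df["predicted"]))
print("manual MAE: {:.3f}".format(mae))
```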
@@ -640,9 +669,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"description = 'AutoML OJ forecaster'\n",
|
||||
"description = \"AutoML OJ forecaster\"\n",
|
||||
"tags = None\n",
|
||||
"model = remote_run.register_model(model_name = model_name, description = description, tags = tags)\n",
|
||||
"model = remote_run.register_model(\n",
|
||||
" model_name=model_name, description=description, tags=tags\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(remote_run.model_id)"
|
||||
]
|
||||
@@ -662,8 +693,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"script_file_name = 'score_fcast.py'\n",
|
||||
"best_run.download_file('outputs/scoring_file_v_1_0_0.py', script_file_name)"
|
||||
"script_file_name = \"score_fcast.py\"\n",
|
||||
"best_run.download_file(\"outputs/scoring_file_v_1_0_0.py\", script_file_name)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -684,15 +715,18 @@
|
||||
"from azureml.core.webservice import Webservice\n",
|
||||
"from azureml.core.model import Model\n",
|
||||
"\n",
|
||||
"inference_config = InferenceConfig(environment = best_run.get_environment(), \n",
|
||||
" entry_script = script_file_name)\n",
|
||||
"inference_config = InferenceConfig(\n",
|
||||
" environment=best_run.get_environment(), entry_script=script_file_name\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"aciconfig = AciWebservice.deploy_configuration(cpu_cores = 2, \n",
|
||||
" memory_gb = 4, \n",
|
||||
" tags = {'type': \"automl-forecasting\"},\n",
|
||||
" description = \"Automl forecasting sample service\")\n",
|
||||
"aciconfig = AciWebservice.deploy_configuration(\n",
|
||||
" cpu_cores=2,\n",
|
||||
" memory_gb=4,\n",
|
||||
" tags={\"type\": \"automl-forecasting\"},\n",
|
||||
" description=\"Automl forecasting sample service\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"aci_service_name = 'automl-oj-forecast-01'\n",
|
||||
"aci_service_name = \"automl-oj-forecast-01\"\n",
|
||||
"print(aci_service_name)\n",
|
||||
"aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)\n",
|
||||
"aci_service.wait_for_deployment(True)\n",
|
||||
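If the deployment does not reach a Healthy state, the standard Webservice accessors surface the container logs:

```python
# Inspect deployment health; get_logs() shows container start-up errors.
print(aci_service.state)
print(aci_service.get_logs())
```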
@@ -722,22 +756,27 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"X_query = test.copy()\n",
|
||||
"X_query.pop(target_column_name)\n",
|
||||
"# We have to convert datetime to string, because Timestamps cannot be serialized to JSON.\n",
|
||||
"X_query[time_column_name] = X_query[time_column_name].astype(str)\n",
|
||||
"# The Service object accept the complex dictionary, which is internally converted to JSON string.\n",
|
||||
"# The section 'data' contains the data frame in the form of dictionary.\n",
|
||||
"sample_quantiles=[0.025,0.975]\n",
|
||||
"test_sample = json.dumps({'data': X_query.to_dict(orient='records'), 'quantiles': sample_quantiles})\n",
|
||||
"response = aci_service.run(input_data = test_sample)\n",
|
||||
"sample_quantiles = [0.025, 0.975]\n",
|
||||
"test_sample = json.dumps(\n",
|
||||
" {\"data\": X_query.to_dict(orient=\"records\"), \"quantiles\": sample_quantiles}\n",
|
||||
")\n",
|
||||
"response = aci_service.run(input_data=test_sample)\n",
|
||||
"# translate from networkese to datascientese\n",
|
||||
"try: \n",
|
||||
"try:\n",
|
||||
" res_dict = json.loads(response)\n",
|
||||
" y_fcst_all = pd.DataFrame(res_dict['index'])\n",
|
||||
" y_fcst_all[time_column_name] = pd.to_datetime(y_fcst_all[time_column_name], unit = 'ms')\n",
|
||||
" y_fcst_all['forecast'] = res_dict['forecast']\n",
|
||||
" y_fcst_all['prediction_interval'] = res_dict['prediction_interval']\n",
|
||||
" y_fcst_all = pd.DataFrame(res_dict[\"index\"])\n",
|
||||
" y_fcst_all[time_column_name] = pd.to_datetime(\n",
|
||||
" y_fcst_all[time_column_name], unit=\"ms\"\n",
|
||||
" )\n",
|
||||
" y_fcst_all[\"forecast\"] = res_dict[\"forecast\"]\n",
|
||||
" y_fcst_all[\"prediction_interval\"] = res_dict[\"prediction_interval\"]\n",
|
||||
"except:\n",
|
||||
" print(res_dict)"
|
||||
]
|
||||
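The prediction_interval values come back as "[lower, upper]" strings (see the forecasting script below); a sketch for splitting them into numeric columns:

```python
# Split "[lower, upper]" interval strings into numeric bound columns.
bounds = (
    y_fcst_all["prediction_interval"]
    .str.strip("[]")
    .str.split(",", expand=True)
    .astype(float)
)
y_fcst_all["pi_lower"] = bounds[0]
y_fcst_all["pi_upper"] = bounds[1]
```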
@@ -764,7 +803,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"serv = Webservice(ws, 'automl-oj-forecast-01')\n",
|
||||
"serv = Webservice(ws, \"automl-oj-forecast-01\")\n",
|
||||
"serv.delete() # don't do it accidentally"
|
||||
]
|
||||
}
|
||||
|
||||
@@ -11,11 +11,14 @@ from pandas.tseries.frequencies import to_offset
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
'--target_column_name', type=str, dest='target_column_name',
|
||||
help='Target Column Name')
|
||||
"--target_column_name",
|
||||
type=str,
|
||||
dest="target_column_name",
|
||||
help="Target Column Name",
|
||||
)
|
||||
parser.add_argument(
|
||||
'--test_dataset', type=str, dest='test_dataset',
|
||||
help='Test Dataset')
|
||||
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
target_column_name = args.target_column_name
|
||||
@@ -31,25 +34,27 @@ X_test = test_dataset.to_pandas_dataframe().reset_index(drop=True)
|
||||
y_test = X_test.pop(target_column_name).values
|
||||
|
||||
# generate forecast
|
||||
fitted_model = joblib.load('model.pkl')
|
||||
fitted_model = joblib.load("model.pkl")
|
||||
# Default quantiles: 0.025 and 0.975 bound a 95% prediction interval; 0.5 gives the point forecast
|
||||
quantiles = [0.025, 0.5, 0.975]
|
||||
predicted_column_name = 'predicted'
|
||||
PI = 'prediction_interval'
|
||||
predicted_column_name = "predicted"
|
||||
PI = "prediction_interval"
|
||||
fitted_model.quantiles = quantiles
|
||||
pred_quantiles = fitted_model.forecast_quantiles(X_test)
|
||||
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(lambda x: '[{}, {}]'.format(x[0],
|
||||
x[1]), axis=1)
|
||||
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
|
||||
lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
|
||||
)
|
||||
X_test[target_column_name] = y_test
|
||||
X_test[PI] = pred_quantiles[PI]
|
||||
X_test[predicted_column_name] = pred_quantiles[0.5]
|
||||
# drop rows where prediction or actuals are nan
|
||||
# happens because of missing actuals
|
||||
# or at edges of time due to lags/rolling windows
|
||||
clean = X_test[X_test[[target_column_name,
|
||||
predicted_column_name]].notnull().all(axis=1)]
|
||||
clean = X_test[
|
||||
X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
|
||||
]
|
||||
|
||||
file_name = 'outputs/predictions.csv'
|
||||
file_name = "outputs/predictions.csv"
|
||||
export_csv = clean.to_csv(file_name, header=True, index=False)  # write predictions without the index
|
||||
|
||||
# Upload the predictions into artifacts
|
||||
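The upload call itself is outside this hunk; a hypothetical sketch of what it usually looks like in these scripts, assuming a run handle from Run.get_context():

```python
# Hypothetical illustration of the artifact upload referenced above.
from azureml.core import Run

run = Run.get_context()
run.upload_file(name="predictions.csv", path_or_stream=file_name)
```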
|
||||
@@ -3,36 +3,47 @@ import shutil
|
||||
from azureml.core import ScriptRunConfig
|
||||
|
||||
|
||||
def run_remote_inference(test_experiment, compute_target, train_run,
|
||||
test_dataset, target_column_name, inference_folder='./forecast'):
|
||||
def run_remote_inference(
|
||||
test_experiment,
|
||||
compute_target,
|
||||
train_run,
|
||||
test_dataset,
|
||||
target_column_name,
|
||||
inference_folder="./forecast",
|
||||
):
|
||||
# Create a local directory to copy the model.pkl and forecasting_script.py files into.
|
||||
# These files will be uploaded to and executed on the compute instance.
|
||||
os.makedirs(inference_folder, exist_ok=True)
|
||||
shutil.copy('forecasting_script.py', inference_folder)
|
||||
shutil.copy("forecasting_script.py", inference_folder)
|
||||
|
||||
train_run.download_file('outputs/model.pkl',
|
||||
os.path.join(inference_folder, 'model.pkl'))
|
||||
train_run.download_file(
|
||||
"outputs/model.pkl", os.path.join(inference_folder, "model.pkl")
|
||||
)
|
||||
|
||||
inference_env = train_run.get_environment()
|
||||
|
||||
config = ScriptRunConfig(source_directory=inference_folder,
|
||||
script='forecasting_script.py',
|
||||
arguments=['--target_column_name',
|
||||
config = ScriptRunConfig(
|
||||
source_directory=inference_folder,
|
||||
script="forecasting_script.py",
|
||||
arguments=[
|
||||
"--target_column_name",
|
||||
target_column_name,
|
||||
'--test_dataset',
|
||||
test_dataset.as_named_input(test_dataset.name)],
|
||||
"--test_dataset",
|
||||
test_dataset.as_named_input(test_dataset.name),
|
||||
],
|
||||
compute_target=compute_target,
|
||||
environment=inference_env)
|
||||
environment=inference_env,
|
||||
)
|
||||
|
||||
run = test_experiment.submit(config,
|
||||
tags={'training_run_id':
|
||||
train_run.id,
|
||||
'run_algorithm':
|
||||
train_run.properties['run_algorithm'],
|
||||
'valid_score':
|
||||
train_run.properties['score'],
|
||||
'primary_metric':
|
||||
train_run.properties['primary_metric']})
|
||||
run = test_experiment.submit(
|
||||
config,
|
||||
tags={
|
||||
"training_run_id": train_run.id,
|
||||
"run_algorithm": train_run.properties["run_algorithm"],
|
||||
"valid_score": train_run.properties["score"],
|
||||
"primary_metric": train_run.properties["primary_metric"],
|
||||
},
|
||||
)
|
||||
|
||||
run.log("run_algorithm", run.tags['run_algorithm'])
|
||||
run.log("run_algorithm", run.tags["run_algorithm"])
|
||||
return run
|
||||
|
||||
@@ -56,16 +56,18 @@
|
||||
"from statsmodels.graphics.tsaplots import plot_acf, plot_pacf\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"from pandas.plotting import register_matplotlib_converters\n",
|
||||
"\n",
|
||||
"register_matplotlib_converters() # fixes the future warning issue\n",
|
||||
"\n",
|
||||
"from helper_functions import unit_root_test_wrapper\n",
|
||||
"from statsmodels.tools.sm_exceptions import InterpolationWarning\n",
|
||||
"warnings.simplefilter('ignore', InterpolationWarning)\n",
|
||||
"\n",
|
||||
"warnings.simplefilter(\"ignore\", InterpolationWarning)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# set printing options\n",
|
||||
"pd.set_option('display.max_columns', 500)\n",
|
||||
"pd.set_option('display.width', 1000)"
|
||||
"pd.set_option(\"display.max_columns\", 500)\n",
|
||||
"pd.set_option(\"display.width\", 1000)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -75,15 +77,15 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# load data\n",
|
||||
"main_data_loc = 'data'\n",
|
||||
"train_file_name = 'S4248SM144SCEN.csv'\n",
|
||||
"main_data_loc = \"data\"\n",
|
||||
"train_file_name = \"S4248SM144SCEN.csv\"\n",
|
||||
"\n",
|
||||
"TARGET_COLNAME = 'S4248SM144SCEN'\n",
|
||||
"TIME_COLNAME = 'observation_date'\n",
|
||||
"COVID_PERIOD_START = '2020-03-01'\n",
|
||||
"TARGET_COLNAME = \"S4248SM144SCEN\"\n",
|
||||
"TIME_COLNAME = \"observation_date\"\n",
|
||||
"COVID_PERIOD_START = \"2020-03-01\"\n",
|
||||
"\n",
|
||||
"df = pd.read_csv(os.path.join(main_data_loc, train_file_name))\n",
|
||||
"df[TIME_COLNAME] = pd.to_datetime(df[TIME_COLNAME], format='%Y-%m-%d')\n",
|
||||
"df[TIME_COLNAME] = pd.to_datetime(df[TIME_COLNAME], format=\"%Y-%m-%d\")\n",
|
||||
"df.sort_values(by=TIME_COLNAME, inplace=True)\n",
|
||||
"df.set_index(TIME_COLNAME, inplace=True)\n",
|
||||
"df.head(2)"
|
||||
@@ -96,9 +98,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# plot the entire dataset\n",
|
||||
"fig, ax = plt.subplots(figsize=(6,2), dpi=180)\n",
|
||||
"fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
|
||||
"ax.plot(df)\n",
|
||||
"ax.title.set_text('Original Data Series')\n",
|
||||
"ax.title.set_text(\"Original Data Series\")\n",
|
||||
"locs, labels = plt.xticks()\n",
|
||||
"plt.xticks(rotation=45)"
|
||||
]
|
||||
@@ -117,9 +119,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# plot the entire dataset in first differences\n",
|
||||
"fig, ax = plt.subplots(figsize=(6,2), dpi=180)\n",
|
||||
"fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
|
||||
"ax.plot(df.diff().dropna())\n",
|
||||
"ax.title.set_text('Data in first differences')\n",
|
||||
"ax.title.set_text(\"Data in first differences\")\n",
|
||||
"locs, labels = plt.xticks()\n",
|
||||
"plt.xticks(rotation=45)"
|
||||
]
|
||||
@@ -151,9 +153,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# plot the entire dataset in first differences\n",
|
||||
"fig, ax = plt.subplots(figsize=(6,2), dpi=180)\n",
|
||||
"fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
|
||||
"ax.plot(df.diff().dropna())\n",
|
||||
"ax.title.set_text('Data in first differences')\n",
|
||||
"ax.title.set_text(\"Data in first differences\")\n",
|
||||
"locs, labels = plt.xticks()\n",
|
||||
"plt.xticks(rotation=45)"
|
||||
]
|
||||
@@ -175,9 +177,9 @@
|
||||
"df = df[:COVID_PERIOD_START]\n",
|
||||
"\n",
|
||||
"# plot the entire dataset in first differences\n",
|
||||
"fig, ax = plt.subplots(figsize=(6,2), dpi=180)\n",
|
||||
"ax.plot(df['2015-01-01':].diff().dropna())\n",
|
||||
"ax.title.set_text('Data in first differences')\n",
|
||||
"fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
|
||||
"ax.plot(df[\"2015-01-01\":].diff().dropna())\n",
|
||||
"ax.title.set_text(\"Data in first differences\")\n",
|
||||
"locs, labels = plt.xticks()\n",
|
||||
"plt.xticks(rotation=45)"
|
||||
]
|
||||
@@ -245,10 +247,10 @@
|
||||
"source": [
|
||||
"# unit root tests\n",
|
||||
"test = unit_root_test_wrapper(df[TARGET_COLNAME])\n",
|
||||
"print('---------------', '\\n')\n",
|
||||
"print('Summary table', '\\n', test['summary'], '\\n')\n",
|
||||
"print('Is the {} series stationary?: {}'.format(TARGET_COLNAME, test['stationary']))\n",
|
||||
"print('---------------', '\\n')"
|
||||
"print(\"---------------\", \"\\n\")\n",
|
||||
"print(\"Summary table\", \"\\n\", test[\"summary\"], \"\\n\")\n",
|
||||
"print(\"Is the {} series stationary?: {}\".format(TARGET_COLNAME, test[\"stationary\"]))\n",
|
||||
"print(\"---------------\", \"\\n\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -285,10 +287,10 @@
|
||||
"source": [
|
||||
"# unit root tests\n",
|
||||
"test = unit_root_test_wrapper(df[TARGET_COLNAME].diff().dropna())\n",
|
||||
"print('---------------', '\\n')\n",
|
||||
"print('Summary table', '\\n', test['summary'], '\\n')\n",
|
||||
"print('Is the {} series stationary?: {}'.format(TARGET_COLNAME, test['stationary']))\n",
|
||||
"print('---------------', '\\n')"
|
||||
"print(\"---------------\", \"\\n\")\n",
|
||||
"print(\"Summary table\", \"\\n\", test[\"summary\"], \"\\n\")\n",
|
||||
"print(\"Is the {} series stationary?: {}\".format(TARGET_COLNAME, test[\"stationary\"]))\n",
|
||||
"print(\"---------------\", \"\\n\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -305,13 +307,13 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# plot original and stationary data\n",
|
||||
"fig = plt.figure(figsize=(10,10))\n",
|
||||
"fig = plt.figure(figsize=(10, 10))\n",
|
||||
"ax1 = fig.add_subplot(211)\n",
|
||||
"ax1.plot(df[TARGET_COLNAME], '-b')\n",
|
||||
"ax1.plot(df[TARGET_COLNAME], \"-b\")\n",
|
||||
"ax2 = fig.add_subplot(212)\n",
|
||||
"ax2.plot(df[TARGET_COLNAME].diff().dropna(), '-b')\n",
|
||||
"ax1.title.set_text('Original data')\n",
|
||||
"ax2.title.set_text('Data in first differences')"
|
||||
"ax2.plot(df[TARGET_COLNAME].diff().dropna(), \"-b\")\n",
|
||||
"ax1.title.set_text(\"Original data\")\n",
|
||||
"ax2.title.set_text(\"Data in first differences\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -372,7 +374,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Plot the ACF/PACF for the series in differences\n",
|
||||
"fig, ax = plt.subplots(1,2,figsize=(10,5))\n",
|
||||
"fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n",
|
||||
"plot_acf(df[TARGET_COLNAME].diff().dropna().values.squeeze(), ax=ax[0])\n",
|
||||
"plot_pacf(df[TARGET_COLNAME].diff().dropna().values.squeeze(), ax=ax[1])\n",
|
||||
"plt.show()"
|
||||
|
||||
@@ -51,7 +51,7 @@
|
||||
"from azureml.core.compute import AmlCompute\n",
|
||||
"from azureml.core.compute import ComputeTarget\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"from helper_functions import (ts_train_test_split, compute_metrics)\n",
|
||||
"from helper_functions import ts_train_test_split, compute_metrics\n",
|
||||
"\n",
|
||||
"import azureml.core\n",
|
||||
"from azureml.core.workspace import Workspace\n",
|
||||
@@ -61,8 +61,8 @@
|
||||
"\n",
|
||||
"# set printing options\n",
|
||||
"np.set_printoptions(precision=4, suppress=True, linewidth=100)\n",
|
||||
"pd.set_option('display.max_columns', 500)\n",
|
||||
"pd.set_option('display.width', 1000)"
|
||||
"pd.set_option(\"display.max_columns\", 500)\n",
|
||||
"pd.set_option(\"display.width\", 1000)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -81,27 +81,32 @@
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"amlcompute_cluster_name = \"recipe-cluster\"\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"found = False\n",
|
||||
"# Check if this compute target already exists in the workspace.\n",
|
||||
"cts = ws.compute_targets\n",
|
||||
"if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'AmlCompute':\n",
|
||||
"if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == \"AmlCompute\":\n",
|
||||
" found = True\n",
|
||||
" print('Found existing compute target.')\n",
|
||||
" print(\"Found existing compute target.\")\n",
|
||||
" compute_target = cts[amlcompute_cluster_name]\n",
|
||||
"\n",
|
||||
"if not found:\n",
|
||||
" print('Creating a new compute target...')\n",
|
||||
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\",\n",
|
||||
" max_nodes = 6)\n",
|
||||
" print(\"Creating a new compute target...\")\n",
|
||||
" provisioning_config = AmlCompute.provisioning_configuration(\n",
|
||||
" vm_size=\"STANDARD_D2_V2\", max_nodes=6\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # Create the cluster.\\n\",\n",
|
||||
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config)\n",
|
||||
" compute_target = ComputeTarget.create(\n",
|
||||
" ws, amlcompute_cluster_name, provisioning_config\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print('Checking cluster status...')\n",
|
||||
"print(\"Checking cluster status...\")\n",
|
||||
"# Can poll for a minimum number of nodes and for a specific timeout.\n",
|
||||
"# If no min_node_count is provided, it will use the scale settings for the cluster.\n",
|
||||
"compute_target.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20)"
|
||||
"compute_target.wait_for_completion(\n",
|
||||
" show_output=True, min_node_count=None, timeout_in_minutes=20\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -119,16 +124,18 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"main_data_loc = 'data'\n",
|
||||
"train_file_name = 'S4248SM144SCEN.csv'\n",
|
||||
"main_data_loc = \"data\"\n",
|
||||
"train_file_name = \"S4248SM144SCEN.csv\"\n",
|
||||
"\n",
|
||||
"TARGET_COLNAME = \"S4248SM144SCEN\"\n",
|
||||
"TIME_COLNAME = \"observation_date\"\n",
|
||||
"COVID_PERIOD_START = '2020-03-01' # start of the covid period. To be excluded from evaluation.\n",
|
||||
"COVID_PERIOD_START = (\n",
|
||||
" \"2020-03-01\" # start of the covid period. To be excluded from evaluation.\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# load data\n",
|
||||
"df = pd.read_csv(os.path.join(main_data_loc, train_file_name))\n",
|
||||
"df[TIME_COLNAME] = pd.to_datetime(df[TIME_COLNAME], format='%Y-%m-%d')\n",
|
||||
"df[TIME_COLNAME] = pd.to_datetime(df[TIME_COLNAME], format=\"%Y-%m-%d\")\n",
|
||||
"df.sort_values(by=TIME_COLNAME, inplace=True)\n",
|
||||
"\n",
|
||||
"# remove the Covid period\n",
|
||||
@@ -202,24 +209,28 @@
|
||||
"source": [
|
||||
"# choose a name for the run history container in the workspace\n",
|
||||
"if isinstance(TARGET_LAGS, list):\n",
|
||||
" TARGET_LAGS_STR = '-'.join(map(str, TARGET_LAGS)) if (len(TARGET_LAGS) > 0) else None\n",
|
||||
" TARGET_LAGS_STR = (\n",
|
||||
" \"-\".join(map(str, TARGET_LAGS)) if (len(TARGET_LAGS) > 0) else None\n",
|
||||
" )\n",
|
||||
"else:\n",
|
||||
" TARGET_LAGS_STR = TARGET_LAGS\n",
|
||||
"\n",
|
||||
"experiment_desc = 'diff-{}_lags-{}_STL-{}'.format(DIFFERENCE_SERIES, TARGET_LAGS_STR, STL_TYPE)\n",
|
||||
"experiment_name = 'alcohol_{}'.format(experiment_desc)\n",
|
||||
"experiment_desc = \"diff-{}_lags-{}_STL-{}\".format(\n",
|
||||
" DIFFERENCE_SERIES, TARGET_LAGS_STR, STL_TYPE\n",
|
||||
")\n",
|
||||
"experiment_name = \"alcohol_{}\".format(experiment_desc)\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output['SDK version'] = azureml.core.VERSION\n",
|
||||
"output['Subscription ID'] = ws.subscription_id\n",
|
||||
"output['Workspace'] = ws.name\n",
|
||||
"output['SKU'] = ws.sku\n",
|
||||
"output['Resource Group'] = ws.resource_group\n",
|
||||
"output['Location'] = ws.location\n",
|
||||
"output['Run History Name'] = experiment_name\n",
|
||||
"pd.set_option('display.max_colwidth', -1)\n",
|
||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||
"output[\"SDK version\"] = azureml.core.VERSION\n",
|
||||
"output[\"Subscription ID\"] = ws.subscription_id\n",
|
||||
"output[\"Workspace\"] = ws.name\n",
|
||||
"output[\"SKU\"] = ws.sku\n",
|
||||
"output[\"Resource Group\"] = ws.resource_group\n",
|
||||
"output[\"Location\"] = ws.location\n",
|
||||
"output[\"Run History Name\"] = experiment_name\n",
|
||||
"pd.set_option(\"display.max_colwidth\", -1)\n",
|
||||
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
|
||||
"print(outputDf.T)"
|
||||
]
|
||||
},
|
||||
@@ -230,9 +241,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# create output directory\n",
|
||||
"output_dir = 'experiment_output/{}'.format(experiment_desc)\n",
|
||||
"output_dir = \"experiment_output/{}\".format(experiment_desc)\n",
|
||||
"if not os.path.exists(output_dir):\n",
|
||||
" os.makedirs(output_dir) "
|
||||
" os.makedirs(output_dir)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -255,17 +266,21 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# split the data into train and test set\n",
|
||||
"if DIFFERENCE_SERIES: \n",
|
||||
"if DIFFERENCE_SERIES:\n",
|
||||
" # generate train/inference sets using data in first differences\n",
|
||||
" df_train, df_test = ts_train_test_split(df_input=df_delta,\n",
|
||||
" df_train, df_test = ts_train_test_split(\n",
|
||||
" df_input=df_delta,\n",
|
||||
" n=FORECAST_HORIZON,\n",
|
||||
" time_colname=TIME_COLNAME,\n",
|
||||
" ts_id_colnames=TIME_SERIES_ID_COLNAMES)\n",
|
||||
" ts_id_colnames=TIME_SERIES_ID_COLNAMES,\n",
|
||||
" )\n",
|
||||
"else:\n",
|
||||
" df_train, df_test = ts_train_test_split(df_input=df,\n",
|
||||
" df_train, df_test = ts_train_test_split(\n",
|
||||
" df_input=df,\n",
|
||||
" n=FORECAST_HORIZON,\n",
|
||||
" time_colname=TIME_COLNAME,\n",
|
||||
" ts_id_colnames=TIME_SERIES_ID_COLNAMES)"
|
||||
" ts_id_colnames=TIME_SERIES_ID_COLNAMES,\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -286,12 +301,27 @@
|
||||
"df_test.to_csv(\"test.csv\", index=False)\n",
|
||||
"\n",
|
||||
"datastore = ws.get_default_datastore()\n",
|
||||
"datastore.upload_files(files = ['./train.csv'], target_path = 'uni-recipe-dataset/tabular/', overwrite = True,show_progress = True)\n",
|
||||
"datastore.upload_files(files = ['./test.csv'], target_path = 'uni-recipe-dataset/tabular/', overwrite = True,show_progress = True)\n",
|
||||
"datastore.upload_files(\n",
|
||||
" files=[\"./train.csv\"],\n",
|
||||
" target_path=\"uni-recipe-dataset/tabular/\",\n",
|
||||
" overwrite=True,\n",
|
||||
" show_progress=True,\n",
|
||||
")\n",
|
||||
"datastore.upload_files(\n",
|
||||
" files=[\"./test.csv\"],\n",
|
||||
" target_path=\"uni-recipe-dataset/tabular/\",\n",
|
||||
" overwrite=True,\n",
|
||||
" show_progress=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"from azureml.core import Dataset\n",
|
||||
"train_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'uni-recipe-dataset/tabular/train.csv')])\n",
|
||||
"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'uni-recipe-dataset/tabular/test.csv')])\n",
|
||||
"\n",
|
||||
"train_dataset = Dataset.Tabular.from_delimited_files(\n",
|
||||
" path=[(datastore, \"uni-recipe-dataset/tabular/train.csv\")]\n",
|
||||
")\n",
|
||||
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
|
||||
" path=[(datastore, \"uni-recipe-dataset/tabular/test.csv\")]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# print the first 5 rows of the Dataset\n",
|
||||
"train_dataset.to_pandas_dataframe().reset_index(drop=True).head(5)"
|
||||
@@ -311,17 +341,18 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"time_series_settings = {\n",
|
||||
" 'time_column_name': TIME_COLNAME,\n",
|
||||
" 'forecast_horizon': FORECAST_HORIZON,\n",
|
||||
" 'target_lags': TARGET_LAGS,\n",
|
||||
" 'use_stl': STL_TYPE,\n",
|
||||
" 'blocked_models': BLOCKED_MODELS,\n",
|
||||
" 'time_series_id_column_names': TIME_SERIES_ID_COLNAMES\n",
|
||||
" \"time_column_name\": TIME_COLNAME,\n",
|
||||
" \"forecast_horizon\": FORECAST_HORIZON,\n",
|
||||
" \"target_lags\": TARGET_LAGS,\n",
|
||||
" \"use_stl\": STL_TYPE,\n",
|
||||
" \"blocked_models\": BLOCKED_MODELS,\n",
|
||||
" \"time_series_id_column_names\": TIME_SERIES_ID_COLNAMES,\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"automl_config = AutoMLConfig(task='forecasting',\n",
|
||||
" debug_log='sample_experiment.log',\n",
|
||||
" primary_metric='normalized_root_mean_squared_error',\n",
|
||||
"automl_config = AutoMLConfig(\n",
|
||||
" task=\"forecasting\",\n",
|
||||
" debug_log=\"sample_experiment.log\",\n",
|
||||
" primary_metric=\"normalized_root_mean_squared_error\",\n",
|
||||
" experiment_timeout_minutes=20,\n",
|
||||
" iteration_timeout_minutes=5,\n",
|
||||
" enable_early_stopping=True,\n",
|
||||
@@ -331,7 +362,8 @@
|
||||
" verbosity=logging.INFO,\n",
|
||||
" max_cores_per_iteration=-1,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" **time_series_settings)"
|
||||
" **time_series_settings,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -404,14 +436,17 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from run_forecast import run_remote_inference\n",
|
||||
"remote_run = run_remote_inference(test_experiment=test_experiment, \n",
|
||||
"\n",
|
||||
"remote_run = run_remote_inference(\n",
|
||||
" test_experiment=test_experiment,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" train_run=best_run,\n",
|
||||
" test_dataset=test_dataset,\n",
|
||||
" target_column_name=TARGET_COLNAME)\n",
|
||||
" target_column_name=TARGET_COLNAME,\n",
|
||||
")\n",
|
||||
"remote_run.wait_for_completion(show_output=False)\n",
|
||||
"\n",
|
||||
"remote_run.download_file('outputs/predictions.csv', f'{output_dir}/predictions.csv')"
|
||||
"remote_run.download_file(\"outputs/predictions.csv\", f\"{output_dir}/predictions.csv\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -428,7 +463,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"X_trans = pd.read_csv(f'{output_dir}/predictions.csv', parse_dates=[TIME_COLNAME])\n",
|
||||
"X_trans = pd.read_csv(f\"{output_dir}/predictions.csv\", parse_dates=[TIME_COLNAME])\n",
|
||||
"X_trans.head()"
|
||||
]
|
||||
},
|
||||
@@ -440,15 +475,15 @@
|
||||
"source": [
|
||||
"# convert forecast in differences to levels\n",
|
||||
"def convert_fcst_diff_to_levels(fcst, yt, df_orig):\n",
|
||||
" \"\"\" Convert forecast from first differences to levels. \"\"\"\n",
|
||||
" \"\"\"Convert forecast from first differences to levels.\"\"\"\n",
|
||||
" fcst = fcst.reset_index(drop=False, inplace=False)\n",
|
||||
" fcst['predicted_level'] = fcst['predicted'].cumsum()\n",
|
||||
" fcst['predicted_level'] = fcst['predicted_level'].astype(float) + float(yt)\n",
|
||||
" fcst[\"predicted_level\"] = fcst[\"predicted\"].cumsum()\n",
|
||||
" fcst[\"predicted_level\"] = fcst[\"predicted_level\"].astype(float) + float(yt)\n",
|
||||
" # merge actuals\n",
|
||||
" out = pd.merge(fcst,\n",
|
||||
" df_orig[[TIME_COLNAME, TARGET_COLNAME]], \n",
|
||||
" on=[TIME_COLNAME], how='inner')\n",
|
||||
" out.rename(columns={TARGET_COLNAME: 'actual_level'}, inplace=True)\n",
|
||||
" out = pd.merge(\n",
|
||||
" fcst, df_orig[[TIME_COLNAME, TARGET_COLNAME]], on=[TIME_COLNAME], how=\"inner\"\n",
|
||||
" )\n",
|
||||
" out.rename(columns={TARGET_COLNAME: \"actual_level\"}, inplace=True)\n",
|
||||
" return out"
|
||||
]
|
||||
},
|
||||
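A toy check of the cumulative-sum logic above: each reconstructed level is the last observed level yt plus the running sum of predicted differences.

```python
# Last level 100 with predicted diffs [1, -2, 3] -> levels [101, 99, 102].
diffs = pd.Series([1.0, -2.0, 3.0])
print((diffs.cumsum() + 100.0).tolist())  # [101.0, 99.0, 102.0]
```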
@@ -458,16 +493,16 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"if DIFFERENCE_SERIES: \n",
|
||||
"if DIFFERENCE_SERIES:\n",
|
||||
" # convert forecast in differences to the levels\n",
|
||||
" INFORMATION_SET_DATE = max(df_train[TIME_COLNAME])\n",
|
||||
" YT = df.query('{} == @INFORMATION_SET_DATE'.format(TIME_COLNAME))[TARGET_COLNAME]\n",
|
||||
" YT = df.query(\"{} == @INFORMATION_SET_DATE\".format(TIME_COLNAME))[TARGET_COLNAME]\n",
|
||||
"\n",
|
||||
" fcst_df = convert_fcst_diff_to_levels(fcst=X_trans, yt=YT, df_orig=df)\n",
|
||||
"else:\n",
|
||||
" fcst_df = X_trans.copy()\n",
|
||||
" fcst_df['actual_level'] = y_test\n",
|
||||
" fcst_df['predicted_level'] = y_predictions\n",
|
||||
" fcst_df[\"actual_level\"] = y_test\n",
|
||||
" fcst_df[\"predicted_level\"] = y_predictions\n",
|
||||
"\n",
|
||||
"del X_trans"
|
||||
]
|
||||
@@ -486,13 +521,11 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# compute metrics\n",
|
||||
"metrics_df = compute_metrics(fcst_df=fcst_df,\n",
|
||||
" metric_name=None,\n",
|
||||
" ts_id_colnames=None)\n",
|
||||
"metrics_df = compute_metrics(fcst_df=fcst_df, metric_name=None, ts_id_colnames=None)\n",
|
||||
"# save output\n",
|
||||
"metrics_file_name = '{}_metrics.csv'.format(experiment_name)\n",
|
||||
"fcst_file_name = '{}_forecst.csv'.format(experiment_name)\n",
|
||||
"plot_file_name = '{}_plot.pdf'.format(experiment_name)\n",
|
||||
"metrics_file_name = \"{}_metrics.csv\".format(experiment_name)\n",
|
||||
"fcst_file_name = \"{}_forecst.csv\".format(experiment_name)\n",
|
||||
"plot_file_name = \"{}_plot.pdf\".format(experiment_name)\n",
|
||||
"\n",
|
||||
"metrics_df.to_csv(os.path.join(output_dir, metrics_file_name), index=True)\n",
|
||||
"fcst_df.to_csv(os.path.join(output_dir, fcst_file_name), index=True)"
|
||||
@@ -517,9 +550,9 @@
|
||||
"\n",
|
||||
"# generate and save plots\n",
|
||||
"fig, ax = plt.subplots(dpi=180)\n",
|
||||
"ax.plot(plot_df[TARGET_COLNAME], '-g', label='Historical')\n",
|
||||
"ax.plot(fcst_df['actual_level'], '-b', label='Actual')\n",
|
||||
"ax.plot(fcst_df['predicted_level'], '-r', label='Forecast')\n",
|
||||
"ax.plot(plot_df[TARGET_COLNAME], \"-g\", label=\"Historical\")\n",
|
||||
"ax.plot(fcst_df[\"actual_level\"], \"-b\", label=\"Actual\")\n",
|
||||
"ax.plot(fcst_df[\"predicted_level\"], \"-r\", label=\"Forecast\")\n",
|
||||
"ax.legend()\n",
|
||||
"ax.set_title(\"Forecast vs Actuals\")\n",
|
||||
"ax.set_xlabel(TIME_COLNAME)\n",
|
||||
|
||||
@@ -11,11 +11,14 @@ from sklearn.externals import joblib
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
'--target_column_name', type=str, dest='target_column_name',
|
||||
help='Target Column Name')
|
||||
"--target_column_name",
|
||||
type=str,
|
||||
dest="target_column_name",
|
||||
help="Target Column Name",
|
||||
)
|
||||
parser.add_argument(
|
||||
'--test_dataset', type=str, dest='test_dataset',
|
||||
help='Test Dataset')
|
||||
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
target_column_name = args.target_column_name
|
||||
@@ -27,30 +30,40 @@ ws = run.experiment.workspace
|
||||
# get the input dataset by id
|
||||
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)
|
||||
|
||||
X_test = test_dataset.drop_columns(columns=[target_column_name]).to_pandas_dataframe().reset_index(drop=True)
|
||||
y_test_df = test_dataset.with_timestamp_columns(None).keep_columns(columns=[target_column_name]).to_pandas_dataframe()
|
||||
X_test = (
|
||||
test_dataset.drop_columns(columns=[target_column_name])
|
||||
.to_pandas_dataframe()
|
||||
.reset_index(drop=True)
|
||||
)
|
||||
y_test_df = (
|
||||
test_dataset.with_timestamp_columns(None)
|
||||
.keep_columns(columns=[target_column_name])
|
||||
.to_pandas_dataframe()
|
||||
)
|
||||
|
||||
# generate forecast
|
||||
fitted_model = joblib.load('model.pkl')
|
||||
fitted_model = joblib.load("model.pkl")
|
||||
# Default quantiles: 0.025 and 0.975 bound a 95% prediction interval; 0.5 gives the point forecast
|
||||
quantiles = [0.025, 0.5, 0.975]
|
||||
predicted_column_name = 'predicted'
|
||||
PI = 'prediction_interval'
|
||||
predicted_column_name = "predicted"
|
||||
PI = "prediction_interval"
|
||||
fitted_model.quantiles = quantiles
|
||||
pred_quantiles = fitted_model.forecast_quantiles(X_test)
|
||||
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(lambda x: '[{}, {}]'.format(x[0],
|
||||
x[1]), axis=1)
|
||||
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
|
||||
lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
|
||||
)
|
||||
X_test[target_column_name] = y_test_df[target_column_name]
|
||||
X_test[PI] = pred_quantiles[PI]
|
||||
X_test[predicted_column_name] = pred_quantiles[0.5]
|
||||
# drop rows where prediction or actuals are nan
|
||||
# happens because of missing actuals
|
||||
# or at edges of time due to lags/rolling windows
|
||||
clean = X_test[X_test[[target_column_name,
|
||||
predicted_column_name]].notnull().all(axis=1)]
|
||||
clean.rename(columns={target_column_name: 'actual'}, inplace=True)
|
||||
clean = X_test[
|
||||
X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
|
||||
]
|
||||
clean.rename(columns={target_column_name: "actual"}, inplace=True)
|
||||
|
||||
file_name = 'outputs/predictions.csv'
|
||||
file_name = "outputs/predictions.csv"
|
||||
export_csv = clean.to_csv(file_name, header=True, index=False) # added Index
|
||||
|
||||
# Upload the predictions into artifacts
|
||||
|
||||
@@ -15,22 +15,25 @@ def adf_test(series, **kw):
|
||||
:param series: series to test
|
||||
:return: dictionary of results
|
||||
"""
|
||||
if 'lags' in kw.keys():
|
||||
msg = 'Lag order of {} detected. Running the ADF test...'.format(str(kw['lags']))
|
||||
if "lags" in kw.keys():
|
||||
msg = "Lag order of {} detected. Running the ADF test...".format(
|
||||
str(kw["lags"])
|
||||
)
|
||||
print(msg)
|
||||
statistic, pval, critval, resstore = stattools.adfuller(series,
|
||||
maxlag=kw['lags'],
|
||||
autolag=kw['autolag'],
|
||||
store=kw['store'])
|
||||
statistic, pval, critval, resstore = stattools.adfuller(
|
||||
series, maxlag=kw["lags"], autolag=kw["autolag"], store=kw["store"]
|
||||
)
|
||||
else:
|
||||
statistic, pval, critval, resstore = stattools.adfuller(series,
|
||||
autolag=kw['IC'],
|
||||
store=kw['store'])
|
||||
statistic, pval, critval, resstore = stattools.adfuller(
|
||||
series, autolag=kw["IC"], store=kw["store"]
|
||||
)
|
||||
|
||||
output = {'statistic': statistic,
|
||||
'pval': pval,
|
||||
'critical': critval,
|
||||
'resstore': resstore}
|
||||
output = {
|
||||
"statistic": statistic,
|
||||
"pval": pval,
|
||||
"critical": critval,
|
||||
"resstore": resstore,
|
||||
}
|
||||
return output
|
||||
|
||||
|
||||
@@ -41,22 +44,23 @@ def kpss_test(series, **kw):
|
||||
:param series: series to test
|
||||
:return: dictionary of results
|
||||
"""
|
||||
if kw['store']:
|
||||
statistic, p_value, critical_values, rstore = stattools.kpss(series,
|
||||
regression=kw['reg_type'],
|
||||
lags=kw['lags'],
|
||||
store=kw['store'])
|
||||
if kw["store"]:
|
||||
statistic, p_value, critical_values, rstore = stattools.kpss(
|
||||
series, regression=kw["reg_type"], lags=kw["lags"], store=kw["store"]
|
||||
)
|
||||
else:
|
||||
statistic, p_value, lags, critical_values = stattools.kpss(series,
|
||||
regression=kw['reg_type'],
|
||||
lags=kw['lags'])
|
||||
output = {'statistic': statistic,
|
||||
'pval': p_value,
|
||||
'critical': critical_values,
|
||||
'lags': rstore.lags if kw['store'] else lags}
|
||||
statistic, p_value, lags, critical_values = stattools.kpss(
|
||||
series, regression=kw["reg_type"], lags=kw["lags"]
|
||||
)
|
||||
output = {
|
||||
"statistic": statistic,
|
||||
"pval": p_value,
|
||||
"critical": critical_values,
|
||||
"lags": rstore.lags if kw["store"] else lags,
|
||||
}
|
||||
|
||||
if kw['store']:
|
||||
output.update({'resstore': rstore})
|
||||
if kw["store"]:
|
||||
output.update({"resstore": rstore})
|
||||
return output
|
||||
|
||||
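A sketch of calling these wrappers directly on a pandas Series y (assumed to exist); the keyword settings mirror the defaults used by unit_root_test_wrapper below.

```python
# Run both tests on a series `y`; each returns a dict with a "pval" key.
adf_res = adf_test(y, IC="AIC", store=True)
kpss_res = kpss_test(y, reg_type="c", lags="auto", store=True)
print(adf_res["pval"], kpss_res["pval"])
```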
|
||||
@@ -75,9 +79,9 @@ def format_test_output(test_name, test_res, H0_unit_root=True):
|
||||
If test failed (test_res is None), return empty dictionary.
|
||||
"""
|
||||
# Check if the test failed by trying to extract the test statistic
|
||||
if test_name in ('ADF', 'KPSS'):
|
||||
if test_name in ("ADF", "KPSS"):
|
||||
try:
|
||||
test_res['statistic']
|
||||
test_res["statistic"]
|
||||
except BaseException:
|
||||
test_res = None
|
||||
else:
|
||||
@@ -90,32 +94,32 @@ def format_test_output(test_name, test_res, H0_unit_root=True):
|
||||
return {}
|
||||
|
||||
# extract necessary information
|
||||
if test_name in ('ADF', 'KPSS'):
|
||||
statistic = test_res['statistic']
|
||||
crit_val = test_res['critical']['5%']
|
||||
p_val = test_res['pval']
|
||||
lags = test_res['resstore'].usedlag if test_name == 'ADF' else test_res['lags']
|
||||
if test_name in ("ADF", "KPSS"):
|
||||
statistic = test_res["statistic"]
|
||||
crit_val = test_res["critical"]["5%"]
|
||||
p_val = test_res["pval"]
|
||||
lags = test_res["resstore"].usedlag if test_name == "ADF" else test_res["lags"]
|
||||
else:
|
||||
statistic = test_res.stat
|
||||
crit_val = test_res.critical_values['5%']
|
||||
crit_val = test_res.critical_values["5%"]
|
||||
p_val = test_res.pvalue
|
||||
lags = test_res.lags
|
||||
|
||||
if H0_unit_root:
|
||||
H0 = 'The process is non-stationary'
|
||||
H0 = "The process is non-stationary"
|
||||
stationary = "yes" if p_val < 0.05 else "not"
|
||||
else:
|
||||
H0 = 'The process is stationary'
|
||||
H0 = "The process is stationary"
|
||||
stationary = "yes" if p_val > 0.05 else "not"
|
||||
|
||||
out = {
|
||||
'test_name': test_name,
|
||||
'statistic': statistic,
|
||||
'crit_val': crit_val,
|
||||
'p_val': p_val,
|
||||
'lags': int(lags),
|
||||
'stationary': stationary,
|
||||
'Null Hypothesis': H0
|
||||
"test_name": test_name,
|
||||
"statistic": statistic,
|
||||
"crit_val": crit_val,
|
||||
"p_val": p_val,
|
||||
"lags": int(lags),
|
||||
"stationary": stationary,
|
||||
"Null Hypothesis": H0,
|
||||
}
|
||||
return out
|
||||
|
||||
@@ -136,22 +140,15 @@ def unit_root_test_wrapper(series, lags=None):
|
||||
:return: dictionary with a summary table for all tests and the final decision on stationary vs non-stationary
|
||||
"""
|
||||
# setting for ADF and KPSS tests
|
||||
adf_settings = {
|
||||
'IC': 'AIC',
|
||||
'store': True
|
||||
}
|
||||
adf_settings = {"IC": "AIC", "store": True}
|
||||
|
||||
kpss_settings = {
|
||||
'reg_type': 'c',
|
||||
'lags': 'auto',
|
||||
'store': True
|
||||
}
|
||||
kpss_settings = {"reg_type": "c", "lags": "auto", "store": True}
|
||||
|
||||
arch_test_settings = {} # settings for PP, ADF GLS and ZA tests
|
||||
if lags is not None:
|
||||
adf_settings.update({'lags': lags, 'autolag': None})
|
||||
kpss_settings.update({'lags:': lags})
|
||||
arch_test_settings = {'lags': lags}
|
||||
adf_settings.update({"lags": lags, "autolag": None})
|
||||
kpss_settings.update({"lags:": lags})
|
||||
arch_test_settings = {"lags": lags}
|
||||
# Run individual tests
|
||||
adf = adf_test(series, **adf_settings) # ADF test
|
||||
kpss = kpss_test(series, **kpss_settings) # KPSS test
|
||||
@@ -160,14 +157,26 @@ def unit_root_test_wrapper(series, lags=None):
|
||||
za = unitroot.ZivotAndrews(series, **arch_test_settings) # Zivot-Andrews test
|
||||
|
||||
# generate output table
|
||||
adf_dict = format_test_output(test_name='ADF', test_res=adf, H0_unit_root=True)
|
||||
kpss_dict = format_test_output(test_name='KPSS', test_res=kpss, H0_unit_root=False)
|
||||
pp_dict = format_test_output(test_name='Philips Perron', test_res=pp, H0_unit_root=True)
|
||||
adfgls_dict = format_test_output(test_name='ADF GLS', test_res=adfgls, H0_unit_root=True)
|
||||
za_dict = format_test_output(test_name='Zivot-Andrews', test_res=za, H0_unit_root=True)
|
||||
adf_dict = format_test_output(test_name="ADF", test_res=adf, H0_unit_root=True)
|
||||
kpss_dict = format_test_output(test_name="KPSS", test_res=kpss, H0_unit_root=False)
|
||||
pp_dict = format_test_output(
|
||||
test_name="Philips Perron", test_res=pp, H0_unit_root=True
|
||||
)
|
||||
adfgls_dict = format_test_output(
|
||||
test_name="ADF GLS", test_res=adfgls, H0_unit_root=True
|
||||
)
|
||||
za_dict = format_test_output(
|
||||
test_name="Zivot-Andrews", test_res=za, H0_unit_root=True
|
||||
)
|
||||
|
||||
test_dict = {'ADF': adf_dict, 'KPSS': kpss_dict, 'PP': pp_dict, 'ADF GLS': adfgls_dict, 'ZA': za_dict}
|
||||
test_sum = pd.DataFrame.from_dict(test_dict, orient='index').reset_index(drop=True)
|
||||
test_dict = {
|
||||
"ADF": adf_dict,
|
||||
"KPSS": kpss_dict,
|
||||
"PP": pp_dict,
|
||||
"ADF GLS": adfgls_dict,
|
||||
"ZA": za_dict,
|
||||
}
|
||||
test_sum = pd.DataFrame.from_dict(test_dict, orient="index").reset_index(drop=True)
|
||||
|
||||
# decision based on the majority rule
|
||||
if test_sum.shape[0] > 0:
|
||||
@@ -176,9 +185,9 @@ def unit_root_test_wrapper(series, lags=None):
|
||||
ratio = 1 # all tests fail, assume the series is stationary
|
||||
|
||||
# Majority rule. If the ratio is exactly 0.5, assume the series in non-stationary.
|
||||
stationary = 'YES' if (ratio > 0.5) else 'NO'
|
||||
stationary = "YES" if (ratio > 0.5) else "NO"
|
||||
|
||||
out = {'summary': test_sum, 'stationary': stationary}
|
||||
out = {"summary": test_sum, "stationary": stationary}
|
||||
return out
|
||||
|
||||
|
||||
@@ -196,10 +205,12 @@ def ts_train_test_split(df_input, n, time_colname, ts_id_colnames=None):
|
||||
ts_id_colnames = []
|
||||
ts_id_colnames_original = ts_id_colnames.copy()
|
||||
if len(ts_id_colnames) == 0:
|
||||
ts_id_colnames = ['Grain']
|
||||
df_input[ts_id_colnames[0]] = 'dummy'
|
||||
ts_id_colnames = ["Grain"]
|
||||
df_input[ts_id_colnames[0]] = "dummy"
|
||||
# Sort by ascending time
|
||||
df_grouped = (df_input.sort_values(time_colname).groupby(ts_id_colnames, group_keys=False))
|
||||
df_grouped = df_input.sort_values(time_colname).groupby(
|
||||
ts_id_colnames, group_keys=False
|
||||
)
|
||||
df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-n])
|
||||
df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-n:])
|
||||
# drop group column name if it was not originally provided
|
||||
@@ -221,30 +232,32 @@ def compute_metrics(fcst_df, metric_name=None, ts_id_colnames=None):
|
||||
if ts_id_colnames is None:
|
||||
ts_id_colnames = []
|
||||
if len(ts_id_colnames) == 0:
|
||||
ts_id_colnames = ['TS_ID']
|
||||
fcst_df[ts_id_colnames[0]] = 'dummy'
|
||||
ts_id_colnames = ["TS_ID"]
|
||||
fcst_df[ts_id_colnames[0]] = "dummy"
|
||||
metrics_list = []
|
||||
for grain, df in fcst_df.groupby(ts_id_colnames):
|
||||
try:
|
||||
scores = scoring.score_regression(
|
||||
y_test=df['actual_level'],
|
||||
y_pred=df['predicted_level'],
|
||||
metrics=list(constants.Metric.SCALAR_REGRESSION_SET))
|
||||
y_test=df["actual_level"],
|
||||
y_pred=df["predicted_level"],
|
||||
metrics=list(constants.Metric.SCALAR_REGRESSION_SET),
|
||||
)
|
||||
except BaseException:
|
||||
msg = '{}: metrics calculation failed.'.format(grain)
|
||||
msg = "{}: metrics calculation failed.".format(grain)
|
||||
print(msg)
|
||||
scores = {}
|
||||
one_grain_metrics_df = pd.DataFrame(list(scores.items()), columns=['metric_name', 'metric']).\
|
||||
sort_values(['metric_name'])
|
||||
one_grain_metrics_df = pd.DataFrame(
|
||||
list(scores.items()), columns=["metric_name", "metric"]
|
||||
).sort_values(["metric_name"])
|
||||
one_grain_metrics_df.reset_index(inplace=True, drop=True)
|
||||
if len(ts_id_colnames) < 2:
|
||||
one_grain_metrics_df['grain'] = ts_id_colnames[0]
|
||||
one_grain_metrics_df["grain"] = ts_id_colnames[0]
|
||||
else:
|
||||
one_grain_metrics_df['grain'] = "|".join(list(grain))
|
||||
one_grain_metrics_df["grain"] = "|".join(list(grain))
|
||||
|
||||
metrics_list.append(one_grain_metrics_df)
|
||||
# collect into a data frame
|
||||
grain_metrics = pd.concat(metrics_list)
|
||||
if metric_name is not None:
|
||||
grain_metrics = grain_metrics.query('metric_name == @metric_name')
|
||||
grain_metrics = grain_metrics.query("metric_name == @metric_name")
|
||||
return grain_metrics
|
||||
|
||||
@@ -3,36 +3,47 @@ import shutil
|
||||
from azureml.core import ScriptRunConfig
|
||||
|
||||
|
||||
def run_remote_inference(test_experiment, compute_target, train_run,
|
||||
test_dataset, target_column_name, inference_folder='./forecast'):
|
||||
def run_remote_inference(
|
||||
test_experiment,
|
||||
compute_target,
|
||||
train_run,
|
||||
test_dataset,
|
||||
target_column_name,
|
||||
inference_folder="./forecast",
|
||||
):
|
||||
# Create a local directory to copy the model.pkl and forecasting_script.py files into.
|
||||
# These files will be uploaded to and executed on the compute instance.
|
||||
os.makedirs(inference_folder, exist_ok=True)
|
||||
shutil.copy('forecasting_script.py', inference_folder)
|
||||
shutil.copy("forecasting_script.py", inference_folder)
|
||||
|
||||
train_run.download_file('outputs/model.pkl',
|
||||
os.path.join(inference_folder, 'model.pkl'))
|
||||
train_run.download_file(
|
||||
"outputs/model.pkl", os.path.join(inference_folder, "model.pkl")
|
||||
)
|
||||
|
||||
inference_env = train_run.get_environment()
|
||||
|
||||
config = ScriptRunConfig(source_directory=inference_folder,
|
||||
script='forecasting_script.py',
|
||||
arguments=['--target_column_name',
|
||||
config = ScriptRunConfig(
|
||||
source_directory=inference_folder,
|
||||
script="forecasting_script.py",
|
||||
arguments=[
|
||||
"--target_column_name",
|
||||
target_column_name,
|
||||
'--test_dataset',
|
||||
test_dataset.as_named_input(test_dataset.name)],
|
||||
"--test_dataset",
|
||||
test_dataset.as_named_input(test_dataset.name),
|
||||
],
|
||||
compute_target=compute_target,
|
||||
environment=inference_env)
|
||||
environment=inference_env,
|
||||
)
|
||||
|
||||
run = test_experiment.submit(config,
|
||||
tags={'training_run_id':
|
||||
train_run.id,
|
||||
'run_algorithm':
|
||||
train_run.properties['run_algorithm'],
|
||||
'valid_score':
|
||||
train_run.properties['score'],
|
||||
'primary_metric':
|
||||
train_run.properties['primary_metric']})
|
||||
run = test_experiment.submit(
|
||||
config,
|
||||
tags={
|
||||
"training_run_id": train_run.id,
|
||||
"run_algorithm": train_run.properties["run_algorithm"],
|
||||
"valid_score": train_run.properties["score"],
|
||||
"primary_metric": train_run.properties["primary_metric"],
|
||||
},
|
||||
)
|
||||
|
||||
run.log("run_algorithm", run.tags['run_algorithm'])
|
||||
run.log("run_algorithm", run.tags["run_algorithm"])
|
||||
return run
|
||||
|
||||
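# A hypothetical call of the helper above, as a minimal sketch (names assumed;
# test_experiment, compute_target, best_run and test_dataset would come from
# the calling notebook's own setup):
# remote_run = run_remote_inference(
#     test_experiment=test_experiment,
#     compute_target=compute_target,
#     train_run=best_run,
#     test_dataset=test_dataset,
#     target_column_name='demand',
# )
# remote_run.wait_for_completion(show_output=False)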
@@ -96,7 +96,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.36.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -95,7 +95,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.36.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -92,7 +92,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.36.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -106,7 +106,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.36.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -0,0 +1,49 @@
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.

import os


def init():
    print("Init")


# For partition per folder/column jobs, ParallelRunStep passes an optional positional parameter `mini_batch_context`
# to the `run` function in the user's entry script, which contains information about the mini-batch.
def run(mini_batch, mini_batch_context):
    print(f"run method start: {__file__}, run({mini_batch}, {mini_batch_context})")
    # `partition_key_value` is a dict that corresponds to the mini-batch; the keys of the dict are those specified
    # in `partition_keys` in ParallelRunConfig.
    print(f"partition_key_value = {mini_batch_context.partition_key_value}")
    # `dataset` is the dataset object that corresponds to the mini-batch, i.e. the subset of the input dataset
    # filtered by the condition specified in `partition_key_value`.
    print(f"dataset = {mini_batch_context.dataset}")

    print(f"file_count_of_mini_batch = {len(mini_batch)}")
    file_name_list = []
    file_size_list = []
    total_file_size_of_mini_batch = 0
    for file_path in mini_batch:
        file_name_list.append(os.path.basename(file_path))
        file_size = os.path.getsize(file_path)
        file_size_list.append(file_size)
        total_file_size_of_mini_batch += file_size
    print(f"total_file_size_of_mini_batch = {total_file_size_of_mini_batch}")
    file_size_ratio_list = [file_size * 1.0 / total_file_size_of_mini_batch for file_size in file_size_list]

    # If `output_action` is set to `append_row` in ParallelRunConfig for FileDataset input (as in this sample
    # notebook), the return value of the `run` method is expected to be a list/tuple of the same length as the
    # input parameter `mini_batch`, and each element in the list/tuple forms a row in the result file by
    # calling the Python builtin `str` function.
    # If you want to control the output format, format and return str values as in this example.
    return [
        ",".join([str(x) for x in fields])
        for fields in zip(
            file_name_list,
            file_size_list,
            file_size_ratio_list,
            [mini_batch_context.partition_key_value["user"]] * len(mini_batch),
            [mini_batch_context.partition_key_value["genres"]] * len(mini_batch),
            [total_file_size_of_mini_batch] * len(mini_batch),
        )
    ]
@@ -0,0 +1,17 @@
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.

import os


def init():
    print("Init")


def run(mini_batch):
    print(f'run method start: {__file__}, run({mini_batch})')
    total_income = mini_batch["INCOME"].sum()
    print(f'total_income = {total_income}')
    mini_batch["total_income"] = total_income

    return mini_batch
@@ -32,6 +32,7 @@ To run a Batch Inference job, you will need to gather some configuration data.
- **node_count**: number of compute nodes to use.
- **process_count_per_node**: number of processes per node (optional, default value is 1).
- **mini_batch_size**: the approximate amount of input data passed to each run() invocation. For FileDataset input, this is the number of files the user script can process in one run() call. For TabularDataset input, it is the approximate size of data the user script can process in one run() call, e.g. 1024, 1024KB, 10MB, 1GB (optional, default value is 10 files for FileDataset and 1MB for TabularDataset).
- **partition_keys**: the keys used to partition the input data into the mini-batches passed to each run() invocation; see the sketch after this list. This parameter is mutually exclusive with `mini_batch_size`, and it requires the input datasets to have a `partition_keys` attribute whose value is a superset of the value of this parameter. Each run() call processes the part of the data that has identical values for the specified `partition_keys`. You can follow the examples in [file-dataset-partition-per-folder.ipynb](./file-dataset-partition-per-folder.ipynb) and [tabular-dataset-partition-per-column.ipynb](./tabular-dataset-partition-per-column.ipynb) to see how to create such datasets.
- **logging_level**: log verbosity. Values in increasing verbosity are: 'WARNING', 'INFO', 'DEBUG' (optional, default value is 'INFO').
- **run_invocation_timeout**: run method invocation timeout period in seconds (optional, default value is 60).
- **environment**: The environment definition. This field configures the Python environment. It can be configured to use an existing Python environment or to set up a temp environment for the experiment. The definition is also responsible for setting the required application dependencies.
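Putting these parameters together, a minimal configuration might look like the sketch below. It mirrors the partition-per-column sample notebook; `scripts`, `my_entry_script.py`, `batch_env`, and `compute_target` are placeholders for objects you create in your own setup, not values defined by this README.

```python
from azureml.pipeline.steps import ParallelRunConfig

# A minimal sketch, assuming batch_env (an Environment) and compute_target
# (an AmlCompute cluster) already exist in your workspace setup.
parallel_run_config = ParallelRunConfig(
    source_directory='scripts',           # placeholder folder holding the entry script
    entry_script='my_entry_script.py',    # placeholder script defining init()/run()
    partition_keys=['store', 'brand'],    # mutually exclusive with mini_batch_size
    error_threshold=5,
    output_action='append_row',
    environment=batch_env,
    compute_target=compute_target,
    node_count=2,
    process_count_per_node=1,
    run_invocation_timeout=600,
)
```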
@@ -121,6 +122,8 @@ pipeline_run.wait_for_completion(show_output=True)
- [file-dataset-image-inference-mnist.ipynb](./file-dataset-image-inference-mnist.ipynb) demonstrates how to run batch inference on an MNIST dataset using FileDataset.
- [tabular-dataset-inference-iris.ipynb](./tabular-dataset-inference-iris.ipynb) demonstrates how to run batch inference on an Iris dataset using TabularDataset.
- [pipeline-style-transfer.ipynb](../pipeline-style-transfer/pipeline-style-transfer-parallel-run.ipynb) demonstrates how to use ParallelRunStep in a multi-step pipeline and feed the output of one step into the ParallelRunStep.
- [file-dataset-partition-per-folder.ipynb](./file-dataset-partition-per-folder.ipynb) demonstrates how to run batch inference on file data by treating the files inside each leaf folder as a mini-batch.
- [tabular-dataset-partition-per-column.ipynb](./tabular-dataset-partition-per-column.ipynb) demonstrates how to run batch inference on tabular data by treating rows with identical values in the specified columns as a mini-batch.

# Troubleshooting guide


@@ -0,0 +1,404 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Using Azure Machine Learning Pipelines for Batch Inference on file input partitioned by folder structure\n",
"\n",
"In this notebook, we will demonstrate how to make predictions on large quantities of data asynchronously using ML pipelines with Azure Machine Learning. Batch inference (or batch scoring) provides cost-effective inference, with unparalleled throughput for asynchronous applications. Batch prediction pipelines can scale to perform inference on terabytes of production data. Batch prediction is optimized for high throughput, fire-and-forget predictions for a large collection of data.\n",
"\n",
"> **Tip**\n",
"If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction.\n",
"\n",
"This example will create a sample dataset with a nested folder structure, where each folder name corresponds to an attribute of the files inside it. The batch inference job splits the files in the dataset by those attributes, so that all files with the same value for the specified attribute form a single mini-batch to be processed.\n",
"\n",
"The outline of this notebook is as follows:\n",
"\n",
"- Create a dataset with a nested folder structure and a `partition_format` that interprets the folder structure as attributes of the files inside.\n",
"- Do batch inference on each mini-batch defined by the folder structure.\n",
"\n",
"## Prerequisites\n",
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the configuration notebook located at https://github.com/Azure/MachineLearningNotebooks first. This sets you up with a working config file that has information on your workspace, subscription id, etc. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Connect to workspace"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.workspace import Workspace\n",
"ws = Workspace.from_config()\n",
"print('Workspace name: ' + ws.name, \n",
" 'Azure region: ' + ws.location, \n",
" 'Subscription id: ' + ws.subscription_id, \n",
" 'Resource group: ' + ws.resource_group, sep = '\\n')\n",
"\n",
"datastore = ws.get_default_datastore()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"print(azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Upload local test data to datastore\n",
"The destination folder in the datastore is structured so that the name of each folder layer corresponds to a property of all the files inside the folder."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"\n",
"datastore.upload('test_files/disco', 'dataset_partition_test/user1/winter', overwrite=True, show_progress=False)\n",
"datastore.upload('test_files/orchestra', 'dataset_partition_test/user1/fall', overwrite=True, show_progress=False)\n",
"datastore.upload('test_files/piano', 'dataset_partition_test/user2/summer', overwrite=True, show_progress=False)\n",
"datastore.upload('test_files/spirituality', 'dataset_partition_test/user3/fall', overwrite=True, show_progress=False)\n",
"datastore.upload('test_files/piano', 'dataset_partition_test/user4/spring', overwrite=True, show_progress=False)\n",
"datastore.upload('test_files/piano', 'dataset_partition_test/user4/fall', overwrite=True, show_progress=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create partitioned file dataset\n",
"Create a file dataset partitioned by 'user', 'season', and 'genres', each corresponding to a folder layer specified in `partition_format`. You can get a partition of the data by specifying the value of one or more partition keys. E.g., by specifying `user=user1 and genres=piano`, you get all the files that match `dataset_partition_test/user1/*/piano.wav`. A quick spot check on the resulting partitions follows the next two cells."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"partitioned_file_dataset = Dataset.File.from_files(path=(datastore, 'dataset_partition_test/*/*/*.wav'),\n",
" partition_format=\"dataset_partition_test/{user}/{season}/{genres}.wav\",\n",
" validate=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"partitioned_file_dataset.partition_keys"
]
},
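{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick spot check (a sketch added for illustration, not part of the original pipeline), you can list the files that fall into a given `genres` partition by matching the dataset's file paths. This assumes `to_path()` returns one path string per file; the exact prefix of the returned paths can vary, so the pattern below matches only on the trailing folder layers."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical spot check: list all files in the genres=piano partitions.\n",
"import fnmatch\n",
"\n",
"paths = partitioned_file_dataset.to_path()\n",
"print([p for p in paths if fnmatch.fnmatch(p, '*/piano.wav')])"
]
},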
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create or Attach existing compute resource"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
"\n",
"# choose a name for your cluster\n",
"compute_name = os.environ.get(\"AML_COMPUTE_CLUSTER_NAME\", \"cpu-cluster\")\n",
"compute_min_nodes = os.environ.get(\"AML_COMPUTE_CLUSTER_MIN_NODES\", 0)\n",
"compute_max_nodes = os.environ.get(\"AML_COMPUTE_CLUSTER_MAX_NODES\", 2)\n",
"\n",
"# This example uses CPU VM. For using GPU VM, set SKU to STANDARD_NC6\n",
"vm_size = os.environ.get(\"AML_COMPUTE_CLUSTER_SKU\", \"STANDARD_D2_V2\")\n",
"\n",
"\n",
"if compute_name in ws.compute_targets:\n",
" compute_target = ws.compute_targets[compute_name]\n",
" if compute_target and type(compute_target) is AmlCompute:\n",
" print('found compute target. just use it. ' + compute_name)\n",
"else:\n",
" print('creating a new compute target...')\n",
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = vm_size,\n",
" min_nodes = compute_min_nodes, \n",
" max_nodes = compute_max_nodes)\n",
"\n",
" # create the cluster\n",
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
" \n",
" # can poll for a minimum number of nodes and for a specific timeout. \n",
" # if no min node count is provided it will use the scale settings for the cluster\n",
" compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
" \n",
" # For a more detailed view of current AmlCompute status, use get_status()\n",
" print(compute_target.get_status().serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Intermediate/Output Data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline, PipelineData\n",
"\n",
"output_dir = PipelineData(name=\"file_dataset_inferences\", datastore=datastore)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Calculate total file size of each mini-batch partitioned by dataset partition key(s)\n",
"The script sums up the total size of the files in each mini-batch."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"scripts_folder = \"Code\"\n",
"script_file = \"total_file_size.py\"\n",
"\n",
"# peek at contents\n",
"with open(os.path.join(scripts_folder, script_file)) as inference_file:\n",
" print(inference_file.read())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Build and run the batch inference pipeline\n",
"### Specify the environment to run the script\n",
"You need to specify the required azureml packages in the dependencies."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Environment\n",
"from azureml.core.runconfig import CondaDependencies, DEFAULT_CPU_IMAGE\n",
"\n",
"batch_conda_deps = CondaDependencies.create(pip_packages=[\"azureml-core\", \"azureml-dataset-runtime[fuse]\"])\n",
"batch_env = Environment(name=\"batch_environment\")\n",
"batch_env.python.conda_dependencies = batch_conda_deps\n",
"batch_env.docker.base_image = DEFAULT_CPU_IMAGE"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create the configuration to wrap the inference script\n",
"The parameter `partition_keys` is a list containing a subset of the dataset's partition keys, specifying how the input dataset is partitioned into mini-batches. Every possible combination of values of the `partition_keys` forms one mini-batch. E.g., specifying `partition_keys=['user', 'genres']` here results in 5 mini-batches, i.e. `user=user1 && genres=disco`, `user=user1 && genres=orchestra`, `user=user2 && genres=piano`, `user=user3 && genres=spirituality` and `user=user4 && genres=piano`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.steps import ParallelRunStep, ParallelRunConfig\n",
"\n",
"# In a real-world scenario, you'll want to tune the number of processes per node and the node count to fit your problem domain.\n",
"parallel_run_config = ParallelRunConfig(\n",
" source_directory=scripts_folder,\n",
" entry_script=script_file, # the user script to run against each input\n",
" partition_keys=['user', 'genres'],\n",
" error_threshold=5,\n",
" output_action='append_row',\n",
" append_row_file_name=\"file_size_outputs.txt\",\n",
" environment=batch_env,\n",
" compute_target=compute_target, \n",
" node_count=2,\n",
" run_invocation_timeout=600\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create the pipeline step"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"parallel_run_step = ParallelRunStep(\n",
" name='summarize-file-size',\n",
" inputs=[partitioned_file_dataset.as_named_input(\"partitioned_file_input\")],\n",
" output=output_dir,\n",
" parallel_run_config=parallel_run_config,\n",
" allow_reuse=False\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Run the pipeline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"from azureml.pipeline.core import Pipeline\n",
"\n",
"pipeline = Pipeline(workspace=ws, steps=[parallel_run_step])\n",
"\n",
"pipeline_run = Experiment(ws, 'file-dataset-partition').submit(pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_run.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## View the prediction results\n",
"In the total_file_size.py file above you can see that a list of result rows (one comma-separated string per input file) gets returned. These are written to the datastore specified in the PipelineData object as the output data, which in this case is called file_dataset_inferences. This contains the outputs from all of the worker nodes used in the compute cluster. You can download this data to view the results; the cell below shows just the first 10 rows."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import tempfile\n",
"\n",
"batch_run = pipeline_run.find_step_run(parallel_run_step.name)[0]\n",
"batch_output = batch_run.get_output_data(output_dir.name)\n",
"\n",
"target_dir = tempfile.mkdtemp()\n",
"batch_output.download(local_path=target_dir)\n",
"result_file = os.path.join(target_dir, batch_output.path_on_datastore, parallel_run_config.append_row_file_name)\n",
"\n",
"df = pd.read_csv(result_file, delimiter=\",\", header=None)\n",
"df.columns = [\"File Name\", \"File Size\", \"Ratio of Size in Partition\", \"user\", \"genres\", \"Total File Size of Partition\"]\n",
"print(\"Prediction has\", df.shape[0], \"rows\")\n",
"df.head(10)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"authors": [
{
"name": "pansav"
},
{
"name": "tracych"
},
{
"name": "migu"
}
],
"category": "Other notebooks",
"compute": [
"AML Compute"
],
"datasets": [
"None"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"None"
],
"friendly_name": "Batch inferencing file data partitioned by folder using ParallelRunStep",
"index_order": 1,
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
@@ -0,0 +1,7 @@
name: file-dataset-partition-per-folder
dependencies:
- pip:
  - azureml-sdk
  - azureml-pipeline-steps
  - azureml-widgets
  - pandas
@@ -0,0 +1,427 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Using Azure Machine Learning Pipelines for Batch Inference on tabular input partitioned by column value\n",
"\n",
"In this notebook, we will demonstrate how to make predictions on large quantities of data asynchronously using ML pipelines with Azure Machine Learning. Batch inference (or batch scoring) provides cost-effective inference, with unparalleled throughput for asynchronous applications. Batch prediction pipelines can scale to perform inference on terabytes of production data. Batch prediction is optimized for high throughput, fire-and-forget predictions for a large collection of data.\n",
"\n",
"> **Tip**\n",
"If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction.\n",
"\n",
"This example will create a partitioned tabular dataset by splitting the rows of a large csv file on their values in specified columns. Each partition forms a mini-batch in the parallel processing procedure.\n",
"\n",
"The outline of this notebook is as follows:\n",
"\n",
"- Create a tabular dataset partitioned by the values of specified columns.\n",
"- Do batch inference on the dataset, with each mini-batch corresponding to one partition.\n",
"\n",
"## Prerequisites\n",
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the configuration notebook located at https://github.com/Azure/MachineLearningNotebooks first. This sets you up with a working config file that has information on your workspace, subscription id, etc. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Connect to workspace"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.workspace import Workspace\n",
"ws = Workspace.from_config()\n",
"print('Workspace name: ' + ws.name, \n",
" 'Azure region: ' + ws.location, \n",
" 'Subscription id: ' + ws.subscription_id, \n",
" 'Resource group: ' + ws.resource_group, sep = '\\n')\n",
"\n",
"datastore = ws.get_default_datastore()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"print(azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Download OJ sales data from a public URL"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"\n",
"oj_sales_path = \"./oj.csv\"\n",
"r = requests.get(\"http://www.cs.unitn.it/~taufer/Data/oj.csv\")\n",
"open(oj_sales_path, \"wb\").write(r.content)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Upload OJ sales data to datastore"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"datastore.upload_files([oj_sales_path], \".\", \"oj_sales_data\", overwrite=True, show_progress=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create tabular dataset\n",
"Create a plain (unpartitioned) tabular dataset from the uploaded csv file."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"\n",
"dataset = Dataset.Tabular.from_delimited_files(path=(datastore, 'oj_sales_data/*.csv'))\n",
"print(dataset.to_pandas_dataframe())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Partition the tabular dataset\n",
"Partition the dataset by the columns 'store' and 'brand'. You can get a partition of the data by specifying the value of one or more partition keys. E.g., by specifying `store=1000 and brand='tropicana'`, you get all the rows in the dataset that match this condition; a quick spot check of this follows the next cell."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"partitioned_dataset = dataset.partition_by(partition_keys=['store', 'brand'], target=(datastore, \"partition_by_key_res\"), name=\"partitioned_oj_data\")\n",
"partitioned_dataset.partition_keys"
]
},
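{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick spot check (a sketch added for illustration, not part of the original pipeline), you can pull the partitioned dataset into pandas and inspect a single partition locally. The key values reuse the `store=1000`, `brand='tropicana'` example above; adjust them to values that exist in your copy of the data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical spot check: the partition keys are ordinary columns, so one\n",
"# partition is simply the rows with matching values.\n",
"df_all = partitioned_dataset.to_pandas_dataframe()\n",
"one_partition = df_all[(df_all['store'] == 1000) & (df_all['brand'] == 'tropicana')]\n",
"print(one_partition.head())"
]
},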
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create or Attach existing compute resource"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
"\n",
"# choose a name for your cluster\n",
"compute_name = os.environ.get(\"AML_COMPUTE_CLUSTER_NAME\", \"cpu-cluster\")\n",
"compute_min_nodes = os.environ.get(\"AML_COMPUTE_CLUSTER_MIN_NODES\", 0)\n",
"compute_max_nodes = os.environ.get(\"AML_COMPUTE_CLUSTER_MAX_NODES\", 2)\n",
"\n",
"# This example uses CPU VM. For using GPU VM, set SKU to STANDARD_NC6\n",
"vm_size = os.environ.get(\"AML_COMPUTE_CLUSTER_SKU\", \"STANDARD_D2_V2\")\n",
"\n",
"\n",
"if compute_name in ws.compute_targets:\n",
" compute_target = ws.compute_targets[compute_name]\n",
" if compute_target and type(compute_target) is AmlCompute:\n",
" print('found compute target. just use it. ' + compute_name)\n",
"else:\n",
" print('creating a new compute target...')\n",
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = vm_size,\n",
" min_nodes = compute_min_nodes, \n",
" max_nodes = compute_max_nodes)\n",
"\n",
" # create the cluster\n",
" compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n",
" \n",
" # can poll for a minimum number of nodes and for a specific timeout. \n",
" # if no min node count is provided it will use the scale settings for the cluster\n",
" compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
" \n",
" # For a more detailed view of current AmlCompute status, use get_status()\n",
" print(compute_target.get_status().serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Intermediate/Output Data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline, PipelineData\n",
"\n",
"output_dir = PipelineData(name=\"inferences\", datastore=datastore)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Calculate total income of each mini-batch partitioned by dataset partition key(s)\n",
"The script sums up the INCOME column of each mini-batch."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"scripts_folder = \"Code\"\n",
"script_file = \"total_income.py\"\n",
"\n",
"# peek at contents\n",
"with open(os.path.join(scripts_folder, script_file)) as inference_file:\n",
" print(inference_file.read())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Build and run the batch inference pipeline\n",
"### Specify the environment to run the script\n",
"You need to specify the required azureml packages in the dependencies."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Environment\n",
"from azureml.core.runconfig import CondaDependencies, DEFAULT_CPU_IMAGE\n",
"\n",
"batch_conda_deps = CondaDependencies.create(pip_packages=[\"azureml-core\", \"azureml-dataset-runtime[fuse,pandas]\"])\n",
"batch_env = Environment(name=\"batch_environment\")\n",
"batch_env.python.conda_dependencies = batch_conda_deps\n",
"batch_env.docker.base_image = DEFAULT_CPU_IMAGE"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create the configuration to wrap the inference script\n",
"The parameter `partition_keys` is a list containing a subset of the dataset's partition keys, specifying how the input dataset is partitioned into mini-batches. Every possible combination of values of the `partition_keys` forms one mini-batch. E.g., specifying `partition_keys=['store', 'brand']` results in mini-batches like `store=1000 && brand=tropicana`, `store=1000 && brand=dominicks`, `store=1001 && brand=dominicks`, ..."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.steps import ParallelRunStep, ParallelRunConfig\n",
"\n",
"# In a real-world scenario, you'll want to tune the number of processes per node and the node count to fit your problem domain.\n",
"parallel_run_config = ParallelRunConfig(\n",
" source_directory=scripts_folder,\n",
" entry_script=script_file, # the user script to run against each input\n",
" partition_keys=['store', 'brand'],\n",
" error_threshold=5,\n",
" output_action='append_row',\n",
" append_row_file_name=\"revenue_outputs.txt\",\n",
" environment=batch_env,\n",
" compute_target=compute_target, \n",
" node_count=2,\n",
" run_invocation_timeout=600\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create the pipeline step"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"parallel_run_step = ParallelRunStep(\n",
" name='summarize-revenue',\n",
" inputs=[partitioned_dataset.as_named_input(\"partitioned_tabular_input\")],\n",
" output=output_dir,\n",
" parallel_run_config=parallel_run_config,\n",
" allow_reuse=False\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Run the pipeline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"from azureml.pipeline.core import Pipeline\n",
"\n",
"pipeline = Pipeline(workspace=ws, steps=[parallel_run_step])\n",
"\n",
"pipeline_run = Experiment(ws, 'tabular-dataset-partition').submit(pipeline)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_run.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## View the prediction results\n",
"In the total_income.py file above you can see that the mini-batch dataframe, extended with the total_income column, gets returned. These rows are written to the datastore specified in the PipelineData object as the output data, which in this case is called inferences. This contains the outputs from all of the worker nodes used in the compute cluster. You can download this data to view the results; the cell below shows just the first 10 rows."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import tempfile\n",
"\n",
"batch_run = pipeline_run.find_step_run(parallel_run_step.name)[0]\n",
"batch_output = batch_run.get_output_data(output_dir.name)\n",
"\n",
"target_dir = tempfile.mkdtemp()\n",
"batch_output.download(local_path=target_dir)\n",
"result_file = os.path.join(target_dir, batch_output.path_on_datastore, parallel_run_config.append_row_file_name)\n",
"\n",
"df = pd.read_csv(result_file, delimiter=\" \", header=None)\n",
"\n",
"df.columns = [\"week\", \"logmove\", \"feat\", \"price\", \"AGE60\", \"EDUC\", \"ETHNIC\", \"INCOME\", \"HHLARGE\", \"WORKWOM\", \"HVAL150\", \"SSTRDIST\", \"SSTRVOL\", \"CPDIST5\", \"CPWVOL5\", \"store\", \"brand\", \"total_income\"]\n",
"print(\"Prediction has \", df.shape[0], \" rows\")\n",
"df.head(10)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"authors": [
{
"name": "pansav"
},
{
"name": "tracych"
},
{
"name": "migu"
}
],
"category": "Other notebooks",
"compute": [
"AML Compute"
],
"datasets": [
"OJ Sales Data"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"None"
],
"friendly_name": "Batch inferencing OJ Sales Data partitioned by column using ParallelRunStep",
"index_order": 1,
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
@@ -0,0 +1,7 @@
name: tabular-dataset-partition-per-column
dependencies:
- pip:
  - azureml-sdk
  - azureml-pipeline-steps
  - azureml-widgets
  - pandas
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -95,7 +95,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.36.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -8,5 +8,5 @@ dependencies:
- matplotlib
- azureml-dataset-runtime
- ipywidgets
- raiwidgets~=0.11.0
- raiwidgets~=0.13.0
- liac-arff

@@ -100,7 +100,7 @@
"\n",
"# Check core SDK version number\n",
"\n",
"print(\"This notebook was created using SDK version 1.35.0, you are currently running version\", azureml.core.VERSION)"
"print(\"This notebook was created using SDK version 1.36.0, you are currently running version\", azureml.core.VERSION)"
]
},
{

@@ -102,7 +102,7 @@
"source": [
"import azureml.core\n",
"\n",
"print(\"This notebook was created using version 1.35.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.36.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -213,10 +213,7 @@
"* You do not have permission to create a resource group if it's non-existing.\n",
"* You are not a subscription owner or contributor and no Azure ML workspaces have ever been created in this subscription\n",
"\n",
"If workspace creation fails, please work with your IT admin to provide you with the appropriate permissions or to provision the required resources.\n",
"\n",
"**Note**: A Basic workspace is created by default. If you would like to create an Enterprise workspace, please specify sku = 'enterprise'.\n",
"Please visit our [pricing page](https://azure.microsoft.com/en-us/pricing/details/machine-learning/) for more details on our Enterprise edition.\n"
"If workspace creation fails, please work with your IT admin to provide you with the appropriate permissions or to provision the required resources.\n"
]
},
{
@@ -237,7 +234,6 @@
" resource_group = resource_group, \n",
" location = workspace_region,\n",
" create_resource_group = True,\n",
" sku = 'basic',\n",
" exist_ok = True)\n",
"ws.get_details()\n",
"\n",
