Compare commits

...

28 Commits

Author SHA1 Message Date
amlrelsa-ms
cc85949d6d update samples from Release-171 as a part of 1.51 SDK stable release 2023-06-06 21:58:24 +05:30
amlrelsa-ms
3a1824e3ad update samples from Release-170 as a part of 1.51 SDK stable release 2023-06-06 10:50:33 +05:30
Paul Shealy
579643326d Merge pull request #1911 from diondrapeck/add-deprecation-disclaimer
Add repository deprecation disclaimer and pointer to v2 repo
2023-05-25 08:04:29 -07:00
Diondra Peck
14f76f227e Add deprecation disclaimer 2023-05-23 12:48:14 -07:00
Paul Shealy
25baf5203a Merge pull request #1899 from Azure/release_update/Release-177
update samples from Release-177 as a part of  SDK release
2023-04-17 13:01:27 -07:00
amlrelsa-ms
1178fcb0ba update samples from Release-177 as a part of SDK release 2023-04-17 10:22:59 +00:00
Sasidhar Kasturi
e4d84c8e45 update samples from Release-169 as a part of 1.50.0 SDK stable release (#1898)
Co-authored-by: amlrelsa-ms <amlrelsa@microsoft.com>
2023-04-14 10:39:38 -04:00
Harneet Virk
7a3ab1e44c Merge pull request #1895 from Azure/release_update/Release-175
update samples from Release-175 as a part of  SDK release
2023-03-28 10:17:27 -07:00
amlrelsa-ms
598a293dfa update samples from Release-175 as a part of SDK release 2023-03-28 01:02:26 +00:00
Harneet Virk
40b3068462 Merge pull request #1884 from Azure/release_update_stablev2/Release-166
update samples from Release-166 as a part of 1.49.0 SDK stable release
2023-02-13 21:22:05 -08:00
amlrelsa-ms
0ecbbbce75 update samples from Release-166 as a part of 1.49.0 SDK stable release 2023-02-14 02:46:24 +00:00
Harneet Virk
9b1e130d18 Merge pull request #1867 from Azure/release_update/Release-173
update samples from Release-173 as a part of  SDK release
2022-12-19 19:37:41 -08:00
amlrelsa-ms
0e17b33d2a update samples from Release-173 as a part of SDK release 2022-12-20 03:35:58 +00:00
Harneet Virk
34d80abd26 Merge pull request #1864 from Azure/release_update/Release-172
update samples from Release-172 as a part of  SDK release
2022-12-16 09:28:16 -08:00
amlrelsa-ms
249278ab77 update samples from Release-172 as a part of SDK release 2022-12-15 17:32:05 +00:00
Harneet Virk
25fdb17f80 Merge pull request #1862 from Azure/release_update/Release-170
update samples from Release-170 as a part of  SDK release
2022-12-06 10:06:06 -08:00
amlrelsa-ms
3a02a27f1e update samples from Release-170 as a part of SDK release 2022-12-06 03:22:18 +00:00
Harneet Virk
4eed9d529f Merge pull request #1861 from Azure/release_update/Release-169
update samples from Release-169 as a part of  SDK release
2022-12-05 12:33:52 -08:00
amlrelsa-ms
f344d410a2 update samples from Release-169 as a part of SDK release 2022-12-05 20:12:47 +00:00
Harneet Virk
9dc1228063 Merge pull request #1860 from Azure/release_update/Release-168
update samples from Release-168 as a part of  SDK release
2022-12-05 09:54:01 -08:00
amlrelsa-ms
4404e62f58 update samples from Release-168 as a part of SDK release 2022-12-05 17:52:07 +00:00
Harneet Virk
38d5743bbb Merge pull request #1852 from Azure/release_update/Release-167
update samples from Release-167 as a part of  SDK release
2022-11-08 11:01:10 -08:00
amlrelsa-ms
0814eee151 update samples from Release-167 as a part of SDK release 2022-11-08 01:17:48 +00:00
Harneet Virk
f45b815221 Merge pull request #1848 from Azure/release_update/Release-166
update samples from Release-166 as a part of  SDK release
2022-10-26 12:04:10 -07:00
amlrelsa-ms
bd629ae454 update samples from Release-166 as a part of SDK release 2022-10-26 18:46:34 +00:00
Harneet Virk
41de75a584 Merge pull request #1846 from Azure/release_update_stablev2/Release-156
update samples from Release-156 as a part of 1.47.0 SDK stable release
2022-10-25 21:01:03 -07:00
amlrelsa-ms
96a426dc36 update samples from Release-156 as a part of 1.47.0 SDK stable release 2022-10-25 21:28:24 +00:00
Harneet Virk
824dd40f7e Merge pull request #1836 from Azure/release_update/Release-165
update samples from Release-165 as a part of  SDK release
2022-10-11 13:07:26 -07:00
126 changed files with 2335 additions and 6681 deletions

View File

@@ -103,7 +103,7 @@
"source": [ "source": [
"import azureml.core\n", "import azureml.core\n",
"\n", "\n",
"print(\"This notebook was created using version 1.46.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.51.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -398,7 +398,7 @@
"# run_config.target = gpu_cluster_name\n", "# run_config.target = gpu_cluster_name\n",
"# run_config.environment.docker.enabled = True\n", "# run_config.environment.docker.enabled = True\n",
"# run_config.environment.docker.gpu_support = True\n", "# run_config.environment.docker.gpu_support = True\n",
"# run_config.environment.docker.base_image = \"rapidsai/rapidsai:cuda9.2-runtime-ubuntu18.04\"\n", "# run_config.environment.docker.base_image = \"rapidsai/rapidsai:cuda9.2-runtime-ubuntu20.04\"\n",
"# # run_config.environment.docker.base_image_registry.address = '<registry_url>' # not required if the base_image is in Docker hub\n", "# # run_config.environment.docker.base_image_registry.address = '<registry_url>' # not required if the base_image is in Docker hub\n",
"# # run_config.environment.docker.base_image_registry.username = '<user_name>' # needed only for private images\n", "# # run_config.environment.docker.base_image_registry.username = '<user_name>' # needed only for private images\n",
"# # run_config.environment.docker.base_image_registry.password = '<password>' # needed only for private images\n", "# # run_config.environment.docker.base_image_registry.password = '<password>' # needed only for private images\n",

View File

@@ -6,7 +6,8 @@ dependencies:
 - fairlearn>=0.6.2
 - joblib
 - liac-arff
-- raiwidgets~=0.22.0
+- raiwidgets~=0.26.0
 - itsdangerous==2.0.1
 - markupsafe<2.1.0
 - protobuf==3.20.0
+- numpy<1.24.0

View File

@@ -6,7 +6,8 @@ dependencies:
 - fairlearn>=0.6.2
 - joblib
 - liac-arff
-- raiwidgets~=0.22.0
+- raiwidgets~=0.26.0
 - itsdangerous==2.0.1
 - markupsafe<2.1.0
 - protobuf==3.20.0
+- numpy<1.24.0

View File

@@ -5,32 +5,21 @@ channels:
 - main
 dependencies:
 # The python interpreter version.
-# Currently Azure ML only supports 3.6.0 and later.
-- pip==20.2.4
-- python>=3.6,<3.9
-- matplotlib==3.2.1
-- py-xgboost==1.3.3
-- pytorch::pytorch=1.4.0
+# Azure ML only supports 3.7.0 and later.
+- pip==22.3.1
+- python>=3.7,<3.9
 - conda-forge::fbprophet==0.7.1
-- cudatoolkit=10.1.243
+- pandas==1.1.5
 - scipy==1.5.3
-- notebook
-- pywin32==227
-- PySocks==1.7.1
-- conda-forge::pyqt==5.12.3
-- jinja2<=2.11.2
-- markupsafe<2.1.0
-- tqdm==4.64.1
-- jsonschema==4.16.0
+- Cython==0.29.14
+- tqdm==4.65.0
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.46.0
-  - azureml-defaults~=1.46.0
-  - pytorch-transformers==1.0.0
-  - spacy==2.2.4
-  - pystan==2.19.1.1
-  - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.46.0/validated_win32_requirements.txt [--no-deps]
-  - arch==4.14
-  - wasabi==0.9.1
+  - azureml-widgets~=1.51.0
+  - azureml-defaults~=1.51.0
+  - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.51.0/validated_win32_requirements.txt [--no-deps]
+  - matplotlib==3.6.2
+  - xgboost==1.3.3
+  - cmdstanpy==0.9.5
+  - setuptools-git==1.2

View File

@@ -5,11 +5,9 @@ channels:
 - main
 dependencies:
 # The python interpreter version.
-# Currently Azure ML only supports 3.6.0 and later.
-- pip==20.2.4
-- python>=3.6,<3.9
-- boto3==1.20.19
-- botocore<=1.23.19
+# Azure ML only supports 3.7 and later.
+- pip==22.3.1
+- python>=3.7,<3.9
 - matplotlib==3.2.1
 - numpy>=1.21.6,<=1.22.3
 - cython==0.29.14
@@ -19,19 +17,16 @@ dependencies:
 - py-xgboost<=1.3.3
 - holidays==0.10.3
 - conda-forge::fbprophet==0.7.1
-- pytorch::pytorch=1.4.0
+- pytorch::pytorch=1.11.0
 - cudatoolkit=10.1.243
-- jinja2<=2.11.2
-- markupsafe<2.1.0
-- jsonschema==4.15.0
+- notebook
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.46.0
-  - azureml-defaults~=1.46.0
+  - azureml-widgets~=1.51.0
+  - azureml-defaults~=1.51.0
   - pytorch-transformers==1.0.0
   - spacy==2.2.4
   - pystan==2.19.1.1
   - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.46.0/validated_linux_requirements.txt [--no-deps]
-  - arch==4.14
+  - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.51.0/validated_linux_requirements.txt [--no-deps]

View File

@@ -5,12 +5,9 @@ channels:
 - main
 dependencies:
 # The python interpreter version.
-# Currently Azure ML only supports 3.6.0 and later.
-- pip==20.2.4
-- nomkl
-- python>=3.6,<3.9
-- boto3==1.20.19
-- botocore<=1.23.19
+# Currently Azure ML only supports 3.7 and later.
+- pip==22.3.1
+- python>=3.7,<3.9
 - matplotlib==3.2.1
 - numpy>=1.21.6,<=1.22.3
 - cython==0.29.14
@@ -19,20 +16,17 @@ dependencies:
 - scikit-learn==0.22.1
 - py-xgboost<=1.3.3
 - holidays==0.10.3
-- conda-forge::fbprophet==0.7.1
-- pytorch::pytorch=1.4.0
+- pytorch::pytorch=1.11.0
 - cudatoolkit=9.0
-- jinja2<=2.11.2
-- markupsafe<2.1.0
-- jsonschema==4.15.0
+- notebook
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.46.0
-  - azureml-defaults~=1.46.0
+  - azureml-widgets~=1.51.0
+  - azureml-defaults~=1.51.0
   - pytorch-transformers==1.0.0
   - spacy==2.2.4
   - pystan==2.19.1.1
+  - fbprophet==0.7.1
   - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.46.0/validated_darwin_requirements.txt [--no-deps]
-  - arch==4.14
+  - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.51.0/validated_darwin_requirements.txt [--no-deps]

View File

@@ -33,6 +33,8 @@ if not errorlevel 1 (
 call conda env create -f %automl_env_file% -n %conda_env_name%
 )
+python "%conda_prefix%\scripts\pywin32_postinstall.py" -install
 call conda activate %conda_env_name% 2>nul:
 if errorlevel 1 goto ErrorExit

View File

@@ -1,4 +1,4 @@
-from distutils.version import LooseVersion
+from setuptools._vendor.packaging import version
 import platform
 try:
@@ -17,7 +17,7 @@ if architecture != "64bit":
     minimumVersion = "4.7.8"
-    versionInvalid = (LooseVersion(conda.__version__) < LooseVersion(minimumVersion))
+    versionInvalid = (version.parse(conda.__version__) < version.parse(minimumVersion))
     if versionInvalid:
         print('Setup requires conda version ' + minimumVersion + ' or higher.')
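A standalone sketch of the comparison this change enables. It assumes the standalone `packaging` distribution is importable (the script itself reaches into the copy vendored inside setuptools, which behaves the same way for this check) and that it runs where the `conda` package is importable:

```python
# Sketch, not the repo's script: uses the standalone `packaging` package
# rather than the setuptools-vendored copy the diff imports.
from packaging import version

import conda  # importable inside a conda base environment (assumption)

minimum_version = "4.7.8"

# version.parse compares release segments numerically, so "4.10.1" sorts
# after "4.7.8"; LooseVersion-style string ordering is what the change
# moves away from.
if version.parse(conda.__version__) < version.parse(minimum_version):
    print("Setup requires conda version " + minimum_version + " or higher.")
```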

View File

@@ -712,7 +712,9 @@
"from azureml.core.model import Model\n", "from azureml.core.model import Model\n",
"from azureml.core.environment import Environment\n", "from azureml.core.environment import Environment\n",
"\n", "\n",
"inference_config = InferenceConfig(entry_script=script_file_name)\n", "inference_config = InferenceConfig(\n",
" environment=best_run.get_environment(), entry_script=script_file_name\n",
")\n",
"\n", "\n",
"aciconfig = AciWebservice.deploy_configuration(\n", "aciconfig = AciWebservice.deploy_configuration(\n",
" cpu_cores=2,\n", " cpu_cores=2,\n",

View File

@@ -97,7 +97,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.46.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.51.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -97,7 +97,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.46.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.51.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },
@@ -454,10 +454,13 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"**Note:** Not all datasets produce a y_transformer. The dataset used in the current notebook requires a transformer as the y column data is categorical." "**Note:** Not all datasets produce a y_transformer. The dataset used in the current notebook requires a transformer as the y column data is categorical. \n",
"\n",
"We will go ahead and download the mlflow transformer model and use it to transform test data that can be used for further experimentation below. To run the commented code, make sure the environment requirement is satisfied. You can go ahead and create the environment from the `conda.yaml` file under `/outputs/featurization/pipeline/` and run the given code in it."
] ]
}, },
{ {
@@ -466,7 +469,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.automl.core.shared.constants import Transformers\n", "''' from azureml.automl.core.shared.constants import Transformers\n",
"\n", "\n",
"transformers = mlflow.sklearn.load_model(uri) # Using method 1\n", "transformers = mlflow.sklearn.load_model(uri) # Using method 1\n",
"data_transformers = transformers.get_transformers()\n", "data_transformers = transformers.get_transformers()\n",
@@ -474,14 +477,15 @@
"y_transformer = data_transformers[Transformers.Y_TRANSFORMER]\n", "y_transformer = data_transformers[Transformers.Y_TRANSFORMER]\n",
"\n", "\n",
"X_test = x_transformer.transform(X_test_data)\n", "X_test = x_transformer.transform(X_test_data)\n",
"y_test = y_transformer.transform(y_test_data)" "y_test = y_transformer.transform(y_test_data) '''"
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"Run the following cell to see the featurization summary of X and y transformers. " "Run the following cell to see the featurization summary of X and y transformers. Uncomment to use. "
] ]
}, },
{ {
@@ -490,10 +494,10 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"X_data_summary = x_transformer.get_featurization_summary(is_user_friendly=False)\n", "''' X_data_summary = x_transformer.get_featurization_summary(is_user_friendly=False)\n",
"\n", "\n",
"summary_df = pd.DataFrame.from_records(X_data_summary)\n", "summary_df = pd.DataFrame.from_records(X_data_summary)\n",
"summary_df" "summary_df '''"
] ]
}, },
{ {
@@ -544,10 +548,11 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"Another way to load the data is to go to the above autofeaturization experiment and check for the featurized dataset ids under `Output datasets`. Uncomment and replace them accordingly below to use." "Another way to load the data is to go to the above autofeaturization experiment and check for the featurized dataset ids under `Output datasets`. Uncomment and replace them accordingly below, to use."
] ]
}, },
{ {
@@ -597,10 +602,20 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"Here we are passing our training data to the lightgbm classifier, any custom model can be used with your data." "Here we are passing our training data to the lightgbm classifier, any custom model can be used with your data. Let us first install lightgbm."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"! pip install lightgbm"
] ]
}, },
{ {
@@ -612,11 +627,27 @@
"import lightgbm as lgb\n", "import lightgbm as lgb\n",
"\n", "\n",
"model = lgb.LGBMClassifier(learning_rate=0.08,max_depth=-5,random_state=42)\n", "model = lgb.LGBMClassifier(learning_rate=0.08,max_depth=-5,random_state=42)\n",
"model.fit(X_train, y_train, sample_weight=sample_weight, eval_set=[(X_test, y_test),(X_train, y_train)],\n", "model.fit(X_train, y_train, sample_weight=sample_weight)"
" verbose=20,eval_metric='logloss')\n", ]
"\n", },
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Once training is done, the test data obtained after transforming from the above downloaded transformer can be used to calculate the accuracy "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print('Training accuracy {:.4f}'.format(model.score(X_train, y_train)))\n", "print('Training accuracy {:.4f}'.format(model.score(X_train, y_train)))\n",
"print('Testing accuracy {:.4f}'.format(model.score(X_test, y_test)))" "\n",
"# Uncomment below to test the model on test data \n",
"# print('Testing accuracy {:.4f}'.format(model.score(X_test, y_test)))"
] ]
}, },
{ {
@@ -654,45 +685,8 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"y_pred = model.predict(X_test)" "# Uncomment below to test the model on test data\n",
] "# y_pred = model.predict(X_test)"
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Calculate metrics for the prediction\n",
"\n",
"Now visualize the data on a scatter plot to show what our truth (actual) values are compared to the predicted values \n",
"from the trained model that was returned."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.metrics import confusion_matrix\n",
"from matplotlib import pyplot as plt\n",
"import numpy as np\n",
"import itertools\n",
"\n",
"cf =confusion_matrix(y_test,y_pred)\n",
"plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest')\n",
"plt.colorbar()\n",
"plt.title('Confusion Matrix')\n",
"plt.xlabel('Predicted')\n",
"plt.ylabel('Actual')\n",
"class_labels = ['False','True']\n",
"tick_marks = np.arange(len(class_labels))\n",
"plt.xticks(tick_marks,class_labels)\n",
"plt.yticks([-0.5,0,1,1.5],['','False','True',''])\n",
"# plotting text value inside cells\n",
"thresh = cf.max() / 2.\n",
"for i,j in itertools.product(range(cf.shape[0]),range(cf.shape[1])):\n",
" plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black')\n",
"plt.show()"
] ]
}, },
{ {

View File

@@ -1,6 +1,7 @@
 name: azure_automl_experimental
 dependencies:
 # The python interpreter version.
+<<<<<<< HEAD
 # Currently Azure ML only supports 3.6.0 and later.
 - pip<=20.2.4
 - python>=3.6.0,<3.10
@@ -10,11 +11,14 @@ dependencies:
 - numpy==1.22.3
 - pywin32==227
 - cryptography<37.0.0
+=======
+# Currently Azure ML only supports 3.7.0 and later.
+- pip<=22.3.1
+- python>=3.7.0,<3.11
+>>>>>>> 4671acd451ce979c3cebcd3917804861a333b710
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azure-core==1.24.1
-  - azure-identity==1.7.0
   - azureml-defaults
   - azureml-sdk
   - azureml-widgets

View File

@@ -4,14 +4,13 @@ channels:
 - main
 dependencies:
 # The python interpreter version.
-# Currently Azure ML only supports 3.6.0 and later.
+# Currently Azure ML only supports 3.7.0 and later.
 - pip<=20.2.4
 - nomkl
-- python>=3.6.0,<3.10
+- python>=3.7.0,<3.11
 - urllib3==1.26.7
 - PyJWT < 2.0.0
 - numpy>=1.21.6,<=1.22.3
-- cryptography<37.0.0
 - pip:
   # Required packages for AzureML execution, history, and data preparation.

View File

@@ -92,7 +92,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.46.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.51.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -91,7 +91,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.46.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.51.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -122,7 +122,10 @@ def calculate_scores_and_build_plots(
     input_dir: str, output_dir: str, automl_settings: Dict[str, Any]
 ):
     os.makedirs(output_dir, exist_ok=True)
-    grains = automl_settings.get(constants.TimeSeries.TIME_SERIES_ID_COLUMN_NAMES)
+    grains = automl_settings.get(
+        constants.TimeSeries.TIME_SERIES_ID_COLUMN_NAMES,
+        automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES, None),
+    )
     time_column_name = automl_settings.get(constants.TimeSeries.TIME_COLUMN_NAME)
     if grains is None:
         grains = []
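In isolation, the fallback added here is a nested `dict.get`: prefer the newer settings key and fall back to the legacy grain key. A sketch with literal stand-in keys (the script itself uses `constants.TimeSeries` attributes):

```python
# Stand-in key names for illustration; the script uses constants.TimeSeries values.
legacy_settings = {"grain_column_names": ["store", "brand"]}

grains = legacy_settings.get(
    "time_series_id_column_names",                    # preferred, newer key
    legacy_settings.get("grain_column_names", None),  # legacy fallback
)
print(grains)  # ['store', 'brand'] -- resolved via the legacy key
```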

View File

@@ -33,6 +33,7 @@
"For this notebook we are using a synthetic dataset to demonstrate the back testing in many model scenario. This allows us to check historical performance of AutoML on a historical data. To do that we step back on the backtesting period by the data set several times and split the data to train and test sets. Then these data sets are used for training and evaluation of model.<br>\n", "For this notebook we are using a synthetic dataset to demonstrate the back testing in many model scenario. This allows us to check historical performance of AutoML on a historical data. To do that we step back on the backtesting period by the data set several times and split the data to train and test sets. Then these data sets are used for training and evaluation of model.<br>\n",
"\n", "\n",
"Thus, it is a quick way of evaluating AutoML as if it was in production. Here, we do not test historical performance of a particular model, for this see the [notebook](../forecasting-backtest-single-model/auto-ml-forecasting-backtest-single-model.ipynb). Instead, the best model for every backtest iteration can be different since AutoML chooses the best model for a given training set.\n", "Thus, it is a quick way of evaluating AutoML as if it was in production. Here, we do not test historical performance of a particular model, for this see the [notebook](../forecasting-backtest-single-model/auto-ml-forecasting-backtest-single-model.ipynb). Instead, the best model for every backtest iteration can be different since AutoML chooses the best model for a given training set.\n",
"\n",
"![Backtesting](Backtesting.png)\n", "![Backtesting](Backtesting.png)\n",
"\n", "\n",
"**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**" "**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429).**"
@@ -43,7 +44,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"### Prerequisites\n", "### Prerequisites\n",
"You'll need to create a compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md)." "You'll need to create a compute Instance by following [these](https://learn.microsoft.com/en-us/azure/machine-learning/v1/how-to-create-manage-compute-instance?tabs=python) instructions."
] ]
}, },
{ {
@@ -313,22 +314,37 @@
"source": [ "source": [
"### Set up training parameters\n", "### Set up training parameters\n",
"\n", "\n",
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition. Please note, that in this case we are setting grain_column_names to be the time series ID column plus iteration, because we want to train a separate model for each time series and iteration.\n", "We need to provide ``ForecastingParameters``, ``AutoMLConfig`` and ``ManyModelsTrainParameters`` objects. For the forecasting task we also need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name(s) definition.\n",
"\n", "\n",
"#### ``ForecastingParameters`` arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **time_series_id_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
"| **cv_step_size** | Number of periods between two consecutive cross-validation folds. The default value is \\\"auto\\\", in which case AutoMl determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value. |\n",
"\n",
"#### ``AutoMLConfig`` arguments\n",
"| Property | Description|\n", "| Property | Description|\n",
"| :--------------- | :------------------- |\n", "| :--------------- | :------------------- |\n",
"| **task** | forecasting |\n", "| **task** | forecasting |\n",
"| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>normalized_root_mean_squared_error</i><br><i>normalized_mean_absolute_error</i> |\n", "| **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i> |\n",
"| **blocked_models** | Blocked models won't be used by AutoML. |\n",
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n", "| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n", "| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n", "| **experiment_timeout_hours** | Maximum amount of time in hours that each experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. **It does not control the overall timeout for the pipeline run, instead controls the timeout for each training run per partitioned time series.** |\n",
"| **label_column_name** | The name of the label column. |\n", "| **label_column_name** | The name of the label column. |\n",
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n", "| **n_cross_validations** | Number of cross validation splits. The default value is \\\"auto\\\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
"| **n_cross_validations** | Number of cross validation splits. The default value is \"auto\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n", "| **enable_early_stopping** | Flag to enable early termination if the primary metric is no longer improving. |\n",
"|**cv_step_size**|Number of periods between two consecutive cross-validation folds. The default value is \"auto\", in which case AutoMl determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value.\n", "| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **time_series_id_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n", "| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
"| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n",
"\n",
"\n",
"#### ``ManyModelsTrainParameters`` arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **automl_settings** | The ``AutoMLConfig`` object defined above. |\n",
"| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |" "| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
] ]
}, },
@@ -345,22 +361,30 @@
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n", "from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
" ManyModelsTrainParameters,\n", " ManyModelsTrainParameters,\n",
")\n", ")\n",
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"from azureml.train.automl.automlconfig import AutoMLConfig\n",
"\n", "\n",
"partition_column_names = [TIME_SERIES_ID_COLNAME, \"backtest_iteration\"]\n", "partition_column_names = [TIME_SERIES_ID_COLNAME, \"backtest_iteration\"]\n",
"automl_settings = {\n", "\n",
" \"task\": \"forecasting\",\n", "forecasting_parameters = ForecastingParameters(\n",
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n", " time_column_name=TIME_COLNAME,\n",
" \"iteration_timeout_minutes\": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n", " forecast_horizon=6,\n",
" \"iterations\": 15,\n", " time_series_id_column_names=partition_column_names,\n",
" \"experiment_timeout_hours\": 0.25, # This also needs to be changed based on the dataset. For larger data set this number needs to be bigger.\n", " cv_step_size=\"auto\",\n",
" \"label_column_name\": TARGET_COLNAME,\n", ")\n",
" \"n_cross_validations\": \"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n", "\n",
" \"cv_step_size\": \"auto\",\n", "automl_settings = AutoMLConfig(\n",
" \"time_column_name\": TIME_COLNAME,\n", " task=\"forecasting\",\n",
" \"forecast_horizon\": 6,\n", " primary_metric=\"normalized_root_mean_squared_error\",\n",
" \"time_series_id_column_names\": partition_column_names,\n", " iteration_timeout_minutes=10,\n",
" \"track_child_runs\": False,\n", " iterations=15,\n",
"}\n", " experiment_timeout_hours=0.25,\n",
" label_column_name=TARGET_COLNAME,\n",
" n_cross_validations=\"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
" track_child_runs=False,\n",
" forecasting_parameters=forecasting_parameters,\n",
")\n",
"\n",
"\n", "\n",
"mm_paramters = ManyModelsTrainParameters(\n", "mm_paramters = ManyModelsTrainParameters(\n",
" automl_settings=automl_settings, partition_column_names=partition_column_names\n", " automl_settings=automl_settings, partition_column_names=partition_column_names\n",
@@ -387,8 +411,16 @@
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. |\n", "| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. |\n",
"| **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node or optimal performance. |\n", "| **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node or optimal performance. |\n",
"| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n", "| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n",
"| **run_invocation_timeout** | Maximum amount of time in seconds that the ``ParallelRunStep`` class is allowed. This is optional but provides customers with greater control on exit criteria. This must be greater than ``experiment_timeout_hours`` by at least 300 seconds. |\n",
"\n", "\n",
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution." "Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution.\n",
"\n",
"**Note**: Total time taken for the **training step** in the pipeline to complete = $ \\frac{t}{ p \\times n } \\times ts $\n",
"where,\n",
"- $ t $ is time taken for training one partition (can be viewed in the training logs)\n",
"- $ p $ is ``process_count_per_node``\n",
"- $ n $ is ``node_count``\n",
"- $ ts $ is total number of partitions in time series based on ``partition_column_names``"
] ]
}, },
{ {
@@ -406,7 +438,7 @@
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" node_count=2,\n", " node_count=2,\n",
" process_count_per_node=2,\n", " process_count_per_node=2,\n",
" run_invocation_timeout=920,\n", " run_invocation_timeout=1200,\n",
" train_pipeline_parameters=mm_paramters,\n", " train_pipeline_parameters=mm_paramters,\n",
")" ")"
] ]
@@ -491,25 +523,31 @@
"source": [ "source": [
"For many models we need to provide the ManyModelsInferenceParameters object.\n", "For many models we need to provide the ManyModelsInferenceParameters object.\n",
"\n", "\n",
"#### ManyModelsInferenceParameters arguments\n", "#### ``ManyModelsInferenceParameters`` arguments\n",
"| Property | Description|\n", "| Property | Description|\n",
"| :--------------- | :------------------- |\n", "| :--------------- | :------------------- |\n",
"| **partition_column_names** | List of column names that identifies groups. |\n", "| **partition_column_names** | List of column names that identifies groups. |\n",
"| **target_column_name** | \\[Optional\\] Column name only if the inference dataset has the target. |\n", "| **target_column_name** | \\[Optional] Column name only if the inference dataset has the target. |\n",
"| **time_column_name** | Column name only if it is timeseries. |\n", "| **time_column_name** | \\[Optional] Time column name only if it is timeseries. |\n",
"| **many_models_run_id** | \\[Optional\\] Many models pipeline run id where models were trained. |\n", "| **inference_type** | \\[Optional] Which inference method to use on the model. Possible values are 'forecast', 'predict_proba', and 'predict'. |\n",
"| **forecast_mode** | \\[Optional] The type of forecast to be used, either 'rolling' or 'recursive'; defaults to 'recursive'. |\n",
"| **step** | \\[Optional] Number of periods to advance the forecasting window in each iteration **(for rolling forecast only)**; defaults to 1. |\n",
"\n", "\n",
"#### get_many_models_batch_inference_steps arguments\n", "#### ``get_many_models_batch_inference_steps`` arguments\n",
"| Property | Description|\n", "| Property | Description|\n",
"| :--------------- | :------------------- |\n", "| :--------------- | :------------------- |\n",
"| **experiment** | The experiment used for inference run. |\n", "| **experiment** | The experiment used for inference run. |\n",
"| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n", "| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n",
"| **compute_target** | The compute target that runs the inference pipeline.|\n", "| **compute_target** | The compute target that runs the inference pipeline. |\n",
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n", "| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n",
"| **process_count_per_node** | The number of processes per node.\n", "| **process_count_per_node** | \\[Optional] The number of processes per node. By default it's 2 (should be at most half of the number of cores in a single node of the compute cluster that will be used for the experiment).\n",
"| **train_run_id** | \\[Optional\\] The run id of the hierarchy training, by default it is the latest successful training many model run in the experiment. |\n", "| **inference_pipeline_parameters** | \\[Optional] The ``ManyModelsInferenceParameters`` object defined above. |\n",
"| **train_experiment_name** | \\[Optional\\] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n", "| **append_row_file_name** | \\[Optional] The name of the output file (optional, default value is 'parallel_run_step.txt'). Supports 'txt' and 'csv' file extension. A 'txt' file extension generates the output in 'txt' format with space as separator without column names. A 'csv' file extension generates the output in 'csv' format with comma as separator and with column names. |\n",
"| **process_count_per_node** | \\[Optional\\] The number of processes per node, by default it's 4. |" "| **train_run_id** | \\[Optional] The run id of the **training pipeline**. By default it is the latest successful training pipeline run in the experiment. |\n",
"| **train_experiment_name** | \\[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n",
"| **run_invocation_timeout** | \\[Optional] Maximum amount of time in seconds that the ``ParallelRunStep`` class is allowed. This is optional but provides customers with greater control on exit criteria. |\n",
"| **output_datastore** | \\[Optional] The ``Datastore`` or ``OutputDatasetConfig`` to be used for output. If specified any pipeline output will be written to that location. If unspecified the default datastore will be used. |\n",
"| **arguments** | \\[Optional] Arguments to be passed to inference script. Possible argument is '--forecast_quantiles' followed by quantile values. |"
] ]
}, },
{ {
@@ -529,6 +567,8 @@
" target_column_name=TARGET_COLNAME,\n", " target_column_name=TARGET_COLNAME,\n",
")\n", ")\n",
"\n", "\n",
"output_file_name = \"parallel_run_step.csv\"\n",
"\n",
"inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n", "inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
" experiment=experiment,\n", " experiment=experiment,\n",
" inference_data=test_data,\n", " inference_data=test_data,\n",
@@ -540,6 +580,7 @@
" train_run_id=training_run.id,\n", " train_run_id=training_run.id,\n",
" train_experiment_name=training_run.experiment.name,\n", " train_experiment_name=training_run.experiment.name,\n",
" inference_pipeline_parameters=mm_parameters,\n", " inference_pipeline_parameters=mm_parameters,\n",
" append_row_file_name=output_file_name,\n",
")" ")"
] ]
}, },
@@ -587,18 +628,21 @@
"source": [ "source": [
"from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline\n", "from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline\n",
"\n", "\n",
"PREDICTION_COLNAME = \"Predictions\"\n",
"forecasting_results_name = \"forecasting_results\"\n", "forecasting_results_name = \"forecasting_results\"\n",
"forecasting_output_name = \"many_models_inference_output\"\n", "forecasting_output_name = \"many_models_inference_output\"\n",
"forecast_file = get_output_from_mm_pipeline(\n", "forecast_file = get_output_from_mm_pipeline(\n",
" inference_run, forecasting_results_name, forecasting_output_name\n", " inference_run, forecasting_results_name, forecasting_output_name, output_file_name\n",
")\n", ")\n",
"df = pd.read_csv(forecast_file, delimiter=\" \", header=None, parse_dates=[0])\n", "df = pd.read_csv(forecast_file, parse_dates=[0])\n",
"df.columns = list(X_train.columns) + [\"predicted_level\"]\n",
"print(\n", "print(\n",
" \"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\"\n", " \"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\"\n",
")\n", ")\n",
"# Save the scv file with header to read it in the next step.\n", "# Save the csv file to read it in the next step.\n",
"df.rename(columns={TARGET_COLNAME: \"actual_level\"}, inplace=True)\n", "df.rename(\n",
" columns={TARGET_COLNAME: \"actual_level\", PREDICTION_COLNAME: \"predicted_level\"},\n",
" inplace=True,\n",
")\n",
"df.to_csv(os.path.join(forecasting_results_name, \"forecast.csv\"), index=False)\n", "df.to_csv(os.path.join(forecasting_results_name, \"forecast.csv\"), index=False)\n",
"df.head(10)" "df.head(10)"
] ]
@@ -622,7 +666,9 @@
"backtesting_results = \"backtesting_mm_results\"\n", "backtesting_results = \"backtesting_mm_results\"\n",
"os.makedirs(backtesting_results, exist_ok=True)\n", "os.makedirs(backtesting_results, exist_ok=True)\n",
"calculate_scores_and_build_plots(\n", "calculate_scores_and_build_plots(\n",
" forecasting_results_name, backtesting_results, automl_settings\n", " forecasting_results_name,\n",
" backtesting_results,\n",
" automl_settings.as_serializable_dict(),\n",
")\n", ")\n",
"pd.DataFrame({\"File\": os.listdir(backtesting_results)})" "pd.DataFrame({\"File\": os.listdir(backtesting_results)})"
] ]
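To make the new training-time note in this notebook concrete, here is a worked instance of the estimate with illustrative numbers (none of these values come from the diff):

```python
# total training-step time ~= t / (p * n) * ts
t = 10   # minutes to train one partition, read from the training logs (illustrative)
p = 2    # process_count_per_node
n = 2    # node_count
ts = 48  # total partitions implied by partition_column_names (illustrative)

print(t / (p * n) * ts)  # 120.0 minutes -> about two hours for the training step
```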

View File

@@ -43,11 +43,20 @@ def init():
     global output_dir
     global automl_settings
     global model_uid
+    global forecast_quantiles
     logger.info("Initialization of the run.")
     parser = argparse.ArgumentParser("Parsing input arguments.")
     parser.add_argument("--output-dir", dest="out", required=True)
     parser.add_argument("--model-name", dest="model", default=None)
     parser.add_argument("--model-uid", dest="model_uid", default=None)
+    parser.add_argument(
+        "--forecast_quantiles",
+        nargs="*",
+        type=float,
+        help="forecast quantiles list",
+        default=None,
+    )
     parsed_args, _ = parser.parse_known_args()
     model_name = parsed_args.model
@@ -55,6 +64,7 @@ def init():
     target_column_name = automl_settings.get("label_column_name")
     output_dir = parsed_args.out
     model_uid = parsed_args.model_uid
+    forecast_quantiles = parsed_args.forecast_quantiles
     os.makedirs(output_dir, exist_ok=True)
     os.environ["AUTOML_IGNORE_PACKAGE_VERSION_INCOMPATIBILITIES".lower()] = "True"
@@ -126,23 +136,18 @@ def run_backtest(data_input_name: str, file_name: str, experiment: Experiment):
     )
     print(f"The model {best_run.properties['model_name']} was registered.")
-    _, x_pred = fitted_model.forecast(X_test)
-    x_pred.reset_index(inplace=True, drop=False)
-    columns = [automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]]
-    if automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES):
-        # We know that fitted_model.grain_column_names is a list.
-        columns.extend(fitted_model.grain_column_names)
-    columns.append(constants.TimeSeriesInternal.DUMMY_TARGET_COLUMN)
-    # Remove featurized columns.
-    x_pred = x_pred[columns]
-    x_pred.rename(
-        {constants.TimeSeriesInternal.DUMMY_TARGET_COLUMN: "predicted_level"},
-        axis=1,
-        inplace=True,
-    )
+    # By default we will have forecast quantiles of 0.5, which is our target
+    if forecast_quantiles:
+        if 0.5 not in forecast_quantiles:
+            forecast_quantiles.append(0.5)
+        fitted_model.quantiles = forecast_quantiles
+    x_pred = fitted_model.forecast_quantiles(X_test)
     x_pred["actual_level"] = y_test
     x_pred["backtest_iteration"] = f"iteration_{last_training_date}"
+    x_pred.rename({0.5: "predicted_level"}, axis=1, inplace=True)
     date_safe = RE_INVALID_SYMBOLS.sub("_", last_training_date)
     x_pred.to_csv(os.path.join(output_dir, f"iteration_{date_safe}.csv"), index=False)
     return x_pred
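The heart of the scoring change, restated as a hedged standalone sketch: `fitted_model` is assumed to be a fitted AutoML forecasting pipeline exposing the v1 `forecast_quantiles` API, and `X_test` a matching test frame. The median (0.5) column doubles as the point forecast:

```python
# Request lower/upper bounds plus the median; the script appends 0.5 when
# it is missing so a point forecast is always available.
fitted_model.quantiles = [0.025, 0.5, 0.975]
x_pred = fitted_model.forecast_quantiles(X_test)  # one column per quantile

# Downstream scoring expects the point forecast under "predicted_level".
x_pred.rename({0.5: "predicted_level"}, axis=1, inplace=True)
```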

View File

@@ -365,6 +365,7 @@
" step_size=BACKTESTING_PERIOD,\n", " step_size=BACKTESTING_PERIOD,\n",
" step_number=NUMBER_OF_BACKTESTS,\n", " step_number=NUMBER_OF_BACKTESTS,\n",
" model_uid=model_uid,\n", " model_uid=model_uid,\n",
" forecast_quantiles=[0.025, 0.975], # Optional\n",
")" ")"
] ]
}, },
@@ -590,6 +591,7 @@
" step_size=BACKTESTING_PERIOD,\n", " step_size=BACKTESTING_PERIOD,\n",
" step_number=NUMBER_OF_BACKTESTS,\n", " step_number=NUMBER_OF_BACKTESTS,\n",
" model_name=model_name,\n", " model_name=model_name,\n",
" forecast_quantiles=[0.025, 0.975],\n",
")" ")"
] ]
}, },

View File

@@ -31,6 +31,7 @@ def get_backtest_pipeline(
     step_number: int,
     model_name: Optional[str] = None,
     model_uid: Optional[str] = None,
+    forecast_quantiles: Optional[list] = None,
 ) -> Pipeline:
     """
     :param experiment: The experiment used to run the pipeline.
@@ -44,6 +45,7 @@ def get_backtest_pipeline(
     :param step_size: The number of periods to step back in backtesting.
     :param step_number: The number of backtesting iterations.
     :param model_uid: The uid to mark models from this run of the experiment.
+    :param forecast_quantiles: The forecast quantiles that are required in the inference.
     :return: The pipeline to be used for model retraining.
              **Note:** The output will be uploaded in the pipeline output
              called 'score'.
@@ -135,6 +137,9 @@ def get_backtest_pipeline(
     if model_uid is not None:
         prs_args.append("--model-uid")
         prs_args.append(model_uid)
+    if forecast_quantiles:
+        prs_args.append("--forecast_quantiles")
+        prs_args.extend(forecast_quantiles)
     backtest_prs = ParallelRunStep(
         name=parallel_step_name,
         parallel_run_config=back_test_config,

View File

@@ -1,6 +1,7 @@
 {
 "cells": [
 {
+"attachments": {},
 "cell_type": "markdown",
 "metadata": {},
 "source": [
@@ -10,6 +11,7 @@
 ]
 },
 {
+"attachments": {},
 "cell_type": "markdown",
 "metadata": {},
 "source": [
@@ -17,6 +19,7 @@
 ]
 },
 {
+"attachments": {},
 "cell_type": "markdown",
 "metadata": {},
 "source": [
@@ -34,6 +37,7 @@
 ]
 },
 {
+"attachments": {},
 "cell_type": "markdown",
 "metadata": {},
 "source": [
@@ -42,7 +46,7 @@
 "\n",
 "AutoML highlights here include built-in holiday featurization, accessing engineered feature names, and working with the `forecast` function. Please also look at the additional forecasting notebooks, which document lagging, rolling windows, forecast quantiles, other ways to use the forecast function, and forecaster deployment.\n",
 "\n",
-"Make sure you have executed the [configuration notebook](../../../configuration.ipynb) before running this notebook.\n",
+"Make sure you have executed the [configuration notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) before running this notebook.\n",
 "\n",
 "Notebook synopsis:\n",
 "1. Creating an Experiment in an existing Workspace\n",
@@ -52,6 +56,7 @@
 ]
 },
 {
+"attachments": {},
 "cell_type": "markdown",
 "metadata": {},
 "source": [
@@ -61,7 +66,11 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"gather": {
+"logged": 1680248038565
+}
+},
 "outputs": [],
 "source": [
 "import json\n",
@@ -77,6 +86,7 @@
 ]
 },
 {
+"attachments": {},
 "cell_type": "markdown",
 "metadata": {},
 "source": [
@@ -93,6 +103,7 @@
 ]
 },
 {
+"attachments": {},
 "cell_type": "markdown",
 "metadata": {},
 "source": [
@@ -126,6 +137,7 @@
 ]
 },
 {
+"attachments": {},
 "cell_type": "markdown",
 "metadata": {},
 "source": [
@@ -165,30 +177,12 @@
 ]
 },
 {
+"attachments": {},
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "## Data\n",
 "\n",
-"The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace) is paired with the storage account, which contains the default data store. We will use it to upload the bike share data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation."
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"datastore = ws.get_default_datastore()\n",
-"datastore.upload_files(\n",
-"    files=[\"./bike-no.csv\"], target_path=\"dataset/\", overwrite=True, show_progress=True\n",
-")"
-]
-},
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
 "Let's set up what we know about the dataset. \n",
 "\n",
 "**Target column** is what we want to forecast.\n",
@@ -207,24 +201,51 @@
 ]
 },
 {
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
+"attachments": {},
+"cell_type": "markdown",
+"metadata": {
+"nteract": {
+"transient": {
+"deleting": false
+}
+}
+},
 "source": [
-"dataset = Dataset.Tabular.from_delimited_files(\n",
-"    path=[(datastore, \"dataset/bike-no.csv\")]\n",
-").with_timestamp_columns(fine_grain_timestamp=time_column_name)\n",
-"\n",
-"# Drop the columns 'casual' and 'registered' as these columns are a breakdown of the total and therefore a leak.\n",
-"dataset = dataset.drop_columns(columns=[\"casual\", \"registered\"])\n",
-"\n",
-"dataset.take(5).to_pandas_dataframe().reset_index(drop=True)"
+"You are now ready to load the historical bike share data. We will load the CSV file into a plain pandas DataFrame."
 ]
 },
 {
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"jupyter": {
+"outputs_hidden": false,
+"source_hidden": false
+},
+"nteract": {
+"transient": {
+"deleting": false
+}
+}
+},
+"outputs": [],
+"source": [
+"all_data = pd.read_csv(\"bike-no.csv\", parse_dates=[time_column_name])\n",
+"\n",
+"# Drop the columns 'casual' and 'registered' as these columns are a breakdown of the total and therefore a leak.\n",
+"all_data.drop([\"casual\", \"registered\"], axis=1, inplace=True)"
+]
+},
+{
+"attachments": {},
 "cell_type": "markdown",
-"metadata": {},
+"metadata": {
+"nteract": {
+"transient": {
+"deleting": false
+}
+}
+},
 "source": [
 "### Split the data\n",
 "\n",
@@ -234,25 +255,68 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"metadata": {}, "metadata": {
"gather": {
"logged": 1680247376789
},
"jupyter": {
"outputs_hidden": false,
"source_hidden": false
},
"nteract": {
"transient": {
"deleting": false
}
}
},
"outputs": [], "outputs": [],
"source": [ "source": [
"# select data that occurs before a specified date\n", "# select data that occurs before a specified date\n",
"train = dataset.time_before(datetime(2012, 8, 31), include_boundary=True)\n", "train = all_data[all_data[time_column_name] <= pd.Timestamp(\"2012-08-31\")].copy()\n",
"train.to_pandas_dataframe().tail(5).reset_index(drop=True)" "test = all_data[all_data[time_column_name] >= pd.Timestamp(\"2012-09-01\")].copy()"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"### Upload data to datastore\n",
"\n",
"The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace) is paired with the storage account, which contains the default data store. We will use it to upload the bike share data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation."
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"metadata": {}, "metadata": {
"jupyter": {
"outputs_hidden": false,
"source_hidden": false
},
"nteract": {
"transient": {
"deleting": false
}
}
},
"outputs": [], "outputs": [],
"source": [ "source": [
"test = dataset.time_after(datetime(2012, 9, 1), include_boundary=True)\n", "from azureml.data.dataset_factory import TabularDatasetFactory\n",
"test.to_pandas_dataframe().head(5).reset_index(drop=True)" "\n",
"datastore = ws.get_default_datastore()\n",
"\n",
"train_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
" train, target=(datastore, \"dataset/\"), name=\"bike_no_train\"\n",
")\n",
"\n",
"test_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
" test, target=(datastore, \"dataset/\"), name=\"bike_no_test\"\n",
")"
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -270,6 +334,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -294,6 +359,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -312,6 +378,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -331,6 +398,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -360,7 +428,7 @@
" featurization=featurization_config,\n", " featurization=featurization_config,\n",
" blocked_models=[\"ExtremeRandomTrees\"],\n", " blocked_models=[\"ExtremeRandomTrees\"],\n",
" experiment_timeout_hours=0.3,\n", " experiment_timeout_hours=0.3,\n",
" training_data=train,\n", " training_data=train_dataset,\n",
" label_column_name=target_column_name,\n", " label_column_name=target_column_name,\n",
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" enable_early_stopping=True,\n", " enable_early_stopping=True,\n",
@@ -373,6 +441,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -398,6 +467,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -416,6 +486,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -441,6 +512,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -484,6 +556,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -491,6 +564,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -509,6 +583,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -531,6 +606,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -546,7 +622,7 @@
"from run_forecast import run_rolling_forecast\n", "from run_forecast import run_rolling_forecast\n",
"\n", "\n",
"remote_run = run_rolling_forecast(\n", "remote_run = run_rolling_forecast(\n",
" test_experiment, compute_target, best_run, test, target_column_name\n", " test_experiment, compute_target, best_run, test_dataset, target_column_name\n",
")\n", ")\n",
"remote_run" "remote_run"
] ]
@@ -561,6 +637,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -575,7 +652,34 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"remote_run.download_file(\"outputs/predictions.csv\", \"predictions.csv\")\n", "remote_run.download_file(\"outputs/predictions.csv\", \"predictions.csv\")\n",
"df_all = pd.read_csv(\"predictions.csv\")" "fcst_df = pd.read_csv(\"predictions.csv\")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Note that the rolling forecast can contain multiple predictions for each date, each from a different forecast origin. For example, consider 2012-09-05:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fcst_df[fcst_df.date == \"2012-09-05\"]"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Here, the forecast origin refers to the latest date of actuals available for a given forecast. The earliest origin in the rolling forecast, 2012-08-31, is the last day in the training data. For origin date 2012-09-01, the forecasts use actual recorded counts from the training data *and* the actual count recorded on 2012-09-01. Note that the model is not retrained for origin dates later than 2012-08-31, but the values for model features, such as lagged values of daily count, are updated.\n",
"\n",
"Let's calculate the metrics over all rolling forecasts:"
] ]
}, },
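To make the origin bookkeeping concrete, here is a minimal sketch, assuming the `fcst_df` frame and the `date` and `forecast_origin` column names from the cells above; it derives each prediction's horizon as the distance between its date and its origin:

    import pandas as pd

    # Each prediction's horizon is the number of days between its date and its
    # forecast origin; a step-1 rolling forecast with a 14-day horizon yields
    # up to 14 predictions per date.
    dates = pd.to_datetime(fcst_df["date"])
    origins = pd.to_datetime(fcst_df["forecast_origin"])
    horizons = (dates - origins).dt.days
    print(horizons.groupby(dates).agg(["min", "max", "count"]).head())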
{ {
@@ -587,67 +691,36 @@
"from azureml.automl.core.shared import constants\n", "from azureml.automl.core.shared import constants\n",
"from azureml.automl.runtime.shared.score import scoring\n", "from azureml.automl.runtime.shared.score import scoring\n",
"from sklearn.metrics import mean_absolute_error, mean_squared_error\n", "from sklearn.metrics import mean_absolute_error, mean_squared_error\n",
"from matplotlib import pyplot as plt\n",
"\n", "\n",
"# use automl metrics module\n", "# use automl metrics module\n",
"scores = scoring.score_regression(\n", "scores = scoring.score_regression(\n",
" y_test=df_all[target_column_name],\n", " y_test=fcst_df[target_column_name],\n",
" y_pred=df_all[\"predicted\"],\n", " y_pred=fcst_df[\"predicted\"],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n", " metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
")\n", ")\n",
"\n", "\n",
"print(\"[Test data scores]\\n\")\n", "print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items():\n", "for key, value in scores.items():\n",
" print(\"{}: {:.3f}\".format(key, value))\n", " print(\"{}: {:.3f}\".format(key, value))"
"\n",
"# Plot outputs\n",
"%matplotlib inline\n",
"test_pred = plt.scatter(df_all[target_column_name], df_all[\"predicted\"], color=\"b\")\n",
"test_test = plt.scatter(\n",
" df_all[target_column_name], df_all[target_column_name], color=\"g\"\n",
")\n",
"plt.legend(\n",
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n",
"plt.show()"
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"For more details on what metrics are included and how they are calculated, please refer to [supported metrics](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#regressionforecasting-metrics). You could also calculate residuals, like described [here](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#residuals).\n", "For more details on what metrics are included and how they are calculated, please refer to [supported metrics](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#regressionforecasting-metrics). You could also calculate residuals, like described [here](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#residuals).\n",
"\n", "\n",
"\n", "The rolling forecast metric values are very high in comparison to the validation metrics reported by the AutoML job. What's going on here? We will investigate in the following cells!"
"Since we did a rolling evaluation on the test set, we can analyze the predictions by their forecast horizon relative to the rolling origin. The model was initially trained at a forecast horizon of 14, so each prediction from the model is associated with a horizon value from 1 to 14. The horizon values are in a column named, \"horizon_origin,\" in the prediction set. For example, we can calculate some of the error metrics grouped by the horizon:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from metrics_helper import MAPE, APE\n",
"\n",
"df_all.groupby(\"horizon_origin\").apply(\n",
" lambda df: pd.Series(\n",
" {\n",
" \"MAPE\": MAPE(df[target_column_name], df[\"predicted\"]),\n",
" \"RMSE\": np.sqrt(\n",
" mean_squared_error(df[target_column_name], df[\"predicted\"])\n",
" ),\n",
" \"MAE\": mean_absolute_error(df[target_column_name], df[\"predicted\"]),\n",
" }\n",
" )\n",
")"
] ]
}, },
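As a quick illustration of the residuals calculation linked above, a minimal sketch assuming the `fcst_df` frame and `target_column_name` from the earlier cells:

    # Residuals are actuals minus predictions; the summary statistics give a
    # fast check for bias (non-zero mean) and spread.
    residuals = fcst_df[target_column_name] - fcst_df["predicted"]
    print(residuals.describe())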
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"To drill down more, we can look at the distributions of APE (absolute percentage error) by horizon. From the chart, it is clear that the overall MAPE is being skewed by one particular point where the actual value is of small absolute value." "### Forecast versus actuals plot\n",
"We will plot predictions and actuals on a time series plot. Since there are many forecasts for each date, we select the 14-day-ahead forecast from each forecast origin for our comparison."
] ]
}, },
{ {
@@ -656,21 +729,56 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"df_all_APE = df_all.assign(APE=APE(df_all[target_column_name], df_all[\"predicted\"]))\n", "from matplotlib import pyplot as plt\n",
"APEs = [\n",
" df_all_APE[df_all[\"horizon_origin\"] == h].APE.values\n",
" for h in range(1, forecast_horizon + 1)\n",
"]\n",
"\n", "\n",
"%matplotlib inline\n", "%matplotlib inline\n",
"plt.boxplot(APEs)\n",
"plt.yscale(\"log\")\n",
"plt.xlabel(\"horizon\")\n",
"plt.ylabel(\"APE (%)\")\n",
"plt.title(\"Absolute Percentage Errors by Forecast Horizon\")\n",
"\n", "\n",
"fcst_df_h14 = (\n",
" fcst_df.groupby(\"forecast_origin\", as_index=False)\n",
" .last()\n",
" .drop(columns=[\"forecast_origin\"])\n",
")\n",
"fcst_df_h14.set_index(time_column_name, inplace=True)\n",
"plt.plot(fcst_df_h14[[target_column_name, \"predicted\"]])\n",
"plt.xticks(rotation=45)\n",
"plt.title(f\"Predicted vs. Actuals\")\n",
"plt.legend([\"actual\", \"14-day-ahead forecast\"])\n",
"plt.show()" "plt.show()"
] ]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Looking at the plot, there are two clear issues:\n",
"1. An anomalously low count value on October 29th, 2012.\n",
"2. End-of-year holidays (Thanksgiving and Christmas) in late November and late December.\n",
"\n",
"What happened on Oct. 29th, 2012? That day, Hurricane Sandy brought severe storm surge flooding to the east coast of the United States, particularly around New York City. This is certainly an anomalous event that the model did not account for!\n",
"\n",
"As for the late year holidays, the model apparently did not learn to account for the full reduction of bike share rentals on these major holidays. The training data covers 2011 and early 2012, so the model fit only had access to a single occurrence of these holidays. This makes it challenging to resolve holiday effects; however, a larger AutoML model search may result in a better model that is more holiday-aware.\n",
"\n",
"If we filter the predictions prior to the Thanksgiving holiday and remove the anomalous day of 2012-10-29, the metrics are closer to validation levels:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"date_filter = (fcst_df.date != \"2012-10-29\") & (fcst_df.date < \"2012-11-22\")\n",
"scores = scoring.score_regression(\n",
" y_test=fcst_df[date_filter][target_column_name],\n",
" y_pred=fcst_df[date_filter][\"predicted\"],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
")\n",
"\n",
"print(\"[Test data scores (filtered)]\\n\")\n",
"for key, value in scores.items():\n",
" print(\"{}: {:.3f}\".format(key, value))"
]
} }
], ],
"metadata": { "metadata": {
@@ -696,6 +804,9 @@
], ],
"friendly_name": "Forecasting BikeShare Demand", "friendly_name": "Forecasting BikeShare Demand",
"index_order": 1, "index_order": 1,
"kernel_info": {
"name": "python38-azureml"
},
"kernelspec": { "kernelspec": {
"display_name": "Python 3.8 - AzureML", "display_name": "Python 3.8 - AzureML",
"language": "python", "language": "python",
@@ -711,11 +822,19 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.8.5" "version": "3.10.9"
},
"microsoft": {
"ms_spell_check": {
"ms_spell_check_language": "en"
}
}, },
"mimetype": "text/x-python", "mimetype": "text/x-python",
"name": "python", "name": "python",
"npconvert_exporter": "python", "npconvert_exporter": "python",
"nteract": {
"version": "nteract-front-end@1.0.0"
},
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"tags": [ "tags": [
"Forecasting" "Forecasting"

@@ -36,18 +36,18 @@ y_test_df = (
fitted_model = joblib.load("model.pkl") fitted_model = joblib.load("model.pkl")
y_pred, X_trans = fitted_model.rolling_evaluation(X_test_df, y_test_df.values) X_rf = fitted_model.rolling_forecast(X_test_df, y_test_df.values, step=1)
# Add predictions, actuals, and horizon relative to rolling origin to the test feature data # Add predictions, actuals, and horizon relative to rolling origin to the test feature data
assign_dict = { assign_dict = {
"horizon_origin": X_trans["horizon_origin"].values, fitted_model.forecast_origin_column_name: "forecast_origin",
"predicted": y_pred, fitted_model.forecast_column_name: "predicted",
target_column_name: y_test_df[target_column_name].values, fitted_model.actual_column_name: target_column_name,
} }
df_all = X_test_df.assign(**assign_dict) X_rf.rename(columns=assign_dict, inplace=True)
file_name = "outputs/predictions.csv" file_name = "outputs/predictions.csv"
export_csv = df_all.to_csv(file_name, header=True) export_csv = X_rf.to_csv(file_name, header=True)
# Upload the predictions into artifacts # Upload the predictions into artifacts
run.upload_file(name=file_name, path_or_stream=file_name) run.upload_file(name=file_name, path_or_stream=file_name)
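The `step` argument controls how far the forecast origin advances between iterations. A rough sketch of the semantics, reusing the fitted model and test frames loaded in the script above (the step values here are illustrative):

    # step=1 produces a forecast from every possible origin in the test window;
    # larger steps space the origins out and yield fewer predictions overall.
    X_rf_daily = fitted_model.rolling_forecast(X_test_df, y_test_df.values, step=1)
    X_rf_weekly = fitted_model.rolling_forecast(X_test_df, y_test_df.values, step=7)
    print(len(X_rf_daily), len(X_rf_weekly))  # weekly origins give fewer rows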

@@ -2,23 +2,22 @@
"cells": [ "cells": [
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n", "Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n", "\n",
"Licensed under the MIT License." "Licensed under the MIT License."
] ],
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.png)" "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.png)"
] ],
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"# Automated Machine Learning\n", "# Automated Machine Learning\n",
"_**Forecasting using the Energy Demand Dataset**_\n", "_**Forecasting using the Energy Demand Dataset**_\n",
@@ -33,17 +32,17 @@
"Advanced Forecasting\n", "Advanced Forecasting\n",
"1. [Advanced Training](#advanced_training)\n", "1. [Advanced Training](#advanced_training)\n",
"1. [Advanced Results](#advanced_results)" "1. [Advanced Results](#advanced_results)"
] ],
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"# Introduction<a id=\"introduction\"></a>\n", "# Introduction<a id=\"introduction\"></a>\n",
"\n", "\n",
"In this example we use the associated New York City energy demand dataset to showcase how you can use AutoML for a simple forecasting problem and explore the results. The goal is predict the energy demand for the next 48 hours based on historic time-series data.\n", "In this example we use the associated New York City energy demand dataset to showcase how you can use AutoML for a simple forecasting problem and explore the results. The goal is predict the energy demand for the next 48 hours based on historic time-series data.\n",
"\n", "\n",
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) first, if you haven't already, to establish your connection to the AzureML Workspace.\n", "If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) first, if you haven't already, to establish your connection to the AzureML Workspace.\n",
"\n", "\n",
"In this notebook you will learn how to:\n", "In this notebook you will learn how to:\n",
"1. Creating an Experiment using an existing Workspace\n", "1. Creating an Experiment using an existing Workspace\n",
@@ -53,20 +52,18 @@
"1. Generate the forecast and compute the out-of-sample accuracy metrics\n", "1. Generate the forecast and compute the out-of-sample accuracy metrics\n",
"1. Configuration and remote run of AutoML for a time-series model with lag and rolling window features\n", "1. Configuration and remote run of AutoML for a time-series model with lag and rolling window features\n",
"1. Run and explore the forecast with lagging features" "1. Run and explore the forecast with lagging features"
] ],
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"# Setup<a id=\"setup\"></a>" "# Setup<a id=\"setup\"></a>"
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"import json\n", "import json\n",
"import logging\n", "import logging\n",
@@ -85,36 +82,36 @@
"from azureml.core import Experiment, Workspace, Dataset\n", "from azureml.core import Experiment, Workspace, Dataset\n",
"from azureml.train.automl import AutoMLConfig\n", "from azureml.train.automl import AutoMLConfig\n",
"from datetime import datetime" "from datetime import datetime"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"This notebook is compatible with Azure ML SDK version 1.35.0 or later." "This notebook is compatible with Azure ML SDK version 1.35.0 or later."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments." "As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"ws = Workspace.from_config()\n", "ws = Workspace.from_config()\n",
"\n", "\n",
@@ -136,11 +133,13 @@
"pd.set_option(\"display.max_colwidth\", None)\n", "pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"## Create or Attach existing AmlCompute\n", "## Create or Attach existing AmlCompute\n",
"A compute target is required to execute a remote Automated ML run. \n", "A compute target is required to execute a remote Automated ML run. \n",
@@ -150,13 +149,11 @@
"#### Creation of AmlCompute takes approximately 5 minutes. \n", "#### Creation of AmlCompute takes approximately 5 minutes. \n",
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n", "If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota." "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n", "from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n", "from azureml.core.compute_target import ComputeTargetException\n",
@@ -175,22 +172,24 @@
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n", " compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n", "\n",
"compute_target.wait_for_completion(show_output=True)" "compute_target.wait_for_completion(show_output=True)"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"# Data<a id=\"data\"></a>\n", "# Data<a id=\"data\"></a>\n",
"\n", "\n",
"We will use energy consumption [data from New York City](http://mis.nyiso.com/public/P-58Blist.htm) for model training. The data is stored in a tabular format and includes energy demand and basic weather data at an hourly frequency. \n", "We will use energy consumption [data from New York City](http://mis.nyiso.com/public/P-58Blist.htm) for model training. The data is stored in a tabular format and includes energy demand and basic weather data at an hourly frequency. \n",
"\n", "\n",
"With Azure Machine Learning datasets you can keep a single copy of data in your storage, easily access data during model training, share data and collaborate with other users. Below, we will upload the datatset and create a [tabular dataset](https://docs.microsoft.com/bs-latn-ba/azure/machine-learning/service/how-to-create-register-datasets#dataset-types) to be used training and prediction." "With Azure Machine Learning datasets you can keep a single copy of data in your storage, easily access data during model training, share data and collaborate with other users. Below, we will upload the datatset and create a [tabular dataset](https://docs.microsoft.com/bs-latn-ba/azure/machine-learning/service/how-to-create-register-datasets#dataset-types) to be used training and prediction."
] ],
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"Let's set up what we know about the dataset.\n", "Let's set up what we know about the dataset.\n",
"\n", "\n",
@@ -198,86 +197,122 @@
"<b>Time column</b> is the time axis along which to predict.\n", "<b>Time column</b> is the time axis along which to predict.\n",
"\n", "\n",
"The other columns, \"temp\" and \"precip\", are implicitly designated as features." "The other columns, \"temp\" and \"precip\", are implicitly designated as features."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"target_column_name = \"demand\"\n", "target_column_name = \"demand\"\n",
"time_column_name = \"timeStamp\"" "time_column_name = \"timeStamp\""
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"dataset = Dataset.Tabular.from_delimited_files(\n", "dataset = Dataset.Tabular.from_delimited_files(\n",
" path=\"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/nyc_energy.csv\"\n", " path=\"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/nyc_energy.csv\"\n",
").with_timestamp_columns(fine_grain_timestamp=time_column_name)\n", ").with_timestamp_columns(fine_grain_timestamp=time_column_name)\n",
"dataset.take(5).to_pandas_dataframe().reset_index(drop=True)" "dataset.take(5).to_pandas_dataframe().reset_index(drop=True)"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"The NYC Energy dataset is missing energy demand values for all datetimes later than August 10th, 2017 5AM. Below, we trim the rows containing these missing values from the end of the dataset." "The NYC Energy dataset is missing energy demand values for all datetimes later than August 10th, 2017 5AM. Below, we trim the rows containing these missing values from the end of the dataset."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# Cut off the end of the dataset due to large number of nan values\n", "# Cut off the end of the dataset due to large number of nan values\n",
"dataset = dataset.time_before(datetime(2017, 10, 10, 5))" "dataset = dataset.time_before(datetime(2017, 10, 10, 5))"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"## Split the data into train and test sets" "## Split the data into train and test sets"
] ],
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"The first split we make is into train and test sets. Note that we are splitting on time. Data before and including August 8th, 2017 5AM will be used for training, and data after will be used for testing." "The first split we make is into train and test sets. Note that we are splitting on time. Data before and including August 8th, 2017 5AM will be used for training, and data after will be used for testing."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# split into train based on time\n", "# split into train based on time\n",
"train = dataset.time_before(datetime(2017, 8, 8, 5), include_boundary=True)\n", "train = (\n",
"train.to_pandas_dataframe().reset_index(drop=True).sort_values(time_column_name).tail(5)" " dataset.time_before(datetime(2017, 8, 8, 5), include_boundary=True)\n",
] " .to_pandas_dataframe()\n",
" .reset_index(drop=True)\n",
")\n",
"train.sort_values(time_column_name).tail(5)"
],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# split into test based on time\n", "# split into test based on time\n",
"test = dataset.time_between(datetime(2017, 8, 8, 6), datetime(2017, 8, 10, 5))\n", "test = (\n",
"test.to_pandas_dataframe().reset_index(drop=True).head(5)" " dataset.time_between(datetime(2017, 8, 8, 6), datetime(2017, 8, 10, 5))\n",
] " .to_pandas_dataframe()\n",
" .reset_index(drop=True)\n",
")\n",
"test.head(5)"
],
"outputs": [],
"execution_count": null,
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# register the splitted train and test data in workspace storage\n",
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
"\n",
"datastore = ws.get_default_datastore()\n",
"train_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
" train, target=(datastore, \"dataset/\"), name=\"nyc_energy_train\"\n",
")\n",
"test_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
" test, target=(datastore, \"dataset/\"), name=\"nyc_energy_test\"\n",
")"
],
"outputs": [],
"execution_count": null,
"metadata": {
"jupyter": {
"source_hidden": false,
"outputs_hidden": false
},
"nteract": {
"transient": {
"deleting": false
}
}
}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"### Setting the maximum forecast horizon\n", "### Setting the maximum forecast horizon\n",
"\n", "\n",
@@ -286,20 +321,20 @@
"Learn more about forecast horizons in our [Auto-train a time-series forecast model](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-auto-train-forecast#configure-and-run-experiment) guide.\n", "Learn more about forecast horizons in our [Auto-train a time-series forecast model](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-auto-train-forecast#configure-and-run-experiment) guide.\n",
"\n", "\n",
"In this example, we set the horizon to 48 hours." "In this example, we set the horizon to 48 hours."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"forecast_horizon = 48" "forecast_horizon = 48"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"## Forecasting Parameters\n", "## Forecasting Parameters\n",
"To define forecasting parameters for your experiment training, you can leverage the ForecastingParameters class. The table below details the forecasting parameter we will be passing into our experiment.\n", "To define forecasting parameters for your experiment training, you can leverage the ForecastingParameters class. The table below details the forecasting parameter we will be passing into our experiment.\n",
@@ -310,11 +345,11 @@
"|**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).|\n", "|**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).|\n",
"|**freq**|Forecast frequency. This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information.\n", "|**freq**|Forecast frequency. This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information.\n",
"|**cv_step_size**|Number of periods between two consecutive cross-validation folds. The default value is \"auto\", in which case AutoMl determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value." "|**cv_step_size**|Number of periods between two consecutive cross-validation folds. The default value is \"auto\", in which case AutoMl determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value."
] ],
"metadata": {}
}, },
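For reference, a minimal sketch of how these parameters could be assembled for the hourly energy series; the values shown are assumptions based on the table above and the horizon set earlier, not necessarily the exact configuration used below:

    from azureml.automl.core.forecasting_parameters import ForecastingParameters

    # Map the table's parameters onto the ForecastingParameters class.
    forecasting_parameters = ForecastingParameters(
        time_column_name=time_column_name,  # "timeStamp"
        forecast_horizon=forecast_horizon,  # 48 hourly periods
        freq="H",                           # hourly pandas offset alias (assumed)
        cv_step_size="auto",
    )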
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"# Train<a id=\"train\"></a>\n", "# Train<a id=\"train\"></a>\n",
"\n", "\n",
@@ -332,20 +367,18 @@
"|**n_cross_validations**|Number of cross-validation folds to use for model/pipeline selection. The default value is \"auto\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value.\n", "|**n_cross_validations**|Number of cross-validation folds to use for model/pipeline selection. The default value is \"auto\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value.\n",
"|**enable_early_stopping**|Flag to enble early termination if the score is not improving in the short term.|\n", "|**enable_early_stopping**|Flag to enble early termination if the score is not improving in the short term.|\n",
"|**forecasting_parameters**|A class holds all the forecasting related parameters.|\n" "|**forecasting_parameters**|A class holds all the forecasting related parameters.|\n"
] ],
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"This notebook uses the blocked_models parameter to exclude some models that take a longer time to train on this dataset. You can choose to remove models from the blocked_models list but you may need to increase the experiment_timeout_hours parameter value to get results." "This notebook uses the blocked_models parameter to exclude some models that take a longer time to train on this dataset. You can choose to remove models from the blocked_models list but you may need to increase the experiment_timeout_hours parameter value to get results."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n", "from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"\n", "\n",
@@ -361,7 +394,7 @@
" primary_metric=\"normalized_root_mean_squared_error\",\n", " primary_metric=\"normalized_root_mean_squared_error\",\n",
" blocked_models=[\"ExtremeRandomTrees\", \"AutoArima\", \"Prophet\"],\n", " blocked_models=[\"ExtremeRandomTrees\", \"AutoArima\", \"Prophet\"],\n",
" experiment_timeout_hours=0.3,\n", " experiment_timeout_hours=0.3,\n",
" training_data=train,\n", " training_data=train_dataset,\n",
" label_column_name=target_column_name,\n", " label_column_name=target_column_name,\n",
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" enable_early_stopping=True,\n", " enable_early_stopping=True,\n",
@@ -369,65 +402,65 @@
" verbosity=logging.INFO,\n", " verbosity=logging.INFO,\n",
" forecasting_parameters=forecasting_parameters,\n", " forecasting_parameters=forecasting_parameters,\n",
")" ")"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"Call the `submit` method on the experiment object and pass the run configuration. Depending on the data and the number of iterations this can run for a while.\n", "Call the `submit` method on the experiment object and pass the run configuration. Depending on the data and the number of iterations this can run for a while.\n",
"One may specify `show_output = True` to print currently running iterations to the console." "One may specify `show_output = True` to print currently running iterations to the console."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"remote_run = experiment.submit(automl_config, show_output=False)" "remote_run = experiment.submit(automl_config, show_output=False)"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"remote_run.wait_for_completion()" "remote_run.wait_for_completion()"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"## Retrieve the Best Run details\n", "## Retrieve the Best Run details\n",
"Below we retrieve the best Run object from among all the runs in the experiment." "Below we retrieve the best Run object from among all the runs in the experiment."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"best_run = remote_run.get_best_child()\n", "best_run = remote_run.get_best_child()\n",
"best_run" "best_run"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"## Featurization\n", "## Featurization\n",
"We can look at the engineered feature names generated in time-series featurization via. the JSON file named 'engineered_feature_names.json' under the run outputs." "We can look at the engineered feature names generated in time-series featurization via. the JSON file named 'engineered_feature_names.json' under the run outputs."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# Download the JSON file locally\n", "# Download the JSON file locally\n",
"best_run.download_file(\n", "best_run.download_file(\n",
@@ -437,11 +470,13 @@
" records = json.load(f)\n", " records = json.load(f)\n",
"\n", "\n",
"records" "records"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"### View featurization summary\n", "### View featurization summary\n",
"You can also see what featurization steps were performed on different raw features in the user data. For each raw feature in the user data, the following information is displayed:\n", "You can also see what featurization steps were performed on different raw features in the user data. For each raw feature in the user data, the following information is displayed:\n",
@@ -451,13 +486,11 @@
"+ Type detected\n", "+ Type detected\n",
"+ If feature was dropped\n", "+ If feature was dropped\n",
"+ List of feature transformations for the raw feature" "+ List of feature transformations for the raw feature"
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# Download the featurization summary JSON file locally\n", "# Download the featurization summary JSON file locally\n",
"best_run.download_file(\n", "best_run.download_file(\n",
@@ -479,41 +512,41 @@
" \"Transformations\",\n", " \"Transformations\",\n",
" ]\n", " ]\n",
"]" "]"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"# Forecasting<a id=\"forecast\"></a>\n", "# Forecasting<a id=\"forecast\"></a>\n",
"\n", "\n",
"Now that we have retrieved the best pipeline/model, it can be used to make predictions on test data. We will do batch scoring on the test dataset which should have the same schema as training dataset.\n", "Now that we have retrieved the best pipeline/model, it can be used to make predictions on test data. We will do batch scoring on the test dataset which should have the same schema as training dataset.\n",
"\n", "\n",
"The inference will run on a remote compute. In this example, it will re-use the training compute." "The inference will run on a remote compute. In this example, it will re-use the training compute."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"test_experiment = Experiment(ws, experiment_name + \"_inference\")" "test_experiment = Experiment(ws, experiment_name + \"_inference\")"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"### Retrieving forecasts from the model\n", "### Retrieving forecasts from the model\n",
"We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and expecuted on the remote compute." "We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and expecuted on the remote compute."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"from run_forecast import run_remote_inference\n", "from run_forecast import run_remote_inference\n",
"\n", "\n",
@@ -521,39 +554,39 @@
" test_experiment=test_experiment,\n", " test_experiment=test_experiment,\n",
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" train_run=best_run,\n", " train_run=best_run,\n",
" test_dataset=test,\n", " test_dataset=test_dataset,\n",
" target_column_name=target_column_name,\n", " target_column_name=target_column_name,\n",
")\n", ")\n",
"remote_run_infer.wait_for_completion(show_output=False)\n", "remote_run_infer.wait_for_completion(show_output=False)\n",
"\n", "\n",
"# download the inference output file to the local machine\n", "# download the inference output file to the local machine\n",
"remote_run_infer.download_file(\"outputs/predictions.csv\", \"predictions.csv\")" "remote_run_infer.download_file(\"outputs/predictions.csv\", \"predictions.csv\")"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"### Evaluate\n", "### Evaluate\n",
"To evaluate the accuracy of the forecast, we'll compare against the actual sales quantities for some select metrics, included the mean absolute percentage error (MAPE). For more metrics that can be used for evaluation after training, please see [supported metrics](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#regressionforecasting-metrics), and [how to calculate residuals](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#residuals)." "To evaluate the accuracy of the forecast, we'll compare against the actual sales quantities for some select metrics, included the mean absolute percentage error (MAPE). For more metrics that can be used for evaluation after training, please see [supported metrics](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#regressionforecasting-metrics), and [how to calculate residuals](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#residuals)."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# load forecast data frame\n", "# load forecast data frame\n",
"fcst_df = pd.read_csv(\"predictions.csv\", parse_dates=[time_column_name])\n", "fcst_df = pd.read_csv(\"predictions.csv\", parse_dates=[time_column_name])\n",
"fcst_df.head()" "fcst_df.head()"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"from azureml.automl.core.shared import constants\n", "from azureml.automl.core.shared import constants\n",
"from azureml.automl.runtime.shared.score import scoring\n", "from azureml.automl.runtime.shared.score import scoring\n",
@@ -580,31 +613,31 @@
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n", " (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n", ")\n",
"plt.show()" "plt.show()"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"# Advanced Training <a id=\"advanced_training\"></a>\n", "# Advanced Training <a id=\"advanced_training\"></a>\n",
"We did not use lags in the previous model specification. In effect, the prediction was the result of a simple regression on date, time series identifier columns and any additional features. This is often a very good prediction as common time series patterns like seasonality and trends can be captured in this manner. Such simple regression is horizon-less: it doesn't matter how far into the future we are predicting, because we are not using past data. In the previous example, the horizon was only used to split the data for cross-validation." "We did not use lags in the previous model specification. In effect, the prediction was the result of a simple regression on date, time series identifier columns and any additional features. This is often a very good prediction as common time series patterns like seasonality and trends can be captured in this manner. Such simple regression is horizon-less: it doesn't matter how far into the future we are predicting, because we are not using past data. In the previous example, the horizon was only used to split the data for cross-validation."
] ],
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"### Using lags and rolling window features\n", "### Using lags and rolling window features\n",
"Now we will configure the target lags, that is the previous values of the target variables, meaning the prediction is no longer horizon-less. We therefore must still specify the `forecast_horizon` that the model will learn to forecast. The `target_lags` keyword specifies how far back we will construct the lags of the target variable, and the `target_rolling_window_size` specifies the size of the rolling window over which we will generate the `max`, `min` and `sum` features.\n", "Now we will configure the target lags, that is the previous values of the target variables, meaning the prediction is no longer horizon-less. We therefore must still specify the `forecast_horizon` that the model will learn to forecast. The `target_lags` keyword specifies how far back we will construct the lags of the target variable, and the `target_rolling_window_size` specifies the size of the rolling window over which we will generate the `max`, `min` and `sum` features.\n",
"\n", "\n",
"This notebook uses the blocked_models parameter to exclude some models that take a longer time to train on this dataset. You can choose to remove models from the blocked_models list but you may need to increase the iteration_timeout_minutes parameter value to get results." "This notebook uses the blocked_models parameter to exclude some models that take a longer time to train on this dataset. You can choose to remove models from the blocked_models list but you may need to increase the iteration_timeout_minutes parameter value to get results."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"advanced_forecasting_parameters = ForecastingParameters(\n", "advanced_forecasting_parameters = ForecastingParameters(\n",
" time_column_name=time_column_name,\n", " time_column_name=time_column_name,\n",
@@ -627,7 +660,7 @@
" \"Prophet\",\n", " \"Prophet\",\n",
" ], # These models are blocked for tutorial purposes, remove this for real use cases.\n", " ], # These models are blocked for tutorial purposes, remove this for real use cases.\n",
" experiment_timeout_hours=0.3,\n", " experiment_timeout_hours=0.3,\n",
" training_data=train,\n", " training_data=train_dataset,\n",
" label_column_name=target_column_name,\n", " label_column_name=target_column_name,\n",
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" enable_early_stopping=True,\n", " enable_early_stopping=True,\n",
@@ -635,70 +668,70 @@
" verbosity=logging.INFO,\n", " verbosity=logging.INFO,\n",
" forecasting_parameters=advanced_forecasting_parameters,\n", " forecasting_parameters=advanced_forecasting_parameters,\n",
")" ")"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"We now start a new remote run, this time with lag and rolling window featurization. AutoML applies featurizations in the setup stage, prior to iterating over ML models. The full training set is featurized first, followed by featurization of each of the CV splits. Lag and rolling window features introduce additional complexity, so the run will take longer than in the previous example that lacked these featurizations." "We now start a new remote run, this time with lag and rolling window featurization. AutoML applies featurizations in the setup stage, prior to iterating over ML models. The full training set is featurized first, followed by featurization of each of the CV splits. Lag and rolling window features introduce additional complexity, so the run will take longer than in the previous example that lacked these featurizations."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"advanced_remote_run = experiment.submit(automl_config, show_output=False)" "advanced_remote_run = experiment.submit(automl_config, show_output=False)"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"advanced_remote_run.wait_for_completion()" "advanced_remote_run.wait_for_completion()"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"### Retrieve the Best Run details" "### Retrieve the Best Run details"
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"best_run_lags = remote_run.get_best_child()\n", "best_run_lags = remote_run.get_best_child()\n",
"best_run_lags" "best_run_lags"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"# Advanced Results<a id=\"advanced_results\"></a>\n", "# Advanced Results<a id=\"advanced_results\"></a>\n",
"We did not use lags in the previous model specification. In effect, the prediction was the result of a simple regression on date, time series identifier columns and any additional features. This is often a very good prediction as common time series patterns like seasonality and trends can be captured in this manner. Such simple regression is horizon-less: it doesn't matter how far into the future we are predicting, because we are not using past data. In the previous example, the horizon was only used to split the data for cross-validation." "We did not use lags in the previous model specification. In effect, the prediction was the result of a simple regression on date, time series identifier columns and any additional features. This is often a very good prediction as common time series patterns like seasonality and trends can be captured in this manner. Such simple regression is horizon-less: it doesn't matter how far into the future we are predicting, because we are not using past data. In the previous example, the horizon was only used to split the data for cross-validation."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"test_experiment_advanced = Experiment(ws, experiment_name + \"_inference_advanced\")\n", "test_experiment_advanced = Experiment(ws, experiment_name + \"_inference_advanced\")\n",
"advanced_remote_run_infer = run_remote_inference(\n", "advanced_remote_run_infer = run_remote_inference(\n",
" test_experiment=test_experiment_advanced,\n", " test_experiment=test_experiment_advanced,\n",
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" train_run=best_run_lags,\n", " train_run=best_run_lags,\n",
" test_dataset=test,\n", " test_dataset=test_dataset,\n",
" target_column_name=target_column_name,\n", " target_column_name=target_column_name,\n",
" inference_folder=\"./forecast_advanced\",\n", " inference_folder=\"./forecast_advanced\",\n",
")\n", ")\n",
@@ -708,23 +741,23 @@
"advanced_remote_run_infer.download_file(\n", "advanced_remote_run_infer.download_file(\n",
" \"outputs/predictions.csv\", \"predictions_advanced.csv\"\n", " \"outputs/predictions.csv\", \"predictions_advanced.csv\"\n",
")" ")"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"fcst_adv_df = pd.read_csv(\"predictions_advanced.csv\", parse_dates=[time_column_name])\n", "fcst_adv_df = pd.read_csv(\"predictions_advanced.csv\", parse_dates=[time_column_name])\n",
"fcst_adv_df.head()" "fcst_adv_df.head()"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"from azureml.automl.core.shared import constants\n", "from azureml.automl.core.shared import constants\n",
"from azureml.automl.runtime.shared.score import scoring\n", "from azureml.automl.runtime.shared.score import scoring\n",
@@ -753,7 +786,10 @@
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n", " (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n", ")\n",
"plt.show()" "plt.show()"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
} }
], ],
"metadata": { "metadata": {
@@ -767,26 +803,37 @@
"automated-machine-learning" "automated-machine-learning"
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python 3.8 - AzureML", "name": "python38-azureml",
"language": "python", "language": "python",
"name": "python38-azureml" "display_name": "Python 3.8 - AzureML"
}, },
"language_info": { "language_info": {
"name": "python",
"version": "3.8.5",
"mimetype": "text/x-python",
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
"version": 3 "version": 3
}, },
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.8.5" "nbconvert_exporter": "python",
"file_extension": ".py"
}, },
"vscode": { "vscode": {
"interpreter": { "interpreter": {
"hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca" "hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca"
} }
},
"microsoft": {
"ms_spell_check": {
"ms_spell_check_language": "en"
}
},
"kernel_info": {
"name": "python3"
},
"nteract": {
"version": "nteract-front-end@1.0.0"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@@ -52,7 +52,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"Please make sure you have followed the `configuration.ipynb` notebook so that your ML workspace information is saved in the config file." "Please make sure you have followed the [configuration notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) so that your ML workspace information is saved in the config file."
] ]
}, },
{ {
@@ -758,7 +758,15 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Forecasting farther than the forecast horizon <a id=\"recursive forecasting\"></a>\n", "## Forecasting farther than the forecast horizon <a id=\"recursive forecasting\"></a>\n",
"When the forecast destination, or the latest date in the prediction data frame, is farther into the future than the specified forecast horizon, the `forecast()` function will still make point predictions out to the later date using a recursive operation mode. Internally, the method recursively applies the regular forecaster to generate context so that we can forecast further into the future. \n", "When the forecast destination, or the latest date in the prediction data frame, is farther into the future than the specified forecast horizon, the forecaster must be iteratively applied. Here, we advance the forecast origin on each iteration over the prediction window, predicting `max_horizon` periods ahead on each iteration. There are two choices for the context data to use as the forecaster advances into the prediction window:\n",
"\n",
"1. We can use forecasted values from previous iterations (recursive forecast),\n",
"2. We can use known, actual values of the target if they are available (rolling forecast).\n",
"\n",
"The first method is useful in a true forecasting scenario when we do not yet know the actual target values while the second is useful in an evaluation scenario where we want to compute accuracy metrics for the `max_horizon`-period-ahead forecaster over a long test set. We refer to the first as a **recursive forecast** since we apply the forecaster recursively over the prediction window and the second as a **rolling forecast** since we roll forward over known actuals.\n",
"\n",
"### Recursive forecasting\n",
"By default, the `forecast()` function will make point predictions out to the later date using a recursive operation mode. Internally, the method recursively applies the regular forecaster to generate context so that we can forecast further into the future. \n",
"\n", "\n",
"To illustrate the use-case and operation of recursive forecasting, we'll consider an example with a single time-series where the forecasting period directly follows the training period and is twice as long as the forecasting horizon given at training time.\n", "To illustrate the use-case and operation of recursive forecasting, we'll consider an example with a single time-series where the forecasting period directly follows the training period and is twice as long as the forecasting horizon given at training time.\n",
"\n", "\n",
@@ -818,6 +826,35 @@
"np.array_equal(y_pred_all, y_pred_long)" "np.array_equal(y_pred_all, y_pred_long)"
] ]
}, },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Rolling forecasts\n",
"A rolling forecast is a similar concept to the recursive forecasts described above except that we use known actual values of the target for our context data. We have provided a different, public method for this called `rolling_forecast`. In addition to test data and actuals (`X_test` and `y_test`), `rolling_forecast` also accepts an optional `step` parameter that controls how far the origin advances on each iteration. The recursive forecast mode uses a fixed step of `max_horizon` while `rolling_forecast` defaults to a step size of 1, but can be set to any integer from 1 to `max_horizon`, inclusive.\n",
"\n",
"Let's see what the rolling forecast looks like on the long test set with the step set to 1:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_rf = fitted_model.rolling_forecast(X_test_long, y_test_long, step=1)\n",
"X_rf.head(n=12)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Notice that `rolling_forecast` has returned a single DataFrame containing all results and has generated some new columns: `_automl_forecast_origin`, `_automl_forecast_y`, and `_automl_actual_y`. These are the origin date for each forecast, the forecasted value and the actual value, respectively. Note that \"y\" in the forecast and actual column names will generally be replaced by the target column name supplied to AutoML.\n",
"\n",
"The output above shows forecasts for two prediction windows, the first with origin at the end of the training set and the second including the first observation in the test set (2000-01-01 06:00:00). Since the forecast windows overlap, there are multiple forecasts for most dates which are associated with different origin dates."
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -880,7 +917,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.8.5" "version": "3.7.13"
}, },
"tags": [ "tags": [
"Forecasting", "Forecasting",
@@ -894,5 +931,5 @@
} }
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 2 "nbformat_minor": 4
} }

View File

@@ -52,7 +52,7 @@
"\n", "\n",
"AutoML highlights here include using Deep Learning forecasts, Arima, Prophet, Remote Execution and Remote Inferencing, and working with the `forecast` function. Please also look at the additional forecasting notebooks, which document lagging, rolling windows, forecast quantiles, other ways to use the forecast function, and forecaster deployment.\n", "AutoML highlights here include using Deep Learning forecasts, Arima, Prophet, Remote Execution and Remote Inferencing, and working with the `forecast` function. Please also look at the additional forecasting notebooks, which document lagging, rolling windows, forecast quantiles, other ways to use the forecast function, and forecaster deployment.\n",
"\n", "\n",
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n", "Make sure you have executed the [configuration](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) before running this notebook.\n",
"\n", "\n",
"Notebook synopsis:\n", "Notebook synopsis:\n",
"\n", "\n",
@@ -325,7 +325,7 @@
"source": [ "source": [
"### Setting forecaster maximum horizon \n", "### Setting forecaster maximum horizon \n",
"\n", "\n",
"The forecast horizon is the number of periods into the future that the model should predict. Here, we set the horizon to 12 periods (i.e. 12 months). Notice that this is much shorter than the number of months in the test set; we will need to use a rolling test to evaluate the performance on the whole test set. For more discussion of forecast horizons and guiding principles for setting them, please see the [energy demand notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand). " "The forecast horizon is the number of periods into the future that the model should predict. Here, we set the horizon to 14 periods (i.e. 14 days). Notice that this is much shorter than the number of months in the test set; we will need to use a rolling test to evaluate the performance on the whole test set. For more discussion of forecast horizons and guiding principles for setting them, please see the [energy demand notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand). "
] ]
}, },
{ {
@@ -337,7 +337,7 @@
}, },
"outputs": [], "outputs": [],
"source": [ "source": [
"forecast_horizon = 12" "forecast_horizon = 14"
] ]
}, },
{ {
@@ -699,5 +699,5 @@
} }
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 2 "nbformat_minor": 4
} }

View File

@@ -4,7 +4,6 @@ import os
import numpy as np import numpy as np
import pandas as pd import pandas as pd
from pandas.tseries.frequencies import to_offset
from sklearn.externals import joblib from sklearn.externals import joblib
from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.metrics import mean_absolute_error, mean_squared_error
@@ -19,219 +18,8 @@ except ImportError:
_torch_present = False _torch_present = False
def align_outputs( def map_location_cuda(storage, loc):
y_predicted, return storage.cuda()
X_trans,
X_test,
y_test,
predicted_column_name="predicted",
horizon_colname="horizon_origin",
):
"""
Demonstrates how to get the output aligned to the inputs
using pandas indexes. Helps understand what happened if
the output's shape differs from the input shape, or if
the data got re-sorted by time and grain during forecasting.
Typical causes of misalignment are:
* we predicted some periods that were missing in actuals -> drop from eval
* model was asked to predict past max_horizon -> increase max horizon
* data at start of X_test was needed for lags -> provide previous periods
"""
if horizon_colname in X_trans:
df_fcst = pd.DataFrame(
{
predicted_column_name: y_predicted,
horizon_colname: X_trans[horizon_colname],
}
)
else:
df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
# y and X outputs are aligned by forecast() function contract
df_fcst.index = X_trans.index
# align original X_test to y_test
X_test_full = X_test.copy()
X_test_full[target_column_name] = y_test
# X_test_full's index does not include origin, so reset for merge
df_fcst.reset_index(inplace=True)
X_test_full = X_test_full.reset_index().drop(columns="index")
together = df_fcst.merge(X_test_full, how="right")
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = together[
together[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
return clean
def do_rolling_forecast_with_lookback(
fitted_model, X_test, y_test, max_horizon, X_lookback, y_lookback, freq="D"
):
"""
Produce forecasts on a rolling origin over the given test set.
Each iteration makes a forecast for the next 'max_horizon' periods
with respect to the current origin, then advances the origin by the
horizon time duration. The prediction context for each forecast is set so
that the forecaster uses the actual target values prior to the current
origin time for constructing lag features.
This function returns a concatenated DataFrame of rolling forecasts.
"""
print("Using lookback of size: ", y_lookback.size)
df_list = []
origin_time = X_test[time_column_name].min()
X = X_lookback.append(X_test)
y = np.concatenate((y_lookback, y_test), axis=0)
while origin_time <= X_test[time_column_name].max():
# Set the horizon time - end date of the forecast
horizon_time = origin_time + max_horizon * to_offset(freq)
# Extract test data from an expanding window up-to the horizon
expand_wind = X[time_column_name] < horizon_time
X_test_expand = X[expand_wind]
y_query_expand = np.zeros(len(X_test_expand)).astype(float)
y_query_expand.fill(np.NaN)
if origin_time != X[time_column_name].min():
# Set the context by including actuals up-to the origin time
test_context_expand_wind = X[time_column_name] < origin_time
context_expand_wind = X_test_expand[time_column_name] < origin_time
y_query_expand[context_expand_wind] = y[test_context_expand_wind]
# Print some debug info
print(
"Horizon_time:",
horizon_time,
" origin_time: ",
origin_time,
" max_horizon: ",
max_horizon,
" freq: ",
freq,
)
print("expand_wind: ", expand_wind)
print("y_query_expand")
print(y_query_expand)
print("X_test")
print(X)
print("X_test_expand")
print(X_test_expand)
print("Type of X_test_expand: ", type(X_test_expand))
print("Type of y_query_expand: ", type(y_query_expand))
print("y_query_expand")
print(y_query_expand)
# Make a forecast out to the maximum horizon
# y_fcst, X_trans = y_query_expand, X_test_expand
y_fcst, X_trans = fitted_model.forecast(X_test_expand, y_query_expand)
print("y_fcst")
print(y_fcst)
# Align forecast with test set for dates within
# the current rolling window
trans_tindex = X_trans.index.get_level_values(time_column_name)
trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
test_roll_wind = expand_wind & (X[time_column_name] >= origin_time)
df_list.append(
align_outputs(
y_fcst[trans_roll_wind],
X_trans[trans_roll_wind],
X[test_roll_wind],
y[test_roll_wind],
)
)
# Advance the origin time
origin_time = horizon_time
return pd.concat(df_list, ignore_index=True)
def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq="D"):
"""
Produce forecasts on a rolling origin over the given test set.
Each iteration makes a forecast for the next 'max_horizon' periods
with respect to the current origin, then advances the origin by the
horizon time duration. The prediction context for each forecast is set so
that the forecaster uses the actual target values prior to the current
origin time for constructing lag features.
This function returns a concatenated DataFrame of rolling forecasts.
"""
df_list = []
origin_time = X_test[time_column_name].min()
while origin_time <= X_test[time_column_name].max():
# Set the horizon time - end date of the forecast
horizon_time = origin_time + max_horizon * to_offset(freq)
# Extract test data from an expanding window up-to the horizon
expand_wind = X_test[time_column_name] < horizon_time
X_test_expand = X_test[expand_wind]
y_query_expand = np.zeros(len(X_test_expand)).astype(float)
y_query_expand.fill(np.NaN)
if origin_time != X_test[time_column_name].min():
# Set the context by including actuals up-to the origin time
test_context_expand_wind = X_test[time_column_name] < origin_time
context_expand_wind = X_test_expand[time_column_name] < origin_time
y_query_expand[context_expand_wind] = y_test[test_context_expand_wind]
# Print some debug info
print(
"Horizon_time:",
horizon_time,
" origin_time: ",
origin_time,
" max_horizon: ",
max_horizon,
" freq: ",
freq,
)
print("expand_wind: ", expand_wind)
print("y_query_expand")
print(y_query_expand)
print("X_test")
print(X_test)
print("X_test_expand")
print(X_test_expand)
print("Type of X_test_expand: ", type(X_test_expand))
print("Type of y_query_expand: ", type(y_query_expand))
print("y_query_expand")
print(y_query_expand)
# Make a forecast out to the maximum horizon
y_fcst, X_trans = fitted_model.forecast(X_test_expand, y_query_expand)
print("y_fcst")
print(y_fcst)
# Align forecast with test set for dates within the
# current rolling window
trans_tindex = X_trans.index.get_level_values(time_column_name)
trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
test_roll_wind = expand_wind & (X_test[time_column_name] >= origin_time)
df_list.append(
align_outputs(
y_fcst[trans_roll_wind],
X_trans[trans_roll_wind],
X_test[test_roll_wind],
y_test[test_roll_wind],
)
)
# Advance the origin time
origin_time = horizon_time
return pd.concat(df_list, ignore_index=True)
def APE(actual, pred): def APE(actual, pred):
@@ -254,10 +42,6 @@ def MAPE(actual, pred):
return np.mean(APE(actual_safe, pred_safe)) return np.mean(APE(actual_safe, pred_safe))
def map_location_cuda(storage, loc):
return storage.cuda()
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument( parser.add_argument(
"--max_horizon", "--max_horizon",
@@ -303,7 +87,6 @@ print(model_path)
run = Run.get_context() run = Run.get_context()
# get input dataset by name # get input dataset by name
test_dataset = run.input_datasets["test_data"] test_dataset = run.input_datasets["test_data"]
lookback_dataset = run.input_datasets["lookback_data"]
grain_column_names = [] grain_column_names = []
@@ -312,15 +95,8 @@ df = test_dataset.to_pandas_dataframe()
print("Read df") print("Read df")
print(df) print(df)
X_test_df = test_dataset.drop_columns(columns=[target_column_name]) X_test_df = df
y_test_df = test_dataset.with_timestamp_columns(None).keep_columns( y_test = df.pop(target_column_name).to_numpy()
columns=[target_column_name]
)
X_lookback_df = lookback_dataset.drop_columns(columns=[target_column_name])
y_lookback_df = lookback_dataset.with_timestamp_columns(None).keep_columns(
columns=[target_column_name]
)
_, ext = os.path.splitext(model_path) _, ext = os.path.splitext(model_path)
if ext == ".pt": if ext == ".pt":
@@ -336,37 +112,20 @@ else:
# Load the sklearn pipeline. # Load the sklearn pipeline.
fitted_model = joblib.load(model_path) fitted_model = joblib.load(model_path)
if hasattr(fitted_model, "get_lookback"): X_rf = fitted_model.rolling_forecast(X_test_df, y_test, step=1)
lookback = fitted_model.get_lookback() assign_dict = {
df_all = do_rolling_forecast_with_lookback( fitted_model.forecast_origin_column_name: "forecast_origin",
fitted_model, fitted_model.forecast_column_name: "predicted",
X_test_df.to_pandas_dataframe(), fitted_model.actual_column_name: target_column_name,
y_test_df.to_pandas_dataframe().values.T[0], }
max_horizon, X_rf.rename(columns=assign_dict, inplace=True)
X_lookback_df.to_pandas_dataframe()[-lookback:],
y_lookback_df.to_pandas_dataframe().values.T[0][-lookback:],
freq,
)
else:
df_all = do_rolling_forecast(
fitted_model,
X_test_df.to_pandas_dataframe(),
y_test_df.to_pandas_dataframe().values.T[0],
max_horizon,
freq,
)
print(df_all) print(X_rf.head())
print("target values:::")
print(df_all[target_column_name])
print("predicted values:::")
print(df_all["predicted"])
# Use the AutoML scoring module # Use the AutoML scoring module
regression_metrics = list(constants.REGRESSION_SCALAR_SET) regression_metrics = list(constants.REGRESSION_SCALAR_SET)
y_test = np.array(df_all[target_column_name]) y_test = np.array(X_rf[target_column_name])
y_pred = np.array(df_all["predicted"]) y_pred = np.array(X_rf["predicted"])
scores = scoring.score_regression(y_test, y_pred, regression_metrics) scores = scoring.score_regression(y_test, y_pred, regression_metrics)
print("scores:") print("scores:")
@@ -376,11 +135,11 @@ for key, value in scores.items():
run.log(key, value) run.log(key, value)
print("Simple forecasting model") print("Simple forecasting model")
rmse = np.sqrt(mean_squared_error(df_all[target_column_name], df_all["predicted"])) rmse = np.sqrt(mean_squared_error(X_rf[target_column_name], X_rf["predicted"]))
print("[Test Data] \nRoot Mean squared error: %.2f" % rmse) print("[Test Data] \nRoot Mean squared error: %.2f" % rmse)
mae = mean_absolute_error(df_all[target_column_name], df_all["predicted"]) mae = mean_absolute_error(X_rf[target_column_name], X_rf["predicted"])
print("mean_absolute_error score: %.2f" % mae) print("mean_absolute_error score: %.2f" % mae)
print("MAPE: %.2f" % MAPE(df_all[target_column_name], df_all["predicted"])) print("MAPE: %.2f" % MAPE(X_rf[target_column_name], X_rf["predicted"]))
run.log("rmse", rmse) run.log("rmse", rmse)
run.log("mae", mae) run.log("mae", mae)

View File

@@ -40,7 +40,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"### Prerequisites\n", "### Prerequisites\n",
"You'll need to create a compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md)." "You'll need to create a compute Instance by following [these](https://learn.microsoft.com/en-us/azure/machine-learning/v1/how-to-create-manage-compute-instance?tabs=python) instructions."
] ]
}, },
{ {
@@ -251,8 +251,17 @@
"source": [ "source": [
"### Set up training parameters\n", "### Set up training parameters\n",
"\n", "\n",
"This dictionary defines the AutoML and hierarchy settings. For this forecasting task we need to define several settings inncluding the name of the time column, the maximum forecast horizon, the hierarchy definition, and the level of the hierarchy at which to train.\n", "We need to provide ``ForecastingParameters``, ``AutoMLConfig`` and ``HTSTrainParameters`` objects. For the forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, the hierarchy definition, and the level of the hierarchy at which to train.\n",
"\n", "\n",
"#### ``ForecastingParameters`` arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **time_series_id_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
"| **cv_step_size** | Number of periods between two consecutive cross-validation folds. The default value is \\\"auto\\\", in which case AutoMl determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value. |\n",
"\n",
"#### ``AutoMLConfig`` arguments\n",
"| Property | Description|\n", "| Property | Description|\n",
"| :--------------- | :------------------- |\n", "| :--------------- | :------------------- |\n",
"| **task** | forecasting |\n", "| **task** | forecasting |\n",
@@ -260,20 +269,22 @@
"| **blocked_models** | Blocked models won't be used by AutoML. |\n", "| **blocked_models** | Blocked models won't be used by AutoML. |\n",
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n", "| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n", "| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n", "| **experiment_timeout_hours** | Maximum amount of time in hours that each experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. **It does not control the overall timeout for the pipeline run, instead controls the timeout for each training run per partitioned time series.** |\n",
"| **label_column_name** | The name of the label column. |\n", "| **label_column_name** | The name of the label column. |\n",
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n", "| **n_cross_validations** | Number of cross validation splits. The default value is \\\"auto\\\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
"|**n_cross_validations**|Number of cross-validation folds to use for model/pipeline selection. The default value is \"auto\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value.\n", "| **enable_early_stopping** | Flag to enable early termination if the primary metric is no longer improving. |\n",
"|**cv_step_size**|Number of periods between two consecutive cross-validation folds. The default value is \"auto\", in which case AutoMl determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value.\n",
"| **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **hierarchy_column_names** | The names of columns that define the hierarchical structure of the data from highest level to most granular. |\n",
"| **training_level** | The level of the hierarchy to be used for training models. |\n",
"| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n", "| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
"| **time_series_id_column_name** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n", "| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
"| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n", "| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n",
"| **model_explainability** | Flag to disable explaining the best automated ML model at the end of all training iterations. The default is True and will block non-explainable models which may impact the forecast accuracy. For more information, see [Interpretability: model explanations in automated machine learning](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-machine-learning-interpretability-automl). |" "| **model_explainability** | Flag to disable explaining the best automated ML model at the end of all training iterations. The default is True and will block non-explainable models which may impact the forecast accuracy. For more information, see [Interpretability: model explanations in automated machine learning](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-machine-learning-interpretability-automl). |\n",
"\n",
"#### ``HTSTrainParameters`` arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **automl_settings** | The ``AutoMLConfig`` object defined above. |\n",
"| **hierarchy_column_names** | The names of columns that define the hierarchical structure of the data from highest level to most granular. |\n",
"| **training_level** | The level of the hierarchy to be used for training models. |\n",
"| **enable_engineered_explanations** | The switch controls engineered explanations. |"
] ]
}, },
{ {
@@ -287,6 +298,9 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.train.automl.runtime._hts.hts_parameters import HTSTrainParameters\n", "from azureml.train.automl.runtime._hts.hts_parameters import HTSTrainParameters\n",
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"from azureml.train.automl.automlconfig import AutoMLConfig\n",
"\n",
"\n", "\n",
"model_explainability = True\n", "model_explainability = True\n",
"\n", "\n",
@@ -300,24 +314,26 @@
"label_column_name = \"quantity\"\n", "label_column_name = \"quantity\"\n",
"forecast_horizon = 7\n", "forecast_horizon = 7\n",
"\n", "\n",
"forecasting_parameters = ForecastingParameters(\n",
" time_column_name=time_column_name,\n",
" forecast_horizon=forecast_horizon,\n",
")\n",
"\n", "\n",
"automl_settings = {\n", "automl_settings = AutoMLConfig(\n",
" \"task\": \"forecasting\",\n", " task=\"forecasting\",\n",
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n", " primary_metric=\"normalized_root_mean_squared_error\",\n",
" \"label_column_name\": label_column_name,\n", " experiment_timeout_hours=1,\n",
" \"time_column_name\": time_column_name,\n", " label_column_name=label_column_name,\n",
" \"forecast_horizon\": forecast_horizon,\n", " track_child_runs=False,\n",
" \"hierarchy_column_names\": hierarchy,\n", " forecasting_parameters=forecasting_parameters,\n",
" \"hierarchy_training_level\": training_level,\n", " pipeline_fetch_max_batch_size=15,\n",
" \"track_child_runs\": False,\n", " model_explainability=model_explainability,\n",
" \"pipeline_fetch_max_batch_size\": 15,\n", " n_cross_validations=\"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
" \"model_explainability\": model_explainability,\n", " cv_step_size=\"auto\",\n",
" \"n_cross_validations\": \"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
" \"cv_step_size\": \"auto\",\n",
" # The following settings are specific to this sample and should be adjusted according to your own needs.\n", " # The following settings are specific to this sample and should be adjusted according to your own needs.\n",
" \"iteration_timeout_minutes\": 10,\n", " iteration_timeout_minutes=10,\n",
" \"iterations\": 10,\n", " iterations=15,\n",
"}\n", ")\n",
"\n", "\n",
"hts_parameters = HTSTrainParameters(\n", "hts_parameters = HTSTrainParameters(\n",
" automl_settings=automl_settings,\n", " automl_settings=automl_settings,\n",
@@ -338,15 +354,25 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"Parallel run step is leveraged to train the hierarchy. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The `process_count_per_node` is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n", "Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The ``process_count_per_node`` is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
"\n", "\n",
"* **experiment:** The experiment used for training.\n", "| Property | Description|\n",
"* **train_data:** The tabular dataset to be used as input to the training run.\n", "| :--------------- | :------------------- |\n",
"* **node_count:** The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long.\n", "| **experiment** | The experiment used for training. |\n",
"* **process_count_per_node:** Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node or optimal performance.\n", "| **train_data** | The file dataset to be used as input to the training run. |\n",
"* **train_pipeline_parameters:** The set of configuration parameters defined in the previous section. \n", "| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. |\n",
"| **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node for optimal performance. |\n",
"| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n",
"| **run_invocation_timeout** | Maximum amount of time in seconds that the ``ParallelRunStep`` class is allowed. This is optional but provides customers with greater control on exit criteria. This must be greater than ``experiment_timeout_hours`` by at least 300 seconds. |\n",
"\n", "\n",
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution." "Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution.\n",
"\n",
"**Note**: Total time taken for the **training step** in the pipeline to complete = $ \\frac{t}{ p \\times n } \\times ts $\n",
"where,\n",
"- $ t $ is time taken for training one partition (can be viewed in the training logs)\n",
"- $ p $ is ``process_count_per_node``\n",
"- $ n $ is ``node_count``\n",
"- $ ts $ is total number of partitions in time series based on ``partition_column_names``"
] ]
}, },
{ {
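To make the training-step timing note above concrete, a hypothetical worked example: with $ t = 10 $ minutes per partition, $ p = 8 $ processes per node, $ n = 2 $ nodes, and $ ts = 64 $ partitions, the step takes roughly $ \frac{10}{8 \times 2} \times 64 = 40 $ minutes.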
@@ -365,6 +391,7 @@
" node_count=2,\n", " node_count=2,\n",
" process_count_per_node=8,\n", " process_count_per_node=8,\n",
" train_pipeline_parameters=hts_parameters,\n", " train_pipeline_parameters=hts_parameters,\n",
" run_invocation_timeout=3900,\n",
")" ")"
] ]
}, },
@@ -509,19 +536,24 @@
"source": [ "source": [
"## 5.0 Forecasting\n", "## 5.0 Forecasting\n",
"For hierarchical forecasting we need to provide the HTSInferenceParameters object.\n", "For hierarchical forecasting we need to provide the HTSInferenceParameters object.\n",
"#### HTSInferenceParameters arguments\n", "#### ``HTSInferenceParameters`` arguments\n",
"* **hierarchy_forecast_level:** The default level of the hierarchy to produce prediction/forecast on.\n", "| Property | Description|\n",
"* **allocation_method:** \\[Optional] The disaggregation method to use if the hierarchy forecast level specified is below the define hierarchy training level. <br><i>(average historical proportions) 'average_historical_proportions'</i><br><i>(proportions of the historical averages) 'proportions_of_historical_average'</i>\n", "| :--------------- | :------------------- |\n",
"| **hierarchy_forecast_level:** | The default level of the hierarchy to produce prediction/forecast on. |\n",
"| **allocation_method:** | \\[Optional] The disaggregation method to use if the hierarchy forecast level specified is below the define hierarchy training level. <br><i>(average historical proportions) 'average_historical_proportions'</i><br><i>(proportions of the historical averages) 'proportions_of_historical_average'</i> |\n",
"\n", "\n",
"#### get_many_models_batch_inference_steps arguments\n", "#### ``get_many_models_batch_inference_steps`` arguments\n",
"* **experiment:** The experiment used for inference run.\n", "| Property | Description|\n",
"* **inference_data:** The data to use for inferencing. It should be the same schema as used for training.\n", "| :--------------- | :------------------- |\n",
"* **compute_target:** The compute target that runs the inference pipeline.\n", "| **experiment** | The experiment used for inference run. |\n",
"* **node_count:** The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku).\n", "| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n",
"* **process_count_per_node:** The number of processes per node.\n", "| **compute_target** | The compute target that runs the inference pipeline. |\n",
"* **train_run_id:** \\[Optional] The run id of the hierarchy training, by default it is the latest successful training hts run in the experiment.\n", "| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n",
"* **train_experiment_name:** \\[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline.\n", "| **process_count_per_node** | \\[Optional] The number of processes per node. By default it's 2 (should be at most half of the number of cores in a single node of the compute cluster that will be used for the experiment).\n",
"* **process_count_per_node:** \\[Optional] The number of processes per node, by default it's 4." "| **inference_pipeline_parameters** | \\[Optional] The ``HTSInferenceParameters`` object defined above. |\n",
"| **train_run_id** | \\[Optional] The run id of the **training pipeline**. By default it is the latest successful training pipeline run in the experiment. |\n",
"| **train_experiment_name** | \\[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n",
"| **run_invocation_timeout** | \\[Optional] Maximum amount of time in seconds that the ``ParallelRunStep`` class is allowed. This is optional but provides customers with greater control on exit criteria. |"
] ]
}, },
{ {
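A hedged sketch of wiring these inference arguments together; the import path for `HTSInferenceParameters` is assumed to mirror `HTSTrainParameters`, and `inference_ds`, `experiment`, and `compute_target` are stand-ins for objects created earlier in the notebook:

```python
# Assumed import path, mirroring HTSTrainParameters above.
from azureml.train.automl.runtime._hts.hts_parameters import HTSInferenceParameters

inference_parameters = HTSInferenceParameters(
    hierarchy_forecast_level="store",  # hypothetical level below training_level
    allocation_method="proportions_of_historical_average",
)

# AutoMLPipelineBuilder is assumed to be imported earlier in the notebook.
steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(
    experiment=experiment,
    inference_data=inference_ds,
    compute_target=compute_target,
    inference_pipeline_parameters=inference_parameters,
    node_count=2,
    process_count_per_node=8,
)
```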

View File

@@ -40,7 +40,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"### Prerequisites\n", "### Prerequisites\n",
"You'll need to create a compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md)." "You'll need to create a compute Instance by following [these](https://learn.microsoft.com/en-us/azure/machine-learning/v1/how-to-create-manage-compute-instance?tabs=python) instructions."
] ]
}, },
{ {
@@ -379,8 +379,17 @@
"source": [ "source": [
"### Set up training parameters\n", "### Set up training parameters\n",
"\n", "\n",
"This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings inncluding the name of the time column, the maximum forecast horizon, and the partition column name definition.\n", "We need to provide ``ForecastingParameters``, ``AutoMLConfig`` and ``ManyModelsTrainParameters`` objects. For the forecasting task we also need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name(s) definition.\n",
"\n", "\n",
"#### ``ForecastingParameters`` arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **time_series_id_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
"| **cv_step_size** | Number of periods between two consecutive cross-validation folds. The default value is \\\"auto\\\", in which case AutoMl determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value. |\n",
"\n",
"#### ``AutoMLConfig`` arguments\n",
"| Property | Description|\n", "| Property | Description|\n",
"| :--------------- | :------------------- |\n", "| :--------------- | :------------------- |\n",
"| **task** | forecasting |\n", "| **task** | forecasting |\n",
@@ -388,17 +397,19 @@
"| **blocked_models** | Blocked models won't be used by AutoML. |\n", "| **blocked_models** | Blocked models won't be used by AutoML. |\n",
"| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n", "| **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n", "| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n", "| **experiment_timeout_hours** | Maximum amount of time in hours that each experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. **It does not control the overall timeout for the pipeline run, instead controls the timeout for each training run per partitioned time series.** |\n",
"| **label_column_name** | The name of the label column. |\n", "| **label_column_name** | The name of the label column. |\n",
"| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n", "| **n_cross_validations** | Number of cross validation splits. The default value is \\\"auto\\\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
"| **n_cross_validations** | Number of cross validation splits. The default value is \"auto\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n", "| **enable_early_stopping** | Flag to enable early termination if the primary metric is no longer improving. |\n",
"|**cv_step_size**|Number of periods between two consecutive cross-validation folds. The default value is \"auto\", in which case AutoMl determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value.\n",
"| **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. |\n",
"| **time_column_name** | The name of your time column. |\n",
"| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n", "| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
"| **time_series_id_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n", "| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
"| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n", "| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n",
"\n",
"\n",
"#### ``ManyModelsTrainParameters`` arguments\n",
"| Property | Description|\n",
"| :--------------- | :------------------- |\n",
"| **automl_settings** | The ``AutoMLConfig`` object defined above. |\n",
"| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |" "| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
] ]
}, },
@@ -415,23 +426,29 @@
"from azureml.train.automl.runtime._many_models.many_models_parameters import (\n", "from azureml.train.automl.runtime._many_models.many_models_parameters import (\n",
" ManyModelsTrainParameters,\n", " ManyModelsTrainParameters,\n",
")\n", ")\n",
"from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
"from azureml.train.automl.automlconfig import AutoMLConfig\n",
"\n", "\n",
"partition_column_names = [\"Store\", \"Brand\"]\n", "partition_column_names = [\"Store\", \"Brand\"]\n",
"automl_settings = {\n", "\n",
" \"task\": \"forecasting\",\n", "forecasting_parameters = ForecastingParameters(\n",
" \"primary_metric\": \"normalized_root_mean_squared_error\",\n", " time_column_name=\"WeekStarting\",\n",
" \"iteration_timeout_minutes\": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value\n", " forecast_horizon=6,\n",
" \"iterations\": 15,\n", " time_series_id_column_names=partition_column_names,\n",
" \"experiment_timeout_hours\": 0.25,\n", " cv_step_size=\"auto\",\n",
" \"label_column_name\": \"Quantity\",\n", ")\n",
" \"n_cross_validations\": \"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n", "\n",
" \"cv_step_size\": \"auto\",\n", "automl_settings = AutoMLConfig(\n",
" \"time_column_name\": \"WeekStarting\",\n", " task=\"forecasting\",\n",
" \"drop_column_names\": \"Revenue\",\n", " primary_metric=\"normalized_root_mean_squared_error\",\n",
" \"forecast_horizon\": 6,\n", " iteration_timeout_minutes=10,\n",
" \"time_series_id_column_names\": partition_column_names,\n", " iterations=15,\n",
" \"track_child_runs\": False,\n", " experiment_timeout_hours=0.25,\n",
"}\n", " label_column_name=\"Quantity\",\n",
" n_cross_validations=\"auto\", # Feel free to set to a small integer (>=2) if runtime is an issue.\n",
" track_child_runs=False,\n",
" forecasting_parameters=forecasting_parameters,\n",
")\n",
"\n", "\n",
"mm_paramters = ManyModelsTrainParameters(\n", "mm_paramters = ManyModelsTrainParameters(\n",
" automl_settings=automl_settings, partition_column_names=partition_column_names\n", " automl_settings=automl_settings, partition_column_names=partition_column_names\n",
@@ -451,7 +468,9 @@
"\n", "\n",
"Reuse of previous results (``allow_reuse``) is key when using pipelines in a collaborative environment since eliminating unnecessary reruns offers agility. Reuse is the default behavior when the ``script_name``, ``inputs``, and the parameters of a step remain the same. When reuse is allowed, results from the previous run are immediately sent to the next step. If ``allow_reuse`` is set to False, a new run will always be generated for this step during pipeline execution.\n", "Reuse of previous results (``allow_reuse``) is key when using pipelines in a collaborative environment since eliminating unnecessary reruns offers agility. Reuse is the default behavior when the ``script_name``, ``inputs``, and the parameters of a step remain the same. When reuse is allowed, results from the previous run are immediately sent to the next step. If ``allow_reuse`` is set to False, a new run will always be generated for this step during pipeline execution.\n",
"\n", "\n",
"> Note that we only support partitioned FileDataset and TabularDataset without partition when using such output as input." "> Note that we only support partitioned FileDataset and TabularDataset without partition when using such output as input.\n",
"\n",
"> Note that we **drop column** \"Revenue\" from the dataset in this step to avoid information leak as \"Quantity\" = \"Revenue\" / \"Price\". **Please modify the logic based on your data**."
] ]
}, },
{ {
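A minimal pandas sketch of the "drop Revenue" note above (illustrative only; in the actual pipeline the column is dropped during data preparation for each partition):

```python
import pandas as pd

def drop_leaky_columns(df: pd.DataFrame) -> pd.DataFrame:
    # "Quantity" = "Revenue" / "Price", so keeping Revenue would leak the label.
    return df.drop(columns=["Revenue"])
```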
@@ -489,17 +508,25 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n", "Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The ``process_count_per_node`` is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.\n",
"\n", "\n",
"| Property | Description|\n", "| Property | Description|\n",
"| :--------------- | :------------------- |\n", "| :--------------- | :------------------- |\n",
"| **experiment** | The experiment used for training. |\n", "| **experiment** | The experiment used for training. |\n",
"| **train_data** | The file dataset to be used as input to the training run. |\n", "| **train_data** | The file dataset to be used as input to the training run. |\n",
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. |\n", "| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. |\n",
"| **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node or optimal performance. |\n", "| **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node for optimal performance. |\n",
"| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n", "| **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |\n",
"| **run_invocation_timeout** | Maximum amount of time in seconds that the ``ParallelRunStep`` class is allowed. This is optional but provides customers with greater control on exit criteria. This must be greater than ``experiment_timeout_hours`` by at least 300 seconds. |\n",
"\n", "\n",
"Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution." "Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution.\n",
"\n",
"**Note**: Total time taken for the **training step** in the pipeline to complete = $ \\frac{t}{ p \\times n } \\times ts $\n",
"where,\n",
"- $ t $ is time taken for training one partition (can be viewed in the training logs)\n",
"- $ p $ is ``process_count_per_node``\n",
"- $ n $ is ``node_count``\n",
"- $ ts $ is total number of partitions in time series based on ``partition_column_names``"
] ]
}, },
{ {
@@ -517,7 +544,7 @@
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" node_count=2,\n", " node_count=2,\n",
" process_count_per_node=8,\n", " process_count_per_node=8,\n",
" run_invocation_timeout=920,\n", " run_invocation_timeout=1200,\n",
" train_pipeline_parameters=mm_paramters,\n", " train_pipeline_parameters=mm_paramters,\n",
")" ")"
] ]
@@ -598,7 +625,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"### 7.2 Schedule the pipeline\n", "### 5.2 Schedule the pipeline\n",
"You can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain models every month or based on another trigger such as data drift." "You can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain models every month or based on another trigger such as data drift."
] ]
}, },
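A hedged sketch of the time-based schedule mentioned above, using the azureml-pipeline-core `Schedule` API; `published_pipeline` is a hypothetical published pipeline from earlier in the notebook:

```python
from azureml.pipeline.core import Schedule, ScheduleRecurrence

# Run the pipeline once a month.
recurrence = ScheduleRecurrence(frequency="Month", interval=1)
schedule = Schedule.create(
    ws,  # the workspace object used throughout the notebook
    name="monthly-many-models-retraining",
    pipeline_id=published_pipeline.id,  # hypothetical published pipeline
    experiment_name="many-models-retraining",
    recurrence=recurrence,
)
```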
@@ -654,25 +681,31 @@
"source": [ "source": [
"For many models we need to provide the ManyModelsInferenceParameters object.\n", "For many models we need to provide the ManyModelsInferenceParameters object.\n",
"\n", "\n",
"#### ManyModelsInferenceParameters arguments\n", "#### ``ManyModelsInferenceParameters`` arguments\n",
"| Property | Description|\n", "| Property | Description|\n",
"| :--------------- | :------------------- |\n", "| :--------------- | :------------------- |\n",
"| **partition_column_names** | List of column names that identifies groups. |\n", "| **partition_column_names** | List of column names that identifies groups. |\n",
"| **target_column_name** | \\[Optional] Column name only if the inference dataset has the target. |\n", "| **target_column_name** | \\[Optional] Column name only if the inference dataset has the target. |\n",
"| **time_column_name** | \\[Optional] Column name only if it is timeseries. |\n", "| **time_column_name** | \\[Optional] Time column name only if it is timeseries. |\n",
"| **many_models_run_id** | \\[Optional] Many models run id where models were trained. |\n", "| **inference_type** | \\[Optional] Which inference method to use on the model. Possible values are 'forecast', 'predict_proba', and 'predict'. |\n",
"| **forecast_mode** | \\[Optional] The type of forecast to be used, either 'rolling' or 'recursive'; defaults to 'recursive'. |\n",
"| **step** | \\[Optional] Number of periods to advance the forecasting window in each iteration **(for rolling forecast only)**; defaults to 1. |\n",
"\n", "\n",
"#### get_many_models_batch_inference_steps arguments\n", "#### ``get_many_models_batch_inference_steps`` arguments\n",
"| Property | Description|\n", "| Property | Description|\n",
"| :--------------- | :------------------- |\n", "| :--------------- | :------------------- |\n",
"| **experiment** | The experiment used for inference run. |\n", "| **experiment** | The experiment used for inference run. |\n",
"| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n", "| **inference_data** | The data to use for inferencing. It should be the same schema as used for training.\n",
"| **compute_target** The compute target that runs the inference pipeline.|\n", "| **compute_target** | The compute target that runs the inference pipeline. |\n",
"| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n", "| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). |\n",
"| **process_count_per_node** The number of processes per node.\n", "| **process_count_per_node** | \\[Optional] The number of processes per node. By default it's 2 (should be at most half of the number of cores in a single node of the compute cluster that will be used for the experiment).\n",
"| **train_run_id** | \\[Optional] The run id of the hierarchy training, by default it is the latest successful training many model run in the experiment. |\n", "| **inference_pipeline_parameters** | \\[Optional] The ``ManyModelsInferenceParameters`` object defined above. |\n",
"| **append_row_file_name** | \\[Optional] The name of the output file (optional, default value is 'parallel_run_step.txt'). Supports 'txt' and 'csv' file extension. A 'txt' file extension generates the output in 'txt' format with space as separator without column names. A 'csv' file extension generates the output in 'csv' format with comma as separator and with column names. |\n",
"| **train_run_id** | \\[Optional] The run id of the **training pipeline**. By default it is the latest successful training pipeline run in the experiment. |\n",
"| **train_experiment_name** | \\[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n", "| **train_experiment_name** | \\[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. |\n",
"| **process_count_per_node** | \\[Optional] The number of processes per node, by default it's 4. |" "| **run_invocation_timeout** | \\[Optional] Maximum amount of time in seconds that the ``ParallelRunStep`` class is allowed. This is optional but provides customers with greater control on exit criteria. |\n",
"| **output_datastore** | \\[Optional] The ``Datastore`` or ``OutputDatasetConfig`` to be used for output. If specified any pipeline output will be written to that location. If unspecified the default datastore will be used. |\n",
"| **arguments** | \\[Optional] Arguments to be passed to inference script. Possible argument is '--forecast_quantiles' followed by quantile values. |"
] ]
}, },
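As a hedged sketch of the rolling-forecast options documented above (the import path is an assumption that may vary across SDK versions, and the `Store`/`Brand`/`WeekStarting` column names are taken from the orange juice dataset used in this sample):

```python
# Sketch only: verify the import path against your installed SDK version.
from azureml.train.automl.runtime._many_models.many_models_parameters import (
    ManyModelsInferenceParameters,
)

mm_parameters = ManyModelsInferenceParameters(
    partition_column_names=["Store", "Brand"],  # columns identifying each series
    target_column_name="Quantity",    # present because the test set carries actuals
    time_column_name="WeekStarting",  # assumed time column name for this dataset
    inference_type="forecast",        # use each trained model's forecast() path
    forecast_mode="rolling",          # roll the forecast origin through the test set
    step=1,                           # advance one period per iteration
)
```

A rolling forecast advances the forecast origin by `step` periods at a time through the test set, mirroring operational use; the default recursive mode instead forecasts the whole horizon from a single origin.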
{ {
@@ -692,6 +725,8 @@
" target_column_name=\"Quantity\",\n", " target_column_name=\"Quantity\",\n",
")\n", ")\n",
"\n", "\n",
"output_file_name = \"parallel_run_step.csv\"\n",
"\n",
"inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n", "inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(\n",
" experiment=experiment,\n", " experiment=experiment,\n",
" inference_data=inference_ds_small,\n", " inference_data=inference_ds_small,\n",
@@ -703,6 +738,8 @@
" train_run_id=training_run.id,\n", " train_run_id=training_run.id,\n",
" train_experiment_name=training_run.experiment.name,\n", " train_experiment_name=training_run.experiment.name,\n",
" inference_pipeline_parameters=mm_parameters,\n", " inference_pipeline_parameters=mm_parameters,\n",
" append_row_file_name=output_file_name,\n",
" arguments=[\"--forecast_quantiles\", 0.1, 0.9],\n",
")" ")"
] ]
}, },
@@ -737,7 +774,7 @@
"\n", "\n",
"The following code snippet:\n", "The following code snippet:\n",
"1. Downloads the contents of the output folder that is passed in the parallel run step \n", "1. Downloads the contents of the output folder that is passed in the parallel run step \n",
"2. Reads the parallel_run_step.txt file that has the predictions as pandas dataframe and \n", "2. Reads the output file that has the predictions as pandas dataframe and \n",
"3. Displays the top 10 rows of the predictions" "3. Displays the top 10 rows of the predictions"
] ]
}, },
@@ -752,19 +789,9 @@
"forecasting_results_name = \"forecasting_results\"\n", "forecasting_results_name = \"forecasting_results\"\n",
"forecasting_output_name = \"many_models_inference_output\"\n", "forecasting_output_name = \"many_models_inference_output\"\n",
"forecast_file = get_output_from_mm_pipeline(\n", "forecast_file = get_output_from_mm_pipeline(\n",
" inference_run, forecasting_results_name, forecasting_output_name\n", " inference_run, forecasting_results_name, forecasting_output_name, output_file_name\n",
")\n", ")\n",
"df = pd.read_csv(forecast_file, delimiter=\" \", header=None)\n", "df = pd.read_csv(forecast_file)\n",
"df.columns = [\n",
" \"Week Starting\",\n",
" \"Store\",\n",
" \"Brand\",\n",
" \"Quantity\",\n",
" \"Advert\",\n",
" \"Price\",\n",
" \"Revenue\",\n",
" \"Predicted\",\n",
"]\n",
"print(\n", "print(\n",
" \"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\"\n", " \"Prediction has \", df.shape[0], \" rows. Here the first 10 rows are being displayed.\"\n",
")\n", ")\n",

View File

@@ -11,6 +11,12 @@ def main(args):
dataset = run_context.input_datasets["train_10_models"] dataset = run_context.input_datasets["train_10_models"]
df = dataset.to_pandas_dataframe() df = dataset.to_pandas_dataframe()
# Drop the column "Revenue" from the dataset to avoid information leak as
# "Quantity" = "Revenue" / "Price". Please modify the logic based on your data.
drop_column_name = "Revenue"
if drop_column_name in df.columns:
df.drop(drop_column_name, axis=1, inplace=True)
# Apply any data pre-processing techniques here # Apply any data pre-processing techniques here
df.to_parquet(output / "data_prepared_result.parquet", compression=None) df.to_parquet(output / "data_prepared_result.parquet", compression=None)

View File

@@ -1,6 +1,7 @@
{ {
"cells": [ "cells": [
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -10,6 +11,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -17,6 +19,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -34,18 +37,20 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Introduction<a id=\"introduction\"></a>\n", "## Introduction<a id=\"introduction\"></a>\n",
"In this example, we use AutoML to train, select, and operationalize a time-series forecasting model for multiple time-series.\n", "In this example, we use AutoML to train, select, and operationalize a time-series forecasting model for multiple time-series.\n",
"\n", "\n",
"Make sure you have executed the [configuration notebook](../../../configuration.ipynb) before running this notebook.\n", "Make sure you have executed the [configuration notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) before running this notebook.\n",
"\n", "\n",
"The examples in the follow code samples use the University of Chicago's Dominick's Finer Foods dataset to forecast orange juice sales. Dominick's was a grocery chain in the Chicago metropolitan area." "The examples in the follow code samples use the University of Chicago's Dominick's Finer Foods dataset to forecast orange juice sales. Dominick's was a grocery chain in the Chicago metropolitan area."
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -70,6 +75,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -86,6 +92,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -119,6 +126,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -158,6 +166,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -181,6 +190,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -201,6 +211,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -220,6 +231,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -250,6 +262,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -275,6 +288,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -291,6 +305,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -318,6 +333,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -356,6 +372,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -373,6 +390,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -446,6 +464,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -472,6 +491,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -491,6 +511,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -528,6 +549,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -548,6 +570,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -556,6 +579,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -584,6 +608,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -639,6 +664,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -646,6 +672,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -668,6 +695,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -687,6 +715,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -715,7 +744,7 @@
" description=\"Automl forecasting sample service\",\n", " description=\"Automl forecasting sample service\",\n",
")\n", ")\n",
"\n", "\n",
"aci_service_name = \"automl-oj-forecast-01\"\n", "aci_service_name = \"automl-oj-forecast-03\"\n",
"print(aci_service_name)\n", "print(aci_service_name)\n",
"aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)\n", "aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)\n",
"aci_service.wait_for_deployment(True)\n", "aci_service.wait_for_deployment(True)\n",
@@ -732,6 +761,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -780,6 +810,7 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
@@ -792,7 +823,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"serv = Webservice(ws, \"automl-oj-forecast-01\")\n", "serv = Webservice(ws, \"automl-oj-forecast-03\")\n",
"serv.delete() # don't do it accidentally" "serv.delete() # don't do it accidentally"
] ]
} }

View File

@@ -13,7 +13,7 @@
"source": [ "source": [
"## Introduction\n", "## Introduction\n",
"\n", "\n",
"In this notebook, we demonstrate how to use piplines to train and inference on AutoML Forecasting model. Two pipelines will be created: one for training AutoML model, and the other is for inference on AutoML model. We'll also demonstrate how to schedule the inference pipeline so you can get inference results periodically (with refreshed test dataset). Make sure you have executed the configuration notebook before running this notebook. In this notebook you will learn how to:\n", "In this notebook, we demonstrate how to use piplines to train and inference on AutoML Forecasting model. Two pipelines will be created: one for training AutoML model, and the other is for inference on AutoML model. We'll also demonstrate how to schedule the inference pipeline so you can get inference results periodically (with refreshed test dataset). Make sure you have executed the [configuration notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) before running this notebook. In this notebook you will learn how to:\n",
"\n", "\n",
"- Configure AutoML using AutoMLConfig for forecasting tasks using pipeline AutoMLSteps.\n", "- Configure AutoML using AutoMLConfig for forecasting tasks using pipeline AutoMLSteps.\n",
"- Create and register an AutoML model using AzureML pipeline.\n", "- Create and register an AutoML model using AzureML pipeline.\n",

View File

@@ -2,25 +2,24 @@
"cells": [ "cells": [
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n", "Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n", "\n",
"Licensed under the MIT License." "Licensed under the MIT License."
] ],
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/1_determine_experiment_settings.png)" "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/1_determine_experiment_settings.png)"
] ],
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"In this notebook we will explore the univaraite time-series data to determine the settings for an automated ML experiment. We will follow the thought process depicted in the following diagram:<br/>\n", "In this notebook we will explore the univariate time-series data to determine the settings for an automated ML experiment. We will follow the thought process depicted in the following diagram:<br/>\n",
"![Forecasting after training](figures/univariate_settings_map_20210408.jpg)\n", "![Forecasting after training](figures/univariate_settings_map_20210408.jpg)\n",
"\n", "\n",
"The objective is to answer the following questions:\n", "The objective is to answer the following questions:\n",
@@ -32,22 +31,20 @@
" </ul>\n", " </ul>\n",
" <li>Is the data stationary? </li>\n", " <li>Is the data stationary? </li>\n",
" <ul style=\"margin-top:-1px; list-style-type:none\"> \n", " <ul style=\"margin-top:-1px; list-style-type:none\"> \n",
" <li> Importance: In the absense of features that capture trend behavior, ML models (regression and tree based) are not well equiped to predict stochastic trends. Working with stationary data solves this problem. </li>\n", " <li> Importance: In the absence of features that capture trend behavior, ML models (regression and tree based) are not well equipped to predict stochastic trends. Working with stationary data solves this problem. </li>\n",
" </ul>\n", " </ul>\n",
" <li>Is there a detectable auto-regressive pattern in the stationary data? </li>\n", " <li>Is there a detectable auto-regressive pattern in the stationary data? </li>\n",
" <ul style=\"margin-top:-1px; list-style-type:none\"> \n", " <ul style=\"margin-top:-1px; list-style-type:none\"> \n",
" <li> Importance: The accuracy of ML models can be improved if serial correlation is modeled by including lags of the dependent/target varaible as features. Including target lags in every experiment by default will result in a regression in accuracy scores if such setting is not warranted. </li>\n", " <li> Importance: The accuracy of ML models can be improved if serial correlation is modeled by including lags of the dependent/target variable as features. Including target lags in every experiment by default will result in a regression in accuracy scores if such setting is not warranted. </li>\n",
" </ul>\n", " </ul>\n",
"</ol>\n", "</ol>\n",
"\n", "\n",
"The answers to these questions will help determine the appropriate settings for the automated ML experiment.\n" "The answers to these questions will help determine the appropriate settings for the automated ML experiment.\n"
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"import os\n", "import os\n",
"import warnings\n", "import warnings\n",
@@ -68,13 +65,13 @@
"# set printing options\n", "# set printing options\n",
"pd.set_option(\"display.max_columns\", 500)\n", "pd.set_option(\"display.max_columns\", 500)\n",
"pd.set_option(\"display.width\", 1000)" "pd.set_option(\"display.width\", 1000)"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# load data\n", "# load data\n",
"main_data_loc = \"data\"\n", "main_data_loc = \"data\"\n",
@@ -89,13 +86,13 @@
"df.sort_values(by=TIME_COLNAME, inplace=True)\n", "df.sort_values(by=TIME_COLNAME, inplace=True)\n",
"df.set_index(TIME_COLNAME, inplace=True)\n", "df.set_index(TIME_COLNAME, inplace=True)\n",
"df.head(2)" "df.head(2)"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# plot the entire dataset\n", "# plot the entire dataset\n",
"fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n", "fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
@@ -103,20 +100,20 @@
"ax.title.set_text(\"Original Data Series\")\n", "ax.title.set_text(\"Original Data Series\")\n",
"locs, labels = plt.xticks()\n", "locs, labels = plt.xticks()\n",
"plt.xticks(rotation=45)" "plt.xticks(rotation=45)"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"The graph plots the alcohol sales in the United States. Because the data is trending, it can be difficult to see cycles, seasonality or other interestng behaviors due to the scaling issues. For example, if there is a seasonal pattern, which we will discuss later, we cannot see them on the trending data. In such case, it is worth plotting the same data in first differences." "The graph plots the alcohol sales in the United States. Because the data is trending, it can be difficult to see cycles, seasonality or other interesting behaviors due to the scaling issues. For example, if there is a seasonal pattern, which we will discuss later, we cannot see them on the trending data. In such case, it is worth plotting the same data in first differences."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# plot the entire dataset in first differences\n", "# plot the entire dataset in first differences\n",
"fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n", "fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
@@ -124,18 +121,20 @@
"ax.title.set_text(\"Data in first differences\")\n", "ax.title.set_text(\"Data in first differences\")\n",
"locs, labels = plt.xticks()\n", "locs, labels = plt.xticks()\n",
"plt.xticks(rotation=45)" "plt.xticks(rotation=45)"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"In the previous plot we observe that the data is more volatile towards the end of the series. This period coincides with the Covid-19 period, so we will exclude it from our experiment. Since in this example there are no user-provided features it is hard to make an argument that a model trained on the less volatile pre-covid data will be able to accurately predict the covid period." "In the previous plot we observe that the data is more volatile towards the end of the series. This period coincides with the Covid-19 period, so we will exclude it from our experiment. Since in this example there are no user-provided features it is hard to make an argument that a model trained on the less volatile pre-covid data will be able to accurately predict the covid period."
] ],
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"# 1. Seasonality\n", "# 1. Seasonality\n",
"\n", "\n",
@@ -144,13 +143,11 @@
"2. If it's seasonal, does the data exhibit a trend (up or down)?\n", "2. If it's seasonal, does the data exhibit a trend (up or down)?\n",
"\n", "\n",
"It is hard to visually detect seasonality when the data is trending. The reason being is scale of seasonal fluctuations is dwarfed by the range of the trend in the data. One way to deal with this is to de-trend the data by taking the first differences. We will discuss this in more detail in the next section." "It is hard to visually detect seasonality when the data is trending. The reason being is scale of seasonal fluctuations is dwarfed by the range of the trend in the data. One way to deal with this is to de-trend the data by taking the first differences. We will discuss this in more detail in the next section."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# plot the entire dataset in first differences\n", "# plot the entire dataset in first differences\n",
"fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n", "fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n",
@@ -158,20 +155,20 @@
"ax.title.set_text(\"Data in first differences\")\n", "ax.title.set_text(\"Data in first differences\")\n",
"locs, labels = plt.xticks()\n", "locs, labels = plt.xticks()\n",
"plt.xticks(rotation=45)" "plt.xticks(rotation=45)"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"For the next plot, we will exclude the Covid period again. We will also shorten the length of data because plotting a very long time series may prevent us from seeing seasonal patterns, if there are any, because the plot may look like a random walk." "For the next plot, we will exclude the Covid period again. We will also shorten the length of data because plotting a very long time series may prevent us from seeing seasonal patterns, if there are any, because the plot may look like a random walk."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# remove COVID period\n", "# remove COVID period\n",
"df = df[:COVID_PERIOD_START]\n", "df = df[:COVID_PERIOD_START]\n",
@@ -182,11 +179,13 @@
"ax.title.set_text(\"Data in first differences\")\n", "ax.title.set_text(\"Data in first differences\")\n",
"locs, labels = plt.xticks()\n", "locs, labels = plt.xticks()\n",
"plt.xticks(rotation=45)" "plt.xticks(rotation=45)"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"<p style=\"font-size:150%; color:blue\"> Conclusion </p>\n", "<p style=\"font-size:150%; color:blue\"> Conclusion </p>\n",
"\n", "\n",
@@ -205,11 +204,11 @@
" <li> In the first case, by taking first differences we are removing stochastic trend, but we do not remove seasonal patterns. In the second case, we do not remove the stochastic trend and it can be captured by the trend component of the STL decomposition. It is hard to say which option will work best in your case, hence you will need to run both options to see which one results in more accurate forecasts. </li>\n", " <li> In the first case, by taking first differences we are removing stochastic trend, but we do not remove seasonal patterns. In the second case, we do not remove the stochastic trend and it can be captured by the trend component of the STL decomposition. It is hard to say which option will work best in your case, hence you will need to run both options to see which one results in more accurate forecasts. </li>\n",
" </ul>\n", " </ul>\n",
"</ol>" "</ol>"
] ],
"metadata": {}
}, },
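As a brief, hedged illustration of the two options weighed in this conclusion (statsmodels is assumed to be available, since the notebook already uses its `plot_acf`/`plot_pacf` helpers; `period=12` is an assumption for monthly data):

```python
from statsmodels.tsa.seasonal import STL

# Option 1: difference the series (removes a stochastic trend, keeps seasonality).
series_diff = df[TARGET_COLNAME].diff().dropna()

# Option 2: keep the series in levels and let STL capture the trend component.
stl_result = STL(df[TARGET_COLNAME], period=12).fit()  # period=12 assumed (monthly)
series_detrended = df[TARGET_COLNAME] - stl_result.trend
```

Running the experiment on both variants, as suggested above, is the practical way to decide which preprocessing yields more accurate forecasts.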
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"# 2. Stationarity\n", "# 2. Stationarity\n",
"If the data does not exhibit seasonal patterns, we would like to see if the data is non-stationary. Particularly, we want to see if there is a clear trending behavior. If such behavior is observed, we would like to first difference the data and examine the plot of an auto-correlation function (ACF) known as correlogram. If the data is seasonal, differencing it will not get rid off the seasonality and this will be shown on the correlogram as well.\n", "If the data does not exhibit seasonal patterns, we would like to see if the data is non-stationary. Particularly, we want to see if there is a clear trending behavior. If such behavior is observed, we would like to first difference the data and examine the plot of an auto-correlation function (ACF) known as correlogram. If the data is seasonal, differencing it will not get rid off the seasonality and this will be shown on the correlogram as well.\n",
@@ -237,13 +236,11 @@
"</ol>\n", "</ol>\n",
"\n", "\n",
"To answer the first question, we run a series of tests (we call them unit root tests)." "To answer the first question, we run a series of tests (we call them unit root tests)."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# unit root tests\n", "# unit root tests\n",
"test = unit_root_test_wrapper(df[TARGET_COLNAME])\n", "test = unit_root_test_wrapper(df[TARGET_COLNAME])\n",
@@ -251,11 +248,13 @@
"print(\"Summary table\", \"\\n\", test[\"summary\"], \"\\n\")\n", "print(\"Summary table\", \"\\n\", test[\"summary\"], \"\\n\")\n",
"print(\"Is the {} series stationary?: {}\".format(TARGET_COLNAME, test[\"stationary\"]))\n", "print(\"Is the {} series stationary?: {}\".format(TARGET_COLNAME, test[\"stationary\"]))\n",
"print(\"---------------\", \"\\n\")" "print(\"---------------\", \"\\n\")"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"In the previous cell, we ran a series of unit root tests. The summary table contains the following columns:\n", "In the previous cell, we ran a series of unit root tests. The summary table contains the following columns:\n",
"<ul> \n", "<ul> \n",
@@ -277,13 +276,11 @@
"Each of the tests shows that the original time series is non-stationary. The final decision is based on the majority rule. If, there is a split decision, the algorithm will claim it is stationary. We run a series of tests because each test by itself may not be accurate. In many cases when there are conflicting test results, the user needs to make determination if the series is stationary or not.\n", "Each of the tests shows that the original time series is non-stationary. The final decision is based on the majority rule. If, there is a split decision, the algorithm will claim it is stationary. We run a series of tests because each test by itself may not be accurate. In many cases when there are conflicting test results, the user needs to make determination if the series is stationary or not.\n",
"\n", "\n",
"Since we found the series to be non-stationary, we will difference it and then test if the differenced series is stationary." "Since we found the series to be non-stationary, we will difference it and then test if the differenced series is stationary."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# unit root tests\n", "# unit root tests\n",
"test = unit_root_test_wrapper(df[TARGET_COLNAME].diff().dropna())\n", "test = unit_root_test_wrapper(df[TARGET_COLNAME].diff().dropna())\n",
@@ -291,20 +288,20 @@
"print(\"Summary table\", \"\\n\", test[\"summary\"], \"\\n\")\n", "print(\"Summary table\", \"\\n\", test[\"summary\"], \"\\n\")\n",
"print(\"Is the {} series stationary?: {}\".format(TARGET_COLNAME, test[\"stationary\"]))\n", "print(\"Is the {} series stationary?: {}\".format(TARGET_COLNAME, test[\"stationary\"]))\n",
"print(\"---------------\", \"\\n\")" "print(\"---------------\", \"\\n\")"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"Four out of five tests show that the series in first differences is stationary. Notice that this decision is not unanimous. Next, let's plot the original series in first-differences to illustrate the difference between non-stationary (unit root) process vs the stationary one." "Four out of five tests show that the series in first differences is stationary. Notice that this decision is not unanimous. Next, let's plot the original series in first-differences to illustrate the difference between non-stationary (unit root) process vs the stationary one."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# plot original and stationary data\n", "# plot original and stationary data\n",
"fig = plt.figure(figsize=(10, 10))\n", "fig = plt.figure(figsize=(10, 10))\n",
@@ -314,29 +311,31 @@
"ax2.plot(df[TARGET_COLNAME].diff().dropna(), \"-b\")\n", "ax2.plot(df[TARGET_COLNAME].diff().dropna(), \"-b\")\n",
"ax1.title.set_text(\"Original data\")\n", "ax1.title.set_text(\"Original data\")\n",
"ax2.title.set_text(\"Data in first differences\")" "ax2.title.set_text(\"Data in first differences\")"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"If you were asked a question \"What is the mean of the series before and after 2008?\", for the series titled \"Original data\" the mean values will be significantly different. This implies that the first moment of the series (in this case, it is the mean) is time dependent, i.e., mean changes depending on the interval one is looking at. Thus, the series is deemed to be non-stationary. On the other hand, for the series titled \"Data in first differences\" the means for both periods are roughly the same. Hence, the first moment is time invariant; meaning it does not depend on the interval of time one is looking at. In this example it is easy to visually distinguish between stationary and non-stationary data. Often this distinction is not easy to make, therefore we rely on the statistical tests described above to help us make an informed decision. " "If you were asked a question \"What is the mean of the series before and after 2008?\", for the series titled \"Original data\" the mean values will be significantly different. This implies that the first moment of the series (in this case, it is the mean) is time dependent, i.e., mean changes depending on the interval one is looking at. Thus, the series is deemed to be non-stationary. On the other hand, for the series titled \"Data in first differences\" the means for both periods are roughly the same. Hence, the first moment is time invariant; meaning it does not depend on the interval of time one is looking at. In this example it is easy to visually distinguish between stationary and non-stationary data. Often this distinction is not easy to make, therefore we rely on the statistical tests described above to help us make an informed decision. "
] ],
"metadata": {}
}, },
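A small, hedged check of this intuition (assuming the index was parsed as datetimes earlier in the notebook, so date-string slicing works; the 2008 split date comes from the question posed above):

```python
# Compare pre/post-2008 means for the original and the differenced series.
split_date = "2008-01-01"
for label, series in [
    ("original", df[TARGET_COLNAME]),
    ("first differences", df[TARGET_COLNAME].diff().dropna()),
]:
    print(
        label,
        "| pre-2008 mean:", round(series[:split_date].mean(), 2),
        "| post-2008 mean:", round(series[split_date:].mean(), 2),
    )
```

For the original series the two means should differ sharply, while for the differenced series they should be roughly equal, mirroring the plots above.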
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"<p style=\"font-size:150%; color:blue\"> Conclusion </p>\n", "<p style=\"font-size:150%; color:blue\"> Conclusion </p>\n",
"Since we found the original process to be non-stationary (contains unit root), we will have to model the data in first differences. As a result, we will set the DIFFERENCE_SERIES parameter to True." "Since we found the original process to be non-stationary (contains unit root), we will have to model the data in first differences. As a result, we will set the DIFFERENCE_SERIES parameter to True."
] ],
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"# 3 Check if there is a clear autoregressive pattern\n", "# 3 Check if there is a clear auto-regressive pattern\n",
"We need to determine if we should include lags of the target variable as features in order to improve forecast accuracy. To do this, we will examine the ACF and partial ACF (PACF) plots of the stationary series. In our case, it is a series in first diffrences.\n", "We need to determine if we should include lags of the target variable as features in order to improve forecast accuracy. To do this, we will examine the ACF and partial ACF (PACF) plots of the stationary series. In our case, it is a series in first differences.\n",
"\n", "\n",
"<ul>\n", "<ul>\n",
" <li> Question: What is an Auto-regressive pattern? What are we looking for? </li>\n", " <li> Question: What is an Auto-regressive pattern? What are we looking for? </li>\n",
@@ -347,11 +346,11 @@
" The lag order is on the x-axis while the auto- and partial-correlation coefficients are on the y-axis. Vertical lines that are outside the shaded area represent statistically significant lags. Notice, the ACF function decays to zero and the PACF shows 2 significant spikes (we ignore the first spike for lag 0 in both plots since the linear relationship of any series with itself is always 1). <li/>\n", " The lag order is on the x-axis while the auto- and partial-correlation coefficients are on the y-axis. Vertical lines that are outside the shaded area represent statistically significant lags. Notice, the ACF function decays to zero and the PACF shows 2 significant spikes (we ignore the first spike for lag 0 in both plots since the linear relationship of any series with itself is always 1). <li/>\n",
" </ul>\n", " </ul>\n",
"<ul/>" "<ul/>"
] ],
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"<ul>\n", "<ul>\n",
" <li> Question: What do I do if I observe an auto-regressive behavior? </li>\n", " <li> Question: What do I do if I observe an auto-regressive behavior? </li>\n",
@@ -365,32 +364,32 @@
" <br/>\n", " <br/>\n",
" <li> Next, let's examine the ACF and PACF plots of the stationary target variable (depicted below). Here, we do not see a decay in the ACF, instead we see a decay in PACF. It is hard to make an argument the the target variable exhibits auto-regressive behavior. </li>\n", " <li> Next, let's examine the ACF and PACF plots of the stationary target variable (depicted below). Here, we do not see a decay in the ACF, instead we see a decay in PACF. It is hard to make an argument the the target variable exhibits auto-regressive behavior. </li>\n",
" </ul>" " </ul>"
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# Plot the ACF/PACF for the series in differences\n", "# Plot the ACF/PACF for the series in differences\n",
"fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n", "fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n",
"plot_acf(df[TARGET_COLNAME].diff().dropna().values.squeeze(), ax=ax[0])\n", "plot_acf(df[TARGET_COLNAME].diff().dropna().values.squeeze(), ax=ax[0])\n",
"plot_pacf(df[TARGET_COLNAME].diff().dropna().values.squeeze(), ax=ax[1])\n", "plot_pacf(df[TARGET_COLNAME].diff().dropna().values.squeeze(), ax=ax[1])\n",
"plt.show()" "plt.show()"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"<p style=\"font-size:150%; color:blue\"> Conclusion </p>\n", "<p style=\"font-size:150%; color:blue\"> Conclusion </p>\n",
"Since we do not see a clear indication of an AR(p) process, we will not be using target lags and will set the TARGET_LAGS parameter to None." "Since we do not see a clear indication of an AR(p) process, we will not be using target lags and will set the TARGET_LAGS parameter to None."
] ],
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"<p style=\"font-size:150%; color:blue; font-weight: bold\"> AutoML Experiment Settings </p>\n", "<p style=\"font-size:150%; color:blue; font-weight: bold\"> AutoML Experiment Settings </p>\n",
"Based on the analysis performed, we should try the following settings for the AutoML experiment and use them in the \"2_run_experiment\" notebook.\n", "Based on the analysis performed, we should try the following settings for the AutoML experiment and use them in the \"2_run_experiment\" notebook.\n",
@@ -399,11 +398,11 @@
" <li> DIFFERENCE_SERIES=True </li>\n", " <li> DIFFERENCE_SERIES=True </li>\n",
" <li> TARGET_LAGS=None </li>\n", " <li> TARGET_LAGS=None </li>\n",
"</ul>" "</ul>"
] ],
"metadata": {}
}, },
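A hedged sketch of how these settings could be carried into the AutoML configuration used in the "2_run_experiment" notebook (the `ForecastingParameters` import path follows the azureml-train-automl-client documentation; verify it for your SDK version, and note the horizon here is only a placeholder):

```python
from azureml.automl.core.forecasting_parameters import ForecastingParameters

forecasting_parameters = ForecastingParameters(
    time_column_name=TIME_COLNAME,  # defined earlier in this notebook
    forecast_horizon=12,            # placeholder; set per your scenario
    target_lags=None,               # TARGET_LAGS=None, per the ACF/PACF analysis
    use_stl=None,                   # STL_TYPE=None, since no seasonality was found
)
# DIFFERENCE_SERIES=True is applied by differencing the data before training,
# exactly as the "2_run_experiment" notebook does.
```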
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"# Appendix: ACF, PACF and Lag Selection\n", "# Appendix: ACF, PACF and Lag Selection\n",
"To do this, we will examine the ACF and partial ACF (PACF) plots of the differenced series. \n", "To do this, we will examine the ACF and partial ACF (PACF) plots of the differenced series. \n",
@@ -418,23 +417,23 @@
" </li>\n", " </li>\n",
" where $\\sigma_{xzy}$ is the covariance between two random variables $X$ and $Z$; $\\sigma_x$ and $\\sigma_z$ is the variance for $X$ and $Z$, respectively. The correlation coefficient measures the strength of linear relationship between two random variables. This metric can take any value from -1 to 1. <li/>\n", " where $\\sigma_{xzy}$ is the covariance between two random variables $X$ and $Z$; $\\sigma_x$ and $\\sigma_z$ is the variance for $X$ and $Z$, respectively. The correlation coefficient measures the strength of linear relationship between two random variables. This metric can take any value from -1 to 1. <li/>\n",
" <br/>\n", " <br/>\n",
" <li> The auto-correlation coefficient $\\rho_{Y_{t} Y_{t-k}}$ is the time series equivalent of the correlation coefficient, except instead of measuring linear association between two random variables $X$ and $Z$, it measures the strength of a linear relationship between a random variable $Y_t$ and its lag $Y_{t-k}$ for any positive interger value of $k$. </li> \n", " <li> The auto-correlation coefficient $\\rho_{Y_{t} Y_{t-k}}$ is the time series equivalent of the correlation coefficient, except instead of measuring linear association between two random variables $X$ and $Z$, it measures the strength of a linear relationship between a random variable $Y_t$ and its lag $Y_{t-k}$ for any positive integer value of $k$. </li> \n",
" <br />\n", " <br />\n",
" <li> To visualize the ACF for a particular lag, say lag 2, plot the second lag of a series $y_{t-2}$ on the x-axis, and plot the series itself $y_t$ on the y-axis. The autocorrelation coefficient is the slope of the best fitted regression line and can be interpreted as follows. A one unit increase in the lag of a variable one period ago leads to a $\\rho_{Y_{t} Y_{t-2}}$ units change in the variable in the current period. This interpreation can be applied to any lag. </li> \n", " <li> To visualize the ACF for a particular lag, say lag 2, plot the second lag of a series $y_{t-2}$ on the x-axis, and plot the series itself $y_t$ on the y-axis. The autocorrelation coefficient is the slope of the best fitted regression line and can be interpreted as follows. A one unit increase in the lag of a variable one period ago leads to a $\\rho_{Y_{t} Y_{t-2}}$ units change in the variable in the current period. This interpretation can be applied to any lag. </li> \n",
" <br />\n", " <br />\n",
" <li> In the interpretation posted above we need to be careful not to confuse the word \"leads\" with \"causes\" since these are not the same thing. We do not know the lagged value of the varaible causes it to change. Afterall, there are probably many other features that may explain the movement in $Y_t$. All we are trying to do in this section is to identify situations when the variable contains the strong auto-regressive components that needs to be included in the model to improve forecast accuracy. </li>\n", " <li> In the interpretation posted above we need to be careful not to confuse the word \"leads\" with \"causes\" since these are not the same thing. We do not know the lagged value of the variable causes it to change. After all, there are probably many other features that may explain the movement in $Y_t$. All we are trying to do in this section is to identify situations when the variable contains the strong auto-regressive components that needs to be included in the model to improve forecast accuracy. </li>\n",
" </ul>\n", " </ul>\n",
"</ul>" "</ul>"
] ],
"metadata": {}
}, },
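As a numeric companion to these definitions (a hedged sketch; `statsmodels.tsa.stattools` is assumed available alongside the plotting helpers the notebook already imports):

```python
from statsmodels.tsa.stattools import acf, pacf

stationary = df[TARGET_COLNAME].diff().dropna()
print("ACF  (lags 0-4):", acf(stationary, nlags=4).round(3))
print("PACF (lags 0-4):", pacf(stationary, nlags=4).round(3))
```

The printed values are exactly the heights of the vertical lines in the ACF/PACF plots shown earlier.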
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"<ul>\n", "<ul>\n",
" <li> Question: What is the PACF? </li>\n", " <li> Question: What is the PACF? </li>\n",
" <ul style=\"list-style-type:none;\">\n", " <ul style=\"list-style-type:none;\">\n",
" <li> When describing the ACF we essentially running a regression between a partigular lag of a series, say, lag 4, and the series itself. What this implies is the regression coefficient for lag 4 captures the impact of everything that happens in lags 1, 2 and 3. In other words, if lag 1 is the most important lag and we exclude it from the regression, naturally, the regression model will assign the importance of the 1st lag to the 4th one. Partial auto-correlation function fixes this problem since it measures the contribution of each lag accounting for the information added by the intermediary lags. If we were to illustrate ACF and PACF for the fourth lag using the regression analogy, the difference is a follows: \n", " <li> When describing the ACF we essentially running a regression between a particular lag of a series, say, lag 4, and the series itself. What this implies is the regression coefficient for lag 4 captures the impact of everything that happens in lags 1, 2 and 3. In other words, if lag 1 is the most important lag and we exclude it from the regression, naturally, the regression model will assign the importance of the 1st lag to the 4th one. Partial auto-correlation function fixes this problem since it measures the contribution of each lag accounting for the information added by the intermediary lags. If we were to illustrate ACF and PACF for the fourth lag using the regression analogy, the difference is a follows: \n",
" \\begin{align}\n", " \\begin{align}\n",
" Y_{t} &= a_{0} + a_{4} Y_{t-4} + e_{t} \\\\\n", " Y_{t} &= a_{0} + a_{4} Y_{t-4} + e_{t} \\\\\n",
" Y_{t} &= b_{0} + b_{1} Y_{t-1} + b_{2} Y_{t-2} + b_{3} Y_{t-3} + b_{4} Y_{t-4} + \\varepsilon_{t} \\\\\n", " Y_{t} &= b_{0} + b_{1} Y_{t-1} + b_{2} Y_{t-2} + b_{3} Y_{t-3} + b_{4} Y_{t-4} + \\varepsilon_{t} \\\\\n",
@@ -442,27 +441,28 @@
" </li>\n", " </li>\n",
" <br/>\n", " <br/>\n",
" <li>\n", " <li>\n",
" Here, you can think of $a_4$ and $b_{4}$ as the auto- and partial auto-correlation coefficients for lag 4. Notice, in the second equation we explicitely accounting for the intermediate lags by adding them as regrerssors.\n", " Here, you can think of $a_4$ and $b_{4}$ as the auto- and partial auto-correlation coefficients for lag 4. Notice, in the second equation we explicitly accounting for the intermediate lags by adding them as regressors.\n",
" </li>\n", " </li>\n",
" </ul>\n", " </ul>\n",
"</ul>" "</ul>"
] ],
"metadata": {}
}, },
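To make the regression analogy concrete, here is a hedged sketch estimating the lag-4 coefficients a_4 and b_4 on the differenced series (statsmodels OLS assumed available; this mirrors the two equations above rather than any library's internal PACF implementation):

```python
import pandas as pd
import statsmodels.api as sm

y = df[TARGET_COLNAME].diff().dropna()
lags = pd.concat({f"L{k}": y.shift(k) for k in range(1, 5)}, axis=1).dropna()
y_aligned = y.loc[lags.index]

# ACF-style regression: lag 4 alone, so its coefficient absorbs lags 1-3.
model_a = sm.OLS(y_aligned, sm.add_constant(lags[["L4"]])).fit()
# PACF-style regression: lags 1-4 together, isolating lag 4's own contribution.
model_b = sm.OLS(y_aligned, sm.add_constant(lags)).fit()

print("a_4 ~", round(model_a.params["L4"], 3))
print("b_4 ~", round(model_b.params["L4"], 3))
```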
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"<ul>\n", "<ul>\n",
" <li> Question: Auto-regressive pattern? What are we looking for? </li>\n", " <li> Question: Auto-regressive pattern? What are we looking for? </li>\n",
" <ul style=\"list-style-type:none;\">\n", " <ul style=\"list-style-type:none;\">\n",
" <li> We are looking for a classical profiles for an AR(p) process such as an exponential decay of an ACF and a the first $p$ significant lags of the PACF. Let's examine the ACF/PACF profiles of the same simulated AR(2) shown in Section 3, and check if the ACF/PACF explanation are refelcted in these plots. <li/>\n", " <li> We are looking for a classical profiles for an AR(p) process such as an exponential decay of an ACF and a the first $p$ significant lags of the PACF. Let's examine the ACF/PACF profiles of the same simulated AR(2) shown in Section 3, and check if the ACF/PACF explanation are reflected in these plots. <li/>\n",
" <li><img src=\"figures/ACF_PACF_for_AR2.png\" class=\"img_class\">\n", " <li><img src=\"figures/ACF_PACF_for_AR2.png\" class=\"img_class\">\n",
" <li> The autocorrelation coefficient for the 3rd lag is 0.6, which can be interpreted that a one unit increase in the value of the target varaible three periods ago leads to 0.6 units increase in the current period. However, the PACF plot shows that the partial autocorrealtion coefficient is zero (from a statistical point of view since it lies within the shaded region). This is happening because the 1st and 2nd lags are good predictors of the target variable. Ommiting these two lags from the regression results in the misleading conclusion that the third lag is a good prediciton. <li/>\n", " <li> The autocorrelation coefficient for the 3rd lag is 0.6, which can be interpreted that a one unit increase in the value of the target variable three periods ago leads to 0.6 units increase in the current period. However, the PACF plot shows that the partial autocorrelation coefficient is zero (from a statistical point of view since it lies within the shaded region). This is happening because the 1st and 2nd lags are good predictors of the target variable. Omitting these two lags from the regression results in the misleading conclusion that the third lag is a good prediction. <li/>\n",
" <br/>\n", " <br/>\n",
" <li> This is why it is important to examine both the ACF and the PACF plots when tring to determine the auto regressive order for the variable in question. <li/>\n", " <li> This is why it is important to examine both the ACF and the PACF plots when trying to determine the auto regressive order for the variable in question. <li/>\n",
" </ul>\n", " </ul>\n",
"</ul> " "</ul> "
] ],
"metadata": {}
} }
], ],
"metadata": { "metadata": {
@@ -472,21 +472,32 @@
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python 3.8 - AzureML", "name": "python38-azureml",
"language": "python", "language": "python",
"name": "python38-azureml" "display_name": "Python 3.8 - AzureML"
}, },
"language_info": { "language_info": {
"name": "python",
"version": "3.8.10",
"mimetype": "text/x-python",
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
"version": 3 "version": 3
}, },
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.6.9" "nbconvert_exporter": "python",
"file_extension": ".py"
},
"microsoft": {
"ms_spell_check": {
"ms_spell_check_language": "en"
}
},
"kernel_info": {
"name": "python38-azureml"
},
"nteract": {
"version": "nteract-front-end@1.0.0"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@@ -2,23 +2,22 @@
"cells": [ "cells": [
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n", "Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n", "\n",
"Licensed under the MIT License." "Licensed under the MIT License."
] ],
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/2_run_experiment.png)" "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/2_run_experiment.png)"
] ],
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"# Running AutoML experiments\n", "# Running AutoML experiments\n",
"\n", "\n",
@@ -27,20 +26,18 @@
"<br/>\n", "<br/>\n",
"\n", "\n",
"The output generated by this notebook is saved in the `experiment_output`folder." "The output generated by this notebook is saved in the `experiment_output`folder."
] ],
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"### Setup" "### Setup"
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"import os\n", "import os\n",
"import logging\n", "import logging\n",
@@ -63,21 +60,21 @@
"np.set_printoptions(precision=4, suppress=True, linewidth=100)\n", "np.set_printoptions(precision=4, suppress=True, linewidth=100)\n",
"pd.set_option(\"display.max_columns\", 500)\n", "pd.set_option(\"display.max_columns\", 500)\n",
"pd.set_option(\"display.width\", 1000)" "pd.set_option(\"display.width\", 1000)"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"As part of the setup you have already created a **Workspace**. You will also need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n", "As part of the setup you have already created a **Workspace**. You will also need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"ws = Workspace.from_config()\n", "ws = Workspace.from_config()\n",
"amlcompute_cluster_name = \"recipe-cluster\"\n", "amlcompute_cluster_name = \"recipe-cluster\"\n",
@@ -107,22 +104,22 @@
"compute_target.wait_for_completion(\n", "compute_target.wait_for_completion(\n",
" show_output=True, min_node_count=None, timeout_in_minutes=20\n", " show_output=True, min_node_count=None, timeout_in_minutes=20\n",
")" ")"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"### Data\n", "### Data\n",
"\n", "\n",
"Here, we will load the data from the csv file and drop the Covid period." "Here, we will load the data from the csv file and drop the Covid period."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"main_data_loc = \"data\"\n", "main_data_loc = \"data\"\n",
"train_file_name = \"S4248SM144SCEN.csv\"\n", "train_file_name = \"S4248SM144SCEN.csv\"\n",
@@ -140,32 +137,34 @@
"\n", "\n",
"# remove the Covid period\n", "# remove the Covid period\n",
"df = df.query('{} <= \"{}\"'.format(TIME_COLNAME, COVID_PERIOD_START))" "df = df.query('{} <= \"{}\"'.format(TIME_COLNAME, COVID_PERIOD_START))"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"### Set parameters\n", "### Set parameters\n",
"\n", "\n",
"The first set of parameters is based on the analysis performed in the `auto-ml-forecasting-univariate-recipe-experiment-settings` notebook. " "The first set of parameters is based on the analysis performed in the `auto-ml-forecasting-univariate-recipe-experiment-settings` notebook. "
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# set parameters based on the settings notebook analysis\n", "# set parameters based on the settings notebook analysis\n",
"DIFFERENCE_SERIES = True\n", "DIFFERENCE_SERIES = True\n",
"TARGET_LAGS = None\n", "TARGET_LAGS = None\n",
"STL_TYPE = None" "STL_TYPE = None"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"Next, define additional parameters to be used in the <a href=\"https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.automlconfig?view=azure-ml-py\"> AutoML config </a> class.\n", "Next, define additional parameters to be used in the <a href=\"https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.automlconfig?view=azure-ml-py\"> AutoML config </a> class.\n",
"\n", "\n",
@@ -180,32 +179,30 @@
" </ul>\n", " </ul>\n",
" </li>\n", " </li>\n",
"</ul>\n" "</ul>\n"
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# set other parameters\n", "# set other parameters\n",
"FORECAST_HORIZON = 12\n", "FORECAST_HORIZON = 12\n",
"TIME_SERIES_ID_COLNAMES = []\n", "TIME_SERIES_ID_COLNAMES = []\n",
"BLOCKED_MODELS = []" "BLOCKED_MODELS = []"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"To run AutoML, you also need to create an **Experiment**. An Experiment corresponds to a prediction problem you are trying to solve, while a Run corresponds to a specific approach to the problem." "To run AutoML, you also need to create an **Experiment**. An Experiment corresponds to a prediction problem you are trying to solve, while a Run corresponds to a specific approach to the problem."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# choose a name for the run history container in the workspace\n", "# choose a name for the run history container in the workspace\n",
"if isinstance(TARGET_LAGS, list):\n", "if isinstance(TARGET_LAGS, list):\n",
@@ -232,38 +229,38 @@
"pd.set_option(\"display.max_colwidth\", None)\n", "pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"print(outputDf.T)" "print(outputDf.T)"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# create output directory\n", "# create output directory\n",
"output_dir = \"experiment_output/{}\".format(experiment_desc)\n", "output_dir = \"experiment_output/{}\".format(experiment_desc)\n",
"if not os.path.exists(output_dir):\n", "if not os.path.exists(output_dir):\n",
" os.makedirs(output_dir)" " os.makedirs(output_dir)"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# difference data and test for unit root\n", "# difference data and test for unit root\n",
"if DIFFERENCE_SERIES:\n", "if DIFFERENCE_SERIES:\n",
" df_delta = df.copy()\n", " df_delta = df.copy()\n",
" df_delta[TARGET_COLNAME] = df[TARGET_COLNAME].diff()\n", " df_delta[TARGET_COLNAME] = df[TARGET_COLNAME].diff()\n",
" df_delta.dropna(axis=0, inplace=True)" " df_delta.dropna(axis=0, inplace=True)"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
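The comment above mentions testing for a unit root, but the test itself falls outside this hunk. A common way to run such a check is statsmodels' augmented Dickey-Fuller test; this is an illustrative sketch, not necessarily the notebook's exact method.

```python
# Illustrative unit-root check on the differenced series (assumes statsmodels
# is installed; the notebook's actual test code is not shown in this diff).
from statsmodels.tsa.stattools import adfuller

adf_stat, p_value = adfuller(df_delta[TARGET_COLNAME])[:2]
print(f"ADF statistic: {adf_stat:.3f}, p-value: {p_value:.3f}")
# A small p-value (e.g. < 0.05) is evidence against a unit root in the differences.
```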
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# split the data into train and test set\n", "# split the data into train and test set\n",
"if DIFFERENCE_SERIES:\n", "if DIFFERENCE_SERIES:\n",
@@ -281,64 +278,51 @@
" time_colname=TIME_COLNAME,\n", " time_colname=TIME_COLNAME,\n",
" ts_id_colnames=TIME_SERIES_ID_COLNAMES,\n", " ts_id_colnames=TIME_SERIES_ID_COLNAMES,\n",
" )" " )"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"### Upload files to the Datastore\n", "### Upload files to the Datastore\n",
"The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace) is paired with the storage account, which contains the default data store. We will use it to upload the bike share data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation." "The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace) is paired with the storage account, which contains the default data store. We will use it to upload the bike share data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"df_train.to_csv(\"train.csv\", index=False)\n", "df_train.to_csv(\"train.csv\", index=False)\n",
"df_test.to_csv(\"test.csv\", index=False)\n", "df_test.to_csv(\"test.csv\", index=False)\n",
"\n", "\n",
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
"\n",
"datastore = ws.get_default_datastore()\n", "datastore = ws.get_default_datastore()\n",
"datastore.upload_files(\n", "train_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
" files=[\"./train.csv\"],\n", " df_train, target=(datastore, \"dataset/\"), name=\"train\"\n",
" target_path=\"uni-recipe-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n", ")\n",
"datastore.upload_files(\n", "test_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
" files=[\"./test.csv\"],\n", " df_test, target=(datastore, \"dataset/\"), name=\"test\"\n",
" target_path=\"uni-recipe-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"\n",
"from azureml.core import Dataset\n",
"\n",
"train_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"uni-recipe-dataset/tabular/train.csv\")]\n",
")\n",
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"uni-recipe-dataset/tabular/test.csv\")]\n",
")\n", ")\n",
"\n", "\n",
"# print the first 5 rows of the Dataset\n", "# print the first 5 rows of the Dataset\n",
"train_dataset.to_pandas_dataframe().reset_index(drop=True).head(5)" "train_dataset.to_pandas_dataframe().reset_index(drop=True).head(5)"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
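As a usage note, `register_pandas_dataframe` (the right-hand side of the change above) both uploads and registers the data, so the datasets can later be fetched by name in another session.

```python
# Retrieve the registered tabular datasets by name.
from azureml.core import Dataset

train_dataset = Dataset.get_by_name(ws, name="train")
test_dataset = Dataset.get_by_name(ws, name="test")
print(train_dataset.to_pandas_dataframe().shape)
```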
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"### Config AutoML" "### Config AutoML"
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"time_series_settings = {\n", "time_series_settings = {\n",
" \"time_column_name\": TIME_COLNAME,\n", " \"time_column_name\": TIME_COLNAME,\n",
@@ -365,76 +349,76 @@
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" **time_series_settings,\n", " **time_series_settings,\n",
")" ")"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
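Most of the configuration cell is outside this hunk; a hedged sketch of a forecasting `AutoMLConfig` using the parameters defined earlier looks roughly like the following. The primary metric and timeout are assumptions for illustration, not the notebook's values.

```python
# Sketch only: argument values marked "assumed" are illustrative.
from azureml.train.automl import AutoMLConfig

automl_config = AutoMLConfig(
    task="forecasting",
    primary_metric="normalized_root_mean_squared_error",  # assumed
    experiment_timeout_hours=1,                           # assumed
    training_data=train_dataset,
    label_column_name=TARGET_COLNAME,
    blocked_models=BLOCKED_MODELS,
    compute_target=compute_target,
    **time_series_settings,
)
```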
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"We will now run the experiment, you can go to Azure ML portal to view the run details." "We will now run the experiment, you can go to Azure ML portal to view the run details."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"remote_run = experiment.submit(automl_config, show_output=False)\n", "remote_run = experiment.submit(automl_config, show_output=False)\n",
"remote_run.wait_for_completion()" "remote_run.wait_for_completion()"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
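Besides the portal, the run can be watched inline with the SDK's notebook widget; this is optional and requires the `azureml-widgets` package, and it is not part of this notebook's own code.

```python
# Optional inline monitoring of the submitted run.
from azureml.widgets import RunDetails

RunDetails(remote_run).show()
```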
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"### Retrieve the Best Run details\n", "### Retrieve the Best Run details\n",
"Below we retrieve the best Run object from among all the runs in the experiment." "Below we retrieve the best Run object from among all the runs in the experiment."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"best_run = remote_run.get_best_child()\n", "best_run = remote_run.get_best_child()\n",
"best_run" "best_run"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
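Once retrieved, the best run's logged metrics can be inspected directly; the metric name below is an assumption for illustration.

```python
# Inspect metrics logged by the best child run.
best_metrics = best_run.get_metrics()
print(best_metrics.get("normalized_root_mean_squared_error"))  # name assumed
```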
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"### Inference\n", "### Inference\n",
"\n", "\n",
"We now use the best fitted model from the AutoML Run to make forecasts for the test set. We will do batch scoring on the test dataset which should have the same schema as training dataset.\n", "We now use the best fitted model from the AutoML Run to make forecasts for the test set. We will do batch scoring on the test dataset which should have the same schema as training dataset.\n",
"\n", "\n",
"The inference will run on a remote compute. In this example, it will re-use the training compute." "The inference will run on a remote compute. In this example, it will re-use the training compute."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"test_experiment = Experiment(ws, experiment_name + \"_inference\")" "test_experiment = Experiment(ws, experiment_name + \"_inference\")"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"## Retreiving forecasts from the model\n", "## Retreiving forecasts from the model\n",
"We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and expecuted on the remote compute." "We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and expecuted on the remote compute."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"from run_forecast import run_remote_inference\n", "from run_forecast import run_remote_inference\n",
"\n", "\n",
@@ -448,31 +432,31 @@
"remote_run.wait_for_completion(show_output=False)\n", "remote_run.wait_for_completion(show_output=False)\n",
"\n", "\n",
"remote_run.download_file(\"outputs/predictions.csv\", f\"{output_dir}/predictions.csv\")" "remote_run.download_file(\"outputs/predictions.csv\", f\"{output_dir}/predictions.csv\")"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"### Download the prediction result for metrics calcuation\n", "### Download the prediction result for metrics calcuation\n",
"The test data with predictions are saved in artifact `outputs/predictions.csv`. We will use it to calculate accuracy metrics and vizualize predictions versus actuals." "The test data with predictions are saved in artifact `outputs/predictions.csv`. We will use it to calculate accuracy metrics and vizualize predictions versus actuals."
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"X_trans = pd.read_csv(f\"{output_dir}/predictions.csv\", parse_dates=[TIME_COLNAME])\n", "X_trans = pd.read_csv(f\"{output_dir}/predictions.csv\", parse_dates=[TIME_COLNAME])\n",
"X_trans.head()" "X_trans.head()"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# convert forecast in differences to levels\n", "# convert forecast in differences to levels\n",
"def convert_fcst_diff_to_levels(fcst, yt, df_orig):\n", "def convert_fcst_diff_to_levels(fcst, yt, df_orig):\n",
@@ -486,13 +470,13 @@
" )\n", " )\n",
" out.rename(columns={TARGET_COLNAME: \"actual_level\"}, inplace=True)\n", " out.rename(columns={TARGET_COLNAME: \"actual_level\"}, inplace=True)\n",
" return out" " return out"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
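The body of `convert_fcst_diff_to_levels` is largely outside this hunk. The underlying idea is simple: forecasts of first differences are turned back into levels by cumulatively summing them onto the last observed level. A self-contained illustration with hypothetical numbers, not the notebook's helper:

```python
import numpy as np

last_level = 100.0                       # hypothetical last observed level
pred_diffs = np.array([2.0, -1.0, 3.0])  # hypothetical forecasted differences
pred_levels = last_level + np.cumsum(pred_diffs)
print(pred_levels)  # [102. 101. 104.]
```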
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"if DIFFERENCE_SERIES:\n", "if DIFFERENCE_SERIES:\n",
" # convert forecast in differences to the levels\n", " # convert forecast in differences to the levels\n",
@@ -506,20 +490,20 @@
" fcst_df[\"predicted_level\"] = y_predictions\n", " fcst_df[\"predicted_level\"] = y_predictions\n",
"\n", "\n",
"del X_trans" "del X_trans"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"### Calculate metrics and save output" "### Calculate metrics and save output"
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"# compute metrics\n", "# compute metrics\n",
"metrics_df = compute_metrics(fcst_df=fcst_df, metric_name=None, ts_id_colnames=None)\n", "metrics_df = compute_metrics(fcst_df=fcst_df, metric_name=None, ts_id_colnames=None)\n",
@@ -530,20 +514,20 @@
"\n", "\n",
"metrics_df.to_csv(os.path.join(output_dir, metrics_file_name), index=True)\n", "metrics_df.to_csv(os.path.join(output_dir, metrics_file_name), index=True)\n",
"fcst_df.to_csv(os.path.join(output_dir, fcst_file_name), index=True)" "fcst_df.to_csv(os.path.join(output_dir, fcst_file_name), index=True)"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
}, },
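`compute_metrics` is a helper shipped alongside this notebook; for orientation, the usual forecast accuracy metrics can be computed by hand from the level columns produced above. This is a sketch, not the helper's exact output.

```python
import numpy as np

y_true = fcst_df["actual_level"].to_numpy()
y_pred = fcst_df["predicted_level"].to_numpy()
rmse = np.sqrt(np.mean((y_true - y_pred) ** 2))
mape = np.mean(np.abs((y_true - y_pred) / y_true)) * 100
print(f"RMSE: {rmse:.3f}  MAPE: {mape:.2f}%")
```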
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"### Generate and save visuals" "### Generate and save visuals"
] ],
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ "source": [
"plot_df = df.query('{} > \"2010-01-01\"'.format(TIME_COLNAME))\n", "plot_df = df.query('{} > \"2010-01-01\"'.format(TIME_COLNAME))\n",
"plot_df.set_index(TIME_COLNAME, inplace=True)\n", "plot_df.set_index(TIME_COLNAME, inplace=True)\n",
@@ -562,7 +546,10 @@
"\n", "\n",
"plt.setp(labels, rotation=45)\n", "plt.setp(labels, rotation=45)\n",
"plt.savefig(os.path.join(output_dir, plot_file_name))" "plt.savefig(os.path.join(output_dir, plot_file_name))"
] ],
"outputs": [],
"execution_count": null,
"metadata": {}
} }
], ],
"metadata": { "metadata": {
@@ -572,26 +559,37 @@
} }
], ],
"kernelspec": { "kernelspec": {
"display_name": "Python 3.8 - AzureML", "name": "python38-azureml",
"language": "python", "language": "python",
"name": "python38-azureml" "display_name": "Python 3.8 - AzureML"
}, },
"language_info": { "language_info": {
"name": "python",
"version": "3.8.5",
"mimetype": "text/x-python",
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
"version": 3 "version": 3
}, },
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.8.5" "nbconvert_exporter": "python",
"file_extension": ".py"
}, },
"vscode": { "vscode": {
"interpreter": { "interpreter": {
"hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca" "hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca"
} }
},
"microsoft": {
"ms_spell_check": {
"ms_spell_check_language": "en"
}
},
"kernel_info": {
"name": "python3"
},
"nteract": {
"version": "nteract-front-end@1.0.0"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@@ -859,8 +859,8 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"%matplotlib inline\n", "%matplotlib inline\n",
"test_pred = plt.scatter(y_test, y_pred_test, color=\"\")\n", "test_pred = plt.scatter(y_test, y_pred_test, c=[\"b\"])\n",
"test_test = plt.scatter(y_test, y_test, color=\"g\")\n", "test_test = plt.scatter(y_test, y_test, c=[\"g\"])\n",
"plt.legend(\n", "plt.legend(\n",
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n", " (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n", ")\n",

View File

@@ -422,8 +422,8 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"%matplotlib inline\n", "%matplotlib inline\n",
"test_pred = plt.scatter(y_test, y_pred_test, color=\"\")\n", "test_pred = plt.scatter(y_test, y_pred_test, c=[\"b\"])\n",
"test_test = plt.scatter(y_test, y_test, color=\"g\")\n", "test_test = plt.scatter(y_test, y_test, c=[\"g\"])\n",
"plt.legend(\n", "plt.legend(\n",
" (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n", " (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
")\n", ")\n",

View File

@@ -239,7 +239,7 @@
"\n", "\n",
"env = Environment(\"deploytocloudenv\")\n", "env = Environment(\"deploytocloudenv\")\n",
"env.python.conda_dependencies.add_pip_package(\"joblib\")\n", "env.python.conda_dependencies.add_pip_package(\"joblib\")\n",
"env.python.conda_dependencies.add_pip_package(\"numpy\")\n", "env.python.conda_dependencies.add_pip_package(\"numpy==1.23\")\n",
"env.python.conda_dependencies.add_pip_package(\"scikit-learn=={}\".format(sklearn.__version__))" "env.python.conda_dependencies.add_pip_package(\"scikit-learn=={}\".format(sklearn.__version__))"
] ]
}, },

View File

@@ -91,7 +91,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"import joblib\n", "import dill\n",
"\n", "\n",
"from sklearn.datasets import load_diabetes\n", "from sklearn.datasets import load_diabetes\n",
"from sklearn.linear_model import Ridge\n", "from sklearn.linear_model import Ridge\n",
@@ -101,7 +101,7 @@
"\n", "\n",
"model = Ridge().fit(dataset_x, dataset_y)\n", "model = Ridge().fit(dataset_x, dataset_y)\n",
"\n", "\n",
"joblib.dump(model, 'sklearn_regression_model.pkl')" "dill.dump(model, open('sklearn_regression_model.pkl', 'wb'))"
] ]
}, },
{ {
@@ -285,7 +285,8 @@
" 'azureml-defaults',\n", " 'azureml-defaults',\n",
" 'inference-schema[numpy-support]',\n", " 'inference-schema[numpy-support]',\n",
" 'joblib',\n", " 'joblib',\n",
" 'numpy',\n", " 'dill==0.3.6',\n",
" 'numpy==1.23',\n",
" 'scikit-learn=={}'.format(sklearn.__version__)\n", " 'scikit-learn=={}'.format(sklearn.__version__)\n",
"])" "])"
] ]
@@ -486,7 +487,8 @@
" 'azureml-defaults',\n", " 'azureml-defaults',\n",
" 'inference-schema[numpy-support]',\n", " 'inference-schema[numpy-support]',\n",
" 'joblib',\n", " 'joblib',\n",
" 'numpy',\n", " 'dill==0.3.6',\n",
" 'numpy==1.23',\n",
" 'scikit-learn=={}'.format(sklearn.__version__)\n", " 'scikit-learn=={}'.format(sklearn.__version__)\n",
"])\n", "])\n",
"inference_config = InferenceConfig(entry_script='score.py', environment=environment)\n", "inference_config = InferenceConfig(entry_script='score.py', environment=environment)\n",

View File

@@ -1,373 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Deploy models to Azure Kubernetes Service (AKS) using controlled roll out\n",
"This notebook will show you how to deploy mulitple AKS webservices with the same scoring endpoint and how to roll out your models in a controlled manner by configuring % of scoring traffic going to each webservice. If you are using a Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) to install the Azure Machine Learning Python SDK and create an Azure ML Workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Check for latest version\n",
"import azureml.core\n",
"print(azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize workspace\n",
"Create a [Workspace](https://docs.microsoft.com/python/api/azureml-core/azureml.core.workspace%28class%29?view=azure-ml-py) object from your persisted configuration."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.workspace import Workspace\n",
"\n",
"ws = Workspace.from_config()\n",
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Register the model\n",
"Register a file or folder as a model by calling [Model.register()](https://docs.microsoft.com/python/api/azureml-core/azureml.core.model.model?view=azure-ml-py#register-workspace--model-path--model-name--tags-none--properties-none--description-none--datasets-none--model-framework-none--model-framework-version-none--child-paths-none-).\n",
"In addition to the content of the model file itself, your registered model will also store model metadata -- model description, tags, and framework information -- that will be useful when managing and deploying models in your workspace. Using tags, for instance, you can categorize your models and apply filters when listing models in your workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Model\n",
"\n",
"model = Model.register(workspace=ws,\n",
" model_name='sklearn_regression_model.pkl', # Name of the registered model in your workspace.\n",
" model_path='./sklearn_regression_model.pkl', # Local file to upload and register as a model.\n",
" model_framework=Model.Framework.SCIKITLEARN, # Framework used to create the model.\n",
" model_framework_version='0.19.1', # Version of scikit-learn used to create the model.\n",
" description='Ridge regression model to predict diabetes progression.',\n",
" tags={'area': 'diabetes', 'type': 'regression'})\n",
"\n",
"print('Name:', model.name)\n",
"print('Version:', model.version)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Register an environment (for all models)\n",
"\n",
"If you control over how your model is run, or if it has special runtime requirements, you can specify your own environment and scoring method.\n",
"\n",
"Specify the model's runtime environment by creating an [Environment](https://docs.microsoft.com/python/api/azureml-core/azureml.core.environment%28class%29?view=azure-ml-py) object and providing the [CondaDependencies](https://docs.microsoft.com/python/api/azureml-core/azureml.core.conda_dependencies.condadependencies?view=azure-ml-py) needed by your model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Environment\n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"\n",
"environment=Environment('my-sklearn-environment')\n",
"environment.python.conda_dependencies = CondaDependencies.create(conda_packages=[\n",
" 'pip==20.2.4'],\n",
" pip_packages=[\n",
" 'azureml-defaults',\n",
" 'inference-schema[numpy-support]',\n",
" 'numpy',\n",
" 'scikit-learn==0.22.1',\n",
" 'scipy'\n",
"])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"When using a custom environment, you must also provide Python code for initializing and running your model. An example script is included with this notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"with open('score.py') as f:\n",
" print(f.read())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create the InferenceConfig\n",
"Create the inference configuration to reference your environment and entry script during deployment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.model import InferenceConfig\n",
"\n",
"inference_config = InferenceConfig(entry_script='score.py', \n",
" source_directory='.',\n",
" environment=environment)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Provision the AKS Cluster\n",
"If you already have an AKS cluster attached to this workspace, skip the step below and provide the name of the cluster.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import AksCompute\n",
"from azureml.core.compute import ComputeTarget\n",
"# Use the default configuration (can also provide parameters to customize)\n",
"prov_config = AksCompute.provisioning_configuration()\n",
"\n",
"aks_name = 'my-aks' \n",
"# Create the cluster\n",
"aks_target = ComputeTarget.create(workspace = ws, \n",
" name = aks_name, \n",
" provisioning_configuration = prov_config) \n",
"aks_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create an Endpoint and add a version (AKS service)\n",
"This creates a new endpoint and adds a version behind it. By default the first version added is the default version. You can specify the traffic percentile a version takes behind an endpoint. \n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# deploying the model and create a new endpoint\n",
"from azureml.core.webservice import AksEndpoint\n",
"# from azureml.core.compute import ComputeTarget\n",
"\n",
"#select a created compute\n",
"compute = ComputeTarget(ws, 'my-aks')\n",
"namespace_name=\"endpointnamespace\"\n",
"# define the endpoint name\n",
"endpoint_name = \"myendpoint1\"\n",
"# define the service name\n",
"version_name= \"versiona\"\n",
"\n",
"endpoint_deployment_config = AksEndpoint.deploy_configuration(tags = {'modelVersion':'firstversion', 'department':'finance'}, \n",
" description = \"my first version\", namespace = namespace_name, \n",
" version_name = version_name, traffic_percentile = 40)\n",
"\n",
"endpoint = Model.deploy(ws, endpoint_name, [model], inference_config, endpoint_deployment_config, compute)\n",
"endpoint.wait_for_deployment(True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"endpoint.get_logs()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Add another version of the service to an existing endpoint\n",
"This adds another version behind an existing endpoint. You can specify the traffic percentile the new version takes. If no traffic_percentile is specified then it defaults to 0. All the unspecified traffic percentile (in this example 50) across all versions goes to default version."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Adding a new version to an existing Endpoint.\n",
"version_name_add=\"versionb\" \n",
"\n",
"endpoint.create_version(version_name = version_name_add, inference_config=inference_config, models=[model], tags = {'modelVersion':'secondversion', 'department':'finance'}, \n",
" description = \"my second version\", traffic_percentile = 10)\n",
"endpoint.wait_for_deployment(True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Update an existing version in an endpoint\n",
"There are two types of versions: control and treatment. An endpoint contains one or more treatment versions but only one control version. This categorization helps compare the different versions against the defined control version."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"endpoint.update_version(version_name=endpoint.versions[version_name_add].name, description=\"my second version update\", traffic_percentile=40, is_default=True, is_control_version_type=True)\n",
"endpoint.wait_for_deployment(True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test the web service using run method\n",
"Test the web sevice by passing in data. Run() method retrieves API keys behind the scenes to make sure that call is authenticated."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Scoring on endpoint\n",
"import json\n",
"test_sample = json.dumps({'data': [\n",
" [1,2,3,4,5,6,7,8,9,10], \n",
" [10,9,8,7,6,5,4,3,2,1]\n",
"]})\n",
"\n",
"test_sample_encoded = bytes(test_sample, encoding='utf8')\n",
"prediction = endpoint.run(input_data=test_sample_encoded)\n",
"print(prediction)"
]
},
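Equivalently, the endpoint can be called over raw HTTP; `scoring_uri` and `get_keys()` are standard `Webservice` members, so this sketch should hold for the endpoint created above.

```python
# Score via HTTP instead of endpoint.run().
import requests

primary_key, _ = endpoint.get_keys()
headers = {"Content-Type": "application/json",
           "Authorization": "Bearer " + primary_key}
resp = requests.post(endpoint.scoring_uri, data=test_sample_encoded, headers=headers)
print(resp.json())
```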
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Delete Resources"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# deleting a version in an endpoint\n",
"endpoint.delete_version(version_name=version_name)\n",
"endpoint.wait_for_deployment(True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# deleting an endpoint, this will delete all versions in the endpoint and the endpoint itself\n",
"endpoint.delete()"
]
}
],
"metadata": {
"authors": [
{
"name": "shipatel"
}
],
"category": "deployment",
"compute": [
"None"
],
"datasets": [
"Diabetes"
],
"deployment": [
"Azure Kubernetes Service"
],
"exclude_from_index": false,
"framework": [
"Scikit-learn"
],
"friendly_name": "Deploy models to AKS using controlled roll out",
"index_order": 3,
"kernelspec": {
"display_name": "Python 3.8 - AzureML",
"language": "python",
"name": "python38-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.0"
},
"star_tag": [
"featured"
],
"tags": [
"None"
],
"task": "Deploy a model with Azure Machine Learning"
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,4 +0,0 @@
name: deploy-aks-with-controlled-rollout
dependencies:
- pip:
- azureml-sdk

View File

@@ -1,28 +0,0 @@
import pickle
import json
import numpy
from sklearn.externals import joblib
from sklearn.linear_model import Ridge
from azureml.core.model import Model


def init():
    global model
    # "sklearn_regression_model.pkl" is the name the model was registered under;
    # this differs from running the code locally, even though the code is the same.
    model_path = Model.get_model_path('sklearn_regression_model.pkl')
    # deserialize the model file back into a sklearn model
    model = joblib.load(model_path)


# note you can pass in multiple rows for scoring
def run(raw_data):
    try:
        data = json.loads(raw_data)['data']
        data = numpy.array(data)
        result = model.predict(data)
        # you can return any data type as long as it is JSON-serializable
        return result.tolist()
    except Exception as e:
        error = str(e)
        return error

View File

@@ -240,9 +240,9 @@
"# Please see [Azure ML Containers repository](https://github.com/Azure/AzureML-Containers#featured-tags)\n", "# Please see [Azure ML Containers repository](https://github.com/Azure/AzureML-Containers#featured-tags)\n",
"# for open-sourced GPU base images.\n", "# for open-sourced GPU base images.\n",
"env.docker.base_image = DEFAULT_GPU_IMAGE\n", "env.docker.base_image = DEFAULT_GPU_IMAGE\n",
"env.python.conda_dependencies = CondaDependencies.create(python_version=\"3.6.2\", \n", "env.python.conda_dependencies = CondaDependencies.create(python_version=\"3.6.2\", pin_sdk_version=False,\n",
" conda_packages=['tensorflow-gpu==1.12.0','numpy'],\n", " conda_packages=['tensorflow-gpu==1.12.0','numpy'],\n",
" pip_packages=['azureml-contrib-services', 'azureml-defaults'])\n", " pip_packages=['azureml-contrib-services==1.47.0', 'azureml-defaults==1.47.0'])\n",
"\n", "\n",
"inference_config = InferenceConfig(entry_script=\"score.py\", environment=env)\n", "inference_config = InferenceConfig(entry_script=\"score.py\", environment=env)\n",
"aks_config = AksWebservice.deploy_configuration()\n", "aks_config = AksWebservice.deploy_configuration()\n",
@@ -343,7 +343,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.6.6" "version": "3.7.0"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@@ -5,4 +5,4 @@ dependencies:
- matplotlib - matplotlib
- tqdm - tqdm
- scipy - scipy
- sklearn - scikit-learn

View File

@@ -5,4 +5,4 @@ dependencies:
- matplotlib - matplotlib
- tqdm - tqdm
- scipy - scipy
- sklearn - scikit-learn

View File

@@ -137,7 +137,7 @@
"myenv = Environment('my-pyspark-environment')\r\n", "myenv = Environment('my-pyspark-environment')\r\n",
"myenv.docker.base_image = \"mcr.microsoft.com/mmlspark/release:0.15\"\r\n", "myenv.docker.base_image = \"mcr.microsoft.com/mmlspark/release:0.15\"\r\n",
"myenv.inferencing_stack_version = \"latest\"\r\n", "myenv.inferencing_stack_version = \"latest\"\r\n",
"myenv.python.conda_dependencies = CondaDependencies.create(pip_packages=[\"azureml-core\",\"azureml-defaults\",\"azureml-telemetry\",\"azureml-train-restclients-hyperdrive\",\"azureml-train-core\"], python_version=\"3.6.2\")\r\n", "myenv.python.conda_dependencies = CondaDependencies.create(pip_packages=[\"azureml-core\",\"azureml-defaults\",\"azureml-telemetry\",\"azureml-train-restclients-hyperdrive\",\"azureml-train-core\"], python_version=\"3.7.0\")\r\n",
"myenv.python.conda_dependencies.add_channel(\"conda-forge\")\r\n", "myenv.python.conda_dependencies.add_channel(\"conda-forge\")\r\n",
"myenv.spark.packages = [SparkPackage(\"com.microsoft.ml.spark\", \"mmlspark_2.11\", \"0.15\"), SparkPackage(\"com.microsoft.azure\", \"azure-storage\", \"2.0.0\"), SparkPackage(\"org.apache.hadoop\", \"hadoop-azure\", \"2.7.0\")]\r\n", "myenv.spark.packages = [SparkPackage(\"com.microsoft.ml.spark\", \"mmlspark_2.11\", \"0.15\"), SparkPackage(\"com.microsoft.azure\", \"azure-storage\", \"2.0.0\"), SparkPackage(\"org.apache.hadoop\", \"hadoop-azure\", \"2.7.0\")]\r\n",
"myenv.spark.repositories = [\"https://mmlspark.azureedge.net/maven\"]\r\n" "myenv.spark.repositories = [\"https://mmlspark.azureedge.net/maven\"]\r\n"
@@ -341,7 +341,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.6.2" "version": "3.7.0"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@@ -106,7 +106,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.46.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.51.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },
@@ -235,20 +235,30 @@
"# Note: this is to pin the pandas and xgboost versions to be same as notebook.\n", "# Note: this is to pin the pandas and xgboost versions to be same as notebook.\n",
"# In production scenario user would choose their dependencies\n", "# In production scenario user would choose their dependencies\n",
"import pkg_resources\n", "import pkg_resources\n",
"from distutils.version import LooseVersion\n",
"available_packages = pkg_resources.working_set\n", "available_packages = pkg_resources.working_set\n",
"pandas_ver = None\n", "pandas_ver = None\n",
"numpy_ver = None\n", "numpy_ver = None\n",
"sklearn_ver = None\n",
"for dist in list(available_packages):\n", "for dist in list(available_packages):\n",
" if dist.key == 'pandas':\n", " if dist.key == 'pandas':\n",
" pandas_ver = dist.version\n", " pandas_ver = dist.version\n",
" if dist.key == 'numpy':\n", " if dist.key == 'numpy':\n",
" numpy_ver = dist.version\n", " if LooseVersion(dist.version) >= LooseVersion('1.20.0'):\n",
" numpy_ver = dist.version\n",
" else:\n",
" numpy_ver = '1.21.6'\n",
" if dist.key == 'scikit-learn':\n",
" sklearn_ver = dist.version\n",
"pandas_dep = 'pandas'\n", "pandas_dep = 'pandas'\n",
"numpy_dep = 'numpy'\n", "numpy_dep = 'numpy'\n",
"sklearn_dep = 'scikit-learn'\n",
"if pandas_ver:\n", "if pandas_ver:\n",
" pandas_dep = 'pandas=={}'.format(pandas_ver)\n", " pandas_dep = 'pandas=={}'.format(pandas_ver)\n",
"if numpy_ver:\n", "if numpy_ver:\n",
" numpy_dep = 'numpy=={}'.format(numpy_ver)\n", " numpy_dep = 'numpy=={}'.format(numpy_ver)\n",
"if sklearn_ver:\n",
" sklearn_dep = 'scikit-learn=={}'.format(sklearn_ver)\n",
"\n", "\n",
"# Note: we build shap at commit 690245 for Tesla K80 GPUs\n", "# Note: we build shap at commit 690245 for Tesla K80 GPUs\n",
"env.docker.base_dockerfile = f\"\"\"\n", "env.docker.base_dockerfile = f\"\"\"\n",
@@ -288,7 +298,9 @@
"pip uninstall -y xgboost && \\\n", "pip uninstall -y xgboost && \\\n",
"conda install py-xgboost==1.3.3 && \\\n", "conda install py-xgboost==1.3.3 && \\\n",
"pip uninstall -y numpy && \\\n", "pip uninstall -y numpy && \\\n",
"pip install {numpy_dep} \\\n", "pip install {numpy_dep} && \\\n",
"pip install {sklearn_dep} && \\\n",
"pip install chardet \\\n",
"\"\"\"\n", "\"\"\"\n",
"\n", "\n",
"env.python.user_managed_dependencies = True\n", "env.python.user_managed_dependencies = True\n",

View File

@@ -10,7 +10,7 @@ dependencies:
- ipython - ipython
- matplotlib - matplotlib
- ipywidgets - ipywidgets
- raiwidgets~=0.22.0 - raiwidgets~=0.26.0
- itsdangerous==2.0.1 - itsdangerous==2.0.1
- markupsafe<2.1.0 - markupsafe<2.1.0
- scipy>=1.5.3 - scipy>=1.5.3

View File

@@ -10,7 +10,7 @@ dependencies:
- matplotlib - matplotlib
- azureml-dataset-runtime - azureml-dataset-runtime
- ipywidgets - ipywidgets
- raiwidgets~=0.22.0 - raiwidgets~=0.26.0
- itsdangerous==2.0.1 - itsdangerous==2.0.1
- markupsafe<2.1.0 - markupsafe<2.1.0
- scipy>=1.5.3 - scipy>=1.5.3

View File

@@ -9,7 +9,7 @@ dependencies:
- ipython - ipython
- matplotlib - matplotlib
- ipywidgets - ipywidgets
- raiwidgets~=0.22.0 - raiwidgets~=0.26.0
- packaging>=20.9 - packaging>=20.9
- itsdangerous==2.0.1 - itsdangerous==2.0.1
- markupsafe<2.1.0 - markupsafe<2.1.0

View File

@@ -9,7 +9,7 @@ dependencies:
- ipython - ipython
- matplotlib - matplotlib
- ipywidgets - ipywidgets
- raiwidgets~=0.22.0 - raiwidgets~=0.26.0
- packaging>=20.9 - packaging>=20.9
- itsdangerous==2.0.1 - itsdangerous==2.0.1
- markupsafe<2.1.0 - markupsafe<2.1.0

View File

@@ -11,7 +11,7 @@ dependencies:
- azureml-dataset-runtime - azureml-dataset-runtime
- azureml-core - azureml-core
- ipywidgets - ipywidgets
- raiwidgets~=0.22.0 - raiwidgets~=0.26.0
- itsdangerous==2.0.1 - itsdangerous==2.0.1
- markupsafe<2.1.0 - markupsafe<2.1.0
- scipy>=1.5.3 - scipy>=1.5.3

View File

@@ -175,7 +175,7 @@
"store_name=os.getenv(\"ADL_STORENAME_62\", \"<my-datastore-name>\") # ADLS account name\n", "store_name=os.getenv(\"ADL_STORENAME_62\", \"<my-datastore-name>\") # ADLS account name\n",
"tenant_id=os.getenv(\"ADL_TENANT_62\", \"<my-tenant-id>\") # tenant id of service principal\n", "tenant_id=os.getenv(\"ADL_TENANT_62\", \"<my-tenant-id>\") # tenant id of service principal\n",
"client_id=os.getenv(\"ADL_CLIENTID_62\", \"<my-client-id>\") # client id of service principal\n", "client_id=os.getenv(\"ADL_CLIENTID_62\", \"<my-client-id>\") # client id of service principal\n",
"client_secret=os.getenv(\"ADL_CLIENT_SECRET_62\", \"<my-client-secret>\") # the secret of service principal\n", "client_st=os.getenv(\"ADL_CLIENT_SECRET_62\", \"<my-client-secret>\") # the secret of service principal\n",
"\n", "\n",
"try:\n", "try:\n",
" adls_datastore = Datastore.get(ws, datastore_name)\n", " adls_datastore = Datastore.get(ws, datastore_name)\n",
@@ -189,7 +189,7 @@
" store_name=store_name, # ADLS account name\n", " store_name=store_name, # ADLS account name\n",
" tenant_id=tenant_id, # tenant id of service principal\n", " tenant_id=tenant_id, # tenant id of service principal\n",
" client_id=client_id, # client id of service principal\n", " client_id=client_id, # client id of service principal\n",
" client_secret=client_secret) # the secret of service principal\n", " client_secret=client_st) # the secret of service principal\n",
" print(\"Registered datastore with name: %s\" % datastore_name)\n", " print(\"Registered datastore with name: %s\" % datastore_name)\n",
"\n", "\n",
"adls_data_ref = DataReference(\n", "adls_data_ref = DataReference(\n",

View File

@@ -147,7 +147,7 @@
"store_name = os.getenv(\"ADL_STORENAME_62\", \"<my-datastore-name>\") # ADLS account name\n", "store_name = os.getenv(\"ADL_STORENAME_62\", \"<my-datastore-name>\") # ADLS account name\n",
"tenant_id = os.getenv(\"ADL_TENANT_62\", \"<my-tenant-id>\") # tenant id of service principal\n", "tenant_id = os.getenv(\"ADL_TENANT_62\", \"<my-tenant-id>\") # tenant id of service principal\n",
"client_id = os.getenv(\"ADL_CLIENTID_62\", \"<my-client-id>\") # client id of service principal\n", "client_id = os.getenv(\"ADL_CLIENTID_62\", \"<my-client-id>\") # client id of service principal\n",
"client_secret = os.getenv(\"ADL_CLIENT_62_SECRET\", \"<my-client-secret>\") # the secret of service principal\n", "client_st = os.getenv(\"ADL_CLIENT_62_SECRET\", \"<my-client-secret>\") # the secret of service principal\n",
"\n", "\n",
"try:\n", "try:\n",
" adls_datastore = Datastore.get(ws, datastore_name)\n", " adls_datastore = Datastore.get(ws, datastore_name)\n",
@@ -161,7 +161,7 @@
" store_name=store_name, # ADLS account name\n", " store_name=store_name, # ADLS account name\n",
" tenant_id=tenant_id, # tenant id of service principal\n", " tenant_id=tenant_id, # tenant id of service principal\n",
" client_id=client_id, # client id of service principal\n", " client_id=client_id, # client id of service principal\n",
" client_secret=client_secret) # the secret of service principal\n", " client_secret=client_st) # the secret of service principal\n",
" print(\"registered datastore with name: %s\" % datastore_name)" " print(\"registered datastore with name: %s\" % datastore_name)"
] ]
}, },

View File

@@ -330,7 +330,7 @@
"- **inputs:** List of input connections for data consumed by this step. Fetch this inside the notebook using dbutils.widgets.get(\"input\")\n", "- **inputs:** List of input connections for data consumed by this step. Fetch this inside the notebook using dbutils.widgets.get(\"input\")\n",
"- **outputs:** List of output port definitions for outputs produced by this step. Fetch this inside the notebook using dbutils.widgets.get(\"output\")\n", "- **outputs:** List of output port definitions for outputs produced by this step. Fetch this inside the notebook using dbutils.widgets.get(\"output\")\n",
"- **existing_cluster_id:** Cluster ID of an existing Interactive cluster on the Databricks workspace. If you are providing this, do not provide any of the parameters below that are used to create a new cluster such as spark_version, node_type, etc.\n", "- **existing_cluster_id:** Cluster ID of an existing Interactive cluster on the Databricks workspace. If you are providing this, do not provide any of the parameters below that are used to create a new cluster such as spark_version, node_type, etc.\n",
"- **spark_version:** Version of spark for the databricks run cluster. default value: 4.0.x-scala2.11\n", "- **spark_version:** Version of spark for the databricks run cluster. You can refer to [DataBricks runtime version](https://learn.microsoft.com/azure/databricks/dev-tools/api/#--runtime-version-strings) to specify the spark version. default value: 10.4.x-scala2.12\n",
"- **node_type:** Azure vm node types for the databricks run cluster. default value: Standard_D3_v2\n", "- **node_type:** Azure vm node types for the databricks run cluster. default value: Standard_D3_v2\n",
"- **num_workers:** Specifies a static number of workers for the databricks run cluster\n", "- **num_workers:** Specifies a static number of workers for the databricks run cluster\n",
"- **min_workers:** Specifies a min number of workers to use for auto-scaling the databricks run cluster\n", "- **min_workers:** Specifies a min number of workers to use for auto-scaling the databricks run cluster\n",

View File

@@ -252,7 +252,7 @@
"# is_directory=None)\n", "# is_directory=None)\n",
"\n", "\n",
"# Naming the intermediate data as processed_data1 and assigning it to the variable processed_data1.\n", "# Naming the intermediate data as processed_data1 and assigning it to the variable processed_data1.\n",
"processed_data1 = PipelineData(\"processed_data1\",datastore=def_blob_store)\n", "processed_data1 = PipelineData(\"processed_data1\",datastore=def_blob_store, is_directory=True)\n",
"print(\"PipelineData object created\")" "print(\"PipelineData object created\")"
] ]
}, },
@@ -347,7 +347,7 @@
"source": [ "source": [
"# step5 to use the intermediate data produced by step4\n", "# step5 to use the intermediate data produced by step4\n",
"# This step also produces an output processed_data2\n", "# This step also produces an output processed_data2\n",
"processed_data2 = PipelineData(\"processed_data2\", datastore=def_blob_store)\n", "processed_data2 = PipelineData(\"processed_data2\", datastore=def_blob_store, is_directory=True)\n",
"source_directory = \"data_dependency_run_extract\"\n", "source_directory = \"data_dependency_run_extract\"\n",
"\n", "\n",
"extractStep = PythonScriptStep(\n", "extractStep = PythonScriptStep(\n",
@@ -394,7 +394,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# Now define the compare step which takes two inputs and produces an output\n", "# Now define the compare step which takes two inputs and produces an output\n",
"processed_data3 = PipelineData(\"processed_data3\", datastore=def_blob_store)\n", "processed_data3 = PipelineData(\"processed_data3\", datastore=def_blob_store, is_directory=True)\n",
"source_directory = \"data_dependency_run_compare\"\n", "source_directory = \"data_dependency_run_compare\"\n",
"\n", "\n",
"compareStep = PythonScriptStep(\n", "compareStep = PythonScriptStep(\n",

View File

@@ -235,7 +235,8 @@
" path_on_datastore=\"titanic/Titanic.csv\")\n", " path_on_datastore=\"titanic/Titanic.csv\")\n",
"\n", "\n",
"output_data = PipelineData(name=\"processed_data\",\n", "output_data = PipelineData(name=\"processed_data\",\n",
" datastore=Datastore.get(ws, \"workspaceblobstore\"))" " datastore=Datastore.get(ws, \"workspaceblobstore\"),\n",
" is_directory=True)"
] ]
}, },
{ {
@@ -306,7 +307,8 @@
"from azureml.pipeline.core import PipelineParameter\n", "from azureml.pipeline.core import PipelineParameter\n",
"\n", "\n",
"output_from_notebook = PipelineData(name=\"notebook_processed_data\",\n", "output_from_notebook = PipelineData(name=\"notebook_processed_data\",\n",
" datastore=Datastore.get(ws, \"workspaceblobstore\"))\n", " datastore=Datastore.get(ws, \"workspaceblobstore\"),\n",
" is_directory=True)\n",
"\n", "\n",
"my_pipeline_param = PipelineParameter(name=\"pipeline_param\", default_value=\"my_param\")\n", "my_pipeline_param = PipelineParameter(name=\"pipeline_param\", default_value=\"my_param\")\n",
"\n", "\n",

View File

@@ -1,5 +1,5 @@
# DisableDockerDetector "Disabled to unblock PRs until the owner can fix the file. Not used in any prod deployments - only as a documentation for the customers" # DisableDockerDetector "Disabled to unblock PRs until the owner can fix the file. Not used in any prod deployments - only as a documentation for the customers"
FROM rocker/tidyverse:4.0.0-ubuntu18.04 FROM rocker/tidyverse:4.0.0-ubuntu20.04
# Install python # Install python
RUN apt-get update -qq && \ RUN apt-get update -qq && \

View File

@@ -363,7 +363,7 @@
"}).replace(\",\", \";\")\n", "}).replace(\",\", \";\")\n",
"\n", "\n",
"# Define output after cleansing step\n", "# Define output after cleansing step\n",
"cleansed_green_data = PipelineData(\"cleansed_green_data\", datastore=default_store).as_dataset()\n", "cleansed_green_data = PipelineData(\"cleansed_green_data\", datastore=default_store, is_directory=True).as_dataset()\n",
"\n", "\n",
"print('Cleanse script is in {}.'.format(os.path.realpath(prepare_data_folder)))\n", "print('Cleanse script is in {}.'.format(os.path.realpath(prepare_data_folder)))\n",
"\n", "\n",
@@ -414,7 +414,7 @@
"}).replace(\",\", \";\")\n", "}).replace(\",\", \";\")\n",
"\n", "\n",
"# Define output after cleansing step\n", "# Define output after cleansing step\n",
"cleansed_yellow_data = PipelineData(\"cleansed_yellow_data\", datastore=default_store).as_dataset()\n", "cleansed_yellow_data = PipelineData(\"cleansed_yellow_data\", datastore=default_store, is_directory=True).as_dataset()\n",
"\n", "\n",
"print('Cleanse script is in {}.'.format(os.path.realpath(prepare_data_folder)))\n", "print('Cleanse script is in {}.'.format(os.path.realpath(prepare_data_folder)))\n",
"\n", "\n",
@@ -452,7 +452,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# Define output after merging step\n", "# Define output after merging step\n",
"merged_data = PipelineData(\"merged_data\", datastore=default_store).as_dataset()\n", "merged_data = PipelineData(\"merged_data\", datastore=default_store, is_directory=True).as_dataset()\n",
"\n", "\n",
"print('Merge script is in {}.'.format(os.path.realpath(prepare_data_folder)))\n", "print('Merge script is in {}.'.format(os.path.realpath(prepare_data_folder)))\n",
"\n", "\n",
@@ -489,7 +489,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# Define output after merging step\n", "# Define output after merging step\n",
"filtered_data = PipelineData(\"filtered_data\", datastore=default_store).as_dataset()\n", "filtered_data = PipelineData(\"filtered_data\", datastore=default_store, is_directory=True).as_dataset()\n",
"\n", "\n",
"print('Filter script is in {}.'.format(os.path.realpath(prepare_data_folder)))\n", "print('Filter script is in {}.'.format(os.path.realpath(prepare_data_folder)))\n",
"\n", "\n",
@@ -525,7 +525,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# Define output after normalize step\n", "# Define output after normalize step\n",
"normalized_data = PipelineData(\"normalized_data\", datastore=default_store).as_dataset()\n", "normalized_data = PipelineData(\"normalized_data\", datastore=default_store, is_directory=True).as_dataset()\n",
"\n", "\n",
"print('Normalize script is in {}.'.format(os.path.realpath(prepare_data_folder)))\n", "print('Normalize script is in {}.'.format(os.path.realpath(prepare_data_folder)))\n",
"\n", "\n",
@@ -566,7 +566,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# Define output after transform step\n", "# Define output after transform step\n",
"transformed_data = PipelineData(\"transformed_data\", datastore=default_store).as_dataset()\n", "transformed_data = PipelineData(\"transformed_data\", datastore=default_store, is_directory=True).as_dataset()\n",
"\n", "\n",
"print('Transform script is in {}.'.format(os.path.realpath(prepare_data_folder)))\n", "print('Transform script is in {}.'.format(os.path.realpath(prepare_data_folder)))\n",
"\n", "\n",
@@ -604,8 +604,8 @@
"train_model_folder = './scripts/trainmodel'\n", "train_model_folder = './scripts/trainmodel'\n",
"\n", "\n",
"# train and test splits output\n", "# train and test splits output\n",
"output_split_train = PipelineData(\"output_split_train\", datastore=default_store).as_dataset()\n", "output_split_train = PipelineData(\"output_split_train\", datastore=default_store, is_directory=True).as_dataset()\n",
"output_split_test = PipelineData(\"output_split_test\", datastore=default_store).as_dataset()\n", "output_split_test = PipelineData(\"output_split_test\", datastore=default_store, is_directory=True).as_dataset()\n",
"\n", "\n",
"print('Data spilt script is in {}.'.format(os.path.realpath(train_model_folder)))\n", "print('Data spilt script is in {}.'.format(os.path.realpath(train_model_folder)))\n",
"\n", "\n",

View File

@@ -86,7 +86,7 @@
"import requests\n", "import requests\n",
"\n", "\n",
"oj_sales_path = \"./oj.csv\"\n", "oj_sales_path = \"./oj.csv\"\n",
"r = requests.get(\"http://www.cs.unitn.it/~taufer/Data/oj.csv\")\n", "r = requests.get(\"https://raw.githubusercontent.com/Azure/azureml-examples/main/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-orange-juice-sales/data/dominicks_OJ.csv\")\n",
"open(oj_sales_path, \"wb\").write(r.content)" "open(oj_sales_path, \"wb\").write(r.content)"
] ]
}, },
@@ -140,7 +140,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"partitioned_dataset = dataset.partition_by(partition_keys=['store', 'brand'], target=(datastore, \"partition_by_key_res\"), name=\"partitioned_oj_data\")\n", "partitioned_dataset = dataset.partition_by(partition_keys=['Store', 'Brand'], target=(datastore, \"partition_by_key_res\"), name=\"partitioned_oj_data\")\n",
"partitioned_dataset.partition_keys" "partitioned_dataset.partition_keys"
] ]
}, },
@@ -274,7 +274,7 @@
"parallel_run_config = ParallelRunConfig(\n", "parallel_run_config = ParallelRunConfig(\n",
" source_directory=scripts_folder,\n", " source_directory=scripts_folder,\n",
" entry_script=script_file, # the user script to run against each input\n", " entry_script=script_file, # the user script to run against each input\n",
" partition_keys=['store', 'brand'],\n", " partition_keys=['Store', 'Brand'],\n",
" error_threshold=5,\n", " error_threshold=5,\n",
" output_action='append_row',\n", " output_action='append_row',\n",
" append_row_file_name=\"revenue_outputs.txt\",\n", " append_row_file_name=\"revenue_outputs.txt\",\n",
@@ -362,8 +362,8 @@
"result_file = os.path.join(target_dir, batch_output.path_on_datastore, parallel_run_config.append_row_file_name)\n", "result_file = os.path.join(target_dir, batch_output.path_on_datastore, parallel_run_config.append_row_file_name)\n",
"\n", "\n",
"df = pd.read_csv(result_file, delimiter=\" \", header=None)\n", "df = pd.read_csv(result_file, delimiter=\" \", header=None)\n",
"df.columns=[\"WeekStarting\", \"Quantity\", \"logQuantity\", \"Advert\", \"Price\", \"Age60\", \"COLLEGE\", \"INCOME\", \"Hincome150\", \"Large HH\", \"Minorities\", \"WorkingWoman\", \"SSTRDIST\", \"SSTRVOL\", \"CPDIST5\", \"CPWVOL5\", \"Store\", \"Brand\", \"total_income\"]\n",
"\n", "\n",
"df.columns = [\"week\", \"logmove\", \"feat\", \"price\", \"AGE60\", \"EDUC\", \"ETHNIC\", \"INCOME\", \"HHLARGE\", \"WORKWOM\", \"HVAL150\", \"SSTRDIST\", \"SSTRVOL\", \"CPDIST5\", \"CPWVOL5\", \"store\", \"brand\", \"total_income\"]\n",
"print(\"Prediction has \", df.shape[0], \" rows\")\n", "print(\"Prediction has \", df.shape[0], \" rows\")\n",
"df.head(10)" "df.head(10)"
] ]
@@ -413,7 +413,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.6.9" "version": "3.8.13"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@@ -1,358 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/chainer/distributed-chainer/distributed-chainer.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Distributed Chainer\n",
"In this tutorial, you will run a Chainer training example on the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset using ChainerMN distributed training across a GPU cluster."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Prerequisites\n",
"* If you are using an Azure Machine Learning compute instance, you are all set. Otherwise, go through the [Configuration](../../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Check core SDK version number\n",
"import azureml.core\n",
"\n",
"print(\"SDK version:\", azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Diagnostics\n",
"Opt-in diagnostics for better experience, quality, and security of future releases."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"Diagnostics"
]
},
"outputs": [],
"source": [
"from azureml.telemetry import set_diagnostics_collection\n",
"\n",
"set_diagnostics_collection(send_diagnostics=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize workspace\n",
"\n",
"Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.workspace import Workspace\n",
"\n",
"ws = Workspace.from_config()\n",
"print('Workspace name: ' + ws.name, \n",
" 'Azure region: ' + ws.location, \n",
" 'Subscription id: ' + ws.subscription_id, \n",
" 'Resource group: ' + ws.resource_group, sep = '\\n')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create or attach existing AmlCompute\n",
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource. Specifically, the below code creates an `STANDARD_NC6` GPU cluster that autoscales from `0` to `4` nodes.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
"\n",
"**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace, this code will skip the creation process.\n",
"\n",
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# choose a name for your cluster\n",
"cluster_name = \"gpu-cluster\"\n",
"\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cluster_name)\n",
" print('Found existing compute target.')\n",
"except ComputeTargetException:\n",
" print('Creating a new compute target...')\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',\n",
" max_nodes=4)\n",
"\n",
" # create the cluster\n",
" compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n",
"\n",
" compute_target.wait_for_completion(show_output=True)\n",
"\n",
"# use get_status() to get a detailed status for the current AmlCompute. \n",
"print(compute_target.get_status().serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The above code creates GPU compute. If you instead want to create CPU compute, provide a different VM size to the `vm_size` parameter, such as `STANDARD_D2_V2`."
]
},
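For reference, a minimal CPU-cluster variant of the provisioning call above (a sketch; the `vm_size` and `max_nodes` values are illustrative):

```Python
from azureml.core.compute import AmlCompute

# same provisioning call as above, but with a CPU SKU instead of a GPU SKU
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',
                                                       max_nodes=4)
```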
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train model on the remote compute\n",
"Now that we have the AmlCompute ready to go, let's run our distributed training job."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a project directory\n",
"Create a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script and any additional files your training script depends on."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"project_folder = './chainer-distr'\n",
"os.makedirs(project_folder, exist_ok=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prepare training script\n",
"Now you will need to create your training script. In this tutorial, the script for distributed training of MNIST is already provided for you at `train_mnist.py`. In practice, you should be able to take any custom Chainer training script as is and run it with Azure ML without having to modify your code."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Once your script is ready, copy the training script `train_mnist.py` into the project directory."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import shutil\n",
"\n",
"shutil.copy('train_mnist.py', project_folder)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create an experiment\n",
"Create an [Experiment](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#experiment) to track all the runs in your workspace for this distributed Chainer tutorial. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment_name = 'chainer-distr'\n",
"experiment = Experiment(ws, name=experiment_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create an environment\n",
"\n",
"In this tutorial, we will use one of the Azure ML Chainer curated environments for training."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Environment\n",
"\n",
"chainer_env = Environment.get(ws, name='AzureML-Chainer-5.1.0-GPU')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Configure your training job\n",
"\n",
"Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on.\n",
"\n",
"In order to execute a distributed run using MPI, you must create an `MpiConfiguration` object and specify it to the `distributed_job_config` parameter. The below code will configure a 2-node distributed job. If you would also like to run multiple processes per node (i.e. if your cluster SKU has multiple GPUs), additionally specify the `process_count_per_node` parameter in MpiConfiguration."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import ScriptRunConfig\n",
"from azureml.core.runconfig import MpiConfiguration\n",
"\n",
"src = ScriptRunConfig(source_directory=project_folder,\n",
" script='train_mnist.py',\n",
" compute_target=compute_target,\n",
" environment=chainer_env,\n",
" distributed_job_config=MpiConfiguration(node_count=2))"
]
},
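If your cluster SKU has multiple GPUs per node, a sketch of the same job configuration launching one worker process per GPU (assuming 4 GPUs per node; the counts are illustrative):

```Python
from azureml.core.runconfig import MpiConfiguration

# 2 nodes x 4 processes per node = 8 workers in total
distr_config = MpiConfiguration(node_count=2, process_count_per_node=4)
```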
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit job\n",
"Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"run = experiment.submit(src)\n",
"print(run)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Monitor your run\n",
"You can monitor the progress of the run with a Jupyter widget. Like the run submission, the widget is asynchronous and provides live updates every 10-15 seconds until the job completes. You can see that the widget automatically plots and visualizes the loss metric that we logged to the Azure ML run."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"\n",
"RunDetails(run).show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"run.wait_for_completion(show_output=True)"
]
}
],
"metadata": {
"authors": [
{
"name": "ninhu"
}
],
"category": "training",
"compute": [
"AML Compute"
],
"datasets": [
"MNIST"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"Chainer"
],
"friendly_name": "Distributed Training with Chainer",
"index_order": 1,
"kernelspec": {
"display_name": "Python 3.8 - AzureML",
"language": "python",
"name": "python38-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.7"
},
"tags": [
"None"
],
"task": "Use the Chainer estimator to perform distributed training"
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,5 +0,0 @@
name: distributed-chainer
dependencies:
- pip:
- azureml-sdk
- azureml-widgets

View File

@@ -1,125 +0,0 @@
# Official ChainerMN example taken from
# https://github.com/chainer/chainer/blob/master/examples/chainermn/mnist/train_mnist.py
from __future__ import print_function
import argparse
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
import chainermn
class MLP(chainer.Chain):
def __init__(self, n_units, n_out):
super(MLP, self).__init__(
# the size of the inputs to each layer will be inferred
l1=L.Linear(784, n_units), # n_in -> n_units
l2=L.Linear(n_units, n_units), # n_units -> n_units
l3=L.Linear(n_units, n_out), # n_units -> n_out
)
def __call__(self, x):
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
return self.l3(h2)
def main():
parser = argparse.ArgumentParser(description='ChainerMN example: MNIST')
parser.add_argument('--batchsize', '-b', type=int, default=100,
help='Number of images in each mini-batch')
parser.add_argument('--communicator', type=str,
default='non_cuda_aware', help='Type of communicator')
parser.add_argument('--epoch', '-e', type=int, default=20,
help='Number of sweeps over the dataset to train')
parser.add_argument('--gpu', '-g', default=True,
help='Use GPU')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--unit', '-u', type=int, default=1000,
help='Number of units')
args = parser.parse_args()
# Prepare ChainerMN communicator.
if args.gpu:
if args.communicator == 'naive':
print("Error: 'naive' communicator does not support GPU.\n")
exit(-1)
comm = chainermn.create_communicator(args.communicator)
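        # one process per GPU: the process's rank within this node selects its GPU id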
device = comm.intra_rank
else:
if args.communicator != 'naive':
print('Warning: using naive communicator '
'because only naive supports CPU-only execution')
comm = chainermn.create_communicator('naive')
device = -1
if comm.rank == 0:
print('==========================================')
print('Num process (COMM_WORLD): {}'.format(comm.size))
if args.gpu:
print('Using GPUs')
print('Using {} communicator'.format(args.communicator))
print('Num unit: {}'.format(args.unit))
print('Num Minibatch-size: {}'.format(args.batchsize))
print('Num epoch: {}'.format(args.epoch))
print('==========================================')
model = L.Classifier(MLP(args.unit, 10))
if device >= 0:
chainer.cuda.get_device_from_id(device).use()
model.to_gpu()
# Create a multi node optimizer from a standard Chainer optimizer.
optimizer = chainermn.create_multi_node_optimizer(
chainer.optimizers.Adam(), comm)
optimizer.setup(model)
# Split and distribute the dataset. Only worker 0 loads the whole dataset.
# Datasets of worker 0 are evenly split and distributed to all workers.
if comm.rank == 0:
train, test = chainer.datasets.get_mnist()
else:
train, test = None, None
train = chainermn.scatter_dataset(train, comm, shuffle=True)
test = chainermn.scatter_dataset(test, comm, shuffle=True)
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
updater = training.StandardUpdater(train_iter, optimizer, device=device)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
# Create a multi node evaluator from a standard Chainer evaluator.
evaluator = extensions.Evaluator(test_iter, model, device=device)
evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)
trainer.extend(evaluator)
# Some display and output extensions are necessary only for one worker.
# (Otherwise, there would just be repeated outputs.)
if comm.rank == 0:
trainer.extend(extensions.dump_graph('main/loss'))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
trainer.extend(extensions.ProgressBar())
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
trainer.run()
if __name__ == '__main__':
main()

View File

@@ -1,142 +0,0 @@
import argparse
import os
import numpy as np
from datautils import download_mnist
import chainer
from chainer import backend
from chainer import backends
from chainer.backends import cuda
from chainer import Function, gradient_check, report, training, utils, Variable
from chainer import datasets, iterators, optimizers, serializers
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
from chainer.training import extensions
from chainer.dataset import concat_examples
from chainer.backends.cuda import to_cpu
from azureml.core.run import Run
run = Run.get_context()
class MyNetwork(Chain):
def __init__(self, n_mid_units=100, n_out=10):
super(MyNetwork, self).__init__()
with self.init_scope():
self.l1 = L.Linear(None, n_mid_units)
self.l2 = L.Linear(n_mid_units, n_mid_units)
self.l3 = L.Linear(n_mid_units, n_out)
def forward(self, x):
h = F.relu(self.l1(x))
h = F.relu(self.l2(h))
return self.l3(h)
def main():
parser = argparse.ArgumentParser(description='Chainer example: MNIST')
parser.add_argument('--batchsize', '-b', type=int, default=100,
help='Number of images in each mini-batch')
parser.add_argument('--epochs', '-e', type=int, default=20,
help='Number of sweeps over the dataset to train')
parser.add_argument('--output_dir', '-o', default='./outputs',
help='Directory to output the result')
parser.add_argument('--gpu_id', '-g', default=0,
help='ID of the GPU to be used. Set to -1 if you use CPU')
args = parser.parse_args()
# Download the MNIST data if you haven't downloaded it yet
train, test = download_mnist()
gpu_id = args.gpu_id
batchsize = args.batchsize
epochs = args.epochs
    run.log('Batch size', int(batchsize))
    run.log('Epochs', int(epochs))
train_iter = iterators.SerialIterator(train, batchsize)
test_iter = iterators.SerialIterator(test, batchsize,
repeat=False, shuffle=False)
model = MyNetwork()
if gpu_id >= 0:
# Make a specified GPU current
        chainer.backends.cuda.get_device_from_id(gpu_id).use()
model.to_gpu() # Copy the model to the GPU
# Choose an optimizer algorithm
optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
# Give the optimizer a reference to the model so that it
# can locate the model's parameters.
optimizer.setup(model)
while train_iter.epoch < epochs:
# ---------- One iteration of the training loop ----------
train_batch = train_iter.next()
image_train, target_train = concat_examples(train_batch, gpu_id)
# Calculate the prediction of the network
prediction_train = model(image_train)
# Calculate the loss with softmax_cross_entropy
loss = F.softmax_cross_entropy(prediction_train, target_train)
# Calculate the gradients in the network
model.cleargrads()
loss.backward()
# Update all the trainable parameters
optimizer.update()
# --------------------- until here ---------------------
# Check the validation accuracy of prediction after every epoch
if train_iter.is_new_epoch: # If this iteration is the final iteration of the current epoch
# Display the training loss
print('epoch:{:02d} train_loss:{:.04f} '.format(
train_iter.epoch, float(to_cpu(loss.array))), end='')
test_losses = []
test_accuracies = []
while True:
test_batch = test_iter.next()
image_test, target_test = concat_examples(test_batch, gpu_id)
# Forward the test data
prediction_test = model(image_test)
# Calculate the loss
loss_test = F.softmax_cross_entropy(prediction_test, target_test)
test_losses.append(to_cpu(loss_test.array))
# Calculate the accuracy
accuracy = F.accuracy(prediction_test, target_test)
accuracy.to_cpu()
test_accuracies.append(accuracy.array)
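                # manually rewind the test iterator so it can be reused in the next epoch's evaluation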
if test_iter.is_new_epoch:
test_iter.epoch = 0
test_iter.current_position = 0
test_iter.is_new_epoch = False
test_iter._pushed_position = None
break
val_accuracy = np.mean(test_accuracies)
print('val_loss:{:.04f} val_accuracy:{:.04f}'.format(
np.mean(test_losses), val_accuracy))
run.log("Accuracy", np.float(val_accuracy))
serializers.save_npz(os.path.join(args.output_dir, 'model.npz'), model)
if __name__ == '__main__':
main()

View File

@@ -1,50 +0,0 @@
import numpy as np
import os
import json
from datautils import download_mnist
from chainer import serializers, using_config, Variable, datasets
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from azureml.core.model import Model
class MyNetwork(Chain):
def __init__(self, n_mid_units=100, n_out=10):
super(MyNetwork, self).__init__()
with self.init_scope():
self.l1 = L.Linear(None, n_mid_units)
self.l2 = L.Linear(n_mid_units, n_mid_units)
self.l3 = L.Linear(n_mid_units, n_out)
def forward(self, x):
h = F.relu(self.l1(x))
h = F.relu(self.l2(h))
return self.l3(h)
def init():
global model
# AZUREML_MODEL_DIR is an environment variable created during deployment.
# It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
# For multiple models, it points to the folder containing all deployed models (./azureml-models)
model_root = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'model.npz')
# Load our saved artifacts
model = MyNetwork()
serializers.load_npz(model_root, model)
def run(input_data):
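    # expected payload: '{"data": [<index of an MNIST test image>]}'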
i = np.array(json.loads(input_data)['data'])
_, test = download_mnist()
x = Variable(np.asarray([test[i][0]]))
y = model(x)
return np.ndarray.tolist(y.data.argmax(axis=1))

View File

@@ -1,50 +0,0 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import glob
import gzip
import numpy as np
import os
import struct
from azureml.core import Dataset
from azureml.opendatasets import MNIST
from chainer.datasets import tuple_dataset
# load compressed MNIST gz files and return numpy arrays
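# IDX format: a 4-byte magic number, big-endian uint32 header fields, then raw uint8 payload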
def load_data(filename, label=False):
with gzip.open(filename) as gz:
struct.unpack('I', gz.read(4))
n_items = struct.unpack('>I', gz.read(4))
if not label:
n_rows = struct.unpack('>I', gz.read(4))[0]
n_cols = struct.unpack('>I', gz.read(4))[0]
res = np.frombuffer(gz.read(n_items[0] * n_rows * n_cols), dtype=np.uint8)
res = res.reshape(n_items[0], n_rows * n_cols)
else:
res = np.frombuffer(gz.read(n_items[0]), dtype=np.uint8)
res = res.reshape(n_items[0], 1)
return res
def download_mnist():
data_folder = os.path.join(os.getcwd(), 'data/mnist')
os.makedirs(data_folder, exist_ok=True)
mnist_file_dataset = MNIST.get_file_dataset()
mnist_file_dataset.download(data_folder, overwrite=True)
X_train = load_data(glob.glob(os.path.join(data_folder, "**/train-images-idx3-ubyte.gz"),
recursive=True)[0], False) / 255.0
X_test = load_data(glob.glob(os.path.join(data_folder, "**/t10k-images-idx3-ubyte.gz"),
recursive=True)[0], False) / 255.0
y_train = load_data(glob.glob(os.path.join(data_folder, "**/train-labels-idx1-ubyte.gz"),
recursive=True)[0], True).reshape(-1)
y_test = load_data(glob.glob(os.path.join(data_folder, "**/t10k-labels-idx1-ubyte.gz"),
recursive=True)[0], True).reshape(-1)
train = tuple_dataset.TupleDataset(X_train.astype(np.float32), y_train.astype(np.int32))
test = tuple_dataset.TupleDataset(X_test.astype(np.float32), y_test.astype(np.int32))
return train, test

View File

@@ -1,809 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/chainer/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Train and hyperparameter tune with Chainer\n",
"\n",
"In this tutorial, we demonstrate how to use the Azure ML Python SDK to train a Convolutional Neural Network (CNN) on a single-node GPU with Chainer to perform handwritten digit recognition on the popular MNIST dataset. We will also demonstrate how to perform hyperparameter tuning of the model using Azure ML's HyperDrive service."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Prerequisites\n",
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Check core SDK version number\n",
"import azureml.core\n",
"\n",
"print(\"SDK version:\", azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Diagnostics\n",
"Opt-in diagnostics for better experience, quality, and security of future releases."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"Diagnostics"
]
},
"outputs": [],
"source": [
"from azureml.telemetry import set_diagnostics_collection\n",
"\n",
"set_diagnostics_collection(send_diagnostics=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize workspace\n",
"Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.workspace import Workspace\n",
"\n",
"ws = Workspace.from_config()\n",
"print('Workspace name: ' + ws.name, \n",
" 'Azure region: ' + ws.location, \n",
" 'Subscription id: ' + ws.subscription_id, \n",
" 'Resource group: ' + ws.resource_group, sep = '\\n')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create or Attach existing AmlCompute\n",
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
"\n",
"**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace, this code will skip the creation process.\n",
"\n",
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# choose a name for your cluster\n",
"cluster_name = \"gpu-cluster\"\n",
"\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cluster_name)\n",
" print('Found existing compute target.')\n",
"except ComputeTargetException:\n",
" print('Creating a new compute target...')\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',\n",
" max_nodes=4)\n",
"\n",
" # create the cluster\n",
" compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)\n",
"\n",
"# use get_status() to get a detailed status for the current cluster. \n",
"print(compute_target.get_status().serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The above code creates a GPU cluster. If you instead want to create a CPU cluster, provide a different VM size to the `vm_size` parameter, such as `STANDARD_D2_V2`."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train model on the remote compute\n",
"Now that you have your data and training script prepared, you are ready to train on your remote compute cluster. You can take advantage of Azure compute to leverage GPUs to cut down your training time. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a project directory\n",
"Create a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script and any additional files your training script depends on."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"project_folder = './chainer-mnist'\n",
"os.makedirs(project_folder, exist_ok=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prepare training script\n",
"Now you will need to create your training script. In this tutorial, the training script is already provided for you at `chainer_mnist.py`. In practice, you should be able to take any custom training script as is and run it with Azure ML without having to modify your code.\n",
"\n",
"However, if you would like to use Azure ML's [tracking and metrics](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#metrics) capabilities, you will have to add a small amount of Azure ML code inside your training script. \n",
"\n",
"In `chainer_mnist.py`, we will log some metrics to our Azure ML run. To do so, we will access the Azure ML `Run` object within the script:\n",
"```Python\n",
"from azureml.core.run import Run\n",
"run = Run.get_context()\n",
"```\n",
"Further within `chainer_mnist.py`, we log the batchsize and epochs parameters, and the highest accuracy the model achieves:\n",
"```Python\n",
"run.log('Batch size', np.int(args.batchsize))\n",
"run.log('Epochs', np.int(args.epochs))\n",
"\n",
"run.log('Accuracy', np.float(val_accuracy))\n",
"```\n",
"These run metrics will become particularly important when we begin hyperparameter tuning our model in the \"Tune model hyperparameters\" section."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Once your script is ready, copy the training script `chainer_mnist.py` into your project directory."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import shutil\n",
"\n",
"shutil.copy('chainer_mnist.py', project_folder)\n",
"shutil.copy('chainer_score.py', project_folder)\n",
"shutil.copy('datautils.py', project_folder)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create an experiment\n",
"Create an [Experiment](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#experiment) to track all the runs in your workspace for this Chainer tutorial. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment_name = 'chainer-mnist'\n",
"experiment = Experiment(ws, name=experiment_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create an environment\n",
"\n",
"Define a conda environment YAML file with your training script dependencies and create an Azure ML environment."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%writefile conda_dependencies.yml\n",
"\n",
"channels:\n",
"- conda-forge\n",
"dependencies:\n",
"- python=3.6.2\n",
"- pip=21.3.1\n",
"- pip:\n",
" - azureml-defaults\n",
" - azureml-opendatasets\n",
" - chainer==5.1.0\n",
" - cupy-cuda100==5.1.0\n",
" - mpi4py==3.0.0\n",
" - pytest"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Environment\n",
"from azureml.core.runconfig import DockerConfiguration\n",
"\n",
"chainer_env = Environment.from_conda_specification(name = 'chainer-5.1.0-gpu', file_path = './conda_dependencies.yml')\n",
"\n",
"# Specify a GPU base image\n",
"chainer_env.docker.base_image = 'mcr.microsoft.com/azureml/openmpi3.1.2-cuda10.0-cudnn7-ubuntu18.04'\n",
"\n",
"docker_config = DockerConfiguration(use_docker=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Configure your training job\n",
"\n",
"Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import ScriptRunConfig\n",
"\n",
"src = ScriptRunConfig(source_directory=project_folder,\n",
" script='chainer_mnist.py',\n",
" arguments=['--epochs', 10, '--batchsize', 128, '--output_dir', './outputs'],\n",
" compute_target=compute_target,\n",
" environment=chainer_env,\n",
" docker_runtime_config=docker_config)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit job\n",
"Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"run = experiment.submit(src)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Monitor your run\n",
"You can monitor the progress of the run with a Jupyter widget. Like the run submission, the widget is asynchronous and provides live updates every 10-15 seconds until the job completes."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"\n",
"RunDetails(run).show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# to get more details of your run\n",
"print(run.get_details())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Tune model hyperparameters\n",
"Now that we've seen how to do a simple Chainer training run using the SDK, let's see if we can further improve the accuracy of our model. We can optimize our model's hyperparameters using Azure Machine Learning's hyperparameter tuning capabilities."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Start a hyperparameter sweep\n",
"First, we will define the hyperparameter space to sweep over. Let's tune the batch size and epochs parameters. In this example we will use random sampling to try different configuration sets of hyperparameters to maximize our primary metric, accuracy.\n",
"\n",
"Then, we specify the early termination policy to use to early terminate poorly performing runs. Here we use the `BanditPolicy`, which will terminate any run that doesn't fall within the slack factor of our primary evaluation metric. In this tutorial, we will apply this policy every epoch (since we report our `Accuracy` metric every epoch and `evaluation_interval=1`). Notice we will delay the first policy evaluation until after the first `3` epochs (`delay_evaluation=3`).\n",
"Refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-tune-hyperparameters#specify-an-early-termination-policy) for more information on the BanditPolicy and other policies available."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.hyperdrive.runconfig import HyperDriveConfig\n",
"from azureml.train.hyperdrive.sampling import RandomParameterSampling\n",
"from azureml.train.hyperdrive.policy import BanditPolicy\n",
"from azureml.train.hyperdrive.run import PrimaryMetricGoal\n",
"from azureml.train.hyperdrive.parameter_expressions import choice\n",
" \n",
"\n",
"param_sampling = RandomParameterSampling( {\n",
" \"--batchsize\": choice(128, 256),\n",
" \"--epochs\": choice(5, 10, 20, 40)\n",
" }\n",
")\n",
"\n",
"hyperdrive_config = HyperDriveConfig(run_config=src,\n",
" hyperparameter_sampling=param_sampling, \n",
" primary_metric_name='Accuracy',\n",
" policy=BanditPolicy(evaluation_interval=1, slack_factor=0.1, delay_evaluation=3),\n",
" primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,\n",
" max_total_runs=8,\n",
" max_concurrent_runs=4)"
]
},
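To make the slack factor concrete, a small worked example (the numbers are illustrative): with `slack_factor=0.1`, a run is terminated at an evaluation interval if its reported accuracy falls below the best accuracy seen so far divided by `1 + slack_factor`.

```Python
best_so_far = 0.90   # best accuracy reported by any run at this interval
slack_factor = 0.1

# runs reporting less than this accuracy at the same interval are terminated
cutoff = best_so_far / (1 + slack_factor)
print(round(cutoff, 3))  # 0.818
```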
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, lauch the hyperparameter tuning job."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# start the HyperDrive run\n",
"hyperdrive_run = experiment.submit(hyperdrive_config)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Monitor HyperDrive runs\n",
"You can monitor the progress of the runs with the following Jupyter widget. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"RunDetails(hyperdrive_run).show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"hyperdrive_run.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"assert(hyperdrive_run.get_status() == \"Completed\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Warm start a Hyperparameter Tuning experiment and resuming child runs\n",
"Often times, finding the best hyperparameter values for your model can be an iterative process, needing multiple tuning runs that learn from previous hyperparameter tuning runs. Reusing knowledge from these previous runs will accelerate the hyperparameter tuning process, thereby reducing the cost of tuning the model and will potentially improve the primary metric of the resulting model. When warm starting a hyperparameter tuning experiment with Bayesian sampling, trials from the previous run will be used as prior knowledge to intelligently pick new samples, so as to improve the primary metric. Additionally, when using Random or Grid sampling, any early termination decisions will leverage metrics from the previous runs to determine poorly performing training runs. \n",
"\n",
"Azure Machine Learning allows you to warm start your hyperparameter tuning run by leveraging knowledge from up to 5 previously completed hyperparameter tuning parent runs. \n",
"\n",
"Additionally, there might be occasions when individual training runs of a hyperparameter tuning experiment are cancelled due to budget constraints or fail due to other reasons. It is now possible to resume such individual training runs from the last checkpoint (assuming your training script handles checkpoints). Resuming an individual training run will use the same hyperparameter configuration and mount the storage used for that run. The training script should accept the \"--resume-from\" argument, which contains the checkpoint or model files from which to resume the training run. You can also resume individual runs as part of an experiment that spends additional budget on hyperparameter tuning. Any additional budget, after resuming the specified training runs is used for exploring additional configurations.\n",
"\n",
"For more information on warm starting and resuming hyperparameter tuning runs, please refer to the [Hyperparameter Tuning for Azure Machine Learning documentation](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparameters) \n",
"\n",
"### Find and register best model\n",
"When all jobs finish, we can find out the one that has the highest accuracy."
]
},
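As a rough sketch of the checkpoint handling described above (the checkpoint file name is hypothetical; match whatever your training script actually saves), the script only needs to accept the argument and load from it before training starts:

```Python
import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument('--resume-from', default=None,
                    help='directory with checkpoint files mounted from a previous run')
args, _ = parser.parse_known_args()

if args.resume_from:
    # hypothetical checkpoint name; use whatever your training script saves
    checkpoint = os.path.join(args.resume_from, 'model.npz')
    print('resuming training from', checkpoint)
    # e.g. chainer.serializers.load_npz(checkpoint, model) once the model is built
```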
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run = hyperdrive_run.get_best_run_by_primary_metric()\n",
"print(best_run.get_details()['runDefinition']['arguments'])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now, let's list the model files uploaded during the run."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(best_run.get_file_names())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can then register the folder (and all files in it) as a model named `chainer-dnn-mnist` under the workspace for deployment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = best_run.register_model(model_name='chainer-dnn-mnist', model_path='outputs/model.npz')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Deploy the model in ACI\n",
"Now, we are ready to deploy the model as a web service running in Azure Container Instance, [ACI](https://azure.microsoft.com/en-us/services/container-instances/). Azure Machine Learning accomplishes this by constructing a Docker image with the scoring logic and model baked in.\n",
"\n",
"### Create scoring script\n",
"First, we will create a scoring script that will be invoked by the web service call.\n",
"+ Now that the scoring script must have two required functions, `init()` and `run(input_data)`.\n",
" + In `init()`, you typically load the model into a global object. This function is executed only once when the Docker contianer is started.\n",
" + In `run(input_data)`, the model is used to predict a value based on the input data. The input and output to `run` uses NPZ as the serialization and de-serialization format because it is the preferred format for Chainer, but you are not limited to it.\n",
" \n",
"Refer to the scoring script `chainer_score.py` for this tutorial. Our web service will use this file to predict. When writing your own scoring script, don't forget to test it locally first before you go and deploy the web service."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"shutil.copy('chainer_score.py', project_folder)"
]
},
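Before deploying, a minimal sketch of exercising the scoring script locally (assuming a trained `model.npz` has been downloaded to a local `./model` folder and `chainer_score.py` is importable from the current directory):

```Python
import json
import os

# point the script at the local model folder instead of the deployed one
os.environ['AZUREML_MODEL_DIR'] = './model'

import chainer_score  # the scoring script shown earlier

chainer_score.init()
print(chainer_score.run(json.dumps({'data': [0]})))  # predict on test image 0
```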
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create myenv.yml\n",
"We also need to create an environment file so that Azure Machine Learning can install the necessary packages in the Docker image which are required by your scoring script. In this case, we need to specify conda package `numpy` and pip install `chainer`. Please note that you must indicate azureml-defaults with verion >= 1.0.45 as a pip dependency, because it contains the functionality needed to host the model as a web service."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.runconfig import CondaDependencies\n",
"\n",
"cd = CondaDependencies.create()\n",
"cd.add_conda_package('numpy')\n",
"cd.add_pip_package('chainer==5.1.0')\n",
"cd.add_pip_package(\"azureml-defaults==1.43.0\")\n",
"cd.add_pip_package(\"azureml-opendatasets\")\n",
"cd.save_to_file(base_directory='./', conda_file_path='myenv.yml')\n",
"\n",
"print(cd.serialize_to_string())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Deploy to ACI\n",
"We are almost ready to deploy. Create the inference configuration and deployment configuration and deploy to ACI. This cell will run for about 7-8 minutes."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.webservice import AciWebservice\n",
"from azureml.core.model import InferenceConfig\n",
"from azureml.core.webservice import Webservice\n",
"from azureml.core.model import Model\n",
"from azureml.core.environment import Environment\n",
"\n",
"\n",
"myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\n",
"inference_config = InferenceConfig(entry_script=\"chainer_score.py\", environment=myenv,\n",
" source_directory=project_folder)\n",
"\n",
"aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,\n",
" auth_enabled=True, # this flag generates API keys to secure access\n",
" memory_gb=2,\n",
" tags={'name': 'mnist', 'framework': 'Chainer'},\n",
" description='Chainer DNN with MNIST')\n",
"\n",
"service = Model.deploy(workspace=ws,\n",
" name='chainer-mnist-1',\n",
" models=[model],\n",
" inference_config=inference_config,\n",
" deployment_config=aciconfig)\n",
"service.wait_for_deployment(True)\n",
"print(service.state)\n",
"print(service.scoring_uri)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Tip: If something goes wrong with the deployment, the first thing to look at is the logs from the service by running the following command:** "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(service.get_logs())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This is the scoring web service endpoint:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(service.scoring_uri)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Test the deployed model\n",
"Let's test the deployed model. Pick a random sample from the test set, and send it to the web service hosted in ACI for a prediction. Note, here we are using the an HTTP request to invoke the service.\n",
"\n",
"We can retrieve the API keys used for accessing the HTTP endpoint and construct a raw HTTP request to send to the service. Don't forget to add key to the HTTP header."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# retreive the API keys. two keys were generated.\n",
"key1, Key2 = service.get_keys()\n",
"print(key1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
"import urllib\n",
"import gzip\n",
"import numpy as np\n",
"import struct\n",
"import requests\n",
"\n",
"\n",
"# load compressed MNIST gz files and return numpy arrays\n",
"def load_data(filename, label=False):\n",
" with gzip.open(filename) as gz:\n",
" struct.unpack('I', gz.read(4))\n",
" n_items = struct.unpack('>I', gz.read(4))\n",
" if not label:\n",
" n_rows = struct.unpack('>I', gz.read(4))[0]\n",
" n_cols = struct.unpack('>I', gz.read(4))[0]\n",
" res = np.frombuffer(gz.read(n_items[0] * n_rows * n_cols), dtype=np.uint8)\n",
" res = res.reshape(n_items[0], n_rows * n_cols)\n",
" else:\n",
" res = np.frombuffer(gz.read(n_items[0]), dtype=np.uint8)\n",
" res = res.reshape(n_items[0], 1)\n",
" return res\n",
"\n",
"data_folder = os.path.join(os.getcwd(), 'data/mnist')\n",
"os.makedirs(data_folder, exist_ok=True)\n",
"\n",
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-images-idx3-ubyte.gz',\n",
" filename=os.path.join(data_folder, 't10k-images-idx3-ubyte.gz'))\n",
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-labels-idx1-ubyte.gz',\n",
" filename=os.path.join(data_folder, 't10k-labels-idx1-ubyte.gz'))\n",
"\n",
"X_test = load_data(os.path.join(data_folder, 't10k-images-idx3-ubyte.gz'), False) / np.float32(255.0)\n",
"y_test = load_data(os.path.join(data_folder, 't10k-labels-idx1-ubyte.gz'), True).reshape(-1)\n",
"\n",
"# send a random row from the test set to score\n",
"random_index = np.random.randint(0, len(X_test)-1)\n",
"input_data = \"{\\\"data\\\": [\" + str(random_index) + \"]}\"\n",
"\n",
"headers = {'Content-Type':'application/json', 'Authorization': 'Bearer ' + key1}\n",
"\n",
"# send sample to service for scoring\n",
"resp = requests.post(service.scoring_uri, input_data, headers=headers)\n",
"\n",
"print(\"label:\", y_test[random_index])\n",
"print(\"prediction:\", resp.text[1])\n",
"\n",
"plt.imshow(X_test[random_index].reshape((28,28)), cmap='gray')\n",
"plt.axis('off')\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's look at the workspace after the web service was deployed. You should see\n",
"\n",
" + a registered model named 'chainer-dnn-mnist' and with the id 'chainer-dnn-mnist:1'\n",
" + a webservice called 'chainer-mnist-svc' with some scoring URL"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = ws.models['chainer-dnn-mnist']\n",
"print(\"Model: {}, ID: {}\".format('chainer-dnn-mnist', model.id))\n",
" \n",
"webservice = ws.webservices['chainer-mnist-1']\n",
"print(\"Webservice: {}, scoring URI: {}\".format('chainer-mnist-1', webservice.scoring_uri))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Clean up"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can delete the ACI deployment with a simple delete API call."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"service.delete()"
]
}
],
"metadata": {
"authors": [
{
"name": "nagaur"
}
],
"category": "training",
"compute": [
"AML Compute"
],
"datasets": [
"MNIST"
],
"deployment": [
"Azure Container Instance"
],
"exclude_from_index": false,
"framework": [
"Chainer"
],
"friendly_name": "Train a model with hyperparameter tuning",
"index_order": 1,
"kernelspec": {
"display_name": "Python 3.8 - AzureML",
"language": "python",
"name": "python38-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.7"
},
"tags": [
"None"
],
"task": "Train a Convolutional Neural Network (CNN)"
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,9 +0,0 @@
name: train-hyperparameter-tune-deploy-with-chainer
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets
  - numpy
  - matplotlib
  - requests
  - azureml-opendatasets

View File

@@ -166,7 +166,7 @@ def download_data():
from zipfile import ZipFile from zipfile import ZipFile
# download data # download data
data_file = './fowl_data.zip' data_file = './fowl_data.zip'
download_url = 'https://azureopendatastorage.blob.core.windows.net/testpublic/temp/fowl_data.zip' download_url = 'https://azuremlexamples.blob.core.windows.net/datasets/fowl_data.zip'
urllib.request.urlretrieve(download_url, filename=data_file) urllib.request.urlretrieve(download_url, filename=data_file)
# extract files # extract files

View File

@@ -176,7 +176,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"### Download training data\n", "### Download training data\n",
"The dataset we will use (located on a public blob [here](https://azureopendatastorage.blob.core.windows.net/testpublic/temp/fowl_data.zip) as a zip file) consists of about 120 training images each for turkeys and chickens, with 100 validation images for each class. The images are a subset of the [Open Images v5 Dataset](https://storage.googleapis.com/openimages/web/index.html). We will download and extract the dataset as part of our training script `pytorch_train.py`" "The dataset we will use (located on a public blob [here](https://azuremlexamples.blob.core.windows.net/datasets/fowl_data.zip) as a zip file) consists of about 120 training images each for turkeys and chickens, with 100 validation images for each class. The images are a subset of the [Open Images v5 Dataset](https://storage.googleapis.com/openimages/web/index.html). We will download and extract the dataset as part of our training script `pytorch_train.py`"
] ]
}, },
{ {
@@ -260,15 +260,14 @@
"\n", "\n",
"channels:\n", "channels:\n",
"- conda-forge\n", "- conda-forge\n",
"- pytorch\n",
"dependencies:\n", "dependencies:\n",
"- python=3.6.2\n", "- python=3.8.12\n",
"- pip=21.3.1\n", "- pip=21.3.1\n",
"- pytorch::pytorch==1.8.1\n",
"- pytorch::torchvision==0.9.1\n",
"- pip:\n", "- pip:\n",
" - azureml-defaults==1.43.0\n", " - azureml-defaults"
" - torch==1.6.0\n",
" - torchvision==0.7.0\n",
" - future==0.17.1\n",
" - pillow"
] ]
}, },
{ {

View File

@@ -21,7 +21,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"# Distributed TensorFlow with Horovod\n", "# Distributed TensorFlow with Horovod\n",
"In this tutorial, you will train a word2vec model in TensorFlow using distributed training via [Horovod](https://github.com/uber/horovod)." "In this tutorial, you will train a model in TensorFlow using distributed training via [Horovod](https://github.com/uber/horovod)."
] ]
}, },
{ {
@@ -144,26 +144,6 @@
"The above code creates a GPU cluster. If you instead want to create a CPU cluster, provide a different VM size to the `vm_size` parameter, such as `STANDARD_D2_V2`." "The above code creates a GPU cluster. If you instead want to create a CPU cluster, provide a different VM size to the `vm_size` parameter, such as `STANDARD_D2_V2`."
] ]
}, },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create a Dataset for Files\n",
"A Dataset can reference single or multiple files in your datastores or public urls. The files can be of any format. FileDataset provides you with the ability to download or mount the files to your compute. By creating a dataset, you create a reference to the data source location. The data remains in its existing location, so no extra storage cost is incurred. [Learn More](https://aka.ms/azureml/howto/createdatasets)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"\n",
"web_paths = ['https://azureopendatastorage.blob.core.windows.net/testpublic/text8.zip']\n",
"dataset = Dataset.File.from_files(path=web_paths)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -171,28 +151,6 @@
"You may want to register datasets using the register() method to your workspace so that the dataset can be shared with others, reused across various experiments, and referred to by name in your training script." "You may want to register datasets using the register() method to your workspace so that the dataset can be shared with others, reused across various experiments, and referred to by name in your training script."
] ]
}, },
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dataset = dataset.register(workspace=ws,\n",
" name='wikipedia-text',\n",
" description='Wikipedia text training and test dataset',\n",
" create_new_version=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# list the files referenced by the dataset\n",
"dataset.to_path()"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -200,43 +158,6 @@
"## Train model on the remote compute" "## Train model on the remote compute"
] ]
}, },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a project directory\n",
"Create a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script, and any additional files your training script depends on."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"project_folder = './tf-distr-hvd'\n",
"os.makedirs(project_folder, exist_ok=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copy the training script `tf_horovod_word2vec.py` into this project directory."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import shutil\n",
"\n",
"shutil.copy('tf_horovod_word2vec.py', project_folder)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -274,7 +195,7 @@
"source": [ "source": [
"from azureml.core import Environment\n", "from azureml.core import Environment\n",
"\n", "\n",
"tf_env = Environment.get(ws, name='AzureML-TensorFlow-1.13-GPU')" "tf_env = Environment.get(ws, name='AzureML-tensorflow-2.7-ubuntu20.04-py38-cuda11-gpu')"
] ]
}, },
{ {
@@ -297,9 +218,8 @@
"from azureml.core import ScriptRunConfig\n", "from azureml.core import ScriptRunConfig\n",
"from azureml.core.runconfig import MpiConfiguration\n", "from azureml.core.runconfig import MpiConfiguration\n",
"\n", "\n",
"src = ScriptRunConfig(source_directory=project_folder,\n", "src = ScriptRunConfig(source_directory=\"src\",\n",
" script='tf_horovod_word2vec.py',\n", " script='train.py',\n",
" arguments=['--input_data', dataset.as_mount()],\n",
" compute_target=compute_target,\n", " compute_target=compute_target,\n",
" environment=tf_env,\n", " environment=tf_env,\n",
" distributed_job_config=MpiConfiguration(node_count=2))" " distributed_job_config=MpiConfiguration(node_count=2))"

View File

@@ -2,10 +2,3 @@ name: distributed-tensorflow-with-horovod
dependencies: dependencies:
- pip: - pip:
- azureml-sdk - azureml-sdk
- azureml-widgets
- keras
- tensorflow-gpu==1.13.2
- horovod==0.19.1
- matplotlib
- pandas
- fuse

View File

@@ -0,0 +1,120 @@
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Script adapted from: https://github.com/horovod/horovod/blob/master/examples/tensorflow2_keras_mnist.py
# ==============================================================================
import tensorflow as tf
import horovod.tensorflow.keras as hvd
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--learning-rate", "-lr", type=float, default=0.001)
parser.add_argument("--epochs", type=int, default=24)
parser.add_argument("--steps-per-epoch", type=int, default=500)
args = parser.parse_args()
# Horovod: initialize Horovod.
hvd.init()
# Horovod: pin GPU to be used to process local rank (one GPU per process)
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], "GPU")
(mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data(
path="mnist-%d.npz" % hvd.rank()
)
dataset = tf.data.Dataset.from_tensor_slices(
(
tf.cast(mnist_images[..., tf.newaxis] / 255.0, tf.float32),
tf.cast(mnist_labels, tf.int64),
)
)
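# repeat() makes the stream endless; epoch length is set via steps_per_epoch in fit() below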
dataset = dataset.repeat().shuffle(10000).batch(128)
mnist_model = tf.keras.Sequential(
[
tf.keras.layers.Conv2D(32, [3, 3], activation="relu"),
tf.keras.layers.Conv2D(64, [3, 3], activation="relu"),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation="relu"),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10, activation="softmax"),
]
)
# Horovod: adjust learning rate based on number of GPUs.
scaled_lr = args.learning_rate * hvd.size()
opt = tf.optimizers.Adam(scaled_lr)
# Horovod: add Horovod DistributedOptimizer.
opt = hvd.DistributedOptimizer(opt)
# Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow
# uses hvd.DistributedOptimizer() to compute gradients.
mnist_model.compile(
loss=tf.losses.SparseCategoricalCrossentropy(),
optimizer=opt,
metrics=["accuracy"],
experimental_run_tf_function=False,
)
callbacks = [
# Horovod: broadcast initial variable states from rank 0 to all other processes.
# This is necessary to ensure consistent initialization of all workers when
# training is started with random weights or restored from a checkpoint.
hvd.callbacks.BroadcastGlobalVariablesCallback(0),
# Horovod: average metrics among workers at the end of every epoch.
#
# Note: This callback must be in the list before the ReduceLROnPlateau,
# TensorBoard or other metrics-based callbacks.
hvd.callbacks.MetricAverageCallback(),
# Horovod: using `lr = 1.0 * hvd.size()` from the very beginning leads to worse final
# accuracy. Scale the learning rate `lr = 1.0` ---> `lr = 1.0 * hvd.size()` during
# the first three epochs. See https://arxiv.org/abs/1706.02677 for details.
hvd.callbacks.LearningRateWarmupCallback(
warmup_epochs=3, initial_lr=scaled_lr, verbose=1
),
]
# Horovod: save checkpoints only on worker 0 to prevent other workers from corrupting them.
if hvd.rank() == 0:
output_dir = "./outputs"
os.makedirs(output_dir, exist_ok=True)
callbacks.append(
tf.keras.callbacks.ModelCheckpoint(
os.path.join(output_dir, "checkpoint-{epoch}.h5")
)
)
# Horovod: write logs on worker 0.
verbose = 1 if hvd.rank() == 0 else 0
# Train the model.
# Horovod: adjust number of steps based on number of GPUs.
mnist_model.fit(
dataset,
steps_per_epoch=args.steps_per_epoch // hvd.size(),
callbacks=callbacks,
epochs=args.epochs,
verbose=verbose,
)

View File

@@ -1,238 +0,0 @@
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
# Modifications copyright (C) 2017 Uber Technologies, Inc.
# Additional modifications copyright (C) Microsoft Corporation
# Licensed under the Apache License, Version 2.0
# Script adapted from: https://github.com/uber/horovod/blob/master/examples/tensorflow_word2vec.py
# ======================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import argparse
import glob
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import horovod.tensorflow as hvd
from azureml.core.run import Run
# Horovod: initialize Horovod.
hvd.init()
parser = argparse.ArgumentParser()
parser.add_argument('--input_data', type=str, help='training data')
args = parser.parse_args()
input_data = args.input_data
print("the input data is at %s" % input_data)
# Step 1: Read data.
filename = input_data
# Read the data into a list of strings.
def read_data(filename):
    """Extract the first file enclosed in a zip file as a list of words."""
    with zipfile.ZipFile(filename) as f:
        data = tf.compat.as_str(f.read(f.namelist()[0])).split()
    return data


vocabulary = read_data(filename)
print('Data size', len(vocabulary))

# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000


def build_dataset(words, n_words):
    """Process raw inputs into a dataset."""
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(n_words - 1))
    dictionary = dict()
    for word, _ in count:
        dictionary[word] = len(dictionary)
    data = list()
    unk_count = 0
    for word in words:
        if word in dictionary:
            index = dictionary[word]
        else:
            index = 0  # dictionary['UNK']
            unk_count += 1
        data.append(index)
    count[0][1] = unk_count
    reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reversed_dictionary


data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
                                                            vocabulary_size)
del vocabulary  # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])


# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
    assert num_skips <= 2 * skip_window
    # Adjust batch_size to match num_skips
    batch_size = batch_size // num_skips * num_skips
    span = 2 * skip_window + 1  # [ skip_window target skip_window ]
    # Backtrack a little bit to avoid skipping words in the end of a batch
    data_index = random.randint(0, len(data) - span - 1)
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    buffer = collections.deque(maxlen=span)
    for _ in range(span):
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    for i in range(batch_size // num_skips):
        target = skip_window  # target label at the center of the buffer
        targets_to_avoid = [skip_window]
        for j in range(num_skips):
            while target in targets_to_avoid:
                target = random.randint(0, span - 1)
            targets_to_avoid.append(target)
            batch[i * num_skips + j] = buffer[skip_window]
            labels[i * num_skips + j, 0] = buffer[target]
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    return batch, labels


batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
    print(batch[i], reverse_dictionary[batch[i]],
          '->', labels[i, 0], reverse_dictionary[labels[i, 0]])

# Step 4: Build and train a skip-gram model.
max_batch_size = 128
embedding_size = 128  # Dimension of the embedding vector.
skip_window = 1  # How many words to consider left and right.
num_skips = 2  # How many times to reuse an input to generate a label.

# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16  # Random set of words to evaluate similarity on.
valid_window = 100  # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64  # Number of negative examples to sample.

graph = tf.Graph()
with graph.as_default():
    # Input data.
    train_inputs = tf.placeholder(tf.int32, shape=[None])
    train_labels = tf.placeholder(tf.int32, shape=[None, 1])
    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

    # Look up embeddings for inputs.
    embeddings = tf.Variable(
        tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
    embed = tf.nn.embedding_lookup(embeddings, train_inputs)

    # Construct the variables for the NCE loss
    nce_weights = tf.Variable(
        tf.truncated_normal([vocabulary_size, embedding_size],
                            stddev=1.0 / math.sqrt(embedding_size)))
    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

    # Compute the average NCE loss for the batch.
    # tf.nce_loss automatically draws a new sample of the negative labels each
    # time we evaluate the loss.
    loss = tf.reduce_mean(
        tf.nn.nce_loss(weights=nce_weights,
                       biases=nce_biases,
                       labels=train_labels,
                       inputs=embed,
                       num_sampled=num_sampled,
                       num_classes=vocabulary_size))

    # Horovod: adjust learning rate based on number of GPUs.
    optimizer = tf.train.GradientDescentOptimizer(1.0 * hvd.size())
    # Horovod: add Horovod Distributed Optimizer.
    optimizer = hvd.DistributedOptimizer(optimizer)
    train_op = optimizer.minimize(loss)

    # Compute the cosine similarity between minibatch examples and all embeddings.
    norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
    normalized_embeddings = embeddings / norm
    valid_embeddings = tf.nn.embedding_lookup(
        normalized_embeddings, valid_dataset)
    similarity = tf.matmul(
        valid_embeddings, normalized_embeddings, transpose_b=True)

    # Add variable initializer.
    init = tf.global_variables_initializer()

    # Horovod: broadcast initial variable states from rank 0 to all other processes.
    # This is necessary to ensure consistent initialization of all workers when
    # training is started with random weights or restored from a checkpoint.
    bcast = hvd.broadcast_global_variables(0)

# Step 5: Begin training.
# Horovod: adjust number of steps based on number of GPUs.
num_steps = 4000 // hvd.size() + 1

# Horovod: pin GPU to be used to process local rank (one GPU per process)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())

with tf.Session(graph=graph, config=config) as session:
    # We must initialize all variables before we use them.
    init.run()
    bcast.run()
    print('Initialized')

    run = Run.get_context()
    average_loss = 0
    for step in xrange(num_steps):
        # simulate various sentence length by randomization
        batch_size = random.randint(max_batch_size // 2, max_batch_size)
        batch_inputs, batch_labels = generate_batch(
            batch_size, num_skips, skip_window)
        feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}

        # We perform one update step by evaluating the optimizer op (including it
        # in the list of returned values for session.run()
        _, loss_val = session.run([train_op, loss], feed_dict=feed_dict)
        average_loss += loss_val

        if step % 2000 == 0:
            if step > 0:
                average_loss /= 2000
            # The average loss is an estimate of the loss over the last 2000 batches.
            print('Average loss at step ', step, ': ', average_loss)
            run.log("Loss", average_loss)
            average_loss = 0

    final_embeddings = normalized_embeddings.eval()

    # Evaluate similarity in the end on worker 0.
    if hvd.rank() == 0:
        sim = similarity.eval()
        for i in xrange(valid_size):
            valid_word = reverse_dictionary[valid_examples[i]]
            top_k = 8  # number of nearest neighbors
            nearest = (-sim[i, :]).argsort()[1:top_k + 1]
            log_str = 'Nearest to %s:' % valid_word
            for k in xrange(top_k):
                close_word = reverse_dictionary[nearest[k]]
                log_str = '%s %s,' % (log_str, close_word)
            print(log_str)
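The closing similarity block above is a cosine-similarity search over row-normalized embeddings. A self-contained numpy sketch of the same computation on toy data; the vocabulary and embedding matrix below are made up for illustration:

```python
import numpy as np

rng = np.random.default_rng(0)
vocab = ['the', 'of', 'and', 'king', 'queen', 'cat', 'dog']  # toy vocabulary
embeddings = rng.normal(size=(len(vocab), 8))                # toy embedding matrix

# Row-normalize, mirroring `embeddings / norm` in the script above.
normalized = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)

# Cosine similarity of one word against all words is then a matrix product.
query = vocab.index('king')
sim = normalized[query] @ normalized.T

# Sort descending; [1:top_k + 1] skips the query word itself, as in the script.
top_k = 3
nearest = (-sim).argsort()[1:top_k + 1]
print('Nearest to king:', [vocab[i] for i in nearest])
```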

View File

@@ -1,354 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-parameter-server/distributed-tensorflow-with-parameter-server.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Distributed TensorFlow with parameter server\n",
"In this tutorial, you will train a TensorFlow model on the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset using native [distributed TensorFlow](https://www.tensorflow.org/guide/distributed_training)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Prerequisites\n",
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning (AML)\n",
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../../configuration.ipynb) to:\n",
" * install the AML SDK\n",
" * create a workspace and its configuration file (`config.json`)\n",
"* Review the [tutorial](../train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb) on single-node TensorFlow training using the SDK"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Check core SDK version number\n",
"import azureml.core\n",
"\n",
"print(\"SDK version:\", azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Diagnostics\n",
"Opt-in diagnostics for better experience, quality, and security of future releases."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"Diagnostics"
]
},
"outputs": [],
"source": [
"from azureml.telemetry import set_diagnostics_collection\n",
"\n",
"set_diagnostics_collection(send_diagnostics=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize workspace\n",
"Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.workspace import Workspace\n",
"\n",
"ws = Workspace.from_config()\n",
"print('Workspace name: ' + ws.name, \n",
" 'Azure region: ' + ws.location, \n",
" 'Subscription id: ' + ws.subscription_id, \n",
" 'Resource group: ' + ws.resource_group, sep = '\\n')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create or Attach existing AmlCompute\n",
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
"\n",
"**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
"\n",
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# choose a name for your cluster\n",
"cluster_name = \"gpu-cluster\"\n",
"\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cluster_name)\n",
" print('Found existing compute target.')\n",
"except ComputeTargetException:\n",
" print('Creating a new compute target...')\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', \n",
" max_nodes=4)\n",
"\n",
" # create the cluster\n",
" compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n",
"\n",
" compute_target.wait_for_completion(show_output=True)\n",
"\n",
"# use get_status() to get a detailed status for the current cluster. \n",
"print(compute_target.get_status().serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train model on the remote compute\n",
"Now that we have the cluster ready to go, let's run our distributed training job."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a project directory\n",
"Create a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script, and any additional files your training script depends on."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"project_folder = './tf-distr-ps'\n",
"os.makedirs(project_folder, exist_ok=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copy the training script `tf_mnist_replica.py` into this project directory."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import shutil\n",
"\n",
"shutil.copy('tf_mnist_replica.py', project_folder)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create an experiment\n",
"Create an [Experiment](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#experiment) to track all the runs in your workspace for this distributed TensorFlow tutorial. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment_name = 'tf-distr-ps'\n",
"experiment = Experiment(ws, name=experiment_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create an environment\n",
"\n",
"In this tutorial, we will use one of Azure ML's curated TensorFlow environments for training. [Curated environments](https://docs.microsoft.com/azure/machine-learning/how-to-use-environments#use-a-curated-environment) are available in your workspace by default. Specifically, we will use the TensorFlow 1.13 GPU curated environment."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Environment\n",
"\n",
"tf_env = Environment.get(ws, name='AzureML-TensorFlow-1.13-GPU')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Configure the training job\n",
"\n",
"Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on.\n",
"\n",
"In order to execute a distributed TensorFlow run with the parameter server strategy, you must create a `TensorflowConfiguration` object and pass it to the `distributed_job_config` parameter of the ScriptRunConfig constructor. The below code configures a distributed TensorFlow run with `2` workers and `1` parameter server."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import ScriptRunConfig\n",
"from azureml.core.runconfig import TensorflowConfiguration\n",
"\n",
"src = ScriptRunConfig(source_directory=project_folder,\n",
" script='tf_mnist_replica.py',\n",
" arguments=['--num_gpus', 1, '--train_steps', 500],\n",
" compute_target=compute_target,\n",
" environment=tf_env,\n",
" distributed_job_config=TensorflowConfiguration(worker_count=2, parameter_server_count=1))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit job\n",
"Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"run = experiment.submit(src)\n",
"print(run)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Monitor your run\n",
"You can monitor the progress of the run with a Jupyter widget. Like the run submission, the widget is asynchronous and provides live updates every 10-15 seconds until the job completes."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"\n",
"RunDetails(run).show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Alternatively, you can block until the script has completed training before running more code."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"run.wait_for_completion(show_output=True) # this provides a verbose log"
]
}
],
"metadata": {
"authors": [
{
"name": "minxia"
}
],
"category": "training",
"compute": [
"AML Compute"
],
"datasets": [
"MNIST"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"TensorFlow"
],
"friendly_name": "Distributed TensorFlow with parameter server",
"index_order": 1,
"kernelspec": {
"display_name": "Python 3.8 - AzureML",
"language": "python",
"name": "python38-azureml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
},
"tags": [
"None"
],
"task": "Use the TensorFlow estimator to train a model using distributed training"
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,5 +0,0 @@
name: distributed-tensorflow-with-parameter-server
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets
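A conda specification like this one is typically turned into an AzureML environment with `Environment.from_conda_specification`; a minimal sketch, assuming the file is saved as `conda_dependencies.yml` (hypothetical name):

```python
from azureml.core import Environment

# Build an AzureML environment object from the conda specification above;
# the file name here is an assumption.
env = Environment.from_conda_specification(
    name='distributed-tensorflow-with-parameter-server',
    file_path='conda_dependencies.yml')
```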

View File

@@ -1,271 +0,0 @@
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0
# Script adapted from:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/dist_test/python/mnist_replica.py
# ==============================================================================
"""Distributed MNIST training and validation, with model replicas.
A simple softmax model with one hidden layer is defined. The parameters
(weights and biases) are located on one parameter server (ps), while the ops
are executed on two worker nodes by default. The TF sessions also run on the
worker node.
Multiple invocations of this script can be done in parallel, with different
values for --task_index. There should be exactly one invocation with
--task_index, which will create a master session that carries out variable
initialization. The other, non-master, sessions will wait for the master
session to finish the initialization before proceeding to the training stage.
The coordination between the multiple worker invocations occurs due to
the definition of the parameters on the same ps devices. The parameter updates
from one worker is visible to all other workers. As such, the workers can
perform forward computation and gradient calculation in parallel, which
should lead to increased training speed for the simple model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import sys
import tempfile
import time
import json
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from azureml.core.run import Run
flags = tf.app.flags
flags.DEFINE_string("data_dir", "/tmp/mnist-data",
"Directory for storing mnist data")
flags.DEFINE_boolean("download_only", False,
"Only perform downloading of data; Do not proceed to "
"session preparation, model definition or training")
flags.DEFINE_integer("num_gpus", 0, "Total number of gpus for each machine."
"If you don't use GPU, please set it to '0'")
flags.DEFINE_integer("replicas_to_aggregate", None,
"Number of replicas to aggregate before parameter update "
"is applied (For sync_replicas mode only; default: "
"num_workers)")
flags.DEFINE_integer("hidden_units", 100,
"Number of units in the hidden layer of the NN")
flags.DEFINE_integer("train_steps", 200,
"Number of (global) training steps to perform")
flags.DEFINE_integer("batch_size", 100, "Training batch size")
flags.DEFINE_float("learning_rate", 0.01, "Learning rate")
flags.DEFINE_boolean(
"sync_replicas", False,
"Use the sync_replicas (synchronized replicas) mode, "
"wherein the parameter updates from workers are aggregated "
"before applied to avoid stale gradients")
flags.DEFINE_boolean(
"existing_servers", False, "Whether servers already exists. If True, "
"will use the worker hosts via their GRPC URLs (one client process "
"per worker host). Otherwise, will create an in-process TensorFlow "
"server.")
FLAGS = flags.FLAGS
IMAGE_PIXELS = 28
def main(unused_argv):
data_root = os.path.join("outputs", "MNIST")
mnist = None
tf_config = os.environ.get("TF_CONFIG")
if not tf_config or tf_config == "":
raise ValueError("TF_CONFIG not found.")
tf_config_json = json.loads(tf_config)
cluster = tf_config_json.get('cluster')
job_name = tf_config_json.get('task', {}).get('type')
task_index = tf_config_json.get('task', {}).get('index')
job_name = "worker" if job_name == "master" else job_name
sentinel_path = os.path.join(data_root, "complete.txt")
if job_name == "worker" and task_index == 0:
mnist = input_data.read_data_sets(data_root, one_hot=True)
with open(sentinel_path, 'w+') as f:
f.write("download complete")
else:
while not os.path.exists(sentinel_path):
time.sleep(0.01)
mnist = input_data.read_data_sets(data_root, one_hot=True)
if FLAGS.download_only:
sys.exit(0)
print("job name = %s" % job_name)
print("task index = %d" % task_index)
print("number of GPUs = %d" % FLAGS.num_gpus)
# Construct the cluster and start the server
cluster_spec = tf.train.ClusterSpec(cluster)
# Get the number of workers.
num_workers = len(cluster_spec.task_indices("worker"))
if not FLAGS.existing_servers:
# Not using existing servers. Create an in-process server.
server = tf.train.Server(
cluster_spec, job_name=job_name, task_index=task_index)
if job_name == "ps":
server.join()
is_chief = (task_index == 0)
if FLAGS.num_gpus > 0:
# Avoid gpu allocation conflict: now allocate task_num -> #gpu
# for each worker in the corresponding machine
gpu = (task_index % FLAGS.num_gpus)
worker_device = "/job:worker/task:%d/gpu:%d" % (task_index, gpu)
elif FLAGS.num_gpus == 0:
# Just allocate the CPU to worker server
cpu = 0
worker_device = "/job:worker/task:%d/cpu:%d" % (task_index, cpu)
# The device setter will automatically place Variables ops on separate
# parameter servers (ps). The non-Variable ops will be placed on the workers.
# The ps use CPU and workers use corresponding GPU
with tf.device(
tf.train.replica_device_setter(
worker_device=worker_device,
ps_device="/job:ps/cpu:0",
cluster=cluster)):
global_step = tf.Variable(0, name="global_step", trainable=False)
# Variables of the hidden layer
hid_w = tf.Variable(
tf.truncated_normal(
[IMAGE_PIXELS * IMAGE_PIXELS, FLAGS.hidden_units],
stddev=1.0 / IMAGE_PIXELS),
name="hid_w")
hid_b = tf.Variable(tf.zeros([FLAGS.hidden_units]), name="hid_b")
# Variables of the softmax layer
sm_w = tf.Variable(
tf.truncated_normal(
[FLAGS.hidden_units, 10],
stddev=1.0 / math.sqrt(FLAGS.hidden_units)),
name="sm_w")
sm_b = tf.Variable(tf.zeros([10]), name="sm_b")
# Ops: located on the worker specified with task_index
x = tf.placeholder(tf.float32, [None, IMAGE_PIXELS * IMAGE_PIXELS])
y_ = tf.placeholder(tf.float32, [None, 10])
hid_lin = tf.nn.xw_plus_b(x, hid_w, hid_b)
hid = tf.nn.relu(hid_lin)
y = tf.nn.softmax(tf.nn.xw_plus_b(hid, sm_w, sm_b))
cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
opt = tf.train.AdamOptimizer(FLAGS.learning_rate)
if FLAGS.sync_replicas:
if FLAGS.replicas_to_aggregate is None:
replicas_to_aggregate = num_workers
else:
replicas_to_aggregate = FLAGS.replicas_to_aggregate
opt = tf.train.SyncReplicasOptimizer(
opt,
replicas_to_aggregate=replicas_to_aggregate,
total_num_replicas=num_workers,
name="mnist_sync_replicas")
train_step = opt.minimize(cross_entropy, global_step=global_step)
if FLAGS.sync_replicas:
local_init_op = opt.local_step_init_op
if is_chief:
local_init_op = opt.chief_init_op
ready_for_local_init_op = opt.ready_for_local_init_op
# Initial token and chief queue runners required by the sync_replicas mode
chief_queue_runner = opt.get_chief_queue_runner()
sync_init_op = opt.get_init_tokens_op()
init_op = tf.global_variables_initializer()
train_dir = tempfile.mkdtemp()
if FLAGS.sync_replicas:
sv = tf.train.Supervisor(
is_chief=is_chief,
logdir=train_dir,
init_op=init_op,
local_init_op=local_init_op,
ready_for_local_init_op=ready_for_local_init_op,
recovery_wait_secs=1,
global_step=global_step)
else:
sv = tf.train.Supervisor(
is_chief=is_chief,
logdir=train_dir,
init_op=init_op,
recovery_wait_secs=1,
global_step=global_step)
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,
device_filters=["/job:ps",
"/job:worker/task:%d" % task_index])
# The chief worker (task_index==0) session will prepare the session,
# while the remaining workers will wait for the preparation to complete.
if is_chief:
print("Worker %d: Initializing session..." % task_index)
else:
print("Worker %d: Waiting for session to be initialized..." %
task_index)
if FLAGS.existing_servers:
server_grpc_url = "grpc://" + task_index
print("Using existing server at: %s" % server_grpc_url)
sess = sv.prepare_or_wait_for_session(server_grpc_url, config=sess_config)
else:
sess = sv.prepare_or_wait_for_session(server.target, config=sess_config)
print("Worker %d: Session initialization complete." % task_index)
if FLAGS.sync_replicas and is_chief:
# Chief worker will start the chief queue runner and call the init op.
sess.run(sync_init_op)
sv.start_queue_runners(sess, [chief_queue_runner])
# Perform training
time_begin = time.time()
print("Training begins @ %f" % time_begin)
local_step = 0
while True:
# Training feed
batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batch_size)
train_feed = {x: batch_xs, y_: batch_ys}
_, step = sess.run([train_step, global_step], feed_dict=train_feed)
local_step += 1
now = time.time()
print("%f: Worker %d: training step %d done (global step: %d)" %
(now, task_index, local_step, step))
if step >= FLAGS.train_steps:
break
time_end = time.time()
print("Training ends @ %f" % time_end)
training_time = time_end - time_begin
print("Training elapsed time: %f s" % training_time)
# Validation feed
val_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
val_xent = sess.run(cross_entropy, feed_dict=val_feed)
print("After %d training step(s), validation cross entropy = %g" %
(FLAGS.train_steps, val_xent))
if job_name == "worker" and task_index == 0:
run = Run.get_context()
run.log("CrossEntropy", val_xent)
if __name__ == "__main__":
tf.app.run()
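The `TF_CONFIG` parsing at the top of `main()` expects TensorFlow's standard cluster-spec JSON. A hypothetical two-worker, one-parameter-server value, shown only to illustrate the shape the script consumes (hosts and ports are made up):

```python
import json
import os

# Hypothetical TF_CONFIG; a distributed-TF launcher (e.g. AzureML's
# TensorflowConfiguration) injects a value of this shape into each process.
os.environ["TF_CONFIG"] = json.dumps({
    "cluster": {
        "ps": ["10.0.0.1:2222"],
        "worker": ["10.0.0.2:2222", "10.0.0.3:2222"],
    },
    "task": {"type": "worker", "index": 0},
})

tf_config_json = json.loads(os.environ["TF_CONFIG"])
print(tf_config_json["cluster"])  # host lists keyed by job name
print(tf_config_json["task"])     # this process's role and index
```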

View File

@@ -36,8 +36,6 @@ Using these samples, you will learn how to do the following.
 | [cartpole_ci.ipynb](cartpole-on-compute-instance/cartpole_ci.ipynb) | Notebook to train a Cartpole playing agent on an Azure Machine Learning Compute Instance |
 | [cartpole_sc.ipynb](cartpole-on-single-compute/cartpole_sc.ipynb) | Notebook to train a Cartpole playing agent on an Azure Machine Learning Compute Cluster (single node) |
 | [pong_rllib.ipynb](atari-on-distributed-compute/pong_rllib.ipynb) | Notebook for distributed training of Pong agent using RLlib on multiple compute targets |
-| [minecraft.ipynb](minecraft-on-distributed-compute/minecraft.ipynb) | Notebook to train an agent to navigate through a lava maze in the Minecraft game |
-| [particle.ipynb](multiagent-particle-envs/particle.ipynb) | Notebook to train policies in a multiagent cooperative navigation scenario based on OpenAI's Particle environments |
 ## Prerequisites

View File

@@ -1,14 +1,18 @@
-FROM mcr.microsoft.com/azureml/openmpi4.1.0-cuda11.0.3-cudnn8-ubuntu18.04
+# NC-series GPUs only support the pytorch-1.11/cuda11.3 combo
+# See https://learn.microsoft.com/en-us/azure/machine-learning/resource-curated-environments
+FROM mcr.microsoft.com/azureml/curated/acpt-pytorch-1.11-cuda11.3
 USER root
+RUN conda install -c anaconda python=3.7
-# CUDA repository key rotation: https://forums.developer.nvidia.com/t/notice-cuda-linux-repository-key-rotation/212771
-RUN apt-key del 7fa2af80
-ENV distro ubuntu1804
-ENV arch x86_64
-RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/$distro/$arch/3bf863cc.pub
+# ENV AZUREML_CONDA_ENVIRONMENT_PATH /azureml-envs/ray-rllib
+# Create conda environment
+# RUN conda create -p $AZUREML_CONDA_ENVIRONMENT_PATH \
+#     python=3.8.5
+# Prepend path to AzureML conda environment
+# ENV PATH $AZUREML_CONDA_ENVIRONMENT_PATH/bin:$PATH
+# Install necessary packages to support videos in rllib/gym
 RUN apt-get update && apt-get install -y --no-install-recommends \
     python-opengl \
     rsync \
@@ -17,61 +21,35 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
     rm -rf /var/lib/apt/lists/* && \
     rm -rf /usr/share/man/*
-ENV AZUREML_CONDA_ENVIRONMENT_PATH /azureml-envs/tensorflow-2.4
-# Create conda environment
-RUN conda create -p $AZUREML_CONDA_ENVIRONMENT_PATH \
-    python=3.7 pip=20.2.4
-# Prepend path to AzureML conda environment
-ENV PATH $AZUREML_CONDA_ENVIRONMENT_PATH/bin:$PATH
 RUN pip --version
 RUN python --version
-# Install ray-on-aml
-RUN pip install 'ray-on-aml==0.1.6'
-RUN pip install ray==0.8.7
-RUN pip install gym[atari]==0.19.0
-RUN pip install gym[accept-rom-license]==0.19.0
-# Install pip dependencies
-RUN pip install 'matplotlib>=3.3,<3.4' \
-    'psutil>=5.8,<5.9' \
-    'tqdm>=4.59,<4.60' \
-    'pandas>=1.1,<1.2' \
-    'scipy>=1.5,<1.6' \
-    'numpy>=1.10,<1.20' \
-    'ipykernel~=6.0' \
-    'azureml-core==1.36.0.post2' \
-    'azureml-defaults==1.36.0' \
-    'azureml-mlflow==1.36.0' \
-    'azureml-telemetry==1.36.0' \
-    'tensorboard==2.4.0' \
-    'tensorflow-gpu==2.4.1' \
-    'tensorflow-datasets==4.3.0' \
-    'onnxruntime-gpu>=1.7,<1.8' \
-    'horovod[tensorflow-gpu]==0.21.3'
-RUN pip install --no-cache-dir \
-    azureml-defaults \
-    azureml-dataset-runtime[fuse,pandas] \
-    azureml-contrib-reinforcementlearning \
-    gputil \
-    cloudpickle==1.3.0 \
-    tabulate \
-    dm_tree \
-    lz4 \
-    psutil \
-    setproctitle
-# This is required for ray 0.8.7
-RUN pip install -U aiohttp==3.7.4
-RUN pip install 'msrest<0.7.0'
-RUN pip install protobuf==3.20.0
-# This is needed for mpi to locate libpython
-ENV LD_LIBRARY_PATH $AZUREML_CONDA_ENVIRONMENT_PATH/lib:$LD_LIBRARY_PATH
+RUN pip install ray-on-aml==0.2.4 \
+        ray==2.4.0 \
+        ray[rllib]==2.4.0 \
+        mlflow==2.3.1 \
+        azureml-defaults==1.50.0 \
+        azureml-dataset-runtime[fuse,pandas]==1.50.0 \
+        azureml-contrib-reinforcementlearning==1.50.0 \
+        gputil==1.4.0 \
+        scipy==1.9.1 \
+        pyglet==2.0.6 \
+        cloudpickle==2.2.1 \
+        tensorflow==2.11.0 \
+        tensorflow-probability==0.19.0 \
+        tabulate==0.9.0 \
+        dm_tree==0.1.8 \
+        lz4==4.3.2 \
+        psutil==5.9.4 \
+        setproctitle==1.3.2 \
+        pygame==2.1.0 \
+        gymnasium[classic_control]==0.26.3 \
+        gymnasium[atari]==0.26.3 \
+        gymnasium[accept-rom-license]==0.26.3 \
+        gym==0.26.2 \
+        gym[atari]==0.26.2 \
+        gym[accept-rom-license]==0.26.2
+# Display all versions
+RUN pip freeze
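The notebook that accompanies this Dockerfile builds it into an AzureML environment. A minimal sketch of that step, assuming the file lives at `docker/Dockerfile-gpu` as in the notebook later in this diff (SDK v1 APIs):

```python
import os
from azureml.core import Environment, Workspace

ws = Workspace.from_config()

# Register an environment whose image is built from the Dockerfile above.
dockerfile_path = os.path.join(os.getcwd(), 'docker', 'Dockerfile-gpu')
ray_environment = Environment.from_dockerfile(name='pong-gpu',
                                              dockerfile=dockerfile_path)
ray_environment.register(workspace=ws)
build = ray_environment.build(workspace=ws)
build.wait_for_completion(show_output=True)  # the first build can take a while
```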

View File

@@ -0,0 +1,18 @@
pong-impala-vectorized:
  env: ALE/Pong-v5
  run: IMPALA
  config:
    # Make analogous to old v4 + NoFrameskip.
    env_config:
      frameskip: 1
      full_action_space: false
      repeat_action_probability: 0.0
    rollout_fragment_length: 50
    train_batch_size: 500
    num_workers: 11
    num_envs_per_worker: 10
    framework: torch
    log_level: INFO
  stop:
    episode_reward_mean: 10
    time_total_s: 3600

View File

@@ -1,35 +1,35 @@
 from ray_on_aml.core import Ray_On_AML
+import yaml
-import ray.tune as tune
-from ray.rllib import train
+from ray.tune.tune import run_experiments
 from utils import callbacks
+import argparse

 if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--config', help='Path to yaml configuration file')
+    args = parser.parse_args()
+
     ray_on_aml = Ray_On_AML()
     ray = ray_on_aml.getRay()

     if ray: # in the headnode
-        # Parse arguments
-        train_parser = train.create_parser()
-        args = train_parser.parse_args()
-        print("Algorithm config:", args.config)
-
-        tune.run(
-            run_or_experiment=args.run,
-            config={
-                "env": args.env,
-                "num_gpus": args.config["num_gpus"],
-                "num_workers": args.config["num_workers"],
-                "callbacks": {"on_train_result": callbacks.on_train_result},
-                "sample_batch_size": 50,
-                "train_batch_size": 1000,
-                "num_sgd_iter": 2,
-                "num_data_loader_buffers": 2,
-                "model": {"dim": 42},
-            },
-            stop=args.stop,
-            local_dir='./logs')
+        ray.init(address="auto")
+        print("Configuring run from file: ", args.config)
+        experiment_config = None
+        with open(args.config, "r") as file:
+            experiment_config = yaml.safe_load(file)
+        print(f'Config: {experiment_config}')
+
+        # Set local_dir in each experiment configuration to ensure generated logs get picked up
+        # by Azure ML
+        for experiment in experiment_config.values():
+            experiment["local_dir"] = "./logs"
+
+        trials = run_experiments(
+            experiment_config,
+            callbacks=[callbacks.TrialCallback()],
+            verbose=2
+        )
     else:
         print("in worker node")

View File

@@ -3,15 +3,20 @@
 '''
 from azureml.core import Run
+from ray import tune
+from ray.tune import Callback
+from ray.air import session

-def on_train_result(info):
-    '''Callback on train result to record metrics returned by trainer.
-    '''
-    run = Run.get_context()
-    run.log(
-        name='episode_reward_mean',
-        value=info["result"]["episode_reward_mean"])
-    run.log(
-        name='episodes_total',
-        value=info["result"]["episodes_total"])
+class TrialCallback(Callback):
+
+    def on_trial_result(self, iteration, trials, trial, result, **info):
+        '''Callback on train result to record metrics returned by trainer.
+        '''
+        run = Run.get_context()
+        run.log(
+            name='episode_reward_mean',
+            value=result["episode_reward_mean"])
+        run.log(
+            name='episodes_total',
+            value=result["episodes_total"])

View File

@@ -22,7 +22,8 @@
 "source": [
 "# Reinforcement Learning in Azure Machine Learning - Pong problem\n",
 "Reinforcement Learning in Azure Machine Learning is a managed service for running distributed reinforcement learning training and simulation using the open source Ray framework.\n",
-"This example uses Ray RLlib to train a Pong playing agent on a multi-node cluster.\n",
+"This notebook demonstrates how to use Ray to solve a more complex problem with a more complex setup, including Ray RLlib running on multiple compute nodes and using a GPU.\n",
+"For this example we will train a Pong playing agent on a cluster of two NC6 nodes (6 CPU, 1 GPU).\n",
 "\n",
 "## Pong problem\n",
 "[Pong](https://en.wikipedia.org/wiki/Pong) is a two-dimensional sports game that simulates table tennis. The player controls an in-game paddle by moving it vertically across the left or right side of the screen. They can compete against another player controlling a second paddle on the opposing side. Players use the paddles to hit a ball back and forth."
@@ -46,7 +47,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"The goal here is to train an agent to win an episode of Pong game against opponent with the score of at least 18 points. An episode in Pong runs until one of the players reaches a score of 21. Episodes are a terminology that is used across all the [OpenAI gym](https://www.gymlibrary.dev/environments/atari/pong/) environments that contains a strictly defined task.\n",
+"The goal here is to train an agent to win an episode of the Pong game against its opponent with a score of at least 10 points. An episode in Pong runs until one of the players reaches a score of 21. Episodes are a terminology that is used across all the [OpenAI gym](https://www.gymlibrary.dev/environments/atari/pong/) environments that contains a strictly defined task.\n",
 "\n",
 "Training a Pong agent is a compute-intensive task and this example demonstrates the use of Reinforcement Learning in Azure Machine Learning service to train an agent faster in a distributed, parallel environment. You'll learn more about using the head and the worker compute targets to train an agent in this notebook below."
 ]
@@ -60,19 +61,6 @@
 "It is highly recommended that the user should go through the [Reinforcement Learning in Azure Machine Learning - Cartpole Problem on Single Compute](../cartpole-on-single-compute/cartpole_sc.ipynb) to understand the basics of Reinforcement Learning in Azure Machine Learning and Ray RLlib used in this notebook."
 ]
 },
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"## Set up Development Environment\n",
-"The following subsections show typical steps to setup your development environment. Setup includes:\n",
-"\n",
-"* Connecting to a workspace to enable communication between your local machine and remote resources\n",
-"* Creating an experiment to track all your runs\n",
-"* Setting up a virtual network\n",
-"* Creating remote head and worker compute target on a virtual network to use for training"
-]
-},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -86,7 +74,7 @@
 "execution_count": null,
 "metadata": {
 "gather": {
-"logged": 1646081765827
+"logged": 1683263371795
 }
 },
 "outputs": [],
@@ -113,7 +101,7 @@
 "execution_count": null,
 "metadata": {
 "gather": {
-"logged": 1646081772340
+"logged": 1683263375690
 }
 },
 "outputs": [],
@@ -137,7 +125,7 @@
 "execution_count": null,
 "metadata": {
 "gather": {
-"logged": 1646081775643
+"logged": 1683263378789
 }
 },
 "outputs": [],
@@ -165,7 +153,7 @@
 "execution_count": null,
 "metadata": {
 "gather": {
-"logged": 1646086081229
+"logged": 1683263385677
 }
 },
 "outputs": [],
@@ -177,7 +165,7 @@
 "compute_min_nodes = 0\n",
 "compute_max_nodes = 2\n",
 "\n",
-"# This example uses GPU VM. For using CPU VM, set SKU to STANDARD_D2_V2\n",
+"# This example uses GPU VM.\n",
 "vm_size = 'STANDARD_NC6'\n",
 "\n",
 "if compute_name in ws.compute_targets:\n",
@@ -207,15 +195,52 @@
 " print(compute_target.get_status().serialize())"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {
+"nteract": {
+"transient": {
+"deleting": false
+}
+}
+},
+"source": [
+"### Create Azure ML Environment\r\n",
+"\r\n",
+"This step creates and registers an Azure ML Environment that includes all of the dependencies needed to run this example, including CUDA drivers, PyTorch, RLlib, and associated tools. This step can take a significant amount of time (about 30 minutes) on the first run."
+]
+},
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
 "gather": {
-"logged": 1646160884910
+"logged": 1683263388781
 },
 "jupyter": {
-"outputs_hidden": true,
+"outputs_hidden": false,
+"source_hidden": false
+},
+"nteract": {
+"transient": {
+"deleting": false
+}
+}
+},
+"outputs": [],
+"source": [
+"ray_environment_name = 'pong-gpu'"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"gather": {
+"logged": 1683056774047
+},
+"jupyter": {
+"outputs_hidden": false,
 "source_hidden": false
 },
 "nteract": {
@@ -229,7 +254,6 @@
 "import os\n",
 "from azureml.core import Environment\n",
 "\n",
-"ray_environment_name = 'pong-gpu'\n",
 "ray_environment_dockerfile_path = os.path.join(os.getcwd(), 'docker', 'Dockerfile-gpu')\n",
 "\n",
 "# Build GPU image\n",
@@ -249,15 +273,7 @@
 "\n",
 "The code below submits the training run using a `ScriptRunConfig`. By providing the\n",
 "command to run the training, and a `RunConfig` object configured with your\n",
-"compute target, number of nodes, and environment image to use.\n",
-"\n",
-"We specify `episode_reward_mean` to 18 as we want to stop the training as soon as the trained agent reaches an average win margin of at least 18 point over opponent over all episodes in the training epoch.\n",
-"Number of Ray worker processes are defined by parameter `num_workers`. We set it to 13 as we have 11 CPUs available in our compute targets. Multiple Ray worker processes parallelizes agent training and helps in achieving our goal faster. \n",
-"\n",
-"```\n",
-"Number of CPUs in the compute cluster = 6 * 2 = 12 CPUs over 2 nodes\n",
-"Number of CPUs available = (Number of CPUs in the compute cluster) - (1 CPU for head node) = 12 - 1 = 11\n",
-"```"
+"compute target, number of nodes, and environment image to use."
 ]
 },
 {
@@ -265,7 +281,7 @@
 "execution_count": null,
 "metadata": {
 "gather": {
-"logged": 1646162435310
+"logged": 1683264835679
 }
 },
 "outputs": [],
@@ -282,16 +298,12 @@
 "aml_run_config_ml.node_count = 2\n",
 "aml_run_config_ml.environment = ray_environment\n",
 "\n",
-"training_algorithm = \"IMPALA\"\n",
-"rl_environment = \"PongNoFrameskip-v4\"\n",
 "script_name='pong_rllib.py'\n",
+"config_name='pong-impala-vectorized.yaml'\n",
 "\n",
 "command=[\n",
 " 'python', script_name,\n",
-" '--run', training_algorithm,\n",
-" '--env', rl_environment,\n",
-" '--config', '\\'{\"num_gpus\": 1, \"num_workers\": 11}\\'',\n",
-" '--stop', '\\'{\"episode_reward_mean\": 18, \"time_total_s\": 3600}\\''\n",
+" '--config', config_name\n",
 "]\n",
 "\n",
 "config = ScriptRunConfig(source_directory='./files',\n",
@@ -305,25 +317,34 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"### Training script\n",
-"As recommended in [RLlib](https://ray.readthedocs.io/en/latest/rllib.html) documentations, we use Ray [Tune](https://ray.readthedocs.io/en/latest/tune.html) API to run the training algorithm. All the RLlib built-in trainers are compatible with the Tune API. Here we use tune.run() to execute a built-in training algorithm. For convenience, down below you can see part of the entry script where we make this call.\n",
+"### Training configuration\n",
+"All training parameters (including the Reinforcement Learning algorithm) are set through a single configuration file. For this example we'll be using the IMPALA algorithm to train an agent to play Atari Pong.\n",
+"We set `num_workers` to 11 because we have 11 CPUs available for worker nodes (6 CPUs on each of 2 machines, with 1 CPU consumed as a head node).\n",
+"We set `episode_reward_mean` (under `stop`) to 10 so that we terminate the run once we achieve a reward score of 10.\n",
+"\n",
+"Here is the configuration we are using for this example:\n",
+"\n",
+"```yaml\n",
+"pong:\n",
+"  env: ALE/Pong-v5\n",
+"  run: IMPALA\n",
+"  config:\n",
+"    num_workers: 11\n",
+"    num_gpus: 1\n",
+"    rollout_fragment_length: 50\n",
+"    train_batch_size: 1000\n",
+"    num_sgd_iter: 2\n",
+"    num_multi_gpu_tower_stacks: 2\n",
+"    env_config:\n",
+"      frameskip: 1\n",
+"      full_action_space: false\n",
+"      repeat_action_probability: 0.0\n",
+"  stop:\n",
+"    episode_reward_mean: 10\n",
+"    time_total_s: 3600\n",
+"  model:\n",
+"    dim: 42\n",
 "\n",
-"```python\n",
-"    tune.run(\n",
-"        run_or_experiment=args.run,\n",
-"        config={\n",
-"            \"env\": args.env,\n",
-"            \"num_gpus\": args.config[\"num_gpus\"],\n",
-"            \"num_workers\": args.config[\"num_workers\"],\n",
-"            \"callbacks\": {\"on_train_result\": callbacks.on_train_result},\n",
-"            \"sample_batch_size\": 50,\n",
-"            \"train_batch_size\": 1000,\n",
-"            \"num_sgd_iter\": 2,\n",
-"            \"num_data_loader_buffers\": 2,\n",
-"            \"model\": {\"dim\": 42},\n",
-"        },\n",
-"        stop=args.stop,\n",
-"        local_dir='./logs')\n",
 "```"
 ]
 },
@@ -339,7 +360,11 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"gather": {
+"logged": 1683056781459
+}
+},
 "outputs": [],
 "source": [
 "from azureml.widgets import RunDetails\n",
@@ -359,7 +384,11 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"gather": {
+"logged": 1683056781759
+}
+},
 "outputs": [],
 "source": [
 "# Uncomment line below to cancel the run\n",
@@ -379,7 +408,11 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"gather": {
+"logged": 1682525323059
+}
+},
 "outputs": [],
 "source": [
 "training_run.wait_for_completion()"
@@ -399,7 +432,11 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"gather": {
+"logged": 1683064583273
+}
+},
 "outputs": [],
 "source": [
 "# Get the reward metrics from training_run\n",
@@ -416,7 +453,11 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"gather": {
+"logged": 1682445012908
+}
+},
 "outputs": [],
 "source": [
 "import matplotlib.pyplot as plt\n",
@@ -431,7 +472,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"We observe that during the training over multiple episodes, the agent learns to win the Pong game against opponent with our target of 18 points in each episode of 21 points.\n",
+"We observe that during the training over multiple episodes, the agent learns to win the Pong game against its opponent with our target of 10 points in each episode of 21 points.\n",
 "**Congratulations!! You have trained your Pong agent to win a game.**"
 ]
 },
@@ -446,7 +487,11 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"gather": {
+"logged": 1682445012927
+}
+},
 "outputs": [],
 "source": [
 "# To archive the created experiment:\n",
@@ -456,14 +501,6 @@
 "#head_compute_target.delete()\n",
 "#worker_compute_target.delete()"
 ]
-},
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"## Next\n",
-"In this example, you learned how to solve distributed reinforcement learning training problems using head and worker compute targets. This was an introductory tutorial on Reinforcement Learning in Azure Machine Learning service offering. We would love to hear your feedback to build the features you need!"
-]
 }
 ],
 "metadata": {
@@ -494,7 +531,17 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.7.3"
+"version": "3.8.5"
 },
+"microsoft": {
+"host": {
+"AzureML": {
+"notebookHasBeenCompleted": true
+}
+},
+"ms_spell_check": {
+"ms_spell_check_language": "en"
+}
+},
 "notice": "Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License.",
 "nteract": {

View File

@@ -84,7 +84,7 @@
 "execution_count": null,
 "metadata": {
 "gather": {
-"logged": 1646344676671
+"logged": 1683062935076
 }
 },
 "outputs": [],
@@ -106,7 +106,7 @@
 "execution_count": null,
 "metadata": {
 "gather": {
-"logged": 1646344680982
+"logged": 1683062936280
 }
 },
 "outputs": [],
@@ -133,7 +133,7 @@
 "execution_count": null,
 "metadata": {
 "gather": {
-"logged": 1646344684217
+"logged": 1683062936485
 }
 },
 "outputs": [],
@@ -160,7 +160,7 @@
 "execution_count": null,
 "metadata": {
 "gather": {
-"logged": 1646344690768
+"logged": 1683062937126
 }
 },
 "outputs": [],
@@ -212,14 +212,14 @@
 "execution_count": null,
 "metadata": {
 "gather": {
-"logged": 1646344835579
+"logged": 1683062937499
 }
 },
 "outputs": [],
 "source": [
 "from azureml.core.experiment import Experiment\n",
 "\n",
-"experiment_name = 'CartPole-v0-CI'\n",
+"experiment_name = 'CartPole-v1-CI'\n",
 "experiment = Experiment(workspace=ws, name=experiment_name)"
 ]
 },
@@ -228,7 +228,7 @@
 "execution_count": null,
 "metadata": {
 "gather": {
-"logged": 1646346293902
+"logged": 1683064044718
 },
 "jupyter": {
 "outputs_hidden": false,
@@ -282,7 +282,7 @@
 "execution_count": null,
 "metadata": {
 "gather": {
-"logged": 1646347120585
+"logged": 1683064046594
 },
 "jupyter": {
 "outputs_hidden": false,
@@ -300,18 +300,10 @@
 "from azureml.core import RunConfiguration, ScriptRunConfig, Experiment\n",
 "from azureml.core.runconfig import DockerConfiguration, RunConfiguration\n",
 "\n",
-"training_algorithm = 'PPO'\n",
-"rl_environment = 'CartPole-v0'\n",
-"\n",
+"config_name = 'cartpole-ppo.yaml'\n",
 "script_name = 'cartpole_training.py'\n",
 "script_arguments = [\n",
-" '--run', training_algorithm,\n",
-" '--env', rl_environment,\n",
-" '--config', '{\"num_gpus\": 0, \"num_workers\": 1}',\n",
-" '--stop', '{\"episode_reward_mean\": 200, \"time_total_s\": 300}',\n",
-" '--checkpoint-freq', '2',\n",
-" '--checkpoint-at-end',\n",
-" '--local-dir', './logs'\n",
+" '--config', config_name\n",
 "]\n",
 "\n",
 "aml_run_config_ml = RunConfiguration(communicator='OpenMpi')\n",
@@ -331,43 +323,35 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"### Training script\n",
+"### Training configuration\n",
 "\n",
-"As recommended in RLlib documentations, we use Ray Tune API to run the training algorithm. All the RLlib built-in trainers are compatible with the Tune API. Here we use `tune.run()` to execute a built-in training algorithm. For convenience, down below you can see part of the entry script where we make this call.\n",
+"This is the training configuration (in yaml) that we use to train an agent to solve the CartPole problem using\n",
+"the PPO algorithm.\n",
 "\n",
-"This is the list of parameters we are passing into `tune.run()` via the `script_params` parameter:\n",
-"\n",
-"- `run_or_experiment`: name of the [built-in algorithm](https://ray.readthedocs.io/en/latest/rllib-algorithms.html#rllib-algorithms), 'PPO' in our example,\n",
-"- `config`: Algorithm-specific configuration. This includes specifying the environment, `env`, which in our example is the gym **[CartPole-v0](https://www.gymlibrary.dev/environments/classic_control/cart_pole/)** environment,\n",
-"- `stop`: stopping conditions, which could be any of the metrics returned by the trainer. Here we use \"mean of episode reward\", and \"total training time in seconds\" as stop conditions, and\n",
-"- `checkpoint_freq` and `checkpoint_at_end`: Frequency of taking checkpoints (number of training iterations between checkpoints), and if a checkpoint should be taken at the end.\n",
-"\n",
-"We also specify the `local_dir`, the directory in which the training logs, checkpoints and other training artifacts will be recorded. \n",
-"\n",
-"See [RLlib Training APIs](https://ray.readthedocs.io/en/latest/rllib-training.html#rllib-training-apis) for more details, and also [Training (tune.run, tune.Experiment)](https://ray.readthedocs.io/en/latest/tune/api_docs/execution.html#training-tune-run-tune-experiment) for the complete list of parameters.\n",
-"\n",
-"```python\n",
-"import os\n",
-"import ray\n",
-"import ray.tune as tune\n",
-"\n",
-"if __name__ == \"__main__\":\n",
-"\n",
-"    # parse arguments ...\n",
-"    \n",
-"    # Start ray head (single node)\n",
-"    os.system('ray start --head')\n",
-"    ray.init(address='auto')\n",
-"\n",
-"    # Run training task using tune.run\n",
-"    tune.run(\n",
-"        run_or_experiment=args.run,\n",
-"        config=dict(args.config, env=args.env),\n",
-"        stop=args.stop,\n",
-"        checkpoint_freq=args.checkpoint_freq,\n",
-"        checkpoint_at_end=args.checkpoint_at_end,\n",
-"        local_dir=args.local_dir\n",
-"    )\n",
+"```yaml\n",
+"cartpole-ppo:\n",
+"  env: CartPole-v1\n",
+"  run: PPO\n",
+"  stop:\n",
+"    episode_reward_mean: 475\n",
+"    time_total_s: 300\n",
+"  checkpoint_config:\n",
+"    checkpoint_frequency: 2\n",
+"    checkpoint_at_end: true\n",
+"  config:\n",
+"    # Works for both torch and tf.\n",
+"    framework: torch\n",
+"    gamma: 0.99\n",
+"    lr: 0.0003\n",
+"    num_workers: 1\n",
+"    observation_filter: MeanStdFilter\n",
+"    num_sgd_iter: 6\n",
+"    vf_loss_coeff: 0.01\n",
+"    model:\n",
+"      fcnet_hiddens: [32]\n",
+"      fcnet_activation: linear\n",
+"      vf_share_layers: true\n",
+"    enable_connectors: true\n",
 "```"
 ]
 },
@@ -386,7 +370,7 @@
 "execution_count": null,
 "metadata": {
 "gather": {
-"logged": 1646347127671
+"logged": 1683064049813
 }
 },
 "outputs": [],
@@ -408,7 +392,11 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {},
+"metadata": {
+"gather": {
+"logged": 1683064050024
+}
+},
 "outputs": [],
 "source": [
 "# Uncomment line below to cancel the run\n",
@@ -430,7 +418,7 @@
 "execution_count": null,
 "metadata": {
 "gather": {
-"logged": 1646347318682
+"logged": 1683064304728
 }
 },
 "outputs": [],
@@ -463,7 +451,7 @@
 "execution_count": null,
 "metadata": {
 "gather": {
-"logged": 1646347328505
+"logged": 1683064305251
 }
 },
 "outputs": [],
@@ -471,7 +459,7 @@
 "from os import path\n",
 "from distutils import dir_util\n",
 "\n",
-"training_artifacts_path = path.join(\"logs\", training_algorithm)\n",
+"training_artifacts_path = path.join(\"logs\", \"cartpole-ppo\")\n",
 "print(\"Training artifacts path:\", training_artifacts_path)\n",
 "\n",
 "if path.exists(training_artifacts_path):\n",
@@ -493,19 +481,20 @@
 "execution_count": null,
 "metadata": {
 "gather": {
-"logged": 1646347334571
+"logged": 1683064305283
 }
 },
 "outputs": [],
 "source": [
-"# A helper function to find checkpoint files in a directory\n",
+"# A helper function to find all of the checkpoint directories located within a larger directory tree\n",
 "def find_checkpoints(file_path):\n",
 "    print(\"Looking in path:\", file_path)\n",
 "    checkpoints = []\n",
-"    for root, _, files in os.walk(file_path):\n",
-"        for name in files:\n",
-"            if os.path.basename(root).startswith('checkpoint_'):\n",
-"                checkpoints.append(path.join(root, name))\n",
+"    for root, dirs, files in os.walk(file_path):\n",
+"        trimmed_root = root[len(file_path)+1:]\n",
+"        for name in dirs:\n",
+"            if name.startswith('checkpoint_'):\n",
+"                checkpoints.append(path.join(trimmed_root, name))\n",
 "    return checkpoints"
 ]
 },
@@ -514,7 +503,7 @@
 "execution_count": null,
 "metadata": {
 "gather": {
-"logged": 1646347337724
+"logged": 1683064305305
 }
 },
 "outputs": [],
@@ -522,16 +511,16 @@
 "# Find checkpoints and last checkpoint number\n",
 "checkpoint_files = find_checkpoints(training_artifacts_path)\n",
 "\n",
-"checkpoint_numbers = []\n",
-"for file in checkpoint_files:\n",
-"    file = os.path.basename(file)\n",
-"    if file.startswith('checkpoint-') and not file.endswith('.tune_metadata'):\n",
-"        checkpoint_numbers.append(int(file.split('-')[1]))\n",
+"last_checkpoint_path = None\n",
+"last_checkpoint_number = -1\n",
+"for checkpoint_file in checkpoint_files:\n",
+"    checkpoint_number = int(os.path.basename(checkpoint_file).split('_')[1])\n",
+"    if checkpoint_number > last_checkpoint_number:\n",
+"        last_checkpoint_path = checkpoint_file\n",
+"        last_checkpoint_number = checkpoint_number\n",
 "\n",
-"print(\"Checkpoints:\", checkpoint_numbers)\n",
-"\n",
-"last_checkpoint_number = max(checkpoint_numbers)\n",
-"print(\"Last checkpoint number:\", last_checkpoint_number)"
+"print(\"Last checkpoint number:\", last_checkpoint_number)\n",
+"print(\"Last checkpoint path:\", last_checkpoint_path)"
 ]
 },
 {
@@ -546,17 +535,16 @@
 "execution_count": null,
 "metadata": {
 "gather": {
-"logged": 1646347346085
+"logged": 1683064305331
 }
 },
 "outputs": [],
 "source": [
 "# Upload the checkpoint files and create a DataSet\n",
-"from azureml.core import Dataset\n",
+"from azureml.data.dataset_factory import FileDatasetFactory\n",
 "\n",
 "datastore = ws.get_default_datastore()\n",
-"checkpoint_dataref = datastore.upload_files(checkpoint_files, target_path='cartpole_checkpoints_' + training_run.id, overwrite=True)\n",
+"checkpoint_ds = FileDatasetFactory.upload_directory(training_artifacts_path, (datastore, 'cartpole_checkpoints_' + training_run.id), overwrite=False, show_progress=True)"
"checkpoint_ds = Dataset.File.from_files(checkpoint_dataref)"
] ]
}, },
{ {
@@ -571,7 +559,7 @@
"execution_count": null, "execution_count": null,
"metadata": { "metadata": {
"gather": { "gather": {
"logged": 1646347354726 "logged": 1683064305353
} }
}, },
"outputs": [], "outputs": [],
@@ -598,7 +586,7 @@
"execution_count": null, "execution_count": null,
"metadata": { "metadata": {
"gather": { "gather": {
"logged": 1646347414835 "logged": 1683064305371
}, },
"jupyter": { "jupyter": {
"outputs_hidden": false, "outputs_hidden": false,
@@ -614,23 +602,18 @@
"source": [ "source": [
"ray_environment_name = 'cartpole-ray-ci'\n", "ray_environment_name = 'cartpole-ray-ci'\n",
"\n", "\n",
"experiment_name = 'CartPole-v0-CI'\n", "experiment_name = 'CartPole-v1-CI'\n",
"training_algorithm = 'PPO'\n",
"rl_environment = 'CartPole-v0'\n",
"\n", "\n",
"experiment = Experiment(workspace=ws, name=experiment_name)\n", "experiment = Experiment(workspace=ws, name=experiment_name)\n",
"ray_environment = Environment.get(workspace=ws, name=ray_environment_name)\n", "ray_environment = Environment.get(workspace=ws, name=ray_environment_name)\n",
"\n", "\n",
"script_name = 'cartpole_rollout.py'\n", "script_name = 'cartpole_rollout.py'\n",
"script_arguments = [\n", "script_arguments = [\n",
" '--run', training_algorithm,\n",
" '--env', rl_environment,\n",
" '--config', '{}',\n",
" '--steps', '2000',\n", " '--steps', '2000',\n",
" '--checkpoint-number', str(last_checkpoint_number),\n", " '--checkpoint', last_checkpoint_path,\n",
" '--no-render',\n", " '--algo', 'PPO',\n",
" '--artifacts-dataset', checkpoint_ds.as_named_input('artifacts_dataset'),\n", " '--render', 'false',\n",
" '--artifacts-path', checkpoint_ds.as_named_input('artifacts_path').as_mount()\n", " '--dataset_path', checkpoint_ds.as_named_input('dataset_path').as_mount()\n",
"]\n", "]\n",
"\n", "\n",
"aml_run_config_ml = RunConfiguration(communicator='OpenMpi')\n", "aml_run_config_ml = RunConfiguration(communicator='OpenMpi')\n",
@@ -653,7 +636,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"And then, similar to the training section, we can monitor the real-time progress of the rollout run and its chid as follows. If you browse logs of the child run you can see the evaluation results recorded in driver_log.txt file. Note that you may need to wait several minutes before these results become available." "And then, similar to the training section, we can monitor the real-time progress of the rollout run and its chid as follows. If you browse logs of the child run you can see the evaluation results recorded in std_log_process_0.txt file. Note that you may need to wait several minutes before these results become available."
] ]
}, },
{ {
@@ -661,7 +644,7 @@
"execution_count": null, "execution_count": null,
"metadata": { "metadata": {
"gather": { "gather": {
"logged": 1646347429626 "logged": 1683064305399
} }
}, },
"outputs": [], "outputs": [],
@@ -679,7 +662,11 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"metadata": {}, "metadata": {
"gather": {
"logged": 1683064305419
}
},
"outputs": [], "outputs": [],
"source": [ "source": [
"# Uncomment line below to cancel the run\n", "# Uncomment line below to cancel the run\n",
@@ -698,7 +685,11 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"metadata": {}, "metadata": {
"gather": {
"logged": 1683064305437
}
},
"outputs": [], "outputs": [],
"source": [ "source": [
"# To archive the created experiment:\n", "# To archive the created experiment:\n",
@@ -750,13 +741,16 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.7.3" "version": "3.8.5"
}, },
"microsoft": { "microsoft": {
"host": { "host": {
"AzureML": { "AzureML": {
"notebookHasBeenCompleted": true "notebookHasBeenCompleted": true
} }
},
"ms_spell_check": {
"ms_spell_check_language": "en"
} }
}, },
"notice": "Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License.", "notice": "Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License.",

View File

@@ -0,0 +1,23 @@
cartpole-ppo:
env: CartPole-v1
run: PPO
stop:
episode_reward_mean: 475
time_total_s: 300
checkpoint_config:
checkpoint_frequency: 2
checkpoint_at_end: true
config:
# Works for both torch and tf.
framework: torch
gamma: 0.99
lr: 0.0003
num_workers: 1
observation_filter: MeanStdFilter
num_sgd_iter: 6
vf_loss_coeff: 0.01
model:
fcnet_hiddens: [32]
fcnet_activation: linear
vf_share_layers: true
enable_connectors: true
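
As a hedged illustration (not part of the repository), this is how an experiment file like the one above is consumed: `yaml.safe_load` turns it into the `{experiment_name: spec}` mapping that `ray.tune.run_experiments` expects, which is the pattern the updated `cartpole_training.py` further below follows. The file name and the local `storage_path` override are assumptions for a local run.

```python
# Minimal local sketch: run the experiment spec above with Ray Tune.
# Assumes ray[rllib]==2.4.0 and the file saved as cartpole-ppo.yaml.
import yaml
from ray.tune.tune import run_experiments

with open("cartpole-ppo.yaml") as f:
    experiment_config = yaml.safe_load(f)   # {"cartpole-ppo": {...}}

# Redirect trial artifacts to ./logs so checkpoints end up where the
# notebooks' find_checkpoints() helper later looks for them.
for _, spec in experiment_config.items():
    spec["storage_path"] = "./logs"

run_experiments(experiment_config, verbose=2)
```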

View File

@@ -1,121 +1,108 @@
import os import os
import sys import sys
import argparse
import ray from ray.rllib.evaluate import RolloutSaver, rollout
from ray.rllib import rollout from ray_on_aml.core import Ray_On_AML
from ray.tune.registry import get_trainable_cls import ray.cloudpickle as cloudpickle
from ray.tune.utils import merge_dicts
from ray.tune.registry import get_trainable_cls, _global_registry, ENV_CREATOR
from azureml.core import Run from azureml.core import Run
from utils import callbacks from utils import callbacks
import collections
import copy
import gymnasium as gym
import json
from pathlib import Path
def run_rollout(args, parser):
config = args.config def run_rollout(checkpoint, algo, render, steps, episodes):
if not args.env: config_dir = os.path.dirname(checkpoint)
if not config.get("env"): config_path = os.path.join(config_dir, "params.pkl")
parser.error("the following arguments are required: --env") config = None
args.env = config.get("env")
# Create the Trainer from config. # Try parent directory.
cls = get_trainable_cls(args.run) if not os.path.exists(config_path):
agent = cls(env=args.env, config=config) config_path = os.path.join(config_dir, "../params.pkl")
# Load state from checkpoint. # Load the config from the pickled params file.
agent.restore(args.checkpoint) if os.path.exists(config_path):
num_steps = int(args.steps) with open(config_path, "rb") as f:
num_episodes = int(args.episodes) config = cloudpickle.load(f)
# If no pkl file found, require command line `--config`.
else:
raise ValueError("Could not find params.pkl in either the checkpoint dir or its parent directory")
# Determine the video output directory. # Make sure worker 0 has an Env.
use_arg_monitor = False config["create_env_on_driver"] = True
try:
args.video_dir
except AttributeError:
print("There is no such attribute: args.video_dir")
use_arg_monitor = True
video_dir = None # Merge with `evaluation_config` (first try from command line, then from
if not use_arg_monitor: # pkl file).
if args.monitor: evaluation_config = copy.deepcopy(config.get("evaluation_config", {}))
video_dir = os.path.join("./logs", "video") config = merge_dicts(config, evaluation_config)
elif args.video_dir: env = config.get("env")
video_dir = os.path.expanduser(args.video_dir)
# Make sure we have evaluation workers.
if not config.get("evaluation_num_workers"):
config["evaluation_num_workers"] = config.get("num_workers", 0)
if not config.get("evaluation_duration"):
config["evaluation_duration"] = 1
# Hard-override this as it raises a warning by Algorithm otherwise.
# Makes no sense anyways, to have it set to None as we don't call
# `Algorithm.train()` here.
config["evaluation_interval"] = 1
# Rendering settings.
config["render_env"] = render
# Create the Algorithm from config.
cls = get_trainable_cls(algo)
algorithm = cls(env=env, config=config)
# Load state from checkpoint, if provided.
if checkpoint:
algorithm.restore(checkpoint)
# Do the actual rollout. # Do the actual rollout.
with rollout.RolloutSaver( with RolloutSaver(
args.out, outfile=None,
args.use_shelve, use_shelve=False,
write_update_file=args.track_progress, write_update_file=False,
target_steps=num_steps, target_steps=steps,
target_episodes=num_episodes, target_episodes=episodes,
save_info=args.save_info) as saver: save_info=False,
if use_arg_monitor: ) as saver:
rollout.rollout( rollout(algorithm, env, steps, episodes, saver, not render)
agent, algorithm.stop()
args.env,
num_steps,
num_episodes,
saver,
args.no_render,
args.monitor)
else:
rollout.rollout(
agent, args.env,
num_steps,
num_episodes,
saver,
args.no_render, video_dir)
if __name__ == "__main__": if __name__ == "__main__":
# Start ray head (single node) # Start ray head (single node)
os.system('ray start --head') ray_on_aml = Ray_On_AML()
ray.init(address='auto') ray = ray_on_aml.getRay()
if ray:
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_path', required=True, help='Path to artifacts dataset')
parser.add_argument('--checkpoint', required=True, help='Name of checkpoint file directory')
parser.add_argument('--algo', required=True, help='Name of RL algorithm')
parser.add_argument('--render', default=False, required=False, help='True to render')
parser.add_argument('--steps', required=False, type=int, help='Number of steps to run')
parser.add_argument('--episodes', required=False, type=int, help='Number of episodes to run')
args = parser.parse_args()
# Add positional argument - serves as placeholder for checkpoint # Get a handle to run
argvc = sys.argv[1:] run = Run.get_context()
argvc.insert(0, 'checkpoint-placeholder')
# Parse arguments # Get handles to the training artifacts dataset and mount path
rollout_parser = rollout.create_parser() dataset_path = run.input_datasets['dataset_path']
rollout_parser.add_argument( # Find checkpoint file to be evaluated
'--checkpoint-number', required=False, type=int, default=1, checkpoint = os.path.join(dataset_path, args.checkpoint)
help='Checkpoint number of the checkpoint from which to roll out') print('Checkpoint:', checkpoint)
rollout_parser.add_argument( # Start rollout
'--artifacts-dataset', required=True, ray.init(address='auto')
help='The checkpoints artifacts dataset') run_rollout(checkpoint, args.algo, args.render, args.steps, args.episodes)
rollout_parser.add_argument(
'--artifacts-path', required=True,
help='The checkpoints artifacts path')
args = rollout_parser.parse_args(argvc)
# Get a handle to run
run = Run.get_context()
# Get handles to the training artifacts dataset and mount path
artifacts_dataset = run.input_datasets['artifacts_dataset']
artifacts_path = run.input_datasets['artifacts_path']
# Find checkpoint file to be evaluated
checkpoint_id = '-' + str(args.checkpoint_number)
checkpoint_files = list(filter(
lambda filename: filename.endswith(checkpoint_id),
artifacts_dataset.to_path()))
checkpoint_file = checkpoint_files[0]
if checkpoint_file[0] == '/':
checkpoint_file = checkpoint_file[1:]
checkpoint = os.path.join(artifacts_path, checkpoint_file)
print('Checkpoint:', checkpoint)
# Set rollout checkpoint
args.checkpoint = checkpoint
# Start rollout
run_rollout(args, rollout_parser)
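
One detail of the rewritten script worth calling out: the `--checkpoint` argument is a path relative to the mounted dataset, and the trainer config is recovered from the `params.pkl` that Tune writes next to the checkpoints. A hedged sketch of that resolution logic, with an assumed mount layout:

```python
# Illustrative sketch (not in the repo) of how the new cartpole_rollout.py
# locates the pickled trainer config for a checkpoint directory, e.g.
#   <dataset_path>/cartpole-ppo/PPO_CartPole-v1_xxxx/checkpoint_000002
import os

def resolve_params_pkl(checkpoint: str) -> str:
    """Find params.pkl beside the checkpoint dir, then in its parent."""
    config_dir = os.path.dirname(checkpoint)          # the trial directory
    config_path = os.path.join(config_dir, "params.pkl")
    if not os.path.exists(config_path):               # try one level up
        config_path = os.path.join(config_dir, "../params.pkl")
    if not os.path.exists(config_path):
        raise ValueError(
            "Could not find params.pkl in either the checkpoint dir "
            "or its parent directory")
    return os.path.normpath(config_path)
```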

View File

@@ -1,32 +1,34 @@
import ray from ray_on_aml.core import Ray_On_AML
from ray.rllib import train import yaml
from ray import tune from ray.tune.tune import run_experiments
import os
from utils import callbacks from utils import callbacks
import argparse
if __name__ == "__main__": if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--config', help='Path to yaml configuration file')
args = parser.parse_args()
# Parse arguments and add callbacks to config ray_on_aml = Ray_On_AML()
train_parser = train.create_parser() ray = ray_on_aml.getRay()
if ray: # in the headnode
ray.init(address="auto")
print("Configuring run from file: ", args.config)
experiment_config = None
with open(args.config, "r") as file:
experiment_config = yaml.safe_load(file)
args = train_parser.parse_args() # Set storage_path in each experiment configuration to ensure generated logs get picked up
args.config["callbacks"] = {"on_train_result": callbacks.on_train_result} # Also set monitor to ensure videos are captured
for experiment_name, experiment in experiment_config.items():
experiment["storage_path"] = "./logs"
experiment['config']['monitor'] = True
print(f'Config: {experiment_config}')
# Trace if video capturing is on trials = run_experiments(
if 'monitor' in args.config and args.config['monitor']: experiment_config,
print("Video capturing is ON!") callbacks=[callbacks.TrialCallback()],
verbose=2
# Start ray head (single node) )
os.system('ray start --head') else:
ray.init(address='auto') print("in worker node")
# Run training task using tune.run
tune.run(
run_or_experiment=args.run,
config=dict(args.config, env=args.env),
stop=args.stop,
checkpoint_freq=args.checkpoint_freq,
checkpoint_at_end=args.checkpoint_at_end,
local_dir=args.local_dir
)
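
Both rewritten entry scripts share the same `ray-on-aml` bootstrap: `getRay()` returns a Ray handle only on the head node, so all driver logic sits under `if ray:` while worker processes simply join the cluster. A condensed sketch of that skeleton (package versions as pinned in the Dockerfiles below):

```python
# Condensed sketch of the head/worker split used by both entry scripts.
from ray_on_aml.core import Ray_On_AML

if __name__ == "__main__":
    ray_on_aml = Ray_On_AML()
    ray = ray_on_aml.getRay()   # Ray module on the head node, None on workers
    if ray:
        ray.init(address="auto")
        # ...load the yaml config and call run_experiments(...) here...
    else:
        print("in worker node")  # workers idle as Ray cluster members
```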

View File

@@ -1,19 +1,27 @@
FROM mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04 FROM mcr.microsoft.com/azureml/openmpi4.1.0-ubuntu20.04
USER root RUN pip install ray-on-aml==0.2.4 \
RUN conda install -c anaconda python=3.7 ray==2.4.0 \
ray[rllib]==2.4.0 \
mlflow==2.3.1 \
azureml-defaults==1.50.0 \
azureml-dataset-runtime[fuse,pandas]==1.50.0 \
azureml-contrib-reinforcementlearning==1.50.0 \
gputil==1.4.0 \
scipy==1.9.1 \
pyglet==2.0.6 \
cloudpickle==2.2.1 \
tensorflow==2.11.0 \
tensorflow-probability==0.19.0 \
torch \
tabulate==0.9.0 \
dm_tree==0.1.8 \
lz4==4.3.2 \
psutil==5.9.4 \
setproctitle==1.3.2 \
pygame==2.1.0 \
gymnasium[classic_control]==0.26.3 \
gym[classic_control]==0.26.2
RUN pip install ray-on-aml==0.1.6 # Display the exact versions we have installed
RUN pip install gym[atari]==0.19.0 RUN pip freeze
RUN pip install gym[accept-rom-license]==0.19.0
RUN pip install ale-py==0.7.0
RUN pip install azureml-core
RUN pip install azureml-dataset-runtime
RUN pip install ray==0.8.7
RUN pip install ray[rllib,tune,serve]==0.8.7
RUN pip install tensorflow==1.14.0
RUN pip install 'msrest<0.7.0'
RUN apt-get update
RUN apt-get install -y jq
RUN apt-get install -y rsync
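
Neither diff shows the notebook cell that turns this Dockerfile into the `cartpole-ray-ci` environment the notebook references, so the following is an assumption-laden sketch using standard azureml-core calls (`Environment.from_dockerfile`, `register`, `build`); the Dockerfile path is hypothetical:

```python
# Hedged sketch: build the Ray environment for the compute-instance notebook.
from azureml.core import Environment, Workspace

ws = Workspace.from_config()
ray_environment = Environment.from_dockerfile(
    name="cartpole-ray-ci",
    dockerfile="./files/docker/Dockerfile",  # hypothetical location
)
ray_environment.register(workspace=ws)
build = ray_environment.build(workspace=ws)  # kicks off an image build
build.wait_for_completion(show_output=True)
```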

View File

@@ -3,15 +3,20 @@
''' '''
from azureml.core import Run from azureml.core import Run
from ray import tune
from ray.tune import Callback
from ray.air import session
def on_train_result(info): class TrialCallback(Callback):
'''Callback on train result to record metrics returned by trainer.
''' def on_trial_result(self, iteration, trials, trial, result, **info):
run = Run.get_context() '''Callback on train result to record metrics returned by trainer.
run.log( '''
name='episode_reward_mean', run = Run.get_context()
value=info["result"]["episode_reward_mean"]) run.log(
run.log( name='episode_reward_mean',
name='episodes_total', value=result["episode_reward_mean"])
value=info["result"]["episodes_total"]) run.log(
name='episodes_total',
value=result["episodes_total"])

View File

@@ -84,7 +84,7 @@
"execution_count": null, "execution_count": null,
"metadata": { "metadata": {
"gather": { "gather": {
"logged": 1646347616697 "logged": 1683056824182
} }
}, },
"outputs": [], "outputs": [],
@@ -107,7 +107,7 @@
"execution_count": null, "execution_count": null,
"metadata": { "metadata": {
"gather": { "gather": {
"logged": 1646429058500 "logged": 1683056825821
} }
}, },
"outputs": [], "outputs": [],
@@ -136,7 +136,7 @@
"execution_count": null, "execution_count": null,
"metadata": { "metadata": {
"gather": { "gather": {
"logged": 1646359152101 "logged": 1683056826903
} }
}, },
"outputs": [], "outputs": [],
@@ -181,14 +181,14 @@
"execution_count": null, "execution_count": null,
"metadata": { "metadata": {
"gather": { "gather": {
"logged": 1646348040613 "logged": 1683056827252
} }
}, },
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core.experiment import Experiment\n", "from azureml.core.experiment import Experiment\n",
"\n", "\n",
"experiment_name = 'CartPole-v0-SC'\n", "experiment_name = 'CartPole-v1-SC'\n",
"experiment = Experiment(workspace=ws, name=experiment_name)" "experiment = Experiment(workspace=ws, name=experiment_name)"
] ]
}, },
@@ -250,7 +250,7 @@
"execution_count": null, "execution_count": null,
"metadata": { "metadata": {
"gather": { "gather": {
"logged": 1646437786449 "logged": 1683059658819
}, },
"jupyter": { "jupyter": {
"outputs_hidden": false, "outputs_hidden": false,
@@ -264,44 +264,31 @@
}, },
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core import Environment\n",
"from azureml.core import RunConfiguration, ScriptRunConfig, Experiment\n", "from azureml.core import RunConfiguration, ScriptRunConfig, Experiment\n",
"from azureml.core.runconfig import DockerConfiguration, RunConfiguration\n", "from azureml.core.runconfig import DockerConfiguration, RunConfiguration\n",
"\n", "\n",
"training_algorithm = \"PPO\"\n", "config_name = 'cartpole-ppo.yaml'\n",
"rl_environment = \"CartPole-v0\"\n",
"video_capture = True\n",
"if video_capture:\n",
" algorithm_config = '\\'{\"num_gpus\": 0, \"num_workers\": 1, \"monitor\": true}\\''\n",
"else:\n",
" algorithm_config = '\\'{\"num_gpus\": 0, \"num_workers\": 1, \"monitor\": false}\\''\n",
"\n",
"script_name = 'cartpole_training.py'\n", "script_name = 'cartpole_training.py'\n",
"video_capture = True\n",
"script_arguments = [\n", "script_arguments = [\n",
" '--run', training_algorithm,\n", " '--config', config_name\n",
" '--env', rl_environment,\n",
" '--stop', '\\'{\"episode_reward_mean\": 200, \"time_total_s\": 300}\\'',\n",
" '--config', algorithm_config,\n",
" '--checkpoint-freq', '2',\n",
" '--checkpoint-at-end',\n",
" '--local-dir', './logs'\n",
"]\n", "]\n",
"\n",
"ray_environment = Environment.get(ws, name=ray_environment_name)\n",
"run_config = RunConfiguration(communicator='OpenMpi')\n",
"run_config.target = compute_target\n",
"run_config.node_count = 1\n",
"run_config.environment = ray_environment\n",
"command=[\"python\", script_name, *script_arguments]\n", "command=[\"python\", script_name, *script_arguments]\n",
"\n", "\n",
"aml_run_config_ml = RunConfiguration(communicator='OpenMpi')\n",
"aml_run_config_ml.target = compute_target\n",
"aml_run_config_ml.node_count = 1\n",
"aml_run_config_ml.environment = ray_environment\n",
"\n",
"if video_capture:\n", "if video_capture:\n",
" command = [\"xvfb-run -s '-screen 0 640x480x16 -ac +extension GLX +render' \"] + command\n", " command = [\"xvfb-run -s '-screen 0 640x480x16 -ac +extension GLX +render' \"] + command\n",
" run_config.environment_variables[\"SDL_VIDEODRIVER\"] = \"dummy\"\n", " aml_run_config_ml.environment_variables[\"SDL_VIDEODRIVER\"] = \"dummy\"\n",
"\n", "\n",
"training_config = ScriptRunConfig(source_directory='./files',\n", "training_config = ScriptRunConfig(source_directory='./files',\n",
" command=command,\n", " command=command,\n",
" run_config = run_config\n", " run_config = aml_run_config_ml\n",
" )\n", " )\n",
"\n",
"training_run = experiment.submit(training_config)" "training_run = experiment.submit(training_config)"
] ]
}, },
@@ -309,42 +296,35 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"### Training script\n", "### Training configuration\n",
"\n", "\n",
"As recommended in RLlib documentations, we use Ray Tune API to run the training algorithm. All the RLlib built-in trainers are compatible with the Tune API. Here we use `tune.run()` to execute a built-in training algorithm. For convenience, down below you can see part of the entry script where we make this call.\n", "This is the training configuration (in yaml) that we use to train an agent to solve the CartPole problem using\n",
"the PPO algorithm.\n",
"\n", "\n",
"This is the list of parameters we are passing into `tune.run()` via the `script_params` parameter:\n", "```yaml\n",
"\n", "cartpole-ppo:\n",
"- `run_or_experiment`: name of the [built-in algorithm](https://ray.readthedocs.io/en/latest/rllib-algorithms.html#rllib-algorithms), 'PPO' in our example,\n", " env: CartPole-v1\n",
"- `config`: Algorithm-specific configuration. This includes specifying the environment, `env`, which in our example is the gym **[CartPole-v0](https://www.gymlibrary.dev/environments/classic_control/cart_pole/)** environment,\n", " run: PPO\n",
"- `stop`: stopping conditions, which could be any of the metrics returned by the trainer. Here we use \"mean of episode reward\", and \"total training time in seconds\" as stop conditions, and\n", " stop:\n",
"- `checkpoint_freq` and `checkpoint_at_end`: Frequency of taking checkpoints (number of training iterations between checkpoints), and if a checkpoint should be taken at the end.\n", " episode_reward_mean: 475\n",
"\n", " time_total_s: 300\n",
"We also specify the `local_dir`, the directory in which the training logs, checkpoints and other training artificats will be recorded. \n", " checkpoint_config:\n",
"\n", " checkpoint_frequency: 2\n",
"See [RLlib Training APIs](https://ray.readthedocs.io/en/latest/rllib-training.html#rllib-training-apis) for more details, and also [Training (tune.run, tune.Experiment)](https://ray.readthedocs.io/en/latest/tune/api_docs/execution.html#training-tune-run-tune-experiment) for the complete list of parameters.\n", " checkpoint_at_end: true\n",
"\n", " config:\n",
"```python\n", " # Works for both torch and tf.\n",
"import ray\n", " framework: torch\n",
"import ray.tune as tune\n", " gamma: 0.99\n",
"\n", " lr: 0.0003\n",
"if __name__ == \"__main__\":\n", " num_workers: 1\n",
"\n", " observation_filter: MeanStdFilter\n",
" # parse arguments ...\n", " num_sgd_iter: 6\n",
" \n", " vf_loss_coeff: 0.01\n",
" # Start ray head (single node)\n", " model:\n",
" os.system('ray start --head')\n", " fcnet_hiddens: [32]\n",
" ray.init(address='auto')\n", " fcnet_activation: linear\n",
"\n", " vf_share_layers: true\n",
" # Run training task using tune.run\n", " enable_connectors: true\n",
" tune.run(\n",
" run_or_experiment=args.run,\n",
" config=dict(args.config, env=args.env),\n",
" stop=args.stop,\n",
" checkpoint_freq=args.checkpoint_freq,\n",
" checkpoint_at_end=args.checkpoint_at_end,\n",
" local_dir=args.local_dir\n",
" )\n",
"```" "```"
] ]
}, },
@@ -362,7 +342,7 @@
"execution_count": null, "execution_count": null,
"metadata": { "metadata": {
"gather": { "gather": {
"logged": 1646437627002 "logged": 1683060289002
} }
}, },
"outputs": [], "outputs": [],
@@ -403,7 +383,11 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"metadata": {}, "metadata": {
"gather": {
"logged": 1683060297005
}
},
"outputs": [], "outputs": [],
"source": [ "source": [
"training_run.wait_for_completion()" "training_run.wait_for_completion()"
@@ -420,7 +404,11 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"metadata": {}, "metadata": {
"gather": {
"logged": 1683060517858
}
},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core import Run\n", "from azureml.core import Run\n",
@@ -441,7 +429,7 @@
"execution_count": null, "execution_count": null,
"metadata": { "metadata": {
"gather": { "gather": {
"logged": 1646437652309 "logged": 1683060521847
} }
}, },
"outputs": [], "outputs": [],
@@ -449,7 +437,7 @@
"from os import path\n", "from os import path\n",
"from distutils import dir_util\n", "from distutils import dir_util\n",
"\n", "\n",
"training_artifacts_path = path.join(\"logs\", training_algorithm)\n", "training_artifacts_path = path.join(\"logs\", \"cartpole-ppo\")\n",
"print(\"Training artifacts path:\", training_artifacts_path)\n", "print(\"Training artifacts path:\", training_artifacts_path)\n",
"\n", "\n",
"if path.exists(training_artifacts_path):\n", "if path.exists(training_artifacts_path):\n",
@@ -475,7 +463,7 @@
"execution_count": null, "execution_count": null,
"metadata": { "metadata": {
"gather": { "gather": {
"logged": 1646437657045 "logged": 1683060867182
} }
}, },
"outputs": [], "outputs": [],
@@ -514,7 +502,7 @@
"execution_count": null, "execution_count": null,
"metadata": { "metadata": {
"gather": { "gather": {
"logged": 1646437690241 "logged": 1683060871682
} }
}, },
"outputs": [], "outputs": [],
@@ -535,7 +523,7 @@
"execution_count": null, "execution_count": null,
"metadata": { "metadata": {
"gather": { "gather": {
"logged": 1646437692954 "logged": 1683060900828
} }
}, },
"outputs": [], "outputs": [],
@@ -543,7 +531,8 @@
"first_movie = mp4_files[0] if len(mp4_files) > 0 else None\n", "first_movie = mp4_files[0] if len(mp4_files) > 0 else None\n",
"print(\"First movie:\", first_movie)\n", "print(\"First movie:\", first_movie)\n",
"\n", "\n",
"display_movie(first_movie)" "if first_movie:\n",
" display_movie(first_movie)"
] ]
}, },
{ {
@@ -558,7 +547,7 @@
"execution_count": null, "execution_count": null,
"metadata": { "metadata": {
"gather": { "gather": {
"logged": 1646437717147 "logged": 1683060914790
} }
}, },
"outputs": [], "outputs": [],
@@ -566,7 +555,8 @@
"last_movie = mp4_files[-1] if len(mp4_files) > 0 else None\n", "last_movie = mp4_files[-1] if len(mp4_files) > 0 else None\n",
"print(\"Last movie:\", last_movie)\n", "print(\"Last movie:\", last_movie)\n",
"\n", "\n",
"display_movie(last_movie)" "if last_movie:\n",
" display_movie(last_movie)"
] ]
}, },
{ {
@@ -597,53 +587,65 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"metadata": {}, "metadata": {
"gather": {
"logged": 1683061167899
}
},
"outputs": [], "outputs": [],
"source": [ "source": [
"# A helper function to find checkpoint files in a directory\n", "# A helper function to find all of the checkpoint directories located within a larger directory tree\n",
"def find_checkpoints(file_path):\n", "def find_checkpoints(file_path):\n",
" print(\"Looking in path:\", file_path)\n", " print(\"Looking in path:\", file_path)\n",
" checkpoints = []\n", " checkpoints = []\n",
" for root, _, files in os.walk(file_path):\n", " for root, dirs, files in os.walk(file_path):\n",
" for name in files:\n", " trimmed_root = root[len(file_path)+1:]\n",
" if os.path.basename(root).startswith('checkpoint_'):\n", " for name in dirs:\n",
" checkpoints.append(path.join(root, name))\n", " if name.startswith('checkpoint_'):\n",
" return checkpoints\n", " checkpoints.append(path.join(trimmed_root, name))\n",
"\n", " return checkpoints"
"checkpoint_files = find_checkpoints(training_artifacts_path)"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"metadata": {}, "metadata": {
"gather": {
"logged": 1683061170184
}
},
"outputs": [], "outputs": [],
"source": [ "source": [
"# Find checkpoints and last checkpoint number\n", "# Find checkpoints and last checkpoint number\n",
"checkpoint_numbers = []\n", "checkpoint_files = find_checkpoints(training_artifacts_path)\n",
"for file in checkpoint_files:\n",
" file = os.path.basename(file)\n",
" if file.startswith('checkpoint-') and not file.endswith('.tune_metadata'):\n",
" checkpoint_numbers.append(int(file.split('-')[-1]))\n",
"\n", "\n",
"print(\"Checkpoints:\", checkpoint_numbers)\n", "last_checkpoint_path = None\n",
"last_checkpoint_number = -1\n",
"for checkpoint_file in checkpoint_files:\n",
" checkpoint_number = int(os.path.basename(checkpoint_file).split('_')[1])\n",
" if checkpoint_number > last_checkpoint_number:\n",
" last_checkpoint_path = checkpoint_file\n",
" last_checkpoint_number = checkpoint_number\n",
"\n", "\n",
"last_checkpoint_number = max(checkpoint_numbers)\n", "print(\"Last checkpoint number:\", last_checkpoint_number)\n",
"print(\"Last checkpoint number:\", last_checkpoint_number)" "print(\"Last checkpoint path:\", last_checkpoint_path)"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"metadata": {}, "metadata": {
"gather": {
"logged": 1683061176740
}
},
"outputs": [], "outputs": [],
"source": [ "source": [
"# Upload the checkpoint files and create a DataSet\n", "# Upload the checkpoint files and create a DataSet\n",
"from azureml.core import Dataset\n", "from azureml.data.dataset_factory import FileDatasetFactory\n",
"\n", "\n",
"datastore = ws.get_default_datastore()\n", "datastore = ws.get_default_datastore()\n",
"checkpoint_dataref = datastore.upload_files(checkpoint_files, target_path='cartpole_checkpoints_' + run_id, overwrite=True)\n", "checkpoint_ds = FileDatasetFactory.upload_directory(training_artifacts_path, (datastore, 'cartpole_checkpoints_' + training_run.id), overwrite=False, show_progress=True)"
"checkpoint_ds = Dataset.File.from_files(checkpoint_dataref)"
] ]
}, },
{ {
@@ -657,54 +659,45 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"metadata": {}, "metadata": {
"gather": {
"logged": 1683062377151
}
},
"outputs": [], "outputs": [],
"source": [ "source": [
"ray_environment_name = 'cartpole-ray-sc'\n", "ray_environment_name = 'cartpole-ray-sc'\n",
"\n", "\n",
"experiment_name = 'CartPole-v0-SC'\n", "experiment_name = 'CartPole-v1-SC'\n",
"training_algorithm = 'PPO'\n", "training_algorithm = 'PPO'\n",
"rl_environment = 'CartPole-v0'\n", "rl_environment = 'CartPole-v1'\n",
"\n", "\n",
"experiment = Experiment(workspace=ws, name=experiment_name)\n", "experiment = Experiment(workspace=ws, name=experiment_name)\n",
"ray_environment = Environment.get(workspace=ws, name=ray_environment_name)\n", "ray_environment = Environment.get(workspace=ws, name=ray_environment_name)\n",
"\n", "\n",
"script_name = 'cartpole_rollout.py'\n", "script_name = 'cartpole_rollout.py'\n",
"video_capture = True\n", "script_arguments = [\n",
"if video_capture:\n",
" script_arguments = ['--video-dir', './logs/video']\n",
"else:\n",
" script_arguments = ['--no-render']\n",
"script_arguments = script_arguments + [\n",
" '--run', training_algorithm,\n",
" '--env', rl_environment,\n",
" '--config', '{}',\n",
" '--steps', '2000',\n", " '--steps', '2000',\n",
" '--checkpoint-number', str(last_checkpoint_number),\n", " '--checkpoint', last_checkpoint_path,\n",
" '--artifacts-dataset', checkpoint_ds.as_named_input('artifacts_dataset'),\n", " '--algo', 'PPO',\n",
" '--artifacts-path', checkpoint_ds.as_named_input('artifacts_path').as_mount()\n", " '--render', 'true',\n",
" '--dataset_path', checkpoint_ds.as_named_input('dataset_path').as_mount()\n",
"]\n", "]\n",
"\n", "\n",
"command = [\"python\", script_name, *script_arguments]\n", "aml_run_config_ml = RunConfiguration(communicator='OpenMpi')\n",
"\n", "aml_run_config_ml.target = compute_target\n",
"if video_capture:\n", "aml_run_config_ml.node_count = 1\n",
" command = [\"xvfb-run -s '-screen 0 640x480x16 -ac +extension GLX +render' \"] + command\n", "aml_run_config_ml.environment = ray_environment\n",
" run_config.environment_variables[\"SDL_VIDEODRIVER\"] = \"dummy\"\n", "aml_run_config_ml.data\n",
"\n",
"run_config = RunConfiguration(communicator='OpenMpi')\n",
"run_config.target = compute_target\n",
"run_config.docker = DockerConfiguration(use_docker=True)\n",
"run_config.node_count = 1\n",
"run_config.environment = ray_environment\n",
"\n", "\n",
"rollout_config = ScriptRunConfig(\n", "rollout_config = ScriptRunConfig(\n",
" source_directory='./files',\n", " source_directory='./files',\n",
" command=command,\n", " script=script_name,\n",
" run_config=run_config\n", " arguments=script_arguments,\n",
" run_config = aml_run_config_ml\n",
" )\n", " )\n",
"\n", " \n",
"rollout_run = experiment.submit(rollout_config)\n", "rollout_run = experiment.submit(rollout_config)"
"rollout_run"
] ]
}, },
{ {
@@ -717,7 +710,11 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"metadata": {}, "metadata": {
"gather": {
"logged": 1683062379999
}
},
"outputs": [], "outputs": [],
"source": [ "source": [
"RunDetails(rollout_run).show()" "RunDetails(rollout_run).show()"
@@ -733,7 +730,11 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"metadata": {}, "metadata": {
"gather": {
"logged": 1683062451723
}
},
"outputs": [], "outputs": [],
"source": [ "source": [
"# Uncomment line below to cancel the run\n", "# Uncomment line below to cancel the run\n",
@@ -753,7 +754,11 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"metadata": {}, "metadata": {
"gather": {
"logged": 1683062747822
}
},
"outputs": [], "outputs": [],
"source": [ "source": [
"# Download rollout artifacts\n", "# Download rollout artifacts\n",
@@ -777,7 +782,11 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"metadata": {}, "metadata": {
"gather": {
"logged": 1683062752847
}
},
"outputs": [], "outputs": [],
"source": [ "source": [
"# Look for the downloaded movie in local directory\n", "# Look for the downloaded movie in local directory\n",
@@ -797,13 +806,18 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"metadata": {}, "metadata": {
"gather": {
"logged": 1683062763275
}
},
"outputs": [], "outputs": [],
"source": [ "source": [
"last_movie = mp4_files[-1] if len(mp4_files) > 0 else None\n", "last_movie = mp4_files[-1] if len(mp4_files) > 0 else None\n",
"print(\"Last movie:\", last_movie)\n", "print(\"Last movie:\", last_movie)\n",
"\n", "\n",
"display_movie(last_movie)" "if last_movie:\n",
" display_movie(last_movie)"
] ]
}, },
{ {
@@ -876,7 +890,17 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.7.3" "version": "3.8.5"
},
"microsoft": {
"host": {
"AzureML": {
"notebookHasBeenCompleted": true
}
},
"ms_spell_check": {
"ms_spell_check_language": "en"
}
}, },
"notice": "Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License.", "notice": "Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License.",
"nteract": { "nteract": {

View File

@@ -0,0 +1,24 @@
cartpole-ppo:
env: CartPole-v1
run: PPO
stop:
episode_reward_mean: 475
time_total_s: 300
checkpoint_config:
checkpoint_frequency: 2
checkpoint_at_end: true
config:
# Works for both torch and tf.
framework: torch
gamma: 0.99
lr: 0.0003
num_workers: 1
observation_filter: MeanStdFilter
num_sgd_iter: 6
vf_loss_coeff: 0.01
model:
fcnet_hiddens: [32]
fcnet_activation: linear
vf_share_layers: true
enable_connectors: true
render_env: true

View File

@@ -1,121 +1,108 @@
import os import os
import sys import sys
import argparse
import ray from ray.rllib.evaluate import RolloutSaver, rollout
from ray.rllib import rollout from ray_on_aml.core import Ray_On_AML
from ray.tune.registry import get_trainable_cls import ray.cloudpickle as cloudpickle
from ray.tune.utils import merge_dicts
from ray.tune.registry import get_trainable_cls, _global_registry, ENV_CREATOR
from azureml.core import Run from azureml.core import Run
from utils import callbacks from utils import callbacks
import collections
import copy
import gymnasium as gym
import json
from pathlib import Path
def run_rollout(args, parser):
config = args.config def run_rollout(checkpoint, algo, render, steps, episodes):
if not args.env: config_dir = os.path.dirname(checkpoint)
if not config.get("env"): config_path = os.path.join(config_dir, "params.pkl")
parser.error("the following arguments are required: --env") config = None
args.env = config.get("env")
# Create the Trainer from config. # Try parent directory.
cls = get_trainable_cls(args.run) if not os.path.exists(config_path):
agent = cls(env=args.env, config=config) config_path = os.path.join(config_dir, "../params.pkl")
# Load state from checkpoint. # Load the config from the pickled params file.
agent.restore(args.checkpoint) if os.path.exists(config_path):
num_steps = int(args.steps) with open(config_path, "rb") as f:
num_episodes = int(args.episodes) config = cloudpickle.load(f)
# If no pkl file found, require command line `--config`.
else:
raise ValueError("Could not find params.pkl in either the checkpoint dir or its parent directory")
# Determine the video output directory. # Make sure worker 0 has an Env.
use_arg_monitor = False config["create_env_on_driver"] = True
try:
args.video_dir
except AttributeError:
print("There is no such attribute: args.video_dir")
use_arg_monitor = True
video_dir = None # Merge with `evaluation_config` (first try from command line, then from
if not use_arg_monitor: # pkl file).
if args.monitor: evaluation_config = copy.deepcopy(config.get("evaluation_config", {}))
video_dir = os.path.join("./logs", "video") config = merge_dicts(config, evaluation_config)
elif args.video_dir: env = config.get("env")
video_dir = os.path.expanduser(args.video_dir)
# Make sure we have evaluation workers.
if not config.get("evaluation_num_workers"):
config["evaluation_num_workers"] = config.get("num_workers", 0)
if not config.get("evaluation_duration"):
config["evaluation_duration"] = 1
# Hard-override this as it raises a warning by Algorithm otherwise.
# Makes no sense anyways, to have it set to None as we don't call
# `Algorithm.train()` here.
config["evaluation_interval"] = 1
# Rendering settings.
config["render_env"] = render
# Create the Algorithm from config.
cls = get_trainable_cls(algo)
algorithm = cls(env=env, config=config)
# Load state from checkpoint, if provided.
if checkpoint:
algorithm.restore(checkpoint)
# Do the actual rollout. # Do the actual rollout.
with rollout.RolloutSaver( with RolloutSaver(
args.out, outfile=None,
args.use_shelve, use_shelve=False,
write_update_file=args.track_progress, write_update_file=False,
target_steps=num_steps, target_steps=steps,
target_episodes=num_episodes, target_episodes=episodes,
save_info=args.save_info) as saver: save_info=False,
if use_arg_monitor: ) as saver:
rollout.rollout( rollout(algorithm, env, steps, episodes, saver, not render)
agent, algorithm.stop()
args.env,
num_steps,
num_episodes,
saver,
args.no_render,
args.monitor)
else:
rollout.rollout(
agent, args.env,
num_steps,
num_episodes,
saver,
args.no_render, video_dir)
if __name__ == "__main__": if __name__ == "__main__":
# Start ray head (single node) # Start ray head (single node)
os.system('ray start --head') ray_on_aml = Ray_On_AML()
ray.init(address='auto') ray = ray_on_aml.getRay()
if ray:
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_path', required=True, help='Path to artifacts dataset')
parser.add_argument('--checkpoint', required=True, help='Name of checkpoint file directory')
parser.add_argument('--algo', required=True, help='Name of RL algorithm')
parser.add_argument('--render', default=False, required=False, help='True to render')
parser.add_argument('--steps', required=False, type=int, help='Number of steps to run')
parser.add_argument('--episodes', required=False, type=int, help='Number of episodes to run')
args = parser.parse_args()
# Add positional argument - serves as placeholder for checkpoint # Get a handle to run
argvc = sys.argv[1:] run = Run.get_context()
argvc.insert(0, 'checkpoint-placeholder')
# Parse arguments # Get handles to the training artifacts dataset and mount path
rollout_parser = rollout.create_parser() dataset_path = run.input_datasets['dataset_path']
rollout_parser.add_argument( # Find checkpoint file to be evaluated
'--checkpoint-number', required=False, type=int, default=1, checkpoint = os.path.join(dataset_path, args.checkpoint)
help='Checkpoint number of the checkpoint from which to roll out') print('Checkpoint:', checkpoint)
rollout_parser.add_argument( # Start rollout
'--artifacts-dataset', required=True, ray.init(address='auto')
help='The checkpoints artifacts dataset') run_rollout(checkpoint, args.algo, args.render, args.steps, args.episodes)
rollout_parser.add_argument(
'--artifacts-path', required=True,
help='The checkpoints artifacts path')
args = rollout_parser.parse_args(argvc)
# Get a handle to run
run = Run.get_context()
# Get handles to the training artifacts dataset and mount path
artifacts_dataset = run.input_datasets['artifacts_dataset']
artifacts_path = run.input_datasets['artifacts_path']
# Find checkpoint file to be evaluated
checkpoint_id = '-' + str(args.checkpoint_number)
checkpoint_files = list(filter(
lambda filename: filename.endswith(checkpoint_id),
artifacts_dataset.to_path()))
checkpoint_file = checkpoint_files[0]
if checkpoint_file[0] == '/':
checkpoint_file = checkpoint_file[1:]
checkpoint = os.path.join(artifacts_path, checkpoint_file)
print('Checkpoint:', checkpoint)
# Set rollout checkpoint
args.checkpoint = checkpoint
# Start rollout
run_rollout(args, rollout_parser)

View File

@@ -1,32 +1,34 @@
import os from ray_on_aml.core import Ray_On_AML
import ray import yaml
from ray.rllib import train from ray.tune.tune import run_experiments
from ray import tune
from utils import callbacks from utils import callbacks
import argparse
if __name__ == "__main__": if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--config', help='Path to yaml configuration file')
args = parser.parse_args()
# Parse arguments and add callbacks to config ray_on_aml = Ray_On_AML()
train_parser = train.create_parser() ray = ray_on_aml.getRay()
if ray: # in the headnode
ray.init(address="auto")
print("Configuring run from file: ", args.config)
experiment_config = None
with open(args.config, "r") as file:
experiment_config = yaml.safe_load(file)
args = train_parser.parse_args() # Set storage_path in each experiment configuration to ensure generated logs get picked up
args.config["callbacks"] = {"on_train_result": callbacks.on_train_result} # Also set monitor to ensure videos are captured
for experiment_name, experiment in experiment_config.items():
experiment["storage_path"] = "./logs"
experiment['config']['monitor'] = True
print(f'Config: {experiment_config}')
# Trace if video capturing is on trials = run_experiments(
if 'monitor' in args.config and args.config['monitor']: experiment_config,
print("Video capturing is ON!") callbacks=[callbacks.TrialCallback()],
verbose=2
# Start ray head (single node) )
os.system('ray start --head') else:
ray.init(address='auto') print("in worker node")
# Run training task using tune.run
tune.run(
run_or_experiment=args.run,
config=dict(args.config, env=args.env),
stop=args.stop,
checkpoint_freq=args.checkpoint_freq,
checkpoint_at_end=args.checkpoint_at_end,
local_dir=args.local_dir
)

View File

@@ -1,4 +1,4 @@
FROM mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04:20200423.v1 FROM mcr.microsoft.com/azureml/openmpi4.1.0-ubuntu20.04
RUN apt-get update && apt-get install -y --no-install-recommends \ RUN apt-get update && apt-get install -y --no-install-recommends \
python-opengl \ python-opengl \
@@ -8,31 +8,28 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
rm -rf /var/lib/apt/lists/* && \ rm -rf /var/lib/apt/lists/* && \
rm -rf /usr/share/man/* rm -rf /usr/share/man/*
RUN conda install -y conda=4.13.0 python=3.7 && conda clean -ay RUN pip install ray-on-aml==0.2.4 \
RUN pip install ray-on-aml==0.2.1 & \ ray==2.4.0 \
pip install --no-cache-dir \ ray[rllib]==2.4.0 \
azureml-defaults \ mlflow==2.3.1 \
azureml-dataset-runtime[fuse,pandas] \ azureml-defaults==1.50.0 \
azureml-contrib-reinforcementlearning \ azureml-dataset-runtime[fuse,pandas]==1.50.0 \
gputil \ azureml-contrib-reinforcementlearning==1.50.0 \
scipy \ gputil==1.4.0 \
pyglet \ scipy==1.9.1 \
cloudpickle==1.3.0 \ pyglet==2.0.6 \
tensorboardX \ cloudpickle==2.2.1 \
tensorflow==1.14.0 \ tensorflow==2.11.0 \
tabulate \ tensorflow-probability==0.19.0 \
dm_tree \ torch \
lz4 \ tabulate==0.9.0 \
psutil \ dm_tree==0.1.8 \
setproctitle \ lz4==4.3.2 \
pygame \ psutil==5.9.4 \
gym[classic_control]==0.19.0 && \ setproctitle==1.3.2 \
conda install -y -c conda-forge x264='1!152.20180717' ffmpeg=4.0.2 && \ pygame==2.1.0 \
conda install -c anaconda opencv gymnasium[classic_control]==0.26.3 \
gym[classic_control]==0.26.2
RUN pip install protobuf==3.20.0 # Display the exact versions we have installed
RUN pip freeze
RUN pip install --upgrade ray==0.8.3 \
ray[rllib,dashboard,tune]==0.8.3
RUN pip install 'msrest<0.7.0'

View File

@@ -3,21 +3,20 @@
''' '''
from azureml.core import Run from azureml.core import Run
from ray import tune
from ray.tune import Callback
from ray.air import session
def on_train_result(info): class TrialCallback(Callback):
'''Callback on train result to record metrics returned by trainer.
''' def on_trial_result(self, iteration, trials, trial, result, **info):
run = Run.get_context() '''Callback on train result to record metrics returned by trainer.
run.log( '''
name='episode_reward_mean', run = Run.get_context()
value=info["result"]["episode_reward_mean"]) run.log(
run.log( name='episode_reward_mean',
name='episodes_total', value=result["episode_reward_mean"])
value=info["result"]["episodes_total"]) run.log(
run.log( name='episodes_total',
name='perf_cpu_percent', value=result["episodes_total"])
value=info["result"]["perf"]["cpu_util_percent"])
run.log(
name='perf_memory_percent',
value=info["result"]["perf"]["ram_util_percent"])

View File

@@ -1,39 +0,0 @@
# DisableDockerDetector "Disabled to unblock PRs until the owner can fix the file. Not used in any prod deployments - only as a documentation for the customers"
FROM akdmsft/particle-cpu
RUN conda install -c anaconda python=3.7
# Install required pip packages
RUN pip3 install --upgrade pip setuptools && pip3 install --upgrade \
pandas \
matplotlib \
psutil \
numpy \
scipy \
gym \
azureml-defaults \
tensorboardX \
tensorflow==1.15 \
tensorflow-probability==0.8.0 \
onnxruntime \
tf2onnx \
cloudpickle==1.1.1 \
tabulate \
dm_tree \
lz4 \
opencv-python
RUN cd multiagent-particle-envs && \
pip3 install -e . && \
pip3 install --upgrade pyglet==1.3.2
RUN pip3 install ray-on-aml==0.1.6
RUN pip install protobuf==3.20.0
RUN pip3 install --upgrade \
ray==0.8.7 \
ray[rllib]==0.8.7 \
ray[tune]==0.8.7
RUN pip install 'msrest<0.7.0'

View File

@@ -1,70 +0,0 @@
# MIT License
# Copyright (c) 2018 OpenAI
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import gym
class MultiDiscrete(gym.Space):
"""
- The multi-discrete action space consists of a series of discrete action spaces with different
parameters
- It can be adapted to both a Discrete action space or a continuous (Box) action space
- It is useful to represent game controllers or keyboards where each key can be represented as
a discrete action space
- It is parametrized by passing an array of arrays containing [min, max] for each discrete action
space where the discrete action space can take any integers from `min` to `max` (both inclusive)
Note: A value of 0 always need to represent the NOOP action.
e.g. Nintendo Game Controller
- Can be conceptualized as 3 discrete action spaces:
1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4
2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
- Can be initialized as
MultiDiscrete([ [0,4], [0,1], [0,1] ])
"""
def __init__(self, array_of_param_array):
self.low = np.array([x[0] for x in array_of_param_array])
self.high = np.array([x[1] for x in array_of_param_array])
self.num_discrete_space = self.low.shape[0]
def sample(self):
""" Returns a array with one sample from each discrete action space """
# For each row: round(random .* (max - min) + min, 0)
# random_array = prng.np_random.rand(self.num_discrete_space)
random_array = np.random.RandomState().rand(self.num_discrete_space)
return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)]
def contains(self, x):
return len(x) == self.num_discrete_space \
and (np.array(x) >= self.low).all() \
and (np.array(x) <= self.high).all()
@property
def shape(self):
return self.num_discrete_space
def __repr__(self):
return "MultiDiscrete" + str(self.num_discrete_space)
def __eq__(self, other):
return np.array_equal(self.low, other.low) and np.array_equal(self.high, other.high)
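
Since this file is being deleted, a short usage sketch (derived from the class's own docstring, and assuming the `MultiDiscrete` class above is in scope) may help readers see what the replacement gym/gymnasium spaces need to cover:

```python
# Usage sketch for the removed MultiDiscrete space, per its docstring:
# arrow keys (0-4), button A (0-1), button B (0-1).
space = MultiDiscrete([[0, 4], [0, 1], [0, 1]])

action = space.sample()       # e.g. [2, 0, 1], one value per sub-space
assert space.contains(action)
print(action, space.shape)    # shape == number of sub-spaces, i.e. 3
```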

View File

@@ -1,413 +0,0 @@
# MIT License
# Copyright (c) 2018 OpenAI
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
2D rendering framework
"""
from __future__ import division
import os
import six
import sys
from gym import error
import math
import numpy as np
import pyglet
from pyglet.gl import glEnable, glHint, glLineWidth, glBlendFunc, glClearColor, glPushMatrix, \
glTranslatef, glRotatef, glScalef, glPopMatrix, glColor4f, glBegin, glVertex3f, glEnd, glLineStipple, \
glDisable, glVertex2f, GL_BLEND, GL_LINE_SMOOTH, GL_LINE_SMOOTH_HINT, GL_NICEST, GL_SRC_ALPHA, \
GL_ONE_MINUS_SRC_ALPHA, GL_LINE_STIPPLE, GL_POINTS, GL_QUADS, GL_TRIANGLES, GL_POLYGON, GL_LINE_LOOP, \
GL_LINE_STRIP, GL_LINES
if "Apple" in sys.version:
if 'DYLD_FALLBACK_LIBRARY_PATH' in os.environ:
os.environ['DYLD_FALLBACK_LIBRARY_PATH'] += ':/usr/lib'
# (JDS 2016/04/15): avoid bug on Anaconda 2.3.0 / Yosemite
RAD2DEG = 57.29577951308232
def get_display(spec):
"""Convert a display specification (such as :0) into an actual Display
object.
Pyglet only supports multiple Displays on Linux.
"""
if spec is None:
return None
elif isinstance(spec, six.string_types):
return pyglet.canvas.Display(spec)
else:
raise error.Error('Invalid display specification: {}. (Must be a string like :0 or None.)'.format(spec))
class Viewer(object):
    def __init__(self, width, height, display=None):
        display = get_display(display)
        self.width = width
        self.height = height
        self.window = pyglet.window.Window(width=width, height=height, display=display)
        self.window.on_close = self.window_closed_by_user
        self.geoms = []
        self.onetime_geoms = []
        self.transform = Transform()
        glEnable(GL_BLEND)
        # glEnable(GL_MULTISAMPLE)
        glEnable(GL_LINE_SMOOTH)
        # glHint(GL_LINE_SMOOTH_HINT, GL_DONT_CARE)
        glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)
        glLineWidth(2.0)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)

    def close(self):
        self.window.close()

    def window_closed_by_user(self):
        self.close()

    def set_bounds(self, left, right, bottom, top):
        assert right > left and top > bottom
        scalex = self.width / (right - left)
        scaley = self.height / (top - bottom)
        self.transform = Transform(
            translation=(-left * scalex, -bottom * scaley),
            scale=(scalex, scaley))

    def add_geom(self, geom):
        self.geoms.append(geom)

    def add_onetime(self, geom):
        self.onetime_geoms.append(geom)

    def render(self, return_rgb_array=False):
        glClearColor(1, 1, 1, 1)
        self.window.clear()
        self.window.switch_to()
        self.window.dispatch_events()
        self.transform.enable()
        for geom in self.geoms:
            geom.render()
        for geom in self.onetime_geoms:
            geom.render()
        self.transform.disable()
        arr = None
        if return_rgb_array:
            buffer = pyglet.image.get_buffer_manager().get_color_buffer()
            image_data = buffer.get_image_data()
            # np.fromstring is deprecated; np.frombuffer reads the same bytes.
            arr = np.frombuffer(image_data.data, dtype=np.uint8)
            # In https://github.com/openai/gym-http-api/issues/2, we
            # discovered that someone using Xmonad on Arch was having
            # a window of size 598 x 398, though a 600 x 400 window
            # was requested. (Guess Xmonad was preserving a pixel for
            # the boundary.) So we use the buffer height/width rather
            # than the requested one.
            arr = arr.reshape(buffer.height, buffer.width, 4)
            arr = arr[::-1, :, 0:3]
        self.window.flip()
        self.onetime_geoms = []
        return arr

    # Convenience
    def draw_circle(self, radius=10, res=30, filled=True, **attrs):
        geom = make_circle(radius=radius, res=res, filled=filled)
        _add_attrs(geom, attrs)
        self.add_onetime(geom)
        return geom

    def draw_polygon(self, v, filled=True, **attrs):
        geom = make_polygon(v=v, filled=filled)
        _add_attrs(geom, attrs)
        self.add_onetime(geom)
        return geom

    def draw_polyline(self, v, **attrs):
        geom = make_polyline(v=v)
        _add_attrs(geom, attrs)
        self.add_onetime(geom)
        return geom

    def draw_line(self, start, end, **attrs):
        geom = Line(start, end)
        _add_attrs(geom, attrs)
        self.add_onetime(geom)
        return geom

    def get_array(self):
        self.window.flip()
        image_data = pyglet.image.get_buffer_manager().get_color_buffer().get_image_data()
        self.window.flip()
        arr = np.frombuffer(image_data.data, dtype=np.uint8)
        arr = arr.reshape(self.height, self.width, 4)
        return arr[::-1, :, 0:3]
def _add_attrs(geom, attrs):
    if "color" in attrs:
        geom.set_color(*attrs["color"])
    if "linewidth" in attrs:
        geom.set_linewidth(attrs["linewidth"])
class Geom(object):
    def __init__(self):
        self._color = Color((0, 0, 0, 1.0))
        self.attrs = [self._color]

    def render(self):
        for attr in reversed(self.attrs):
            attr.enable()
        self.render1()
        for attr in self.attrs:
            attr.disable()

    def render1(self):
        raise NotImplementedError

    def add_attr(self, attr):
        self.attrs.append(attr)

    def set_color(self, r, g, b, alpha=1):
        self._color.vec4 = (r, g, b, alpha)


class Attr(object):
    def enable(self):
        raise NotImplementedError

    def disable(self):
        pass


class Transform(Attr):
    def __init__(self, translation=(0.0, 0.0), rotation=0.0, scale=(1, 1)):
        self.set_translation(*translation)
        self.set_rotation(rotation)
        self.set_scale(*scale)

    def enable(self):
        glPushMatrix()
        glTranslatef(self.translation[0], self.translation[1], 0)  # translate to GL loc point
        glRotatef(RAD2DEG * self.rotation, 0, 0, 1.0)
        glScalef(self.scale[0], self.scale[1], 1)

    def disable(self):
        glPopMatrix()

    def set_translation(self, newx, newy):
        self.translation = (float(newx), float(newy))

    def set_rotation(self, new):
        self.rotation = float(new)

    def set_scale(self, newx, newy):
        self.scale = (float(newx), float(newy))


class Color(Attr):
    def __init__(self, vec4):
        self.vec4 = vec4

    def enable(self):
        glColor4f(*self.vec4)


class LineStyle(Attr):
    def __init__(self, style):
        self.style = style

    def enable(self):
        glEnable(GL_LINE_STIPPLE)
        glLineStipple(1, self.style)

    def disable(self):
        glDisable(GL_LINE_STIPPLE)


class LineWidth(Attr):
    def __init__(self, stroke):
        self.stroke = stroke

    def enable(self):
        glLineWidth(self.stroke)
class Point(Geom):
    def __init__(self):
        Geom.__init__(self)

    def render1(self):
        glBegin(GL_POINTS)  # draw point
        glVertex3f(0.0, 0.0, 0.0)
        glEnd()


class FilledPolygon(Geom):
    def __init__(self, v):
        Geom.__init__(self)
        self.v = v

    def render1(self):
        if len(self.v) == 4:
            glBegin(GL_QUADS)
        elif len(self.v) > 4:
            glBegin(GL_POLYGON)
        else:
            glBegin(GL_TRIANGLES)
        for p in self.v:
            glVertex3f(p[0], p[1], 0)  # draw each vertex
        glEnd()
        color = (
            self._color.vec4[0] * 0.5,
            self._color.vec4[1] * 0.5,
            self._color.vec4[2] * 0.5,
            self._color.vec4[3] * 0.5)
        glColor4f(*color)
        glBegin(GL_LINE_LOOP)
        for p in self.v:
            glVertex3f(p[0], p[1], 0)  # draw each vertex
        glEnd()
def make_circle(radius=10, res=30, filled=True):
    points = []
    for i in range(res):
        ang = 2 * math.pi * i / res
        points.append((math.cos(ang) * radius, math.sin(ang) * radius))
    if filled:
        return FilledPolygon(points)
    else:
        return PolyLine(points, True)


def make_polygon(v, filled=True):
    if filled:
        return FilledPolygon(v)
    else:
        return PolyLine(v, True)


def make_polyline(v):
    return PolyLine(v, False)


def make_capsule(length, width):
    l, r, t, b = 0, length, width / 2, -width / 2
    box = make_polygon([(l, b), (l, t), (r, t), (r, b)])
    circ0 = make_circle(width / 2)
    circ1 = make_circle(width / 2)
    circ1.add_attr(Transform(translation=(length, 0)))
    geom = Compound([box, circ0, circ1])
    return geom
class Compound(Geom):
    def __init__(self, gs):
        Geom.__init__(self)
        self.gs = gs
        for g in self.gs:
            g.attrs = [a for a in g.attrs if not isinstance(a, Color)]

    def render1(self):
        for g in self.gs:
            g.render()


class PolyLine(Geom):
    def __init__(self, v, close):
        Geom.__init__(self)
        self.v = v
        self.close = close
        self.linewidth = LineWidth(1)
        self.add_attr(self.linewidth)

    def render1(self):
        glBegin(GL_LINE_LOOP if self.close else GL_LINE_STRIP)
        for p in self.v:
            glVertex3f(p[0], p[1], 0)  # draw each vertex
        glEnd()

    def set_linewidth(self, x):
        self.linewidth.stroke = x


class Line(Geom):
    def __init__(self, start=(0.0, 0.0), end=(0.0, 0.0)):
        Geom.__init__(self)
        self.start = start
        self.end = end
        self.linewidth = LineWidth(1)
        self.add_attr(self.linewidth)

    def render1(self):
        glBegin(GL_LINES)
        glVertex2f(*self.start)
        glVertex2f(*self.end)
        glEnd()
class Image(Geom):
    def __init__(self, fname, width, height):
        Geom.__init__(self)
        self.width = width
        self.height = height
        img = pyglet.image.load(fname)
        self.img = img
        self.flip = False

    def render1(self):
        self.img.blit(-self.width / 2, -self.height / 2, width=self.width, height=self.height)


class SimpleImageViewer(object):
    def __init__(self, display=None):
        self.window = None
        self.isopen = False
        self.display = display

    def imshow(self, arr):
        if self.window is None:
            height, width, channels = arr.shape
            self.window = pyglet.window.Window(width=width, height=height, display=self.display)
            self.width = width
            self.height = height
            self.isopen = True
        assert arr.shape == (self.height, self.width, 3), "You passed in an image with the wrong shape"
        image = pyglet.image.ImageData(self.width, self.height, 'RGB', arr.tobytes(), pitch=self.width * -3)
        self.window.clear()
        self.window.switch_to()
        self.window.dispatch_events()
        image.blit(0, 0)
        self.window.flip()

    def close(self):
        if self.isopen:
            self.window.close()
            self.isopen = False

    def __del__(self):
        self.close()
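For orientation, here is a minimal usage sketch of the Viewer API above. It is illustrative only, not part of the diff, and assumes a machine with a working OpenGL display; the sizes and colors are arbitrary.

# Hypothetical usage sketch for the rendering framework above.
viewer = Viewer(width=600, height=400)
viewer.set_bounds(left=-2.0, right=2.0, bottom=-1.5, top=1.5)  # world coordinates

circle = make_circle(radius=0.5)
circle.set_color(0.8, 0.2, 0.2)
circle.add_attr(Transform(translation=(1.0, 0.5)))  # position the geom in world space
viewer.add_geom(circle)  # persistent geom, redrawn every frame

frame = viewer.render(return_rgb_array=True)  # (height, width, 3) uint8 array
print(frame.shape)
viewer.close()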


@@ -1,123 +0,0 @@
import os

from ray_on_aml.core import Ray_On_AML
from ray.tune import run_experiments
from ray.tune.registry import register_trainable, register_env, get_trainable_cls
import ray.rllib.contrib.maddpg.maddpg as maddpg

from rllib_multiagent_particle_env import env_creator
from util import parse_args


def setup_ray():
    ray_on_aml = Ray_On_AML()
    ray_on_aml.getRay()
    register_env('particle', env_creator)


def gen_policy(args, env, id):
    # Note: the comprehension's `id` rebinds the parameter name within the
    # comprehension scope; agents below num_adversaries use the adversary policy.
    use_local_critic = [
        args.adv_policy == 'ddpg' if id < args.num_adversaries else
        args.good_policy == 'ddpg' for id in range(env.num_agents)
    ]
    return (
        None,
        env.observation_space_dict[id],
        env.action_space_dict[id],
        {
            'agent_id': id,
            'use_local_critic': use_local_critic[id],
            'obs_space_dict': env.observation_space_dict,
            'act_space_dict': env.action_space_dict,
        }
    )


def gen_policies(args, env_config):
    env = env_creator(env_config)
    return {'policy_%d' % i: gen_policy(args, env, i) for i in range(len(env.observation_space_dict))}


def to_multiagent_config(policies):
    policy_ids = list(policies.keys())
    return {
        'policies': policies,
        'policy_mapping_fn': lambda index: policy_ids[index]
    }


def train(args, env_config):
    def stop(trial_id, result):
        max_train_time = int(os.environ.get('AML_MAX_TRAIN_TIME_SECONDS', 2 * 60 * 60))
        return result['episode_reward_mean'] >= args.final_reward \
            or result['time_total_s'] >= max_train_time

    run_experiments({
        'MADDPG_RLLib': {
            'run': 'contrib/MADDPG',
            'env': 'particle',
            'stop': stop,
            # Uncomment to enable more frequent checkpoints:
            # 'checkpoint_freq': args.checkpoint_freq,
            'checkpoint_at_end': True,
            'local_dir': args.local_dir,
            'restore': args.restore,
            'config': {
                # === Log ===
                'log_level': 'ERROR',
                # === Environment ===
                'env_config': env_config,
                'num_envs_per_worker': args.num_envs_per_worker,
                'horizon': args.max_episode_len,
                # === Policy Config ===
                # --- Model ---
                'good_policy': args.good_policy,
                'adv_policy': args.adv_policy,
                'actor_hiddens': [args.num_units] * 2,
                'actor_hidden_activation': 'relu',
                'critic_hiddens': [args.num_units] * 2,
                'critic_hidden_activation': 'relu',
                'n_step': args.n_step,
                'gamma': args.gamma,
                # --- Exploration ---
                'tau': 0.01,
                # --- Replay buffer ---
                'buffer_size': int(1e6),
                # --- Optimization ---
                'actor_lr': args.lr,
                'critic_lr': args.lr,
                'learning_starts': args.train_batch_size * args.max_episode_len,
                'sample_batch_size': args.sample_batch_size,
                'train_batch_size': args.train_batch_size,
                'batch_mode': 'truncate_episodes',
                # --- Parallelism ---
                'num_workers': args.num_workers,
                'num_gpus': args.num_gpus,
                'num_gpus_per_worker': 0,
                # === Multi-agent setting ===
                'multiagent': to_multiagent_config(gen_policies(args, env_config)),
            },
        },
    }, verbose=1)


if __name__ == '__main__':
    args = parse_args()
    setup_ray()
    env_config = {
        'scenario_name': args.scenario,
        'horizon': args.max_episode_len,
        'video_frequency': args.checkpoint_freq,
    }
    train(args, env_config)
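As a side note on to_multiagent_config above: RLlib calls policy_mapping_fn with an agent index, and the lambda routes agent i to the i-th generated policy id. A standalone sketch of that mapping, with hypothetical stub policy specs (not part of the diff):

# Illustration of the agent-index -> policy-id routing used above.
policies = {'policy_%d' % i: None for i in range(3)}  # stub policy specs
policy_ids = list(policies.keys())
policy_mapping_fn = lambda index: policy_ids[index]
assert policy_mapping_fn(0) == 'policy_0'
assert policy_mapping_fn(2) == 'policy_2'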


@@ -1,113 +0,0 @@
# Some code taken from: https://github.com/wsjeon/maddpg-rllib/
import imp
import os

import gym
from gym import wrappers
from ray import rllib

from multiagent.environment import MultiAgentEnv
import multiagent.scenarios as scenarios

CUSTOM_SCENARIOS = ['simple_switch']


class ParticleEnvRenderWrapper(gym.Wrapper):
    def __init__(self, env, horizon):
        super().__init__(env)
        self.horizon = horizon

    def reset(self):
        self._num_steps = 0
        return self.env.reset()

    def render(self, mode):
        if mode == 'human':
            self.env.render(mode=mode)
        else:
            return self.env.render(mode=mode)[0]

    def step(self, actions):
        obs_list, rew_list, done_list, info_list = self.env.step(actions)
        self._num_steps += 1
        done = (all(done_list) or self._num_steps >= self.horizon)
        # Gym's Monitor expects the reward to be a number. It is only used by
        # the Monitor's stats reporter, which we're not interested in. To make
        # video recording work, we package the per-agent rewards in the info
        # object and extract them below.
        return obs_list, 0, done, [rew_list, done_list, info_list]


class RLlibMultiAgentParticleEnv(rllib.MultiAgentEnv):
    def __init__(self, scenario_name, horizon, monitor_enabled=False, video_frequency=500):
        self._env = _make_env(scenario_name, horizon, monitor_enabled, video_frequency)
        self.num_agents = self._env.n
        self.agent_ids = list(range(self.num_agents))
        self.observation_space_dict = self._make_dict(self._env.observation_space)
        self.action_space_dict = self._make_dict(self._env.action_space)

    def reset(self):
        obs_dict = self._make_dict(self._env.reset())
        return obs_dict

    def step(self, action_dict):
        actions = list(action_dict.values())
        obs_list, _, _, infos = self._env.step(actions)
        rew_list, done_list, _ = infos
        obs_dict = self._make_dict(obs_list)
        rew_dict = self._make_dict(rew_list)
        done_dict = self._make_dict(done_list)
        done_dict['__all__'] = all(done_list)
        info_dict = self._make_dict([{'done': done} for done in done_list])
        return obs_dict, rew_dict, done_dict, info_dict

    def render(self, mode='human'):
        self._env.render(mode=mode)

    def _make_dict(self, values):
        return dict(zip(self.agent_ids, values))


def _video_callable(video_frequency):
    def should_record_video(episode_id):
        return episode_id % video_frequency == 0
    return should_record_video


def _make_env(scenario_name, horizon, monitor_enabled, video_frequency):
    if scenario_name in CUSTOM_SCENARIOS:
        # Scenario file must exist locally
        file_path = os.path.join(os.path.dirname(__file__), scenario_name + '.py')
        scenario = imp.load_source('', file_path).Scenario()
    else:
        scenario = scenarios.load(scenario_name + '.py').Scenario()
    world = scenario.make_world()
    env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation)
    env.metadata['video.frames_per_second'] = 8
    env = ParticleEnvRenderWrapper(env, horizon)
    if not monitor_enabled:
        return env
    return wrappers.Monitor(env, './logs/videos', resume=True, video_callable=_video_callable(video_frequency))


def env_creator(config):
    # Record video only on the first worker's first sub-environment.
    monitor_enabled = False
    if hasattr(config, 'worker_index') and hasattr(config, 'vector_index'):
        monitor_enabled = (config.worker_index == 1 and config.vector_index == 0)
    return RLlibMultiAgentParticleEnv(**config, monitor_enabled=monitor_enabled)
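For illustration, the dict plumbing in RLlibMultiAgentParticleEnv.step reduces to zipping per-agent lists with agent ids, plus the '__all__' key RLlib uses to detect episode end. A standalone sketch with hypothetical values (not part of the diff):

# Converting the particle env's per-agent lists into RLlib's agent-keyed dicts.
agent_ids = [0, 1, 2]
rew_list = [1.0, -0.5, 0.25]
done_list = [False, False, True]

rew_dict = dict(zip(agent_ids, rew_list))   # {0: 1.0, 1: -0.5, 2: 0.25}
done_dict = dict(zip(agent_ids, done_list))
done_dict['__all__'] = all(done_list)       # episode ends only when all agents are done
print(rew_dict, done_dict)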
