From 11e8ed2bab9c837ee915327476d23527fc81def6 Mon Sep 17 00:00:00 2001 From: vizhur Date: Wed, 27 May 2020 02:45:07 +0000 Subject: [PATCH] update samples from Release-53 as a part of SDK release --- README.md | 1 + configuration.ipynb | 2 +- .../automated-machine-learning/README.md | 2 +- ...fication-bank-marketing-all-features.ipynb | 2 +- ...-ml-classification-credit-card-fraud.ipynb | 2 +- .../auto-ml-classification-text-dnn.ipynb | 2 +- .../auto-ml-continuous-retraining.ipynb | 2 +- .../auto-ml-forecasting-beer-remote.ipynb | 2 +- .../auto-ml-forecasting-bike-share.ipynb | 14 +- .../auto-ml-forecasting-energy-demand.ipynb | 28 +- .../auto-ml-forecasting-function.ipynb | 87 ++- .../auto-ml-forecasting-function.yml | 0 .../forecast_function_at_train.png | Bin .../forecast_function_away_from_train.png | Bin .../recursive_forecast_iter1.png | Bin 0 -> 26559 bytes .../recursive_forecast_iter2.png | Bin 0 -> 30353 bytes .../recursive_forecast_overview_small.png | Bin 0 -> 21897 bytes ...to-ml-forecasting-orange-juice-sales.ipynb | 18 +- ...assification-credit-card-fraud-local.ipynb | 2 +- ...regression-explanation-featurization.ipynb | 2 +- .../score_explain.py | 2 +- .../regression/auto-ml-regression.ipynb | 2 +- .../deployment/deploy-to-local/myenv.yml | 2 +- .../register-model-deploy-local.ipynb | 3 +- ...e-app-insights-in-production-service.ipynb | 144 ++--- .../onnx-convert-aml-deploy-tinyyolo.ipynb | 9 +- .../onnx/onnx-convert-aml-deploy-tinyyolo.yml | 1 + ...e-facial-expression-recognition-deploy.yml | 2 +- .../onnx/onnx-inference-mnist-deploy.yml | 2 +- .../tensorflow-flower-predict-input.json | 10 - ...tensorflow-model-register-and-deploy.ipynb | 260 --------- .../tensorflow-model-register-and-deploy.yml | 4 - .../explain-model-on-amlcompute.ipynb | 6 +- .../remote-explanation/train_explain.py | 2 +- .../scoring-time/score_local_explain.py | 2 +- .../scoring-time/score_remote_explain.py | 2 +- ...ain-explain-model-locally-and-deploy.ipynb | 2 +- 
...plain-model-on-amlcompute-and-deploy.ipynb | 6 +- .../scoring-time/train_explain.py | 2 +- ...urebatch-to-run-a-windows-executable.ipynb | 2 +- ...casing-dataset-and-pipelineparameter.ipynb | 510 ++++++++++++++++++ ...owcasing-dataset-and-pipelineparameter.yml | 5 + ...nes-use-databricks-as-compute-target.ipynb | 2 +- ...-taxi-data-regression-model-building.ipynb | 4 +- .../parallel-run/README.md | 11 +- .../file-dataset-image-inference-mnist.ipynb | 86 ++- .../file-dataset-image-inference-mnist.yml | 2 +- .../tabular-dataset-inference-iris.ipynb | 34 +- .../tabular-dataset-inference-iris.yml | 2 +- .../pipeline-style-transfer.ipynb | 42 +- .../pipeline-style-transfer.yml | 1 - .../reinforcement-learning/README.md | 2 +- .../files/pong_rllib.py | 29 +- .../pong_rllib.ipynb | 169 +++--- .../cartpole_ci.ipynb | 50 +- .../{cartpole_cc.ipynb => cartpole_sc.ipynb} | 66 +-- .../{cartpole_cc.yml => cartpole_sc.yml} | 2 +- .../files/minecraft_train.py | 6 +- .../minecraft.ipynb | 7 +- .../setup/devenv_setup.ipynb | 22 +- .../logging-api/logging-api.ipynb | 2 +- .../tensorboard/tensorboard.ipynb | 2 + .../train-in-spark/train-in-spark.ipynb | 27 +- .../train-on-remote-vm.ipynb | 26 +- .../pipeline-for-image-classification.ipynb | 3 +- .../pipeline-for-image-classification.yml | 1 - .../tabular-timeseries-dataset-filtering.yml | 1 - .../train-with-datasets.yml | 1 - index.md | 6 +- setup-environment/configuration.ipynb | 2 +- .../tutorial-1st-experiment-sdk-train.ipynb | 2 +- .../scripts/batch_scoring.py | 13 +- ...ipeline-batch-scoring-classification.ipynb | 27 +- ...-pipeline-batch-scoring-classification.yml | 2 +- 74 files changed, 1085 insertions(+), 713 deletions(-) rename how-to-use-azureml/automated-machine-learning/{forecasting-high-frequency => forecasting-forecast-function}/auto-ml-forecasting-function.ipynb (87%) rename how-to-use-azureml/automated-machine-learning/{forecasting-high-frequency => forecasting-forecast-function}/auto-ml-forecasting-function.yml 
(100%) rename how-to-use-azureml/automated-machine-learning/{forecasting-high-frequency => forecasting-forecast-function}/forecast_function_at_train.png (100%) rename how-to-use-azureml/automated-machine-learning/{forecasting-high-frequency => forecasting-forecast-function}/forecast_function_away_from_train.png (100%) create mode 100644 how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/recursive_forecast_iter1.png create mode 100644 how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/recursive_forecast_iter2.png create mode 100644 how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/recursive_forecast_overview_small.png delete mode 100644 how-to-use-azureml/deployment/tensorflow/tensorflow-flower-predict-input.json delete mode 100644 how-to-use-azureml/deployment/tensorflow/tensorflow-model-register-and-deploy.ipynb delete mode 100644 how-to-use-azureml/deployment/tensorflow/tensorflow-model-register-and-deploy.yml create mode 100644 how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.ipynb create mode 100644 how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.yml rename how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/{cartpole_cc.ipynb => cartpole_sc.ipynb} (91%) rename how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/{cartpole_cc.yml => cartpole_sc.yml} (84%) diff --git a/README.md b/README.md index 33d704d1..bc701a4b 100644 --- a/README.md +++ b/README.md @@ -40,6 +40,7 @@ The [How to use Azure ML](./how-to-use-azureml) folder contains specific example - [Deployment](./how-to-use-azureml/deployment) - Examples showing how to deploy and manage machine learning models and solutions - [Azure Databricks](./how-to-use-azureml/azure-databricks) - Examples showing how to use Azure ML with Azure Databricks - [Monitor 
Models](./how-to-use-azureml/monitor-models) - Examples showing how to enable model monitoring services such as DataDrift +- [Reinforcement Learning](./how-to-use-azureml/reinforcement-learning) - Examples showing how to train reinforcement learning agents --- ## Documentation diff --git a/configuration.ipynb b/configuration.ipynb index 9bc1c3b4..eae88e17 100644 --- a/configuration.ipynb +++ b/configuration.ipynb @@ -103,7 +103,7 @@ "source": [ "import azureml.core\n", "\n", - "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/README.md b/how-to-use-azureml/automated-machine-learning/README.md index ec88126b..05c61f27 100644 --- a/how-to-use-azureml/automated-machine-learning/README.md +++ b/how-to-use-azureml/automated-machine-learning/README.md @@ -144,7 +144,7 @@ jupyter notebook - Dataset: forecasting for a bike-sharing - Example of training an automated ML forecasting model on multiple time-series -- [auto-ml-forecasting-function.ipynb](forecasting-high-frequency/auto-ml-forecasting-function.ipynb) +- [auto-ml-forecasting-function.ipynb](forecasting-forecast-function/auto-ml-forecasting-function.ipynb) - Example of training an automated ML forecasting model on multiple time-series - [auto-ml-forecasting-beer-remote.ipynb](forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb) diff --git a/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb b/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb index 324cbffa..84ffd9cc 100644 --- 
a/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb +++ b/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb @@ -105,7 +105,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb b/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb index 96cb9a45..c42ecf2e 100644 --- a/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb +++ b/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb @@ -93,7 +93,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.ipynb b/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.ipynb index 13dd8cad..a1debd3b 100644 --- a/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.ipynb +++ 
b/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.ipynb @@ -97,7 +97,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb b/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb index 222d3e88..d7063568 100644 --- a/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb +++ b/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb @@ -88,7 +88,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb index 7848165f..32ca6868 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb @@ -114,7 +114,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n", "print(\"You are currently using 
version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb index e1fb4412..bbfe43ac 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb @@ -87,7 +87,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, @@ -510,16 +510,16 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.automl.core.shared import constants, metrics\n", + "from azureml.automl.core.shared import constants\n", + "from azureml.automl.runtime.shared.score import scoring\n", "from sklearn.metrics import mean_absolute_error, mean_squared_error\n", "from matplotlib import pyplot as plt\n", "\n", "# use automl metrics module\n", - "scores = metrics.compute_metrics_regression(\n", - " df_all['predicted'],\n", - " df_all[target_column_name],\n", - " list(constants.Metric.SCALAR_REGRESSION_SET),\n", - " None, None, None)\n", + "scores = scoring.score_regression(\n", + " y_test=df_all[target_column_name],\n", + " y_pred=df_all['predicted'],\n", + " metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n", "\n", "print(\"[Test data scores]\\n\")\n", "for key, value in scores.items(): \n", diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb index 
f439d348..4b31c0eb 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb @@ -97,7 +97,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, @@ -465,7 +465,7 @@ "metadata": {}, "source": [ "### Forecast Function\n", - "For forecasting, we will use the forecast function instead of the predict function. Using the predict method would result in getting predictions for EVERY horizon the forecaster can predict at. This is useful when training and evaluating the performance of the forecaster at various horizons, but the level of detail is excessive for normal use. Forecast function also can handle more complicated scenarios, see notebook on [high frequency forecasting](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.ipynb)." + "For forecasting, we will use the forecast function instead of the predict function. Using the predict method would result in getting predictions for EVERY horizon the forecaster can predict at. This is useful when training and evaluating the performance of the forecaster at various horizons, but the level of detail is excessive for normal use. Forecast function also can handle more complicated scenarios, see the [forecast function notebook](../forecasting-forecast-function/auto-ml-forecasting-function.ipynb)." 
] }, { @@ -507,15 +507,15 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.automl.core.shared import constants, metrics\n", + "from azureml.automl.core.shared import constants\n", + "from azureml.automl.runtime.shared.score import scoring\n", "from matplotlib import pyplot as plt\n", "\n", "# use automl metrics module\n", - "scores = metrics.compute_metrics_regression(\n", - " df_all['predicted'],\n", - " df_all[target_column_name],\n", - " list(constants.Metric.SCALAR_REGRESSION_SET),\n", - " None, None, None)\n", + "scores = scoring.score_regression(\n", + " y_test=df_all[target_column_name],\n", + " y_pred=df_all['predicted'],\n", + " metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n", "\n", "print(\"[Test data scores]\\n\")\n", "for key, value in scores.items(): \n", @@ -667,15 +667,15 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.automl.core.shared import constants, metrics\n", + "from azureml.automl.core.shared import constants\n", + "from azureml.automl.runtime.shared.score import scoring\n", "from matplotlib import pyplot as plt\n", "\n", "# use automl metrics module\n", - "scores = metrics.compute_metrics_regression(\n", - " df_all['predicted'],\n", - " df_all[target_column_name],\n", - " list(constants.Metric.SCALAR_REGRESSION_SET),\n", - " None, None, None)\n", + "scores = scoring.score_regression(\n", + " y_test=df_all[target_column_name],\n", + " y_pred=df_all['predicted'],\n", + " metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n", "\n", "print(\"[Test data scores]\\n\")\n", "for key, value in scores.items(): \n", diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.ipynb similarity index 87% rename from how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.ipynb rename to 
how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.ipynb index 1c0e43ff..88fcc3e7 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.ipynb @@ -35,7 +35,6 @@ "Terminology:\n", "* forecast origin: the last period when the target value is known\n", "* forecast periods(s): the period(s) for which the value of the target is desired.\n", - "* forecast horizon: the number of forecast periods\n", "* lookback: how many past periods (before forecast origin) the model function depends on. The larger of number of lags and length of rolling window.\n", "* prediction context: `lookback` periods immediately preceding the forecast origin\n", "\n", @@ -95,7 +94,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, @@ -720,6 +719,90 @@ "X_show[['date', 'grain', 'ext_predictor', '_automl_target_col']]\n", "# prediction is in _automl_target_col" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Forecasting farther than the maximum horizon \n", + "When the forecast destination, or the latest date in the prediction data frame, is farther into the future than the specified maximum horizon, the `forecast()` function will still make point predictions out to the later date using a recursive operation mode. Internally, the method recursively applies the regular forecaster to generate context so that we can forecast further into the future. 
\n", + "\n", + "To illustrate the use-case and operation of recursive forecasting, we'll consider an example with a single time-series where the forecasting period directly follows the training period and is twice as long as the maximum horizon given at training time.\n", + "\n", + "![Recursive_forecast_overview](recursive_forecast_overview_small.png)\n", + "\n", + "Internally, we apply the forecaster in an iterative manner and finish the forecast task in two interations. In the first iteration, we apply the forecaster and get the prediction for the first max-horizon periods (y_pred1). In the second iteraction, y_pred1 is used as the context to produce the prediction for the next max-horizon periods (y_pred2). The combination of (y_pred1 and y_pred2) gives the results for the total forecast periods. \n", + "\n", + "A caveat: forecast accuracy will likely be worse the farther we predict into the future since errors are compounded with recursive application of the forecaster.\n", + "\n", + "![Recursive_forecast_iter1](recursive_forecast_iter1.png)\n", + "![Recursive_forecast_iter2](recursive_forecast_iter2.png)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# generate the same kind of test data we trained on, but with a single grain/time-series and test period twice as long as the max_horizon\n", + "_, _, X_test_long, y_test_long = get_timeseries(train_len=n_train_periods,\n", + " test_len=max_horizon*2,\n", + " time_column_name=TIME_COLUMN_NAME,\n", + " target_column_name=TARGET_COLUMN_NAME,\n", + " grain_column_name=GRAIN_COLUMN_NAME,\n", + " grains=1)\n", + "\n", + "print(X_test_long.groupby(GRAIN_COLUMN_NAME)[TIME_COLUMN_NAME].min())\n", + "print(X_test_long.groupby(GRAIN_COLUMN_NAME)[TIME_COLUMN_NAME].max())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# forecast() function will invoke the recursive forecast method 
internally.\n", + "y_pred_long, X_trans_long = fitted_model.forecast(X_test_long)\n", + "y_pred_long" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# What forecast() function does in this case is equivalent to iterating it twice over the test set as the following. \n", + "y_pred1, _ = fitted_model.forecast(X_test_long[:max_horizon])\n", + "y_pred_all, _ = fitted_model.forecast(X_test_long, np.concatenate((y_pred1, np.full(max_horizon, np.nan))))\n", + "np.array_equal(y_pred_all, y_pred_long)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Confidence interval and distributional forecasts\n", + "AutoML cannot currently estimate forecast errors beyond the maximum horizon set during training, so the `forecast_quantiles()` function will return missing values for quantiles not equal to 0.5 beyond the maximum horizon. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fitted_model.forecast_quantiles(X_test_long)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Similarly with the simple senarios illustrated above, forecasting farther than the max horizon in other senarios like 'multiple grain', 'Destination-date forecast', and 'forecast away from the training data' are also automatically handled by the `forecast()` function. 
" + ] } ], "metadata": { diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.yml b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.yml similarity index 100% rename from how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.yml rename to how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.yml diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/forecast_function_at_train.png b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/forecast_function_at_train.png similarity index 100% rename from how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/forecast_function_at_train.png rename to how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/forecast_function_at_train.png diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/forecast_function_away_from_train.png b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/forecast_function_away_from_train.png similarity index 100% rename from how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/forecast_function_away_from_train.png rename to how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/forecast_function_away_from_train.png diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/recursive_forecast_iter1.png b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/recursive_forecast_iter1.png new file mode 100644 index 0000000000000000000000000000000000000000..2962f5644ec9d6c9a9e116f5ee834db74c01da26 GIT binary patch literal 26559 zcmeFZcUY6z*EaglLqbX5O@vYYtE!3SpR^P1-&C@++4 z?FRkP9gdd`T_A}4BmF;yI(XVG2(s2wyLevD!)%gbo-5SuBSM~Q6H}NyqpdLiXPl$^ 
zkA88dkAvr(zU~MLSlquF=QriZXTg!L3j2UN&1rRuDe}()_LRKfkT&X(-1g!&xiTh$ zKP|sG8lT2}__}?^?*^9#6Ysp=7Lzi*EoRC}=12CFd4610V}_|=k$Zd7mu3;)^Lpqn zCZ)$DyI4E^hjI_so4b=N6Ipvyi&P(d+y1V3;XsPtEPMsd48PSLjQyUzfj`gfxY zKL&|aEl-vt|1ef0FP7+fNdsd&C%C=Z^%j8^Upqb(P5iUs6bjR?wwRx%<1n- z?B3;w@9_>kKg5)r8^n+!RBbU;Xd8-j7{c9$cIFz$Ldpw`ReK$b!>u2-YEM%JOhYM^ z&`!vWQJjB!H53%VG!xdzL22*n^d62hr;e*DXP9~QRBBHK1bv)+wY}P=wnv~!G`V9z zC!@KKR8A5&{FPe%L@^20sEcw20$gYV3I%a~x_%Kg!_=Yp7->LwrB(G;}^?n>u{ zbV8Bjn-Zykg#j0L(})x`38mn-Pr7XkXV3pYw8LtK+2p(@24X&Lvv>(q4#4e+>+IE{ z+{L3kvz%)S$*Q_DK^n_QyrH_Tqe>TFW4#OUn@nj-yifjKw@qtjf1z2=Z*i`E`6+$rJhY1Nky5w z=AI-Gs=RdF8?P@3b++&8+<5EU)>{V=_F-B>{R4wWcbo#=eT_MJp3tWx9($~MTwf@~G7oFh7>&!LR7?wX1{RPZQMexdTDK=IO zm!GD`I2C3~$J8F2PYX0KgFVeHD!HZSMHP1x>e;_fZznG*S!i?9RiIOc&7I;4z^WEj zfH#wdZlOAM8(CiIVic|0WLEZK=H#A?QSu(bQVY6rxr= zu&pzO!#K$zA@5yHnau~GZgNX6UqMjls1^^(cs1_!d7$IJZ!K365(y*ga82568)(}z z-;reT@MEvO#?|I_H%{UD?XBs}qfwHbBC!h4J{e`}f)jt6>aOZAa?JhQ?bT|9Z+EUG zQ)tq@N+r>Go_SJ{T?lih)1J>8MJuWE72zf!T7ioSpMMnXN>{_@KJmi5>Ug71(E!wvB!XLHNPP-)>uxzm@tP$HI!ntkW zhcY=G=9`K;`EyGDTY)e-HY!BXT=6C@_ZmdjT1Q|D_C?x4NN67w*-!n5K_?X0tTkcx z`2Yk6dJj>KZm7cR!{n3~5m%u8hc|IgetvzP&>}c~j*D!jdW}uE;Nl)_Skx+z2z0{>roJIo>PE`pq(p zu{nI+#9?APC$#OthDe7hNVNe`$`nk+0xRZfN1g91wv!#kd9r+kc|gXOOnHqqfF5k1 zaBV%q@`&HkC(ZlsT1Eb(r+XHGv0T~X*Z9>oefEx z(c(fyE#(i7?x==#MsB`pH;R*i2eoTcdQQSStKkZHvbU!yh?eTUy=;fs+SnMs!^aiG+s7knnMeXjVWPBT&wR>y;r52;V%{kq%>P;e8i-ZT!i|XH2SvMz}Bd3HJiRsp|`Sk8$-8R+IY^6g4?1 zsXJeK1x1;aPhz+wP6xbCZSz5VA@)n#iq@aVe6zi}K7iFhUh%CYiEy&Bw$(BqexKC6 zF6hLT@X{G)dRio4U#uj~Q>O+I4hk#bBj4J14&8l@im6!n@;*G2s_U~z&d*R!u~s5` z5oQYXiTlG@2b^cDJ2bSOOJ)ZGxHnS|v$r<6Dq#ullPzJv0&;g@3CqK+|9HcBxm1D2E)&W$4yuFp#29S*`bU(cj1#Rkt{&#W1nqmy`dx&x^ig{ z@#R1S`R1aPK!f>Q+(omKDV{^=t2k!ta*WYo!O`2vB{N^}zVDPO4xcL?tuNaVKKwNE zO}AO^S{ye2AOnvD=Y|*RItzS=5aX>3_ch_kPq5qb6SEhpX1T7XJ0He&p3zOrd(x(# zaB$G6LXToF^|E_DKci&k9!&y-Z;gs?Vdg8XNPFS$_2I)nZ75!y#=Xn%gDW$qnkB}!_PoUg_2Ic1LdgV^YPe&1{k*}^d+lyY<4?1z?` zGeQ5_wt*5CuWKtus@9!L*Teso;Cgfb8N<$!F{wPq(GI__b(G9#FRcx=6+c798Dn^y#|ny 
z!iD3Iyr=#ZQE4yDJ?({AaP@4D9X!IoU{lzuQ^n>JWY-EifEUSBTRjI|zk-J8Bd_!9 zF-G3c@;(SeX+bc4h4&y(0UY7JYp=DccUBUIkGLnS&AL0It7bf;uW4_uzLHMLATxEHH1~p4usF=!F%5vZ`ebPrEv0V%X zqD5x<6vG>r?R`)+V5ck=A#cKGmmHt@P%R`=hdFWpJ(N>5+eU#a+|wo*{cSv^b>NrW zw(gH{f=nTnw3l#SCE(FoEzKV!zc^sEwVYnYUF#Dg?;pUFpXQ%4r+k}^4|#9RIFtUd z`E8wQm7`jy5(r+7CwF9KC?DS$fX1A=uJjD(s<|n>5dY)n(hZ`F zUxgxSF6Z0SzeZ;<6CBW{z^%=l^Z04g5mlG$iXu@V;kIv_j z7`Q>g?bnTAJ}$US98u406V8eZs&-mg8dDBMzPo0-C^xVA=*HG_`fKT?qjVcEt*3cqWr*3DeTn)7UQ1IRp z->Q7P3I&yY*8uH+Hfnxi7&GY^>9e}1uW21$5vBm(m$0~)`bsxj zQdhrN#Uh^eZp8^NlhB1+>(5D$TeBy5yKtiXbxA?hr)U|$Y0H_XFWEtMqxA@-6CZ{{ z_~msjtM7Kw_-wze;@O8A2+>nLqn~)9g645$hdr&Z$%R&J9eL9D%N5_`iobsIJW(n8 zoSYFe|P#*BT!;wv52eTFONKsNtggW+t|%u1O% ze^2>*Q4Vr#m;=K#DN^Nb=XMSRfU^t@np%*6UX98CCx+h>fKHBg2aTh7_4K2%8 zqQW+&nl73#?mKi`c;#s%S$YiWY+BP=8(3Ux|9Wjbj@E_WWSn`GQMTRtaRbJ-@Ay5m z#9>sjIQQV6x9tKO(#7WbU)F0k%x2vQ&Ayr40r9B^D6gu#ZQOr0y$*9TKkG@3n%yNu z9Nvbc-mQ<5-zQ~duVy!9$>^A%#5!=npS|Wpv(jo7U@~q2^his*;R*-~5W~7pgz&cd z9Wj45jI|W2(>{zASbh({N=M9>Lk$uz((nAugsRVb)tZ|26o+7OZoJ(%czq19H>;%X zgqj`fkQ>i=DMbfOgzUDl3l}7V9wV58BRifNEhrO$g@)=nzO4YC4l`x1PS1><}M!` zT60~uGctB>z2x!3cUohvH7~&GnNIb8UhQA1lCTV-8q32^DK@}ggXI40$SAqF`)tE; z7tL_EPe=Twx8XtI5ksF^c#)DJiej|x96stNv3OtazVZxKkTBIPPGt%AeWLFQVw**B zzhQeK*W*te<_Am8t(!S;A2XxJS%*ZBc)23PTpeC5RGZ&-l%8<`&~%tLr)q-V>Z?Oa zE{96K9rE=h_q^snu01HU6f7TVZhg_-dAmKu(~dMeF2B~?xSEr<(#V`M-Dl|2P$<@Y z;8uM1c^+cK2gDuKB>4U87_u64{TdcB@%=)XL{N;82_Nt7M$Mnd@x84|`7`gp z<$pT7y(n#TZA3Pw^1Dh~f(V0wN-#&YQ{TI9HojCB27{vOMaXrRyooDMOU@C% zuM+!7WZJv&;ZRICF}Q9jJ>>@z{l`11!+hs;VW{@BEoZ*@OSxV~%3|Iae|zTY8x{+$ zh&T2jMZj_^grLlhtw*=~K9EBGs(UDPB&Vd!90}r%%1rNYm){Dp#;<53W+GO%ZkmE& zv1m7H*ZT0Riwp&UWcWRpni%VUMhm8lGvjy$rG*7~LSr-I(xZP3G$`u1?8w2WxjdSA zgEcBb$1m&Gc*U`q@2um%dse}xb(xu&g5U zjYZ6Jgug8?TO_~GA?mN#-a@>(on%aZxf$h4$bys^%VRZYDx4QyF`ES(Xqg)^&$3R3 zT)8|Lgu~_DZitkpkDy0+ zlFrFGngacWjs5v$b4hmsVE1qAL3th717|N=U7XC!pYAD!;{|jb;@q~=aY$DzXBdPh zt8Q!yUiZ9JMUzvqU8$4_H{+oSlU=z4W2BxQF8}q(SsoDoy^{$6j(w_hEWu~7o0UUA 
zL)(3#E9_4=g8cbN=tZ6;vAc0@JF4LVKHnoS<#Wlz{vr^~WNNdpfoShgJ8dltNyds% zjmYqp?8vIt-M5H2zQT7YV%~1%e7g61NeS=74k~^<-OO!~%RJEk1mc6ChMaz@47(g; zU*Xg-B3Uis>*zImW1!z>+q<)`uU$#kB4LnZNsP<9$~Z=)+=zTd^_e+U43>hX)#p&u z_;iH>Y>m#mVc|IeW=}{fdkIu``Nf==B-o#)| zz?EQ+L5Zb%`E(cml=J2mkph9RC#6|%C6(3;qjm1R--OG{2$`ql5u_xB-2-dlxY38&d z^F;+gWgv|pE3Uxk*gWyAh#2S6q~;`3#U@uY-1FLfx}P6sA4l3% z4LGvnV3)kte|}y+kCd}AO;a^twn`Y3bIe%({mf|tQ`2DaiWcU9iy%PPG$pa2uAO4! zNR2VZs|>7gq8J$7LnHJmopLLd;NJ55|hB4AD559dO0^jLm8!;?WAtmaT#8hZM$cy-*4 zYM;5`aP3-F)CKCdv1QExP0IcOV!zv5n7lT1SsqQQwe%tT8vxUkHl@IkYu3{=oOa5W z?;;5)i#M+nd2<=lIqccQCfSg#}gmJ8D(vfx{kUzZXo7+8CCyRA5tW zPa}?8ZOMLo)sqNgBEeQR%p$vqfGoAc{Mivi~Vo*FiOrTZJ2b%2SL{o-l|+E@kE#7EF2LRaL0FD#*DK6SroZJx9Mmhh67-7A<=tBB_UeG2 zqy2siP9ePd;0<6A2*bKLL2*{{GCg>XGWT8KYlR<%L>NJ)QTtykR>0WMFkJu}w6Sgb zkE_0VV|vo3lm+pf+2S0)AaHzG*OZpf9RK$Rl8;>OC%2z}wBpPrbr4#*kFTQoty5jEVqpjqo+U-I+E8z&Z(3i_;chJ zIVOTrN;B`-8c_(|oj;Ni>p=E*FB=Mx&lo@R^w-(#dOg>X!+xw0Cig^Dzxn?6-2m+X zUG{~~A(;E0f)$H{c5bh(JOsh_{FIWq`S|wdCBAM~Z&ZPdZ+o?mbEm1heu;dE6Wf9} zUAwo$1h}USGH+eyx4s2CVF9EEPC0+s#Y1|($}7P5l??ENTU)&Kk5^b{H|-t1yeCWZ zPhhHBVgybT)eRPP zA^f6Dbde*fZv1`2z)18}iTS;gM-V&E;K9t8GmrmGz##o(VDQ+lureGDa=nmdsM-JL z#J2P@+0R3d#K$85wA)CTslsKf2|(`3o~+WJ+iXO3W?KNWqMNCmFWUlN6|3EShUM|G zP|8YY;@ZqE4$7jn;1k@FhHRP{0`UOi1GeKeE?)Jt?&=% zruLKPbCgTHrkg_&6^fjHEH8%khieIE)8;~PINLygtXo#N?ngIJZ-o2KIjDaEOc0#% z4-(1~>Ws)x#XM0vrNCco(z9&YIm^z0d_%g%>e$S4&1~2Z#F!8;XsTw7G7YRwh zJ;9S3nP_XD)q9i?+6eLHpz2@Ea04AY3Sx!UO@6LH4F7`z5{W$oVLXOEO(SQ`TWvm- zR3UQ{0yD?YB>boHt%IVMWurA}%u2GCk)P|ejM9#Y-*R;_%>U*ij`M#?${4Gj0Z;&l z400xhoH^~;>^16`?Ea!=+rU~4Y`T^1yXU>JwBS?ZYQe^tE^?jMinE;O2*n-}#_ zQmiT=XGsZca$Ta&NrC3zpVRuMvC^`#vequd%1^yZin;mG{xMYH`0MgFY}My9RU|Bu zYW?i_rV@KEKlac5wlZT^2NOiBeWoZQN)dO>emcHLI%?%QGCC&qdJF~Ycik;mU|0sy zuC+$#);%gX{oC@lT;$xeC*TgcZ%4(uG|3tl+m`~~Bm3Q8pa&h4Dq^p8bnBE9(Tj?Acpn?4Lsw2EVF_!+o=}3_QU3+upvNjF3`g z+X)b086ZG}j?ce?qa?tTcQYT@hKk45g1>^;4I2S7EA1^gSF+7Q+3)QO!dBz~PD8#k z4=4pB$_FmQ4&xOwkn#(UjV?o4qDvgK(H1;)g2`#ov73Zn1-Pi95(HG80Q-B?BC?Vi 
zT70)Bk{Zk8=zU~iak9rUKe!+bTBC;c!Qw8oH8dX-O(yHF$3fd8e(}0=OQchZsjtex z4iD9ZCVEYk%0m0JWt!eN-1q%4dt?(gn!R$$R`!s$z9Zc9npm;=aJ>Kaj-JEsYKlP7i_Sz-rANOfq-L3e3CDr+(=kMWj)Ky zmi({7N1%U0s-c150eV%;>US?zPA^T#I$L2fSW%a2N~V!YC4We((D$LFBNan7_(mk0 zw@X_3c$BSrmNr-4E`z7dPIHTCnlg%?L(&!7{GRj|zSi-ZOfyZp$8Pwt8r;2FBkQ2_ z{$#>2X}9(FTn|YKQyDt*kQD5qQnN?Kb$5816=1X8^kgG{--MgyVX+6N8o$rM^Q$2~ zJWG6&zW`$GGIbK)Gp*>uFdP%>YoIB$k8#HF7;9+HlCt^3-;=G<0d_22#*kiN#0e0apm(&_qSA#ZS!#ADuKVHt}5{t! z935QQ3ctPj0_&;rY<96Bu1*~XF+<(qnD;CdXW{=kA56m;>_vy zM7AfqW$+%J=NKz(Jw|T&=&$2tbe-~>W0jlir^_7^giJ4F2Oo%Hbbo?iUPMF$p``*~ z``Ta_Cx%volyxziyW)M!0J_d=H0#-L3uhLfQ<+JZg9I0iTQZR%{s12h@zGQZ150DV zRjxKp%21aQJNC60<@H7@2KgG92lfX>)v^wdvHWgm(?@Y0sch9-N-0wg0`?V?zW7gV z49jilxG0u;)0FPn@YFeXLsQy_pgVcO!F$`XF<#lgo2Sbf)&BT8y8xh`Ng3-C(ARe0 z^%3-C#&ozEY3GjHtj&LYAEixSaVkx>RXr?RmxGS{s;8eW_8L=ypV98t~H0>&-V!d=_>&P^5DL0YQMDoDD`jfv~g zqplt0KE7>v%j2FQtA@saL#5q@n8LB~ch6$AS>$}Ymw@;31zzt>IDaDT{DR{s^GL*c zh{CGBvh%FH>&#WAO+{M0@tZvjGaXMYFu#@ShL^N-B7KU^+#c?v?lRn^@cG?2ADr^q z!b+4@92GzMR6(7JJf?N| zQd`@m7OGmiAf7h9Tx`B3aeA=Z_5In%=iz3Rfxn+U2G!Bd;wdc+>6Pyo`3gSyzpTUS zO9&$`(Z*-f8j4BnI*Aejo#ycsnS+&ex&RLrLZFKDlZH@8=yZxC3PdCpid--p=^>;L zmyvk9F5U4PLoL3r)JjEpDn-C!!*s*C59cPQnlG=442`ZWEEDmD>8usY4=2Z!4XppP zibXNh+!>Z*ZFeUk~wJt*@Fu`+(HD2UGNJB1Ke_C7- zEhcA{QRZu05p^^BW-X6nq}2*OyAu8(J8(%ryL{V#9tg$REH|>J*l*6v18=G6ecLN% z)dtkEA>`0hM4uNjQb1v0ISd5Ry;-Zs1!oOeK{_;;-wR5u5rK4*yS_?)sTD1P6lUnS z3rF8n*5LA#E_xgHlF54FW!M*;PgsuD1vJB1k~pEgOMO1utogo#rmu}2q!min$OWC} z&lmkd1p^Fk4ompV`*6|ceUh6Zs6?VHCyWVc&i4sc@|`b?&Wz3jRc$7m_w;ujO!%$3 zGW;(hK=ZB*qa$@N(^Y8}J)J0UkE-XWA%3OmDj&x8h4w*$^jO1ZMnH}1ch?;m2ulpi zw_T?Rri}`i2jYl#?;*d2utVj z>f%SXE{3j~B3IwqqX|bB*wU(6ck{P9I(3 z=2C_oC(V@)a6a7Yhg0_tNm$E-Q>AJ~pKzCkzfCymY#*|NYSoNOZY$54V+t zv~RL^K1^k=c3PTKKxW+;+72A&+48%JMZpcXXLSG`#?_4#EI#>NR-tX3iawO?6WAlF zFSs%02w&_GJMVm%dliMM$&rNx(&m5~QMj$M5V^e?4YII>D7uELLS{MB<==!vjiBqL zguG(k_h>`WGh>`1hoLdUR#B3$@;HpR5jBi&W~}gg^i;{ezSpUazg>aXstn>Qkw|4i zdXwGdV#kb=SS%v!yrB^7-LO^7$tz({);e3Wo~LRv&9A36DSn1k%*SgwUCS^aF`6rp 
zNsU=!i|ah9lEcPeFmiwsb%ZjSnhj+l)?=LiNZY}gwKJQm)3)E+#{;J(L=mRjtB=6~ z^JP0i08jKB47mO?I-#?{JGhFwK?xKvz;TskEwIix%_h;WXW;jnFsEEV1X4wf=K{lS z#vSXN#F~#q(>f>NDo#52wZJ%PZ{?|9BY#j@0#cE2i~9^7^S@?rn<1>LxH{zfeI}r} zZ|TTDiuulLsT&-okO zwy9g`;3yrRv*@*D^{Va?XRD>IrXzC7}iPx@8ENKqD)?|22o2J2Cmkuueh9PajO z`uca~M1xhAbzliOQFV3^vy@Rzn+jplec0tT{_j$2D>;Xs?c3+OTENk!&6u)1kJws2 z@vA~^#D>5ymO(KG2)L)06rwPgW9@q}7KNwc8~Dv2_%F?8(+VRf^H)+84AmpNFMSBesy~aJ-+xp5}^Z#a?Te8UNjIzKE0(#2_ zc*17i$vh0cB>$N>y>8?m*&m&6w~+gB0Hv$bSGi<8{vMcXH%N1^Wi(Z>x)$PQd zuY~NBXaIIw)Z{O!^i>q9W9gw2HaYy}6p%SdsdQMG7R`U(!Q@!AW*l0+I{blY?On^Sda?yv1c)z@Az?%g4N^&?n7>jkI!g?yjo7M*dvxbd ze#MYpveJVUTUn4lH{~erKk0BEm}lBgMBie>$1TdBxB?=bB(jb9@===)op@pRm+b!E zUMb$^ah3OjZ53 zpsK9|;u~u-?@EM{`LFrmOt##%e_kJ%uPbjH! zTDmfe)u*#uIXwIH_q6cdTB|px1NYQ571^e3iqB0aAxT{ufQ=viSp_>gbfzrQ+$h<)_0#?#`rn{I(wS- z{ksX0N)ka%iwOkm=$N=$`KycCI*m#38)uw}Hj>QurHP15FRyU~mKqV2*JhXKVzn&> zRR2$%{3oc#gBmqmI0ZYYjRf>{aC$SI{7K0CK}6XVwPoFMnGsfz_ z2FlYiRM%O;$f`}_e5DJSUH?_?8~iNj+4}V`YYTQZ!jyx*9zXoYS=&DjNqzyFzy0o? 
z2Qr|u=a)mRtwW7}e)~VOqn~g7AIT02iqH*gH6o6@j9e*%{MI`L;?@RftMg93>w|;b zzYZuuEx!m|(`>l3_0Uf7^23Z0f7Em&ZkCYduAg}^SivqH+*Z@EQE&n@r#s->1uNo4 zktHzHiHole6>QWXf~G%)t5N0NrKc^1JVR-Ynq+y@Ap3+of(5D^=gzvEIzp zsKtEX^kxwZXd$!`Vchi+&5>)cQNdfldH-=^hxL6kzz-32?~LB43Evpb@D&`n+Jl@E zLC#e(ap*PQc(ys$^;a}ACPQ&eBYPyG4}sYE=eTU(Q2BSi{q#Clhe}chL0cHY0gr@e z0_7z3XGFKH+Kc-ghM1~NKv=J64N6bB)7~-0^&*JqRM?3^n!CF=mhyRm?qR1 zvA!U-y$l8mw?WA*6n%Vs!Q4O6Qxo?1oCke&rF-bhpi7Sj8olz@g14TeSGO`)Wd9|^ z|8M>vpGHKQaDj6K3xGXf=|C3TD(k`li@)~kSJD1}RF8M71r4pn(h_<=#wP-S9#sMw z40NKNeLnd|PYGOGcd9EdiTVO;@A}VC&>1_iKj_hB&De2HzlN?eFd zSK(hj0KVhnNR)ZPhspa)tWAnby&36fr=_XBnhHV657i@FW5*k-ZX@RZT7;x2l&#t^ z8!Q2*62{4-{*Fs$L^cIyAcJEWAb$e&KE?&J%d`If!Zrk}8Y6ZqRSDn(+KBHaK$OASP{Qerq;kGWq6 zR9dRf2|F`dj6>(|E*tT(yLWKSqWqT z&gUQq+6F8ff-HXjf6)h&k}PR7-B^$ugfCrrL+|IUZy(YpZjTpEFd;1FOjQZHt=1r` zvTb9|#SlkXl@mveqGpFBvZjrVRb!D4AtoV+DYsN%cJO0Z7=e^XqvER&gCn+qH@t@0 z>WS3vcuJY1%NWQw0E^F0rBh!!4wMN zp@kXyuB3`l(rk@HM3+gkzd42k#h;$%!9o>I#EJ-L=T}vrp0VoSjIbtp*Xc3wGu9v_ z?xEd=wM)f5VjU>+?)gCPVz#yAJw!iy2d~fCkKz!Nk5;}CLvV4YOmQSs%A0)&8>&j3 zu9*BD+v?{%ezx+3Q8u*})8K;U^Eo#98_5CJiXny5xF>CDrxl!xzlhJaiqK|d=G4^? z(#;WOfnjaaq~HUbd&{fhJBWcc57s)Ilha;$Jqeh{Ewm;zBW3#aDAXrCVqmS53@@_xeiKLFZ5_4ASvxPW0b9c~bpUC?^&(1aeRg9VhT5(CugPMHPC3kppQ-7w**4G-TdNCd zE3p|OB`JI}fnTQW)($xAs^-k@Sv#Xz8Ra4OT~4s*gKqM89mdG5#mXCUxSn8-RpdmG z0PU7r3UE&L+As_Vif84}b8AKCx1aXB7Y%ULOFS;zkS?tKJIox%EWZRgj+|VVBUY8? 
zTkV=vyiT0@6V{~eq$yTOj&i4+H*riDzSle6ujTMmVs8oe*deanW!r<(-+WY{M5Cg{ zt|S@BlJyWVI4c*H5FiWVS4lnBh{Zgf3BHZY;aT%4ECW(r#BXLUl?=Ib=sy$HL3|m; z>g@aZ(6MyZQ9jwj=W3a;KO#PvNEjwqJa5|_aP&`@fcBTlu)%z-8(Fv>(Ff+P&-cAB z4^|2a2uigkd)3JYM*is%SDI*GX_x(dq0lYnPah%d>+BUXVZ$-rTi#H1P7m2D?$%!e zZiiGI`?RA zwGt9XvJ1fGm419fNI!v;*URw!?yx?BO>M@g^8Vo)dpTvsG|xyup{0y*@}jnFS3AAM zfW#y&Xinc=%hca@*GT5J%ftGI3|(9ns9gY-*LM`HR;1(;!bf_SKZ-3W?Xmh?|6aIR z{@AG$9+Sj;T^{1Xy+DHqyeT4v8cUd-S{?E}R2J=lz07^`(dgNr`{>fg;iFPRop)W* z?5d*rSq9WPcp)OFRMn1)O9T}D63aAC=Dq0pgY+l_L0{~y=ll9ZA?^Hr$N*F_L9q>>s($$GwHyT} zt-UI~pTB3MDHz(7h&B#)UDsodwknZ*!Yvu{MN+pS{_u6_x@T6+c@@#`JPz<@??;7q z{ek_$WbCp_hG}NmEmrtWcAPS0ar!L{jKrBeA1rPPX=!(wz@aQL&`FEE?G0&xr3Js? z<1(6FzB}swE;sUHEgcc)R|4>j((cVAfsO8={b=Iw9t-Gc_2aY95h-mZ zM_C*d-8&Lh+dK4)G`TnLnDWVE*9X!9>MrJAo0Hc(KE~SI&mWtvlsjwp4$N(qc&0uv zM++`O;+A!3^m^$SpHmLl!~D^N9o2Hc#l5OK{G=9J-|}2J*o4Cp0^J-;=R#|^YLv_7 znPj1{o(L8T=nvprzgi|K#T#Slxgq)uGz&d+T&m(f_Qn4)g9V6fAP9AyeisMC!~B2x z14?rk+|fWKOxgL4jRq-IF8PVTLSbl^7uy`cl@t=q42{TuBv=0WKQARLJrkqOyXkX6 z8X7=jIO}JvuG!24>x$&8`Xdv!&20IK47VfLt>2u2n8ewDyftru_E-9i7IG`#+KV+@ zl&_w2k&V_FT4oF}|I-fqw1=tcX_{Nau;Q%`P{k>`ZK&i!K;r|3JwRzeukObHPq5ws zu(qy?F$`ym*0%!J7EPdE^MEJUBZC6M7YJAmUnR_fpY^m>u#;iF8Tv-LTmC3?GatB; z&QNoYgO9^n^`3rj8N(PC-bzmd_CHR5uaz&@uHRd6Ts0BgYms_0V8zC0Fq%rN#e}UU zR7nO4DL&zi&|OUN<`;D(MJ%##uF@S4#V^Ec5(0 zIu2YGVmn!3kjNQwD=-WX+~;6&ngqkyQ{T@VSM8RVzVktvQ(EaEQ!XDJMZ-EYNSlbA zybkd1Pn#)%+ee&kh@ZkI!)S9;%1_{yF=Xx^H=*>S{%J4b32b+ZFA6mj3_IYe-!v8fVTZbA&Ve;Y<5IaQ5^q?$sU9n`TdrC!+C@CXM}N+X5w| zf<2F**{}pfcYd|baZ9g}^P}T+CJ1coS?%yK;QsWoGSwCzO2fn;^$T>hvU!}==G^m9 zCOI`-0_H3IMqQ8MQp~##6??MgglvIyrkg^|1eQU_PPkA4{&ss{x=$N{4&TE|N_@6HD0tUjc6o7W&i$%#R|KK7?xlky6C*R^ZnH3OiJtP5wWA@?Qd z&Z)dRqE7g|ncds~dwv7B<$Uja-8IL0f_MDMmriweW#WOhbJy>i*m9 z*JLw$mlop3+xh$l+ZUwpizdC=vDSRF`7_x17}4yPA%T(CWW^t4FB_%|&*b^ZC@-%d zxUrY8N>%}ymi?FQAP3V#Q`J^CvH^Knz3NfiOPmSX{DCx;zg^_BofXy)G620@6?dtS zg0@V*hvk<_o1HKdr$^9EA(bazR#MYEq*bF#WKFmvi-nut${I@6NPo0$X^l&k%}ont 
zs|F+ZydLVb?hJE{uPQa7-%k^|@@QC`5EVY?6O2^F?r1f!YhtY4!-*Qxgqm*Zn;^!M zK>=$}LldL;*PYk=L;2T0jHPmMB}ra+CSJrL6N3OZ>WpgWUrZM<^FE7_8g&@H2{0f9 zvUYrI))#+(ciw4E|8u2Lh?n+#Dmhve_e3r?y7DHNI1x%G;Terd*E9Aq=Y&1E{J#;N+>xDrRsUs zaKSEWRD#OB)*%kx89!NrQTc~M$#^Nph9zobEYQR&9!jIYcGZD;8B_3)wQC$Ru{L3Lzv&v3~( z)r73NKB?O#?AScX6t$Bz?$?tQJVC15k@NM^19aWz*#@-s{&A_`(*ExWie7IdFkuSO zQg8tcmr}F|!!`p%k`k8KZwZZF0&}ltatw3304~C@sVun`lXq_p>Xm!`Ryug!wThxV z8#=_;D1fVUXbU}<@TBSJ&`V6!mwrc$-TeUZ1&ealo~#39ji!+YToUXbx0ivU9Ju!o zFKK`3_Ua^I^+AF|)tMf-Xb%jSJOoN7XDZxwRilC7*etF|>$&MN4~(J;1{ba9Ub3r^ zdBBsn7iFAN-Vyv;wUdi;VEl@1IaX9;XSL;}L2!)?^z{gMgC-9u{90lK+726s=GWp! zg?EH~njqc#>lGl}c`JS6lCb@yI#9wGQkoqr6@J$?KxMCP@u`w->!(7&r7I`DlD^-) z3RRH_nK<<-mIDw3u540(gQ|zAqgr94KuJt|ET;`~n~-mlOx5RO`;jfu_55be^+#>8 zr8FB2G`h=rJCx=x6EI9BCW;A%q3PaINfNiZHh+S9zvt`%nV1v0W6o`~!+s~Yq)wK> zqu+?haZjgg_=1k2{#18?HBg0Fo!{a_cPK9p__f<5fe-|gA;)>)OqTFSq>+Qmmr-X` zlHt?0%kkge+CfdJw_e5%^tf4vY;{#R0URqz3Ds}W#UX%7_Z5qb~c|}P>D!4 zOV>_(_(Xaq?IFWi^c(s`XA=d@N**&7?S-XS5oj8exlW!@G&-qR1bUha;nR2w2Eh*H zo&Yb6AOLL9H|B;eg25Ys7J39v6itfL> zz{j~4ekNAjd=glTTl=wOqV1c<Fth7kcy%{EJ}+(ji^@ zOh&SSTg@Gu^O5~2BwD(*&oRZbpvGsenlPWjkZL%0#~7yaHeTUz2eXpdPO;!a964_1 z)u{U(c4<31LxF$UDI{JiS^et<3;R)n( zbniH}_8YJAnf2TD#*UfiW+XmvuWaLv>9iK!X^}K{4=g6dKG?eEL9NNfm|@eg_Vx^( zbEX2f2+G==A`7IW#ai4<#*(X=mDHqU*{6tx=xjm~Iwz4&?{IlX37jwqR0wUpVxJut zeO$U$K4y?$PW)nj>W_XSqe{~3M0bJ6fqA*+%AzT&nwWY*5V7pl>_-914+2Ia4SswD z1)5z*f-QD^mR&KE5L77)9O1mHOJHu;i8GLlb~orZ)96L8SH4c|#NH4|>Q|RA)QT7? z`TT~_QQ}+2!k%EuTD-~4J#3m3y!Sj7tJK*hS79}jR#^$IIEyVeB!0dip`OZumYt9Q zf^o}NyH}j9%7bg`)KASmvp~BF(fQjk^t)2mKrhXYZ4CCN;Jt=Qp_t*u$`r%i;ci4s z6&M=r_f)@f)JyGVUkGN!g+Ria+$l(w?kEm9!)>%v+DhyI?E3Yp*ZHcy>a(}y7R^I-7QpY%pV{-kk9Wi&Yj?Ja16+5TO^ptTj zLJRTq@gy6pLSnuG>J+`N_tMMZ1w^0;zpz3JJpM$0Te%d&*~c-p&y){-`=*~)Ts{?! 
z_TeUaH~Q*4Ed|%UHR=$Y+-q5azdS?>PuyseH~Kk%d_C00sR0>MYNiS-?CW|&G!MgQ-Gj9FX|8499+kuE3#+qFhh_}Kg?86t7zh9qP zunIzoLXZJF3%OvFi|R`Y90TDOQI??JuS<&!q~gA>Klyc%b{h_bGyk4aDV zm6uSlh24+M%wuLQDhmhQ7_MD@GeEwDl@8aaq>2O_ZP9mPg%e|AKsXf1c0#`YxTj5^m}=>VM8fB)qX7!OnkkQOGwy z3UQg>*9dbRV!fM+gQ0KiU;@5~~niSd}gflB5__ zuIZ!orqVc5$;WiXKz~0rEixNnGYf8_tLT1vr};=|c7BgT`APcSLNOm5vftQ$5V)By zdaroLs=3Q?5zc!Ng;Vz-Nc|jS_S9R+G|pWsTN;(@p!?ye3*L1uMrY*p1^*c9lKOU- zxes|_x$$VecZhub1bXaiHweo>@jka=s({WO-2s;J|v}9o6-~0pHY9`}PMtU^t*Zbu9ZrG5bMjt#_uv zCS$L=v;zfmHI2N~0~hJ{-!)A8l(9(r^PjAdUJ<{+;hCQ;-}8_n0$~2MCrskD%SVQF zMHff!2=uhYGR0mL>cPquGuTpN|AfVQTHD!D{J9|1(Swj!Z8Y1JMyyqnkW*2wK>q$u z#nH3QcQA}I;7W}^V*vh@BcjK`M)eMLxUE$y{ZoKq*xbehJhxGvTGu? z9_`f{b7pbDSj%IT6q@K}=aN*9Sy9m=&-7i1_XsVjLCq>*BrqdyA@f+pu;Yvk#O!;} z*5oEX1C6qBb_=Ws`MM3PXi+!`BAA&(jK?bAHg&of_f2PB=hjJt5{~3 zB6*$6^8e=$)Iv>562!j;)BG1;W*9%Ub(5y?Zn5~-((>74yt z(A6Sj1_PIVUbU$Y-$&Aj6o>u3dfinssd3Gv4!7;TP*I0YvCFiq{3(BV?8_eoLW7#y z&m7pN7;Zpga!R$dtAvqtC2GI?l-#x(5dFyY7EaAu-2;!s1(LyXZwRdqxTxaI_d}M5 zY@~@}%Q6fCfhlcD^xls5OeoRHTq2m?FdwsFe$urlHuJJ2YsTZoLr(}!TP0>D?yFAKY9N^X;1%j1!MyW-6bL|1`HD0_Or=crVH&K=5S> zJcVDH)!69^Un#UiZhLR<%Aj#ksjiF^~C$ zH+VP-uv=dA{f3&`(xcFGUX^Fgrhw`#yL)$WYkZ{jJBcyZ@kG-%0ZE{dbD5@ZL{!+x zO}&3u#RC2Eu{D^x?2}`|*fQr}{M0iKcS_TkPUD(-_rwDb?%2p_8_(V2kz-OyU*DQ@ z!QV4#2z~uGMENzPdl-^)8G+)K`C{DZia|#IFS+l1&L3|a_n#tl`QDW4vmCynMi{W_ z9r2~-g|k5CQd20%-ai46=iKQq6yj3m2`mJ?IRm;r6*p}!+(2i(F%i&8Qr89wh@Z@H zihqCOqWjxkRqUp{9qz#^^p#2IK$`y&*SnmUOXY`?{-!=S+->I<(yTt1uQf&P9&h&z}&3-@hNU+&KFO z)oll>f~<1HQ|yLjOVb?^3ajU!Q}o1yPv&qaPp&Ojf}!ORK1#YHb*uCA>4d@Z%3kk~ zy{-=sv@PnKcmyVYxFNyBFoN{i@6d6xh8DkL;Ngp!?$|!~V15x{F6+cB$t6_l)Ow%u zX;1674-M+$`mtUW0=?)NJc%IVMEdtG2d30uOvY@aLO)VC7G=0J-5{BZYlz&?c2HTz zaymd!*j75wFVXgM2*vzVvN5+KHo(e5vU!@>&5Mgx?^i@&JkJ(eu`9i)6_@CL{GYpw z;u?(r9?^BGdIrink>=CmJpCGW4JPH|HP4&)qNE-kF|1Y#@4$xdHOXQ8vzIJv3kdY^ z{i<8Wi%QnQTjZjP^pML2xN71c4qfv+jThMuTkz`X5B@1cW}SJ&-NubM-a%@_Q{LEFQxkG^fvw-`$UKh6AaUG!I*k!~6*bg| z$`Z=7<{q775;eA`7oR~|cM>?TLIP>BUWk1rBXy}gFR1vE;N$D(C+FI?c+;C(DtzCS 
z<#b!Nu7EouPcZ`Bmx(q%5MqZPhMfGCoUV=vz2h`ONrYo`Dn-up^s(ICLR`O0fa!-d zE${Vf8YqjNQJOx!6fY!tSXl2+G#JTuM zu};7*Qmd5ZduEj!v7lpZTr7O|;D|ZyR6F_0X}8((i7Wil-u;4|Im`m`pMfIkOmha$ zVvdX5m5V)dQC`HaI}a@$CG%`2Zru$G0`~x|zwF-oaRTRWzn1RVjDhIxQi)NSPMH?L zJB?h6+D?S$Z2Y$*D4@6`nD%`n0R zL=$lfonS>fLJ$4{KbOIZ5gUi*qIGSKEyEoi14#EVLJS#g{uA>KKr0oMWIOXXTrK(={nl?GI;{Kv}#mC zn_$UZ@jttz6!WvG;)5Mc_0k8SJjC$ebrjP*^ib6Hh-xcL$*V1uAHG*_%L(stLNAWI zcAcpGJc7u!`83R&W!~Ud{TjKct=bUeXnbm*4nTIDI`mMwON(~mBb9(nDlt~$mu>*7 zGemSx2I{WEju%sUym)7~qS!Q;J@4FGsx71QyDNNM*P>r-1v>Vr%523WbWh?b*PKUJ z!iE)H1rY{^px?=OefpORwRD1>a2($!PZVBoNwq^ENWeMai5g{!S7z+`Apibbj?#}w zF@N`2Z(LrX^v5GJoW_0I$sCW;`d-oKPT7+W;B*(wREOU2eZ_g@IsPc_ZRA-U&DG42 zDd(MI-CPgQNz4kfibtQGSFObxwg!HJBXc5<2{_kn#)p98+lbhZ)gd^_MQ0kte!h-5GY2SE&R zAMHxu#djI<261vwjC80WVca$Zq5Ey@*WCQN4mV#i(@_q?l1R2Gl<0>F%oW<#NNc`q z)Dy2M^X9~n^ZT15KH2$Or+Ukb6`Ai|qLH6=H&f08T-)&-vz6?WhHtkM^g;Wu>2O=o zQua=U|B>cmJnCN>4h4kIBlgFE6Vv6+8^~q>yWUhbXUrICLw3C)*Urh}rr*B!;s^7J3QtL*vCyd5sdkikh2I zG`WKqaM%L9qjTMfAM?T&zuI7^p2P#vEb01FOyN?RVjspItKv&Jwl*1FPf>Zh?XV!( zUT0IFC;2R)y+jeMvQCA{?)S>ZHU_y6PTQF6UEiYmBU)LvhW!0~k-yjA|Ct96mv#BN Z;isS3*FJpB&;bbsfuBGgXB`c=^&hh|^KJkD literal 0 HcmV?d00001 diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/recursive_forecast_iter2.png b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/recursive_forecast_iter2.png new file mode 100644 index 0000000000000000000000000000000000000000..180f59d4fbab48659443fdb3a57752e9dde72af8 GIT binary patch literal 30353 zcmeGEcT`kK^fn3~f{22&m_XtvDgu&3a#9hIBubWys0fmg9J-NF6a-YFf}oNlNRTX{ zTS=0YoV&?Epn+~;r(2CWA&lBnw&gpwwERbMdZh8dpZ;U&|zTkDRuYJMMlNBAM(VgGs zu=?Q3qW%NoQK#^OS_f)QWXincey>>(beimYcW@)4M(5+(zKdtBKDhbjo{id(C~h*N z`fb%41?N6IJw$twHCkD?DD`y$vQ=ep@A@FA++Q)teX(kK$S>J1iICWvt)!~B?uVTA zBl}}J+kM>Is@5O!f|Z#MEfTpcYdB z#>~JTOgDt$+J%C|oYVeD-MSWRLG zzq&PdqlJsLZW)f}&=>G!GDOnE^U<(FQ;yg8zEtxnxVL>HV=F`7y6aRCh1k%pnEv44 z%gr`~gX^_(Zdz4im8NedC7h8WCMP7aT@CN`J=G6xfLfjc!^D;3u+I3Q{f|XjW_gju 
zroG@^=vU~|Zy7KpMozqbGnv{*fAzWKdrsoH6|~GKEnvlB&dnS_ZjcfdZE^6h8yc1* zOKI(m=7>{~gi2%RmfkzrmX7UB<4|C*ilDcG;!AIapY4VcC#*Ijj2wM1ZHixI+-Y6* zyFS$2!$bF7HG}Ujz8ZjtHPcCv(hSoO*KkJ-!HGb~RxXzatp4`E{Ht;~Aw8tc)XRf8 zCh5b(JmU6#PABJ;fA#M2W^{kd* z4RlU|b8PNA#@KQl@nihmR@dwgmss_PK`_n?-wZFu9_|t>jy3!8Kcr+&x3uY8Jr$9s zBs1gMm%lfj@3x~V#vw4pE%RpIy=yg3Lp-uA4j)3$oZN??u^Ne$q@BDged@-80vhky zvHg}bZNZ}J2Qfn;5Z#q+pA)`A|Kc=cggu&@wGfq8oz8Ix(ZDhy21N$ITrA8WhF3pj zdgdl{dByfx8tWKcY?Ruf-`J;8f7yo5*OQB4qc4S7UO#gR;!oS5*GzSb{;7rKpq4t# zvVu8lS*g;Aa4wR-m-6GR2pV6_A1XQ%09_V11HXDD)9rDKda?0zlCO^$g8Qj*$etwR z)3@xYdUf}@T|?OtH9Z>pr7tF5_+=`16t|x}_NYkXk)^ygwCrucOUo3!<^3r>!$L|{MAOf@D8qhjP*73 z>I*5-Zg-*F;q}QwzsmdQmE`9E2$pT!`rq<%(NgVx9p*qU3B$xbI8Ky<>*=^5gmF?QMB z;(et;D;8cmiN+iiLq%VBr6fUsxc4Cri&~QRh|tn$ssHu*a%dbpI1B}K?hV>%nWwvZ z3A**T(SO~m)Nr>~uZK_+w7^uF`2Go(l>$L->jackdL6#QJi z$niDHZM3YqtkD><(8M3og?Nbcr~|(2L2efn0ag|J(|F4=B$xBFDWRH1|Wu`Ion$zBA_t?R0)uPU5)svI{k6_F*> zSHYCIbpo&nkr({Lhw3%90j3~0?~D)M&TTZ0KSXbZcy4@zb!IbI)PD;NKklrR8!ebp z18n7*uVB^-RXzLGT~bD=V_mivE@}^6Avjo+lszr@3`_PSC-#!iv(^p|Am+2~v|_>l zr6mf^AH7l?@#5LUB6d+zm=L(s0{ry^txGk2>nZxfX=T%PH=jofI z_p!O(lA7-yN(y#$gxZWEvyEKC4ogNFn${Ui*QGqDbE9ELeEq2w@VT$AUHb~Vg=#wK zF}gXz1_FL;=x?S2g5aqKzdXgo!Cyg}IB2^M(ZJAauixejT?S!@V;{oq9K;a+C}wX^ zmb3(0IDnOi7ZT+=K6rAzAFDT8PhNcd^p5ir09fqWKPPz!O!DCLP#Jb-yIF~kpP5-} zqm<&{gx)K)otUw-adzPzw(@ROAUkN^Xub!KjPb8+Gq;j zB!q?@q$y*U+y;|+M)evf@c_CuL2wH;_&~p0Wj6hrg7@+V*9DA_-pJ?gk>rQKF4JoW ztk3Bzwm9c0u6H#f-IlpGSL}S(787{noQJ!CSsGW3XT$0Xt?xo)5NKFluta(S{QH{p z{!ZY2XM5zbD8;GI#J-{3Ra%;b(|<>3xYiFqFZu(bYx(mp=iQ#~5!%(~{X1u#Kw3<+ zVkoVLKiScDxA_Y+{@?j(ZPuf+k&3urZ6nf@ZFOK9X8FC(SbqZxml=nGu}e1Kfrp;{ zShyBfP~ZkJNC%CtF17Srkr#oJgkcu)x8He_?@#A#3@U35>zd=<2?zJw6ibGPRccr* z`ITHD^akEN687Rye{CTQh5u=tV~o+XOR{hr_Go_uw98 z?*Cz=kNB?8V-5tA&TqIh{VDHduIxR=Z@yd#UyEqAgg3sMeis2%HfbpkAT6CAwt6Ds ztSm%XP#M%Md#9bms?lDt_)dDrFAb`s)&A`{<>h&svm^ux!x^(mi$@UvJX2;j9UH zIDGK_AX#iHRU>P+>|4wLx(L87D_3i360fhC( z!}HG*yw_;rlm3>n8+5rKUkV+uxP%(1&|3=A+DP^y+ojg(hDPYLH`x`+SQQ;meWtLk 
z>3o@@JOt-T_P0rO^TZXowI{+4LeBU@g?>@L){L)6x?zP+nh z5`B4m1k+LcrQ`V6vjK<0;-}NTuwu`!6nHyY2urbOeob35&X3nuS-AM1`S!1%Bj{4WQ4vr*4QD% zR(EKsZjb@O{*3dg0dnm`^RJdf% zh*9DhooBLXY{;b2#>tt~t|aNx51KrB7Z+yx9~u2N(o+6olRcRCu)!X}m))wJ6miC9)^)X954}fAH{gC40 zh_muM8!No#xQbqXLi^6hYg*38`3X%Y#XOE+HGo;o8b7r4K=swbY5|$FeF!%578^+h zSV90a^;4ButZ&%^Qs^|fZcU^jn{7v1au4L-l--{*Y)qTYWpFZ zHwS`r8LB6Ga9QdCNjJ~MoWMusZVq$9{5P*uK+Ipb0n4Lurnli|)7Vczq8}7uWVap+ zfB}?#BJKtHCUIlP;%@g0=fxH<*`J}k{^F|x!PsG3Y4FZd>Gt?NHef=Nd_Ro#{}s`# z<&_X&g@<~?P=k9vYN-cq(^^j zJ?%B*yXlerjQyC}moD2rjyuvaNrHR)P6o1l@^Ae^#gJndy7)8b7O*}vaG0Ox)$kRy zVAPH|J@;D6m@%A{4|czg4gPRV*E78N-Rx+-mMMO`$ltrA3^LW%28O9Rz2H#xQQfX@ zY#@Z?^o_LTy;f&_7$!Sska1moTcD(U2_x!$#lw)v>6|WQOSN!onB<{Fkt7}HG7NW% znH%Jk=|7RyhHr0nUtfQ+Ak=F<`_3^EpAgq-gc!G$DH{3ovc8!c5thxVZ~zf>?O)S2 zO1X6dITSA%6}xidSdF!KahbPCDm%g^(H;^xFnxAVN&RV}UPf0*uP*4s<*N&>Bc;tXm``}YO}{Y&=w4 z%4ABo@>PvYJ5VHXe@<$p_;G97U(} zq06?p1^pTVlCGfWB2tL<*+YIqw>@$#jW?EHVX!r*m&31wn>yF4`UfPGmM#h?M2szb z$?LvPqn?;IGyWN{hy7?;(tN9BAAA8WLo}M+prSv;NIB+Pc|{)YU6Z}E`MUO7O{{Ob za<{JdtnGvw#m6b^2yQql3P`J1w;uv_iu(iWmP(!lzP(}}B1X5 zd*;OoY_&~Zqp+=S0zxej0#aWm*DzALz^P2B- zy^T-i;+Nwuhc7!=50*H76;@Y-@hEsV31l$yAWu3sx;6hbcaYH=s6d7q+Lp4kF#`s? 
zg6(7sw(0ak$bT*~gN28;#C5!$>w1dHv3ADXWq10nlt?At_9Vr+HTEN^uDbTEM>Z|V z&G6@P8OiQFtzieGQ~D$xwaF^9t0mq0X!9x#_*uLX#JfS}j--T0b8l^A!>YFKH}Lwb zO^GV_)zk$~ANmu@iGES-MlV>Lt)!ns`iPFXd1Vi(JGLw|;plCFnW;oGPQ;xQk4sGH zZOOz5e)pD$Xj8uxkT{zI8e`oA5$D$^iIlZker08!QEX~*0As9fmU9f>l}z*1*DLcd za_*^MR#&g3vL~0r&PfOIxbBdt&rE_{nC{}qj$m1KpR?T4LM!&=Ah4D6U8s-|I^xsb zbfjEl@_P0xk`)0Pf`5ChtO^Wqhp$ck?{j?;WqWDU-2qY>T7Ht~t+~K@J$wAmGbUym z;R$bXe-y-CdHnI`=(|xNNM?b_5jxlik8&!M$Qc03*Ud8e=Tsl%L2MP_Wwd5{a$PCz z_?)%IYjVk#7*S`9wlQlzO&M%;-iTuJ;#)9m$j~0`*M^kaAj!k=pKv+>WXH*jXnVb% zsthDvyT;NDaRn9j=@Kl^*2=KhOioz=a~GC0C0S=S-rXn^k!phmP~h^RnGACCwcsy= zFQsFOD5oGpp?7Nmm|D2^ElbH6rGUnIJcqY6T7}T2nAw-BKe%nq_TRAWa%7JN(l{UG zTbvnTic*d5I%=3i+9-U_l_f5@s3j7x`|jP7JBEoz+KXvq;E`8v^x|EHKX^ThH^FWZO8}?S3c;bFA2d{qE*&A+pN2rullUPCaYWXk|NU{eyRJgQg%EGS4+q(F=&fD}0VvlxjEExj z!sRHNj|SHHkxJf9=NM6nnRC?Hn7UgQ=`IMVz0xJ@&U1F!;I-I_B!7GO5?3i`YG%|n z3JAQA&u)guAPZ3WvfhHYSQe`Z{Rv1HOocRzNAv(qneys>wxBA4QEL&^?DL3QBuiq3 zfKT9-bfA<7g**VKL~3%oEsWrC%DBkLp$!TN>t+$I+3@rYh4iQ)-8OQ|nVgNSw9yvt zJha~<=jdh}1y8|gQs$B2869I3Jd)(Bw2CA;M2vm)2@-4vW?=2KasUw3ZD@n_LwkYug`rEI>bei+sj4;88c!U6nS^0mH>ieEzVx+dbz^d~!+x)I$IF0#DH{ zMYcmpd$3W;vXI*lnB|Uvfm+jNv@VOu zfStEo_#3&@p%I{ba&0nc@ip=+fPmmg#l4n~@I%tM4`X8}0hef9(1Ieg6~ydYrc|F} zdjk+;bGM5CWF&f`g0`h)@-G3*(k;EtqYS%Sk~dMorE!6m+F1@6XVPP;yQ3(*AK)9K z&#+{J!gpLj8VgKU-_g4K**A7H4N`C@x?USDb_$Y6`#WMptwYMCxF))FwC{}QmC@Y* zlnfnOPeZ)qlXj)L+4|pSVzL9X0ZkvUjN0hR)`i*>Q*@rGwRx#!ysFsrP~J}?b0~7{ z3YYznZM@%&dzN9f#J(Lb!3r9q*vOa3gYIP!=yiVQ$I0sfm^c;^=Fsip6?m4@y(S`TG?E!Yraw&K(MeALb-I__^4cr*T*I+2SD`)=Pf7yG! zpuu7}^y75waWx*Fi3o0au!#{G-UuR>9Q(4Ga#MRfnq;hcx4Gl9j?(6h9BP;;z^JmB zpYh&u2_X^IGwKxvGBc81RMg$7t;u&BEHM@naB5-xTgC!F#$hO4;^f94u1}USSBF$- zKyK8CDl9*jhJDDj0@|{Vr7_NA+xA_1K^kx4$$nm<`SZ&xDB1fLpS2u5yQ} zexFX0fZWQ&MhKP1vh)1{ z{}5TBH2lc%k^7g*`KYry0WzM!XMtRIVIhNn$K(tY9zeJ+YA+P}BovrD`IW?^O9Vg> z$vDPkF;ZQmmMO7Q`6x79#;J4stE?~&U6M!7wYs(xIu}>3TPJlsV#L2_QekyJA}pC! 
z@wSN;ZGI>y;(>s<9n4rt)1X8)pRH%%bxd{k-M{jm(=Q*;$Q1uscFENKRQ-67cWNhl zIbMHG;Tm*Vc_NmTMrQr6LQ>}6-eCYKMX20YjJfbZvBb^ylPAQ*GGUY^sVYztDwCev22rHD& zJX>oiHRwO_6Vrk4ZIai|DZ+y{rUwh+U`6nerb2R|YX0NXJDp@pEBN_##q>FG9gj#)w>ZuV*#_Mv}24lJmG&zvJm5A1ih_REkIfZ{_!dR&dGIn0<*>dfGiUyaYS3N(oQ{Z&~qhcFk7 zv4CX2ucB$u{9Cr_kP==Ig|6D;eY>WnrZNSmOzBi6b&3YeYKm5S^YErB+=K#GKA{`oet~aBC}E-s zRbF50>h}M_!A}!j_F_ckfb}jNl_lXjBW1=OrbNiieO2Sto?Y*=i42E@GP-|80oKB% zykl#_)|q^M>qzdFlKUp11N}OH*Jt5X=TVtaY2~ET`MMg~dZ5JBJnv-@jGUqJe6);XZgXrOut0Xg`>Ow;v0t+_k_dC~%>kGe+0`#kTVq2BhI*Y?17Z zT?#)^9B*p{^2WIGs-AMpy~SZK^h=&x^9m_9Y>bh`^BNb0V*gwF5HKbDJL*(M)_EvH zZ9?PbA3;v?7fEgj42dB>gpm{J0**tXkGT+{)f;0(oI3wYeSiDoJ2A%qC&pVkiy#ks ze;uJS+q#n#t?Q_{ooBIt&3t;y{in`BN+9=$l$(2tSJ_vplD7o;V+dXfUhAu~A-1KI z=>Xo1G-@8DVic^r(JabcU4vUOUPNV|wa)f5+itfwh?x-;mWr=N z6VuVREuUj)c$P)b8~ZRHgX@k^v#>m>|Jrx=CbSWZlSp{?*=xl^$00h}rF*it775Fh zXK|@zoXI<0nygHs_i##GNS>4!geqUrTcvg3w+;H;;`RqANaRMi0%6vcEoj<|R_qTb zj{(>T>w~xiD3KARkvU5Z6cr}J9y2T0oBY%LI?5G{3e%uaPUC%b*mKY^IC<*!7(^Tg z8xlgvwC|$lOt+0YyNn?Y4nGb3_3%drbTugrnB8kHh}Uy{*h^GcxN56APYEb)+J)7a zx=;Go)W$f>$fQOnu}Kx<6T2;mi3glc52Yd7IxM|KBbJ8BqSh^O1td$7zN4aF&Fl8} z76<;$F%4ZwYrOC=FEV~e-)C*y+Fo>3zuAboI?-p0AJoYlsf;M5ErBmg8o#7{P4ix$ z*aseYOjJa1N;#YiSjK9Qd8~jFrez(cT`dPTvMu3HC)0=OcB2SNL}e`LOZE<{wCJXP z{5v}<0_PWOWCd9SY%l5`|M^*X3BfUtI3dG2jWEFt*S4$Ot^seE^h@AG)J zk)Lg?E5?I2m7;Yy`ZM*uxh8*#hJ7!BO^f`;%rjXtvNrvHyqg3Im{Hlh^W>9j1GjVt z9z@+o+j$!(>BseB^XaY5LKaq~K!KPHA6CTvZJ_y{|6A`uA)v}jfS6Rf^O7a z`7_3ITFxmb3HI+rgU#mO#SFGSzFX$6N<8YygxC1181k1!9>6}D9Qj@2dl5#A|Nd6x zC3}DuYkf9)_<+r3OQo5j#Q%^+9>bm9TWD)(N(?E8)Tzu+#X%8BT@Sh)8~IP#K#4Iu!OXKoD+kb zDLo@-%7$Zx!-vgPY_$OSLDk|*>9i0^gPNBGoUk&oVg9?N*3Ur1Nb*-@lf`CaBav>Y z6bqkQSK4aru*@5fTL?y%Ew@z#*TqKtW}(Nv^}Hjn9i&X5i3zO<(lFmsg|a&_8SA_X zo7~Fe6pyC#1RZoGPBHnzo#r^tv@J|Bfc`0_Pgh|I@8S7N?^%Cf>t6;?^aX@i@I`jN zDBPO!0x5jmNeA9XRoVGy#;(5}W3sWco9)gETwQM9wIt=HEDV++=M3`X0J83kj0=Z- z)c9S?pRqK#)xEvzQ2X!eU{%e!lX zBukqPqv;Y=uym30l(CZG*$_*=^_Ke?=jpzLZ+huz1{%5CjsFjK9A?1e`Gne_*-TitrbK$*E*%hrTCCA4VT=j)}||fHY3{3orST*HL`nG(o4% 
z4>1g)w@KMQpgISzsF0Bj+0EHx^6Vuf6(dq2MX+&Wxd_-tLCSH>NtOI)T_&S{L@n<; zXZqod@|o1A%N!td0VPi0EJqB8f6VA338SVv+grnab96DPqjy`9y30EJrk>l@7o#W2 z#v@DtUZ_vg$P5v_NW{SxMqjSab1Ec>{$>J8)hjW1El0K<9NdSfIR0Cp>~<_2mfEn3 z075M0-I(_SpulAE>y+cE{*6x)4IfO3+PE-dgxuIXzD@MFPhF6zW z=rQ0!aY*9iSI5n}2NC0^vH2^Km;TFb3L1D4?kzsp!^s0DE+=ngCO6TP;axWGdNARs zmOF;@`J)<|{Un4PTuC!8cdB&9zUutlkndmBDi2^nH3rd%R?d=26McJHam31O6IvJE zIbmKTshmz!f$~Y2NDQ?B!)1ZIh{_d@%eIfcgHxRo>k0Fj= zfC&})@%rAHRwRUaG^{7FD@d~en48N+E8sKT(%i>br@v?v3$orO7rO;!w*Dkf|IL3Y z_Mn#2)BqA*Rif8ytsGfON=WwJ3bCuFjI&{z!(C|uS|ill!gQlz7|&2lWRYEe(TUm+ z^F2}b05H9um4c9yfK#Yjq(`nHwKR()8K~ECSwkojFeHp(`{IH}I3` z#S_ncIqqIYOt{OAz}cR&Bo~L^yeNVAXMP$53XoB{-k5=AxikyM9E8|z;F{(wzLh;L*zA*&LpUD1J8=h93uto zWT{>iRT_}(M%qQoiI zw>5LbzgCfV7Z7$_`G>*UTAF`Hm_!pD0!G@T%1e5L&YwJq7Opzn=ZENiZ)hf}p@#+a zMe9drxecfPo`h;P=^nakLqZuRA?nxP981uQ%sRn>V!bA9;i6XkgtL2Yp~7;ggb=MQ z-S7vJ93nk^4$ScLcI2wcB8q&(>_OQZ0y!H9js88u+cmT<#jZ+VAF9r4E`4u8;6||* zoCHi41HzxS`?6Fuz4sD0u#d4YAV2BDk;g_hKB}F~ehy-?K{zQbf-^O4b@BF3`+M{5 zJ(P&9_M@&Ree4`^#}Q~fW6V@#T%l}Zv>?stonXDIbTL&w1;{Sv&5Z>@T$1IYG(!UE zd#ZNO(F4@i@wvkXe8*p+v~wZ&Qad)VJ!4Tr*$h}fslT03u?VSYJ!RUT5k=Z|hE{Ku z?9Cb*_%t2*VQgL??rRCP`gir{6@JOJxUKmxtcVhq;eO&G8^~+7$$Q@I>=V#5tXjhZ z%u-u;)BNLC>v;dyGH}9G$ISz`C2?6vhciO`_;8P~(fl8?+Kf>*ZB~O8RVE`FGc6cHO;3V)kHIUM@V0ZMcQ2dG zUjrx0*3;Kc#3gmjon*b;*1Z(>!Fz|d@q2A$Hh78}J!THMt3W>~0ExwkQga=nqHb1% z?W&KF&@ojel%SC$G#!P}O_P}SH#FakD$eNrSub;JRe+|~#v1NS_Iq_U5Vte)7$?Ho z0=%nbsx@I}^QnYJT9={vMDkp11)RUoO-a%OGAzHi6XRVoit*4Z!-GoMcJM-M3Q-3X_tWM#Ngjn78l3`XcIOKk zZ0}q;3g3ZpEodS#x)i9h0DFKdhh_%s1694YzP}N3`5vx(1ym`)&{@Vuzcl4shc3s! 
zO~*^i*bxn$LZpPfNbJ|vANJJw3GBvh!CaCILX(o99Jfh7j!t#Q*&EUZ{_iiZ5Sb`T zb(~&IKL*l;BGBLPrip>l?d17k9VK_MK$S2gq2_}DMf;)UW|we#HF|w>yTni=wk-Um zGeJExY1ejG{a%?m`NP$;n7A1*G2iz_e+ihKpU%*146ZA$`tg!(ndJ|ECsE$-*B3`X zq?X6_8|BDXuh(MA0eXHfZv4d+i^NtdL@V5TJH#HwINwaY)1O>jVb$LjvK!J&TY0g zzBlO-skCWXWjCt4V6<*?T#HE$R0u~!{rOksys9bsL+wMv#mPjE&iCR2lJY!_*xiS~io*>4A*{Q+_YBB< z(OKOgR;stCJUVuSd0z3~9NEr#3`+DTH&ZL4A zDYSu)9LC2p{hI=9ZyCB^RobZl<~362yyU%m`8X+*amKzmg(?)6bggqC*fE>T8ES<79XK8_u+_cMcY+f$v z+=<`PZ0d%0CCStej!i+I3OKAgNPR5*KXmvW6yQ)yA$;F-h&z0T<(+*2#QPFw>4Gnh zx7>L0{6B2-8YB{kz|}tWI1$|PeNXYXMIq_7F24z{<5xx*l%(GMgz6~BNZaB8CE{GuQ$=7qzS>X{bV|I<>y&YD?1 z2?XGP_Hj*TUy886d&6%a7jgm|XcB#WDEv~Qp?Ev2cKaB{uX$e4mr+o&U!AyamFsBc z^!t1OPgv%`*yVQa2By@l{}27&pz%H-E(s2a+`*E!!o3!?h1ZqvEv8C=rbT7cB*^%e zBnVj5A%wf9BJdQJ7bjNg94o@$HEx@GPo;Hziur|yZ(~;}nsC94o*KPhY=Hc(9q!Y6 zN7_kqqd7Bz6z%3zS+w&0>bFW@y*}Y3UDfk3qAs+VV_9 zh+bex5B-0P2s}WmR?T8N6_2ze@>p#rhi3mk4Nw{{bKcw<CI z`RBU#B4AwJqrvb%ui2<<-IuG+o%i^jkvIy)ae#U(^^C|r>~R+eWu<1ObqUWKn$T&{ zv7vW9SMN0bXGSR|Tm(|>1V1Vd%teyEf*eY-_=opAUNb~9Z|6hV>HiqGgr;n(z@YzO z#>I^d%MsHL30q#u{@*a=pj7y8JpUpHO-s*M6plYVr}q)S_tQ7?H@rl#mA1*-Vz~Pc zF#r%B(0MyIfy=)2TgjsHQzD(q9PWd1*(|l5GcV0Q41x(K8NX(^8SGPQ_dDc)(R!3UMbW?5AH+0Z*92Q@HWrP5LgFYj>qq-KR1iQM2!2w&&fx;%^5 zpvc^>Pgh0rD1l8?X{z?$dPjUZS4)-Uj2&cm{f3CQ4{qy+qCko0l*V#hwM|-YJ;8e~ z=7EO#23&r*bH%ORwO(^6!;ma6B^ccj({ydl}(>Tpj&?hOWUE15t6uvBM5EiJPd$s&`qP!326Q7zO<7=)gK0VMcnm#F1}4!v?5 zb-OZ25o`mEjW%1Xy%)->JbkD!TzTDHd3`1&vi{z+0u{F**9LIb!*PiVf(}v#dd!IG z&T!Kduf3&AC{&i;m{rX*Z9{=Fz@}cYhHhen5(Gt4yD%ld34sXt#j*oVrOs%w9@roa zPXAC3wgaPMNokf_X_x9YcNgkBXMos&_T!&})JXh&gwx}jVZ4ej^VWN$Xj>I2pKwOR zh2$#I`zUHx*G?9ZJtiTyyUWYZ%x5_JT5_yOSSs6o={jpN_kK|Hl4k1-zZIz#&)!?N zC!LSvgTqYQlMwN&#@u77m!fX=fzu5Jv|`28>oYH(J`G*{*{$Y_AA}_a#e#Fk#U*HDG5fJM zheRN?$MA;9Jh&AWz30WPV>^(lm9&ZD*Rxv`&Pzg~Xz@I-g$KGuz-qxx!dS!gkH)Ns z=|eWLjfpjViQ4-Sd7C}4=Q{7o!BDE3CHG%eapf8PirZ7EXIu_CTnyQ`efqu9n_aC&~I7K6)Y@vSr}m5qxCc98jZ8IQrrfya zoSX>kPylXIx>AB7kNhqprNI{sbQ`X(dGP6ab8c9UK%+inVb2lE$AvYT$Eai*@Xv%4KWFKCRC&{ 
zc|+53tK;^m%v_F7523{Us)3$pvf@+TqdtfD)HF`9CO*5QR+sk1^s;2Ridld*x5&_1 zNTxfpcjG(WK|%}cjiMpjYoY11vh=tlQkOP1oT*LQso|xAmz#})%52YArFt5p`R?${ z>!>041yr+U+PeYJAUJD%IiZ_QW}eZ=IVi$9getOWGN0TT*(viKm?H0muRMN3h!KDV zw@%m&NVF@sf5E>YDptw9GBkY#4&hugF##uuhOeq|AJjQ6un#${h&`;Z=U{QK1RrdI z_(Vltm$^mO?E>O4OZQAnt?pN0uiMY$juu>)X&Q`jb51SGF<`$_ET4v93!XSQX=~G> zJKa%JGft}IQPouUkjKfm*4Fck;`2*%)Lk{R668EBUvS}Wo|c2yA{||M6S$;bl>)q+ zxz7ltg}yT2ln4z^vpVD!m~HrH+Q1(#(!}$sabwZOp-fYk_WPX3+E{EoVeRmKSfDOT z_hA!ZpG~Y>91jnVvpxC9R2V@x`A7jh-1C*;$n4Kb$XSx_k8B zxJI}1PzdGoM0d|%R{^18B0jROt4MU)4l+CivrXgAFr#w8Nd++o5A<%9pdUtZpEy|` zQy`6=vd%ujzcZn7KEEvfx=Vp=wpfq7mqvjg*4#IqoG^zgCAwDcHJ?+EdbTYuO-`vb zM*eQ4lt@Ppy@ti+PdhLt8urWx0}S`I1`~ZWW^Pn1p49p%?Lesgej5%6c+HpU8cU@E z!=c>Q`|$I&rSL!QKj(6A@>v?_vM>Q3arSG|QKfHUir9GYTEL5ct2Xq3x!5TW5yi(! zHgqm+c6P$WfbNBGd;S^4N>jGhmmX`ZsionXRrQ=wlXCV|5Ms zYIMr^1#Vn|sP3oKbq~l=9(YaJ;tXnwtehyHi)}|Xs$lTLV9L-E35^mT(k<186Oja=lz8#NP(+B>HsLYzbG1fBY3_{=~LBACv^ zx>3fkdWvcrSApoB$jugQE<{v8p4O{mYA}D#_{P=Ut!ToFkFACs3R49v;AEIkC~x^% z3)pj7XaX2CMvb*i531x#y`zL7CO`lC__VfSpA#n;>Z$03o&Tk5C3sF2meONmC zd#`nH<@idka{fA&LE9s1cnWUde&UWp88W4K_}M0J_A%snaIEe**I~pa=wh>4ZC*~# z1}AW~_T#9VpKsRN%zI%kpx6*#D%qBjP(~~#(#yj)o-nnc<6G73L`v()lQ z@`v%!1YH}m*i(j%##sx7!-yHr*3uIHX{>Wt>m@tEl2OnYeAT|OL&ga01HKX{p_oAJ zp0-+#T8AYO)YBxVLhQo%>P0!K_(~PL9S5G_RfgUs4uo*GmzZ{xz%LC(!8CJ=K4WI( z?23$!g_n@XqID)Ki4lD%)_H%jhCMz3(YXUk2UdPcFGg@hceVescQY#@do`mMS=IGS zYlNR$HJ5@2jo0r218J$P_7Gwed^dJm*Ud1+2=TEk0@UoEiJ$X0O?PonidQ#+md- z&OsuMXYCu02;R&X)5sQPZCp9((^q6IerxfHens=FH9n6UXVX4#gdej{#_V>PH!9Gv&>YF@Iz4t!m!QF3b;)2} z)iK<7w-pD#(VW1V)8Ihl+$HW6{$puPM)F3tpv8&ZF5=L1y>54|?|f|0LNbpbnoePh zcC`;mvVJOnQ5aw7oGnooYHy;{#gaWqpMle)i#))O-$hOU9aL zgmsP3DxSAno?|#xvnIz`z*h$;YSY@8I(LU>W;V(9pN4ohW zJfE_Hj!q}w#Kt6zBZ9UuraYBWiV5Ra6cTu*29q?`JEwN2!K<{3O(p9LIjX`qmhqM6H;tHIQBqCIus-HWlT(ZViW^VAR4SDV=oYV(&|(^4XBsmeq8 z8jrYcepB;TabsU=RWGniS${ilL|78cf8{-`SpLxV3bKya1CyLaFSR{9NyRd%vo7~j zmt>oz8@)A>;x(0VaVXqu4tOW1%j}Eu$9%Ecwuomzq0jS`IQ|fL9zw@ zWVVM4zBEwrMrXCsuG;v%f5AGLH%{Ria=L?{%079#P 
zi$TyAYO?x&`HTB1k*OWNmA?Ly@k5;7nx9%iTX`rm2xB9lv^6*}9gmp@Rs zaB9Y4-~?w{?2~76;phsvB#rNkm;~f`(S}3Jga`1^gxsyxuJ`V}t6xuRi9_ge!W7Qg z+jgtGXzm73N!)PYxt!y_aBx;?IH!6UX^EO#)D}h_boUrZx<2W6=d|}gv9%flV#jva zya46)P@}4@W@>FBo#w$u(#o99Wx}@x3fMeC!V?8|b`5O2unH*&ZVe`|;ic zM0}o?tk)Eh7K;~KjpX{tNIPo4?Gm+WT9Xe=D;XMxTJd~_ z=3HWVRnet5#=10(Bb8lBA2DgIbXKk)rTM~?278!Q53X7`%_((0_rYeqr)u3tcTCH= z5r5s|mue3`b8tU!grADfy1VH$kExLoHGzyT0FC0$OMa4kR11*b6$B6WgKD3xPaBAb zMMz0mPu*#*GmQuf_xbbsL@u`!Tv~Ur-+#$EflU}Xb(BhyUW1!&V44P8N2crFUQgw+ zcKDQN4MkXoje;w$W+Z+-@mvfoXf~HDnwtayQ-cTQ!KRDDCx^hel^MX^Jceoae6gj@ z1ME<3#O}<~Zl4;8C$Op5Fvn!Y3x*s{5OWWqM$jFx{0aVvBL_{ z4X_tsaT{D!hB##~+yC&@rgfATt+aZ)Y#^J^Cr@9gNI;`{Bld1qj$-(veyRY}G&67h z@W-xabKo`*pYiqFw)R7!6ds=WhD^74Rm_8367NW#9O3LX>03l$cqPF7@k{|amyR1t zUlVqsj{ZC0M_l7rn;JFKRyxT9#RgN&_Zh^D3I(R}KlPpep^N?gn4~9mqzXIu4n}~3*P9Nb)U(YFeJ(BH4XKhR?)2o8kA(^` zw1a|0OEje5+zt6JVLpQHlSyzGd8oift*E6TfAu1yH2UZMSi;ezwS_^xt#n73<=Xu) zHH?Z#6}|)9Y6T(sK_2Wlxf4^au1+ZGyj<}uw_?oNL1#BgXmdpdg7VAgF+TedYGv4J z0%*jHKUk~|L8z=FP`jxBs8`YprwUFv9Rbkf3p9|t8b$;xVph7(kDjx zIfr|4u5~K3_Ap@jy3WjF#K9*ofO!b*z(fnS%ZxvMm(n?fp({oV_t2~E91MvIHP(DDn`C$ zB_m_7VYv)$KoExD19t+M?oKpbAJSGFce@?&@+*IFv{GRfer@2`J_qrO5f;t`wry#3 zG$0k^2=`H2sZYI$tJm+9Kfh3CL9P8Dz~<8QJ0D2o=I|AbjOBcp0kLl5K?SJSNnJOg zUh`^YNrB<&iYQnO^DYz)+;yjBv-?liW>-&e+AO0!kw4qI`1bnZu#vfkf$PUzv`hI; z>Sn{Oy|?a2pIGy9fJTb%7|q3A%&vC;)7}1*Ba{p35gB#3hARiP_ad@66Jwm5lVl*Z z40jbExZQ zM?_g;D}}RK6N~9C1A0Fo{HQK$VL!dqEE@#HbJBQ^WYNfEYR50XzS2G84U`^UmYbj- zL`Luc`}dZ&aiPNELWX}+8Ak<*~Ne8)M}2o^Fq9jW+}5I!qc%Y|A?bcsAuB~$ve4aW1%12nQo4aTek8P+f9IWf{aABCeX=IW!s)~PEQn+ z8HKAZ-}2xZyTXJjrha8%NPM6^oU>C|P2O`J^fE#Zz}fTe+PIy3*F4;^iohZEy8|z! 
zt9f2L+$)*=Sr7{GVX#v3QyRN1+C6AD!)+Fx%lB*t~a~sv$gB2d92$0L{0{bJ!;VwN)o1#0kt;?9i`I{ z^)5BG?xPHtEE!hIT0A(UQRWE_4uQ*$&Vl3hzN=x(;7UKlw4lg01FSA*kA;vNTiwB~ zvK_g1bxq~QBl`^Z3FJ18l^=Js-x`(7^U*drrkIe2w(uJE_!^(8R{%~KM_%R-A864T zSh-kl8e!uH5J=BNb1}A!+r6|%JVp7d{fqHr)6n58VOQ}CZ)^YB+WQ4+)m_%4t<}t$ zK5Ml*9*~-|HIE?aiodx5?DD0Qm;Nh_;wEk#>im=POz(>f*etxQO%yyX-BI|KQ@B`( zYs2TuaH}soJ**&|St*}pWaV(fzU7VrkcUS0V3pr`x^}|#!ly+tizUd?U3sEuygP4qHr1sdAIS{RpUJkjl(7+!jm)Twy*9liv}hi zQS!JTQ2SRfa+T7PqeS37jXlEL9xQcM9)B#+lWU&P=FzcBZ)?-h+0T?CUmeoSh|h#4 zI=?e56(JFON~JA- z0aV-@0Xe83RRxJilODQClP)#X(3BFo0#enjd((>&LMJE?k%S^8fOHK-lp-Y%0tg1A z2|_3lq~x3IbN=(6@8ZAs@4k!gGS8D|t+D1BYmV{0V~lsU7r#Vek0~x%nNNPG)hNB6 zXPe1!uhik!*R5eQv#GS;E|oHk&N4 zP_{&EY{|xVTtr~nKI4L0t$>(O$>DR>Aw&1fBD0~n4;?k!5`x*y&%H}De+{O-5=QKP zgum(vJ9~C4YZ@vnQ%)K4TY+nekr+;LHogc-OmjFv!Z1f*dH#N8p*x>wai-zF1-WWt zX44z7!a4h3jRe|y9P26HU@=AY?2sepab^+6kVq#=s&I^&HdhFLMm>1;k(zXFBwT@> zpcBj++LgfxFCRK_=3BCH6Kbzx+%<^Rr>QfDmmeyoc{pe>mE>l$dYdR#(U>swT)57K z_h7|~opXjPWxR14>n#`>IJDFGq7{7kLx%X=D$wMA=1d|!%VSy}FO+QyM;dIfX;%9L zu?%_eOhXH-oa?KskV!?#^!hSfC>1Ax411kB;A9>Y;s;lVsOd(+O#SMcOT+OvzLbI( zKDDv(P?frJ3J2s22rf1n53*^gs(>Lq5)Xlx4OC`iGJE46m{urZyVP2F*8zb~2qp?LvC1kjj-5^7`%W zCnodK5>i7*U3cZkvY6SCP<(4MUhGT%qNTc9c?|C00Sg3RorUt?HtMl@nV4m9cOpb- zrT7tbKVuH{J|s54xtE&qj&i>>O2xk;e69njEXT1;jy9l@Q(94hU|VzYCSaAsNv4iO z5#fo945zk-2M0f=g+Il(R>q3u2(-sIzFD^ znh8zZ->?|)$)0gerSwT0JW)^|FcTkR0rJ@WHxz11HbwHXLFmdAmzv2UlZOA8Sq?L585jlW1`_>J;11r z@k|D+7ObvFIY9UL%sfp|Gen6U!{e=X)BMP~eOGNXML)?V=`yjg(~D4KKS|a>yF8M zBT0Jfu!-rYkR|w`1dQ+PxJ+2qm+`eztx6T_302lgua!nI&n}ZzOVRWe*I(?zQNM4I zYq}`@D)vcVGUHPpg0PeXAU{KZpVs5L;-Fwc395nG(+*IXCPT5Ju!rctFqS zDiy4Yk70e&<5Lv+F%21+l)+&dl$ukf5a8!85P3(;rb(^J0L#Nz1f!{Ls08iKncZ(( zg7ycdB!5KnGOjyt~-U~yF8K@ROsLv=LTR+Oy5GktQ#IuWj~>msja^Y(h^%hc*>&_LCU8X_O)uIz`zn1b5p8)phzQ~?N)-%E{{4L8fPH_MRi}cu>HTc$ zT93B%u+xgJ_fLKNR5QhvG6Y zq7_Cw)L-C!C6+jbd3M(UjF5wDDk)Fz)>|rlB0yLj4(l$@*@2B9!$z~30ML*qpx9`g z5SO0I=$C|Q9+LdYZu`6?SJw9KNZ|ZKYm4Fcwk-)ai9z@3>lm5}0$S%a#tb%J 
z7J7#Kf;|2QQ-eI=w}fz<##_gWmn6%3{XW@jFcsJ17-p_=iQ5|$2h$Re>uBKDRbyAO zo}#0k@Q+=E??~uCJg}vRxdO6VOS7{7q;2!>?2uC;z-6YZVPVERHaoxlc+>O(4kVD# z9-v}f^YIV1tqpqaNl4#s?SgSWXGk@*4U&0s11F{V20s6;`2GA*F+NKkbZVZP5I z=K%hV*k#n5f&dle|6Yj)@ZtZK<^SJ5RBQez2t);P3*@#(j2FLUbOQl};0`W*$A~2? z=-YsR4hoq!Ah8j;-z&RROh1Cj03^IXcUDDZclfyT;WrjL8Gb6;BbFdXhXCykgcX=q zbyn3kgDwKNsRl-%2Q>Fq-f>IDJ!R#@-1{j0ZrVLx<%q=NAt!M!^Li0nQ-L?u_;; zKJznYta(20$#)j`K4C_krLn%%Vrp|J@8x0}Sf5@z3z>yYU zm=_FUyOIYHqjt6(V|sUL(y10oiEL=*b%l-O$uN17*b8V~yTssC+ zn&HjY7YIRXEkCc|m$Q)#(jG0|Ai} z6GM+p`fG;0gO>~JEe3!M*n;U_qxE_)-;f+kM)HjzkGV5EyBAk)B?$-`syPII{fp<7 zKVfM1;Du#AEMBJkO2O~~FZaMa`L_C!%nw~YXh>qay5zNy3W+wFoX1Pm_i;rUEA0_} zq69pD-=IW1dY-GA7x$IH1%cQ`ADelbAiojJY=6p~JMRaSB%#7SP!o%NVpFnI>bbPv z6sjvCt^7^f+^3@_+IIl}v;reQIMTlfjIRN$5m>r^QS{+FR4?kW*>WmT$Ha^qFUA-0M~%u35{%`e2~m_`i~nUF-; zVQNsK$3IyKD%Fy&j^^4?^E_qacon7PO0?dO#gMyA57t_jarlHi2Z2!X1yXHBnBSj0 zH*<=301`)sHrKL^-|0RFaUq?4tae+Sh}nq>zKeM^hSM-9=v}mur;c09luaetst2r+ zkD=+Rj~?5Y2a#>`)o7Gk9@Vo4>JhX;6EpV6%(af#;}DmSzpuhtj`35jWi}heyQf^c zA#JEv>D5v5320iqy0ASEz*51X=KIW5rNc+$d}v_T^6sFK%*@V*dai;XMcUD03Lcx` zm@`T|N+@K52o@S9l^6?XpV=_KG$JwwV+@F=y)%o061Zrk(6?kn6AzH(NV0{mx|VTF zEPFu_{5>5P%Lnu;4StyzGN30RF9JX`>F{2Rhhku~9p+eVOt?a7$$UX~+w$qdWHat8zuGN;&@+S(rTHFvIurz2 zfAuRRgu7slG%E|fI=UBp^Zw|>m|m;Mg;sNwWMR(DRfq|5pjIm4$FsCYJzz4s$V`rE z%?kO<%^aw_Q5v;$tnn>ncuT?8kV^$pJhu))DEg)oxz>C5<>ogckJ09MAgWV8?-lev zg6P8c`ss&zGjGSm{XNsFbtofLjtr%uEx#6nRfH8wpj?C#AVYf0u=y?9qYlF-Nlz^! 
zYegijYaF$l`NJ#m8&}<)AZ2nrfsd#_;MQutZaPsRDz z5&a(&Ly>Dw=#0`{?HlCx*peEBKtK`r!G(&u)&ujiRBP2};dj__lBh>>nDsCS z9^gs@M*Z<4xA_08UPJz_c)MJb0>TK?vz0Kw*V5OfwY->Zp8t2+$OY)m+W(7(<$dnX zhY}>(E{|B6391YEgDs=fQ`JWuC{fh=j>~yDC1-5Ug>i8LriFj_MC|dtQ+>`0mJ~oM zEB37D&q_$nZ_SunDkT$FTELo3H*mcBLfTWe1Wf z`BN1R^*;c#*V^Hq!-7AcjMn)x_1wMe^FOGDd@Ue?okkG z`qAjK-f`1=N3~!G(Ah&qm)gSSMNB^b#e>Lt*Jsu-{% z@8((J1YoT#jZ|}u7p_1tVbqt$qOx*FV@idHVtgl_T8DPKaDZY_Xl3*_ZltaVgG3kn0rnvQ>J`XYy*^Mo5}3AY8%94=$i9qwZD>{FrJ|_M@E9 zzhK}~*?P?oe?++2tM`u?QMWXtf43$g@+3<9X?QNJdevFsP+k$E%#=q0_3CI5r3S#N z-K~y~J17@Ik#Pzh>b>}^8AYv#tL}3{Cq{nkoRqz_To78)5KdrZ8SlUE_lfu4#ag7N z1R}gQ&!SV1s~+{gx6ALMsv0VnPfuLC5zPA7o_XET+KeL&c7S|nnBD<9;`$jTY+bQA z9V$As^w(r54fG{LO^syBLR)&0e19AVmOXJeWds^>1;ao$qC4!&w{@c4cCz-47(%U| znrf8>^I8tT&KmJE22&p@96YObL?(H`Dc_?^&$C`DAfo$+_=Jd6!(6$lmu>;Q5{&?) z>J2lkwpG6XqeSKLe!CXaglM<@{c|*OF^sv?lZF+N$!ZQ;=AJE)pH;V4-#+5Wp6L4q zdduHbN7peX?+=);rNG<>F@jxU3%|e`SzF!u)ODd;tPs%IUYT$wM_oQCcG9}y^qs4- z=x1Y!D-6q|)47>-7XuKh32EL4kdWo2NIBFM-?;g!USU+o$G53)FgL%r$Z7&1p(ouk zGI7{rI|&bo^JgE{JQ6-1u+|io!l;uldP(O>&Qw`x!sCJxN^;*iETtDlq@*cHnj$u1 zto6Z{4R7wQj(B2mX0~_t&Shb;JyEG4f;Vw?&M9tlhq~mU3F~CL0e0);uBZ;MC}P80 z(Os>#Voa!ut9geZ)gFC0>h@`-Ih*45Xi`*zyifCj@hqAvRu9QFRYR`&B6ZiiF{cd; z_p(MRs^~ik(=*G|VPl=LMp*g9#&10L5xSHtUa51&fDI&6?haaJcUY9l*5M*}yK(*q z3nz1d0dZ$CcIiU`-sk9n1&3U{4?p(S$kR!%(Mzp3;+;J<2=U|*yCspcqfiyBX|7?N z;7{z2H}Xm94<{CyiFbn{#Fc(4oR4ER@<9o`R*`R$i(lz3mLAjnrYAUEh{Ljp$-Q3w zM8SNBg;VD>Y-en~L#&PG7rQbyo!Bazn&QCgYvVc{xbDMcYj}=Acpn_CbfE+DiYC*v zQoWqz4xlw5h>Wio5n{7RHCWAv{g;ZqwtZbV#MEO0DpD#<*l;Gj4Aq5PlLhq>!}{Z< zftfBAkr~fA#C4aDW~B~!eZ~GkV|M41&Aa12y^+T(_n4TsHjmSL%Opfh>kLa3`)?El zk>45Q1WO2lH}E`@FYaSC=a5Hfo$moFt-YE6adBe~WKDoxLLGPuhcb^{C66S?r@DHolTf*mrk{pR zV&&)2W%QjSb`}xS>qQQy$m_gH*(uE<ey!SMUlho?;s zufMuOmKC5maGPQK68Efpobm4Ey{~Ue$Ouh!%@?LvrMOf<*O+}m4`OZ(0 zYfI@eD|c)oy>nhUntRvO)G@)b56$$d!4`atCe3nPim8F6Q)_{QN)wC8Z}0Tl%?s(s z?p~CC;~I^FDqqZB)_p9VuRUObA1(#vcB~|wWD((TLHT3d$#+!KT;&lKQ7Vd){deXo 
zQId7gRp0(vkx)_1JK(ee77qSL>_jKh?wReu0ZFi1;{?B#X$R2&lsc9P-qc9q0nLrf>)NOH( zxRqZ4c=>6eB@K$(0SRFNrgvY0T!z$xV^7$+C%(8qJ+*W?L7a9v_(|EKE8n6K^?}P>WW%=?T^gk z&g?fdT1aQFzn?Q^AdqcQ!0+F1G2}Hyxg>-9zPX~7=Wl*17f$($QZtvwn&2u!FC4M8 zchV-IZBprPICX`q-nsJ>cwC}-Wrk|SCob<|9hwq&A9eV&_E?Ew#ld#(dc*+1B08rE zHvV$*OJ%}ql-L3CM_W}AD>6P;&@IlmBbvB_;~@C3J_e2BQ*rm7Z0Waz(LXy%A`a?$ zhwR?Jkxvyh6;Io-|FE<8TvP^Q_90a-g(o#M6OaIVCWcYTj-`+o@<*N4d=7{UYf8VmHM(8U)idvp+-wUDytc=oa3n! zs<*F7SjOch3fh&yL;lh2No_gt_$pHa|88+fM8t@WCYB5~{tjI6&e9Fnj%@wve?ju< zrL9Z(PbMGkt%3tojQG!IrWO*z7xH;>ohIj_PFSLwI^$ioDOL-}R5Kudm(Jm` z(F?k>rfZqX5k0!V15Dj$@jv+>xWC~+ih{zgbT@3NN$R!a559>T64zLX%-1AL#39Oz zg`ga*^NDymqaV_)0C1LZxOwm&%z=5CdyJpG8O?C&W61s z1NcNW7S>b;`xlUAzX_!h8Ohk4%gYnTt6VP`Vy6jVc;vQ!R3ln!@jbD?strKneKWVs zM?xk0sHzv|z9XQJbALf5|A@lbtFE1=+td~|Fuy^XvgNF8xBzCRIgIa%6`!n literal 0 HcmV?d00001 diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/recursive_forecast_overview_small.png b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/recursive_forecast_overview_small.png new file mode 100644 index 0000000000000000000000000000000000000000..0f3e687e24ced20f72508f447b8da58cf9d60f90 GIT binary patch literal 21897 zcmd431yI#p7dLtkF+fC=kXDe8RJuz-LAv{q?(POb;tN7(2Kcb<;Vu;T0E3Bm7Fi1ZuovbsPX3aRF93-8JLX3@PuK%EL zGps&RBaZT;SYaYQ zQ@pF5AU>r9)E^=~v)|AoMi?Uh0vB;TgxVj9_<&%y2qQi`QI|*&pZ`C;-IGzQujXiJ z>)WwMV&TZ_nyO^Ups6(IwA5SBwFQ>1ILqr?nUc!9mPdiL?zIP{L#jUH{Ut{WS!wag zW5gQyWzGeqN*GetMlCcZKMd9~-%CIsp@?Bwkf7J#qH$NvU)w+PkXCo)Otv=d-dq*S z$bEugo(N zx7ym49T?RED#Vb^WqoR%TUv)h)pSQSt;eEf*Y5~qXmY<5tcksXK=jgzRSU#0sd~#E z0C`xjqJSRu-i7qCX2Kk&z4fl|+-^earRKTwz9cGkZ9S-68l#(JbI~1NmxxxEc|G9V z(zq>3y^WfLS9;o$OSC?=@{3WJrAD(>;G;*WnSfBQ3PEzoN~|W{h2BrCY*TKR(}Tyb zuOR^&92M*GliFn0(BPmw_z{L5FQyt!Fy>pCW*tf_PU=P7rF{Y^dakf4CB@dfz1@r7 z8?<@eGgA_S4i08RchmLziPg@3WDJknUFwreSUc)FkpHZxG)!d&55^L600IURVcMv*AR@s?`@J%7HS(+mv$keiJzbYfv?e(og! 
zAu*R`1yU0OzoBEaJN1nzZK#e?=jETnt zT7FApHI#ZRNVslwmL>ddp!+e|_Gnk-sK;#EA=!GnF3a3`9X@d=b>hD4HSv_x2JWgI zFqAG5%rzbI9Q_Gb!a-h-l1T_H7NW%hcHd>S*IGfV<_J$TtfmoD6Yf?jyoOavJi6;N zxSJC{=Uz9ytvx#AR*J^+$KbBDm$bZr#c>r#SA*R-M(*`4zEj?Wzk1wgaOX$8ik$g! zAR~Qy*`U#odr=ju2;-kdwxD&24yKGTVyR(0t?i5!j2ngG4W6o8cQfj($y1}amS2Tw zB)>wz6vEJ)s#rZa`sJTg9DLnTo{R6xipx;U<0oP+=ks;?YrtcM{TF<~Yi09x+uJoW zGdD?T7(1BXq#klVPBxB6uv_)}lsx%}FL6pH#}6HHuT%w7Y*hj`Sk;mggqU`H6eMBG zrYPH^8r@%b6aF0Eo%LdNdxUz>S!kD0%4T#Z;SNiB6mOq~Gkl5?L~hy{*6{Zr;?wYy zegC9biHRZLg&NxjL8B`Q& zKa0`dlJOO%wUp{m@y~n=xoZ9SFDG;4(&JaB(u7h}-51i0_=11kZI@{+yT9z-JKm16 z^C0)mCbYBVuEVD6H?Fqb%^wq0V>{r?*eqK48m?!_yE+j@Q)~J?d2bpMBCm!pnsrgT zUTl2H%HirCJVeq5);?nLM2-sQ^Kxqmi47Wb6&&l&mnzaSU??kZXv+&|j!=sdnX!G0 zjtv!={-elL+`E#`7nuorX%ZFA6v+F%U4rX@=T;Cx+HfNrW)i>7RNgJ=V;)S6?Q70x zcvP6M_}RHm!N(fX&eSh1fubR+hB{tlItY*27n@A_e^q11PVFe`RCy{Uhh)X=hicC9 z48xpOEkX7KkK23-wAtXk&A&jKNqINR-RsjIzo#L5V;x!Nu8@QKhlg9lw9Ee0a$7`q zjN#EEm{Lsh-gWMr$ebo&O?+eTK5?rE}an=e~G&z6bzPo2T+ zXph6%Zn0uq%fy%6Bd&VsF5edBu8^4t37B_Pv}7e=1=G)9LilycRij9y?2JV0Y1AVURL`v+V50d~ORjIO#>aR`-Kp*5@t15a?e}Q~$vn5dBTnkQ z{Z8ujn{k=xfph^vp|c%*UhFrmqol+QC>JPZJc-m-Map;pu|x@DUJsYBFMdJfY1D&IDJ=oQa1;TZc(3BYjA2qVBEzx6 z3rmW8lA`*}7IAf!WbbxIVAKm0On$ph|L?;N=^PbVhKvR$n>MyrQ8m*mdhb+%ImA%( zn*31YKO*Flnj!B!o7ouA`=T}+P0iZ33^WRx50yS7&L^Msd5%5wRILX%$gkBPNM5c!t?5$0lf z4Sl6ak%vI`3|Lk*ug?eScFG03`@evx`E z#1=Ng81t1i@j|#kEOpd3NaKyeR!ayc#z#{%`J->*XI!HWHEIc@k45_fXUA`uTHmS^ zYII(~v7L@iiT0imo}}ICi^DHgq6=^$ZY@$aZ{R_fI&Ny`Ve4I*r`jRm_aJ=o2U z*LvNqPGkV;!{{ho*;6%U!Mkw@p`zzl*;9+HUYV?mgkISr+lCP_U&XCexmDN3T#lHe z)fQIokcIQ_CGERC5VRhvPj+CDoK6W|HBOkxJUR+`=l0d(@ThJDEi*z*BFp~5uIf|z zX%qwQ{B`O0O?O-(#kDd$F^*f=I1BDHTRVx^ooU(S4*g-lRHc(ta$M?KDQp$qzzHLK zNw1ZZOyBU@VA_{GsBQ%6UhH7vddtFD&whljpU&;50jyJj`N`9KGA*jk@0D@EKO~NO za4`ZIPA**n_>>J91f3*>(V_2QJY`;5wMc34ZS7eiNh%NDX?$nZ9@HEsW!Nn-#Pp*( z+n5O%>!ZK?W7Z;hJB2aAYHU#L+~;Z>zZioOT}IO|IBP~CWEHq4P8hyA6gG34XJY8a z1aq$W(I@3`OP7{)dGh=kqjDrv%5*p`%9t0{AMPrUgqQbT$u(|_+omHu8S6GNpO*wj 
zTH-^^6H4$8LYx^yudg|(H7~V{n_V`RoeJIA7(b+!`MJz}$;QUpFu|&-&0S7rb10WK z$V&P<2osO7471=ns~*8>MD4&2HVpTZh`V5h#My$U zxin`~V{3aVdmhV%({rtr$N5oArk|v`I{G(>DpHAE$cv&(A2U?oG4G%|CHS6^R;|cS zJjzEco=?2@`cSgIKW5w&BVlc6*CiyD$NNl$Yjjfn-n-+Q9?j+%Zm!-(xh6_0MM7s& z=Q|!es5wcXZ@yOh0G>xBwVHC5NxBwP(zT@%tl|$BODq|{$!$E`v2jl5%nx9Ti})1Z zH+SeWz8sxm5TB;HZFP{;Fza5N6YH6b7}~kd`kCB(5B7u8sIAc7SP<)U2YOqSnp=l9 zNN}ReUdP?e*-6v}#?WAoV|x8PQDH4O6XkR=~kA4XkWMCU6MO-CcF%Cq+{@|ZtpE>3t~ z+W6Q=wF>j8I_Ti_B$$}wZt&Nbj=(NOVM`w6D9rPIxOg~PN@7m8oCev-s93KX>7Wu%1J2%CQ-R@eE$ot`f(=cr{D@1Hz%MW;8anfje}aXCUL7wfVs z+>+a-v$W!!qW9J%=*3F}s4I#xC|2iCX%=0Vt4$p`Qo1xDl6Ks%zN6Ojm7bz+VLQra zvB;`tdUG8Al$wFzO_RvMhEq)&QP8zV73+1OiOoJ+%xjmfCgH3!s^ivAnXh0s7TmpN zT(WwcwOnq0bz-5(FEKyQ!zBx5U&M;DA02*aFuUrK5p8c)E6&{jF6GNTuOj=7K!Y2Q&-PT0eW94$1xL0_4be57T zx7*&J-t;$A>bF>P1;c7~`sT&j%qJ5)7d(Y(47BW~d(TA^te90mVB|p)_pV`vAk+*8 z#Y{QUR7PQ@dpByP-M(0BAq97LADt#d%2K%(E`_k&D~ZtpKm@I;~4A$W&Ar ziLzc!bCi>v_QuZ=VWG71@h*Ga{gS*{nm15Cn7B+co~X#?t~^$V1uuSn|KgAHtZuFS zKv#Dd_JP-;YnU-1Q{7gZ&1c5`)KIrk=~0X_{b*G~)A#Dv(pj#iGG@7=|Z@L9iU zbM2nVvy76Ouept2K3@F`Xm74Gn{_`YyJ)SI;Fc)X(_7d@d=kl^K%cI>8yR=WV7Q2P zbXp5E0vJG_*0t%V{r#lq{389lBHQYDtW)WXFnnU%yItrp=GhSm>^+H2=?==_R9WR! zfW_{SFKb#BJ9o94m1ElXTQ)m9(b8xb^_V`bb4d>_XY{c=7O8hc>KjY|4O%iyp@%ID zg|jj;#AtybuC7bm6MWnvWe-nWhO!jhn#WL9s}riaT$TBwL;)~rtQ{p}|7f_PZuXt^ zD1c?{0{QB>!;hQW>7~q%v@K^xH`U$ev~0vTGPu|TN1@~uGWeL(-o%cTYZ?rOuE;C& z%mQ}ESA2BGNm@HU8(s7}6xwd{%1+gMxDTWJUmcZuUpykYsYG#~CHXns&sQhuFx{U@ zS+U@;oK9NiUk=7u-2I_RXO|N-T#%PMqr=9tCnh9NL}{-hgnjfc+1&Yxzjh8uGNFGd zNqK2+Ti9{8_mO(|*6zJSkl;&bWWGf(MW=r^{vW4|_*cVY>^V$YnKK?VA}vXY+$>UI zLkbaCGNt6Fz{lPCTI=&7**FM0|4pYqAE6-Gg7rPnBNo7<;zNA1h!C1JC%CJ*}wHO}T5D z4S?mcA|a6!tE0*p^65XM48MCeEZ=aVD}qOO-9IAf!kB>bkk9K=menqywEj}R=nZB7 zA9)Gs0`+~n&;-e(Ssw~FO59^cdPZhyY{>IBR1eGr|KEkj|1}H=y8n$YT_2cqi->P! 
zoP?4b<;;H2-Ej@(?OD9f_6)Lm-rB-z~Yw z8Q^P%CDH|Uz*tJJRsG`gaoi1*WoUx+SH)Fwr^-|FfKX3qk&9;GOT~8M)F7wT9iXYz}y0=nv$7%TpdQ+aHMcC2XLf&8=r{1==W;3oO z(7R#v4oAbWZ0IRMNc9BOef$i|Y8D>PJ;mb*rA&DMj%s>UJqayQuMIC>+db(Q$zpJA zGVWk*-DOFf%gILu7f8pxz+L13iZaYcm{e;LzyVKD&c}(^yoe4GzU!D{m)b;ds3}8l z=$k&}&Qj&Vg#5*Al7YKq zd>H@a+ADdfNIIqEe2VX;_x$Y_5aXr%@oWekk1b}<-Fc8msSDD>cHt(u7`@0Nc zN$~ye!0#`wBrpX81ipOvB9`^w@bK{NW}R<)B!`Ua;_~uzb9lYc3%P5HFUMT^gxh+> ze<+>b>KZ$mRwd2$N-kBS(r8$JBwNOpm5NUJ5C7Tw8q7{pR@lW7v_c{PS*_YS!<2;E z@sBC3B%Z$TBM4wT7vJ-$n%zdTe0?;$sTG9-eQ(h0=QLhu_-{BQlJfF{sg`c9*eHv+ z5*5yt+SwROASSGA4Gl&k0huYX+a6<^*qkiQDoOM7{}vV&M&f!PU~HVb^A9tArRn5Q zu}(Yq)i*GpxrP58-v(#@TA^?{?9n>6wX|RbTb|9DPL-Qv<sj<<)BXw^Ado$t+} zp{QbB>Ylxdrq`fGI};KK4hc!(uvxPk*PygN*+>r!L?+<0&F21KGDDNU zPLcJ2r-@O%VxH_nF(yEnTP6J)U_=EH-SIBh8D_#m&KqFGyQCT3xil&J%i154?|1;b z$@sEOz-Zs#4+XIg)6RvM(ZjCPz92WTr^Z6w#-q6k*49VWHtS7IO_5`;mtGROf|$F! zcQ?mjWTMmsEXMh2m6AR@VN02kiN@D4;&$Jy(Y(C8DnvxjXiTRocH0OnuzZD~IFtLT z@7i#+Tn{6pc#Ay>CL;ZvW^3%Y?00@`MX!E)hR3Me8i-v&&dck*)Dmz$=dzEC!Tm<< zU3E^TM7;TIO}Tz=oEQz^PoTiG2U(BVLk5K%Lo6aB1<2N?l5EH}xn_5TRWL_I2a!ZL z%BUW^Q^CxBCsY{E>sCYMdbt{N<|Xm^!#|C=3VA|{vkvog%mYdtpD=i^@bHWnV}+i{ z#Sy--=h1F?B6D#zZGpC117@{5hJhjS5u4f6yOQmV!E`@#g6OEI@^8(L^n{V7CzcNu zoAsarRaVP--O<=xfslo7KuIGTTG7k3W?o+H)GT~V9>s^1fN>P~ltsPm+0|m)^Kl0b z!{?pyoJ&96R@J&AASnnl=$x$h^AC?_;CV0Gyn2=8TUJ*4gN46h5sAb+&d>0fvkh5k zgtCV1HU@?WKF?g^*MTuW+IQkK7f1u>ibT-{}BU-@lZR(wlO1%>+QHIj&);Yaw-%@&3( zV+ghcuZ)PU;HU|s@FN07U4N?VIU2)PBqT79xY5#@2XjF!@%(2@5KiT`p#9!Vwe#r~ z{Px<<*SGV9OO+>}!-B$?4EogT9E(knJi9#aZNlsGqP|-*mA(pwQpI|6?usw0>mZCR zJ_y)hvCz0ae{tLjyP19}Pdg1s=gs8%1mze082fd(J=9|+E%d{T-pGb5$_Pa zwEf$UIv^ipSGo4z6yqJASNl8<4^XknjpzlD0mCFoe1|L^r z&+aGjxJ26ZeR%zB-tAoDE7IgprNul@?;_3m8tFH#$GvQ9rV|5MQY3iz!XzU2NboefiSOZDU8yOUZecL@j`w1`t`40zYNQD zTb&Jg;-1aSA+lGq zfs)~yz~!(9WoUTe3+9C?rVQuBrVMkT7OcUCO1T1$7t&bn4?TDhT$J3fTfjoQb|NLJ z5zo_M-0|mKQlI9z`&>B<%hw6cSg8E_xlCdj!bgSgrZ(H+NS4b8eSbSFvw7T3r2ttSE;AYy zC2<=uWS(=?K7BgZrYb+PNZ<(wijmON2c?$0zWW*}+ 
z=R_)KiiD4v4vm@rQ~pcBz}~1iZY=v-ujXwg1v~da-}@*&di%Qb+h8-?_t^5tHU49N zJ=Ff`{o254;lo171N+xw6owt4^tr;%gIcA=@L@xLj;B3&mzh=PgDfaxCGU*4byx_B zdhx>ferkT8*nDNJ;jT?P;lH`Udbiii;#F?4qH?6w8ljXI%gJ>1o;>^G>t_Y36DjE3G-i&2?>N5i3gF)6#k z=9HYeW53!J<+5MT#?G#M7A8~)yhdlq#A#kp1EwTokf!2c^#?#OO1K_d^!Ew7pPP?5-o$2&<&l`Mt(;k9|s_TJ=v*+?_4WD%o;NeS| zk6)k!xvQIsJ{0D7tD6dNC2`vZz^O@t!g=q{r(k|- zdRe}b>dUW`)r?LBgXjgX=Gjv(7;<>u*fx1sSGa$hIE+#^VF0MXooGM&dxWnDuJG?j zhQZ2B0>}BU({JA8KK;QCkA4%py4XmBvwByosMy^Zx2Hh4IF?N#WAGfo0`V* zSuS)4`GEL`nU=Bl)|mS@o54xk-ly%YQ*p?YW3k0WXHeJ*v1U;|c+~+l_tqmxs{VBI z+-Jmg$c+k)bB`9jfMfT)GXgYG^a`2>V5Vrc!PO9Xe8fx0Pi42_*#JAUFqQDuLuKvx z@*E4-SQIRXU@Cy0oP6##_ou>$9Mc#_MWO1*QrI(cr|;A}uK zx0Ot6Cf~8~0U3#b+g)Hn(Y&p;_*fTSL&5b!zLTV(qM>c;c~hGtksOHU@dYfwGf-Il zA)%=H3BG=*%1SdpJe|r_z~T~E)XAtQ{nPRDEsbKnH1*!1ch#>%#qG^~rnS&0E1C47 z_XNU|lz%FQ%B{B@_piUnXn7MVJw1JsF4#B>ve2v0I7iR}f#0 z`h~U;POdehwI}tIZbec+h{LeXr}yXi(&ZuSo}#mkcoCiN7*qX2fXhwH241lmbV8%X zHXtY{l8Bn_KgJ(``%AZ}ny~-&0s~{vp^Gb;TS!W79#Jt{wtdY-&Irg8<@-Z5Kiy-l zzlO}bM;&Cqkv0G$s#yeE4&jO%jrz4Zg>AEhCv#vntTbL#PyU`hWj>-NyU*GTy@1|w z06KS+KmehlBnjroAGDVN8D$mL2M}ovOx*e({!qEhub937-t5oD-`#zI=2In`l^XOD zhY??&@9&S%$aJ?E1LDZ_>lG#0afz&3ye+De>9=0Bq=Abo-=NFG!l ziM@0H7vbX$uxC@C$Cl7x76Wep6Z zwLr)qDU1k_2XX_D?>CSS5v%u<%wgHE1P~n{M?6-Mo8ac=i>CP9p^_?@-Hga;u3{yKZm;EYKsLzNv9vg`R2oiKj4&DM9jbs77#iX0aXV4z|$pd zg`eOjEDZ1gc)>+l2iX{(MEaNaguvOxTF}%Vl3CjWsh<=T$L@g+wtr|SFx;-j#>RPy z1rLys?~NyfiexEm{DqZOSLFM&4FJ4vOYgt&nwXdv4-dd{<3V1-fs`)Z>wETU zUqJnY0!VQZyH%R(W!}VuG9CpYAa5iG-O?Y+R{=P#Jo)pdqVZ5XCVgODp3xyF1k1Ne zY3Fz#ZUV42UvYc2JEpaNW@hH{cs&9eaeEbx)nNRu-nC&AT^3SOK^$-h(zuEksee8Q ziHcSl4yJ)|s&&{`RK0&250K9G@o^BLXWkcvL1w;JZLs{uU7-Rg15WQ}Fybi-6fMFQ zM`LOhKvX_s5j(AfF*IB(Wx-To?)5v55Lc`sEZ5PTFb5fI(2$TxzlX?!#dc%h_wRR; zc!={|0U*B&exy_Hi;tq_7+^QyQBfK4fdiYix7X)N)O#O6jGR-*Id|=@EUyHz%zePkfId~u~1B!qWghAULeNR=$Q^ciLm);V7_~=m*mjgX5?bn=IBr6$UAE_vpz_tER z&(f;bh-yGYDck)};4*05=wMne!QX!Rp{uW7V`qq{bmK1<1T)AsYdgFvoUdAL3}R#& zr9$-*-GCts#NA!NRm#|PhiwEPAla`wmbAC8_@5G)j|$Y3k&#g{fnBD_M(%zl-Xn&$ 
z8w`njOP$m4UhRIv$cTIfsMP|K`rwt3W3Vjc&}&BBjzDah$UqnQ1tEM~cn92QoJ6$W z;8%}R7X2DV8tZe6CxtmCa~}>uh)>fA9(I`%Hwxi{fe&Ez3~Um@_bh#_kxvSM$N6Sf zGIbR7yN@gmdx535lx8H?XYll7Erq=2d7pX^-uvHLJ}h1l2Yb#Dj)- zc7LeZkO0v{rN^Cn#!*&VFaZJqdfdUqW)r!&YYdrJd9I4;1B^se>UpWzbKQIWYv8@4 zN9`}~KC>h=MA~g>XVURsZM)&XM#z~vloV$XjOc$)lFuvZck~sZg)I3}&EX7e^#`J7 z?R*JbuJnmjPg0J^=pSJFRy|>dLS^cIOqwQf+HEoQbIaLQ1YKNQbVg7zXx1s`C}BHm zB^POJUd?=>6R&6acvKU|)LyQ^Q<_yxI_`Zf810#@C-r_%mX(DvWm!YN5 zvEF4~^3uU*Zt%9oDS|Tu|HXSQ#LMmt-#8c^k zS)c%(Srwo**&O%h$r2u}V{noRa>C$lc6;7t-HJW-@S~e7QJjL>loP}q3!E*&Mc-KC zfvUepEl{g;KN%DSrn=xiJmPX&=Mm6~yn{L@4%BWl6AJp?khsqw(vT z-M;KNcQN&Rf;5AFp91K8O%b6M1P2twXSX>-&d6wedv&S+zdsEjM)*F?#KKR0&(Bu# z^YhUOSpAI!1qDS!eyA%b7`ZX`;y=U>a)Kp(j-t~SpYu-cqs+vM%|wi*<>Llj7j#Ta zwKBuEWNEa}w#Ou{f0T+e6S*8DB_zy1G=!b>S1WJy+5YrrtT`=v5h0;40+NK{^=rBq zVYJ~B1e?9b4+a;!8=G(5X+3dg$5aK!9z=U3X;cUn&A5MoWSDlPx4p2EXI%t;fG)x4 z3Lzd2Zve`et9S9ZIcx_S38Q6DEz7pwow6PUJX7Y^Ski6<h;A0A-TnUiJD4)lx?onVhVwWFlu&R1}K`{3e&D%&^GZzroG6 z;K%8M$a^_p124_<@p-6XI9KOn_asn6Zq9$)o#3RY$S9T~<4b~%Zu3Bl1N^prKGp$Y z{4F__(qaNaL-(gEi$v0m22up1rKN$q{ZE?|k(gLuo6Gv^->U&MVB>JuQ}=sL#A-2@ zKZ3SlThr6i1L$MmImC=|Rf96?@U)0M^!EfV`9}gsi1F|K3RKTr-V~+DY(7TxDFgUv zUBK=gz0C$w5KR~EDz-J0A+A@x#bNb!d8CzAB}A%+16Xg|No%d-AE^QjCC?D|Df}Nn zlCTKe;>@33qsD*h>U29IJUlX!T4;a!7??|gAfs1UDWgU#MM1`=ZS#edbjdu^zmp4#?QqN-1Y3C>V~gxNKDuDhKRF)eqFaC*vJ&rbDO zB7t*RK|1rT(TZ8{c>fm5b&kClktf?iC~!+ZfQNkd?wzhKm6E=qf@;CqsHiIJR_JT= zU+YcdkXvi=))dMJ9yJw&3iWxR2VZUod4C@pM#;189fd~r4nkdkvmhG!$Cn1X)( z8z8);q1kT6;FFI-ZD|MWJs&R%OF8iRz|e`AD7?$+b^PbbBHduGREyZf0Cu~B?!O@J*qh8S-S>15FIC2@6 zaeMsG!__kKV%8-^i)`?sxx%N1uyh9?*aJ~?NFOP^bF50Ket|~qm^&$Yy(XW2&o5xT z6n9gFzuauo!()}GS&V015>TWqH$)f#DCO`$E&mP<|62G_>L!!NPO5Y)Ou(-{!=rMw zqy*oyauLiQ0#orKKYBJ%Y7jbVT<>~WauFLJ&jnO$>nAojNise#FM%FMMZaH%4+l?H zQ^#aIC?q8bO&!0SnM~)!XoloMz|=LqZ)xr7Gi5aZ`IsnJpufMg7$}a>zSSMACMRIP z55l=@iGxQ}z)pL=$S62RitKOO==eE5(wR#w-9&GGAf)c<-1k?;Vz0NS*m9qA#&~i~ zyt<8B23YU!e%~Mp5RVba0(*n5lJTSOZ#g+Rxw&WD;ckPDAT&MQpPz?PtlpVQ%s8mg 
z6G%Am;`gqEKJTO)yQyO{S{tX-dn|eqlH=kgZ|SPt_iH-7EcAI;FlT}O%^$-(4WD>_ ztv?=f1iMNHccNI%DWnjK77`W+yKmJ-l>PqPE%3eXSwQx^ z@b_zdIv%>*ipSIs4rDXnGO!EFJYe&3BsVZ1qc5y5lXNOp=w@wYu=e(+|JZa7#JNvF zzi|BUrt$7d0ge)6n;X$@PN$xp3#%RFlN55FAI}L*BV$L6>ZJ`E^S++{b|$wAEFlK< zX=n}5+&wd6Sjoafqk)5EA~obmi*QxqZbkEdgMn;Uv0$=FqQbebKe{^Z9o)dF?&;(R z$JA2}=}I-0?`;pKDE=&}tL=n)1umA~>iXf5md+u=! z$;AMS`{JG^rU24qCGJ*PT);US2KrHy&EC1GTGq<+^We-nasqnydS2d-EawFlk_{)9 zgK6DmzU{w0Bh!zk$qXDc1P85gPZL#u@oD&wmSOt-yLT2d(XS8ZKV=^;ynhat$@)!D ziq36A0KvM?P|IAte)dDLKvkp4G8M2ac3V_zGAnCqAjVe0+9B}W*w}D7UaPhf2?hnZ zfyeY3y*UJFF_#BTB$E_SmKuQ+bp=9bEA`OAs7af{9VU~>=H`lRdP!4Aa#24Nqw*!D zd`_)OyURQVbSqb0T6nW8Zc?h;f(Wn7{+#6K5#^VgGxt;3^^~}pahycQg=mqq@1}gN z^tZlJq4chA?OVru!fpw+ZR3p4Liah6sM$z4{VlRX;U-$z#)VW+t?@t7^`C>R8EQi^ zo^4{w&~%K4Q@T%w+T5zyy%^qRQDLvU#a8jGjk*%0gI}3%VvSYG@l4Xgvy*Qx{y26< z&qu7Z1l4PAHL#bgRynwPxC@D2!`w#Am-cyhx_1^lUM+n2VgeH7f{}nF@#4a!WWg36hR(Ce|mOPZ)8D&Y^?oGpigiG0J6jt;6JlD@y9Ys~Npt^#? zY@{&jQNI1)=)Y2B-3*CHaMep}X?M1Ax9?@bLf0ib!)Y=pUjx$WB_dMNT!cmeV&Q&# zm(3+K?!2@|HJLS_RZRCdkk9fOwy{(bP^EkVd#W#<2l5uvir33BEHjj z5>6IwHb>>c?cF!>1YhQAH!dRNy%?w6*{knmEZw!?R*GFLwI5qo@oU;r z!+22-113+6Hx-g|uP!~7R{WQu=ZnUk4vwD~D86ny?Ya@wkgT|)U=Dr~^bA9}W;5JW ztKzbx2;ihx#`jnddDWIyvEKQ4kLe0CK2Cj*E;T1orDZAz%~bi$ABKINOnuE{q#nhA z6lyd`wZX^aQkXm{PMiKnh`IohOLJJXXreRKaYvD#G?@+ni42d29uY)d5!k~*kSnU> z$^?N`mkBjqf#PxK+uo*6rxqSA#+_|YJ(uhpaYo98kEAqx?j^HL4*o zq?Dnh&TLD;*yJEpcT#%zc6`Lw-$*H_3!T~g!i$~Ro~5AdTo9$LwF^|RrzH4;d?i?# z)wiTA@Az5Ucy9*VpxwjLva7CH<g}@%F-1$Gn7=;$l#m|`y-S)QT z9!$Bzo%Ko)s7xJ%!xp4V+P9o=u?&%5HSK00+ZhMOU_^C21QG+XUb_G|>7zeDZ>yV^ z{N069N^bF2Y8}LrdQ~AK?O8u2;wH80!?iz4vgxmLI@2LYeMlClz_3>>(07~{Njrd- zFp-i5&Ri@6@yuIP)2cdG6f7^;{RS~M0oTZ@A|6}T`ftjf{!poY4)(3y`dAK}J+S5q z0-5n*m^|?LrT^A~*S>z2gKfyud=Vk}*pL6ueJbKU^$?@0MV}+7;bcA&ZCS}#)hh%_ z&n&62Wf8rhy|Yhf1!Z7e0Y+C|flbV}2u9;xxS1Nb;bUx+%Z0k*=!2D^g$h4{3}Bd( zq^wJq|9izwQvezsh`yb75^pP|CJ#@q9Mx8A0>uFp44|?AVixDd4Ni6?RGCq+^h#T6 zEO&hwg?M935$*C^Mo0);qlqtWz-o7z?Q0c-DsP5}NJG{-p`~k=(0{J_4z40BP_z*Z 
zy()zd?rq1^P7s@I(f)guKL?ymK=aV;*RZnR=;jiwYgk{1wapm)LO+S1#yn*)!N}u4 z6&}_B$P?=3a)&YflF^1Xt=P)3ae^zJHhzu{&?fdYvHxka6{5}L{a4$Ix+>8~7NQ_wKCYzOEubbihx%^*!`=XR1RNX)m?oy!93U+ zKU9iKFqrlSfYVvDX-6;1VBaF;h))ppW_RN~Cm zr&Zfc4?b9Iv43}dZs0L<*$&iAw4wNXHX|!?6!9YUz^W$>ALEXqO{*?WuP$0fW-1#v zE{E1dq4dKz#5S&7EvGIo9f&2OR=3l=^}fySwFqz1cQ7Ju#U-V0()d??34Bo=J~C7j zegySYvNakO`r?*RqMSdCMr_A)RMEC=a2j`P>vCuV^h3D2NL2P&+s$LZA0l{z;8A0R zKz^f4Y-#Ewy?+U+&B-+|7R3_rsZ$%~pi+#FHa8Dsrq)|eoF#CcTX+cLfCVSAGZui3 z36e!xfPnPbQ$TFCK@dvC0Mcat2`4uT*b%o7s0FI^7XnUJL(q>XF6c z%0rzF1$sFsoRS7A_t~MK8t5~yvfz0J^adJVK2k~aeTX}%AglqJy6zS(_``s(Q1vaUGMv`h!CBX z2fCt}To7;#C4;FmXX#r$lYP^oso)K=z@jiX*mWwdH^&e@PkD2mU;<$aLg^R(8Ea|bgm z!M8vQw8Q=|fCIQpR60@`>O5%H`DyG8KNNt&G}8)Yj3f!I^4l))WRh~ad9!NM?m&s{ zao4-ev%S7R(75}0-h_pbjWiJS8N0cPiNGiRvAS_}YnvJTJ!15sCG&YweZ@BmSyx&2 zBY9Pmkt$y5i<&8SHizu-YK83fwV{7(G5I+@);y8N62V0>08;TV3*>Y_Dpgvt1xO=E z5*YmdW0AUVn{hd{a0>Gv8R@ZbsAui{+0XP?KxoEv~J|L z5TGA1FXca@qbsae#RSPfN*dt-b1}i6S^q8^558bPyHi2(w5}O*&!QdSocM$TNlEkh z@gszoM-@DzQ415XZaw>H_v5an9hb4L2N1``wFhDu{Gj0J`egHyqR`WS(=6GVLej1v z-T31fo}i*o=fCf^6Oi_i`}faLTx@J%X&(Ub5RDoINg4r6KxZIy^Z(OL1#;QNlk`^q zN>S31u$wH)iIfT$(fyE1B%)-!z6{CHE=BAPz|6s4XME`u+LEVk&0V2mxvOTA+U4Q% z@2bSc?L@GUl>+s8wVGtFuSY`8S>D1)vD-^rP5A9cvN^-|NwCu%YL&ug z?Q)V7P^&8`GPK*YDI9klFEQYtdhPM<~tM@viy|t&sBKoB91<3lwy; z)TtlSV#R6C=`$y9ih5QE76%*5m!1{WdOSHoi^@}Pbz3LoALU0Bwg(Ov+xDNtl3Dwz z&xFy}MUIJb{k~+g8W>g*#msqwr!Z@uLJa{@a zJL76t`-siIguMT#wGU6UaK|~jdxdXknJqy*cl@fJ@j(2rRU`jub0h8FjRjT_I|1kl zeU|^rdNgUFxO3b0OZ2*;3dAR94sEw(Oy;|-Wy5Ls#3R|F!_%wO)n{Nuc!yGefdv&&VFgO}*_|3QE2vM%B zyy#>j0R~@oPp|P%+Qpfvx!PSb9OL48T$S6y4bt}h>TWRg7vHrgpx*va#5#A8Yh>bq zwZ-17=wz^^!Nw&0VjYqt9WY;z?4|J1!n)GM{R&I+glcEQzkPVuOKdc!59W8AlzeD1 zWG%^8wzox~IBF>`i{i=`-%l$*eOXP4vYtCT=T4I-33jQvzn3t{S1K}*XoL5J+PjglndHO!N!%@o>%he_|J3er~$C)LeVp2Ll_rkbddh{iM7cUvNX=BS)H>Y z{2}+5VH73Gsk3a(9)-2abi}3(I$%==p>O=#nA&Jy_514uf_J$U70IPGd#wP(+)-9781l+yAy$2mDUBz zgxu@c8C9hkTNV@uI0}kJg#9gg_AQ9D3{Bf{`pwN))HSnIYxO6<3h4Fo{?c~_w{KCy 
zpRhLTw0EPim|>_CdqmYu9qTUfhIN?|O^xhRPYe4|I>X*_nz)sD)PB4Ao=7?NG@oI5 zH9`lKBhmH^`F5c4^#nQ-Axx*h)FSJr?I*E94$+lhu#ZAO?q~JhtqN@4mY@so(=D)7 z0?FhlNAcepuT$ZFUePW66NqM1vAi>_tt|98lARK)>I#$x-a8{{@kUaZ-O*e_yl$TD zu^Xkg%AjydclbDUyrrg4D)mi}}o*V4U`|8SDD z>{)1eD=S{M&^I74W|wkvVw7^R1>d*il!cP-2;tgeZCNX5yB0r7-LCnZ_0CPUMfz`7 zCw)%XtGh7uT)2FFF2)asQT*ZUg*!M3YGtMK`W!jLS@p90mI4geY_@SbG7$f;;1-(5#G0UPTnj4%%W-r__D^f-nhttKKkZxK?=K3x z)hf_@{<*8;@TvxEWCL~?pljypdXAmv!=%1dZRdwt?NIzo?6&uz^dnHcbl}qp#Wg|w z8qX?W^ng1}k)H>zXQ!wv@|Zc$Q3dSwP=Q6-J})FN$6^4(s=+=ODlZk=lmdH%bQ;u5 zXsT^g@En#moZ4xC4Kxy;1g9C7t(bF_suof!OwLKZt#H95@MVpm)4)G>$=oMwFAAWG z+OnFmxp_8Cb2_8wq{n3e=|XI_pl84u{}_Xq*5)6pAJ2)Xo#F87{Uh?n&k28`G%}g8 zt(kG2Cc4_{f{j~9p1VCrdBz@I*ARDVR+z4X}goHzA~2Dd`C?5)X~%bMP*r?6jo5Q`sEu! zr`-(2bb_%%Be##k15hRO-2;cPRiU5Ce{6o#Ramx7g;i|Qn8srht?0K-A;(k%M7Ncz zb+y~Mm#%9}?;U9!XLXPF)!!b!s!*uN<2Peax7%PURy>{dKn%O}hLq`I6bptwoxdK~ zgeRdMTB2LAt7w!B*Ao*xkI|;h04W(WuOX6R?!Cdo$gojl^Cd|3U<7kQ)3g@ZbeTapJ-;Uc%scsNcL*I zcYktwmF!C*9mX<+Xoi$%EHjeG zQ4u1^&KSGyk%SDg&RFJzA&i|)j5wdKI^A>ce{kpLdCdC#zCW+``~7-7(j`C(Mi?uOq0k3ktAgYuML)dw`>*%kYgIZhqApHt1Z{8^T$2Mz>u40pBqU)9@d zAPCN;;owBcQ1g}OIFwzK$JbS-b${k*ab_AAQR1kh`km}r0*Lr9nM{yg{3N}$wuZH! zL|p*HZi5QpSeeRP;C2 zu7Ru#E=J-ayF74y7&sq05+zzMFFrGl=W0t(PR8j26czUK0~IJrE|naAPv($M8Y22} zc>%xzC6Q&eJ3!D1P>N=8*mpn1`MgU~2Not^%OJ z6%r53n7}(<0Cs94Zz#`AEK{@#Sb-qvG2e;TEU>SzKmcPHSU;+*lCRcW>zw{8a{O?RYzt&i3$wSr!nqjFADbn2A*RPNIs&c)W}xrzAM5edRkt6zuY3G+ z$mua|T;RHW!mZPcU77TC;_KgrE!?{HiH)>&& zrv|*qt4Rv5_1huh(UmbV9c(w?Iki!rW?GhFbI@uKsgdOnw)cX}njb)X6^A{$k^0Dk z-ZAK;>IW6~hxW)NcYD%T=f;N*mr4#kQICzfgjsJ)If~oEqNDdJVhU5Ksw{%{S*pUh z^BGgLD*8w~;AUOTt17Zb*Go=8ZfyK&$MU6H=Wa z9VZ@e(_Ub!t^sn9PLM7B^=UpweTx@}{fx!*!3{ArQM;;wPJI!FA=OVVyfY0K5r0yn zcxTZc4njGHH-)K%>%NtlKPwhIYwjfl5xUdb0NV_2r&N+|zufKP<2IJnl;fGFy}sPo zpYrhT#iHZ}^><925AoA#{d8_L+rFJ1(d0r39=sCQj!*jtj_wjo9ZuKsJ(t)9FSY`# zZlxlXACmOozwbf&O{;<91fB3=mnMdzu5y{Y6RNDtl}$t`HCr0~G-S>+`5{yhWSJlX zWY<@fh~cHs3WV*YBzMc(Vx12s3;n8P>>eLWp8O*qwu#nSf1C_Mc~WX89L&G){2F1$-|Lr`g^F{RTANsXcWs! 
zr{W9(5XThg4C3{-$&Vhf<*b-WGtvpZmz#aXpg?VyIb<2tgX7WnX6g3@xjcEbnhs=T z5~=Le{^#++&s-fuJ!PvdMx4Q__M%Hy-sZPW_^+dEkG$-^Kr;g7n`2n?{WifGTKAm1 zzis4gOkseK_wMlcNS>7JTB%o^w{viu7dCpJ)_w{jS{l(v74H{y<8O1{&yX=DeCZTY zs2G!q8vycDUeaq^MO{vB!_DKhAZJ6ao!(GLzK15P!D^DL(ktK=h;!5433m6JpO^yL zxYT;5cdXA{cQ{)cTmTF}g(6i`%%k~djIS@?0m{jqqldzG33_Co?|N$nsvz~IO011C z+#{+C*8sU}-rjP}HlmS*`zXeEUKPHn6s>c|E?EUc=couo@E=pPE*Q3DSO)#%u@ek4 zsfu$4plky;=A46=r>+033|n)*E5oe$Z^|%>DDGg0ZLPf3tB??7Ho4-b6spJ}HLI`K{}@DnWGygQDgT2tsM*z8 zAGJj#jk>Qi4F+7M*;|^AN+hHEsLZmy8JS#kW{N4jc(AGuU0Q*kcwf%e2p)gnYvQrJ zD}KYtfFPa=P;-`CW610lJjXbH);|pOx@|O9>3;+o@{^Uy-vVu3iaWMXmQr>x?{fOw zVr1rB)!BQL7fj<85z>*3-ihK#d1{?#_u_>2{EI`KSMM*`>c>sHnd}9WweBV(Jf2DH zd@=i)->azvMuSDv7@i8j<+x`Vf;&e53-@WKezmX4BzbU-5r=3FwP-8jhXqfFyzxU( zj?62>4>`Z2M$#HMNBFC>;3CNSFpH{+A{RT4?<%q>dS|WuUh~6UZ456YT07PI;9UVq z;(%3;&OXzYQRMXmQsay1Rm*wB`4gs&LGEe1U8jR_Mr~&rQno=zc!Xh)fO`MCzE6l+ zr6bXXvLjNmXI{Z(4-iqZtPeZ2veDRHPxq=~^J-r}9K^pI^>s2A4!D+xDRk!rlP)be z_b1keN$PgF756>Nt$aK(OTPnt1k|%8TpEjvb(oBK0BfalhuFy&%6F}VUY17cY9NVX z(D{9v3xV%bi;`IzB|!tie{_&V&42445`IJv2%%RFFiQ~1ekS(5EjrbI>*B1$gM7{b zrGt>KK3pH?&|nUV(W=Zo&pjgGwZ3$AcG7?Slr(Ip216TMQ;_Jk6&" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create a Pipeline with a Dataset PipelineParameter\n", + "\n", + "Note that the ```file_ds_consumption``` and ```tabular_ds_consumption``` are specified as both arguments and inputs to create a step." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "train_step = PythonScriptStep(\n", + " name=\"train_step\",\n", + " script_name=\"train_with_dataset.py\",\n", + " arguments=[\"--param1\", file_ds_consumption, \"--param2\", tabular_ds_consumption],\n", + " inputs=[file_ds_consumption, tabular_ds_consumption],\n", + " compute_target=compute_target,\n", + " source_directory=source_directory)\n", + "\n", + "print(\"train_step created\")\n", + "\n", + "pipeline = Pipeline(workspace=ws, steps=[train_step])\n", + "print(\"pipeline with the train_step created\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Submit a Pipeline with a Dataset PipelineParameter\n", + "\n", + "Pipelines can be submitted with default values of PipelineParameters by not specifying any parameters." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Pipeline will run with default file_ds and tabular_ds\n", + "pipeline_run = experiment.submit(pipeline)\n", + "print(\"Pipeline is submitted for execution\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "RunDetails(pipeline_run).show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pipeline_run.wait_for_completion()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Submit a Pipeline with a different Dataset PipelineParameter value from the SDK\n", + "\n", + "The training pipeline can be reused with different input datasets by passing them in as PipelineParameters" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ 
+ "iris_file_ds = Dataset.File.from_files('https://raw.githubusercontent.com/Azure/MachineLearningNotebooks/'\n", + " '4e7b3784d50e81c313c62bcdf9a330194153d9cd/how-to-use-azureml/work-with-data/'\n", + " 'datasets-tutorial/train-with-datasets/train-dataset/iris.csv')\n", + "\n", + "iris_tabular_ds = Dataset.Tabular.from_delimited_files('https://raw.githubusercontent.com/Azure/MachineLearningNotebooks/'\n", + " '4e7b3784d50e81c313c62bcdf9a330194153d9cd/how-to-use-azureml/work-with-data/'\n", + " 'datasets-tutorial/train-with-datasets/train-dataset/iris.csv')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pipeline_run_with_params = experiment.submit(pipeline, pipeline_parameters={'file_ds_param': iris_file_ds, 'tabular_ds_param': iris_tabular_ds}) " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "RunDetails(pipeline_run_with_params).show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pipeline_run_with_params.wait_for_completion()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Dynamically Set the Dataset PipelineParameter Values using a REST Call\n", + "\n", + "Let's publish the pipeline we created previously, so we can generate a pipeline endpoint. We can then submit the iris datasets to the pipeline REST endpoint by passing in their IDs. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "published_pipeline = pipeline.publish(name=\"Dataset_Pipeline\", description=\"Pipeline to test Dataset PipelineParameter\", continue_on_step_failure=True)\n", + "published_pipeline" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "published_pipeline.submit(ws, experiment_name=\"publishedexperiment\", pipeline_parameters={'file_ds_param': iris_file_ds, 'tabular_ds_param': iris_tabular_ds})" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.core.authentication import InteractiveLoginAuthentication\n", + "import requests\n", + "\n", + "auth = InteractiveLoginAuthentication()\n", + "aad_token = auth.get_authentication_header()\n", + "\n", + "rest_endpoint = published_pipeline.endpoint\n", + "\n", + "print(\"You can perform HTTP POST on URL {} to trigger this pipeline\".format(rest_endpoint))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# specify the param when running the pipeline\n", + "response = requests.post(rest_endpoint, \n", + " headers=aad_token, \n", + " json={\"ExperimentName\": \"MyRestPipeline\",\n", + " \"RunSource\": \"SDK\",\n", + " \"DataSetDefinitionValueAssignments\": {\"file_ds_param\": {\"SavedDataSetReference\": {\"Id\": iris_file_ds.id}},\n", + " \"tabular_ds_param\": {\"SavedDataSetReference\": {\"Id\": iris_tabular_ds.id}}}\n", + " }\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "try:\n", + " response.raise_for_status()\n", + "except Exception: \n", + " raise Exception('Received bad response from the endpoint: {}\\n'\n", + " 'Response Code: {}\\n'\n", + " 'Headers: {}\\n'\n", + " 'Content: {}'.format(rest_endpoint, response.status_code, 
response.headers, response.content))\n", + "\n", + "run_id = response.json().get('Id')\n", + "print('Submitted pipeline run: ', run_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "published_pipeline_run_via_rest = PipelineRun(ws.experiments[\"MyRestPipeline\"], run_id)\n", + "RunDetails(published_pipeline_run_via_rest).show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "published_pipeline_run_via_rest.wait_for_completion()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "" + ] + } + ], + "metadata": { + "authors": [ + { + "name": "rafarmah" + } + ], + "category": "tutorial", + "compute": [ + "AML Compute" + ], + "datasets": [ + "Custom" + ], + "deployment": [ + "None" + ], + "exclude_from_index": false, + "framework": [ + "Azure ML" + ], + "friendly_name": "How to use Dataset as a PipelineParameter", + "kernelspec": { + "display_name": "Python 3.6", + "language": "python", + "name": "python36" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.7" + }, + "order_index": 13, + "star_tag": [ + "featured" + ], + "tags": [ + "None" + ], + "task": "Demonstrates the use of Dataset as a PipelineParameter" + }, + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.yml b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.yml new file mode 100644 index 00000000..0c5c948c --- /dev/null +++ 
b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.yml @@ -0,0 +1,5 @@ +name: aml-pipelines-showcasing-dataset-and-pipelineparameter +dependencies: +- pip: + - azureml-sdk + - azureml-widgets diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-databricks-as-compute-target.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-databricks-as-compute-target.ipynb index 684ab67d..affaa213 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-databricks-as-compute-target.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-databricks-as-compute-target.ipynb @@ -510,7 +510,7 @@ " inputs=[step_1_input],\n", " num_workers=1,\n", " python_script_path=python_script_path,\n", - " python_script_params={'arg1', pipeline_param, 'arg2},\n", + " python_script_params={'arg1', pipeline_param, 'arg2'},\n", " run_name='DB_Python_demo',\n", " compute_target=databricks_compute,\n", " allow_reuse=True\n", diff --git a/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb b/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb index 89462a87..c9362a7c 100644 --- a/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb @@ -279,8 +279,7 @@ "# Specify CondaDependencies obj, add necessary packages\n", "aml_run_config.environment.python.conda_dependencies = CondaDependencies.create(\n", " conda_packages=['pandas','scikit-learn'], \n", - " pip_packages=['azureml-sdk[automl,explain]', 'pyarrow'], \n", 
- " pin_sdk_version=False)\n", + " pip_packages=['azureml-sdk[automl,explain]', 'pyarrow'])\n", "\n", "print (\"Run configuration created.\")" ] @@ -692,7 +691,6 @@ " debug_log = 'automated_ml_errors.log',\n", " path = train_model_folder,\n", " compute_target = aml_compute,\n", - " run_configuration = aml_run_config,\n", " featurization = 'auto',\n", " training_data = training_dataset,\n", " label_column_name = 'cost',\n", diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/README.md b/how-to-use-azureml/machine-learning-pipelines/parallel-run/README.md index b7274ae9..d795a262 100644 --- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/README.md +++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/README.md @@ -2,18 +2,16 @@ Azure Machine Learning Batch Inference targets large inference jobs that are not time-sensitive. Batch Inference provides cost-effective inference compute scaling, with unparalleled throughput for asynchronous applications. It is optimized for high-throughput, fire-and-forget inference over large collections of data. -# Getting Started with Batch Inference Public Preview +# Getting Started with Batch Inference -Batch inference public preview offers a platform in which to do large inference or generic parallel map-style operations. Below introduces the major steps to use this new functionality. For a quick try, please follow the prerequisites and simply run the sample notebooks provided in this directory. +Batch inference offers a platform in which to do large inference or generic parallel map-style operations. Below introduces the major steps to use this new functionality. For a quick try, please follow the prerequisites and simply run the sample notebooks provided in this directory. ## Prerequisites ### Python package installation -Following the convention of most AzureML Public Preview features, Batch Inference SDK is currently available as a contrib package. 
- If you're unfamiliar with creating a new Python environment, you may follow this example for [creating a conda environment](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-environment#local). Batch Inference package can be installed through the following pip command. ``` -pip install azureml-contrib-pipeline-steps +pip install azureml-pipeline-steps ``` ### Creation of Azure Machine Learning Workspace @@ -66,9 +64,8 @@ base_image_registry.password = "password" ## Create a batch inference job -**ParallelRunStep** is a newly added step in the azureml.contrib.pipeline.steps package. You will use it to add a step to create a batch inference job with your Azure machine learning pipeline. (Use batch inference without an Azure machine learning pipeline is not supported yet). ParallelRunStep has all the following parameters: +**ParallelRunStep** is a newly added step in the azureml.pipeline.steps package. You will use it to add a step to create a batch inference job with your Azure machine learning pipeline. (Use batch inference without an Azure machine learning pipeline is not supported yet). ParallelRunStep has all the following parameters: - **name**: this name will be used to register batch inference service, has the following naming restrictions: (unique, 3-32 chars and regex ^\[a-z\]([-a-z0-9]*[a-z0-9])?$) - - **models**: zero or more model names already registered in Azure Machine Learning model registry. - **parallel_run_config**: ParallelRunConfig as defined above. - **inputs**: one or more Dataset objects. - **output**: this should be a PipelineData object encapsulating an Azure BLOB container path. 
diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.ipynb b/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.ipynb index ef0008d1..518fa7e0 100644 --- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.ipynb @@ -23,11 +23,6 @@ "\n", "In this notebook, we will demonstrate how to make predictions on large quantities of data asynchronously using the ML pipelines with Azure Machine Learning. Batch inference (or batch scoring) provides cost-effective inference, with unparalleled throughput for asynchronous applications. Batch prediction pipelines can scale to perform inference on terabytes of production data. Batch prediction is optimized for high throughput, fire-and-forget predictions for a large collection of data.\n", "\n", - "> **Note**\n", - "This notebook uses public preview functionality (ParallelRunStep). Please install azureml-contrib-pipeline-steps package before running this notebook. 
Pandas is used to display job results.\n", - "```\n", - "pip install azureml-contrib-pipeline-steps pandas\n", - "```\n", "> **Tip**\n", "If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction.\n", "\n", @@ -86,7 +81,6 @@ "source": [ "import os\n", "from azureml.core.compute import AmlCompute, ComputeTarget\n", - "from azureml.core.compute_target import ComputeTargetException\n", "\n", "# choose a name for your cluster\n", "compute_name = os.environ.get(\"AML_COMPUTE_CLUSTER_NAME\", \"cpu-cluster\")\n", @@ -184,9 +178,20 @@ "mnist_ds_name = 'mnist_sample_data'\n", "\n", "path_on_datastore = mnist_data.path('mnist')\n", - "input_mnist_ds = Dataset.File.from_files(path=path_on_datastore, validate=False)\n", - "registered_mnist_ds = input_mnist_ds.register(ws, mnist_ds_name, create_new_version=True)\n", - "named_mnist_ds = registered_mnist_ds.as_named_input(mnist_ds_name)" + "input_mnist_ds = Dataset.File.from_files(path=path_on_datastore, validate=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.data.dataset_consumption_config import DatasetConsumptionConfig\n", + "from azureml.pipeline.core import PipelineParameter\n", + "\n", + "pipeline_param = PipelineParameter(name=\"mnist_param\", default_value=input_mnist_ds)\n", + "input_mnist_ds_consumption = DatasetConsumptionConfig(\"minist_param_config\", pipeline_param).as_mount()" ] }, { @@ -306,8 +311,6 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", - "\n", "scripts_folder = \"Code\"\n", "script_file = \"digit_identification.py\"\n", "\n", @@ -341,8 +344,8 @@ "from azureml.core import Environment\n", "from azureml.core.runconfig import CondaDependencies, DEFAULT_CPU_IMAGE\n", "\n", - "batch_conda_deps = 
CondaDependencies.create(pip_packages=[\"tensorflow==1.15.2\", \"pillow\"])\n", - "\n", + "batch_conda_deps = CondaDependencies.create(pip_packages=[\"tensorflow==1.15.2\", \"pillow\", \n", + " \"azureml-core\", \"azureml-dataprep[fuse]\"])\n", "batch_env = Environment(name=\"batch_environment\")\n", "batch_env.python.conda_dependencies = batch_conda_deps\n", "batch_env.docker.enabled = True\n", @@ -362,17 +365,21 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.contrib.pipeline.steps import ParallelRunStep, ParallelRunConfig\n", + "from azureml.pipeline.core import PipelineParameter\n", + "from azureml.pipeline.steps import ParallelRunStep, ParallelRunConfig\n", "\n", "parallel_run_config = ParallelRunConfig(\n", " source_directory=scripts_folder,\n", " entry_script=script_file,\n", - " mini_batch_size=\"5\",\n", + " mini_batch_size=PipelineParameter(name=\"batch_size_param\", default_value=\"5\"),\n", " error_threshold=10,\n", " output_action=\"append_row\",\n", + " append_row_file_name=\"mnist_outputs.txt\",\n", " environment=batch_env,\n", " compute_target=compute_target,\n", - " node_count=2)" + " process_count_per_node=PipelineParameter(name=\"process_count_param\", default_value=2),\n", + " node_count=2\n", + ")" ] }, { @@ -392,10 +399,8 @@ "parallelrun_step = ParallelRunStep(\n", " name=\"predict-digits-mnist\",\n", " parallel_run_config=parallel_run_config,\n", - " inputs=[ named_mnist_ds ],\n", + " inputs=[ input_mnist_ds_consumption ],\n", " output=output_dir,\n", - " models=[ model ],\n", - " arguments=[ ],\n", " allow_reuse=True\n", ")" ] @@ -454,6 +459,47 @@ "pipeline_run.wait_for_completion(show_output=True)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Resubmit a with different dataset\n", + "Since we made the input a `PipelineParameter`, we can resubmit with a different dataset without having to create an entirely new experiment. We'll use the same datastore but use only a single image." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "path_on_datastore = mnist_data.path('mnist/0.png')\n", + "single_image_ds = Dataset.File.from_files(path=path_on_datastore, validate=False)\n", + "single_image_ds._ensure_saved(ws)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pipeline_run_2 = experiment.submit(pipeline, \n", + " pipeline_parameters={\"mnist_param\": single_image_ds, \n", + " \"batch_size_param\": \"1\",\n", + " \"process_count_param\": 1}\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pipeline_run_2.wait_for_completion(show_output=True)" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -480,7 +526,7 @@ "\n", "for root, dirs, files in os.walk(\"mnist_results\"):\n", " for file in files:\n", - " if file.endswith('parallel_run_step.txt'):\n", + " if file.endswith('mnist_outputs.txt'):\n", " result_file = os.path.join(root,file)\n", "\n", "df = pd.read_csv(result_file, delimiter=\":\", header=None)\n", diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.yml b/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.yml index cd4be086..5ddece97 100644 --- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.yml +++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.yml @@ -2,6 +2,6 @@ name: file-dataset-image-inference-mnist dependencies: - pip: - azureml-sdk - - azureml-contrib-pipeline-steps + - azureml-pipeline-steps - azureml-widgets - pandas diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.ipynb b/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.ipynb index 
5aae3861..4edcef6c 100644 --- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.ipynb @@ -23,11 +23,6 @@ "\n", "In this notebook, we will demonstrate how to make predictions on large quantities of data asynchronously using the ML pipelines with Azure Machine Learning. Batch inference (or batch scoring) provides cost-effective inference, with unparalleled throughput for asynchronous applications. Batch prediction pipelines can scale to perform inference on terabytes of production data. Batch prediction is optimized for high throughput, fire-and-forget predictions for a large collection of data.\n", "\n", - "> **Note**\n", - "This notebook uses public preview functionality (ParallelRunStep). Please install azureml-contrib-pipeline-steps package before running this notebook. Pandas is used to display job results.\n", - "```\n", - "pip install azureml-contrib-pipeline-steps pandas\n", - "```\n", "> **Tip**\n", "If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction.\n", "\n", @@ -84,7 +79,6 @@ "source": [ "import os\n", "from azureml.core.compute import AmlCompute, ComputeTarget\n", - "from azureml.core.compute_target import ComputeTargetException\n", "\n", "# choose a name for your cluster\n", "compute_name = os.environ.get(\"AML_COMPUTE_CLUSTER_NAME\", \"cpu-cluster\")\n", @@ -304,7 +298,8 @@ "from azureml.core import Environment\n", "from azureml.core.runconfig import CondaDependencies\n", "\n", - "predict_conda_deps = CondaDependencies.create(pip_packages=[ \"scikit-learn==0.20.3\" ])\n", + "predict_conda_deps = CondaDependencies.create(pip_packages=[\"scikit-learn==0.20.3\",\n", + " \"azureml-core\", 
\"azureml-dataprep[pandas,fuse]\"])\n", "\n", "predict_env = Environment(name=\"predict_environment\")\n", "predict_env.python.conda_dependencies = predict_conda_deps\n", @@ -325,19 +320,21 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.contrib.pipeline.steps import ParallelRunStep, ParallelRunConfig\n", + "from azureml.pipeline.steps import ParallelRunStep, ParallelRunConfig\n", "\n", "# In a real-world scenario, you'll want to shape your process per node and nodes to fit your problem domain.\n", "parallel_run_config = ParallelRunConfig(\n", - " source_directory=scripts_folder,\n", - " entry_script=script_file, # the user script to run against each input\n", - " mini_batch_size='5MB',\n", - " error_threshold=5,\n", - " output_action='append_row',\n", - " environment=predict_env,\n", - " compute_target=compute_target, \n", - " node_count=3,\n", - " run_invocation_timeout=600)" + " source_directory=scripts_folder,\n", + " entry_script=script_file, # the user script to run against each input\n", + " mini_batch_size='5MB',\n", + " error_threshold=5,\n", + " output_action='append_row',\n", + " append_row_file_name=\"iris_outputs.txt\",\n", + " environment=predict_env,\n", + " compute_target=compute_target, \n", + " node_count=3,\n", + " run_invocation_timeout=600\n", + ")" ] }, { @@ -359,7 +356,6 @@ " inputs=[named_iris_ds],\n", " output=output_folder,\n", " parallel_run_config=parallel_run_config,\n", - " models=[model],\n", " arguments=['--model_name', 'iris'],\n", " allow_reuse=True\n", ")" @@ -453,7 +449,7 @@ "\n", "for root, dirs, files in os.walk(\"iris_results\"):\n", " for file in files:\n", - " if file.endswith('parallel_run_step.txt'):\n", + " if file.endswith('iris_outputs.txt'):\n", " result_file = os.path.join(root,file)\n", "\n", "# cleanup output format\n", diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.yml 
b/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.yml index 6d1c08a8..9bdf3735 100644 --- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.yml +++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.yml @@ -2,6 +2,6 @@ name: tabular-dataset-inference-iris dependencies: - pip: - azureml-sdk - - azureml-contrib-pipeline-steps + - azureml-pipeline-steps - azureml-widgets - pandas diff --git a/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer.ipynb b/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer.ipynb index 0643b8a9..d713baef 100644 --- a/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer.ipynb @@ -26,11 +26,8 @@ "2. Run neural style on each image using one of the provided models (from `pytorch` pretrained models for this example).\n", "3. Stitch the image back into a video.\n", "\n", - "> **Note**\n", - "This notebook uses public preview functionality (ParallelRunStep). Please install azureml-contrib-pipeline-steps package before running this notebook.\n", - "```\n", - "pip install azureml-contrib-pipeline-steps\n", - "```" + "> **Tip**\n", + "If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction." 
] }, { @@ -356,7 +353,9 @@ "source": [ "from azureml.pipeline.core.graph import PipelineParameter\n", "# create a parameter for style (one of \"candy\", \"mosaic\") to transfer the images to\n", - "style_param = PipelineParameter(name=\"style\", default_value=\"mosaic\")" + "style_param = PipelineParameter(name=\"style\", default_value=\"mosaic\")\n", + "# create a parameter for the number of nodes to use in step no. 2 (style transfer)\n", + "nodecount_param = PipelineParameter(name=\"nodecount\", default_value=2)" ] }, { @@ -415,6 +414,8 @@ "parallel_cd.add_conda_package(\"pytorch\")\n", "parallel_cd.add_conda_package(\"torchvision\")\n", "parallel_cd.add_conda_package(\"pillow<7\") # needed for torchvision==0.4.0\n", + "parallel_cd.add_pip_package(\"azureml-core\")\n", + "parallel_cd.add_pip_package(\"azureml-dataprep[fuse]\")\n", "\n", "styleenvironment = Environment(name=\"styleenvironment\")\n", "styleenvironment.python.conda_dependencies=parallel_cd\n", @@ -427,17 +428,20 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.contrib.pipeline.steps import ParallelRunConfig\n", + "from azureml.pipeline.core import PipelineParameter\n", + "from azureml.pipeline.steps import ParallelRunConfig\n", "\n", "parallel_run_config = ParallelRunConfig(\n", - " environment=styleenvironment,\n", - " entry_script='transform.py',\n", - " output_action='summary_only',\n", - " mini_batch_size=\"1\",\n", - " error_threshold=1,\n", - " source_directory=scripts_folder,\n", - " compute_target=gpu_cluster, \n", - " node_count=3)" + " environment=styleenvironment,\n", + " entry_script='transform.py',\n", + " output_action='summary_only',\n", + " mini_batch_size=\"1\",\n", + " error_threshold=1,\n", + " source_directory=scripts_folder,\n", + " compute_target=gpu_cluster, \n", + " node_count=nodecount_param,\n", + " process_count_per_node=2\n", + ")" ] }, { @@ -446,7 +450,7 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.contrib.pipeline.steps import 
ParallelRunStep\n", + "from azureml.pipeline.steps import ParallelRunStep\n", "from datetime import datetime\n", "\n", "parallel_step_name = 'styletransfer-' + datetime.now().strftime('%Y%m%d%H%M')\n", @@ -455,9 +459,6 @@ " name=parallel_step_name,\n", " inputs=[ffmpeg_images_file_dataset], # Input file share/blob container/file dataset\n", " output=processed_images, # Output file share/blob container\n", - " models=[mosaic_model, candy_model],\n", - " tags = {'scenario': \"batch inference\", 'type': \"demo\"},\n", - " properties = {'area': \"style transfer\"},\n", " arguments=[\"--style\", style_param],\n", " parallel_run_config=parallel_run_config,\n", " allow_reuse=True #[optional - default value True]\n", @@ -666,7 +667,8 @@ "response = requests.post(rest_endpoint, \n", " headers=aad_token,\n", " json={\"ExperimentName\": experiment_name,\n", - " \"ParameterAssignments\": {\"style\": \"candy\", \"aml_node_count\": 2}})\n", + " \"ParameterAssignments\": {\"style\": \"candy\", \"NodeCount\": 3}})\n", + "\n", "run_id = response.json()[\"Id\"]\n", "\n", "from azureml.pipeline.core.run import PipelineRun\n", diff --git a/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer.yml b/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer.yml index 8ab21c22..77330241 100644 --- a/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer.yml +++ b/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer.yml @@ -2,7 +2,6 @@ name: pipeline-style-transfer dependencies: - pip: - azureml-sdk - - azureml-contrib-pipeline-steps - azureml-pipeline-steps - azureml-widgets - requests diff --git a/how-to-use-azureml/reinforcement-learning/README.md b/how-to-use-azureml/reinforcement-learning/README.md index 209701e4..2c7ce77a 100644 --- a/how-to-use-azureml/reinforcement-learning/README.md +++ 
b/how-to-use-azureml/reinforcement-learning/README.md @@ -22,7 +22,7 @@ Using these samples, you will be able to do the following. |-------------------|--------------------------------------------| | [devenv_setup.ipynb](setup/devenv_setup.ipynb) | Notebook to setup development environment for Azure ML RL | | [cartpole_ci.ipynb](cartpole-on-compute-instance/cartpole_ci.ipynb) | Notebook to train a Cartpole playing agent on an Azure ML Compute Instance | -| [cartpole_cc.ipynb](cartpole-on-single-compute/cartpole_cc.ipynb) | Notebook to train a Cartpole playing agent on an Azure ML Compute Cluster (single node) | +| [cartpole_sc.ipynb](cartpole-on-single-compute/cartpole_sc.ipynb) | Notebook to train a Cartpole playing agent on an Azure ML Compute Cluster (single node) | | [pong_rllib.ipynb](atari-on-distributed-compute/pong_rllib.ipynb) | Notebook to train Pong agent using RLlib on multiple compute targets | | [minecraft.ipynb](minecraft-on-distributed-compute/minecraft.ipynb) | Notebook to train an agent to navigate through a lava maze in the Minecraft game | diff --git a/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/files/pong_rllib.py b/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/files/pong_rllib.py index c78a19c6..7735ddda 100644 --- a/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/files/pong_rllib.py +++ b/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/files/pong_rllib.py @@ -23,17 +23,18 @@ if __name__ == "__main__": ray.init(address=args.ray_address) - tune.run(run_or_experiment=args.run, - config={ - "env": args.env, - "num_gpus": args.config["num_gpus"], - "num_workers": args.config["num_workers"], - "callbacks": {"on_train_result": callbacks.on_train_result}, - "sample_batch_size": 50, - "train_batch_size": 1000, - "num_sgd_iter": 2, - "num_data_loader_buffers": 2, - "model": {"dim": 42}, - }, - stop=args.stop, - local_dir='./logs') + tune.run( + 
run_or_experiment=args.run, + config={ + "env": args.env, + "num_gpus": args.config["num_gpus"], + "num_workers": args.config["num_workers"], + "callbacks": {"on_train_result": callbacks.on_train_result}, + "sample_batch_size": 50, + "train_batch_size": 1000, + "num_sgd_iter": 2, + "num_data_loader_buffers": 2, + "model": {"dim": 42}, + }, + stop=args.stop, + local_dir='./logs') diff --git a/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb b/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb index 0979479b..7dd280d8 100644 --- a/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb +++ b/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb @@ -20,8 +20,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Azure ML Reinforcement Learning Sample - Pong problem\n", - "Azure ML Reinforcement Learning (Azure ML RL) is a managed service for running distributed RL (reinforcement learning) simulation and training using the Ray framework.\n", + "# Reinforcement Learning in Azure Machine Learning - Pong problem\n", + "Reinforcement Learning in Azure Machine Learning is a managed service for running distributed reinforcement learning training and simulation using the open source Ray framework.\n", "This example uses Ray RLlib to train a Pong playing agent on a multi-node cluster.\n", "\n", "## Pong problem\n", @@ -48,7 +48,7 @@ "source": [ "The goal here is to train an agent to win an episode of Pong game against opponent with the score of at least 18 points. An episode in Pong runs until one of the players reaches a score of 21. 
Episodes are a terminology that is used across all the [OpenAI gym](https://gym.openai.com/envs/Pong-v0/) environments that contains a strictly defined task.\n", "\n", - "Training a Pong agent is a CPU intensive task and this example demonstrates the use of Azure ML RL service to train an agent faster in a distributed, parallel environment. You'll learn more about using the head and the worker compute targets to train an agent in this notebook below." + "Training a Pong agent is a compute-intensive task and this example demonstrates the use of Reinforcement Learning in Azure Machine Learning service to train an agent faster in a distributed, parallel environment. You'll learn more about using the head and the worker compute targets to train an agent in this notebook below." ] }, { @@ -57,7 +57,7 @@ "source": [ "## Prerequisite\n", "\n", - "The user should have completed the [Azure ML Reinforcement Learning Sample - Setting Up Development Environment](../setup/devenv_setup.ipynb) to setup a virtual network. This virtual network will be used here for head and worker compute targets. It is highly recommended that the user should go through the [Azure ML Reinforcement Learning Sample - Cartpole Problem](../cartpole-on-single-compute/cartpole_cc.ipynb) to understand the basics of Azure ML RL and Ray RLlib used in this notebook." + "The user should have completed the [Reinforcement Learning in Azure Machine Learning - Setting Up Development Environment](../setup/devenv_setup.ipynb) to setup a virtual network. This virtual network will be used here for head and worker compute targets. It is highly recommended that the user should go through the [Reinforcement Learning in Azure Machine Learning - Cartpole Problem on Single Compute](../cartpole-on-single-compute/cartpole_sc.ipynb) to understand the basics of Reinforcement Learning in Azure Machine Learning and Ray RLlib used in this notebook." 
] }, { @@ -69,7 +69,7 @@ "\n", "* Connecting to a workspace to enable communication between your local machine and remote resources\n", "* Creating an experiment to track all your runs\n", - "* Creating a remote head and worker compute target on a vnet to use for training" + "* Creating remote head and worker compute target on a virtual network to use for training" ] }, { @@ -88,19 +88,19 @@ "source": [ "%matplotlib inline\n", "\n", - "# Azure ML core imports\n", + "# Azure Machine Learning core imports\n", "import azureml.core\n", "\n", "# Check core SDK version number\n", - "print(\"Azure ML SDK Version: \", azureml.core.VERSION)" + "print(\"Azure Machine Learning SDK Version: \", azureml.core.VERSION)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Get Azure ML workspace\n", - "Get a reference to an existing Azure ML workspace." + "### Get Azure Machine Learning workspace\n", + "Get a reference to an existing Azure Machine Learning workspace." ] }, { @@ -119,7 +119,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create Azure ML experiment\n", + "### Create Azure Machine Learning experiment\n", "Create an experiment to track the runs in your workspace." ] }, @@ -140,9 +140,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Specify the name of your vnet\n", + "### Specify the name of your virtual network\n", "\n", - "The resource group you use must contain a vnet. Specify the name of the vnet here created in the [Azure ML Reinforcement Learning Sample - Setting Up Development Environment](../setup/devenv_setup.ipynb)." + "The resource group you use must contain a virtual network. Specify the name of the virtual network here created in the [Azure Machine Learning Reinforcement Learning Sample - Setting Up Development Environment](../setup/devenv_setup.ipynb)." 
] }, { @@ -159,9 +159,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create head computing cluster\n", + "### Create head compute target\n", "\n", - "In this example, we show how to set up separate compute clusters for the Ray head and Ray worker nodes. First we define the head cluster with GPU for the Ray head node. One CPU of the head node will be used for the Ray head process and the rest of the CPUs will be used by the Ray worker processes." + "In this example, we show how to set up separate compute targets for the Ray head and Ray worker nodes. First we define the head cluster with GPU for the Ray head node. One CPU of the head node will be used for the Ray head process and the rest of the CPUs will be used by the Ray worker processes." ] }, { @@ -186,15 +186,17 @@ " if head_compute_target.provisioning_state == 'Succeeded':\n", " print('found head compute target. just use it', head_compute_name)\n", " else: \n", - " raise Exception('found head compute target but it is in state', head_compute_target.provisioning_state)\n", + " raise Exception(\n", + " 'found head compute target but it is in state', head_compute_target.provisioning_state)\n", "else:\n", " print('creating a new head compute target...')\n", - " provisioning_config = AmlCompute.provisioning_configuration(vm_size=head_vm_size,\n", - " min_nodes=head_compute_min_nodes, \n", - " max_nodes=head_compute_max_nodes,\n", - " vnet_resourcegroup_name=ws.resource_group,\n", - " vnet_name=vnet_name,\n", - " subnet_name='default')\n", + " provisioning_config = AmlCompute.provisioning_configuration(\n", + " vm_size=head_vm_size,\n", + " min_nodes=head_compute_min_nodes, \n", + " max_nodes=head_compute_max_nodes,\n", + " vnet_resourcegroup_name=ws.resource_group,\n", + " vnet_name=vnet_name,\n", + " subnet_name='default')\n", "\n", " # Create the cluster\n", " head_compute_target = ComputeTarget.create(ws, head_compute_name, provisioning_config)\n", @@ -203,7 +205,7 @@ " # If no min node count 
is provided it will use the scale settings for the cluster\n", " head_compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n", " \n", - " # For a more detailed view of current AmlCompute status, use get_status()\n", + " # For a more detailed view of current AmlCompute status, use get_status()\n", " print(head_compute_target.get_status().serialize())" ] }, @@ -211,9 +213,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create worker computing cluster\n", + "### Create worker compute target\n", "\n", - "Now we create a compute cluster with CPUs for the additional Ray worker nodes. CPUs in these worker nodes are used by Ray worker processes. Each Ray worker node may have multiple Ray worker processes depending on CPUs on the worker node. Ray can distribute multiple worker tasks on each worker node." + "Now we create a compute target with CPUs for the additional Ray worker nodes. CPUs in these worker nodes are used by Ray worker processes. Each Ray worker node, depending on the CPUs on the node, may have multiple Ray worker processes. There can be multiple worker tasks on each worker process (core)." ] }, { @@ -222,7 +224,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Choose a name for your Ray worker cluster\n", + "# Choose a name for your Ray worker compute target\n", "worker_compute_name = 'worker-cpu'\n", "worker_compute_min_nodes = 0 \n", "worker_compute_max_nodes = 4\n", @@ -237,24 +239,26 @@ " if worker_compute_target.provisioning_state == 'Succeeded':\n", " print('found worker compute target. 
just use it', worker_compute_name)\n", " else: \n", - " raise Exception('found worker compute target but it is in state', head_compute_target.provisioning_state)\n", + " raise Exception(\n", + " 'found worker compute target but it is in state', head_compute_target.provisioning_state)\n", "else:\n", " print('creating a new worker compute target...')\n", - " provisioning_config = AmlCompute.provisioning_configuration(vm_size=worker_vm_size,\n", - " min_nodes=worker_compute_min_nodes, \n", - " max_nodes=worker_compute_max_nodes,\n", - " vnet_resourcegroup_name=ws.resource_group,\n", - " vnet_name=vnet_name,\n", - " subnet_name='default')\n", + " provisioning_config = AmlCompute.provisioning_configuration(\n", + " vm_size=worker_vm_size,\n", + " min_nodes=worker_compute_min_nodes,\n", + " max_nodes=worker_compute_max_nodes,\n", + " vnet_resourcegroup_name=ws.resource_group,\n", + " vnet_name=vnet_name,\n", + " subnet_name='default')\n", "\n", - " # Create the cluster\n", + " # Create the compute target\n", " worker_compute_target = ComputeTarget.create(ws, worker_compute_name, provisioning_config)\n", " \n", " # Can poll for a minimum number of nodes and for a specific timeout. \n", " # If no min node count is provided it will use the scale settings for the cluster\n", " worker_compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n", " \n", - " # For a more detailed view of current AmlCompute status, use get_status()\n", + " # For a more detailed view of current AmlCompute status, use get_status()\n", " print(worker_compute_target.get_status().serialize())" ] }, @@ -262,12 +266,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Train Pong Agent Using Azure ML RL\n", - "To facilitate reinforcement learning, Azure Machine Learning Python SDK provides a high level abstraction, the _ReinforcementLearningEstimator_ class, which allows users to easily construct RL run configurations for the underlying RL framework. 
Azure ML RL initially supports the [Ray framework](https://ray.io/) and its highly customizable [RLLib](https://ray.readthedocs.io/en/latest/rllib.html#rllib-scalable-reinforcement-learning). In this section we show how to use _ReinforcementLearningEstimator_ and Ray/RLLib framework to train a Pong playing agent.\n", + "## Train Pong Agent\n", + "To facilitate reinforcement learning, Azure Machine Learning Python SDK provides a high level abstraction, the _ReinforcementLearningEstimator_ class, which allows users to easily construct reinforcement learning run configurations for the underlying reinforcement learning framework. Reinforcement Learning in Azure Machine Learning supports the open source [Ray framework](https://ray.io/) and its highly customizable [RLLib](https://ray.readthedocs.io/en/latest/rllib.html#rllib-scalable-reinforcement-learning). In this section we show how to use _ReinforcementLearningEstimator_ and Ray/RLLib framework to train a Pong playing agent.\n", "\n", "\n", "### Define worker configuration\n", - "Define a `WorkerConfiguration` using your worker compute target. We also specify the number of nodes in the worker compute target to be used for training and additional PIP packages to install on those nodes as a part of setup.\n", + "Define a `WorkerConfiguration` using your worker compute target. We specify the number of nodes in the worker compute target to be used for training and additional PIP packages to install on those nodes as a part of setup.\n", "In this case, we define the PIP packages as dependencies for both head and worker nodes. With this setup, the game simulations will run directly on the worker compute nodes." 
] }, @@ -285,7 +289,7 @@ "# Specify the Ray worker configuration\n", "worker_conf = WorkerConfiguration(\n", " \n", - " # Azure ML compute cluster to run Ray workers\n", + " # Azure Machine Learning compute target to run Ray workers\n", " compute_target=worker_compute_target, \n", " \n", " # Number of worker nodes\n", @@ -305,7 +309,7 @@ "source": [ "### Create reinforcement learning estimator\n", "\n", - "The `ReinforcementLearningEstimator` is used to submit a job to Azure Machine Learning to start the Ray experiment run. We define the training script parameters here that will be passed to estimator. \n", + "The `ReinforcementLearningEstimator` is used to submit a job to Azure Machine Learning to start the Ray experiment run. We define the training script parameters here that will be passed to the estimator. \n", "\n", "We specify `episode_reward_mean` to 18 as we want to stop the training as soon as the trained agent reaches an average win margin of at least 18 point over opponent over all episodes in the training epoch.\n", "Number of Ray worker processes are defined by parameter `num_workers`. We set it to 13 as we have 13 CPUs available in our compute targets. Multiple Ray worker processes parallelizes agent training and helps in achieving our goal faster. \n", @@ -348,7 +352,7 @@ " \"--stop\": '\\'{\"episode_reward_mean\": 18, \"time_total_s\": 3600}\\'',\n", "}\n", "\n", - "# RL estimator\n", + "# Reinforcement learning estimator\n", "rl_estimator = ReinforcementLearningEstimator(\n", " \n", " # Location of source files\n", @@ -361,7 +365,7 @@ " # Defined above.\n", " script_params=script_params,\n", " \n", - " # The Azure ML compute target set up for Ray head nodes\n", + " # The Azure Machine Learning compute target set up for Ray head nodes\n", " compute_target=head_compute_target,\n", " \n", " # Pip packages\n", @@ -370,7 +374,7 @@ " # GPU usage\n", " use_gpu=True,\n", " \n", - " # RL framework. 
Currently must be Ray.\n", + " # Reinforcement learning framework. Currently must be Ray.\n", " rl_framework=Ray(),\n", " \n", " # Ray worker configuration defined above.\n", @@ -394,23 +398,24 @@ "metadata": {}, "source": [ "### Training script\n", - "As recommended in [RLLib](https://ray.readthedocs.io/en/latest/rllib.html) documentations, we use Ray [Tune](https://ray.readthedocs.io/en/latest/tune.html) API to run training algorithm. All the RLLib built-in trainers are compatible with the Tune API. Here we use tune.run() to execute a built-in training algorithm. For convenience, down below you can see part of the entry script where we make this call.\n", + "As recommended in [RLlib](https://ray.readthedocs.io/en/latest/rllib.html) documentations, we use Ray [Tune](https://ray.readthedocs.io/en/latest/tune.html) API to run the training algorithm. All the RLlib built-in trainers are compatible with the Tune API. Here we use tune.run() to execute a built-in training algorithm. For convenience, down below you can see part of the entry script where we make this call.\n", "\n", "```python\n", - " tune.run(run_or_experiment=args.run,\n", - " config={\n", - " \"env\": args.env,\n", - " \"num_gpus\": args.config[\"num_gpus\"],\n", - " \"num_workers\": args.config[\"num_workers\"],\n", - " \"callbacks\": {\"on_train_result\": callbacks.on_train_result},\n", - " \"sample_batch_size\": 50,\n", - " \"train_batch_size\": 1000,\n", - " \"num_sgd_iter\": 2,\n", - " \"num_data_loader_buffers\": 2,\n", - " \"model\": {\"dim\": 42},\n", - " },\n", - " stop=args.stop,\n", - " local_dir='./logs')\n", + " tune.run(\n", + " run_or_experiment=args.run,\n", + " config={\n", + " \"env\": args.env,\n", + " \"num_gpus\": args.config[\"num_gpus\"],\n", + " \"num_workers\": args.config[\"num_workers\"],\n", + " \"callbacks\": {\"on_train_result\": callbacks.on_train_result},\n", + " \"sample_batch_size\": 50,\n", + " \"train_batch_size\": 1000,\n", + " \"num_sgd_iter\": 2,\n", + " 
\"num_data_loader_buffers\": 2,\n", + " \"model\": {\"dim\": 42},\n", + " },\n", + " stop=args.stop,\n", + " local_dir='./logs')\n", "```" ] }, @@ -437,7 +442,7 @@ "source": [ "### Monitor the run\n", "\n", - "Azure ML provides a Jupyter widget to show the real-time status of an experiment run. You could use this widget to monitor the status of runs. The widget shows the list of two child runs, one for head compute target run and one for worker compute target run, as well. You can click on the link under Status to see the details of the child run." + "Azure Machine Learning provides a Jupyter widget to show the status of an experiment run. You could use this widget to monitor the status of the runs. The widget shows the list of two child runs, one for head compute target run and one for worker compute target run. You can click on the link under **Status** to see the details of the child run. It will also show the metrics being logged." ] }, { @@ -455,9 +460,29 @@ "cell_type": "markdown", "metadata": {}, "source": [ + "### Stop the run\n", + "\n", + "To stop the run, call `run.cancel()`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Uncomment line below to cancel the run\n", + "# run.cancel()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Wait for completion\n", "Wait for the run to complete before proceeding. If you want to stop the run, you may skip this and move to next section below. \n", "\n", - "**Note: the run may take anywhere from 30 minutes to 45 minutes to complete.**" + "**Note: The run may take anywhere from 30 minutes to 45 minutes to complete.**" ] }, { @@ -469,24 +494,6 @@ "run.wait_for_completion()" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Stop the run\n", - "\n", - "To cancel the run, call run.cancel()." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# run.cancel()" - ] - }, { "cell_type": "markdown", "metadata": {}, "source": [ @@ -539,8 +546,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We observe that during the training over multiple episodes, the agent learn to win the Pong game against opponent with our target of 18 points in each episode of 21 points.\n", - "**Congratulations!! You have trained your Pong agent to win a game marvelously.**" + "We observe that during the training over multiple episodes, the agent learns to win the Pong game against opponent with our target of 18 points in each episode of 21 points.\n", + "**Congratulations!! You have trained your Pong agent to win a game.**" ] }, { @@ -570,7 +577,7 @@ "metadata": {}, "source": [ "## Next\n", - "In this example, you learnt how to solve distributed RL training problems using head and worker compute targets. This is currently the last introductory tutorial for Azure Machine Learning service's Reinforcement Learning offering. We would love to hear your feedback to build the features you need!" + "In this example, you learned how to solve distributed reinforcement learning training problems using head and worker compute targets. This was an introductory tutorial on Reinforcement Learning in Azure Machine Learning service offering. We would love to hear your feedback to build the features you need!" ] } ], @@ -595,7 +602,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.4" + "version": "3.6.9" }, "notice": "Copyright (c) Microsoft Corporation. 
All rights reserved.\u00e2\u20ac\u00afLicensed under the MIT License.\u00e2\u20ac\u00af " }, diff --git a/how-to-use-azureml/reinforcement-learning/cartpole-on-compute-instance/cartpole_ci.ipynb b/how-to-use-azureml/reinforcement-learning/cartpole-on-compute-instance/cartpole_ci.ipynb index 19bc54f2..24476e04 100644 --- a/how-to-use-azureml/reinforcement-learning/cartpole-on-compute-instance/cartpole_ci.ipynb +++ b/how-to-use-azureml/reinforcement-learning/cartpole-on-compute-instance/cartpole_ci.ipynb @@ -20,11 +20,11 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Azure ML Reinforcement Learning Sample - Cartpole Problem on Compute Instance\n", + "# Reinforcement Learning in Azure Machine Learning - Cartpole Problem on Compute Instance\n", "\n", - "Azure ML Reinforcement Learning (Azure ML RL) is a managed service for running reinforcement learning training and simulation. With Azure MLRL, data scientists can start developing RL systems on one machine, and scale to compute clusters with 100\u00e2\u20ac\u2122s of nodes if needed.\n", + "Reinforcement Learning in Azure Machine Learning is a managed service for running reinforcement learning training and simulation. With Reinforcement Learning in Azure Machine Learning, data scientists can start developing reinforcement learning systems on one machine, and scale to compute targets with 100\u00e2\u20ac\u2122s of nodes if needed.\n", "\n", - "This example shows how to use Azure ML RL to train a Cartpole playing agent on a compute instance." + "This example shows how to use Reinforcement Learning in Azure Machine Learning to train a Cartpole playing agent on a compute instance." ] }, { @@ -56,7 +56,7 @@ "metadata": {}, "source": [ "### Prerequisite\n", - "The user should have completed the Azure Machine Learning Tutorial: [Get started creating your first ML experiment with the Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-1st-experiment-sdk-setup). 
You will need to make sure that you have a valid subscription id, a resource group and a workspace. All datastores and datasets you use should be associated with your workspace." + "The user should have completed the Azure Machine Learning Tutorial: [Get started creating your first ML experiment with the Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-1st-experiment-sdk-setup). You will need to make sure that you have a valid subscription ID, a resource group, and an Azure Machine Learning workspace. All datastores and datasets you use should be associated with your workspace." ] }, { @@ -75,8 +75,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Azure ML SDK \n", - "Display the Azure ML SDK version." + "### Azure Machine Learning SDK \n", + "Display the Azure Machine Learning SDK version." ] }, { @@ -86,15 +86,15 @@ "outputs": [], "source": [ "import azureml.core\n", - "print(\"Azure ML SDK Version: \", azureml.core.VERSION)" + "print(\"Azure Machine Learning SDK Version: \", azureml.core.VERSION)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Get Azure ML workspace\n", - "Get a reference to an existing Azure ML workspace." + "### Get Azure Machine Learning workspace\n", + "Get a reference to an existing Azure Machine Learning workspace." 
] }, { @@ -163,18 +163,22 @@ "source": [ "# Load current compute instance info\n", "current_compute_instance = load_nbvm()\n", - "print(\"Current compute instance:\", current_compute_instance)\n", "\n", "# For this demo, let's use the current compute instance as the compute target, if available\n", "if current_compute_instance:\n", + " print(\"Current compute instance:\", current_compute_instance)\n", " instance_name = current_compute_instance['instance']\n", "else:\n", " instance_name = next(iter(ws.compute_targets))\n", + " print(\"Instance name:\", instance_name)\n", "\n", "compute_target = ws.compute_targets[instance_name]\n", "\n", "print(\"Compute target status:\")\n", - "print(compute_target.get_status().serialize())\n", + "try:\n", + " print(compute_target.get_status().serialize())\n", + "except:\n", + " print(compute_target.get_status())\n", "\n", "print(\"Compute target size:\")\n", "print(compute_target.size(ws))" @@ -184,7 +188,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create Azure ML experiment\n", + "### Create Azure Machine Learning experiment\n", "Create an experiment to track the runs in your workspace. " ] }, @@ -204,8 +208,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Train Cartpole Agent Using Azure ML RL\n", - "To facilitate reinforcement learning, Azure Machine Learning Python SDK provides a high level abstraction, the _ReinforcementLearningEstimator_ class, which allows users to easily construct RL run configurations for the underlying RL framework. Azure ML RL initially supports the [Ray framework](https://ray.io/) and its highly customizable [RLlib](https://ray.readthedocs.io/en/latest/rllib.html#rllib-scalable-reinforcement-learning). In this section we show how to use _ReinforcementLearningEstimator_ and Ray/RLlib framework to train a cartpole playing agent. 
" + "## Train Cartpole Agent\n", + "To facilitate reinforcement learning, Azure Machine Learning Python SDK provides a high level abstraction, the _ReinforcementLearningEstimator_ class, which allows users to easily construct reinforcement learning run configurations for the underlying reinforcement learning framework. Reinforcement Learning in Azure Machine Learning supports the open source [Ray framework](https://ray.io/) and its highly customizable [RLlib](https://ray.readthedocs.io/en/latest/rllib.html#rllib-scalable-reinforcement-learning). In this section we show how to use _ReinforcementLearningEstimator_ and Ray/RLlib framework to train a cartpole playing agent. " ] }, { @@ -222,7 +226,7 @@ "- `entry_script`, path to your entry script relative to the source directory,\n", "- `script_params`, constant parameters to be passed to each run of training script,\n", "- `compute_target`, reference to the compute target in which the trainer and worker(s) jobs will be executed,\n", - "- `rl_framework`, the RL framework to be used (currently must be Ray).\n", + "- `rl_framework`, the reinforcement learning framework to be used (currently must be Ray).\n", "\n", "We use the `script_params` parameter to pass in general and algorithm-specific parameters to the training script.\n" ] @@ -273,10 +277,10 @@ " # A dictionary of arguments to pass to the training script specified in ``entry_script``\n", " script_params=script_params,\n", " \n", - " # The Azure ML compute target set up for Ray head nodes\n", + " # The Azure Machine Learning compute target set up for Ray head nodes\n", " compute_target=compute_target,\n", " \n", - " # RL framework. Currently must be Ray.\n", + " # Reinforcement learning framework. Currently must be Ray.\n", " rl_framework=Ray()\n", ")" ] @@ -345,11 +349,11 @@ "metadata": {}, "source": [ "### Monitor experiment\n", - "Azure ML provides a Jupyter widget to show the real-time status of an experiment run. 
You could use this widget to monitor status of the runs.\n", + "Azure Machine Learning provides a Jupyter widget to show the status of an experiment run. You could use this widget to monitor the status of the runs.\n", "\n", "Note that _ReinforcementLearningEstimator_ creates at least two runs: (a) A parent run, i.e. the run returned above, and (b) a collection of child runs. The number of the child runs depends on the configuration of the reinforcement learning estimator. In our simple scenario, configured above, only one child run will be created.\n", "\n", - "The widget will show a list of the child runs as well. You can click on the link under **Status** to see the details of a child run." + "The widget will show a list of the child runs as well. You can click on the link under **Status** to see the details of a child run. It will also show the metrics being logged." ] }, { @@ -369,7 +373,7 @@ "source": [ "### Stop the run\n", "\n", - "To cancel the run, call `training_run.cancel()`." + "To stop the run, call `training_run.cancel()`." ] }, { @@ -577,10 +581,10 @@ " training_artifacts_ds.as_named_input('artifacts_dataset'),\n", " training_artifacts_ds.as_named_input('artifacts_path').as_mount()],\n", " \n", - " # The Azure ML compute target\n", + " # The Azure Machine Learning compute target\n", " compute_target=compute_target,\n", " \n", - " # RL framework. Currently must be Ray.\n", + " # Reinforcement learning framework. Currently must be Ray.\n", " rl_framework=Ray(),\n", " \n", " # Additional pip packages to install\n", @@ -662,7 +666,7 @@ "metadata": {}, "source": [ "## Next\n", - "This example was about running Azure ML RL (Ray/RLlib Framework) on compute instance. Please see [Cartpole problem](../cartpole-on-single-compute/cartpole_cc.ipynb)\n", + "This example was about running Reinforcement Learning in Azure Machine Learning (Ray/RLlib Framework) on a compute instance. 
Please see [Cartpole Problem on Single Compute](../cartpole-on-single-compute/cartpole_sc.ipynb)\n", "example which uses Ray RLlib to train a Cartpole playing agent on a single node remote compute.\n" ] } diff --git a/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_cc.ipynb b/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.ipynb similarity index 91% rename from how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_cc.ipynb rename to how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.ipynb index 15fb9aa2..df30d078 100644 --- a/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_cc.ipynb +++ b/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.ipynb @@ -13,18 +13,18 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/tutorials/how-to-use-azureml/reinforcement-learning/cartpole_on_single_compute/cartpole_cc.png)" + "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/tutorials/how-to-use-azureml/reinforcement-learning/cartpole_on_single_compute/cartpole_sc.png)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "# Azure ML Reinforcement Learning Sample - Cartpole Problem\n", + "# Reinforcement Learning in Azure Machine Learning - Cartpole Problem on Single Compute\n", "\n", - "Azure ML Reinforcement Learning (Azure ML RL) is a managed service for running reinforcement learning training and simulation. With Azure MLRL, data scientists can start developing RL systems on one machine, and scale to compute clusters with 100\u00e2\u20ac\u2122s of nodes if needed.\n", + "Reinforcement Learning in Azure Machine Learning is a managed service for running reinforcement learning training and simulation. 
With Reinforcement Learning in Azure Machine Learning, data scientists can start developing reinforcement learning systems on one machine, and scale to compute targets with 100\u00e2\u20ac\u2122s of nodes if needed.\n", "\n", - "This example shows how to use Azure ML RL to train a Cartpole playing agent on a single machine. " + "This example shows how to use Reinforcement Learning in Azure Machine Learning to train a Cartpole playing agent on a single compute. " ] }, { @@ -56,7 +56,7 @@ "metadata": {}, "source": [ "### Prerequisite\n", - "The user should have completed the Azure Machine Learning Tutorial: [Get started creating your first ML experiment with the Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-1st-experiment-sdk-setup). You will need to make sure that you have a valid subscription id, a resource group and a workspace. All datastores and datasets you use should be associated with your workspace." + "The user should have completed the Azure Machine Learning Tutorial: [Get started creating your first ML experiment with the Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-1st-experiment-sdk-setup). You will need to make sure that you have a valid subscription ID, a resource group, and an Azure Machine Learning workspace. All datastores and datasets you use should be associated with your workspace." ] }, { @@ -75,8 +75,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Azure ML SDK \n", - "Display the Azure ML SDK version." + "### Azure Machine Learning SDK \n", + "Display the Azure Machine Learning SDK version." ] }, { @@ -87,15 +87,15 @@ "source": [ "import azureml.core\n", "\n", - "print(\"Azure ML SDK Version: \", azureml.core.VERSION)" + "print(\"Azure Machine Learning SDK Version: \", azureml.core.VERSION)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Get Azure ML workspace\n", - "Get a reference to an existing Azure ML workspace." 
+ "### Get Azure Machine Learning workspace\n", + "Get a reference to an existing Azure Machine Learning workspace." ] }, { @@ -118,7 +118,7 @@ "\n", "A compute target is a designated compute resource where you run your training and simulation scripts. This location may be your local machine or a cloud-based compute resource. The code below shows how to create a cloud-based compute target. For more information see [What are compute targets in Azure Machine Learning?](https://docs.microsoft.com/en-us/azure/machine-learning/concept-compute-target)\n", "\n", - "**Note: Creation of a compute resource can take several minutes**" + "**Note: Creation of a compute resource can take several minutes**. Please make sure to change `STANDARD_D2_V2` to a [size available in your region](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines)." ] }, { @@ -158,7 +158,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create Azure ML experiment\n", + "### Create Azure Machine Learning experiment\n", "Create an experiment to track the runs in your workspace. " ] }, @@ -178,8 +178,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Train Cartpole Agent Using Azure ML RL\n", - "To facilitate reinforcement learning, Azure Machine Learning Python SDK provides a high level abstraction, the _ReinforcementLearningEstimator_ class, which allows users to easily construct RL run configurations for the underlying RL framework. Azure ML RL initially supports the [Ray framework](https://ray.io/) and its highly customizable [RLlib](https://ray.readthedocs.io/en/latest/rllib.html#rllib-scalable-reinforcement-learning). In this section we show how to use _ReinforcementLearningEstimator_ and Ray/RLlib framework to train a cartpole playing agent. 
" + "## Train Cartpole Agent\n", + "To facilitate reinforcement learning, Azure Machine Learning Python SDK provides a high level abstraction, the _ReinforcementLearningEstimator_ class, which allows users to easily construct reinforcement learning run configurations for the underlying reinforcement learning framework. Reinforcement Learning in Azure Machine Learning supports the open source [Ray framework](https://ray.io/) and its highly customizable [RLlib](https://ray.readthedocs.io/en/latest/rllib.html#rllib-scalable-reinforcement-learning). In this section we show how to use _ReinforcementLearningEstimator_ and Ray/RLlib framework to train a cartpole playing agent. " ] }, { @@ -196,7 +196,7 @@ "- `entry_script`, path to your entry script relative to the source directory,\n", "- `script_params`, constant parameters to be passed to each run of training script,\n", "- `compute_target`, reference to the compute target in which the trainer and worker(s) jobs will be executed,\n", - "- `rl_framework`, the RL framework to be used (currently must be Ray).\n", + "- `rl_framework`, the reinforcement learning framework to be used (currently must be Ray).\n", "\n", "We use the `script_params` parameter to pass in general and algorithm-specific parameters to the training script.\n" ] @@ -249,7 +249,7 @@ " # There are two parts to this:\n", " # 1. Use a custom docker file with proper instructions to install xvfb, ffmpeg, python-opengl\n", " # and other dependencies. 
\n", - " # TODO: Add these instructions to default rl base image and drop this docker file.\n", + " # TODO: Add these instructions to default reinforcement learning base image and drop this docker file.\n", " \n", " with open(\"files/docker/Dockerfile\", \"r\") as f:\n", " dockerfile=f.read()\n", @@ -274,10 +274,10 @@ " # A dictionary of arguments to pass to the training script specified in ``entry_script``\n", " script_params=script_params,\n", " \n", - " # The Azure ML compute target set up for Ray head nodes\n", + " # The Azure Machine Learning compute target set up for Ray head nodes\n", " compute_target=compute_target,\n", " \n", - " # RL framework. Currently must be Ray.\n", + " # Reinforcement learning framework. Currently must be Ray.\n", " rl_framework=Ray(),\n", " \n", " # Custom environmnet for Xvfb\n", @@ -350,11 +350,11 @@ "source": [ "### Monitor experiment\n", "\n", - "Azure ML provides a Jupyter widget to show the real-time status of an experiment run. You could use this widget to monitor status of the runs.\n", + "Azure Machine Learning provides a Jupyter widget to show the status of an experiment run. You could use this widget to monitor the status of the runs.\n", "\n", "Note that _ReinforcementLearningEstimator_ creates at least two runs: (a) A parent run, i.e. the run returned above, and (b) a collection of child runs. The number of the child runs depends on the configuration of the reinforcement learning estimator. In our simple scenario, configured above, only one child run will be created.\n", "\n", - "The widget will show a list of the child runs as well. You can click on the link under **Status** to see the details of a child run." + "The widget will show a list of the child runs as well. You can click on the link under **Status** to see the details of a child run. It will also show the metrics being logged." ] }, { @@ -373,7 +373,7 @@ "metadata": {}, "source": [ "### Stop the run\n", - "To cancel the run, call `training_run.cancel()`." 
+ "To stop the run, call `training_run.cancel()`." ] }, { @@ -393,7 +393,7 @@ "### Wait for completion\n", "Wait for the run to complete before proceeding.\n", "\n", - "**Note: The length of the run depends on the provisioning time of the compute target and may take several minutes to complete.**" + "**Note: The length of the run depends on the provisioning time of the compute target and it may take several minutes to complete.**" ] }, { @@ -560,18 +560,20 @@ " dir_util.mkpath(destination)\n", " \n", " try:\n", - " # Mount dataset and copy movies\n", + " print(\"Trying mounting dataset and copying movies.\")\n", " # Note: We assume movie paths start with '\\'\n", " mount_context = artifacts_ds.mount()\n", " mount_context.start()\n", - " print('Download started.')\n", " for movie in movies:\n", " print('Copying {} ...'.format(movie))\n", " shutil.copy2(path.join(mount_context.mount_point, movie[1:]), destination)\n", " mount_context.stop()\n", " except:\n", - " print(\"Mounting error! Downloading all artifacts ...\")\n", - " artifacts_ds.download(target_path=destination, overwrite=True)\n", + " print(\"Mounting failed! Going with dataset download.\")\n", + " for i, file in enumerate(artifacts_ds.to_path()):\n", + " if file in movies:\n", + " print('Downloading {} ...'.format(file))\n", + " artifacts_ds.skip(i).take(1).download(target_path=destination, overwrite=True)\n", " \n", " print('Downloading movies completed!')\n", "\n", @@ -625,7 +627,7 @@ "print(\"Last movie:\", last_movie)\n", "\n", "# Download movies\n", - "training_movies_path = \"training\"\n", + "training_movies_path = path.join(\"training\", \"videos\")\n", "download_movies(training_artifacts_ds, [first_movie, last_movie], training_movies_path)" ] }, @@ -781,7 +783,7 @@ "# 1. 
Use a custom docker file with proper instructions to install xvfb, ffmpeg, python-opengl\n", "# and other dependencies.\n", "# Note: Even when the rendering is off pyhton-opengl is needed.\n", - "# TODO: Add these instructions to default rl base image and drop this docker file.\n", + "# TODO: Add these instructions to default reinforcement learning base image and drop this docker file.\n", "\n", "with open(\"files/docker/Dockerfile\", \"r\") as f:\n", " dockerfile=f.read()\n", @@ -811,10 +813,10 @@ " training_artifacts_ds.as_named_input('artifacts_dataset'),\n", " training_artifacts_ds.as_named_input('artifacts_path').as_mount()],\n", " \n", - " # The Azure ML compute target set up for Ray head nodes\n", + " # The Azure Machine Learning compute target set up for Ray head nodes\n", " compute_target=compute_target,\n", " \n", - " # RL framework. Currently must be Ray.\n", + " # Reinforcement learning framework. Currently must be Ray.\n", " rl_framework=Ray(),\n", " \n", " # Custom environmnet for Xvfb\n", @@ -928,7 +930,7 @@ "print(\"Last movie:\", last_movie)\n", "\n", "# Download last movie\n", - "rollout_movies_path = \"rollout\"\n", + "rollout_movies_path = path.join(\"rollout\", \"videos\")\n", "download_movies(rollout_artifacts_ds, [last_movie], rollout_movies_path)\n", "\n", "# Look for the downloaded movie in local directory\n", @@ -996,7 +998,7 @@ "metadata": {}, "source": [ "## Next\n", - "This example was about running Azure ML RL (Ray/RLlib Framework) on a single node. Please see [Pong problem](../atari-on-distributed-compute/pong_rllib.ipynb)\n", + "This example was about running Reinforcement Learning in Azure Machine Learning (Ray/RLlib Framework) on a single compute. Please see [Pong Problem](../atari-on-distributed-compute/pong_rllib.ipynb)\n", "example which uses Ray RLlib to train a Pong playing agent on a multi-node cluster." 
] } diff --git a/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_cc.yml b/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.yml similarity index 84% rename from how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_cc.yml rename to how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.yml index 0ac02b81..48d5edfa 100644 --- a/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_cc.yml +++ b/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.yml @@ -1,4 +1,4 @@ -name: cartpole_cc +name: cartpole_sc dependencies: - pip: - azureml-sdk diff --git a/how-to-use-azureml/reinforcement-learning/minecraft-on-distributed-compute/files/minecraft_train.py b/how-to-use-azureml/reinforcement-learning/minecraft-on-distributed-compute/files/minecraft_train.py index a3e04529..d97411d6 100644 --- a/how-to-use-azureml/reinforcement-learning/minecraft-on-distributed-compute/files/minecraft_train.py +++ b/how-to-use-azureml/reinforcement-learning/minecraft-on-distributed-compute/files/minecraft_train.py @@ -1,3 +1,5 @@ +import os + import ray import ray.tune as tune @@ -6,8 +8,10 @@ from minecraft_environment import create_env def stop(trial_id, result): + max_train_time = int(os.environ.get("AML_MAX_TRAIN_TIME_SECONDS", 5 * 60 * 60)) + return result["episode_reward_mean"] >= 1 \ - or result["time_total_s"] > 5 * 60 * 60 + or result["time_total_s"] >= max_train_time if __name__ == '__main__': diff --git a/how-to-use-azureml/reinforcement-learning/minecraft-on-distributed-compute/minecraft.ipynb b/how-to-use-azureml/reinforcement-learning/minecraft-on-distributed-compute/minecraft.ipynb index 98b30ccc..5352050e 100644 --- a/how-to-use-azureml/reinforcement-learning/minecraft-on-distributed-compute/minecraft.ipynb +++ b/how-to-use-azureml/reinforcement-learning/minecraft-on-distributed-compute/minecraft.ipynb @@ -110,7 
+110,7 @@ "outputs": [], "source": [ "import azureml.core\n", - "print(\"Azure Machine Learning SDK Version: \", azureml.core.VERSION)" + "print(\"Azure Machine Learning SDK Version: \", azureml.core.VERSION) " ] }, { @@ -297,8 +297,11 @@ "metadata": {}, "outputs": [], "source": [ + "import os\n", "from azureml.core import Environment\n", "\n", + "max_train_time = os.environ.get(\"AML_MAX_TRAIN_TIME_SECONDS\", 5 * 60 * 60)\n", + "\n", "def create_env(env_type):\n", " env = Environment(name='minecraft-{env_type}'.format(env_type=env_type))\n", "\n", @@ -306,6 +309,7 @@ " env.docker.base_image = 'akdmsft/minecraft-{env_type}'.format(env_type=env_type)\n", "\n", " env.python.interpreter_path = \"xvfb-run -s '-screen 0 640x480x16 -ac +extension GLX +render' python\"\n", + " env.environment_variables[\"AML_MAX_TRAIN_TIME_SECONDS\"] = str(max_train_time)\n", " env.python.user_managed_dependencies = True\n", " \n", " return env\n", @@ -590,7 +594,6 @@ "outputs": [], "source": [ "import re\n", - "import os\n", "import tempfile\n", "\n", "from azureml.core import Dataset\n", diff --git a/how-to-use-azureml/reinforcement-learning/setup/devenv_setup.ipynb b/how-to-use-azureml/reinforcement-learning/setup/devenv_setup.ipynb index 78a5d4cb..35e5dfd4 100644 --- a/how-to-use-azureml/reinforcement-learning/setup/devenv_setup.ipynb +++ b/how-to-use-azureml/reinforcement-learning/setup/devenv_setup.ipynb @@ -20,7 +20,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Azure ML Reinforcement Learning Sample - Setting Up Development Environment\n", + "# Reinforcement Learning in Azure Machine Learning - Setting Up Development Environment\n", "\n", "Ray multi-node cluster setup requires all worker nodes to be able to communicate with the head node. This notebook explains you how to setup a virtual network, to be used by the Ray head and worker compute targets, created and used in other notebook examples." 
] @@ -31,7 +31,7 @@ "source": [ "### Prerequisite\n", "\n", - "The user should have completed the Azure Machine Learning Tutorial: [Get started creating your first ML experiment with the Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-1st-experiment-sdk-setup). You will need to make sure that you have a valid subscription id, a resource group and a workspace." + "The user should have completed the Azure Machine Learning Tutorial: [Get started creating your first ML experiment with the Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-1st-experiment-sdk-setup). You will need to make sure that you have a valid subscription ID, a resource group, and an Azure Machine Learning workspace." ] }, { @@ -48,19 +48,17 @@ "metadata": {}, "outputs": [], "source": [ - "# Azure ML Core imports\n", "import azureml.core\n", "\n", - "# Check core SDK version number\n", - "print(\"Azure ML SDK Version: \", azureml.core.VERSION)" + "print(\"Azure Machine Learning SDK Version: \", azureml.core.VERSION)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Get Azure ML workspace\n", - "Get a reference to an existing Azure ML workspace. Please make sure that the VM sizes `STANDARD_NC6` and `STANDARD_D2_V2` are supported in the workspace's region.\n" + "### Get Azure Machine Learning workspace\n", + "Get a reference to an existing Azure Machine Learning workspace. 
Please make sure to change `STANDARD_NC6` and `STANDARD_D2_V2` to [the ones available in your region](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines).\n" ] }, { @@ -72,7 +70,7 @@ "from azureml.core import Workspace\n", "\n", "ws = Workspace.from_config()\n", - "print(ws.name, ws.location, ws.resource_group, sep = ' | ') " + "print(ws.name, ws.location, ws.resource_group, sep = ' | ')" ] }, { @@ -115,7 +113,7 @@ "# The Azure subscription you are using\n", "subscription_id=ws.subscription_id\n", "\n", - "# The resource group for the RL cluster\n", + "# The resource group for the reinforcement learning cluster\n", "resource_group=ws.resource_group\n", "\n", "# Azure region of the resource group\n", @@ -135,7 +133,7 @@ ")\n", "\n", "async_vnet_creation.wait()\n", - "print(\"VNet created successfully: \", async_vnet_creation.result())" + "print(\"Virtual network created successfully: \", async_vnet_creation.result())" ] }, { @@ -169,7 +167,7 @@ " azure.mgmt.network.models.SecurityRule(\n", " name=security_rule_name,\n", " access=azure.mgmt.network.models.SecurityRuleAccess.allow,\n", - " description='Azure ML RL rule',\n", + " description='Reinforcement Learning in Azure Machine Learning rule',\n", " destination_address_prefix='*',\n", " destination_port_range='29876-29877',\n", " direction=azure.mgmt.network.models.SecurityRuleDirection.inbound,\n", @@ -202,7 +200,7 @@ " network_security_group=network_security_group\n", " )\n", " \n", - "# Create subnet on vnet\n", + "# Create subnet on virtual network\n", "async_subnet_creation = network_client.subnets.create_or_update(\n", " resource_group_name=resource_group,\n", " virtual_network_name=vnet_name,\n", diff --git a/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb b/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb index b2f8ef69..762f9a8e 100644 --- 
a/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb +++ b/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb @@ -100,7 +100,7 @@ "\n", "# Check core SDK version number\n", "\n", - "print(\"This notebook was created using SDK version 1.5.0, you are currently running version\", azureml.core.VERSION)" + "print(\"This notebook was created using SDK version 1.6.0, you are currently running version\", azureml.core.VERSION)" ] }, { diff --git a/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard.ipynb b/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard.ipynb index 83ff13b5..28749b7e 100644 --- a/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard.ipynb +++ b/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard.ipynb @@ -439,6 +439,8 @@ "metadata": {}, "outputs": [], "source": [ + "from azureml.train.dnn import TensorFlow\n", + "\n", "script_params = {\"--log_dir\": \"./logs\"}\n", "\n", "# If you want the run to go longer, set --max-steps to a higher number.\n", diff --git a/how-to-use-azureml/training/train-in-spark/train-in-spark.ipynb b/how-to-use-azureml/training/train-in-spark/train-in-spark.ipynb index 3cacab4b..a6c4ae27 100644 --- a/how-to-use-azureml/training/train-in-spark/train-in-spark.ipynb +++ b/how-to-use-azureml/training/train-in-spark/train-in-spark.ipynb @@ -144,25 +144,18 @@ "import os\n", "\n", "try:\n", - " # if you want to connect using SSH key instead of username/password you can provide parameters private_key_file and private_key_passphrase\n", - " attach_config = HDInsightCompute.attach_configuration(address=os.environ.get('hdiservername', '-ssh.azurehdinsight.net'), \n", - " ssh_port=22, \n", - " username=os.environ.get('hdiusername', ''), \n", + "# If you want to connect using SSH key instead of username/password you can provide parameters private_key_file and private_key_passphrase\n", + "\n", + "# 
Attaching a HDInsight cluster using the public address of the HDInsight cluster is no longer supported.\n", + "# Instead, use resourceId of the HDInsight cluster.\n", + "# The resourceId of the HDInsight Cluster can be constructed using the following string format:\n", + "# /subscriptions//resourceGroups//providers/Microsoft.HDInsight/clusters/.\n", + "# You can also use subscription_id, resource_group and cluster_name without constructing resourceId.\n", + " attach_config = HDInsightCompute.attach_configuration(resource_id='',\n", + " ssh_port=22,\n", + " username=os.environ.get('hdiusername', ''),\n", " password=os.environ.get('hdipassword', ''))\n", "\n", - "# The following Azure regions do not support attaching a HDI Cluster using the public IP address of the HDI Cluster.\n", - "# Instead, use the Azure Resource Manager ID of the HDI Cluster with the resource_id parameter:\n", - "# US East\n", - "# US West 2\n", - "# US South Central\n", - "# The resource ID of the HDI Cluster can be constructed using the\n", - "# subscription ID, resource group name, and cluster name using the following string format:\n", - "# /subscriptions//resourceGroups//providers/Microsoft.HDInsight/clusters/. 
\n", - "# If in US East, US West 2, or US South Central, use the following instead:\n", - "# attach_config = HDInsightCompute.attach_configuration(resource_id='',\n", - "# ssh_port=22,\n", - "# username=os.environ.get('hdiusername', ''),\n", - "# password=os.environ.get('hdipassword', ''))\n", " hdi_compute = ComputeTarget.attach(workspace=ws, \n", " name='myhdi', \n", " attach_configuration=attach_config)\n", diff --git a/how-to-use-azureml/training/train-on-remote-vm/train-on-remote-vm.ipynb b/how-to-use-azureml/training/train-on-remote-vm/train-on-remote-vm.ipynb index 6216378d..ff3d9369 100644 --- a/how-to-use-azureml/training/train-on-remote-vm/train-on-remote-vm.ipynb +++ b/how-to-use-azureml/training/train-on-remote-vm/train-on-remote-vm.ipynb @@ -268,23 +268,18 @@ " private_key_file='./.ssh/id_rsa')\n", "\n", "\n", - "# The following Azure regions do not support attaching a virtual machine using the public IP address of the VM.\n", - "# Instead, use the Azure Resource Manager ID of the VM with the resource_id parameter:\n", - "# US East\n", - "# US West 2\n", - "# US South Central\n", - "# The resource ID of the VM can be constructed using the\n", - "# subscription ID, resource group name, and VM name using the following string format:\n", - "# /subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/. 
\n", - "# If in US East, US West 2, or US South Central, use the following instead:\n", - "# attach_config = RemoteCompute.attach_configuration(resource_id='',\n", - "# ssh_port=22,\n", - "# username='username',\n", - "# private_key_file='./.ssh/id_rsa')\n", - "\n", - " attached_dsvm_compute = ComputeTarget.attach(workspace=ws,\n", - " name=compute_target_name,\n", - " attach_configuration=attach_config)\n", + "# Attaching a virtual machine using the public IP address of the VM is no longer supported.\n", + "# Instead, use resourceId of the VM.\n", + "# The resourceId of the VM can be constructed using the following string format:\n", + "# /subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/.\n", + "# You can also use subscription_id, resource_group and vm_name without constructing resourceId.\n", + " attach_config = RemoteCompute.attach_configuration(resource_id='',\n", + " ssh_port=22,\n", + " username='username',\n", + " private_key_file='./.ssh/id_rsa')\n", + " attached_dsvm_compute = ComputeTarget.attach(workspace=ws,\n", + " name=compute_target_name,\n", + " attach_configuration=attach_config)\n", " attached_dsvm_compute.wait_for_completion(show_output=True)" ] }, diff --git a/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb b/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb index 739fca0e..d7367798 100644 --- a/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb +++ b/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb @@ -279,7 +279,8 @@ " outputs=[prepared_fashion_ds],\n", " source_directory=script_folder,\n", " compute_target=compute_target,\n", - " runconfig=run_config)" + " runconfig=run_config,\n", + " allow_reuse=False)" ] }, { diff --git a/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.yml 
b/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.yml index e6b3df70..f33e9474 100644 --- a/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.yml +++ b/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.yml @@ -2,6 +2,5 @@ name: pipeline-for-image-classification dependencies: - pip: - azureml-sdk - - azureml-dataprep - pandas<=0.23.4 - fuse diff --git a/how-to-use-azureml/work-with-data/datasets-tutorial/timeseries-datasets/tabular-timeseries-dataset-filtering.yml b/how-to-use-azureml/work-with-data/datasets-tutorial/timeseries-datasets/tabular-timeseries-dataset-filtering.yml index a3471ade..af9acab3 100644 --- a/how-to-use-azureml/work-with-data/datasets-tutorial/timeseries-datasets/tabular-timeseries-dataset-filtering.yml +++ b/how-to-use-azureml/work-with-data/datasets-tutorial/timeseries-datasets/tabular-timeseries-dataset-filtering.yml @@ -2,5 +2,4 @@ name: tabular-timeseries-dataset-filtering dependencies: - pip: - azureml-sdk - - azureml-dataprep - pandas<=0.23.4 diff --git a/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.yml b/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.yml index 4f490f41..d13f92dc 100644 --- a/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.yml +++ b/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.yml @@ -3,7 +3,6 @@ dependencies: - pip: - azureml-sdk - azureml-widgets - - azureml-dataprep - pandas<=0.23.4 - fuse - scikit-learn diff --git a/index.md b/index.md index 383e6669..215a8461 100644 --- a/index.md +++ b/index.md @@ -26,7 +26,7 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an | :star:[Datasets with ML 
Pipeline](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb) | Train | Fashion MNIST | Remote | None | Azure ML | Dataset, Pipeline, Estimator, ScriptRun | | :star:[Filtering data using Tabular Timeseiries Dataset related API](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/work-with-data/datasets-tutorial/timeseries-datasets/tabular-timeseries-dataset-filtering.ipynb) | Filtering | NOAA | Local | None | Azure ML | Dataset, Tabular Timeseries | | :star:[Train with Datasets (Tabular and File)](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.ipynb) | Train | Iris, Diabetes | Remote | None | Azure ML | Dataset, Estimator, ScriptRun | -| [Forecasting away from training data](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.ipynb) | Forecasting | None | Remote | None | Azure ML AutoML | Forecasting, Confidence Intervals | +| [Forecasting away from training data](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.ipynb) | Forecasting | None | Remote | None | Azure ML AutoML | Forecasting, Confidence Intervals | | [Automated ML run with basic edition features.](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb) | Classification | Bankmarketing | AML | ACI | None | featurization, explainability, remote_run, AutomatedML | | [Classification of credit card fraudulent transactions using Automated 
ML](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb) | Classification | Creditcard | AML Compute | None | None | remote_run, AutomatedML | | [Automated ML run with featurization and model explainability.](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb) | Regression | MachineData | AML | ACI | None | featurization, explainability, remote_run, AutomatedML | @@ -41,6 +41,7 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an | :star:[How to Setup a Schedule for a Published Pipeline](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-schedule-for-a-published-pipeline.ipynb) | Demonstrates the use of Schedules for Published Pipelines | Custom | AML Compute | None | Azure ML | None | | [How to setup a versioned Pipeline Endpoint](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-versioned-pipeline-endpoints.ipynb) | Demonstrates the use of PipelineEndpoint to run a specific version of the Published Pipeline | Custom | AML Compute | None | Azure ML | None | | :star:[How to use DataPath as a PipelineParameter](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-datapath-and-pipelineparameter.ipynb) | Demonstrates the use of DataPath as a PipelineParameter | Custom | AML Compute | None | Azure ML | None | +| :star:[How to use Dataset as a 
PipelineParameter](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.ipynb) | Demonstrates the use of Dataset as a PipelineParameter | Custom | AML Compute | None | Azure ML | None | | [How to use AdlaStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-adla-as-compute-target.ipynb) | Demonstrates the use of AdlaStep | Custom | Azure Data Lake Analytics | None | Azure ML | None | | :star:[How to use DatabricksStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-databricks-as-compute-target.ipynb) | Demonstrates the use of DatabricksStep | Custom | Azure Databricks | None | Azure ML, Azure Databricks | None | | :star:[How to use AutoMLStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-automated-machine-learning-step.ipynb) | Demonstrates the use of AutoMLStep | Custom | AML Compute | None | Automated Machine Learning | None | @@ -113,7 +114,6 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an | [onnx-model-register-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/onnx/onnx-model-register-and-deploy.ipynb) | | | | | | | | [production-deploy-to-aks](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb) | | | | | | | | [production-deploy-to-aks-gpu](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/production-deploy-to-aks-gpu/production-deploy-to-aks-gpu.ipynb) | 
| | | | | | -| [tensorflow-model-register-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/tensorflow/tensorflow-model-register-and-deploy.ipynb) | | | | | | | | [explain-model-on-amlcompute](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.ipynb) | | | | | | | | [save-retrieve-explanations-run-history](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/explain-model/azure-integration/run-history/save-retrieve-explanations-run-history.ipynb) | | | | | | | | [train-explain-model-locally-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-locally-and-deploy.ipynb) | | | | | | | @@ -123,7 +123,7 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an | [authentication-in-azureml](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/manage-azureml-service/authentication-in-azureml/authentication-in-azureml.ipynb) | | | | | | | | [pong_rllib](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb) | | | | | | | | [cartpole_ci](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/reinforcement-learning/cartpole-on-compute-instance/cartpole_ci.ipynb) | | | | | | | -| [cartpole_cc](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_cc.ipynb) | | | | | | | +| [cartpole_sc](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.ipynb) | | | | | | | | 
[minecraft](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/reinforcement-learning/minecraft-on-distributed-compute/minecraft.ipynb) | | | | | | | | [devenv_setup](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/reinforcement-learning/setup/devenv_setup.ipynb) | | | | | | | | [Logging APIs](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb) | Logging APIs and analyzing results | None | None | None | None | None | diff --git a/setup-environment/configuration.ipynb b/setup-environment/configuration.ipynb index 3f2eb123..e0e840da 100644 --- a/setup-environment/configuration.ipynb +++ b/setup-environment/configuration.ipynb @@ -102,7 +102,7 @@ "source": [ "import azureml.core\n", "\n", - "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/tutorials/create-first-ml-experiment/tutorial-1st-experiment-sdk-train.ipynb b/tutorials/create-first-ml-experiment/tutorial-1st-experiment-sdk-train.ipynb index 0c8d6334..01da453c 100644 --- a/tutorials/create-first-ml-experiment/tutorial-1st-experiment-sdk-train.ipynb +++ b/tutorials/create-first-ml-experiment/tutorial-1st-experiment-sdk-train.ipynb @@ -386,4 +386,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} +} \ No newline at end of file diff --git a/tutorials/machine-learning-pipelines-advanced/scripts/batch_scoring.py b/tutorials/machine-learning-pipelines-advanced/scripts/batch_scoring.py index 3b5e3dbc..f5f315e6 100644 --- a/tutorials/machine-learning-pipelines-advanced/scripts/batch_scoring.py +++ b/tutorials/machine-learning-pipelines-advanced/scripts/batch_scoring.py @@ -21,9 +21,10 @@ image_size = 299 num_channel = 3 -def get_class_label_dict(): 
+def get_class_label_dict(labels_dir): label = [] - proto_as_ascii_lines = tf.gfile.GFile("labels.txt").readlines() + labels_path = os.path.join(labels_dir, 'labels.txt') + proto_as_ascii_lines = tf.gfile.GFile(labels_path).readlines() for l in proto_as_ascii_lines: label.append(l.rstrip()) return label @@ -34,14 +35,10 @@ def init(): parser = argparse.ArgumentParser(description="Start a tensorflow model serving") parser.add_argument('--model_name', dest="model_name", required=True) - parser.add_argument('--labels_name', dest="labels_name", required=True) + parser.add_argument('--labels_dir', dest="labels_dir", required=True) args, _ = parser.parse_known_args() - workspace = Run.get_context(allow_offline=False).experiment.workspace - label_ds = Dataset.get_by_name(workspace=workspace, name=args.labels_name) - label_ds.download(target_path='.', overwrite=True) - - label_dict = get_class_label_dict() + label_dict = get_class_label_dict(args.labels_dir) classes_num = len(label_dict) with slim.arg_scope(inception_v3.inception_v3_arg_scope()): diff --git a/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb b/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb index f5b06fac..252e632d 100644 --- a/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb +++ b/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb @@ -20,14 +20,8 @@ "metadata": {}, "source": [ "# Use Azure Machine Learning Pipelines for batch prediction\n", - "\n", - "## Note\n", - "This notebook uses public preview functionality (ParallelRunStep). Please install azureml-contrib-pipeline-steps package before running this notebook.\n", - "\n", - "\n", "In this tutorial, you use Azure Machine Learning service pipelines to run a batch scoring image classification job. 
The example job uses the pre-trained [Inception-V3](https://arxiv.org/abs/1512.00567) CNN (convolutional neural network) Tensorflow model to classify unlabeled images. Machine learning pipelines optimize your workflow with speed, portability, and reuse so you can focus on your expertise, machine learning, rather than on infrastructure and automation. After building and publishing a pipeline, you can configure a REST endpoint to enable triggering the pipeline from any HTTP library on any platform.\n", "\n", - "\n", "In this tutorial, you learn the following tasks:\n", "\n", "> * Configure workspace and download sample data\n", @@ -38,7 +32,7 @@ "> * Build, run, and publish a pipeline\n", "> * Enable a REST endpoint for the pipeline\n", "\n", - "If you don\u00e2\u20ac\u2122t have an Azure subscription, create a free account before you begin. Try the [free or paid version of Azure Machine Learning service](https://aka.ms/AMLFree) today." + "If you don't have an Azure subscription, create a free account before you begin. Try the [free or paid version of Azure Machine Learning service](https://aka.ms/AMLFree) today." 
] }, { @@ -129,7 +123,7 @@ "from azureml.pipeline.core import PipelineData\n", "\n", "input_images = Dataset.File.from_files((batchscore_blob, \"batchscoring/images/\"))\n", - "label_ds = Dataset.File.from_files((batchscore_blob, \"batchscoring/labels/*.txt\"))\n", + "label_ds = Dataset.File.from_files((batchscore_blob, \"batchscoring/labels/\"))\n", "output_dir = PipelineData(name=\"scores\", \n", " datastore=def_data_store, \n", " output_path_on_compute=\"batchscoring/results\")" @@ -149,7 +143,7 @@ "outputs": [], "source": [ "input_images = input_images.register(workspace = ws, name = \"input_images\")\n", - "label_ds = label_ds.register(workspace = ws, name = \"label_ds\")" + "label_ds = label_ds.register(workspace = ws, name = \"label_ds\", create_new_version=True)" ] }, { @@ -260,7 +254,7 @@ "The script `batch_scoring.py` takes the following parameters, which get passed from the `ParallelRunStep` that you create later:\n", "\n", "- `--model_name`: the name of the model being used\n", - "- `--labels_name` : the name of the `Dataset` holding the `labels.txt` file \n", + "- `--labels_dir` : the directory path having the `labels.txt` file \n", "\n", "The pipelines infrastructure uses the `ArgumentParser` class to pass parameters into pipeline steps. For example, in the code below the first argument `--model_name` is given the property identifier `model_name`. In the `main()` function, this property is accessed using `Model.get_model_path(args.model_name)`." 
] @@ -296,7 +290,8 @@ "from azureml.core.conda_dependencies import CondaDependencies\n", "from azureml.core.runconfig import DEFAULT_GPU_IMAGE\n", "\n", - "cd = CondaDependencies.create(pip_packages=[\"tensorflow-gpu==1.15.2\", \"azureml-defaults\"])\n", + "cd = CondaDependencies.create(pip_packages=[\"tensorflow-gpu==1.15.2\",\n", + " \"azureml-core\", \"azureml-dataprep[fuse]\"])\n", "\n", "env = Environment(name=\"parallelenv\")\n", "env.python.conda_dependencies=cd\n", @@ -317,7 +312,7 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.contrib.pipeline.steps import ParallelRunConfig\n", + "from azureml.pipeline.steps import ParallelRunConfig\n", "\n", "parallel_run_config = ParallelRunConfig(\n", " environment=env,\n", @@ -356,18 +351,20 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.contrib.pipeline.steps import ParallelRunStep\n", + "from azureml.pipeline.steps import ParallelRunStep\n", "from datetime import datetime\n", "\n", "parallel_step_name = \"batchscoring-\" + datetime.now().strftime(\"%Y%m%d%H%M\")\n", "\n", + "label_config = label_ds.as_named_input(\"labels_input\")\n", + "\n", "batch_score_step = ParallelRunStep(\n", " name=parallel_step_name,\n", " inputs=[input_images.as_named_input(\"input_images\")],\n", " output=output_dir,\n", - " models=[model],\n", " arguments=[\"--model_name\", \"inception\",\n", - " \"--labels_name\", \"label_ds\"],\n", + " \"--labels_dir\", label_config],\n", + " side_inputs=[label_config],\n", " parallel_run_config=parallel_run_config,\n", " allow_reuse=False\n", ")" diff --git a/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.yml b/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.yml index 1e896b84..bb640269 100644 --- a/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.yml +++ 
b/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.yml @@ -3,7 +3,7 @@ dependencies: - pip: - azureml-sdk - azureml-pipeline-core - - azureml-contrib-pipeline-steps + - azureml-pipeline-steps - pandas - requests - azureml-widgets