Compare commits

...

16 Commits

Author SHA1 Message Date
vizhur
a7c3a0fdb8 update samples from Release-54 as a part of SDK release 2020-06-02 21:34:10 +00:00
Harneet Virk
6d11cdfa0a Merge pull request #984 from Azure/release_update/Release-53
update samples from Release-53 as a part of  SDK release
2020-05-26 19:59:58 -07:00
vizhur
11e8ed2bab update samples from Release-53 as a part of SDK release 2020-05-27 02:45:07 +00:00
Harneet Virk
12c06a4168 Merge pull request #978 from ahcan76/patch-1
Fix image paths in tutorial-1st-experiment-sdk-train.ipynb
2020-05-18 12:58:21 -07:00
ahcan76
1f75dc9725 Update tutorial-1st-experiment-sdk-train.ipynb
Fix the image path
2020-05-18 22:40:54 +03:00
Harneet Virk
1a1a42d525 Merge pull request #977 from Azure/release_update/Release-52
update samples from Release-52 as a part of  SDK release
2020-05-18 12:22:48 -07:00
vizhur
879a272a8d update samples from Release-52 as a part of SDK release 2020-05-18 19:21:05 +00:00
Harneet Virk
bc65bde097 Merge pull request #971 from Azure/release_update/Release-51
update samples from Release-51 as a part of  SDK release
2020-05-13 22:17:45 -07:00
vizhur
690bdfbdbe update samples from Release-51 as a part of SDK release 2020-05-14 05:03:47 +00:00
Harneet Virk
3c02bd8782 Merge pull request #967 from Azure/release_update/Release-50
update samples from Release-50 as a part of  SDK release
2020-05-12 19:57:40 -07:00
vizhur
5c14610a1c update samples from Release-50 as a part of SDK release 2020-05-13 02:45:40 +00:00
Harneet Virk
4e3afae6fb Merge pull request #965 from Azure/release_update/Release-49
update samples from Release-49 as a part of  SDK release
2020-05-11 19:25:28 -07:00
vizhur
a2144aa083 update samples from Release-49 as a part of SDK release 2020-05-12 02:24:34 +00:00
Harneet Virk
0e6334178f Merge pull request #963 from Azure/release_update/Release-46
update samples from Release-46 as a part of  SDK release
2020-05-11 14:49:34 -07:00
vizhur
4ec9178d22 update samples from Release-46 as a part of SDK release 2020-05-11 21:48:31 +00:00
Harneet Virk
2aa7c53b0c Merge pull request #962 from Azure/release_update_stablev2/Release-11
update samples from Release-11 as a part of 1.5.0 SDK stable release
2020-05-11 12:42:32 -07:00
133 changed files with 10466 additions and 6501 deletions

View File

@@ -40,6 +40,7 @@ The [How to use Azure ML](./how-to-use-azureml) folder contains specific example
- [Deployment](./how-to-use-azureml/deployment) - Examples showing how to deploy and manage machine learning models and solutions
- [Azure Databricks](./how-to-use-azureml/azure-databricks) - Examples showing how to use Azure ML with Azure Databricks
- [Monitor Models](./how-to-use-azureml/monitor-models) - Examples showing how to enable model monitoring services such as DataDrift
- [Reinforcement Learning](./how-to-use-azureml/reinforcement-learning) - Examples showing how to train reinforcement learning agents
---
## Documentation

View File

@@ -103,7 +103,7 @@
"source": [
"import azureml.core\n",
"\n",
"print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
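The bump above (repeated across the notebooks in this diff) follows the SDK's version-check pattern: print the version the notebook was authored against next to the installed one. A minimal sketch of an explicit guard, assuming only that `azureml-core` is installed; the `EXPECTED` constant is illustrative:

```python
# Hypothetical version guard mirroring the notebook cell above.
import azureml.core
from distutils.version import LooseVersion

EXPECTED = "1.6.0"  # version string printed by the updated cell
if LooseVersion(azureml.core.VERSION) < LooseVersion(EXPECTED):
    print("Installed SDK", azureml.core.VERSION, "is older than", EXPECTED,
          "- consider: pip install --upgrade azureml-sdk")
```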

View File

@@ -144,7 +144,7 @@ jupyter notebook
- Dataset: forecasting for a bike-sharing
- Example of training an automated ML forecasting model on multiple time-series
- [auto-ml-forecasting-function.ipynb](forecasting-high-frequency/auto-ml-forecasting-function.ipynb)
- [auto-ml-forecasting-function.ipynb](forecasting-forecast-function/auto-ml-forecasting-function.ipynb)
- Example of training an automated ML forecasting model on multiple time-series
- [auto-ml-forecasting-beer-remote.ipynb](forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb)

View File

@@ -1,4 +1,4 @@
name: automl_env_master
name: azure_automl
dependencies:
# The python interpreter version.
# Currently Azure ML only supports 3.5.2 and later.
@@ -12,7 +12,6 @@ dependencies:
- scipy==1.4.1
- scikit-learn>=0.19.0,<=0.20.3
- pandas>=0.22.0,<=0.23.4
- testpath=0.3.1
- py-xgboost<=0.90
- conda-forge::fbprophet==0.5
- pytorch::pytorch=1.4.0
@@ -20,14 +19,13 @@ dependencies:
- pip:
# Required packages for AzureML execution, history, and data preparation.
- --extra-index-url https://azuremlsdktestpypi.azureedge.net/sdk-release/master/588E708E0DF342C4A80BD954289657CF
- --extra-index-url https://dataprepdownloads.azureedge.net/pypi/weekly-rc-932B96D048E011E8B56608/latest/
- azureml-defaults<0.1.50
- azureml-dataprep[pandas]
- azureml-train-automl<0.1.50
- azureml-train<0.1.50
- azureml-widgets<0.1.50
- azureml-pipeline<0.1.50
- azureml-defaults
- azureml-train-automl
- azureml-train
- azureml-widgets
- azureml-pipeline
- pytorch-transformers==1.0.0
- spacy==2.1.8
- pyarrow==0.17.0
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
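The environment diffs above and below drop the test-feed index URLs and the `<0.1.50` caps in favor of released, unpinned azureml packages. A small sketch for checking that the remaining hard pins hold in the active environment, assuming setuptools' `pkg_resources` is available; the requirement list is illustrative:

```python
# Check a few exact pins from the environment file above.
import pkg_resources

for requirement in ["scipy==1.4.1", "spacy==2.1.8", "pyarrow==0.17.0"]:
    try:
        pkg_resources.require(requirement)  # raises on conflict or absence
        print(requirement, "satisfied")
    except Exception as exc:
        print(requirement, "->", exc)
```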

View File

@@ -12,20 +12,20 @@ dependencies:
- urllib3<1.24
- scipy==1.4.1
- scikit-learn>=0.19.0,<=0.20.3
- pandas>=0.22.0,<0.23.0
- py-xgboost<=0.80
- pandas>=0.22.0,<=0.23.4
- py-xgboost<=0.90
- conda-forge::fbprophet==0.5
- pytorch::pytorch=1.4.0
- cudatoolkit=10.1.243
- cudatoolkit=9.0
- pip:
# Required packages for AzureML execution, history, and data preparation.
- azureml-defaults
- azureml-dataprep[pandas]
- azureml-train-automl
- azureml-train
- azureml-widgets
- azureml-pipeline
- pytorch-transformers==1.0.0
- spacy==2.1.8
- pyarrow==0.17.0
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz

View File

@@ -105,7 +105,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -93,7 +93,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -97,7 +97,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -88,7 +88,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -114,7 +114,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -87,7 +87,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -510,16 +510,16 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.shared import constants, metrics\n",
"from azureml.automl.core.shared import constants\n",
"from azureml.automl.runtime.shared.score import scoring\n",
"from sklearn.metrics import mean_absolute_error, mean_squared_error\n",
"from matplotlib import pyplot as plt\n",
"\n",
"# use automl metrics module\n",
"scores = metrics.compute_metrics_regression(\n",
" df_all['predicted'],\n",
" df_all[target_column_name],\n",
" list(constants.Metric.SCALAR_REGRESSION_SET),\n",
" None, None, None)\n",
"scores = scoring.score_regression(\n",
" y_test=df_all[target_column_name],\n",
" y_pred=df_all['predicted'],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
"\n",
"print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items(): \n",

View File

@@ -97,7 +97,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -465,7 +465,7 @@
"metadata": {},
"source": [
"### Forecast Function\n",
"For forecasting, we will use the forecast function instead of the predict function. Using the predict method would result in getting predictions for EVERY horizon the forecaster can predict at. This is useful when training and evaluating the performance of the forecaster at various horizons, but the level of detail is excessive for normal use. Forecast function also can handle more complicated scenarios, see notebook on [high frequency forecasting](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.ipynb)."
"For forecasting, we will use the forecast function instead of the predict function. Using the predict method would result in getting predictions for EVERY horizon the forecaster can predict at. This is useful when training and evaluating the performance of the forecaster at various horizons, but the level of detail is excessive for normal use. Forecast function also can handle more complicated scenarios, see the [forecast function notebook](../forecasting-forecast-function/auto-ml-forecasting-function.ipynb)."
]
},
{
@@ -507,15 +507,15 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.shared import constants, metrics\n",
"from azureml.automl.core.shared import constants\n",
"from azureml.automl.runtime.shared.score import scoring\n",
"from matplotlib import pyplot as plt\n",
"\n",
"# use automl metrics module\n",
"scores = metrics.compute_metrics_regression(\n",
" df_all['predicted'],\n",
" df_all[target_column_name],\n",
" list(constants.Metric.SCALAR_REGRESSION_SET),\n",
" None, None, None)\n",
"scores = scoring.score_regression(\n",
" y_test=df_all[target_column_name],\n",
" y_pred=df_all['predicted'],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
"\n",
"print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items(): \n",
@@ -667,15 +667,15 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.shared import constants, metrics\n",
"from azureml.automl.core.shared import constants\n",
"from azureml.automl.runtime.shared.score import scoring\n",
"from matplotlib import pyplot as plt\n",
"\n",
"# use automl metrics module\n",
"scores = metrics.compute_metrics_regression(\n",
" df_all['predicted'],\n",
" df_all[target_column_name],\n",
" list(constants.Metric.SCALAR_REGRESSION_SET),\n",
" None, None, None)\n",
"scores = scoring.score_regression(\n",
" y_test=df_all[target_column_name],\n",
" y_pred=df_all['predicted'],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
"\n",
"print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items(): \n",

View File

@@ -35,7 +35,6 @@
"Terminology:\n",
"* forecast origin: the last period when the target value is known\n",
"* forecast periods(s): the period(s) for which the value of the target is desired.\n",
"* forecast horizon: the number of forecast periods\n",
"* lookback: how many past periods (before forecast origin) the model function depends on. The larger of number of lags and length of rolling window.\n",
"* prediction context: `lookback` periods immediately preceding the forecast origin\n",
"\n",
@@ -95,7 +94,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -720,6 +719,90 @@
"X_show[['date', 'grain', 'ext_predictor', '_automl_target_col']]\n",
"# prediction is in _automl_target_col"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Forecasting farther than the maximum horizon <a id=\"recursive forecasting\"></a>\n",
"When the forecast destination, or the latest date in the prediction data frame, is farther into the future than the specified maximum horizon, the `forecast()` function will still make point predictions out to the later date using a recursive operation mode. Internally, the method recursively applies the regular forecaster to generate context so that we can forecast further into the future. \n",
"\n",
"To illustrate the use-case and operation of recursive forecasting, we'll consider an example with a single time-series where the forecasting period directly follows the training period and is twice as long as the maximum horizon given at training time.\n",
"\n",
"![Recursive_forecast_overview](recursive_forecast_overview_small.png)\n",
"\n",
"Internally, we apply the forecaster in an iterative manner and finish the forecast task in two interations. In the first iteration, we apply the forecaster and get the prediction for the first max-horizon periods (y_pred1). In the second iteraction, y_pred1 is used as the context to produce the prediction for the next max-horizon periods (y_pred2). The combination of (y_pred1 and y_pred2) gives the results for the total forecast periods. \n",
"\n",
"A caveat: forecast accuracy will likely be worse the farther we predict into the future since errors are compounded with recursive application of the forecaster.\n",
"\n",
"![Recursive_forecast_iter1](recursive_forecast_iter1.png)\n",
"![Recursive_forecast_iter2](recursive_forecast_iter2.png)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# generate the same kind of test data we trained on, but with a single grain/time-series and test period twice as long as the max_horizon\n",
"_, _, X_test_long, y_test_long = get_timeseries(train_len=n_train_periods,\n",
" test_len=max_horizon*2,\n",
" time_column_name=TIME_COLUMN_NAME,\n",
" target_column_name=TARGET_COLUMN_NAME,\n",
" grain_column_name=GRAIN_COLUMN_NAME,\n",
" grains=1)\n",
"\n",
"print(X_test_long.groupby(GRAIN_COLUMN_NAME)[TIME_COLUMN_NAME].min())\n",
"print(X_test_long.groupby(GRAIN_COLUMN_NAME)[TIME_COLUMN_NAME].max())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# forecast() function will invoke the recursive forecast method internally.\n",
"y_pred_long, X_trans_long = fitted_model.forecast(X_test_long)\n",
"y_pred_long"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# What forecast() function does in this case is equivalent to iterating it twice over the test set as the following. \n",
"y_pred1, _ = fitted_model.forecast(X_test_long[:max_horizon])\n",
"y_pred_all, _ = fitted_model.forecast(X_test_long, np.concatenate((y_pred1, np.full(max_horizon, np.nan))))\n",
"np.array_equal(y_pred_all, y_pred_long)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Confidence interval and distributional forecasts\n",
"AutoML cannot currently estimate forecast errors beyond the maximum horizon set during training, so the `forecast_quantiles()` function will return missing values for quantiles not equal to 0.5 beyond the maximum horizon. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fitted_model.forecast_quantiles(X_test_long)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Similarly with the simple senarios illustrated above, forecasting farther than the max horizon in other senarios like 'multiple grain', 'Destination-date forecast', and 'forecast away from the training data' are also automatically handled by the `forecast()` function. "
]
}
],
"metadata": {

Two binary image files added (the recursive-forecast diagrams referenced above; 26 KiB and 30 KiB), not shown.

View File

@@ -82,7 +82,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -545,7 +545,7 @@
"source": [
"If you are used to scikit pipelines, perhaps you expected `predict(X_test)`. However, forecasting requires a more general interface that also supplies the past target `y` values. Please use `forecast(X,y)` as `predict(X)` is reserved for internal purposes on forecasting models.\n",
"\n",
"The [forecast function notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.ipynb) demonstrates the use of the forecast function for a variety of use cases. Also, please see the [API documentation for the forecast function](https://docs.microsoft.com/en-us/python/api/azureml-automl-runtime/azureml.automl.runtime.shared.model_wrappers.forecastingpipelinewrapper?view=azure-ml-py#forecast-x-pred--typing-union-pandas-core-frame-dataframe--nonetype----none--y-pred--typing-union-pandas-core-frame-dataframe--numpy-ndarray--nonetype----none--forecast-destination--typing-union-pandas--libs-tslibs-timestamps-timestamp--nonetype----none--ignore-data-errors--bool---false-----typing-tuple-numpy-ndarray--pandas-core-frame-dataframe-)."
"The [forecast function notebook](../forecasting-forecast-function/auto-ml-forecasting-function.ipynb)."
]
},
{
@@ -576,15 +576,15 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.shared import constants, metrics\n",
"from azureml.automl.core.shared import constants\n",
"from azureml.automl.runtime.shared.score import scoring\n",
"from matplotlib import pyplot as plt\n",
"\n",
"# use automl metrics module\n",
"scores = metrics.compute_metrics_regression(\n",
" df_all['predicted'],\n",
" df_all[target_column_name],\n",
" list(constants.Metric.SCALAR_REGRESSION_SET),\n",
" None, None, None)\n",
"# use automl scoring module\n",
"scores = scoring.score_regression(\n",
" y_test=df_all[target_column_name],\n",
" y_pred=df_all['predicted'],\n",
" metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
"\n",
"print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items(): \n",

View File

@@ -95,7 +95,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -98,7 +98,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -7,7 +7,7 @@ import azureml.train.automl
import azureml.explain.model
from azureml.train.automl.runtime.automl_explain_utilities import AutoMLExplainerSetupClass, \
automl_setup_model_explanations
from sklearn.externals import joblib
import joblib
from azureml.core.model import Model

View File

@@ -4,15 +4,14 @@ import os
from azureml.core.run import Run
from azureml.core.experiment import Experiment
from sklearn.externals import joblib
from azureml.core.dataset import Dataset
from azureml.train.automl.runtime.automl_explain_utilities import AutoMLExplainerSetupClass, \
automl_setup_model_explanations, automl_check_model_if_explainable
from azureml.explain.model.mimic.models.lightgbm_model import LGBMExplainableModel
from azureml.explain.model.mimic_wrapper import MimicWrapper
from azureml.automl.core.shared.constants import MODEL_PATH
from azureml.explain.model.scoring.scoring_explainer import TreeScoringExplainer, save
from azureml.explain.model.scoring.scoring_explainer import TreeScoringExplainer
import joblib
OUTPUT_DIR = './outputs/'
os.makedirs(OUTPUT_DIR, exist_ok=True)
@@ -74,7 +73,8 @@ print("Engineered and raw explanations computed successfully")
scoring_explainer = TreeScoringExplainer(explainer.explainer, feature_maps=[automl_explainer_setup_obj.feature_map])
# Pickle scoring explainer locally
save(scoring_explainer, exist_ok=True)
with open('scoring_explainer.pkl', 'wb') as stream:
joblib.dump(scoring_explainer, stream)
# Upload the scoring explainer to the automl run
automl_run.upload_file('outputs/scoring_explainer.pkl', 'scoring_explainer.pkl')
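The script above now persists the explainer with the standalone `joblib` package instead of the removed `save()` helper and the deprecated `sklearn.externals.joblib` import. A matching sketch for loading it back under the same assumption:

```python
# Reload the explainer written by the script above.
import joblib

with open('scoring_explainer.pkl', 'rb') as stream:
    scoring_explainer = joblib.load(stream)
```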

View File

@@ -92,7 +92,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -1,23 +0,0 @@
-- This shows how to use the AutoMLForecast stored procedure to predict with a forecasting model for the nyc_energy dataset.
DECLARE @Model NVARCHAR(MAX) = (SELECT TOP 1 Model FROM dbo.aml_model
WHERE ExperimentName = 'automl-sql-forecast'
ORDER BY CreatedDate DESC)
DECLARE @max_horizon INT = 48
DECLARE @split_time NVARCHAR(22) = (SELECT DATEADD(hour, -@max_horizon, MAX(timeStamp)) FROM nyc_energy WHERE demand IS NOT NULL)
DECLARE @TestDataQuery NVARCHAR(MAX) = '
SELECT CAST(timeStamp AS NVARCHAR(30)) AS timeStamp,
demand,
precip,
temp
FROM nyc_energy
WHERE demand IS NOT NULL AND precip IS NOT NULL AND temp IS NOT NULL
AND timeStamp > ''' + @split_time + ''''
EXEC dbo.AutoMLForecast @input_query=@TestDataQuery,
@label_column='demand',
@time_column_name='timeStamp',
@model=@model
WITH RESULT SETS ((timeStamp DATETIME, grain NVARCHAR(255), predicted_demand FLOAT, precip FLOAT, temp FLOAT, actual_demand FLOAT))

View File

@@ -1,10 +0,0 @@
-- This lists all the metrics for all iterations for the most recent run.
DECLARE @RunId NVARCHAR(43)
DECLARE @ExperimentName NVARCHAR(255)
SELECT TOP 1 @ExperimentName=ExperimentName, @RunId=SUBSTRING(RunId, 1, 43)
FROM aml_model
ORDER BY CreatedDate DESC
EXEC dbo.AutoMLGetMetrics @RunId, @ExperimentName

View File

@@ -1,25 +0,0 @@
-- This shows how to use the AutoMLTrain stored procedure to create a forecasting model for the nyc_energy dataset.
DECLARE @max_horizon INT = 48
DECLARE @split_time NVARCHAR(22) = (SELECT DATEADD(hour, -@max_horizon, MAX(timeStamp)) FROM nyc_energy WHERE demand IS NOT NULL)
DECLARE @TrainDataQuery NVARCHAR(MAX) = '
SELECT CAST(timeStamp as NVARCHAR(30)) as timeStamp,
demand,
precip,
temp
FROM nyc_energy
WHERE demand IS NOT NULL AND precip IS NOT NULL AND temp IS NOT NULL
and timeStamp < ''' + @split_time + ''''
INSERT INTO dbo.aml_model(RunId, ExperimentName, Model, LogFileText, WorkspaceName)
EXEC dbo.AutoMLTrain @input_query= @TrainDataQuery,
@label_column='demand',
@task='forecasting',
@iterations=10,
@iteration_timeout_minutes=5,
@time_column_name='timeStamp',
@max_horizon=@max_horizon,
@experiment_name='automl-sql-forecast',
@primary_metric='normalized_root_mean_squared_error'

View File

@@ -1,161 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Train a model and use it for prediction\r\n",
"\r\n",
"Before running this notebook, run the auto-ml-sql-setup.ipynb notebook."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/sql-server/energy-demand/auto-ml-sql-energy-demand.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Set the default database"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"USE [automl]\r\n",
"GO"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Use the AutoMLTrain stored procedure to create a forecasting model for the nyc_energy dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"INSERT INTO dbo.aml_model(RunId, ExperimentName, Model, LogFileText, WorkspaceName)\r\n",
"EXEC dbo.AutoMLTrain @input_query='\r\n",
"SELECT CAST(timeStamp as NVARCHAR(30)) as timeStamp,\r\n",
" demand,\r\n",
"\t precip,\r\n",
"\t temp,\r\n",
"\t CASE WHEN timeStamp < ''2017-01-01'' THEN 0 ELSE 1 END AS is_validate_column\r\n",
"FROM nyc_energy\r\n",
"WHERE demand IS NOT NULL AND precip IS NOT NULL AND temp IS NOT NULL\r\n",
"and timeStamp < ''2017-02-01''',\r\n",
"@label_column='demand',\r\n",
"@task='forecasting',\r\n",
"@iterations=10,\r\n",
"@iteration_timeout_minutes=5,\r\n",
"@time_column_name='timeStamp',\r\n",
"@is_validate_column='is_validate_column',\r\n",
"@experiment_name='automl-sql-forecast',\r\n",
"@primary_metric='normalized_root_mean_squared_error'"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Use the AutoMLPredict stored procedure to predict using the forecasting model for the nyc_energy dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"DECLARE @Model NVARCHAR(MAX) = (SELECT TOP 1 Model FROM dbo.aml_model\r\n",
" WHERE ExperimentName = 'automl-sql-forecast'\r\n",
"\t\t\t\t\t\t\t\tORDER BY CreatedDate DESC)\r\n",
"\r\n",
"EXEC dbo.AutoMLPredict @input_query='\r\n",
"SELECT CAST(timeStamp AS NVARCHAR(30)) AS timeStamp,\r\n",
" demand,\r\n",
"\t precip,\r\n",
"\t temp\r\n",
"FROM nyc_energy\r\n",
"WHERE demand IS NOT NULL AND precip IS NOT NULL AND temp IS NOT NULL\r\n",
"AND timeStamp >= ''2017-02-01''',\r\n",
"@label_column='demand',\r\n",
"@model=@model\r\n",
"WITH RESULT SETS ((timeStamp NVARCHAR(30), actual_demand FLOAT, precip FLOAT, temp FLOAT, predicted_demand FLOAT))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## List all the metrics for all iterations for the most recent training run."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"DECLARE @RunId NVARCHAR(43)\r\n",
"DECLARE @ExperimentName NVARCHAR(255)\r\n",
"\r\n",
"SELECT TOP 1 @ExperimentName=ExperimentName, @RunId=SUBSTRING(RunId, 1, 43)\r\n",
"FROM aml_model\r\n",
"ORDER BY CreatedDate DESC\r\n",
"\r\n",
"EXEC dbo.AutoMLGetMetrics @RunId, @ExperimentName"
]
}
],
"metadata": {
"authors": [
{
"name": "jeffshep"
}
],
"category": "tutorial",
"compute": [
"Local"
],
"datasets": [
"NYC Energy"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"Azure ML AutoML"
],
"tags": [
""
],
"friendly_name": "Forecasting with automated ML SQL integration",
"index_order": 1,
"kernelspec": {
"display_name": "Python 3.6",
"language": "sql",
"name": "python36"
},
"language_info": {
"name": "sql",
"version": ""
},
"task": "Forecasting"
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,92 +0,0 @@
-- This procedure forecasts values based on a forecasting model returned by AutoMLTrain.
-- It returns a dataset with the forecasted values.
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
CREATE OR ALTER PROCEDURE [dbo].[AutoMLForecast]
(
@input_query NVARCHAR(MAX), -- A SQL query returning data to predict on.
@model NVARCHAR(MAX), -- A model returned from AutoMLTrain.
@time_column_name NVARCHAR(255)='', -- The name of the timestamp column for forecasting.
@label_column NVARCHAR(255)='', -- Optional name of the column from input_query, which should be ignored when predicting
@y_query_column NVARCHAR(255)='', -- Optional value column that can be used for predicting.
-- If specified, this can contain values for past times (after the model was trained)
-- and contain NaN for future times.
@forecast_column_name NVARCHAR(255) = 'predicted'
-- The name of the output column containing the forecast value.
) AS
BEGIN
EXEC sp_execute_external_script @language = N'Python', @script = N'import pandas as pd
import azureml.core
import numpy as np
from azureml.train.automl import AutoMLConfig
import pickle
import codecs
model_obj = pickle.loads(codecs.decode(model.encode(), "base64"))
test_data = input_data.copy()
if label_column != "" and label_column is not None:
y_test = test_data.pop(label_column).values
else:
y_test = None
if y_query_column != "" and y_query_column is not None:
y_query = test_data.pop(y_query_column).values
else:
y_query = np.repeat(np.nan, len(test_data))
X_test = test_data
if time_column_name != "" and time_column_name is not None:
X_test[time_column_name] = pd.to_datetime(X_test[time_column_name])
y_fcst, X_trans = model_obj.forecast(X_test, y_query)
def align_outputs(y_forecast, X_trans, X_test, y_test, forecast_column_name):
# Demonstrates how to get the output aligned to the inputs
# using pandas indexes. Helps understand what happened if
# the output shape differs from the input shape, or if
# the data got re-sorted by time and grain during forecasting.
# Typical causes of misalignment are:
# * we predicted some periods that were missing in actuals -> drop from eval
# * model was asked to predict past max_horizon -> increase max horizon
# * data at start of X_test was needed for lags -> provide previous periods
df_fcst = pd.DataFrame({forecast_column_name : y_forecast})
# y and X outputs are aligned by forecast() function contract
df_fcst.index = X_trans.index
# align original X_test to y_test
X_test_full = X_test.copy()
if y_test is not None:
X_test_full[label_column] = y_test
# X_test_full does not include origin, so reset for merge
df_fcst.reset_index(inplace=True)
X_test_full = X_test_full.reset_index().drop(columns=''index'')
together = df_fcst.merge(X_test_full, how=''right'')
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = together[together[[label_column, forecast_column_name]].notnull().all(axis=1)]
return(clean)
combined_output = align_outputs(y_fcst, X_trans, X_test, y_test, forecast_column_name)
'
, @input_data_1 = @input_query
, @input_data_1_name = N'input_data'
, @output_data_1_name = N'combined_output'
, @params = N'@model NVARCHAR(MAX), @time_column_name NVARCHAR(255), @label_column NVARCHAR(255), @y_query_column NVARCHAR(255), @forecast_column_name NVARCHAR(255)'
, @model = @model
, @time_column_name = @time_column_name
, @label_column = @label_column
, @y_query_column = @y_query_column
, @forecast_column_name = @forecast_column_name
END
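The procedure above (like AutoMLPredict below, and AutoMLTrain, which produces the model) moves the fitted model through SQL as a base64-encoded pickle in an NVARCHAR(MAX) column. The round-trip it relies on, sketched in plain Python with a stand-in object; any picklable estimator would behave the same:

```python
# Base64 pickle round-trip used by the AutoML stored procedures.
import codecs
import pickle

model = {"demo": "stand-in for a fitted model"}
encoded = codecs.encode(pickle.dumps(model), "base64").decode()     # store as NVARCHAR
restored = pickle.loads(codecs.decode(encoded.encode(), "base64"))  # load back
assert restored == model
```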

View File

@@ -1,70 +0,0 @@
-- This procedure returns a list of metrics for each iteration of a run.
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
CREATE OR ALTER PROCEDURE [dbo].[AutoMLGetMetrics]
(
@run_id NVARCHAR(250), -- The RunId
@experiment_name NVARCHAR(32)='automl-sql-test', -- This can be used to find the experiment in the Azure Portal.
@connection_name NVARCHAR(255)='default' -- The AML connection to use.
) AS
BEGIN
DECLARE @tenantid NVARCHAR(255)
DECLARE @appid NVARCHAR(255)
DECLARE @password NVARCHAR(255)
DECLARE @config_file NVARCHAR(255)
SELECT @tenantid=TenantId, @appid=AppId, @password=Password, @config_file=ConfigFile
FROM aml_connection
WHERE ConnectionName = @connection_name;
EXEC sp_execute_external_script @language = N'Python', @script = N'import pandas as pd
import logging
import azureml.core
import numpy as np
from azureml.core.experiment import Experiment
from azureml.train.automl.run import AutoMLRun
from azureml.core.authentication import ServicePrincipalAuthentication
from azureml.core.workspace import Workspace
auth = ServicePrincipalAuthentication(tenantid, appid, password)
ws = Workspace.from_config(path=config_file, auth=auth)
experiment = Experiment(ws, experiment_name)
ml_run = AutoMLRun(experiment = experiment, run_id = run_id)
children = list(ml_run.get_children())
iterationlist = []
metricnamelist = []
metricvaluelist = []
for run in children:
properties = run.get_properties()
if "iteration" in properties:
iteration = int(properties["iteration"])
for metric_name, metric_value in run.get_metrics().items():
if isinstance(metric_value, float):
iterationlist.append(iteration)
metricnamelist.append(metric_name)
metricvaluelist.append(metric_value)
metrics = pd.DataFrame({"iteration": iterationlist, "metric_name": metricnamelist, "metric_value": metricvaluelist})
'
, @output_data_1_name = N'metrics'
, @params = N'@run_id NVARCHAR(250),
@experiment_name NVARCHAR(32),
@tenantid NVARCHAR(255),
@appid NVARCHAR(255),
@password NVARCHAR(255),
@config_file NVARCHAR(255)'
, @run_id = @run_id
, @experiment_name = @experiment_name
, @tenantid = @tenantid
, @appid = @appid
, @password = @password
, @config_file = @config_file
WITH RESULT SETS ((iteration INT, metric_name NVARCHAR(100), metric_value FLOAT))
END

View File

@@ -1,41 +0,0 @@
-- This procedure predicts values based on a model returned by AutoMLTrain and a dataset.
-- It returns the dataset with a new column added, which is the predicted value.
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
CREATE OR ALTER PROCEDURE [dbo].[AutoMLPredict]
(
@input_query NVARCHAR(MAX), -- A SQL query returning data to predict on.
@model NVARCHAR(MAX), -- A model returned from AutoMLTrain.
@label_column NVARCHAR(255)='' -- Optional name of the column from input_query, which should be ignored when predicting
) AS
BEGIN
EXEC sp_execute_external_script @language = N'Python', @script = N'import pandas as pd
import azureml.core
import numpy as np
from azureml.train.automl import AutoMLConfig
import pickle
import codecs
model_obj = pickle.loads(codecs.decode(model.encode(), "base64"))
test_data = input_data.copy()
if label_column != "" and label_column is not None:
y_test = test_data.pop(label_column).values
X_test = test_data
predicted = model_obj.predict(X_test)
combined_output = input_data.assign(predicted=predicted)
'
, @input_data_1 = @input_query
, @input_data_1_name = N'input_data'
, @output_data_1_name = N'combined_output'
, @params = N'@model NVARCHAR(MAX), @label_column NVARCHAR(255)'
, @model = @model
, @label_column = @label_column
END

View File

@@ -1,240 +0,0 @@
-- This stored procedure uses automated machine learning to train several models
-- and returns the best model.
--
-- The result set has several columns:
-- best_run - iteration ID for the best model
-- experiment_name - experiment name passed in with the @experiment_name parameter
-- fitted_model - best model found
-- log_file_text - AutoML debug_log contents
-- workspace - name of the Azure ML workspace where run history is stored
--
-- An example call for a classification problem is:
-- insert into dbo.aml_model(RunId, ExperimentName, Model, LogFileText, WorkspaceName)
-- exec dbo.AutoMLTrain @input_query='
-- SELECT top 100000
-- CAST([pickup_datetime] AS NVARCHAR(30)) AS pickup_datetime
-- ,CAST([dropoff_datetime] AS NVARCHAR(30)) AS dropoff_datetime
-- ,[passenger_count]
-- ,[trip_time_in_secs]
-- ,[trip_distance]
-- ,[payment_type]
-- ,[tip_class]
-- FROM [dbo].[nyctaxi_sample] order by [hack_license] ',
-- @label_column = 'tip_class',
-- @iterations=10
--
-- An example call for forecasting is:
-- insert into dbo.aml_model(RunId, ExperimentName, Model, LogFileText, WorkspaceName)
-- exec dbo.AutoMLTrain @input_query='
-- select cast(timeStamp as nvarchar(30)) as timeStamp,
-- demand,
-- precip,
-- temp,
-- case when timeStamp < ''2017-01-01'' then 0 else 1 end as is_validate_column
-- from nyc_energy
-- where demand is not null and precip is not null and temp is not null
-- and timeStamp < ''2017-02-01''',
-- @label_column='demand',
-- @task='forecasting',
-- @iterations=10,
-- @iteration_timeout_minutes=5,
-- @time_column_name='timeStamp',
-- @is_validate_column='is_validate_column',
-- @experiment_name='automl-sql-forecast',
-- @primary_metric='normalized_root_mean_squared_error'
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
CREATE OR ALTER PROCEDURE [dbo].[AutoMLTrain]
(
@input_query NVARCHAR(MAX), -- The SQL Query that will return the data to train and validate the model.
@label_column NVARCHAR(255)='Label', -- The name of the column in the result of @input_query that is the label.
@primary_metric NVARCHAR(40)='AUC_weighted', -- The metric to optimize.
@iterations INT=100, -- The maximum number of pipelines to train.
@task NVARCHAR(40)='classification', -- The type of task. Can be classification, regression or forecasting.
@experiment_name NVARCHAR(32)='automl-sql-test', -- This can be used to find the experiment in the Azure Portal.
@iteration_timeout_minutes INT = 15, -- The maximum time in minutes for training a single pipeline.
@experiment_timeout_hours FLOAT = 1, -- The maximum time in hours for training all pipelines.
@n_cross_validations INT = 3, -- The number of cross validations.
@blacklist_models NVARCHAR(MAX) = '', -- A comma separated list of algos that will not be used.
-- The list of possible models can be found at:
-- https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#configure-your-experiment-settings
@whitelist_models NVARCHAR(MAX) = '', -- A comma separated list of algos that can be used.
-- The list of possible models can be found at:
-- https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#configure-your-experiment-settings
@experiment_exit_score FLOAT = 0, -- Stop the experiment if this score is achieved.
@sample_weight_column NVARCHAR(255)='', -- The name of the column in the result of @input_query that gives a sample weight.
@is_validate_column NVARCHAR(255)='', -- The name of the column in the result of @input_query that indicates if the row is for training or validation.
-- In the values of the column, 0 means for training and 1 means for validation.
@time_column_name NVARCHAR(255)='', -- The name of the timestamp column for forecasting.
@connection_name NVARCHAR(255)='default', -- The AML connection to use.
@max_horizon INT = 0 -- A forecast horizon is a time span into the future (or just beyond the latest date in the training data)
-- where forecasts of the target quantity are needed.
-- For example, if data is recorded daily and max_horizon is 5, we will predict 5 days ahead.
) AS
BEGIN
DECLARE @tenantid NVARCHAR(255)
DECLARE @appid NVARCHAR(255)
DECLARE @password NVARCHAR(255)
DECLARE @config_file NVARCHAR(255)
SELECT @tenantid=TenantId, @appid=AppId, @password=Password, @config_file=ConfigFile
FROM aml_connection
WHERE ConnectionName = @connection_name;
EXEC sp_execute_external_script @language = N'Python', @script = N'import pandas as pd
import logging
import azureml.core
import pandas as pd
import numpy as np
from azureml.core.experiment import Experiment
from azureml.train.automl import AutoMLConfig
from sklearn import datasets
import pickle
import codecs
from azureml.core.authentication import ServicePrincipalAuthentication
from azureml.core.workspace import Workspace
if __name__.startswith("sqlindb"):
auth = ServicePrincipalAuthentication(tenantid, appid, password)
ws = Workspace.from_config(path=config_file, auth=auth)
project_folder = "./sample_projects/" + experiment_name
experiment = Experiment(ws, experiment_name)
data_train = input_data
X_valid = None
y_valid = None
sample_weight_valid = None
if is_validate_column != "" and is_validate_column is not None:
data_train = input_data[input_data[is_validate_column] <= 0]
data_valid = input_data[input_data[is_validate_column] > 0]
data_train.pop(is_validate_column)
data_valid.pop(is_validate_column)
y_valid = data_valid.pop(label_column).values
if sample_weight_column != "" and sample_weight_column is not None:
sample_weight_valid = data_valid.pop(sample_weight_column).values
X_valid = data_valid
n_cross_validations = None
y_train = data_train.pop(label_column).values
sample_weight = None
if sample_weight_column != "" and sample_weight_column is not None:
sample_weight = data_train.pop(sample_weight_column).values
X_train = data_train
if experiment_timeout_hours == 0:
experiment_timeout_hours = None
if experiment_exit_score == 0:
experiment_exit_score = None
if blacklist_models == "":
blacklist_models = None
if blacklist_models is not None:
blacklist_models = blacklist_models.replace(" ", "").split(",")
if whitelist_models == "":
whitelist_models = None
if whitelist_models is not None:
whitelist_models = whitelist_models.replace(" ", "").split(",")
automl_settings = {}
preprocess = True
if time_column_name != "" and time_column_name is not None:
automl_settings = { "time_column_name": time_column_name }
preprocess = False
if max_horizon > 0:
automl_settings["max_horizon"] = max_horizon
log_file_name = "automl_sqlindb_errors.log"
automl_config = AutoMLConfig(task = task,
debug_log = log_file_name,
primary_metric = primary_metric,
iteration_timeout_minutes = iteration_timeout_minutes,
experiment_timeout_hours = experiment_timeout_hours,
iterations = iterations,
n_cross_validations = n_cross_validations,
preprocess = preprocess,
verbosity = logging.INFO,
X = X_train,
y = y_train,
path = project_folder,
blacklist_models = blacklist_models,
whitelist_models = whitelist_models,
experiment_exit_score = experiment_exit_score,
sample_weight = sample_weight,
X_valid = X_valid,
y_valid = y_valid,
sample_weight_valid = sample_weight_valid,
**automl_settings)
local_run = experiment.submit(automl_config, show_output = True)
best_run, fitted_model = local_run.get_output()
pickled_model = codecs.encode(pickle.dumps(fitted_model), "base64").decode()
log_file_text = ""
try:
with open(log_file_name, "r") as log_file:
log_file_text = log_file.read()
except:
log_file_text = "Log file not found"
returned_model = pd.DataFrame({"best_run": [best_run.id], "experiment_name": [experiment_name], "fitted_model": [pickled_model], "log_file_text": [log_file_text], "workspace": [ws.name]}, dtype=np.dtype(np.str))
'
, @input_data_1 = @input_query
, @input_data_1_name = N'input_data'
, @output_data_1_name = N'returned_model'
, @params = N'@label_column NVARCHAR(255),
@primary_metric NVARCHAR(40),
@iterations INT, @task NVARCHAR(40),
@experiment_name NVARCHAR(32),
@iteration_timeout_minutes INT,
@experiment_timeout_hours FLOAT,
@n_cross_validations INT,
@blacklist_models NVARCHAR(MAX),
@whitelist_models NVARCHAR(MAX),
@experiment_exit_score FLOAT,
@sample_weight_column NVARCHAR(255),
@is_validate_column NVARCHAR(255),
@time_column_name NVARCHAR(255),
@tenantid NVARCHAR(255),
@appid NVARCHAR(255),
@password NVARCHAR(255),
@config_file NVARCHAR(255),
@max_horizon INT'
, @label_column = @label_column
, @primary_metric = @primary_metric
, @iterations = @iterations
, @task = @task
, @experiment_name = @experiment_name
, @iteration_timeout_minutes = @iteration_timeout_minutes
, @experiment_timeout_hours = @experiment_timeout_hours
, @n_cross_validations = @n_cross_validations
, @blacklist_models = @blacklist_models
, @whitelist_models = @whitelist_models
, @experiment_exit_score = @experiment_exit_score
, @sample_weight_column = @sample_weight_column
, @is_validate_column = @is_validate_column
, @time_column_name = @time_column_name
, @tenantid = @tenantid
, @appid = @appid
, @password = @password
, @config_file = @config_file
, @max_horizon = @max_horizon
WITH RESULT SETS ((best_run NVARCHAR(250), experiment_name NVARCHAR(100), fitted_model VARCHAR(MAX), log_file_text NVARCHAR(MAX), workspace NVARCHAR(100)))
END

View File

@@ -1,18 +0,0 @@
-- This is a table to store the Azure ML connection information.
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
CREATE TABLE [dbo].[aml_connection](
[Id] [int] IDENTITY(1,1) NOT NULL PRIMARY KEY,
[ConnectionName] [nvarchar](255) NULL,
[TenantId] [nvarchar](255) NULL,
[AppId] [nvarchar](255) NULL,
[Password] [nvarchar](255) NULL,
[ConfigFile] [nvarchar](255) NULL
) ON [PRIMARY]
GO

View File

@@ -1,22 +0,0 @@
-- This is a table to hold the results from the AutoMLTrain procedure.
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
CREATE TABLE [dbo].[aml_model](
[Id] [int] IDENTITY(1,1) NOT NULL PRIMARY KEY,
[Model] [varchar](max) NOT NULL, -- The model, which can be passed to AutoMLPredict for testing or prediction.
[RunId] [nvarchar](250) NULL, -- The RunId, which can be used to view the model in the Azure Portal.
[CreatedDate] [datetime] NULL,
[ExperimentName] [nvarchar](100) NULL, -- Azure ML Experiment Name
[WorkspaceName] [nvarchar](100) NULL, -- Azure ML Workspace Name
[LogFileText] [nvarchar](max) NULL
)
GO
ALTER TABLE [dbo].[aml_model] ADD DEFAULT (getutcdate()) FOR [CreatedDate]
GO

View File

@@ -1,581 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Set up Azure ML Automated Machine Learning on SQL Server 2019 CTP 2.4 big data cluster\r\n",
"\r\n",
"\\# Prerequisites: \r\n",
"\\# - An Azure subscription and resource group \r\n",
"\\# - An Azure Machine Learning workspace \r\n",
"\\# - A SQL Server 2019 CTP 2.4 big data cluster with Internet access and a database named 'automl' \r\n",
"\\# - Azure CLI \r\n",
"\\# - kubectl command \r\n",
"\\# - The https://github.com/Azure/MachineLearningNotebooks repository downloaded (cloned) to your local machine\r\n",
"\r\n",
"\\# In the 'automl' database, create a table named 'dbo.nyc_energy' as follows: \r\n",
"\\# - In SQL Server Management Studio, right-click the 'automl' database, select Tasks, then Import Flat File. \r\n",
"\\# - Select the file AzureMlCli\\notebooks\\how-to-use-azureml\\automated-machine-learning\\forecasting-energy-demand\\nyc_energy.csv. \r\n",
"\\# - Using the \"Modify Columns\" page, allow nulls for all columns. \r\n",
"\r\n",
"\\# Create an Azure Machine Learning Workspace using the instructions at https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-workspace \r\n",
"\r\n",
"\\# Create an Azure service principal. You can do this with the following commands: \r\n",
"\r\n",
"az login \r\n",
"az account set --subscription *subscriptionid* \r\n",
"\r\n",
"\\# The following command prints out the **appId** and **tenant**, \r\n",
"\\# which you insert into the indicated cell later in this notebook \r\n",
"\\# to allow AutoML to authenticate with Azure: \r\n",
"\r\n",
"az ad sp create-for-rbac --name *principlename* --password *password*\r\n",
"\r\n",
"\\# Log into the master instance of SQL Server 2019 CTP 2.4: \r\n",
"kubectl exec -it mssql-master-pool-0 -n *clustername* -c mssql-server -- /bin/bash\r\n",
"\r\n",
"mkdir /tmp/aml\r\n",
"\r\n",
"cd /tmp/aml\r\n",
"\r\n",
"\\# **Modify** the following with your subscription_id, resource_group, and workspace_name: \r\n",
"cat > config.json << EOF \r\n",
"{ \r\n",
" \"subscription_id\": \"123456ab-78cd-0123-45ef-abcd12345678\", \r\n",
" \"resource_group\": \"myrg1\", \r\n",
" \"workspace_name\": \"myws1\" \r\n",
"} \r\n",
"EOF\r\n",
"\r\n",
"\\# The directory referenced below is appropriate for the master instance of SQL Server 2019 CTP 2.4.\r\n",
"\r\n",
"cd /opt/mssql/mlservices/runtime/python/bin\r\n",
"\r\n",
"./python -m pip install azureml-sdk[automl]\r\n",
"\r\n",
"./python -m pip install --upgrade numpy \r\n",
"\r\n",
"./python -m pip install --upgrade sklearn\r\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/sql-server/setup/auto-ml-sql-setup.png)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"-- Enable external scripts to allow invoking Python\r\n",
"sp_configure 'external scripts enabled',1 \r\n",
"reconfigure with override \r\n",
"GO\r\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"-- Use database 'automl'\r\n",
"USE [automl]\r\n",
"GO"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"-- This is a table to hold the Azure ML connection information.\r\n",
"SET ANSI_NULLS ON\r\n",
"GO\r\n",
"\r\n",
"SET QUOTED_IDENTIFIER ON\r\n",
"GO\r\n",
"\r\n",
"CREATE TABLE [dbo].[aml_connection](\r\n",
" [Id] [int] IDENTITY(1,1) NOT NULL PRIMARY KEY,\r\n",
"\t[ConnectionName] [nvarchar](255) NULL,\r\n",
"\t[TenantId] [nvarchar](255) NULL,\r\n",
"\t[AppId] [nvarchar](255) NULL,\r\n",
"\t[Password] [nvarchar](255) NULL,\r\n",
"\t[ConfigFile] [nvarchar](255) NULL\r\n",
") ON [PRIMARY]\r\n",
"GO"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Copy the values from create-for-rbac above into the cell below"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"-- Use the following values:\r\n",
"-- Leave the name as 'Default'\r\n",
"-- Insert <tenant> returned by create-for-rbac above\r\n",
"-- Insert <AppId> returned by create-for-rbac above\r\n",
"-- Insert <password> used in create-for-rbac above\r\n",
"-- Leave <path> as '/tmp/aml/config.json'\r\n",
"INSERT INTO [dbo].[aml_connection] \r\n",
"VALUES (\r\n",
" N'Default', -- Name\r\n",
" N'11111111-2222-3333-4444-555555555555', -- Tenant\r\n",
" N'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee', -- AppId\r\n",
" N'insertpasswordhere', -- Password\r\n",
" N'/tmp/aml/config.json' -- Path\r\n",
" );\r\n",
"GO"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"-- This is a table to hold the results from the AutoMLTrain procedure.\r\n",
"SET ANSI_NULLS ON\r\n",
"GO\r\n",
"\r\n",
"SET QUOTED_IDENTIFIER ON\r\n",
"GO\r\n",
"\r\n",
"CREATE TABLE [dbo].[aml_model](\r\n",
" [Id] [int] IDENTITY(1,1) NOT NULL PRIMARY KEY,\r\n",
" [Model] [varchar](max) NOT NULL, -- The model, which can be passed to AutoMLPredict for testing or prediction.\r\n",
" [RunId] [nvarchar](250) NULL, -- The RunId, which can be used to view the model in the Azure Portal.\r\n",
" [CreatedDate] [datetime] NULL,\r\n",
" [ExperimentName] [nvarchar](100) NULL, -- Azure ML Experiment Name\r\n",
" [WorkspaceName] [nvarchar](100) NULL, -- Azure ML Workspace Name\r\n",
"\t[LogFileText] [nvarchar](max) NULL\r\n",
") \r\n",
"GO\r\n",
"\r\n",
"ALTER TABLE [dbo].[aml_model] ADD DEFAULT (getutcdate()) FOR [CreatedDate]\r\n",
"GO\r\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"-- This stored procedure uses automated machine learning to train several models\r\n",
"-- and return the best model.\r\n",
"--\r\n",
"-- The result set has several columns:\r\n",
"-- best_run - ID of the best model found\r\n",
"-- experiment_name - training run name\r\n",
"-- fitted_model - best model found\r\n",
"-- log_file_text - console output\r\n",
"-- workspace - name of the Azure ML workspace where run history is stored\r\n",
"--\r\n",
"-- An example call for a classification problem is:\r\n",
"-- insert into dbo.aml_model(RunId, ExperimentName, Model, LogFileText, WorkspaceName)\r\n",
"-- exec dbo.AutoMLTrain @input_query='\r\n",
"-- SELECT top 100000 \r\n",
"-- CAST([pickup_datetime] AS NVARCHAR(30)) AS pickup_datetime\r\n",
"-- ,CAST([dropoff_datetime] AS NVARCHAR(30)) AS dropoff_datetime\r\n",
"-- ,[passenger_count]\r\n",
"-- ,[trip_time_in_secs]\r\n",
"-- ,[trip_distance]\r\n",
"-- ,[payment_type]\r\n",
"-- ,[tip_class]\r\n",
"-- FROM [dbo].[nyctaxi_sample] order by [hack_license] ',\r\n",
"-- @label_column = 'tip_class',\r\n",
"-- @iterations=10\r\n",
"-- \r\n",
"-- An example call for forecasting is:\r\n",
"-- insert into dbo.aml_model(RunId, ExperimentName, Model, LogFileText, WorkspaceName)\r\n",
"-- exec dbo.AutoMLTrain @input_query='\r\n",
"-- select cast(timeStamp as nvarchar(30)) as timeStamp,\r\n",
"-- demand,\r\n",
"-- \t precip,\r\n",
"-- \t temp,\r\n",
"-- case when timeStamp < ''2017-01-01'' then 0 else 1 end as is_validate_column\r\n",
"-- from nyc_energy\r\n",
"-- where demand is not null and precip is not null and temp is not null\r\n",
"-- and timeStamp < ''2017-02-01''',\r\n",
"-- @label_column='demand',\r\n",
"-- @task='forecasting',\r\n",
"-- @iterations=10,\r\n",
"-- @iteration_timeout_minutes=5,\r\n",
"-- @time_column_name='timeStamp',\r\n",
"-- @is_validate_column='is_validate_column',\r\n",
"-- @experiment_name='automl-sql-forecast',\r\n",
"-- @primary_metric='normalized_root_mean_squared_error'\r\n",
"\r\n",
"SET ANSI_NULLS ON\r\n",
"GO\r\n",
"SET QUOTED_IDENTIFIER ON\r\n",
"GO\r\n",
"CREATE OR ALTER PROCEDURE [dbo].[AutoMLTrain]\r\n",
" (\r\n",
" @input_query NVARCHAR(MAX), -- The SQL Query that will return the data to train and validate the model.\r\n",
" @label_column NVARCHAR(255)='Label', -- The name of the column in the result of @input_query that is the label.\r\n",
" @primary_metric NVARCHAR(40)='AUC_weighted', -- The metric to optimize.\r\n",
" @iterations INT=100, -- The maximum number of pipelines to train.\r\n",
" @task NVARCHAR(40)='classification', -- The type of task. Can be classification, regression or forecasting.\r\n",
" @experiment_name NVARCHAR(32)='automl-sql-test', -- This can be used to find the experiment in the Azure Portal.\r\n",
" @iteration_timeout_minutes INT = 15, -- The maximum time in minutes for training a single pipeline. \r\n",
" @experiment_timeout_hours FLOAT = 1, -- The maximum time in hours for training all pipelines.\r\n",
" @n_cross_validations INT = 3, -- The number of cross validations.\r\n",
" @blacklist_models NVARCHAR(MAX) = '', -- A comma separated list of algos that will not be used.\r\n",
" -- The list of possible models can be found at:\r\n",
" -- https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#configure-your-experiment-settings\r\n",
" @whitelist_models NVARCHAR(MAX) = '', -- A comma separated list of algos that can be used.\r\n",
" -- The list of possible models can be found at:\r\n",
" -- https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#configure-your-experiment-settings\r\n",
" @experiment_exit_score FLOAT = 0, -- Stop the experiment if this score is acheived.\r\n",
" @sample_weight_column NVARCHAR(255)='', -- The name of the column in the result of @input_query that gives a sample weight.\r\n",
" @is_validate_column NVARCHAR(255)='', -- The name of the column in the result of @input_query that indicates if the row is for training or validation.\r\n",
"\t -- In the values of the column, 0 means for training and 1 means for validation.\r\n",
" @time_column_name NVARCHAR(255)='', -- The name of the timestamp column for forecasting.\r\n",
"\t@connection_name NVARCHAR(255)='default' -- The AML connection to use.\r\n",
" ) AS\r\n",
"BEGIN\r\n",
"\r\n",
" DECLARE @tenantid NVARCHAR(255)\r\n",
" DECLARE @appid NVARCHAR(255)\r\n",
" DECLARE @password NVARCHAR(255)\r\n",
" DECLARE @config_file NVARCHAR(255)\r\n",
"\r\n",
"\tSELECT @tenantid=TenantId, @appid=AppId, @password=Password, @config_file=ConfigFile\r\n",
"\tFROM aml_connection\r\n",
"\tWHERE ConnectionName = @connection_name;\r\n",
"\r\n",
"\tEXEC sp_execute_external_script @language = N'Python', @script = N'import pandas as pd\r\n",
"import logging \r\n",
"import azureml.core \r\n",
"import pandas as pd\r\n",
"import numpy as np\r\n",
"from azureml.core.experiment import Experiment \r\n",
"from azureml.train.automl import AutoMLConfig \r\n",
"from sklearn import datasets \r\n",
"import pickle\r\n",
"import codecs\r\n",
"from azureml.core.authentication import ServicePrincipalAuthentication \r\n",
"from azureml.core.workspace import Workspace \r\n",
"\r\n",
"if __name__.startswith(\"sqlindb\"):\r\n",
" auth = ServicePrincipalAuthentication(tenantid, appid, password) \r\n",
" \r\n",
" ws = Workspace.from_config(path=config_file, auth=auth) \r\n",
" \r\n",
" project_folder = \"./sample_projects/\" + experiment_name\r\n",
" \r\n",
" experiment = Experiment(ws, experiment_name) \r\n",
"\r\n",
" data_train = input_data\r\n",
" X_valid = None\r\n",
" y_valid = None\r\n",
" sample_weight_valid = None\r\n",
"\r\n",
" if is_validate_column != \"\" and is_validate_column is not None:\r\n",
" data_train = input_data[input_data[is_validate_column] <= 0]\r\n",
" data_valid = input_data[input_data[is_validate_column] > 0]\r\n",
" data_train.pop(is_validate_column)\r\n",
" data_valid.pop(is_validate_column)\r\n",
" y_valid = data_valid.pop(label_column).values\r\n",
" if sample_weight_column != \"\" and sample_weight_column is not None:\r\n",
" sample_weight_valid = data_valid.pop(sample_weight_column).values\r\n",
" X_valid = data_valid\r\n",
" n_cross_validations = None\r\n",
"\r\n",
" y_train = data_train.pop(label_column).values\r\n",
"\r\n",
" sample_weight = None\r\n",
" if sample_weight_column != \"\" and sample_weight_column is not None:\r\n",
" sample_weight = data_train.pop(sample_weight_column).values\r\n",
"\r\n",
" X_train = data_train\r\n",
"\r\n",
" if experiment_timeout_hours == 0:\r\n",
" experiment_timeout_hours = None\r\n",
"\r\n",
" if experiment_exit_score == 0:\r\n",
" experiment_exit_score = None\r\n",
"\r\n",
" if blacklist_models == \"\":\r\n",
" blacklist_models = None\r\n",
"\r\n",
" if blacklist_models is not None:\r\n",
" blacklist_models = blacklist_models.replace(\" \", \"\").split(\",\")\r\n",
"\r\n",
" if whitelist_models == \"\":\r\n",
" whitelist_models = None\r\n",
"\r\n",
" if whitelist_models is not None:\r\n",
" whitelist_models = whitelist_models.replace(\" \", \"\").split(\",\")\r\n",
"\r\n",
" automl_settings = {}\r\n",
" preprocess = True\r\n",
" if time_column_name != \"\" and time_column_name is not None:\r\n",
" automl_settings = { \"time_column_name\": time_column_name }\r\n",
" preprocess = False\r\n",
"\r\n",
" log_file_name = \"automl_errors.log\"\r\n",
"\t \r\n",
" automl_config = AutoMLConfig(task = task, \r\n",
" debug_log = log_file_name, \r\n",
" primary_metric = primary_metric, \r\n",
" iteration_timeout_minutes = iteration_timeout_minutes, \r\n",
" experiment_timeout_hours = experiment_timeout_hours,\r\n",
" iterations = iterations, \r\n",
" n_cross_validations = n_cross_validations, \r\n",
" preprocess = preprocess,\r\n",
" verbosity = logging.INFO, \r\n",
" X = X_train, \r\n",
" y = y_train, \r\n",
" path = project_folder,\r\n",
" blacklist_models = blacklist_models,\r\n",
" whitelist_models = whitelist_models,\r\n",
" experiment_exit_score = experiment_exit_score,\r\n",
" sample_weight = sample_weight,\r\n",
" X_valid = X_valid,\r\n",
" y_valid = y_valid,\r\n",
" sample_weight_valid = sample_weight_valid,\r\n",
" **automl_settings) \r\n",
" \r\n",
" local_run = experiment.submit(automl_config, show_output = True) \r\n",
"\r\n",
" best_run, fitted_model = local_run.get_output()\r\n",
"\r\n",
" pickled_model = codecs.encode(pickle.dumps(fitted_model), \"base64\").decode()\r\n",
"\r\n",
" log_file_text = \"\"\r\n",
"\r\n",
" try:\r\n",
" with open(log_file_name, \"r\") as log_file:\r\n",
" log_file_text = log_file.read()\r\n",
" except:\r\n",
" log_file_text = \"Log file not found\"\r\n",
"\r\n",
" returned_model = pd.DataFrame({\"best_run\": [best_run.id], \"experiment_name\": [experiment_name], \"fitted_model\": [pickled_model], \"log_file_text\": [log_file_text], \"workspace\": [ws.name]}, dtype=np.dtype(np.str))\r\n",
"'\r\n",
"\t, @input_data_1 = @input_query\r\n",
"\t, @input_data_1_name = N'input_data'\r\n",
"\t, @output_data_1_name = N'returned_model'\r\n",
"\t, @params = N'@label_column NVARCHAR(255), \r\n",
"\t @primary_metric NVARCHAR(40),\r\n",
"\t\t\t\t @iterations INT, @task NVARCHAR(40),\r\n",
"\t\t\t\t @experiment_name NVARCHAR(32),\r\n",
"\t\t\t\t @iteration_timeout_minutes INT,\r\n",
"\t\t\t\t @experiment_timeout_hours FLOAT,\r\n",
"\t\t\t\t @n_cross_validations INT,\r\n",
"\t\t\t\t @blacklist_models NVARCHAR(MAX),\r\n",
"\t\t\t\t @whitelist_models NVARCHAR(MAX),\r\n",
"\t\t\t\t @experiment_exit_score FLOAT,\r\n",
"\t\t\t\t @sample_weight_column NVARCHAR(255),\r\n",
"\t\t\t\t @is_validate_column NVARCHAR(255),\r\n",
"\t\t\t\t @time_column_name NVARCHAR(255),\r\n",
"\t\t\t\t @tenantid NVARCHAR(255),\r\n",
"\t\t\t\t @appid NVARCHAR(255),\r\n",
"\t\t\t\t @password NVARCHAR(255),\r\n",
"\t\t\t\t @config_file NVARCHAR(255)'\r\n",
"\t, @label_column = @label_column\r\n",
"\t, @primary_metric = @primary_metric\r\n",
"\t, @iterations = @iterations\r\n",
"\t, @task = @task\r\n",
"\t, @experiment_name = @experiment_name\r\n",
"\t, @iteration_timeout_minutes = @iteration_timeout_minutes\r\n",
"\t, @experiment_timeout_hours = @experiment_timeout_hours\r\n",
"\t, @n_cross_validations = @n_cross_validations\r\n",
"\t, @blacklist_models = @blacklist_models\r\n",
"\t, @whitelist_models = @whitelist_models\r\n",
"\t, @experiment_exit_score = @experiment_exit_score\r\n",
"\t, @sample_weight_column = @sample_weight_column\r\n",
"\t, @is_validate_column = @is_validate_column\r\n",
"\t, @time_column_name = @time_column_name\r\n",
"\t, @tenantid = @tenantid\r\n",
"\t, @appid = @appid\r\n",
"\t, @password = @password\r\n",
"\t, @config_file = @config_file\r\n",
"WITH RESULT SETS ((best_run NVARCHAR(250), experiment_name NVARCHAR(100), fitted_model VARCHAR(MAX), log_file_text NVARCHAR(MAX), workspace NVARCHAR(100)))\r\n",
"END"
]
},
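{
"cell_type": "markdown",
"metadata": {},
"source": [
"The next cell is a minimal, hypothetical example of calling `AutoMLTrain`. The table `dbo.nyc_energy` and the label column `demand` are illustrative placeholders; substitute a query and label column from your own database."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"-- Hypothetical usage sketch: train a model and capture the returned row.\r\n",
"DECLARE @results TABLE (best_run NVARCHAR(250), experiment_name NVARCHAR(100), fitted_model VARCHAR(MAX), log_file_text NVARCHAR(MAX), workspace NVARCHAR(100));\r\n",
"INSERT INTO @results\r\n",
"EXEC [dbo].[AutoMLTrain]\r\n",
"    @input_query = N'SELECT * FROM dbo.nyc_energy', -- placeholder query\r\n",
"    @label_column = N'demand',                      -- placeholder label column\r\n",
"    @task = N'regression',\r\n",
"    @iterations = 10;"
]
},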
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"-- This procedure returns a list of metrics for each iteration of a training run.\r\n",
"SET ANSI_NULLS ON\r\n",
"GO\r\n",
"SET QUOTED_IDENTIFIER ON\r\n",
"GO\r\n",
"CREATE OR ALTER PROCEDURE [dbo].[AutoMLGetMetrics]\r\n",
" (\r\n",
"\t@run_id NVARCHAR(250), -- The RunId\r\n",
" @experiment_name NVARCHAR(32)='automl-sql-test', -- This can be used to find the experiment in the Azure Portal.\r\n",
" @connection_name NVARCHAR(255)='default' -- The AML connection to use.\r\n",
" ) AS\r\n",
"BEGIN\r\n",
" DECLARE @tenantid NVARCHAR(255)\r\n",
" DECLARE @appid NVARCHAR(255)\r\n",
" DECLARE @password NVARCHAR(255)\r\n",
" DECLARE @config_file NVARCHAR(255)\r\n",
"\r\n",
"\tSELECT @tenantid=TenantId, @appid=AppId, @password=Password, @config_file=ConfigFile\r\n",
"\tFROM aml_connection\r\n",
"\tWHERE ConnectionName = @connection_name;\r\n",
"\r\n",
" EXEC sp_execute_external_script @language = N'Python', @script = N'import pandas as pd\r\n",
"import logging \r\n",
"import azureml.core \r\n",
"import numpy as np\r\n",
"from azureml.core.experiment import Experiment \r\n",
"from azureml.train.automl.run import AutoMLRun\r\n",
"from azureml.core.authentication import ServicePrincipalAuthentication \r\n",
"from azureml.core.workspace import Workspace \r\n",
"\r\n",
"auth = ServicePrincipalAuthentication(tenantid, appid, password) \r\n",
" \r\n",
"ws = Workspace.from_config(path=config_file, auth=auth) \r\n",
" \r\n",
"experiment = Experiment(ws, experiment_name) \r\n",
"\r\n",
"ml_run = AutoMLRun(experiment = experiment, run_id = run_id)\r\n",
"\r\n",
"children = list(ml_run.get_children())\r\n",
"iterationlist = []\r\n",
"metricnamelist = []\r\n",
"metricvaluelist = []\r\n",
"\r\n",
"for run in children:\r\n",
" properties = run.get_properties()\r\n",
" if \"iteration\" in properties:\r\n",
" iteration = int(properties[\"iteration\"])\r\n",
" for metric_name, metric_value in run.get_metrics().items():\r\n",
" if isinstance(metric_value, float):\r\n",
" iterationlist.append(iteration)\r\n",
" metricnamelist.append(metric_name)\r\n",
" metricvaluelist.append(metric_value)\r\n",
" \r\n",
"metrics = pd.DataFrame({\"iteration\": iterationlist, \"metric_name\": metricnamelist, \"metric_value\": metricvaluelist})\r\n",
"'\r\n",
" , @output_data_1_name = N'metrics'\r\n",
"\t, @params = N'@run_id NVARCHAR(250), \r\n",
"\t\t\t\t @experiment_name NVARCHAR(32),\r\n",
" \t\t\t\t @tenantid NVARCHAR(255),\r\n",
"\t\t\t\t @appid NVARCHAR(255),\r\n",
"\t\t\t\t @password NVARCHAR(255),\r\n",
"\t\t\t\t @config_file NVARCHAR(255)'\r\n",
" , @run_id = @run_id\r\n",
"\t, @experiment_name = @experiment_name\r\n",
"\t, @tenantid = @tenantid\r\n",
"\t, @appid = @appid\r\n",
"\t, @password = @password\r\n",
"\t, @config_file = @config_file\r\n",
"WITH RESULT SETS ((iteration INT, metric_name NVARCHAR(100), metric_value FLOAT))\r\n",
"END"
]
},
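{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal, hypothetical call to `AutoMLGetMetrics`; the run ID shown is a placeholder, so substitute the `best_run` value returned by `AutoMLTrain`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"-- Hypothetical usage sketch: list the per-iteration metrics of a training run.\r\n",
"EXEC [dbo].[AutoMLGetMetrics]\r\n",
"    @run_id = N'AutoML_00000000-0000-0000-0000-000000000000', -- placeholder RunId\r\n",
"    @experiment_name = N'automl-sql-test';"
]
},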
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"-- This procedure predicts values based on a model returned by AutoMLTrain and a dataset.\r\n",
"-- It returns the dataset with a new column added, which is the predicted value.\r\n",
"SET ANSI_NULLS ON\r\n",
"GO\r\n",
"SET QUOTED_IDENTIFIER ON\r\n",
"GO\r\n",
"CREATE OR ALTER PROCEDURE [dbo].[AutoMLPredict]\r\n",
" (\r\n",
" @input_query NVARCHAR(MAX), -- A SQL query returning data to predict on.\r\n",
" @model NVARCHAR(MAX), -- A model returned from AutoMLTrain.\r\n",
" @label_column NVARCHAR(255)='' -- Optional name of the column from input_query, which should be ignored when predicting\r\n",
" ) AS \r\n",
"BEGIN \r\n",
" \r\n",
" EXEC sp_execute_external_script @language = N'Python', @script = N'import pandas as pd \r\n",
"import azureml.core \r\n",
"import numpy as np \r\n",
"from azureml.train.automl import AutoMLConfig \r\n",
"import pickle \r\n",
"import codecs \r\n",
" \r\n",
"model_obj = pickle.loads(codecs.decode(model.encode(), \"base64\")) \r\n",
" \r\n",
"test_data = input_data.copy() \r\n",
"\r\n",
"if label_column != \"\" and label_column is not None:\r\n",
" y_test = test_data.pop(label_column).values \r\n",
"X_test = test_data \r\n",
" \r\n",
"predicted = model_obj.predict(X_test) \r\n",
" \r\n",
"combined_output = input_data.assign(predicted=predicted)\r\n",
" \r\n",
"' \r\n",
" , @input_data_1 = @input_query \r\n",
" , @input_data_1_name = N'input_data' \r\n",
" , @output_data_1_name = N'combined_output' \r\n",
" , @params = N'@model NVARCHAR(MAX), @label_column NVARCHAR(255)' \r\n",
" , @model = @model \r\n",
"\t, @label_column = @label_column\r\n",
"END"
]
}
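,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal, hypothetical call to `AutoMLPredict`, scoring rows from a placeholder table with a model previously saved from the `fitted_model` column that `AutoMLTrain` returns:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"-- Hypothetical usage sketch: score new rows with a previously trained model.\r\n",
"DECLARE @model NVARCHAR(MAX) = (SELECT TOP 1 Model FROM dbo.aml_model); -- placeholder storage table\r\n",
"EXEC [dbo].[AutoMLPredict]\r\n",
"    @input_query = N'SELECT * FROM dbo.nyc_energy_test', -- placeholder query\r\n",
"    @model = @model,\r\n",
"    @label_column = N'demand';                           -- placeholder label column"
]
}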
],
"metadata": {
"authors": [
{
"name": "jeffshep"
}
],
"category": "tutorial",
"compute": [
"None"
],
"datasets": [
"None"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"Azure ML AutoML"
],
"friendly_name": "Setup automated ML SQL integration",
"index_order": 1,
"kernelspec": {
"display_name": "Python 3.6",
"language": "sql",
"name": "python36"
},
"language_info": {
"name": "sql",
"version": ""
},
"tags": [
""
],
"task": "None"
},
"nbformat": 4,
"nbformat_minor": 2
}


@@ -1,497 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/deployment/accelerated-models/accelerated-models-object-detection.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Azure ML Hardware Accelerated Object Detection"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This tutorial will show you how to deploy an object detection service based on the SSD-VGG model in just a few minutes using the Azure Machine Learning Accelerated AI service.\n",
"\n",
"We will use the SSD-VGG model accelerated on an FPGA. Our Accelerated Models Service handles translating deep neural networks (DNN) into an FPGA program.\n",
"\n",
"The steps in this notebook are: \n",
"1. [Setup Environment](#set-up-environment)\n",
"* [Construct Model](#construct-model)\n",
" * Image Preprocessing\n",
" * Featurizer\n",
" * Save Model\n",
" * Save input and output tensor names\n",
"* [Create Image](#create-image)\n",
"* [Deploy Image](#deploy-image)\n",
"* [Test the Service](#test-service)\n",
" * Create Client\n",
" * Serve the model\n",
"* [Cleanup](#cleanup)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"set-up-environment\"></a>\n",
"## 1. Set up Environment\n",
"### 1.a. Imports"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import tensorflow as tf"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 1.b. Retrieve Workspace\n",
"If you haven't created a Workspace, please follow [this notebook](\"../../../configuration.ipynb\") to do so. If you have, run the codeblock below to retrieve it. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Workspace\n",
"\n",
"ws = Workspace.from_config()\n",
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"construct-model\"></a>\n",
"## 2. Construct model\n",
"### 2.a. Image preprocessing\n",
"We'd like our service to accept JPEG images as input. However the input to SSD-VGG is a float tensor of shape \\[1, 300, 300, 3\\]. The first dimension is batch, then height, width, and channels (i.e. NHWC). To bridge this gap, we need code that decodes JPEG images and resizes them appropriately for input to SSD-VGG. The Accelerated AI service can execute TensorFlow graphs as part of the service and we'll use that ability to do the image preprocessing. This code defines a TensorFlow graph that preprocesses an array of JPEG images (as TensorFlow strings) and produces a tensor that is ready to be featurized by SSD-VGG.\n",
"\n",
"**Note:** Expect to see TF deprecation warnings until we port our SDK over to use Tensorflow 2.0."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Input images as a two-dimensional tensor containing an arbitrary number of images represented a strings\n",
"import azureml.accel.models.utils as utils\n",
"tf.reset_default_graph()\n",
"\n",
"in_images = tf.placeholder(tf.string)\n",
"image_tensors = utils.preprocess_array(in_images, output_width=300, output_height=300, preserve_aspect_ratio=False)\n",
"print(image_tensors.shape)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 2.b. Featurizer\n",
"The SSD-VGG model is different from our other models in that it generates 12 tensor outputs. These corresponds to x,y displacements of the anchor boxes and the detection confidence (for 21 classes). Because these outputs are not convenient to work with, we will later use a pre-defined post-processing utility to transform the outputs into a simplified list of bounding boxes with their respective class and confidence.\n",
"\n",
"For more information about the output tensors, take this example: the output tensor 'ssd_300_vgg/block4_box/Reshape_1:0' has a shape of [None, 37, 37, 4, 21]. This gives the pre-softmax confidence for 4 anchor boxes situated at each site of a 37 x 37 grid imposed on the image, one confidence score for each of the 21 classes. The first dimension is the batch dimension. Likewise, 'ssd_300_vgg/block4_box/Reshape:0' has shape [None, 37, 37, 4, 4] and encodes the (cx, cy) center shift and rescaling (sw, sh) relative to each anchor box. Refer to the [SSD-VGG paper](https://arxiv.org/abs/1512.02325) to understand how these are computed. The other 10 tensors are defined similarly."
]
},
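{
"cell_type": "markdown",
"metadata": {},
"source": [
"To make the box encoding concrete, the next cell is a minimal sketch of how a single (cx, cy, sw, sh) prediction is decoded into a bounding box. The anchor values and the prior-scaling factors are illustrative assumptions following the convention common to SSD implementations, not values taken from this service; the post-processing utility used in section 5.b performs this decoding for every anchor."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"# Minimal sketch (not the service code) of standard SSD box decoding:\n",
"# recover an absolute box from one predicted offset 4-vector (cx, cy, sw, sh)\n",
"# and its anchor box. Anchor values and prior scaling below are illustrative.\n",
"anchor_cx, anchor_cy, anchor_w, anchor_h = 0.5, 0.5, 0.2, 0.3\n",
"dcx, dcy, dsw, dsh = 0.1, -0.2, 0.05, 0.1   # one 4-vector from a Reshape:0 tensor\n",
"prior_scaling = [0.1, 0.1, 0.2, 0.2]        # assumed SSD prior-scaling convention\n",
"\n",
"cx = anchor_cx + dcx * prior_scaling[0] * anchor_w\n",
"cy = anchor_cy + dcy * prior_scaling[1] * anchor_h\n",
"w = anchor_w * np.exp(dsw * prior_scaling[2])\n",
"h = anchor_h * np.exp(dsh * prior_scaling[3])\n",
"\n",
"# (y1, x1, y2, x2) fractional coordinates, the format produced by the\n",
"# post-processing utility used later in this notebook.\n",
"print(cy - h / 2, cx - w / 2, cy + h / 2, cx + w / 2)"
]
},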
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.accel.models import SsdVgg\n",
"\n",
"saved_model_dir = os.path.join(os.path.expanduser('~'), 'models')\n",
"model_graph = SsdVgg(saved_model_dir, is_frozen = True)\n",
"\n",
"print('SSD-VGG Input Tensors:')\n",
"for idx, input_name in enumerate(model_graph.input_tensor_list):\n",
" print('{}, {}'.format(input_name, model_graph.get_input_dims(idx)))\n",
" \n",
"print('SSD-VGG Output Tensors:')\n",
"for idx, output_name in enumerate(model_graph.output_tensor_list):\n",
" print('{}, {}'.format(output_name, model_graph.get_output_dims(idx)))\n",
"\n",
"ssd_outputs = model_graph.import_graph_def(image_tensors, is_training=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 2.c. Save Model\n",
"Now that we loaded both parts of the tensorflow graph (preprocessor and SSD-VGG featurizer), we can save the graph and associated variables to a directory which we can register as an Azure ML Model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_name = \"ssdvgg\"\n",
"model_save_path = os.path.join(saved_model_dir, model_name, \"saved_model\")\n",
"print(\"Saving model in {}\".format(model_save_path))\n",
"\n",
"output_map = {}\n",
"for i, output in enumerate(ssd_outputs):\n",
" output_map['out_{}'.format(i)] = output\n",
"\n",
"with tf.Session() as sess:\n",
" model_graph.restore_weights(sess)\n",
" tf.saved_model.simple_save(sess, \n",
" model_save_path, \n",
" inputs={'images': in_images}, \n",
" outputs=output_map)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 2.d. Important! Save names of input and output tensors\n",
"\n",
"These input and output tensors that were created during the preprocessing and classifier steps are also going to be used when **converting the model** to an Accelerated Model that can run on FPGA's and for **making an inferencing request**. It is very important to save this information!"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"register model from file"
]
},
"outputs": [],
"source": [
"input_tensors = in_images.name\n",
"# We will use the list of output tensors during inferencing\n",
"output_tensors = [output.name for output in ssd_outputs]\n",
"# However, for multiple output tensors, our AccelOnnxConverter will \n",
"# accept comma-delimited strings (lists will cause error)\n",
"output_tensors_str = \",\".join(output_tensors)\n",
"\n",
"print(input_tensors)\n",
"print(output_tensors)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"create-image\"></a>\n",
"## 3. Create AccelContainerImage\n",
"Below we will execute all the same steps as in the [Quickstart](./accelerated-models-quickstart.ipynb#create-image) to package the model we have saved locally into an accelerated Docker image saved in our workspace. To complete all the steps, it may take a few minutes. For more details on each step, check out the [Quickstart section on model registration](./accelerated-models-quickstart.ipynb#register-model)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Workspace\n",
"from azureml.core.model import Model\n",
"from azureml.core.image import Image\n",
"from azureml.accel import AccelOnnxConverter\n",
"from azureml.accel import AccelContainerImage\n",
"\n",
"# Retrieve workspace\n",
"ws = Workspace.from_config()\n",
"print(\"Successfully retrieved workspace:\", ws.name, ws.resource_group, ws.location, ws.subscription_id, '\\n')\n",
"\n",
"# Register model\n",
"registered_model = Model.register(workspace = ws,\n",
" model_path = model_save_path,\n",
" model_name = model_name)\n",
"print(\"Successfully registered: \", registered_model.name, registered_model.description, registered_model.version, '\\n', sep = '\\t')\n",
"\n",
"# Convert model\n",
"convert_request = AccelOnnxConverter.convert_tf_model(ws, registered_model, input_tensors, output_tensors_str)\n",
"if convert_request.wait_for_completion(show_output = False):\n",
" # If the above call succeeded, get the converted model\n",
" converted_model = convert_request.result\n",
" print(\"\\nSuccessfully converted: \", converted_model.name, converted_model.url, converted_model.version, \n",
" converted_model.id, converted_model.created_time, '\\n')\n",
"else:\n",
" print(\"Model conversion failed. Showing output.\")\n",
" convert_request.wait_for_completion(show_output = True)\n",
"\n",
"# Package into AccelContainerImage\n",
"image_config = AccelContainerImage.image_configuration()\n",
"# Image name must be lowercase\n",
"image_name = \"{}-image\".format(model_name)\n",
"image = Image.create(name = image_name,\n",
" models = [converted_model],\n",
" image_config = image_config, \n",
" workspace = ws)\n",
"image.wait_for_creation()\n",
"print(\"Created AccelContainerImage: {} {} {}\\n\".format(image.name, image.creation_state, image.image_location))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"deploy-image\"></a>\n",
"## 4. Deploy image\n",
"Once you have an Azure ML Accelerated Image in your Workspace, you can deploy it to two destinations, to a Databox Edge machine or to an AKS cluster. \n",
"\n",
"### 4.a. Deploy to Databox Edge Machine using IoT Hub\n",
"See the sample [here](https://github.com/Azure-Samples/aml-real-time-ai/) for using the Azure IoT CLI extension for deploying your Docker image to your Databox Edge Machine.\n",
"\n",
"### 4.b. Deploy to AKS Cluster\n",
"Same as in the [Quickstart section on image deployment](./accelerated-models-quickstart.ipynb#deploy-image), we are going to create an AKS cluster with FPGA-enabled machines, then deploy our service to it.\n",
"#### Create AKS ComputeTarget"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import AksCompute, ComputeTarget\n",
"\n",
"# Uses the specific FPGA enabled VM (sku: Standard_PB6s)\n",
"# Standard_PB6s are available in: eastus, westus2, westeurope, southeastasia\n",
"prov_config = AksCompute.provisioning_configuration(vm_size = \"Standard_PB6s\",\n",
" agent_count = 1, \n",
" location = \"eastus\")\n",
"\n",
"aks_name = 'aks-pb6-obj'\n",
"# Create the cluster\n",
"aks_target = ComputeTarget.create(workspace = ws, \n",
" name = aks_name, \n",
" provisioning_configuration = prov_config)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Provisioning an AKS cluster might take awhile (15 or so minutes), and we want to wait until it's successfully provisioned before we can deploy a service to it. If you interrupt this cell, provisioning of the cluster will continue. You can re-run it or check the status in your Workspace under Compute."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%time\n",
"aks_target.wait_for_completion(show_output = True)\n",
"print(aks_target.provisioning_state)\n",
"print(aks_target.provisioning_errors)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Deploy AccelContainerImage to AKS ComputeTarget"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%time\n",
"from azureml.core.webservice import Webservice, AksWebservice\n",
"\n",
"# Set the web service configuration (for creating a test service, we don't want autoscale enabled)\n",
"# Authentication is enabled by default, but for testing we specify False\n",
"aks_config = AksWebservice.deploy_configuration(autoscale_enabled=False,\n",
" num_replicas=1,\n",
" auth_enabled = False)\n",
"\n",
"aks_service_name ='my-aks-service-3'\n",
"\n",
"aks_service = Webservice.deploy_from_image(workspace = ws,\n",
" name = aks_service_name,\n",
" image = image,\n",
" deployment_config = aks_config,\n",
" deployment_target = aks_target)\n",
"aks_service.wait_for_deployment(show_output = True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"test-service\"></a>\n",
"## 5. Test the service\n",
"<a id=\"create-client\"></a>\n",
"### 5.a. Create Client\n",
"The image supports gRPC and the TensorFlow Serving \"predict\" API. We will create a PredictionClient from the Webservice object that can call into the docker image to get predictions. If you do not have the Webservice object, you can also create [PredictionClient](https://docs.microsoft.com/en-us/python/api/azureml-accel-models/azureml.accel.predictionclient?view=azure-ml-py) directly.\n",
"\n",
"**Note:** If you chose to use auth_enabled=True when creating your AksWebservice.deploy_configuration(), see documentation [here](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.webservice(class)?view=azure-ml-py#get-keys--) on how to retrieve your keys and use either key as an argument to PredictionClient(...,access_token=key).\n",
"**WARNING:** If you are running on Azure Notebooks free compute, you will not be able to make outgoing calls to your service. Try locating your client on a different machine to consume it."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Using the grpc client in AzureML Accelerated Models SDK\n",
"from azureml.accel import client_from_service\n",
"\n",
"# Initialize AzureML Accelerated Models client\n",
"client = client_from_service(aks_service)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can adapt the client [code](https://github.com/Azure/aml-real-time-ai/blob/master/pythonlib/amlrealtimeai/client.py) to meet your needs. There is also an example C# [client](https://github.com/Azure/aml-real-time-ai/blob/master/sample-clients/csharp).\n",
"\n",
"The service provides an API that is compatible with TensorFlow Serving. There are instructions to download a sample client [here](https://www.tensorflow.org/serving/setup)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"serve-model\"></a>\n",
"### 5.b. Serve the model\n",
"The SSD-VGG model returns the confidence and bounding boxes for all possible anchor boxes. As mentioned earlier, we will use a post-processing routine to transform this into a list of bounding boxes (y1, x1, y2, x2) where x, y are fractional coordinates measured from left and top respectively. A respective list of classes and scores is also returned to tag each bounding box. Below we make use of this information to draw the bounding boxes on top the original image. Note that in the post-processing routine we select a confidence threshold of 0.5."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"from matplotlib import pyplot as plt\n",
"\n",
"colors_tableau = [(255, 255, 255), (31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),\n",
" (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),\n",
" (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),\n",
" (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),\n",
" (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]\n",
"\n",
"\n",
"def draw_boxes_on_img(img, classes, scores, bboxes, thickness=2):\n",
" shape = img.shape\n",
" for i in range(bboxes.shape[0]):\n",
" bbox = bboxes[i]\n",
" color = colors_tableau[classes[i]]\n",
" # Draw bounding box...\n",
" p1 = (int(bbox[0] * shape[0]), int(bbox[1] * shape[1]))\n",
" p2 = (int(bbox[2] * shape[0]), int(bbox[3] * shape[1]))\n",
" cv2.rectangle(img, p1[::-1], p2[::-1], color, thickness)\n",
" # Draw text...\n",
" s = '%s/%.3f' % (classes[i], scores[i])\n",
" p1 = (p1[0]-5, p1[1])\n",
" cv2.putText(img, s, p1[::-1], cv2.FONT_HERSHEY_DUPLEX, 0.4, color, 1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.accel._external.ssdvgg_utils as ssdvgg_utils\n",
"\n",
"result = client.score_file(path=\"meeting.jpg\", input_name=input_tensors, outputs=output_tensors)\n",
"classes, scores, bboxes = ssdvgg_utils.postprocess(result, select_threshold=0.5)\n",
"\n",
"img = cv2.imread('meeting.jpg', 1)\n",
"img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n",
"draw_boxes_on_img(img, classes, scores, bboxes)\n",
"plt.imshow(img)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"cleanup\"></a>\n",
"## 6. Cleanup\n",
"It's important to clean up your resources, so that you won't incur unnecessary costs. In the [next notebook](./accelerated-models-training.ipynb) you will learn how to train a classfier on a new dataset using transfer learning."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"aks_service.delete()\n",
"aks_target.delete()\n",
"image.delete()\n",
"registered_model.delete()\n",
"converted_model.delete()"
]
}
],
"metadata": {
"authors": [
{
"name": "coverste"
},
{
"name": "paledger"
},
{
"name": "sukha"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}


@@ -1,7 +0,0 @@
name: accelerated-models-object-detection
dependencies:
- pip:
- azureml-sdk
- azureml-accel-models[cpu]
- opencv-python
- matplotlib


@@ -1,555 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/deployment/accelerated-models/accelerated-models-quickstart.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Azure ML Hardware Accelerated Models Quickstart"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This tutorial will show you how to deploy an image recognition service based on the ResNet 50 classifier using the Azure Machine Learning Accelerated Models service. Get more information about our service from our [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-accelerate-with-fpgas), [API reference](https://docs.microsoft.com/en-us/python/api/azureml-accel-models/azureml.accel?view=azure-ml-py), or [forum](https://aka.ms/aml-forum).\n",
"\n",
"We will use an accelerated ResNet50 featurizer running on an FPGA. Our Accelerated Models Service handles translating deep neural networks (DNN) into an FPGA program.\n",
"\n",
"For more information about using other models besides Resnet50, see the [README](./README.md).\n",
"\n",
"The steps covered in this notebook are: \n",
"1. [Set up environment](#set-up-environment)\n",
"* [Construct model](#construct-model)\n",
" * Image Preprocessing\n",
" * Featurizer (Resnet50)\n",
" * Classifier\n",
" * Save Model\n",
"* [Register Model](#register-model)\n",
"* [Convert into Accelerated Model](#convert-model)\n",
"* [Create Image](#create-image)\n",
"* [Deploy](#deploy-image)\n",
"* [Test service](#test-service)\n",
"* [Clean-up](#clean-up)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"set-up-environment\"></a>\n",
"## 1. Set up environment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import tensorflow as tf"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrieve Workspace\n",
"If you haven't created a Workspace, please follow [this notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) to do so. If you have, run the codeblock below to retrieve it. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Workspace\n",
"\n",
"ws = Workspace.from_config()\n",
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"construct-model\"></a>\n",
"## 2. Construct model\n",
"\n",
"There are three parts to the model we are deploying: pre-processing, featurizer with ResNet50, and classifier with ImageNet dataset. Then we will save this complete Tensorflow model graph locally before registering it to your Azure ML Workspace.\n",
"\n",
"### 2.a. Image preprocessing\n",
"We'd like our service to accept JPEG images as input. However the input to ResNet50 is a tensor. So we need code that decodes JPEG images and does the preprocessing required by ResNet50. The Accelerated AI service can execute TensorFlow graphs as part of the service and we'll use that ability to do the image preprocessing. This code defines a TensorFlow graph that preprocesses an array of JPEG images (as strings) and produces a tensor that is ready to be featurized by ResNet50.\n",
"\n",
"**Note:** Expect to see TF deprecation warnings until we port our SDK over to use Tensorflow 2.0."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Input images as a two-dimensional tensor containing an arbitrary number of images represented a strings\n",
"import azureml.accel.models.utils as utils\n",
"tf.reset_default_graph()\n",
"\n",
"in_images = tf.placeholder(tf.string)\n",
"image_tensors = utils.preprocess_array(in_images)\n",
"print(image_tensors.shape)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 2.b. Featurizer\n",
"We use ResNet50 as a featurizer. In this step we initialize the model. This downloads a TensorFlow checkpoint of the quantized ResNet50."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.accel.models import QuantizedResnet50\n",
"save_path = os.path.expanduser('~/models')\n",
"model_graph = QuantizedResnet50(save_path, is_frozen = True)\n",
"feature_tensor = model_graph.import_graph_def(image_tensors)\n",
"print(model_graph.version)\n",
"print(feature_tensor.name)\n",
"print(feature_tensor.shape)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 2.c. Classifier\n",
"The model we downloaded includes a classifier which takes the output of the ResNet50 and identifies an image. This classifier is trained on the ImageNet dataset. We are going to use this classifier for our service. The next [notebook](./accelerated-models-training.ipynb) shows how to train a classifier for a different data set. The input to the classifier is a tensor matching the output of our ResNet50 featurizer."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"classifier_output = model_graph.get_default_classifier(feature_tensor)\n",
"print(classifier_output)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 2.d. Save Model\n",
"Now that we loaded all three parts of the tensorflow graph (preprocessor, resnet50 featurizer, and the classifier), we can save the graph and associated variables to a directory which we can register as an Azure ML Model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# model_name must be lowercase\n",
"model_name = \"resnet50\"\n",
"model_save_path = os.path.join(save_path, model_name)\n",
"print(\"Saving model in {}\".format(model_save_path))\n",
"\n",
"with tf.Session() as sess:\n",
" model_graph.restore_weights(sess)\n",
" tf.saved_model.simple_save(sess, model_save_path,\n",
" inputs={'images': in_images},\n",
" outputs={'output_alias': classifier_output})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 2.e. Important! Save names of input and output tensors\n",
"\n",
"These input and output tensors that were created during the preprocessing and classifier steps are also going to be used when **converting the model** to an Accelerated Model that can run on FPGA's and for **making an inferencing request**. It is very important to save this information! You can see our defaults for all the models in the [README](./README.md).\n",
"\n",
"By default for Resnet50, these are the values you should see when running the cell below: \n",
"* input_tensors = \"Placeholder:0\"\n",
"* output_tensors = \"classifier/resnet_v1_50/predictions/Softmax:0\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"register model from file"
]
},
"outputs": [],
"source": [
"input_tensors = in_images.name\n",
"output_tensors = classifier_output.name\n",
"\n",
"print(input_tensors)\n",
"print(output_tensors)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"register-model\"></a>\n",
"## 3. Register Model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can add tags and descriptions to your models. Using tags, you can track useful information such as the name and version of the machine learning library used to train the model. Note that tags must be alphanumeric."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"register model from file"
]
},
"outputs": [],
"source": [
"from azureml.core.model import Model\n",
"\n",
"registered_model = Model.register(workspace = ws,\n",
" model_path = model_save_path,\n",
" model_name = model_name)\n",
"\n",
"print(\"Successfully registered: \", registered_model.name, registered_model.description, registered_model.version, sep = '\\t')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"convert-model\"></a>\n",
"## 4. Convert Model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For conversion you need to provide names of input and output tensors. This information can be found from the model_graph you saved in step 2.e. above.\n",
"\n",
"**Note**: Conversion may take a while and on average for FPGA model it is about 1-3 minutes and it depends on model type."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"register model from file"
]
},
"outputs": [],
"source": [
"from azureml.accel import AccelOnnxConverter\n",
"\n",
"convert_request = AccelOnnxConverter.convert_tf_model(ws, registered_model, input_tensors, output_tensors)\n",
"\n",
"if convert_request.wait_for_completion(show_output = False):\n",
" # If the above call succeeded, get the converted model\n",
" converted_model = convert_request.result\n",
" print(\"\\nSuccessfully converted: \", converted_model.name, converted_model.url, converted_model.version, \n",
" converted_model.id, converted_model.created_time, '\\n')\n",
"else:\n",
" print(\"Model conversion failed. Showing output.\")\n",
" convert_request.wait_for_completion(show_output = True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"create-image\"></a>\n",
"## 5. Package the model into an Image"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can add tags and descriptions to image. Also, for FPGA model an image can only contain **single** model.\n",
"\n",
"**Note**: The following command can take few minutes. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.image import Image\n",
"from azureml.accel import AccelContainerImage\n",
"\n",
"image_config = AccelContainerImage.image_configuration()\n",
"# Image name must be lowercase\n",
"image_name = \"{}-image\".format(model_name)\n",
"\n",
"image = Image.create(name = image_name,\n",
" models = [converted_model],\n",
" image_config = image_config, \n",
" workspace = ws)\n",
"image.wait_for_creation(show_output = False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"deploy-image\"></a>\n",
"## 6. Deploy\n",
"Once you have an Azure ML Accelerated Image in your Workspace, you can deploy it to two destinations, to a Databox Edge machine or to an AKS cluster. \n",
"\n",
"### 6.a. Databox Edge Machine using IoT Hub\n",
"See the sample [here](https://github.com/Azure-Samples/aml-real-time-ai/) for using the Azure IoT CLI extension for deploying your Docker image to your Databox Edge Machine.\n",
"\n",
"### 6.b. Azure Kubernetes Service (AKS) using Azure ML Service\n",
"We are going to create an AKS cluster with FPGA-enabled machines, then deploy our service to it. For more information, see [AKS official docs](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where#aks).\n",
"\n",
"#### Create AKS ComputeTarget"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"sample-akscompute-provision"
]
},
"outputs": [],
"source": [
"from azureml.core.compute import AksCompute, ComputeTarget\n",
"\n",
"# Uses the specific FPGA enabled VM (sku: Standard_PB6s)\n",
"# Standard_PB6s are available in: eastus, westus2, westeurope, southeastasia\n",
"prov_config = AksCompute.provisioning_configuration(vm_size = \"Standard_PB6s\",\n",
" agent_count = 1, \n",
" location = \"eastus\")\n",
"\n",
"aks_name = 'my-aks-pb6'\n",
"# Create the cluster\n",
"aks_target = ComputeTarget.create(workspace = ws, \n",
" name = aks_name, \n",
" provisioning_configuration = prov_config)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Provisioning an AKS cluster might take awhile (15 or so minutes), and we want to wait until it's successfully provisioned before we can deploy a service to it. If you interrupt this cell, provisioning of the cluster will continue. You can also check the status in your Workspace under Compute."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%time\n",
"aks_target.wait_for_completion(show_output = True)\n",
"print(aks_target.provisioning_state)\n",
"print(aks_target.provisioning_errors)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Deploy AccelContainerImage to AKS ComputeTarget"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%time\n",
"from azureml.core.webservice import Webservice, AksWebservice\n",
"\n",
"# Set the web service configuration (for creating a test service, we don't want autoscale enabled)\n",
"# Authentication is enabled by default, but for testing we specify False\n",
"aks_config = AksWebservice.deploy_configuration(autoscale_enabled=False,\n",
" num_replicas=1,\n",
" auth_enabled = False)\n",
"\n",
"aks_service_name ='my-aks-service-1'\n",
"\n",
"aks_service = Webservice.deploy_from_image(workspace = ws,\n",
" name = aks_service_name,\n",
" image = image,\n",
" deployment_config = aks_config,\n",
" deployment_target = aks_target)\n",
"aks_service.wait_for_deployment(show_output = True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"test-service\"></a>\n",
"## 7. Test the service"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 7.a. Create Client\n",
"The image supports gRPC and the TensorFlow Serving \"predict\" API. We will create a PredictionClient from the Webservice object that can call into the docker image to get predictions. If you do not have the Webservice object, you can also create [PredictionClient](https://docs.microsoft.com/en-us/python/api/azureml-accel-models/azureml.accel.predictionclient?view=azure-ml-py) directly.\n",
"\n",
"**Note:** If you chose to use auth_enabled=True when creating your AksWebservice, see documentation [here](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.webservice(class)?view=azure-ml-py#get-keys--) on how to retrieve your keys and use either key as an argument to PredictionClient(...,access_token=key).\n",
"**WARNING:** If you are running on Azure Notebooks free compute, you will not be able to make outgoing calls to your service. Try locating your client on a different machine to consume it."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Using the grpc client in AzureML Accelerated Models SDK\n",
"from azureml.accel import client_from_service\n",
"\n",
"# Initialize AzureML Accelerated Models client\n",
"client = client_from_service(aks_service)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can adapt the client [code](https://github.com/Azure/aml-real-time-ai/blob/master/pythonlib/amlrealtimeai/client.py) to meet your needs. There is also an example C# [client](https://github.com/Azure/aml-real-time-ai/blob/master/sample-clients/csharp).\n",
"\n",
"The service provides an API that is compatible with TensorFlow Serving. There are instructions to download a sample client [here](https://www.tensorflow.org/serving/setup)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 7.b. Serve the model\n",
"To understand the results we need a mapping to the human readable imagenet classes"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"classes_entries = requests.get(\"https://raw.githubusercontent.com/Lasagne/Recipes/master/examples/resnet50/imagenet_classes.txt\").text.splitlines()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Score image with input and output tensor names\n",
"results = client.score_file(path=\"./snowleopardgaze.jpg\", \n",
" input_name=input_tensors, \n",
" outputs=output_tensors)\n",
"\n",
"# map results [class_id] => [confidence]\n",
"results = enumerate(results)\n",
"# sort results by confidence\n",
"sorted_results = sorted(results, key=lambda x: x[1], reverse=True)\n",
"# print top 5 results\n",
"for top in sorted_results[:5]:\n",
" print(classes_entries[top[0]], 'confidence:', top[1])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"clean-up\"></a>\n",
"## 8. Clean-up\n",
"Run the cell below to delete your webservice, image, and model (must be done in that order). In the [next notebook](./accelerated-models-training.ipynb) you will learn how to train a classfier on a new dataset using transfer learning and finetune the weights."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"aks_service.delete()\n",
"aks_target.delete()\n",
"image.delete()\n",
"registered_model.delete()\n",
"converted_model.delete()"
]
}
],
"metadata": {
"authors": [
{
"name": "coverste"
},
{
"name": "paledger"
},
{
"name": "aibhalla"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}


@@ -1,5 +0,0 @@
name: accelerated-models-quickstart
dependencies:
- pip:
- azureml-sdk
- azureml-accel-models[cpu]


@@ -1,870 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/deployment/accelerated-models/accelerated-models-training.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Training with the Azure Machine Learning Accelerated Models Service"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook will introduce how to apply common machine learning techniques, like transfer learning, custom weights, and unquantized vs. quantized models, when working with our Azure Machine Learning Accelerated Models Service (Azure ML Accel Models).\n",
"\n",
"We will use Tensorflow for the preprocessing steps, ResNet50 for the featurizer, and the Keras API (built on Tensorflow backend) to build the classifier layers instead of the default ImageNet classifier used in Quickstart. Then we will train the model, evaluate it, and deploy it to run on an FPGA.\n",
"\n",
"#### Transfer Learning and Custom weights\n",
"We will walk you through two ways to build and train a ResNet50 model on the Kaggle Cats and Dogs dataset: transfer learning only and then transfer learning with custom weights.\n",
"\n",
"In using transfer learning, our goal is to re-purpose the ResNet50 model already trained on the [ImageNet image dataset](http://www.image-net.org/) as a basis for our training of the Kaggle Cats and Dogs dataset. The ResNet50 featurizer will be imported as frozen, so only the Keras classifier will be trained.\n",
"\n",
"With the addition of custom weights, we will build the model so that the ResNet50 featurizer weights as not frozen. This will let us retrain starting with custom weights trained with ImageNet on ResNet50 and then use the Kaggle Cats and Dogs dataset to retrain and fine-tune the quantized version of the model.\n",
"\n",
"#### Unquantized vs. Quantized models\n",
"The unquantized version of our models (ie. Resnet50, Resnet152, Densenet121, Vgg16, SsdVgg) uses native float precision (32-bit floats), which will be faster at training. We will use this for our first run through, then fine-tune the weights with the quantized version. The quantized version of our models (i.e. QuantizedResnet50, QuantizedResnet152, QuantizedDensenet121, QuantizedVgg16, QuantizedSsdVgg) will have the same node names as the unquantized version, but use quantized operations and will match the performance of the model when running on an FPGA.\n",
"\n",
"#### Contents\n",
"1. [Setup Environment](#setup)\n",
"* [Prepare Data](#prepare-data)\n",
"* [Construct Model](#construct-model)\n",
" * Preprocessor\n",
" * Classifier\n",
" * Model construction\n",
"* [Train Model](#train-model)\n",
"* [Test Model](#test-model)\n",
"* [Execution](#execution)\n",
" * [Transfer Learning](#transfer-learning)\n",
" * [Transfer Learning with Custom Weights](#custom-weights)\n",
"* [Create Image](#create-image)\n",
"* [Deploy Image](#deploy-image)\n",
"* [Test the service](#test-service)\n",
"* [Clean-up](#cleanup)\n",
"* [Appendix](#appendix)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"setup\"></a>\n",
"## 1. Setup Environment\n",
"#### 1.a. Please set up your environment as described in the [Quickstart](./accelerated-models-quickstart.ipynb), meaning:\n",
"* Make sure your Workspace config.json exists and has the correct info\n",
"* Install Tensorflow\n",
"\n",
"#### 1.b. Download dataset into ~/catsanddogs \n",
"The dataset we will be using for training can be downloaded [here](https://www.microsoft.com/en-us/download/details.aspx?id=54765). Download the zip and extract to a directory named 'catsanddogs' under your user directory (\"~/catsanddogs\"). \n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### 1.c. Import packages"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import sys\n",
"import tensorflow as tf\n",
"import numpy as np\n",
"from keras import backend as K\n",
"import sklearn\n",
"import tqdm"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### 1.d. Create directories for later use\n",
"After you train your model in float32, you'll write the weights to a place on disk. We also need a location to store the models that get downloaded."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"custom_weights_dir = os.path.expanduser(\"~/custom-weights\")\n",
"saved_model_dir = os.path.expanduser(\"~/models\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"prepare-data\"></a>\n",
"## 2. Prepare Data\n",
"Load the files we are going to use for training and testing. By default this notebook uses only a very small subset of the Cats and Dogs dataset. That makes it run relatively quickly."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import glob\n",
"import imghdr\n",
"datadir = os.path.expanduser(\"~/catsanddogs\")\n",
"\n",
"cat_files = glob.glob(os.path.join(datadir, 'PetImages', 'Cat', '*.jpg'))\n",
"dog_files = glob.glob(os.path.join(datadir, 'PetImages', 'Dog', '*.jpg'))\n",
"\n",
"# Limit the data set to make the notebook execute quickly.\n",
"cat_files = cat_files[:64]\n",
"dog_files = dog_files[:64]\n",
"\n",
"# The data set has a few images that are not jpeg. Remove them.\n",
"cat_files = [f for f in cat_files if imghdr.what(f) == 'jpeg']\n",
"dog_files = [f for f in dog_files if imghdr.what(f) == 'jpeg']\n",
"\n",
"if(not len(cat_files) or not len(dog_files)):\n",
" print(\"Please download the Kaggle Cats and Dogs dataset form https://www.microsoft.com/en-us/download/details.aspx?id=54765 and extract the zip to \" + datadir) \n",
" raise ValueError(\"Data not found\")\n",
"else:\n",
" print(cat_files[0])\n",
" print(dog_files[0])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Construct a numpy array as labels\n",
"image_paths = cat_files + dog_files\n",
"total_files = len(cat_files) + len(dog_files)\n",
"labels = np.zeros(total_files)\n",
"labels[len(cat_files):] = 1"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Split images data as training data and test data\n",
"from sklearn.model_selection import train_test_split\n",
"onehot_labels = np.array([[0,1] if i else [1,0] for i in labels])\n",
"img_train, img_test, label_train, label_test = train_test_split(image_paths, onehot_labels, random_state=42, shuffle=True)\n",
"\n",
"print(len(img_train), len(img_test), label_train.shape, label_test.shape)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"construct-model\"></a>\n",
"## 3. Construct Model\n",
"We will define the functions to handle creating the preprocessor and the classifier first, and then run them together to actually construct the model with the Resnet50 featurizer in a single Tensorflow session in a separate cell.\n",
"\n",
"We use ResNet50 for the featurizer and build our own classifier using Keras layers. We train the featurizer and the classifier as one model. We will provide parameters to determine whether we are using the quantized version and whether we are using custom weights in training or not."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 3.a. Define image preprocessing step\n",
"Same as in the Quickstart, before passing image dataset to the ResNet50 featurizer, we need to preprocess the input file to get it into the form expected by ResNet50. ResNet50 expects float tensors representing the images in BGR, channel last order. We've provided a default implementation of the preprocessing that you can use.\n",
"\n",
"**Note:** Expect to see TF deprecation warnings until we port our SDK over to use Tensorflow 2.0."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.accel.models.utils as utils\n",
"\n",
"def preprocess_images(scaling_factor=1.0):\n",
" # Convert images to 3D tensors [width,height,channel] - channels are in BGR order.\n",
" in_images = tf.placeholder(tf.string)\n",
" image_tensors = utils.preprocess_array(in_images, 'RGB', scaling_factor)\n",
" return in_images, image_tensors"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 3.b. Define classifier\n",
"We use Keras layer APIs to construct the classifier. Because we're using the tensorflow backend, we can train this classifier in one session with our Resnet50 model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def construct_classifier(in_tensor, seed=None):\n",
" from keras.layers import Dropout, Dense, Flatten\n",
" from keras.initializers import glorot_uniform\n",
" K.set_session(tf.get_default_session())\n",
"\n",
" FC_SIZE = 1024\n",
" NUM_CLASSES = 2\n",
"\n",
" x = Dropout(0.2, input_shape=(1, 1, int(in_tensor.shape[3]),), seed=seed)(in_tensor)\n",
" x = Dense(FC_SIZE, activation='relu', input_dim=(1, 1, int(in_tensor.shape[3]),),\n",
" kernel_initializer=glorot_uniform(seed=seed), bias_initializer='zeros')(x)\n",
" x = Flatten()(x)\n",
" preds = Dense(NUM_CLASSES, activation='softmax', input_dim=FC_SIZE, name='classifier_output',\n",
" kernel_initializer=glorot_uniform(seed=seed), bias_initializer='zeros')(x)\n",
" return preds"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 3.c. Define model construction\n",
"Now that the preprocessor and classifier for the model are defined, we can define how we want to construct the model. \n",
"\n",
"Constructing the model has these steps: \n",
"1. Get preprocessing steps\n",
"* Get featurizer using the Azure ML Accel Models SDK:\n",
" * import the graph definition\n",
" * restore the weights of the model into a Tensorflow session\n",
"* Get classifier\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def construct_model(quantized, starting_weights_directory = None):\n",
" from azureml.accel.models import Resnet50, QuantizedResnet50\n",
" \n",
" # Convert images to 3D tensors [width,height,channel]\n",
" in_images, image_tensors = preprocess_images(1.0)\n",
"\n",
" # Construct featurizer using quantized or unquantized ResNet50 model\n",
" if not quantized:\n",
" featurizer = Resnet50(saved_model_dir)\n",
" else:\n",
" featurizer = QuantizedResnet50(saved_model_dir, custom_weights_directory = starting_weights_directory)\n",
"\n",
" features = featurizer.import_graph_def(input_tensor=image_tensors)\n",
" \n",
" # Construct classifier\n",
" preds = construct_classifier(features)\n",
" \n",
" # Initialize weights\n",
" sess = tf.get_default_session()\n",
" tf.global_variables_initializer().run()\n",
"\n",
" featurizer.restore_weights(sess)\n",
"\n",
" return in_images, image_tensors, features, preds, featurizer"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"train-model\"></a>\n",
"## 4. Train Model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def read_files(files):\n",
" \"\"\" Read files to array\"\"\"\n",
" contents = []\n",
" for path in files:\n",
" with open(path, 'rb') as f:\n",
" contents.append(f.read())\n",
" return contents"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def train_model(preds, in_images, img_train, label_train, is_retrain = False, train_epoch = 10, learning_rate=None):\n",
" \"\"\" training model \"\"\"\n",
" from keras.objectives import binary_crossentropy\n",
" from tqdm import tqdm\n",
" \n",
" learning_rate = learning_rate if learning_rate else 0.001 if is_retrain else 0.01\n",
" \n",
" # Specify the loss function\n",
" in_labels = tf.placeholder(tf.float32, shape=(None, 2)) \n",
" cross_entropy = tf.reduce_mean(binary_crossentropy(in_labels, preds))\n",
" optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)\n",
"\n",
" def chunks(a, b, n):\n",
" \"\"\"Yield successive n-sized chunks from a and b.\"\"\"\n",
" if (len(a) != len(b)):\n",
" print(\"a and b are not equal in chunks(a,b,n)\")\n",
" raise ValueError(\"Parameter error\")\n",
"\n",
" for i in range(0, len(a), n):\n",
" yield a[i:i + n], b[i:i + n]\n",
"\n",
" chunk_size = 16\n",
" chunk_num = len(label_train) / chunk_size\n",
"\n",
" sess = tf.get_default_session()\n",
" for epoch in range(train_epoch):\n",
" avg_loss = 0\n",
" for img_chunk, label_chunk in tqdm(chunks(img_train, label_train, chunk_size)):\n",
" contents = read_files(img_chunk)\n",
" _, loss = sess.run([optimizer, cross_entropy],\n",
" feed_dict={in_images: contents,\n",
" in_labels: label_chunk,\n",
" K.learning_phase(): 1})\n",
" avg_loss += loss / chunk_num\n",
" print(\"Epoch:\", (epoch + 1), \"loss = \", \"{:.3f}\".format(avg_loss))\n",
" \n",
" # Reach desired performance\n",
" if (avg_loss < 0.001):\n",
" break"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"test-model\"></a>\n",
"## 5. Test Model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def test_model(preds, in_images, img_test, label_test):\n",
" \"\"\"Test the model\"\"\"\n",
" from keras.metrics import categorical_accuracy\n",
"\n",
" in_labels = tf.placeholder(tf.float32, shape=(None, 2))\n",
" accuracy = tf.reduce_mean(categorical_accuracy(in_labels, preds))\n",
" contents = read_files(img_test)\n",
"\n",
" accuracy = accuracy.eval(feed_dict={in_images: contents,\n",
" in_labels: label_test,\n",
" K.learning_phase(): 0})\n",
" return accuracy"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"execution\"></a>\n",
"## 6. Execute steps\n",
"You can run through the Transfer Learning section, then skip to Create AccelContainerImage. By default, because the custom weights section takes much longer for training twice, it is not saved as executable cells. You can copy the code or change cell type to 'Code'.\n",
"\n",
"<a id=\"transfer-learning\"></a>\n",
"### 6.a. Training using Transfer Learning"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%time\n",
"# Launch the training\n",
"tf.reset_default_graph()\n",
"sess = tf.Session(graph=tf.get_default_graph())\n",
"\n",
"with sess.as_default():\n",
" in_images, image_tensors, features, preds, featurizer = construct_model(quantized=True)\n",
" train_model(preds, in_images, img_train, label_train, is_retrain=False, train_epoch=10, learning_rate=0.01) \n",
" accuracy = test_model(preds, in_images, img_test, label_test) \n",
" print(\"Accuracy:\", accuracy)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Save Model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_name = 'resnet50-catsanddogs-tl'\n",
"model_save_path = os.path.join(saved_model_dir, model_name)\n",
"\n",
"tf.saved_model.simple_save(sess, model_save_path,\n",
" inputs={'images': in_images},\n",
" outputs={'output_alias': preds})\n",
"\n",
"input_tensors = in_images.name\n",
"output_tensors = preds.name\n",
"\n",
"print(input_tensors)\n",
"print(output_tensors)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"custom-weights\"></a>\n",
"### 6.b. Traning using Custom Weights\n",
"\n",
"Because the quantized graph defintion and the float32 graph defintion share the same node names in the graph definitions, we can initally train the weights in float32, and then reload them with the quantized operations (which take longer) to fine-tune the model.\n",
"\n",
"First we train the model with custom weights but without quantization. Training is done with native float precision (32-bit floats). We load the training data set and batch the training with 10 epochs. When the performance reaches desired level or starts decredation, we stop the training iteration and save the weights as tensorflow checkpoint files. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Launch the training\n",
"```\n",
"tf.reset_default_graph()\n",
"sess = tf.Session(graph=tf.get_default_graph())\n",
"\n",
"with sess.as_default():\n",
" in_images, image_tensors, features, preds, featurizer = construct_model(quantized=False)\n",
" train_model(preds, in_images, img_train, label_train, is_retrain=False, train_epoch=10) \n",
" accuracy = test_model(preds, in_images, img_test, label_test) \n",
" print(\"Accuracy:\", accuracy)\n",
" featurizer.save_weights(custom_weights_dir + \"/rn50\", tf.get_default_session())\n",
"```"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Test Model\n",
"After training, we evaluate the trained model's accuracy on test dataset with quantization. So that we know the model's performance if it is deployed on the FPGA."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"```\n",
"tf.reset_default_graph()\n",
"sess = tf.Session(graph=tf.get_default_graph())\n",
"\n",
"with sess.as_default():\n",
" print(\"Testing trained model with quantization\")\n",
" in_images, image_tensors, features, preds, quantized_featurizer = construct_model(quantized=True, starting_weights_directory=custom_weights_dir)\n",
" accuracy = test_model(preds, in_images, img_test, label_test) \n",
" print(\"Accuracy:\", accuracy)\n",
"```"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Fine-Tune Model\n",
"Sometimes, the model's accuracy can drop significantly after quantization. In those cases, we need to retrain the model enabled with quantization to get better model accuracy."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"```\n",
"if (accuracy < 0.93):\n",
" with sess.as_default():\n",
" print(\"Fine-tuning model with quantization\")\n",
" train_model(preds, in_images, img_train, label_train, is_retrain=True, train_epoch=10)\n",
" accuracy = test_model(preds, in_images, img_test, label_test) \n",
" print(\"Accuracy:\", accuracy)\n",
"```"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Save Model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"```\n",
"model_name = 'resnet50-catsanddogs-cw'\n",
"model_save_path = os.path.join(saved_model_dir, model_name)\n",
"\n",
"tf.saved_model.simple_save(sess, model_save_path,\n",
" inputs={'images': in_images},\n",
" outputs={'output_alias': preds})\n",
"\n",
"input_tensors = in_images.name\n",
"output_tensors = preds.name\n",
"```"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"create-image\"></a>\n",
"## 7. Create AccelContainerImage\n",
"\n",
"Below we will execute all the same steps as in the [Quickstart](./accelerated-models-quickstart.ipynb#create-image) to package the model we have saved locally into an accelerated Docker image saved in our workspace. To complete all the steps, it may take a few minutes. For more details on each step, check out the [Quickstart section on model registration](./accelerated-models-quickstart.ipynb#register-model)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Workspace\n",
"from azureml.core.model import Model\n",
"from azureml.core.image import Image\n",
"from azureml.accel import AccelOnnxConverter\n",
"from azureml.accel import AccelContainerImage\n",
"\n",
"# Retrieve workspace\n",
"ws = Workspace.from_config()\n",
"print(\"Successfully retrieved workspace:\", ws.name, ws.resource_group, ws.location, ws.subscription_id, '\\n')\n",
"\n",
"# Register model\n",
"registered_model = Model.register(workspace = ws,\n",
" model_path = model_save_path,\n",
" model_name = model_name)\n",
"print(\"Successfully registered: \", registered_model.name, registered_model.description, registered_model.version, '\\n', sep = '\\t')\n",
"\n",
"# Convert model\n",
"convert_request = AccelOnnxConverter.convert_tf_model(ws, registered_model, input_tensors, output_tensors)\n",
"if convert_request.wait_for_completion(show_output = False):\n",
" # If the above call succeeded, get the converted model\n",
" converted_model = convert_request.result\n",
" print(\"\\nSuccessfully converted: \", converted_model.name, converted_model.url, converted_model.version, \n",
" converted_model.id, converted_model.created_time, '\\n')\n",
"else:\n",
" print(\"Model conversion failed. Showing output.\")\n",
" convert_request.wait_for_completion(show_output = True)\n",
"\n",
"# Package into AccelContainerImage\n",
"image_config = AccelContainerImage.image_configuration()\n",
"# Image name must be lowercase\n",
"image_name = \"{}-image\".format(model_name)\n",
"image = Image.create(name = image_name,\n",
" models = [converted_model],\n",
" image_config = image_config, \n",
" workspace = ws)\n",
"image.wait_for_creation()\n",
"print(\"Created AccelContainerImage: {} {} {}\\n\".format(image.name, image.creation_state, image.image_location))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"deploy-image\"></a>\n",
"## 8. Deploy image\n",
"Once you have an Azure ML Accelerated Image in your Workspace, you can deploy it to two destinations, to a Databox Edge machine or to an AKS cluster. \n",
"\n",
"### 8.a. Deploy to Databox Edge Machine using IoT Hub\n",
"See the sample [here](https://github.com/Azure-Samples/aml-real-time-ai/) for using the Azure IoT CLI extension for deploying your Docker image to your Databox Edge Machine.\n",
"\n",
"### 8.b. Deploy to AKS Cluster"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Create AKS ComputeTarget"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import AksCompute, ComputeTarget\n",
"\n",
"# Uses the specific FPGA enabled VM (sku: Standard_PB6s)\n",
"# Standard_PB6s are available in: eastus, westus2, westeurope, southeastasia\n",
"prov_config = AksCompute.provisioning_configuration(vm_size = \"Standard_PB6s\",\n",
" agent_count = 1,\n",
" location = \"eastus\")\n",
"\n",
"aks_name = 'aks-pb6-tl'\n",
"# Create the cluster\n",
"aks_target = ComputeTarget.create(workspace = ws, \n",
" name = aks_name, \n",
" provisioning_configuration = prov_config)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Provisioning an AKS cluster might take awhile (15 or so minutes), and we want to wait until it's successfully provisioned before we can deploy a service to it. If you interrupt this cell, provisioning of the cluster will continue. You can re-run it or check the status in your Workspace under Compute."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%time\n",
"aks_target.wait_for_completion(show_output = True)\n",
"print(aks_target.provisioning_state)\n",
"print(aks_target.provisioning_errors)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Deploy AccelContainerImage to AKS ComputeTarget"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"sample-akswebservice-deploy-from-image"
]
},
"outputs": [],
"source": [
"%%time\n",
"from azureml.core.webservice import Webservice, AksWebservice\n",
"\n",
"# Set the web service configuration (for creating a test service, we don't want autoscale enabled)\n",
"# Authentication is enabled by default, but for testing we specify False\n",
"aks_config = AksWebservice.deploy_configuration(autoscale_enabled=False,\n",
" num_replicas=1,\n",
" auth_enabled = False)\n",
"\n",
"aks_service_name ='my-aks-service-2'\n",
"\n",
"aks_service = Webservice.deploy_from_image(workspace = ws,\n",
" name = aks_service_name,\n",
" image = image,\n",
" deployment_config = aks_config,\n",
" deployment_target = aks_target)\n",
"aks_service.wait_for_deployment(show_output = True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"test-service\"></a>\n",
"## 9. Test the service\n",
"\n",
"<a id=\"create-client\"></a>\n",
"### 9.a. Create Client\n",
"The image supports gRPC and the TensorFlow Serving \"predict\" API. We will create a PredictionClient from the Webservice object that can call into the docker image to get predictions. If you do not have the Webservice object, you can also create [PredictionClient](https://docs.microsoft.com/en-us/python/api/azureml-accel-models/azureml.accel.predictionclient?view=azure-ml-py) directly.\n",
"\n",
"**Note:** If you chose to use auth_enabled=True when creating your AksWebservice.deploy_configuration(), see documentation [here](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.webservice(class)?view=azure-ml-py#get-keys--) on how to retrieve your keys and use either key as an argument to PredictionClient(...,access_token=key).\n",
"**WARNING:** If you are running on Azure Notebooks free compute, you will not be able to make outgoing calls to your service. Try locating your client on a different machine to consume it."
]
},
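{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you don't have the Webservice object at hand (for example, on a different client machine), here is a minimal sketch of constructing the client directly; the address, port, and key below are placeholders you must replace with your service's values:\n",
"```python\n",
"from azureml.accel import PredictionClient\n",
"\n",
"# Placeholder connection details; look them up on your deployed service.\n",
"client = PredictionClient(address=\"<scoring-host>\", port=80,\n",
"                          use_ssl=False, access_token=\"<key-if-auth-enabled>\")\n",
"```"
]
},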
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Using the grpc client in AzureML Accelerated Models SDK\n",
"from azureml.accel import client_from_service\n",
"\n",
"# Initialize AzureML Accelerated Models client\n",
"client = client_from_service(aks_service)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"serve-model\"></a>\n",
"### 9.b. Serve the model\n",
"Let's see how our service does on a few images. It may get a few wrong."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Specify an image to classify\n",
"print('CATS')\n",
"for image_file in cat_files[:8]:\n",
" results = client.score_file(path=image_file, \n",
" input_name=input_tensors, \n",
" outputs=output_tensors)\n",
" result = 'CORRECT ' if results[0] > results[1] else 'WRONG '\n",
" print(result + str(results))\n",
"print('DOGS')\n",
"for image_file in dog_files[:8]:\n",
" results = client.score_file(path=image_file, \n",
" input_name=input_tensors, \n",
" outputs=output_tensors)\n",
" result = 'CORRECT ' if results[1] > results[0] else 'WRONG '\n",
" print(result + str(results))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"cleanup\"></a>\n",
"## 10. Cleanup\n",
"It's important to clean up your resources, so that you won't incur unnecessary costs."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"aks_service.delete()\n",
"aks_target.delete()\n",
"image.delete()\n",
"registered_model.delete()\n",
"converted_model.delete()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"appendix\"></a>\n",
"## 11. Appendix"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"License for plot_confusion_matrix:\n",
"\n",
"New BSD License\n",
"\n",
"Copyright (c) 2007-2018 The scikit-learn developers.\n",
"All rights reserved.\n",
"\n",
"\n",
"Redistribution and use in source and binary forms, with or without\n",
"modification, are permitted provided that the following conditions are met:\n",
"\n",
" a. Redistributions of source code must retain the above copyright notice,\n",
" this list of conditions and the following disclaimer.\n",
" b. Redistributions in binary form must reproduce the above copyright\n",
" notice, this list of conditions and the following disclaimer in the\n",
" documentation and/or other materials provided with the distribution.\n",
" c. Neither the name of the Scikit-learn Developers nor the names of\n",
" its contributors may be used to endorse or promote products\n",
" derived from this software without specific prior written\n",
" permission. \n",
"\n",
"\n",
"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n",
"AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n",
"IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n",
"ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR\n",
"ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n",
"DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n",
"SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n",
"CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n",
"LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n",
"OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\n",
"DAMAGE.\n"
]
}
],
"metadata": {
"authors": [
{
"name": "coverste"
},
{
"name": "paledger"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,8 +0,0 @@
name: accelerated-models-training
dependencies:
- pip:
- azureml-sdk
- azureml-accel-models[cpu]
- keras
- tqdm
- sklearn

Binary file not shown.


Binary file not shown.


View File

@@ -3,6 +3,6 @@ dependencies:
- python=3.6.2
- pip:
- azureml-defaults
- scikit-learn
- scikit-learn==0.19.1
- numpy
- inference-schema[numpy-support]

View File

@@ -233,7 +233,8 @@
" 'inference-schema[numpy-support]',\n",
" 'joblib',\n",
" 'numpy',\n",
" 'scikit-learn'\n",
" 'scikit-learn==0.19.1',\n",
" 'scipy'\n",
"])\n",
"inference_config = InferenceConfig(entry_script='score.py', environment=environment)\n",
"# if cpu and memory_in_gb parameters are not provided\n",

View File

@@ -5,7 +5,7 @@
"metadata": {},
"source": [
"# Enabling App Insights for Services in Production\n",
"With this notebook, you can learn how to enable App Insights for standard service monitoring, plus, we provide examples for doing custom logging within a scoring files in a model. \n",
"With this notebook, you can learn how to enable App Insights for standard service monitoring, plus, we provide examples for doing custom logging within a scoring files in a model.\n",
"\n",
"\n",
"## What does Application Insights monitor?\n",
@@ -45,11 +45,13 @@
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"import json\n",
"\n",
"from azureml.core import Workspace\n",
"from azureml.core.compute import AksCompute, ComputeTarget\n",
"from azureml.core.webservice import AksWebservice\n",
"import azureml.core\n",
"import json\n",
"\n",
"print(azureml.core.VERSION)"
]
},
@@ -67,7 +69,7 @@
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\\n')"
]
},
{
@@ -84,13 +86,13 @@
"metadata": {},
"outputs": [],
"source": [
"#Register the model\n",
"from azureml.core.model import Model\n",
"model = Model.register(model_path = \"sklearn_regression_model.pkl\", # this points to a local file\n",
" model_name = \"sklearn_regression_model.pkl\", # this is the name the model is registered as\n",
" tags = {'area': \"diabetes\", 'type': \"regression\"},\n",
" description = \"Ridge regression model to predict diabetes\",\n",
" workspace = ws)\n",
"from azureml.core import Model\n",
"\n",
"model = Model.register(model_path=\"sklearn_regression_model.pkl\", # This points to a local file.\n",
" model_name=\"sklearn_regression_model.pkl\", # This is the name the model is registered as.\n",
" tags={'area': \"diabetes\", 'type': \"regression\"},\n",
" description=\"Ridge regression model to predict diabetes\",\n",
" workspace=ws)\n",
"\n",
"print(model.name, model.description, model.version)"
]
@@ -120,7 +122,7 @@
"import os\n",
"import pickle\n",
"import json\n",
"import numpy \n",
"import numpy\n",
"from sklearn.externals import joblib\n",
"from sklearn.linear_model import Ridge\n",
"import time\n",
@@ -129,15 +131,15 @@
" global model\n",
" #Print statement for appinsights custom traces:\n",
" print (\"model initialized\" + time.strftime(\"%H:%M:%S\"))\n",
" \n",
"\n",
" # AZUREML_MODEL_DIR is an environment variable created during deployment.\n",
" # It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)\n",
" # For multiple models, it points to the folder containing all deployed models (./azureml-models)\n",
" model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'sklearn_regression_model.pkl')\n",
" \n",
"\n",
" # deserialize the model file back into a sklearn model\n",
" model = joblib.load(model_path)\n",
" \n",
"\n",
"\n",
"# note you can pass in multiple rows for scoring\n",
"def run(raw_data):\n",
@@ -168,7 +170,7 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.conda_dependencies import CondaDependencies \n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"\n",
"myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'],\n",
" pip_packages=['azureml-defaults'])\n",
@@ -190,9 +192,8 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.model import InferenceConfig\n",
"from azureml.core.environment import Environment\n",
"\n",
"from azureml.core.model import InferenceConfig\n",
"\n",
"myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\n",
"inference_config = InferenceConfig(entry_script=\"score.py\", environment=myenv)"
@@ -213,11 +214,11 @@
"source": [
"from azureml.core.webservice import AciWebservice\n",
"\n",
"aci_deployment_config = AciWebservice.deploy_configuration(cpu_cores = 1, \n",
" memory_gb = 1, \n",
" tags = {'area': \"diabetes\", 'type': \"regression\"}, \n",
" description = 'Predict diabetes using regression model',\n",
" enable_app_insights = True)"
"aci_deployment_config = AciWebservice.deploy_configuration(cpu_cores=1,\n",
" memory_gb=1,\n",
" tags={'area': \"diabetes\", 'type': \"regression\"},\n",
" description=\"Predict diabetes using regression model\",\n",
" enable_app_insights=True)"
]
},
{
@@ -226,29 +227,14 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.webservice import Webservice\n",
"aci_service_name = \"aci-service-appinsights\"\n",
"\n",
"aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aci_deployment_config, overwrite=True)\n",
"aci_service.wait_for_deployment(show_output=True)\n",
"\n",
"aci_service_name = 'my-aci-service-4'\n",
"aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aci_deployment_config)\n",
"aci_service.wait_for_deployment(True)\n",
"print(aci_service.state)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%time\n",
"\n",
"test_sample = json.dumps({'data': [\n",
" [1,28,13,45,54,6,57,8,8,10], \n",
" [101,9,8,37,6,45,4,3,2,41]\n",
"]})\n",
"test_sample = bytes(test_sample,encoding='utf8')"
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -256,7 +242,15 @@
"outputs": [],
"source": [
"if aci_service.state == \"Healthy\":\n",
" prediction = aci_service.run(input_data=test_sample)\n",
" test_sample = json.dumps({\n",
" \"data\": [\n",
" [1,28,13,45,54,6,57,8,8,10],\n",
" [101,9,8,37,6,45,4,3,2,41]\n",
" ]\n",
" })\n",
"\n",
" prediction = aci_service.run(test_sample)\n",
"\n",
" print(prediction)\n",
"else:\n",
" raise ValueError(\"Service deployment isn't healthy, can't call the service. Error: \", aci_service.error)"
@@ -282,14 +276,21 @@
"metadata": {},
"outputs": [],
"source": [
"# Use the default configuration (can also provide parameters to customize)\n",
"prov_config = AksCompute.provisioning_configuration()\n",
"from azureml.exceptions import ComputeTargetException\n",
"\n",
"aks_name = 'my-aks-test3' \n",
"# Create the cluster\n",
"aks_target = ComputeTarget.create(workspace = ws, \n",
" name = aks_name, \n",
" provisioning_configuration = prov_config)"
"aks_name = \"my-aks\"\n",
"\n",
"try:\n",
" aks_target = ComputeTarget(ws, aks_name)\n",
" print(\"Using existing AKS cluster {}.\".format(aks_name))\n",
"except ComputeTargetException:\n",
" print(\"Creating a new AKS cluster {}.\".format(aks_name))\n",
"\n",
" # Use the default configuration (can also provide parameters to customize).\n",
" prov_config = AksCompute.provisioning_configuration()\n",
" aks_target = ComputeTarget.create(workspace=ws,\n",
" name=aks_name,\n",
" provisioning_configuration=prov_config)"
]
},
{
@@ -299,7 +300,8 @@
"outputs": [],
"source": [
"%%time\n",
"aks_target.wait_for_completion(show_output = True)"
"if aks_target.provisioning_state != \"Succeeded\":\n",
" aks_target.wait_for_completion(show_output=True)"
]
},
{
@@ -323,13 +325,13 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"```python \n",
"```python\n",
"%%time\n",
"resource_id = '/subscriptions/<subscriptionid>/resourcegroups/<resourcegroupname>/providers/Microsoft.ContainerService/managedClusters/<aksservername>'\n",
"create_name= 'myaks4'\n",
"attach_config = AksCompute.attach_configuration(resource_id=resource_id)\n",
"aks_target = ComputeTarget.attach(workspace = ws, \n",
" name = create_name, \n",
"aks_target = ComputeTarget.attach(workspace=ws,\n",
" name=create_name,\n",
" attach_configuration=attach_config)\n",
"## Wait for the operation to complete\n",
"aks_target.wait_for_provisioning(True)```"
@@ -349,7 +351,7 @@
"metadata": {},
"outputs": [],
"source": [
"#Set the web service configuration\n",
"# Set the web service configuration.\n",
"aks_deployment_config = AksWebservice.deploy_configuration(enable_app_insights=True)"
]
},
@@ -366,15 +368,16 @@
"metadata": {},
"outputs": [],
"source": [
"if aks_target.provisioning_state== \"Succeeded\": \n",
" aks_service_name ='aks-w-dc5'\n",
"if aks_target.provisioning_state == \"Succeeded\":\n",
" aks_service_name = \"aks-service-appinsights\"\n",
" aks_service = Model.deploy(ws,\n",
" aks_service_name, \n",
" [model], \n",
" inference_config, \n",
" aks_deployment_config, \n",
" deployment_target = aks_target) \n",
" aks_service.wait_for_deployment(show_output = True)\n",
" aks_service_name,\n",
" [model],\n",
" inference_config,\n",
" aks_deployment_config,\n",
" deployment_target=aks_target,\n",
" overwrite=True)\n",
" aks_service.wait_for_deployment(show_output=True)\n",
" print(aks_service.state)\n",
"else:\n",
" raise ValueError(\"AKS provisioning failed. Error: \", aks_service.error)"
@@ -395,13 +398,14 @@
"source": [
"%%time\n",
"\n",
"test_sample = json.dumps({'data': [\n",
" [1,28,13,45,54,6,57,8,8,10], \n",
" [101,9,8,37,6,45,4,3,2,41]\n",
"]})\n",
"test_sample = bytes(test_sample,encoding='utf8')\n",
"\n",
"if aks_service.state == \"Healthy\":\n",
" test_sample = json.dumps({\n",
" \"data\": [\n",
" [1,28,13,45,54,6,57,8,8,10],\n",
" [101,9,8,37,6,45,4,3,2,41]\n",
" ]\n",
" })\n",
"\n",
" prediction = aks_service.run(input_data=test_sample)\n",
" print(prediction)\n",
"else:\n",
@@ -435,7 +439,7 @@
"outputs": [],
"source": [
"aks_service.update(enable_app_insights=False)\n",
"aks_service.wait_for_deployment(show_output = True)"
"aks_service.wait_for_deployment(show_output=True)"
]
},
{

View File

@@ -115,6 +115,11 @@
"# Convert from CoreML into ONNX\n",
"onnx_model = onnxmltools.convert_coreml(coreml_model, 'TinyYOLOv2')\n",
"\n",
"# Fix the preprocessor bias in the ImageScaler\n",
"for init in onnx_model.graph.initializer:\n",
" if init.name == 'scalerPreprocessor_bias':\n",
" init.dims[1] = 1\n",
"\n",
"# Save ONNX model\n",
"onnxmltools.utils.save_model(onnx_model, 'tinyyolov2.onnx')\n",
"\n",
@@ -255,7 +260,7 @@
"source": [
"from azureml.core.conda_dependencies import CondaDependencies \n",
"\n",
"myenv = CondaDependencies.create(pip_packages=[\"numpy\", \"onnxruntime==0.4.0\", \"azureml-core\", \"azureml-defaults\"])\n",
"myenv = CondaDependencies.create(pip_packages=[\"numpy\", \"onnxruntime\", \"azureml-core\", \"azureml-defaults\"])\n",
"\n",
"with open(\"myenv.yml\",\"w\") as f:\n",
" f.write(myenv.serialize_to_string())"
@@ -316,7 +321,7 @@
"metadata": {},
"outputs": [],
"source": [
"aci_service_name = 'my-aci-service-15ad'\n",
"aci_service_name = 'my-aci-service-tiny-yolo'\n",
"print(\"Service\", aci_service_name)\n",
"aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)\n",
"aci_service.wait_for_deployment(True)\n",

View File

@@ -4,4 +4,5 @@ dependencies:
- azureml-sdk
- numpy
- git+https://github.com/apple/coremltools@v2.1
- onnx<1.7.0
- onnxmltools

View File

@@ -5,5 +5,5 @@ dependencies:
- azureml-widgets
- matplotlib
- numpy
- onnx
- onnx<1.7.0
- opencv-python-headless

View File

@@ -5,5 +5,5 @@ dependencies:
- azureml-widgets
- matplotlib
- numpy
- onnx
- onnx<1.7.0
- opencv-python-headless

File diff suppressed because one or more lines are too long

View File

@@ -1,260 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/deployment/tensorflow/tensorflow-model-register-and-deploy.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Register TensorFlow SavedModel and deploy as webservice\n",
"\n",
"Following this notebook, you will:\n",
"\n",
" - Learn how to register a TF SavedModel in your Azure Machine Learning Workspace.\n",
" - Deploy your model as a web service in an Azure Container Instance."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Prerequisites\n",
"\n",
"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the [configuration notebook](../../../configuration.ipynb) to install the Azure Machine Learning Python SDK and create a workspace."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"\n",
"# Check core SDK version number.\n",
"print('SDK version:', azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize workspace\n",
"\n",
"Create a [Workspace](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.workspace%28class%29?view=azure-ml-py) object from your persisted configuration."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"create workspace"
]
},
"outputs": [],
"source": [
"from azureml.core import Workspace\n",
"\n",
"ws = Workspace.from_config()\n",
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\\n')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Download the Model\n",
"\n",
"Download and extract the model from https://amlsamplenotebooksdata.blob.core.windows.net/data/flowers_model.tar.gz to \"models\" directory"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import tarfile\n",
"import urllib.request\n",
"\n",
"# create directory for model\n",
"model_dir = 'models'\n",
"if not os.path.isdir(model_dir):\n",
" os.mkdir(model_dir)\n",
"\n",
"url=\"https://amlsamplenotebooksdata.blob.core.windows.net/data/flowers_model.tar.gz\"\n",
"response = urllib.request.urlretrieve(url, model_dir + \"/flowers_model.tar.gz\")\n",
"tar = tarfile.open(model_dir + \"/flowers_model.tar.gz\", \"r:gz\")\n",
"tar.extractall(model_dir)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Register model\n",
"\n",
"Register a file or folder as a model by calling [Model.register()](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.model.model?view=azure-ml-py#register-workspace--model-path--model-name--tags-none--properties-none--description-none--datasets-none--model-framework-none--model-framework-version-none--child-paths-none-). For this example, we have provided a TensorFlow SavedModel (`flowers_model` in the notebook's directory).\n",
"\n",
"In addition to the content of the model file itself, your registered model will also store model metadata -- model description, tags, and framework information -- that will be useful when managing and deploying models in your workspace. Using tags, for instance, you can categorize your models and apply filters when listing models in your workspace. Also, marking this model with the scikit-learn framework will simplify deploying it as a web service, as we'll see later."
]
},
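{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick illustration of tag-based filtering, here is a sketch that assumes models registered with a hypothetical 'framework' tag:\n",
"```python\n",
"from azureml.core import Model\n",
"\n",
"# List only the models whose 'framework' tag equals 'tensorflow' (hypothetical tag).\n",
"for m in Model.list(ws, tags=[['framework', 'tensorflow']]):\n",
"    print(m.name, m.version)\n",
"```"
]
},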
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"register model from file"
]
},
"outputs": [],
"source": [
"from azureml.core import Model\n",
"\n",
"model = Model.register(workspace=ws,\n",
" model_name='flowers', # Name of the registered model in your workspace.\n",
" model_path= model_dir + '/flowers_model', # Local Tensorflow SavedModel folder to upload and register as a model.\n",
" model_framework=Model.Framework.TENSORFLOW, # Framework used to create the model.\n",
" model_framework_version='1.14.0', # Version of Tensorflow used to create the model.\n",
" description='Flowers model')\n",
"\n",
"print('Name:', model.name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Deploy model\n",
"\n",
"Deploy your model as a web service using [Model.deploy()](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.model.model?view=azure-ml-py#deploy-workspace--name--models--inference-config--deployment-config-none--deployment-target-none-). Web services take one or more models, load them in an environment, and run them on one of several supported deployment targets.\n",
"\n",
"For this example, we will deploy your TensorFlow SavedModel to an Azure Container Instance (ACI)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Use a default environment (for supported models)\n",
"\n",
"The Azure Machine Learning service provides a default environment for supported model frameworks, including TensorFlow, based on the metadata you provided when registering your model. This is the easiest way to deploy your model.\n",
"\n",
"**Note**: This step can take several minutes."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Webservice\n",
"from azureml.exceptions import WebserviceException\n",
"\n",
"service_name = 'tensorflow-flower-service'\n",
"\n",
"# Remove any existing service under the same name.\n",
"try:\n",
" Webservice(ws, service_name).delete()\n",
"except WebserviceException:\n",
" pass\n",
"\n",
"service = Model.deploy(ws, service_name, [model])\n",
"service.wait_for_deployment(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"After your model is deployed, perform a call to the web service."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"\n",
"headers = {'Content-Type': 'application/json'}\n",
"\n",
"if service.auth_enabled:\n",
" headers['Authorization'] = 'Bearer '+ service.get_keys()[0]\n",
"elif service.token_auth_enabled:\n",
" headers['Authorization'] = 'Bearer '+ service.get_token()[0]\n",
"\n",
"scoring_uri = service.scoring_uri # If you have a SavedModel with classify and regress, \n",
" # you can change the scoring_uri from 'uri:predict' to 'uri:classify' or 'uri:regress'.\n",
"print(scoring_uri)\n",
"\n",
"with open('tensorflow-flower-predict-input.json', 'rb') as data_file:\n",
" response = requests.post(\n",
" scoring_uri, data=data_file, headers=headers)\n",
"print(response.status_code)\n",
"print(response.elapsed)\n",
"print(response.json())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"When you are finished testing your service, clean up the deployment."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"service.delete()"
]
}
],
"metadata": {
"authors": [
{
"name": "vaidyas"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.0"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,4 +0,0 @@
name: tensorflow-model-register-and-deploy
dependencies:
- pip:
- azureml-sdk

View File

@@ -58,7 +58,7 @@
"\n",
"Problem: Boston Housing Price Prediction with scikit-learn (train a model and run an explainer remotely via AMLCompute, and download and visualize the remotely-calculated explanations.)\n",
"\n",
"| ![explanations-run-history](./img/explanations-run-history.PNG) |\n",
"| ![explanations-run-history](./img/explanations-run-history.png) |\n",
"|:--:|\n"
]
},
@@ -672,7 +672,7 @@
"source": [
"# retrieve model for visualization and deployment\n",
"from azureml.core.model import Model\n",
"from sklearn.externals import joblib\n",
"import joblib\n",
"original_model = Model(ws, 'model_explain_model_on_amlcomp')\n",
"model_path = original_model.download(exist_ok=True)\n",
"original_model = joblib.load(model_path)"
@@ -692,7 +692,7 @@
"outputs": [],
"source": [
"# retrieve x_test for visualization\n",
"from sklearn.externals import joblib\n",
"import joblib\n",
"x_test_path = './x_test_boston_housing.pkl'\n",
"run.download_file('x_test_boston_housing.pkl', output_file_path=x_test_path)"
]

View File

@@ -7,7 +7,7 @@ from interpret.ext.blackbox import TabularExplainer
from azureml.contrib.interpret.explanation.explanation_client import ExplanationClient
from sklearn.model_selection import train_test_split
from azureml.core.run import Run
from sklearn.externals import joblib
import joblib
import os
import numpy as np

View File

@@ -3,7 +3,7 @@ import numpy as np
import pandas as pd
import os
import pickle
from sklearn.externals import joblib
import joblib
from sklearn.linear_model import LogisticRegression
from azureml.core.model import Model

View File

@@ -1,33 +0,0 @@
import json
import pandas as pd
from sklearn.externals import joblib
from azureml.core.model import Model
import tensorflow as tf
def init():
global preprocess
global network
global scoring_explainer
# Retrieve the path to the model file using the model name
# Assume original model is named original_prediction_model
featurize_path = Model.get_model_path('featurize')
keras_model_path = Model.get_model_path('keras_model')
scoring_explainer_path = Model.get_model_path('IBM_attrition_explainer')
preprocess = joblib.load(featurize_path)
network = tf.keras.models.load_model(keras_model_path)
scoring_explainer = joblib.load(scoring_explainer_path)
def run(raw_data):
# Get predictions and explanations for each data point
data = pd.read_json(raw_data)
preprocessed_data = preprocess.transform(data)
# Make prediction
predictions = network.predict(preprocessed_data)
# Retrieve model explanations
local_importance_values = scoring_explainer.explain(data)
# You can return any data type as long as it is JSON-serializable
return {'predictions': predictions.tolist(), 'local_importance_values': local_importance_values}

View File

@@ -3,7 +3,7 @@ import numpy as np
import pandas as pd
import os
import pickle
from sklearn.externals import joblib
import joblib
from sklearn.linear_model import LogisticRegression
from azureml.core.model import Model

View File

@@ -1,612 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-locally-and-deploy.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Train and explain keras model locally and deploy model with scoring explainer\n",
"\n",
"\n",
"_**This notebook illustrates how to use the Azure Machine Learning Interpretability SDK to deploy a locally-trained keras model and its corresponding deep scoring explainer to Azure Container Instances (ACI) as a web service.**_\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"Problem: IBM employee attrition classification with keras (train and explain a model locally and use Azure Container Instances (ACI) for deploying your model and its corresponding deep scoring explainer as a web service.)\n",
"\n",
"---\n",
"\n",
"## Table of Contents\n",
"\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Run model explainer locally at training time](#Explain)\n",
" 1. Apply feature transformations\n",
" 1. Train a binary classification keras model\n",
" 1. Explain the model on raw features\n",
" 1. Generate global explanations\n",
" 1. Generate local explanations\n",
"1. [Visualize explanations](#Visualize)\n",
"1. [Deploy keras model and scoring explainer](#Deploy)\n",
"1. [Next steps](#Next)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"\n",
"\n",
"This notebook showcases how to train and explain a keras classification model locally, and deploy the trained model and its corresponding DeepExplainer to Azure Container Instances (ACI).\n",
"It demonstrates the API calls that you need to make to submit a run for training and explaining a keras model to AMLCompute, download the compute explanations remotely, and visualizing the global and local explanations via a visualization dashboard that provides an interactive way of discovering patterns in model predictions and downloaded explanations. It also demonstrates how to use Azure Machine Learning MLOps capabilities to deploy your keras model and its corresponding DeepExplainer.\n",
"\n",
"We will showcase one of the tabular data explainers, DeepExplainer (SHAP), following these steps:\n",
"1.\tDevelop a machine learning script in Python which involves the training script and the explanation script.\n",
"2.\tRun the script locally.\n",
"3.\tUse the interpretability toolkit\u00e2\u20ac\u2122s visualization dashboard to visualize predictions and their explanation. If the metrics and explanations don't indicate a desired outcome, loop back to step 1 and iterate on your scripts.\n",
"5.\tAfter a satisfactory run is found, create a Deep Scoring Explainer and register the persisted model and its corresponding DeepExplainer in the model registry.\n",
"6.\tDevelop a scoring script.\n",
"7.\tCreate an image and register it in the image registry.\n",
"8.\tDeploy the image as a web service in Azure.\n",
"\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"Make sure you go through the [configuration notebook](../../../../configuration.ipynb) first if you haven't."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Check core SDK version number\n",
"import azureml.core\n",
"\n",
"print(\"SDK version:\", azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize a Workspace\n",
"\n",
"Initialize a workspace object from persisted configuration"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"create workspace"
]
},
"outputs": [],
"source": [
"from azureml.core import Workspace\n",
"\n",
"ws = Workspace.from_config()\n",
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\\n')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Explain\n",
"Create An Experiment: **Experiment** is a logical container in an Azure ML Workspace. It hosts run records which can include run metrics and output artifacts from your experiments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"experiment_name = 'explain_model_at_scoring_time'\n",
"experiment = Experiment(workspace=ws, name=experiment_name)\n",
"run = experiment.start_logging()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# get IBM attrition data\n",
"import os\n",
"import pandas as pd\n",
"\n",
"outdirname = 'dataset.6.21.19'\n",
"try:\n",
" from urllib import urlretrieve\n",
"except ImportError:\n",
" from urllib.request import urlretrieve\n",
"import zipfile\n",
"zipfilename = outdirname + '.zip'\n",
"urlretrieve('https://publictestdatasets.blob.core.windows.net/data/' + zipfilename, zipfilename)\n",
"with zipfile.ZipFile(zipfilename, 'r') as unzip:\n",
" unzip.extractall('.')\n",
"attritionData = pd.read_csv('./WA_Fn-UseC_-HR-Employee-Attrition.csv')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.model_selection import train_test_split\n",
"from sklearn.externals import joblib\n",
"from sklearn.preprocessing import StandardScaler, OneHotEncoder\n",
"from sklearn.impute import SimpleImputer\n",
"from sklearn.pipeline import Pipeline\n",
"from sklearn_pandas import DataFrameMapper\n",
"\n",
"os.makedirs('./outputs', exist_ok=True)\n",
"\n",
"# Dropping Employee count as all values are 1 and hence attrition is independent of this feature\n",
"attritionData = attritionData.drop(['EmployeeCount'], axis=1)\n",
"# Dropping Employee Number since it is merely an identifier\n",
"attritionData = attritionData.drop(['EmployeeNumber'], axis=1)\n",
"attritionData = attritionData.drop(['Over18'], axis=1)\n",
"# Since all values are 80\n",
"attritionData = attritionData.drop(['StandardHours'], axis=1)\n",
"\n",
"# Converting target variables from string to numerical values\n",
"target_map = {'Yes': 1, 'No': 0}\n",
"attritionData[\"Attrition_numerical\"] = attritionData[\"Attrition\"].apply(lambda x: target_map[x])\n",
"target = attritionData[\"Attrition_numerical\"]\n",
"\n",
"attritionXData = attritionData.drop(['Attrition_numerical', 'Attrition'], axis=1)\n",
"\n",
"# Creating dummy columns for each categorical feature\n",
"categorical = []\n",
"for col, value in attritionXData.iteritems():\n",
" if value.dtype == 'object':\n",
" categorical.append(col)\n",
"\n",
"# Store the numerical columns in a list numerical\n",
"numerical = attritionXData.columns.difference(categorical)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.compose import ColumnTransformer\n",
"\n",
"# We create the preprocessing pipelines for both numeric and categorical data.\n",
"numeric_transformer = Pipeline(steps=[\n",
" ('imputer', SimpleImputer(strategy='median')),\n",
" ('scaler', StandardScaler())])\n",
"\n",
"categorical_transformer = Pipeline(steps=[\n",
" ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),\n",
" ('onehot', OneHotEncoder(handle_unknown='ignore'))])\n",
"\n",
"preprocess = ColumnTransformer(\n",
" transformers=[\n",
" ('num', numeric_transformer, numerical),\n",
" ('cat', categorical_transformer, categorical)])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.pipeline import make_pipeline\n",
"pipeline = make_pipeline(preprocess)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.model_selection import train_test_split\n",
"\n",
"X_train, X_test, y_train, y_test = train_test_split(attritionXData, \n",
" target, \n",
" test_size=0.2,\n",
" random_state=0,\n",
" stratify=target)\n",
"\n",
"X_train_t = pipeline.fit_transform(X_train)\n",
"X_test_t = pipeline.transform(X_test)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# check tensorflow version\n",
"import tensorflow as tf\n",
"from distutils.version import StrictVersion\n",
"\n",
"print(tf.__version__)\n",
"# Append classifier to preprocessing pipeline.\n",
"# Now we have a full prediction pipeline.\n",
"\n",
"\n",
"network = tf.keras.models.Sequential()\n",
"network.add(tf.keras.layers.Dense(units=16, activation='relu', input_shape=(X_train_t.shape[1],)))\n",
"network.add(tf.keras.layers.Dense(units=16, activation='relu'))\n",
"network.add(tf.keras.layers.Dense(units=1, activation='sigmoid'))\n",
"\n",
"# Compile neural network\n",
"network.compile(loss='binary_crossentropy', # Cross-entropy\n",
" optimizer='rmsprop', # Root Mean Square Propagation\n",
" metrics=['accuracy']) # Accuracy performance metric\n",
"\n",
"# Train neural network\n",
"history = network.fit(X_train_t, # Features\n",
" y_train, # Target vector\n",
" epochs=20, # Number of epochs\n",
" verbose=1, # Print description after each epoch\n",
" batch_size=100, # Number of observations per batch\n",
" validation_data=(X_test_t, y_test)) # Data for evaluation"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# You can run the DeepExplainer directly, or run the TabularExplainer which will choose the most appropriate explainer\n",
"from interpret.ext.greybox import DeepExplainer\n",
"explainer = DeepExplainer(network,\n",
" X_train,\n",
" features=X_train.columns,\n",
" classes=[\"STAYING\", \"LEAVING\"], \n",
" transformations=preprocess,\n",
" model_task=\"classification\",\n",
" is_classifier=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Save featurization prior to keras model in the outputs folder so it automatically get uploaded\n",
"# We cannot save Keras with the pipeline due to known issues with pickling Keras models\n",
"featurize_file_name = 'featurize.pkl'\n",
"\n",
"with open(featurize_file_name, 'wb') as file:\n",
" joblib.dump(value=preprocess, filename=os.path.join('./outputs/', featurize_file_name))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Save keras model to disk\n",
"keras_model_file_name = 'keras_model.pkl'\n",
"network.save(os.path.join('./outputs/', keras_model_file_name))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Explain overall model predictions (global explanation)\n",
"# Passing in test dataset for evaluation examples - note it must be a representative sample of the original data\n",
"# x_train can be passed as well, but with more examples explanations it will\n",
"# take longer although they may be more accurate\n",
"global_explanation = explainer.explain_global(X_test)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.interpret.scoring.scoring_explainer import DeepScoringExplainer, save\n",
"from azureml.interpret.model.serialize import KerasSerializer\n",
"# ScoringExplainer with custom keras serializer\n",
"scoring_explainer = DeepScoringExplainer(explainer, serializer=KerasSerializer())\n",
"# Pickle scoring explainer locally\n",
"save(scoring_explainer, exist_ok=True)\n",
"\n",
"# Register featurization\n",
"run.upload_file(featurize_file_name, os.path.join('./outputs/', featurize_file_name))\n",
"featurize_model = run.register_model(model_name='featurize',\n",
" model_path=featurize_file_name)\n",
"\n",
"# Register keras model\n",
"run.upload_file(keras_model_file_name, os.path.join('./outputs/', keras_model_file_name))\n",
"keras_model = run.register_model(model_name='keras_model',\n",
" model_path=keras_model_file_name)\n",
"\n",
"# Register scoring explainer\n",
"run.upload_file('IBM_attrition_explainer.pkl', 'scoring_explainer.pkl')\n",
"scoring_explainer_model = run.register_model(model_name='IBM_attrition_explainer', model_path='IBM_attrition_explainer.pkl')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Use helper utility to wrap keras model in scikit-learn style API for visualization dashboard\n",
"from interpret_community.common.model_wrapper import wrap_model\n",
"from interpret_community.dataset.dataset_wrapper import DatasetWrapper\n",
"wrapped_model, ml_domain = wrap_model(network, DatasetWrapper(X_test_t), \"classification\")\n",
"wrapped_model.fit = network.fit\n",
"from sklearn.pipeline import Pipeline\n",
"dashboard_pipeline = Pipeline(steps=[('preprocess', preprocess), ('network', wrapped_model)])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Visualize\n",
"Visualize the explanations"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from interpret_community.widget import ExplanationDashboard"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ExplanationDashboard(global_explanation, dashboard_pipeline, datasetX=X_test)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Deploy \n",
"\n",
"Deploy Model and ScoringExplainer.\n",
"\n",
"Please note that you must indicate azureml-defaults with verion >= 1.0.45 as a pip dependency, because it contains the functionality needed to host the model as a web service."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.conda_dependencies import CondaDependencies \n",
"\n",
"# azureml-defaults is required to host the model as a web service.\n",
"azureml_pip_packages = [\n",
" 'azureml-defaults', 'azureml-contrib-interpret', 'azureml-core', 'azureml-telemetry',\n",
" 'azureml-interpret'\n",
"]\n",
"# Note: this is to pin the scikit-learn and pandas versions to be same as notebook.\n",
"# In production scenario user would choose their dependencies\n",
"import pkg_resources\n",
"available_packages = pkg_resources.working_set\n",
"sklearn_ver = None\n",
"pandas_ver = None\n",
"for dist in available_packages:\n",
" if dist.key == 'scikit-learn':\n",
" sklearn_ver = dist.version\n",
" elif dist.key == 'pandas':\n",
" pandas_ver = dist.version\n",
"sklearn_dep = 'scikit-learn'\n",
"pandas_dep = 'pandas'\n",
"if sklearn_ver:\n",
" sklearn_dep = 'scikit-learn=={}'.format(sklearn_ver)\n",
"if pandas_ver:\n",
" pandas_dep = 'pandas=={}'.format(pandas_ver)\n",
"# specify CondaDependencies obj\n",
"myenv = CondaDependencies.create(conda_packages=[sklearn_dep, pandas_dep],\n",
" pip_packages=['sklearn-pandas', 'pyyaml', 'tensorflow<2.0', 'keras==2.3.1'] + azureml_pip_packages)\n",
"\n",
"with open(\"myenv.yml\",\"w\") as f:\n",
" f.write(myenv.serialize_to_string())\n",
"\n",
"with open(\"myenv.yml\",\"r\") as f:\n",
" print(f.read())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.model import Model\n",
"# retrieve scoring explainer for deployment\n",
"scoring_explainer_model = Model(ws, 'IBM_attrition_explainer')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.webservice import Webservice\n",
"from azureml.core.model import InferenceConfig\n",
"from azureml.core.webservice import AciWebservice\n",
"from azureml.core.model import Model\n",
"from azureml.core.environment import Environment\n",
"\n",
"\n",
"aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,\n",
" memory_gb=1,\n",
" tags={\"data\": \"IBM_Attrition\",\n",
" \"method\" : \"local_explanation\"},\n",
" description='Get local explanations for IBM Employee Attrition data')\n",
"\n",
"myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\n",
"inference_config = InferenceConfig(entry_script=\"score_local_explain_keras.py\", environment=myenv)\n",
"\n",
"# Use configs and models generated above\n",
"service = Model.deploy(ws, 'model-scoring-keras-deploy-local', [scoring_explainer_model, featurize_model, keras_model], inference_config, aciconfig)\n",
"service.wait_for_deployment(show_output=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(service.get_logs())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"import json\n",
"\n",
"# Create data to test service with\n",
"sample_data = '{\"Age\":{\"899\":49},\"BusinessTravel\":{\"899\":\"Travel_Rarely\"},\"DailyRate\":{\"899\":1098},\"Department\":{\"899\":\"Research & Development\"},\"DistanceFromHome\":{\"899\":4},\"Education\":{\"899\":2},\"EducationField\":{\"899\":\"Medical\"},\"EnvironmentSatisfaction\":{\"899\":1},\"Gender\":{\"899\":\"Male\"},\"HourlyRate\":{\"899\":85},\"JobInvolvement\":{\"899\":2},\"JobLevel\":{\"899\":5},\"JobRole\":{\"899\":\"Manager\"},\"JobSatisfaction\":{\"899\":3},\"MaritalStatus\":{\"899\":\"Married\"},\"MonthlyIncome\":{\"899\":18711},\"MonthlyRate\":{\"899\":12124},\"NumCompaniesWorked\":{\"899\":2},\"OverTime\":{\"899\":\"No\"},\"PercentSalaryHike\":{\"899\":13},\"PerformanceRating\":{\"899\":3},\"RelationshipSatisfaction\":{\"899\":3},\"StockOptionLevel\":{\"899\":1},\"TotalWorkingYears\":{\"899\":23},\"TrainingTimesLastYear\":{\"899\":2},\"WorkLifeBalance\":{\"899\":4},\"YearsAtCompany\":{\"899\":1},\"YearsInCurrentRole\":{\"899\":0},\"YearsSinceLastPromotion\":{\"899\":0},\"YearsWithCurrManager\":{\"899\":0}}'\n",
"\n",
"headers = {'Content-Type':'application/json'}\n",
"\n",
"# send request to service\n",
"resp = requests.post(service.scoring_uri, sample_data, headers=headers)\n",
"\n",
"print(\"POST to url\", service.scoring_uri)\n",
"# can covert back to Python objects from json string if desired\n",
"print(\"prediction:\", resp.text)\n",
"result = json.loads(resp.text)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#plot the feature importance for the prediction\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt; plt.rcdefaults()\n",
"\n",
"labels = json.loads(sample_data)\n",
"labels = labels.keys()\n",
"objects = labels\n",
"y_pos = np.arange(len(objects))\n",
"performance = result[\"local_importance_values\"][0][0]\n",
"\n",
"plt.bar(y_pos, performance, align='center', alpha=0.5)\n",
"plt.xticks(y_pos, objects)\n",
"locs, labels = plt.xticks()\n",
"plt.setp(labels, rotation=90)\n",
"plt.ylabel('Feature impact - leaving vs not leaving')\n",
"plt.title('Local feature importance for prediction')\n",
"\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"service.delete()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next\n",
"Learn about other use cases of the explain package on a:\n",
"1. [Training time: regression problem](https://github.com/interpretml/interpret-community/blob/master/notebooks/explain-regression-local.ipynb) \n",
"1. [Training time: binary classification problem](https://github.com/interpretml/interpret-community/blob/master/notebooks/explain-binary-classification-local.ipynb)\n",
"1. [Training time: multiclass classification problem](https://github.com/interpretml/interpret-community/blob/master/notebooks/explain-multiclass-classification-local.ipynb)\n",
"1. Explain models with engineered features:\n",
" 1. [Simple feature transformations](https://github.com/interpretml/interpret-community/blob/master/notebooks/simple-feature-transformations-explain-local.ipynb)\n",
" 1. [Advanced feature transformations](https://github.com/interpretml/interpret-community/blob/master/notebooks/advanced-feature-transformations-explain-local.ipynb)\n",
"1. [Save model explanations via Azure Machine Learning Run History](../run-history/save-retrieve-explanations-run-history.ipynb)\n",
"1. [Run explainers remotely on Azure Machine Learning Compute (AMLCompute)](../remote-explanation/explain-model-on-amlcompute.ipynb)\n",
"1. [Inferencing time: deploy a remotely-trained model and explainer](./train-explain-model-on-amlcompute-and-deploy.ipynb)\n",
"1. [Inferencing time: deploy a locally-trained model and explainer](./train-explain-model-locally-and-deploy.ipynb)"
]
}
],
"metadata": {
"authors": [
{
"name": "mesameki"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,12 +0,0 @@
name: train-explain-model-keras-locally-and-deploy
dependencies:
- pip:
- azureml-sdk
- azureml-interpret
- interpret-community[visualization]
- matplotlib
- azureml-contrib-interpret
- sklearn-pandas
- ipywidgets
- tensorflow<2.0
- keras

View File

@@ -165,7 +165,7 @@
"outputs": [],
"source": [
"from sklearn.model_selection import train_test_split\n",
"from sklearn.externals import joblib\n",
"import joblib\n",
"from sklearn.preprocessing import StandardScaler, OneHotEncoder\n",
"from sklearn.impute import SimpleImputer\n",
"from sklearn.pipeline import Pipeline\n",

View File

@@ -63,7 +63,7 @@
"7.\tCreate an image and register it in the image registry.\n",
"8.\tDeploy the image as a web service in Azure.\n",
"\n",
"| ![azure-machine-learning-cycle](./img/azure-machine-learning-cycle.PNG) |\n",
"| ![azure-machine-learning-cycle](./img/azure-machine-learning-cycle.png) |\n",
"|:--:|"
]
},
@@ -325,7 +325,7 @@
"source": [
"# retrieve model for visualization and deployment\n",
"from azureml.core.model import Model\n",
"from sklearn.externals import joblib\n",
"import joblib\n",
"original_model = Model(ws, 'amlcompute_deploy_model')\n",
"model_path = original_model.download(exist_ok=True)\n",
"original_svm_model = joblib.load(model_path)"
@@ -352,7 +352,7 @@
"outputs": [],
"source": [
"# retrieve x_test for visualization\n",
"from sklearn.externals import joblib\n",
"import joblib\n",
"x_test_path = './x_test.pkl'\n",
"run.download_file('x_test_ibm.pkl', output_file_path=x_test_path)\n",
"x_test = joblib.load(x_test_path)"

View File

@@ -6,7 +6,7 @@ import os
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
import joblib
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline

View File

@@ -252,7 +252,7 @@
"source": [
"binaries_folder = \"azurebatch/job_binaries\"\n",
"if not os.path.isdir(binaries_folder):\n",
" os.mkdir(binaries_folder)\n",
" os.makedirs(binaries_folder)\n",
"\n",
"file_name=\"azurebatch.cmd\"\n",
"with open(path.join(binaries_folder, file_name), 'w') as f:\n",

View File

@@ -0,0 +1,510 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-data-dependency-steps.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Showcasing Dataset and PipelineParameter\n",
"\n",
"This notebook demonstrates how a [**FileDataset**](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.filedataset?view=azure-ml-py) or [**TabularDataset**](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) can be parametrized with [**PipelineParameters**](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelineparameter?view=azure-ml-py) in an AML [Pipeline](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipeline(class)?view=azure-ml-py). By parametrizing datasets, you can dynamically run pipeline experiments with different datasets without any code change.\n",
"\n",
"A common use case is building a training pipeline with a sample of your training data for quick iterative development. When you're ready to test and deploy your pipeline at scale, you can pass in your full training dataset to the pipeline experiment without making any changes to your training script. \n",
" \n",
"To see more about how parameters work between steps, please refer [aml-pipelines-with-data-dependency-steps](https://aka.ms/pl-data-dep).\n",
"\n",
"* [How to create a Pipeline with a Dataset PipelineParameter](#index1)\n",
"* [How to submit a Pipeline with a Dataset PipelineParameter](#index2)\n",
"* [How to submit a Pipeline and change the Dataset PipelineParameter value from the sdk](#index3)\n",
"* [How to submit a Pipeline and change the Dataset PipelineParameter value using a REST call](#index4)"
]
},
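{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick preview, here is a minimal sketch of the pattern (the parameter and input names are illustrative placeholders; the rest of this notebook builds the real pipeline step by step):\n",
"\n",
"```python\n",
"from azureml.core import Dataset\n",
"from azureml.pipeline.core import PipelineParameter\n",
"from azureml.data.dataset_consumption_config import DatasetConsumptionConfig\n",
"\n",
"# wrap a default dataset in a PipelineParameter so callers can swap it at submit time\n",
"ds = Dataset.Tabular.from_delimited_files('https://dprepdata.blob.core.windows.net/demo/Titanic.csv')\n",
"ds_param = PipelineParameter(name='my_ds_param', default_value=ds)\n",
"ds_consumption = DatasetConsumptionConfig('my_dataset', ds_param)\n",
"```\n",
"\n",
"Any step that consumes `ds_consumption` then reads whichever dataset was passed for `my_ds_param` at submission time."
]
},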
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Azure Machine Learning and Pipeline SDK-specific imports"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"from azureml.core import Workspace, Experiment, Dataset\n",
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.data.dataset_consumption_config import DatasetConsumptionConfig\n",
"from azureml.widgets import RunDetails\n",
"\n",
"from azureml.pipeline.core import PipelineParameter\n",
"from azureml.pipeline.core import Pipeline, PipelineRun\n",
"from azureml.pipeline.steps import PythonScriptStep\n",
"\n",
"# Check core SDK version number\n",
"print(\"SDK version:\", azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize Workspace\n",
"\n",
"Initialize a workspace object from persisted configuration. If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure the config file is present at .\\config.json\n",
"\n",
"If you don't have a config.json file, go through the [configuration Notebook](https://aka.ms/pl-config) first.\n",
"\n",
"This sets you up with a working config file that has information on your workspace, subscription id, etc."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create an Azure ML experiment\n",
"\n",
"Let's create an experiment named \"showcasing-dataset\" and a folder to hold the training scripts. The script runs will be recorded under the experiment in Azure."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Choose a name for the run history container in the workspace.\n",
"experiment_name = 'showcasing-dataset'\n",
"source_directory = '.'\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"experiment"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create or Attach an AmlCompute cluster\n",
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you get the default `AmlCompute` as your training compute resource."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Choose a name for your cluster.\n",
"amlcompute_cluster_name = \"cpu-cluster\"\n",
"\n",
"found = False\n",
"# Check if this compute target already exists in the workspace.\n",
"cts = ws.compute_targets\n",
"if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'AmlCompute':\n",
" found = True\n",
" print('Found existing compute target.')\n",
" compute_target = cts[amlcompute_cluster_name]\n",
" \n",
"if not found:\n",
" print('Creating a new compute target...')\n",
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\", # for GPU, use \"STANDARD_NC6\"\n",
" #vm_priority = 'lowpriority', # optional\n",
" max_nodes = 4)\n",
"\n",
" # Create the cluster.\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config)\n",
" \n",
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
" # If no min_node_count is provided, it will use the scale settings for the cluster.\n",
" compute_target.wait_for_completion(show_output = True, timeout_in_minutes = 10)\n",
" \n",
" # For a more detailed view of current AmlCompute status, use get_status()."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Dataset Configuration\n",
"\n",
"The following steps detail how to create a [FileDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.filedataset?view=azure-ml-py) and [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) from an external CSV file, and configure them to be used by a [Pipeline](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipeline(class)?view=azure-ml-py):\n",
"\n",
"1. Create a dataset from a csv file\n",
"2. Create a [PipelineParameter](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelineparameter?view=azure-ml-py) object and set the `default_value` to the dataset. [PipelineParameter](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelineparameter?view=azure-ml-py) objects enabled arguments to be passed into Pipelines when they are resubmitted after creation. The `name` is referenced later on when we submit additional pipeline runs with different input datasets. \n",
"3. Create a [DatasetConsumptionConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.dataset_consumption_config.datasetconsumptionconfig?view=azure-ml-py) object from the [PiepelineParameter](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelineparameter?view=azure-ml-py). The [DatasetConsumptionConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.dataset_consumption_config.datasetconsumptionconfig?view=azure-ml-py) object specifies how the dataset should be used by the remote compute where the pipeline is run. **NOTE** only [DatasetConsumptionConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.dataset_consumption_config.datasetconsumptionconfig?view=azure-ml-py) objects built on [FileDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.filedataset?view=azure-ml-py) can be set `as_mount()` or `as_download()` on the remote compute."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"datapath-remarks-sample"
]
},
"outputs": [],
"source": [
"file_dataset = Dataset.File.from_files('https://dprepdata.blob.core.windows.net/demo/Titanic.csv')\n",
"file_pipeline_param = PipelineParameter(name=\"file_ds_param\", default_value=file_dataset)\n",
"file_ds_consumption = DatasetConsumptionConfig(\"file_dataset\", file_pipeline_param).as_mount()\n",
"\n",
"tabular_dataset = Dataset.Tabular.from_delimited_files('https://dprepdata.blob.core.windows.net/demo/Titanic.csv')\n",
"tabular_pipeline_param = PipelineParameter(name=\"tabular_ds_param\", default_value=tabular_dataset)\n",
"tabular_ds_consumption = DatasetConsumptionConfig(\"tabular_dataset\", tabular_pipeline_param)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We will setup a training script to ingest our passed-in datasets and print their contents. **NOTE** the names of the datasets referenced inside the training script correspond to the `name` of their respective [DatasetConsumptionConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.dataset_consumption_config.datasetconsumptionconfig?view=azure-ml-py) objects we defined above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%writefile train_with_dataset.py\n",
"from azureml.core import Run\n",
"\n",
"input_file_ds_path = Run.get_context().input_datasets['file_dataset']\n",
"with open(input_file_ds_path, 'r') as f:\n",
" content = f.read()\n",
" print(content)\n",
"\n",
"input_tabular_ds = Run.get_context().input_datasets['tabular_dataset']\n",
"tabular_df = input_tabular_ds.to_pandas_dataframe()\n",
"print(tabular_df)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id='index1'></a>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create a Pipeline with a Dataset PipelineParameter\n",
"\n",
"Note that the ```file_ds_consumption``` and ```tabular_ds_consumption``` are specified as both arguments and inputs to create a step."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_step = PythonScriptStep(\n",
" name=\"train_step\",\n",
" script_name=\"train_with_dataset.py\",\n",
" arguments=[\"--param1\", file_ds_consumption, \"--param2\", tabular_ds_consumption],\n",
" inputs=[file_ds_consumption, tabular_ds_consumption],\n",
" compute_target=compute_target,\n",
" source_directory=source_directory)\n",
"\n",
"print(\"train_step created\")\n",
"\n",
"pipeline = Pipeline(workspace=ws, steps=[train_step])\n",
"print(\"pipeline with the train_step created\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id='index2'></a>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Submit a Pipeline with a Dataset PipelineParameter\n",
"\n",
"Pipelines can be submitted with default values of PipelineParameters by not specifying any parameters."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Pipeline will run with default file_ds and tabular_ds\n",
"pipeline_run = experiment.submit(pipeline)\n",
"print(\"Pipeline is submitted for execution\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"RunDetails(pipeline_run).show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_run.wait_for_completion()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id='index3'></a>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Submit a Pipeline with a different Dataset PipelineParameter value from the SDK\n",
"\n",
"The training pipeline can be reused with different input datasets by passing them in as PipelineParameters"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"iris_file_ds = Dataset.File.from_files('https://raw.githubusercontent.com/Azure/MachineLearningNotebooks/'\n",
" '4e7b3784d50e81c313c62bcdf9a330194153d9cd/how-to-use-azureml/work-with-data/'\n",
" 'datasets-tutorial/train-with-datasets/train-dataset/iris.csv')\n",
"\n",
"iris_tabular_ds = Dataset.Tabular.from_delimited_files('https://raw.githubusercontent.com/Azure/MachineLearningNotebooks/'\n",
" '4e7b3784d50e81c313c62bcdf9a330194153d9cd/how-to-use-azureml/work-with-data/'\n",
" 'datasets-tutorial/train-with-datasets/train-dataset/iris.csv')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_run_with_params = experiment.submit(pipeline, pipeline_parameters={'file_ds_param': iris_file_ds, 'tabular_ds_param': iris_tabular_ds}) "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"RunDetails(pipeline_run_with_params).show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_run_with_params.wait_for_completion()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id='index4'></a>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Dynamically Set the Dataset PipelineParameter Values using a REST Call\n",
"\n",
"Let's publish the pipeline we created previously, so we can generate a pipeline endpoint. We can then submit the iris datasets to the pipeline REST endpoint by passing in their IDs. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"published_pipeline = pipeline.publish(name=\"Dataset_Pipeline\", description=\"Pipeline to test Dataset PipelineParameter\", continue_on_step_failure=True)\n",
"published_pipeline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"published_pipeline.submit(ws, experiment_name=\"publishedexperiment\", pipeline_parameters={'file_ds_param': iris_file_ds, 'tabular_ds_param': iris_tabular_ds})"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.authentication import InteractiveLoginAuthentication\n",
"import requests\n",
"\n",
"auth = InteractiveLoginAuthentication()\n",
"aad_token = auth.get_authentication_header()\n",
"\n",
"rest_endpoint = published_pipeline.endpoint\n",
"\n",
"print(\"You can perform HTTP POST on URL {} to trigger this pipeline\".format(rest_endpoint))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# specify the param when running the pipeline\n",
"response = requests.post(rest_endpoint, \n",
" headers=aad_token, \n",
" json={\"ExperimentName\": \"MyRestPipeline\",\n",
" \"RunSource\": \"SDK\",\n",
" \"DataSetDefinitionValueAssignments\": {\"file_ds_param\": {\"SavedDataSetReference\": {\"Id\": iris_file_ds.id}},\n",
" \"tabular_ds_param\": {\"SavedDataSetReference\": {\"Id\": iris_tabular_ds.id}}}\n",
" }\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"try:\n",
" response.raise_for_status()\n",
"except Exception: \n",
" raise Exception('Received bad response from the endpoint: {}\\n'\n",
" 'Response Code: {}\\n'\n",
" 'Headers: {}\\n'\n",
" 'Content: {}'.format(rest_endpoint, response.status_code, response.headers, response.content))\n",
"\n",
"run_id = response.json().get('Id')\n",
"print('Submitted pipeline run: ', run_id)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"published_pipeline_run_via_rest = PipelineRun(ws.experiments[\"MyRestPipeline\"], run_id)\n",
"RunDetails(published_pipeline_run_via_rest).show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"published_pipeline_run_via_rest.wait_for_completion()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id='index5'></a>"
]
}
],
"metadata": {
"authors": [
{
"name": "rafarmah"
}
],
"category": "tutorial",
"compute": [
"AML Compute"
],
"datasets": [
"Custom"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"Azure ML"
],
"friendly_name": "How to use Dataset as a PipelineParameter",
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
},
"order_index": 13,
"star_tag": [
"featured"
],
"tags": [
"None"
],
"task": "Demonstrates the use of Dataset as a PipelineParameter"
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -0,0 +1,5 @@
name: aml-pipelines-showcasing-dataset-and-pipelineparameter
dependencies:
- pip:
- azureml-sdk
- azureml-widgets

View File

@@ -510,7 +510,7 @@
" inputs=[step_1_input],\n",
" num_workers=1,\n",
" python_script_path=python_script_path,\n",
" python_script_params={'arg1', pipeline_param, 'arg2},\n",
" python_script_params={'arg1', pipeline_param, 'arg2'},\n",
" run_name='DB_Python_demo',\n",
" compute_target=databricks_compute,\n",
" allow_reuse=True\n",

View File

@@ -279,8 +279,7 @@
"# Specify CondaDependencies obj, add necessary packages\n",
"aml_run_config.environment.python.conda_dependencies = CondaDependencies.create(\n",
" conda_packages=['pandas','scikit-learn'], \n",
" pip_packages=['azureml-sdk[automl,explain]', 'pyarrow'], \n",
" pin_sdk_version=False)\n",
" pip_packages=['azureml-sdk[automl,explain]', 'pyarrow'])\n",
"\n",
"print (\"Run configuration created.\")"
]
@@ -692,7 +691,6 @@
" debug_log = 'automated_ml_errors.log',\n",
" path = train_model_folder,\n",
" compute_target = aml_compute,\n",
" run_configuration = aml_run_config,\n",
" featurization = 'auto',\n",
" training_data = training_dataset,\n",
" label_column_name = 'cost',\n",

View File

@@ -2,18 +2,16 @@
Azure Machine Learning Batch Inference targets large inference jobs that are not time-sensitive. Batch Inference provides cost-effective inference compute scaling, with unparalleled throughput for asynchronous applications. It is optimized for high-throughput, fire-and-forget inference over large collections of data.
# Getting Started with Batch Inference Public Preview
# Getting Started with Batch Inference
Batch inference public preview offers a platform in which to do large inference or generic parallel map-style operations. Below introduces the major steps to use this new functionality. For a quick try, please follow the prerequisites and simply run the sample notebooks provided in this directory.
Batch inference offers a platform for large-scale inference and generic parallel map-style operations. The sections below introduce the major steps for using this functionality. For a quick try, follow the prerequisites and run the sample notebooks provided in this directory.
## Prerequisites
### Python package installation
Following the convention of most AzureML Public Preview features, Batch Inference SDK is currently available as a contrib package.
If you're unfamiliar with creating a new Python environment, you may follow this example for [creating a conda environment](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-environment#local). The Batch Inference package can be installed with the following pip command.
```
pip install azureml-contrib-pipeline-steps
pip install azureml-pipeline-steps
```
### Creation of Azure Machine Learning Workspace
@@ -66,9 +64,8 @@ base_image_registry.password = "password"
## Create a batch inference job
**ParallelRunStep** is a newly added step in the azureml.contrib.pipeline.steps package. You will use it to add a step to create a batch inference job with your Azure machine learning pipeline. (Use batch inference without an Azure machine learning pipeline is not supported yet). ParallelRunStep has all the following parameters:
**ParallelRunStep** is a newly added step in the azureml.pipeline.steps package. You will use it to add a step that creates a batch inference job in your Azure Machine Learning pipeline. (Using batch inference outside of an Azure Machine Learning pipeline is not yet supported.) ParallelRunStep takes the following parameters, which the sketch after this list wires together:
- **name**: this name is used to register the batch inference service and has the following naming restrictions: unique, 3-32 characters, and matching the regex ^\[a-z\]([-a-z0-9]*[a-z0-9])?$
- **models**: zero or more model names already registered in Azure Machine Learning model registry.
- **parallel_run_config**: ParallelRunConfig as defined above.
- **inputs**: one or more Dataset objects.
- **output**: this should be a PipelineData object encapsulating an Azure BLOB container path.
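A minimal usage sketch, assuming `parallel_run_config`, `input_dataset`, and `output_dir` were created as described above (these names are placeholders, not the samples' exact variable names):
```
from azureml.core import Experiment, Workspace
from azureml.pipeline.core import Pipeline
from azureml.pipeline.steps import ParallelRunStep

ws = Workspace.from_config()

# wire the pieces described above into a single pipeline step
batch_step = ParallelRunStep(
    name="batch-inference-step",               # must satisfy the naming restrictions above
    parallel_run_config=parallel_run_config,   # ParallelRunConfig as defined above
    inputs=[input_dataset],                    # one or more Dataset objects
    output=output_dir,                         # PipelineData wrapping a blob container path
    allow_reuse=True
)

pipeline = Pipeline(workspace=ws, steps=[batch_step])
run = Experiment(ws, 'batch-inference').submit(pipeline)
```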

View File

@@ -23,11 +23,6 @@
"\n",
"In this notebook, we will demonstrate how to make predictions on large quantities of data asynchronously using the ML pipelines with Azure Machine Learning. Batch inference (or batch scoring) provides cost-effective inference, with unparalleled throughput for asynchronous applications. Batch prediction pipelines can scale to perform inference on terabytes of production data. Batch prediction is optimized for high throughput, fire-and-forget predictions for a large collection of data.\n",
"\n",
"> **Note**\n",
"This notebook uses public preview functionality (ParallelRunStep). Please install azureml-contrib-pipeline-steps package before running this notebook. Pandas is used to display job results.\n",
"```\n",
"pip install azureml-contrib-pipeline-steps pandas\n",
"```\n",
"> **Tip**\n",
"If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction.\n",
"\n",
@@ -86,7 +81,6 @@
"source": [
"import os\n",
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# choose a name for your cluster\n",
"compute_name = os.environ.get(\"AML_COMPUTE_CLUSTER_NAME\", \"cpu-cluster\")\n",
@@ -184,9 +178,20 @@
"mnist_ds_name = 'mnist_sample_data'\n",
"\n",
"path_on_datastore = mnist_data.path('mnist')\n",
"input_mnist_ds = Dataset.File.from_files(path=path_on_datastore, validate=False)\n",
"registered_mnist_ds = input_mnist_ds.register(ws, mnist_ds_name, create_new_version=True)\n",
"named_mnist_ds = registered_mnist_ds.as_named_input(mnist_ds_name)"
"input_mnist_ds = Dataset.File.from_files(path=path_on_datastore, validate=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data.dataset_consumption_config import DatasetConsumptionConfig\n",
"from azureml.pipeline.core import PipelineParameter\n",
"\n",
"pipeline_param = PipelineParameter(name=\"mnist_param\", default_value=input_mnist_ds)\n",
"input_mnist_ds_consumption = DatasetConsumptionConfig(\"minist_param_config\", pipeline_param).as_mount()"
]
},
{
@@ -306,8 +311,6 @@
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"scripts_folder = \"Code\"\n",
"script_file = \"digit_identification.py\"\n",
"\n",
@@ -341,8 +344,8 @@
"from azureml.core import Environment\n",
"from azureml.core.runconfig import CondaDependencies, DEFAULT_CPU_IMAGE\n",
"\n",
"batch_conda_deps = CondaDependencies.create(pip_packages=[\"tensorflow==1.15.2\", \"pillow\"])\n",
"\n",
"batch_conda_deps = CondaDependencies.create(pip_packages=[\"tensorflow==1.15.2\", \"pillow\", \n",
" \"azureml-core\", \"azureml-dataprep[fuse]\"])\n",
"batch_env = Environment(name=\"batch_environment\")\n",
"batch_env.python.conda_dependencies = batch_conda_deps\n",
"batch_env.docker.enabled = True\n",
@@ -362,17 +365,21 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.pipeline.steps import ParallelRunStep, ParallelRunConfig\n",
"from azureml.pipeline.core import PipelineParameter\n",
"from azureml.pipeline.steps import ParallelRunStep, ParallelRunConfig\n",
"\n",
"parallel_run_config = ParallelRunConfig(\n",
" source_directory=scripts_folder,\n",
" entry_script=script_file,\n",
" mini_batch_size=\"5\",\n",
" mini_batch_size=PipelineParameter(name=\"batch_size_param\", default_value=\"5\"),\n",
" error_threshold=10,\n",
" output_action=\"append_row\",\n",
" append_row_file_name=\"mnist_outputs.txt\",\n",
" environment=batch_env,\n",
" compute_target=compute_target,\n",
" node_count=2)"
" process_count_per_node=PipelineParameter(name=\"process_count_param\", default_value=2),\n",
" node_count=2\n",
")"
]
},
{
@@ -392,10 +399,8 @@
"parallelrun_step = ParallelRunStep(\n",
" name=\"predict-digits-mnist\",\n",
" parallel_run_config=parallel_run_config,\n",
" inputs=[ named_mnist_ds ],\n",
" inputs=[ input_mnist_ds_consumption ],\n",
" output=output_dir,\n",
" models=[ model ],\n",
" arguments=[ ],\n",
" allow_reuse=True\n",
")"
]
@@ -454,6 +459,47 @@
"pipeline_run.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Resubmit a with different dataset\n",
"Since we made the input a `PipelineParameter`, we can resubmit with a different dataset without having to create an entirely new experiment. We'll use the same datastore but use only a single image."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"path_on_datastore = mnist_data.path('mnist/0.png')\n",
"single_image_ds = Dataset.File.from_files(path=path_on_datastore, validate=False)\n",
"single_image_ds._ensure_saved(ws)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_run_2 = experiment.submit(pipeline, \n",
" pipeline_parameters={\"mnist_param\": single_image_ds, \n",
" \"batch_size_param\": \"1\",\n",
" \"process_count_param\": 1}\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_run_2.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -480,7 +526,7 @@
"\n",
"for root, dirs, files in os.walk(\"mnist_results\"):\n",
" for file in files:\n",
" if file.endswith('parallel_run_step.txt'):\n",
" if file.endswith('mnist_outputs.txt'):\n",
" result_file = os.path.join(root,file)\n",
"\n",
"df = pd.read_csv(result_file, delimiter=\":\", header=None)\n",

View File

@@ -2,6 +2,6 @@ name: file-dataset-image-inference-mnist
dependencies:
- pip:
- azureml-sdk
- azureml-contrib-pipeline-steps
- azureml-pipeline-steps
- azureml-widgets
- pandas

View File

@@ -23,11 +23,6 @@
"\n",
"In this notebook, we will demonstrate how to make predictions on large quantities of data asynchronously using the ML pipelines with Azure Machine Learning. Batch inference (or batch scoring) provides cost-effective inference, with unparalleled throughput for asynchronous applications. Batch prediction pipelines can scale to perform inference on terabytes of production data. Batch prediction is optimized for high throughput, fire-and-forget predictions for a large collection of data.\n",
"\n",
"> **Note**\n",
"This notebook uses public preview functionality (ParallelRunStep). Please install azureml-contrib-pipeline-steps package before running this notebook. Pandas is used to display job results.\n",
"```\n",
"pip install azureml-contrib-pipeline-steps pandas\n",
"```\n",
"> **Tip**\n",
"If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction.\n",
"\n",
@@ -84,7 +79,6 @@
"source": [
"import os\n",
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# choose a name for your cluster\n",
"compute_name = os.environ.get(\"AML_COMPUTE_CLUSTER_NAME\", \"cpu-cluster\")\n",
@@ -304,7 +298,8 @@
"from azureml.core import Environment\n",
"from azureml.core.runconfig import CondaDependencies\n",
"\n",
"predict_conda_deps = CondaDependencies.create(pip_packages=[ \"scikit-learn==0.20.3\" ])\n",
"predict_conda_deps = CondaDependencies.create(pip_packages=[\"scikit-learn==0.20.3\",\n",
" \"azureml-core\", \"azureml-dataprep[pandas,fuse]\"])\n",
"\n",
"predict_env = Environment(name=\"predict_environment\")\n",
"predict_env.python.conda_dependencies = predict_conda_deps\n",
@@ -325,19 +320,21 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.pipeline.steps import ParallelRunStep, ParallelRunConfig\n",
"from azureml.pipeline.steps import ParallelRunStep, ParallelRunConfig\n",
"\n",
"# In a real-world scenario, you'll want to shape your process per node and nodes to fit your problem domain.\n",
"parallel_run_config = ParallelRunConfig(\n",
" source_directory=scripts_folder,\n",
" entry_script=script_file, # the user script to run against each input\n",
" mini_batch_size='5MB',\n",
" error_threshold=5,\n",
" output_action='append_row',\n",
" environment=predict_env,\n",
" compute_target=compute_target, \n",
" node_count=3,\n",
" run_invocation_timeout=600)"
" source_directory=scripts_folder,\n",
" entry_script=script_file, # the user script to run against each input\n",
" mini_batch_size='5MB',\n",
" error_threshold=5,\n",
" output_action='append_row',\n",
" append_row_file_name=\"iris_outputs.txt\",\n",
" environment=predict_env,\n",
" compute_target=compute_target, \n",
" node_count=3,\n",
" run_invocation_timeout=600\n",
")"
]
},
{
@@ -359,7 +356,6 @@
" inputs=[named_iris_ds],\n",
" output=output_folder,\n",
" parallel_run_config=parallel_run_config,\n",
" models=[model],\n",
" arguments=['--model_name', 'iris'],\n",
" allow_reuse=True\n",
")"
@@ -453,7 +449,7 @@
"\n",
"for root, dirs, files in os.walk(\"iris_results\"):\n",
" for file in files:\n",
" if file.endswith('parallel_run_step.txt'):\n",
" if file.endswith('iris_outputs.txt'):\n",
" result_file = os.path.join(root,file)\n",
"\n",
"# cleanup output format\n",

View File

@@ -2,6 +2,6 @@ name: tabular-dataset-inference-iris
dependencies:
- pip:
- azureml-sdk
- azureml-contrib-pipeline-steps
- azureml-pipeline-steps
- azureml-widgets
- pandas

View File

@@ -26,11 +26,8 @@
"2. Run neural style on each image using one of the provided models (from `pytorch` pretrained models for this example).\n",
"3. Stitch the image back into a video.\n",
"\n",
"> **Note**\n",
"This notebook uses public preview functionality (ParallelRunStep). Please install azureml-contrib-pipeline-steps package before running this notebook.\n",
"```\n",
"pip install azureml-contrib-pipeline-steps\n",
"```"
"> **Tip**\n",
"If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction."
]
},
{
@@ -356,7 +353,9 @@
"source": [
"from azureml.pipeline.core.graph import PipelineParameter\n",
"# create a parameter for style (one of \"candy\", \"mosaic\") to transfer the images to\n",
"style_param = PipelineParameter(name=\"style\", default_value=\"mosaic\")"
"style_param = PipelineParameter(name=\"style\", default_value=\"mosaic\")\n",
"# create a parameter for the number of nodes to use in step no. 2 (style transfer)\n",
"nodecount_param = PipelineParameter(name=\"nodecount\", default_value=2)"
]
},
{
@@ -415,6 +414,8 @@
"parallel_cd.add_conda_package(\"pytorch\")\n",
"parallel_cd.add_conda_package(\"torchvision\")\n",
"parallel_cd.add_conda_package(\"pillow<7\") # needed for torchvision==0.4.0\n",
"parallel_cd.add_pip_package(\"azureml-core\")\n",
"parallel_cd.add_pip_package(\"azureml-dataprep[fuse]\")\n",
"\n",
"styleenvironment = Environment(name=\"styleenvironment\")\n",
"styleenvironment.python.conda_dependencies=parallel_cd\n",
@@ -427,17 +428,20 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.pipeline.steps import ParallelRunConfig\n",
"from azureml.pipeline.core import PipelineParameter\n",
"from azureml.pipeline.steps import ParallelRunConfig\n",
"\n",
"parallel_run_config = ParallelRunConfig(\n",
" environment=styleenvironment,\n",
" entry_script='transform.py',\n",
" output_action='summary_only',\n",
" mini_batch_size=\"1\",\n",
" error_threshold=1,\n",
" source_directory=scripts_folder,\n",
" compute_target=gpu_cluster, \n",
" node_count=3)"
" environment=styleenvironment,\n",
" entry_script='transform.py',\n",
" output_action='summary_only',\n",
" mini_batch_size=\"1\",\n",
" error_threshold=1,\n",
" source_directory=scripts_folder,\n",
" compute_target=gpu_cluster, \n",
" node_count=nodecount_param,\n",
" process_count_per_node=2\n",
")"
]
},
{
@@ -446,7 +450,7 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.pipeline.steps import ParallelRunStep\n",
"from azureml.pipeline.steps import ParallelRunStep\n",
"from datetime import datetime\n",
"\n",
"parallel_step_name = 'styletransfer-' + datetime.now().strftime('%Y%m%d%H%M')\n",
@@ -455,9 +459,6 @@
" name=parallel_step_name,\n",
" inputs=[ffmpeg_images_file_dataset], # Input file share/blob container/file dataset\n",
" output=processed_images, # Output file share/blob container\n",
" models=[mosaic_model, candy_model],\n",
" tags = {'scenario': \"batch inference\", 'type': \"demo\"},\n",
" properties = {'area': \"style transfer\"},\n",
" arguments=[\"--style\", style_param],\n",
" parallel_run_config=parallel_run_config,\n",
" allow_reuse=True #[optional - default value True]\n",
@@ -666,7 +667,8 @@
"response = requests.post(rest_endpoint, \n",
" headers=aad_token,\n",
" json={\"ExperimentName\": experiment_name,\n",
" \"ParameterAssignments\": {\"style\": \"candy\", \"aml_node_count\": 2}})\n",
" \"ParameterAssignments\": {\"style\": \"candy\", \"NodeCount\": 3}})\n",
"\n",
"run_id = response.json()[\"Id\"]\n",
"\n",
"from azureml.pipeline.core.run import PipelineRun\n",

View File

@@ -2,7 +2,6 @@ name: pipeline-style-transfer
dependencies:
- pip:
- azureml-sdk
- azureml-contrib-pipeline-steps
- azureml-pipeline-steps
- azureml-widgets
- requests

View File

@@ -1,350 +0,0 @@
import json
import tempfile
import numpy as np
import copy
import time
import torch
import torch._six
from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
import pycocotools.mask as mask_util
from collections import defaultdict
import utils
class CocoEvaluator(object):
def __init__(self, coco_gt, iou_types):
assert isinstance(iou_types, (list, tuple))
coco_gt = copy.deepcopy(coco_gt)
self.coco_gt = coco_gt
self.iou_types = iou_types
self.coco_eval = {}
for iou_type in iou_types:
self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)
self.img_ids = []
self.eval_imgs = {k: [] for k in iou_types}
def update(self, predictions):
img_ids = list(np.unique(list(predictions.keys())))
self.img_ids.extend(img_ids)
for iou_type in self.iou_types:
results = self.prepare(predictions, iou_type)
coco_dt = loadRes(self.coco_gt, results) if results else COCO()
coco_eval = self.coco_eval[iou_type]
coco_eval.cocoDt = coco_dt
coco_eval.params.imgIds = list(img_ids)
img_ids, eval_imgs = evaluate(coco_eval)
self.eval_imgs[iou_type].append(eval_imgs)
def synchronize_between_processes(self):
for iou_type in self.iou_types:
self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])
def accumulate(self):
for coco_eval in self.coco_eval.values():
coco_eval.accumulate()
def summarize(self):
for iou_type, coco_eval in self.coco_eval.items():
print("IoU metric: {}".format(iou_type))
coco_eval.summarize()
def prepare(self, predictions, iou_type):
if iou_type == "bbox":
return self.prepare_for_coco_detection(predictions)
elif iou_type == "segm":
return self.prepare_for_coco_segmentation(predictions)
elif iou_type == "keypoints":
return self.prepare_for_coco_keypoint(predictions)
else:
raise ValueError("Unknown iou type {}".format(iou_type))
def prepare_for_coco_detection(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"bbox": box,
"score": scores[k],
}
for k, box in enumerate(boxes)
]
)
return coco_results
def prepare_for_coco_segmentation(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
scores = prediction["scores"]
labels = prediction["labels"]
masks = prediction["masks"]
masks = masks > 0.5
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
rles = [
mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"segmentation": rle,
"score": scores[k],
}
for k, rle in enumerate(rles)
]
)
return coco_results
def prepare_for_coco_keypoint(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
keypoints = prediction["keypoints"]
keypoints = keypoints.flatten(start_dim=1).tolist()
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
'keypoints': keypoint,
"score": scores[k],
}
for k, keypoint in enumerate(keypoints)
]
)
return coco_results
def convert_to_xywh(boxes):
xmin, ymin, xmax, ymax = boxes.unbind(1)
return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)
def merge(img_ids, eval_imgs):
all_img_ids = utils.all_gather(img_ids)
all_eval_imgs = utils.all_gather(eval_imgs)
merged_img_ids = []
for p in all_img_ids:
merged_img_ids.extend(p)
merged_eval_imgs = []
for p in all_eval_imgs:
merged_eval_imgs.append(p)
merged_img_ids = np.array(merged_img_ids)
merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)
# keep only unique (and in sorted order) images
merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
merged_eval_imgs = merged_eval_imgs[..., idx]
return merged_img_ids, merged_eval_imgs
def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
img_ids, eval_imgs = merge(img_ids, eval_imgs)
img_ids = list(img_ids)
eval_imgs = list(eval_imgs.flatten())
coco_eval.evalImgs = eval_imgs
coco_eval.params.imgIds = img_ids
coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
#################################################################
# From pycocotools, just removed the prints and fixed
# a Python3 bug about unicode not defined
#################################################################
# Ideally, pycocotools wouldn't have hard-coded prints
# so that we could avoid copy-pasting those two functions
def createIndex(self):
# create index
# print('creating index...')
anns, cats, imgs = {}, {}, {}
imgToAnns, catToImgs = defaultdict(list), defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
# print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
maskUtils = mask_util
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
# print('Loading and preparing results...')
# tic = time.time()
if isinstance(resFile, torch._six.string_classes):
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
assert type(anns) == list, 'results in not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id + 1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]
if 'segmentation' not in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2] * bb[3]
ann['id'] = id + 1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = maskUtils.area(ann['segmentation'])
if 'bbox' not in ann:
ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
ann['id'] = id + 1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x2 - x1) * (y2 - y1)
ann['id'] = id + 1
ann['bbox'] = [x1, y1, x2 - x1, y2 - y1]
# print('DONE (t={:0.2f}s)'.format(time.time()- tic))
res.dataset['annotations'] = anns
createIndex(res)
return res
def evaluate(self):
'''
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
:return: None
'''
# tic = time.time()
# print('Running per image evaluation...')
p = self.params
# add backward compatibility if useSegm is specified in params
if p.useSegm is not None:
p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
# print('Evaluate annotation type *{}*'.format(p.iouType))
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params = p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType == 'segm' or p.iouType == 'bbox':
computeIoU = self.computeIoU
elif p.iouType == 'keypoints':
computeIoU = self.computeOks
self.ious = {
(imgId, catId): computeIoU(imgId, catId)
for imgId in p.imgIds
for catId in catIds}
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
evalImgs = [
evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
# this is NOT in the pycocotools code, but could be done outside
evalImgs = np.asarray(evalImgs).reshape(
len(catIds), len(p.areaRng), len(p.imgIds))
self._paramsEval = copy.deepcopy(self.params)
# toc = time.time()
# print('DONE (t={:0.2f}s).'.format(toc-tic))
return p.imgIds, evalImgs
#################################################################
# end of straight copy from pycocotools, just removing the prints
#################################################################

View File

@@ -1,252 +0,0 @@
import copy
import os
from PIL import Image
import torch
import torch.utils.data
import torchvision
from pycocotools import mask as coco_mask
from pycocotools.coco import COCO
import transforms as T
class FilterAndRemapCocoCategories(object):
def __init__(self, categories, remap=True):
self.categories = categories
self.remap = remap
def __call__(self, image, target):
anno = target["annotations"]
anno = [obj for obj in anno if obj["category_id"] in self.categories]
if not self.remap:
target["annotations"] = anno
return image, target
anno = copy.deepcopy(anno)
for obj in anno:
obj["category_id"] = self.categories.index(obj["category_id"])
target["annotations"] = anno
return image, target
def convert_coco_poly_to_mask(segmentations, height, width):
masks = []
for polygons in segmentations:
rles = coco_mask.frPyObjects(polygons, height, width)
mask = coco_mask.decode(rles)
if len(mask.shape) < 3:
mask = mask[..., None]
mask = torch.as_tensor(mask, dtype=torch.uint8)
mask = mask.any(dim=2)
masks.append(mask)
if masks:
masks = torch.stack(masks, dim=0)
else:
masks = torch.zeros((0, height, width), dtype=torch.uint8)
return masks
class ConvertCocoPolysToMask(object):
def __call__(self, image, target):
w, h = image.size
image_id = target["image_id"]
image_id = torch.tensor([image_id])
anno = target["annotations"]
anno = [obj for obj in anno if obj['iscrowd'] == 0]
boxes = [obj["bbox"] for obj in anno]
# guard against no boxes via resizing
boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
boxes[:, 2:] += boxes[:, :2]
boxes[:, 0::2].clamp_(min=0, max=w)
boxes[:, 1::2].clamp_(min=0, max=h)
classes = [obj["category_id"] for obj in anno]
classes = torch.tensor(classes, dtype=torch.int64)
segmentations = [obj["segmentation"] for obj in anno]
masks = convert_coco_poly_to_mask(segmentations, h, w)
keypoints = None
if anno and "keypoints" in anno[0]:
keypoints = [obj["keypoints"] for obj in anno]
keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
num_keypoints = keypoints.shape[0]
if num_keypoints:
keypoints = keypoints.view(num_keypoints, -1, 3)
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
boxes = boxes[keep]
classes = classes[keep]
masks = masks[keep]
if keypoints is not None:
keypoints = keypoints[keep]
target = {}
target["boxes"] = boxes
target["labels"] = classes
target["masks"] = masks
target["image_id"] = image_id
if keypoints is not None:
target["keypoints"] = keypoints
# for conversion to coco api
area = torch.tensor([obj["area"] for obj in anno])
iscrowd = torch.tensor([obj["iscrowd"] for obj in anno])
target["area"] = area
target["iscrowd"] = iscrowd
return image, target
def _coco_remove_images_without_annotations(dataset, cat_list=None):
def _has_only_empty_bbox(anno):
return all(any(o <= 1 for o in obj["bbox"][2:]) for obj in anno)
def _count_visible_keypoints(anno):
return sum(sum(1 for v in ann["keypoints"][2::3] if v > 0) for ann in anno)
min_keypoints_per_image = 10
def _has_valid_annotation(anno):
# if it's empty, there is no annotation
if len(anno) == 0:
return False
# if all boxes have close to zero area, there is no annotation
if _has_only_empty_bbox(anno):
return False
# keypoint tasks have slightly different criteria for considering
# if an annotation is valid
if "keypoints" not in anno[0]:
return True
# for keypoint detection tasks, only consider valid images those
# containing at least min_keypoints_per_image
if _count_visible_keypoints(anno) >= min_keypoints_per_image:
return True
return False
assert isinstance(dataset, torchvision.datasets.CocoDetection)
ids = []
for ds_idx, img_id in enumerate(dataset.ids):
ann_ids = dataset.coco.getAnnIds(imgIds=img_id, iscrowd=None)
anno = dataset.coco.loadAnns(ann_ids)
if cat_list:
anno = [obj for obj in anno if obj["category_id"] in cat_list]
if _has_valid_annotation(anno):
ids.append(ds_idx)
dataset = torch.utils.data.Subset(dataset, ids)
return dataset
def convert_to_coco_api(ds):
coco_ds = COCO()
# annotation IDs need to start at 1, not 0, see torchvision issue #1530
ann_id = 1
dataset = {'images': [], 'categories': [], 'annotations': []}
categories = set()
for img_idx in range(len(ds)):
# find better way to get target
# targets = ds.get_annotations(img_idx)
img, targets = ds[img_idx]
image_id = targets["image_id"].item()
img_dict = {}
img_dict['id'] = image_id
img_dict['height'] = img.shape[-2]
img_dict['width'] = img.shape[-1]
dataset['images'].append(img_dict)
bboxes = targets["boxes"]
bboxes[:, 2:] -= bboxes[:, :2]
bboxes = bboxes.tolist()
labels = targets['labels'].tolist()
areas = targets['area'].tolist()
iscrowd = targets['iscrowd'].tolist()
if 'masks' in targets:
masks = targets['masks']
# make masks Fortran contiguous for coco_mask
masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
if 'keypoints' in targets:
keypoints = targets['keypoints']
keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
num_objs = len(bboxes)
for i in range(num_objs):
ann = {}
ann['image_id'] = image_id
ann['bbox'] = bboxes[i]
ann['category_id'] = labels[i]
categories.add(labels[i])
ann['area'] = areas[i]
ann['iscrowd'] = iscrowd[i]
ann['id'] = ann_id
if 'masks' in targets:
ann["segmentation"] = coco_mask.encode(masks[i].numpy())
if 'keypoints' in targets:
ann['keypoints'] = keypoints[i]
ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])
dataset['annotations'].append(ann)
ann_id += 1
dataset['categories'] = [{'id': i} for i in sorted(categories)]
coco_ds.dataset = dataset
coco_ds.createIndex()
return coco_ds
def get_coco_api_from_dataset(dataset):
for _ in range(10):
if isinstance(dataset, torchvision.datasets.CocoDetection):
break
if isinstance(dataset, torch.utils.data.Subset):
dataset = dataset.dataset
if isinstance(dataset, torchvision.datasets.CocoDetection):
return dataset.coco
return convert_to_coco_api(dataset)
class CocoDetection(torchvision.datasets.CocoDetection):
def __init__(self, img_folder, ann_file, transforms):
super(CocoDetection, self).__init__(img_folder, ann_file)
self._transforms = transforms
def __getitem__(self, idx):
img, target = super(CocoDetection, self).__getitem__(idx)
image_id = self.ids[idx]
target = dict(image_id=image_id, annotations=target)
if self._transforms is not None:
img, target = self._transforms(img, target)
return img, target
def get_coco(root, image_set, transforms, mode='instances'):
anno_file_template = "{}_{}2017.json"
PATHS = {
"train": ("train2017", os.path.join("annotations", anno_file_template.format(mode, "train"))),
"val": ("val2017", os.path.join("annotations", anno_file_template.format(mode, "val"))),
# "train": ("val2017", os.path.join("annotations", anno_file_template.format(mode, "val")))
}
t = [ConvertCocoPolysToMask()]
if transforms is not None:
t.append(transforms)
transforms = T.Compose(t)
img_folder, ann_file = PATHS[image_set]
img_folder = os.path.join(root, img_folder)
ann_file = os.path.join(root, ann_file)
dataset = CocoDetection(img_folder, ann_file, transforms=transforms)
if image_set == "train":
dataset = _coco_remove_images_without_annotations(dataset)
# dataset = torch.utils.data.Subset(dataset, [i for i in range(500)])
return dataset
def get_coco_kp(root, image_set, transforms):
return get_coco(root, image_set, transforms, mode="person_keypoints")

View File

@@ -1,77 +0,0 @@
import numpy as np
import os
import torch.utils.data
from azureml.core import Run
from PIL import Image
class PennFudanDataset(torch.utils.data.Dataset):
def __init__(self, root, transforms=None):
self.root = root
self.transforms = transforms
# load all image files, sorting them to ensure that they are aligned
self.img_dir = os.path.join(root, "PNGImages")
self.mask_dir = os.path.join(root, "PedMasks")
self.imgs = list(sorted(os.listdir(self.img_dir)))
self.masks = list(sorted(os.listdir(self.mask_dir)))
def __getitem__(self, idx):
# load images and masks
img_path = os.path.join(self.img_dir, self.imgs[idx])
mask_path = os.path.join(self.mask_dir, self.masks[idx])
img = Image.open(img_path).convert("RGB")
# note that we haven't converted the mask to RGB,
# because each color corresponds to a different instance
# with 0 being background
mask = Image.open(mask_path)
mask = np.array(mask)
# instances are encoded as different colors
obj_ids = np.unique(mask)
# first id is the background, so remove it
obj_ids = obj_ids[1:]
# split the color-encoded mask into a set
# of binary masks
masks = mask == obj_ids[:, None, None]
# get bounding box coordinates for each mask
num_objs = len(obj_ids)
boxes = []
for i in range(num_objs):
pos = np.where(masks[i])
xmin = np.min(pos[1])
xmax = np.max(pos[1])
ymin = np.min(pos[0])
ymax = np.max(pos[0])
boxes.append([xmin, ymin, xmax, ymax])
boxes = torch.as_tensor(boxes, dtype=torch.float32)
# there is only one class
labels = torch.ones((num_objs,), dtype=torch.int64)
masks = torch.as_tensor(masks, dtype=torch.uint8)
image_id = torch.tensor([idx])
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
# suppose all instances are not crowd
iscrowd = torch.zeros((num_objs,), dtype=torch.int64)
target = {}
target["boxes"] = boxes
target["labels"] = labels
target["masks"] = masks
target["image_id"] = image_id
target["area"] = area
target["iscrowd"] = iscrowd
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self):
return len(self.imgs)
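# Quick sanity-check sketch (added for illustration; assumes PennFudanPed/ was
# downloaded and extracted as done by download_data() in script.py).
if __name__ == '__main__':
    ds = PennFudanDataset(root='PennFudanPed/')
    img, target = ds[0]
    print(len(ds), 'samples; first image size:', img.size)
    print('boxes:', tuple(target['boxes'].shape), 'masks:', tuple(target['masks'].shape))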

View File

@@ -1,16 +0,0 @@
# From https://github.com/microsoft/AzureML-BERT/blob/master/finetune/PyTorch/dockerfile
FROM mcr.microsoft.com/azureml/base-gpu:openmpi3.1.2-cuda10.1-cudnn7-ubuntu18.04
RUN apt update && apt install git -y && rm -rf /var/lib/apt/lists/*
RUN /opt/miniconda/bin/conda update -n base -c defaults conda
RUN /opt/miniconda/bin/conda install -y cython=0.29.15 numpy=1.18.1
RUN /opt/miniconda/bin/conda install -y pytorch=1.4 torchvision=0.5.0 -c pytorch
# Install cocoapi, required for drawing bounding boxes
RUN git clone https://github.com/cocodataset/cocoapi.git && cd cocoapi/PythonAPI && python setup.py build_ext install
RUN pip install azureml-defaults
RUN pip install "azureml-dataprep[fuse]"
RUN pip install pandas pyarrow
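# Note (added for clarity): this Dockerfile is not built locally; the accompanying
# notebook reads its contents into an Azure ML Environment (my_env.docker.base_dockerfile)
# and the service builds the image. To experiment locally you could run, for example:
#   docker build -t maskrcnn-train .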

View File

@@ -1,108 +0,0 @@
import math
import sys
import time
import torch
import torchvision.models.detection.mask_rcnn
from coco_utils import get_coco_api_from_dataset
from coco_eval import CocoEvaluator
import utils
def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq):
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
lr_scheduler = None
if epoch == 0:
warmup_factor = 1. / 1000
warmup_iters = min(1000, len(data_loader) - 1)
lr_scheduler = utils.warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)
for images, targets in metric_logger.log_every(data_loader, print_freq, header):
images = list(image.to(device) for image in images)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
loss_dict = model(images, targets)
losses = sum(loss for loss in loss_dict.values())
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
loss_value = losses_reduced.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
print(loss_dict_reduced)
sys.exit(1)
optimizer.zero_grad()
losses.backward()
optimizer.step()
if lr_scheduler is not None:
lr_scheduler.step()
metric_logger.update(loss=losses_reduced, **loss_dict_reduced)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
def _get_iou_types(model):
model_without_ddp = model
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
model_without_ddp = model.module
iou_types = ["bbox"]
if isinstance(model_without_ddp, torchvision.models.detection.MaskRCNN):
iou_types.append("segm")
if isinstance(model_without_ddp, torchvision.models.detection.KeypointRCNN):
iou_types.append("keypoints")
return iou_types
@torch.no_grad()
def evaluate(model, data_loader, device):
n_threads = torch.get_num_threads()
# FIXME remove this and make paste_masks_in_image run on the GPU
torch.set_num_threads(1)
cpu_device = torch.device("cpu")
model.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
coco = get_coco_api_from_dataset(data_loader.dataset)
iou_types = _get_iou_types(model)
coco_evaluator = CocoEvaluator(coco, iou_types)
for image, targets in metric_logger.log_every(data_loader, 100, header):
image = list(img.to(device) for img in image)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
torch.cuda.synchronize()
model_time = time.time()
outputs = model(image)
outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
model_time = time.time() - model_time
res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}
evaluator_time = time.time()
coco_evaluator.update(res)
evaluator_time = time.time() - evaluator_time
metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
coco_evaluator.synchronize_between_processes()
# accumulate predictions from all images
coco_evaluator.accumulate()
coco_evaluator.summarize()
torch.set_num_threads(n_threads)
return coco_evaluator
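# Minimal driver sketch (added for illustration; mirrors the flow in script.py,
# and assumes the project files and the PennFudanPed/ data are present locally).
if __name__ == '__main__':
    from data import PennFudanDataset
    from model import get_instance_segmentation_model
    import transforms as T

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    dataset = PennFudanDataset(root='PennFudanPed/', transforms=T.Compose([T.ToTensor()]))
    loader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True,
                                         collate_fn=utils.collate_fn)
    model = get_instance_segmentation_model(num_classes=2).to(device)
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005)
    train_one_epoch(model, optimizer, loader, device, epoch=0, print_freq=10)
    evaluate(model, loader, device=device)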

View File

@@ -1,23 +0,0 @@
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
def get_instance_segmentation_model(num_classes):
# load an instance segmentation model pre-trained on COCO
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
# get the number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
# now get the number of input features for the mask classifier
in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
hidden_layer = 256
# and replace the mask predictor with a new one
model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
hidden_layer,
num_classes)
return model
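# Smoke-test sketch (added for illustration): build the two-class model and run
# a forward pass on a random image to confirm the output schema.
if __name__ == '__main__':
    import torch

    model = get_instance_segmentation_model(num_classes=2)
    model.eval()
    with torch.no_grad():
        prediction = model([torch.rand(3, 300, 400)])
    print(sorted(prediction[0].keys()))  # ['boxes', 'labels', 'masks', 'scores']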

View File

@@ -1,544 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/pytorch/training/mask-rcnn-object-detection/pytorch-mask-rcnn.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Object detection with PyTorch, Mask R-CNN, and a custom Dockerfile\n",
"\n",
"In this tutorial, you will finetune a pre-trained [Mask R-CNN](https://arxiv.org/abs/1703.06870) model on images from the [Penn-Fudan Database for Pedestrian Detection and Segmentation](https://www.cis.upenn.edu/~jshi/ped_html/). The dataset has 170 images with 345 instances of pedestrians. After running this tutorial, you will have a model that can outline the silhouettes of all pedestrians within an image.\n",
"\n",
"You\u00e2\u20ac\u2122ll use Azure Machine Learning to: \n",
"\n",
"- Initialize a workspace \n",
"- Create a compute cluster\n",
"- Define a training environment\n",
"- Train a model remotely\n",
"- Register your model\n",
"- Generate predictions locally\n",
"\n",
"## Prerequisities\n",
"\n",
"- If you are using an Azure Machine Learning Notebook VM, your environment already meets these prerequisites. Otherwise, go through the [configuration notebook](../../../../../configuration.ipynb) to install the Azure Machine Learning Python SDK and [create an Azure ML Workspace](https://docs.microsoft.com/azure/machine-learning/how-to-manage-workspace#create-a-workspace). You also need matplotlib 3.2, pycocotools-2.0.0, torchvision >= 0.5.0 and torch >= 1.4.0.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Check core SDK version number, check other dependencies\n",
"import azureml.core\n",
"import matplotlib\n",
"import pycocotools\n",
"import torch\n",
"import torchvision\n",
"\n",
"print(\"SDK version:\", azureml.core.VERSION)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Diagnostics\n",
"\n",
"Opt-in diagnostics for better experience, quality, and security in future releases."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.telemetry import set_diagnostics_collection\n",
"\n",
"set_diagnostics_collection(send_diagnostics=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize a workspace\n",
"\n",
"Initialize a [workspace](https://docs.microsoft.com/en-us/azure/machine-learning/concept-workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`, using the [from_config()](https://docs.microsoft.com/python/api/azureml-core/azureml.core.workspace(class)?view=azure-ml-py#from-config-path-none--auth-none---logger-none---file-name-none-) method."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.workspace import Workspace\n",
"\n",
"ws = Workspace.from_config()\n",
"print('Workspace name: ' + ws.name, \n",
" 'Azure region: ' + ws.location, \n",
" 'Subscription id: ' + ws.subscription_id, \n",
" 'Resource group: ' + ws.resource_group, sep='\\n')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create or attach existing Azure ML Managed Compute\n",
"\n",
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/concept-compute-target) for training your model. In this tutorial, we use [Azure ML managed compute](https://docs.microsoft.com/azure/machine-learning/how-to-set-up-training-targets#amlcompute) for our remote training compute resource. Specifically, the below code creates a `STANDARD_NC6` GPU cluster that autoscales from 0 to 4 nodes.\n",
"\n",
"**Creation of Compute takes approximately 5 minutes.** If the Aauzre ML Compute with that name is already in your workspace, this code will skip the creation process. \n",
"\n",
"As with other Azure servies, there are limits on certain resources associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota.\n",
"\n",
"> Note that the below code creates GPU compute. If you instead want to create CPU compute, provide a different VM size to the `vm_size` parameter, such as `STANDARD_D2_V2`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"\n",
"# choose a name for your cluster\n",
"cluster_name = 'gpu-cluster'\n",
"\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cluster_name)\n",
" print('Found existing compute target.')\n",
"except ComputeTargetException:\n",
" print('Creating a new compute target...')\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', \n",
" max_nodes=4)\n",
"\n",
" # create the cluster\n",
" compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n",
"\n",
" compute_target.wait_for_completion(show_output=True)\n",
"\n",
"# use get_status() to get a detailed status for the current cluster. \n",
"print(compute_target.get_status().serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Define a training environment\n",
"\n",
"### Create a project directory\n",
"Create a directory that will contain all the code from your local machine that you will need access to on the remote resource. This includes the training script an any additional files your training script depends on."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"project_folder = './pytorch-peds'\n",
"\n",
"try:\n",
" os.makedirs(project_folder, exist_ok=False)\n",
"except FileExistsError:\n",
" print('project folder {} exists, moving on...'.format(project_folder))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Copy training script and dependencies into project directory"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import shutil\n",
"\n",
"files_to_copy = ['data', 'model', 'script', 'utils', 'transforms', 'coco_eval', 'engine', 'coco_utils']\n",
"for file in files_to_copy:\n",
" shutil.copy(os.path.join(os.getcwd(), (file + '.py')), project_folder)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create an experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"experiment_name = 'pytorch-peds'\n",
"experiment = Experiment(ws, name=experiment_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Specify dependencies with a custom Dockerfile\n",
"\n",
"There are a number of ways to [use environments](https://docs.microsoft.com/azure/machine-learning/how-to-use-environments) for specifying dependencies during model training. In this case, we use a custom Dockerfile."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Environment\n",
"\n",
"my_env = Environment(name='maskr-docker')\n",
"my_env.docker.enabled = True\n",
"with open(\"dockerfiles/Dockerfile\", \"r\") as f:\n",
" dockerfile_contents=f.read()\n",
"my_env.docker.base_dockerfile=dockerfile_contents\n",
"my_env.docker.base_image = None\n",
"my_env.python.interpreter_path = '/opt/miniconda/bin/python'\n",
"my_env.python.user_managed_dependencies = True\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a ScriptRunConfig\n",
"\n",
"Use the [ScriptRunConfig](https://docs.microsoft.com/python/api/azureml-core/azureml.core.scriptrunconfig?view=azure-ml-py) class to define your run. Specify the source directory, compute target, and environment."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.dnn import PyTorch\n",
"from azureml.core import ScriptRunConfig\n",
"\n",
"model_name = 'pytorch-peds'\n",
"output_dir = './outputs/'\n",
"n_epochs = 2\n",
"\n",
"script_args = [\n",
" '--model_name', model_name,\n",
" '--output_dir', output_dir,\n",
" '--n_epochs', n_epochs,\n",
"]\n",
"# Add training script to run config\n",
"runconfig = ScriptRunConfig(\n",
" source_directory=project_folder,\n",
" script=\"script.py\",\n",
" arguments=script_args)\n",
"\n",
"# Attach compute target to run config\n",
"runconfig.run_config.target = cluster_name\n",
"\n",
"# Uncomment the line below if you want to try this locally first\n",
"#runconfig.run_config.target = \"local\"\n",
"\n",
"# Attach environment to run config\n",
"runconfig.run_config.environment = my_env"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train remotely\n",
"\n",
"### Submit your run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Submit run \n",
"run = experiment.submit(runconfig)\n",
"\n",
"# to get more details of your run\n",
"print(run.get_details())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Monitor your run\n",
"\n",
"Use a widget to keep track of your run. You can also view the status of the run within the [Azure Machine Learning service portal](https://ml.azure.com)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"\n",
"RunDetails(run).show()\n",
"run.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test your model\n",
"\n",
"Now that we are done training, let's see how well this model actually performs.\n",
"\n",
"### Get your latest run\n",
"First, pull the latest run using `experiment.get_runs()`, which lists runs from `experiment` in reverse chronological order."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Run\n",
"\n",
"last_run = next(experiment.get_runs())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Register your model\n",
"Next, [register the model](https://docs.microsoft.com/azure/machine-learning/concept-model-management-and-deployment#register-package-and-deploy-models-from-anywhere) from your run. Registering your model assigns it a version and helps you with auditability."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"last_run.register_model(model_name=model_name, model_path=os.path.join(output_dir, model_name))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Download your model\n",
"Next, download this registered model. Notice how we can initialize the `Model` object with the name of the registered model, rather than a path to the file itself."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Model\n",
"\n",
"model = Model(workspace=ws, name=model_name)\n",
"path = model.download(target_dir='model', exist_ok=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Use your model to make a prediction\n",
"\n",
"Run inferencing on a single test image and display the results."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"from azureml.core import Dataset\n",
"from data import PennFudanDataset\n",
"from script import get_transform, download_data, NUM_CLASSES\n",
"from model import get_instance_segmentation_model\n",
"\n",
"if torch.cuda.is_available():\n",
" device = torch.device('cuda')\n",
"else:\n",
" device = torch.device('cpu')\n",
"\n",
"# Instantiate model with correct weights, cast to correct device, place in evaluation mode\n",
"predict_model = get_instance_segmentation_model(NUM_CLASSES)\n",
"predict_model.to(device)\n",
"predict_model.load_state_dict(torch.load(path, map_location=device))\n",
"predict_model.eval()\n",
"\n",
"# Load dataset\n",
"root_dir=download_data()\n",
"dataset_test = PennFudanDataset(root=root_dir, transforms=get_transform(train=False))\n",
"\n",
"# pick one image from the test set\n",
"img, _ = dataset_test[0]\n",
"\n",
"with torch.no_grad():\n",
" prediction = predict_model([img.to(device)])\n",
"\n",
"# model = torch.load(path)\n",
"#torch.load(model.get_model_path(model_name='outputs/model.pt'))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Display the input image\n",
"\n",
"While tensors are great for computers, a tensor of RGB values doesn't mean much to a human. Let's display the input image in a way that a human could understand."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from PIL import Image\n",
"\n",
"\n",
"Image.fromarray(img.mul(255).permute(1, 2, 0).byte().numpy())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Display the predicted masks\n",
"\n",
"The prediction consists of masks, displaying the outline of pedestrians in the image. Let's take a look at the first two masks, below."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Image.fromarray(prediction[0]['masks'][0, 0].mul(255).byte().cpu().numpy())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Image.fromarray(prediction[0]['masks'][1, 0].mul(255).byte().cpu().numpy())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"Congratulations! You just trained a Mask R-CNN model with PyTorch in Azure Machine Learning. As next steps, consider:\n",
"1. Learn more about using PyTorch in Azure Machine Learning service by checking out the [README](./README.md]\n",
"2. Try exporting your model to [ONNX](https://docs.microsoft.com/azure/machine-learning/concept-onnx) for accelerated inferencing."
]
}
],
"metadata": {
"authors": [
{
"name": "gopalv"
}
],
"category": "training",
"compute": [
"AML Compute"
],
"datasets": [
"Custom"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"PyTorch"
],
"friendly_name": "PyTorch object detection",
"index_order": 1,
"kernel_info": {
"name": "python3"
},
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5-final"
},
"nteract": {
"version": "nteract-front-end@1.0.0"
},
"tags": [
"remote run",
"docker"
],
"task": "Fine-tune PyTorch object detection model with a custom dockerfile"
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,14 +0,0 @@
name: pytorch-mask-rcnn
channels:
- pytorch
dependencies:
- cython
- pytorch==1.4.0
- torchvision
- pip:
  - azureml-sdk
  - azureml-widgets
  - azureml-dataprep[fuse]
  - pandas
  - matplotlib
  - pillow==7.0.0
  - git+https://github.com/philferriere/cocoapi.git#subdirectory=PythonAPI

View File

@@ -1,117 +0,0 @@
import argparse
import os
import torch
import torchvision
import transforms as T
import urllib.request
import utils
from azureml.core import Dataset, Run
from data import PennFudanDataset
from engine import train_one_epoch, evaluate
from model import get_instance_segmentation_model
from zipfile import ZipFile
NUM_CLASSES = 2
def download_data():
data_file = 'PennFudanPed.zip'
ds_path = 'PennFudanPed/'
urllib.request.urlretrieve('https://www.cis.upenn.edu/~jshi/ped_html/PennFudanPed.zip', data_file)
zip = ZipFile(file=data_file)
zip.extractall(path=ds_path)
return os.path.join(ds_path, zip.namelist()[0])
def get_transform(train):
transforms = []
# converts the image, a PIL image, into a PyTorch Tensor
transforms.append(T.ToTensor())
if train:
# during training, randomly flip the training images
# and ground-truth for data augmentation
transforms.append(T.RandomHorizontalFlip(0.5))
return T.Compose(transforms)
def main():
print("Torch version:", torch.__version__)
# get command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', type=str, default="pytorch-peds.pt",
help='name with which to register your model')
parser.add_argument('--output_dir', default="local-outputs",
type=str, help='output directory')
parser.add_argument('--n_epochs', type=int,
default=10, help='number of epochs')
args = parser.parse_args()
# In case user inputs a nested output directory
os.makedirs(name=args.output_dir, exist_ok=True)
# Get a dataset by name
root_dir = download_data()
# use our dataset and defined transformations
dataset = PennFudanDataset(root=root_dir, transforms=get_transform(train=True))
dataset_test = PennFudanDataset(root=root_dir, transforms=get_transform(train=False))
# split the dataset in train and test set
torch.manual_seed(1)
indices = torch.randperm(len(dataset)).tolist()
dataset = torch.utils.data.Subset(dataset, indices[:-50])
dataset_test = torch.utils.data.Subset(dataset_test, indices[-50:])
# define training and validation data loaders
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=2, shuffle=True, num_workers=4,
collate_fn=utils.collate_fn)
data_loader_test = torch.utils.data.DataLoader(
dataset_test, batch_size=1, shuffle=False, num_workers=4,
collate_fn=utils.collate_fn)
if torch.cuda.is_available():
print('Using GPU')
device = torch.device('cuda')
else:
print('Using CPU')
device = torch.device('cpu')
# our dataset has two classes only - background and person
num_classes = NUM_CLASSES
# get the model using our helper function
model = get_instance_segmentation_model(num_classes)
# move model to the right device
model.to(device)
# construct an optimizer
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=0.005,
momentum=0.9, weight_decay=0.0005)
# and a learning rate scheduler which decreases the learning rate by
# 10x every 3 epochs
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
step_size=3,
gamma=0.1)
for epoch in range(args.n_epochs):
# train for one epoch, printing every 10 iterations
train_one_epoch(
model, optimizer, data_loader, device, epoch, print_freq=10)
# update the learning rate
lr_scheduler.step()
# evaluate on the test dataset
evaluate(model, data_loader_test, device=device)
# Saving the state dict is recommended method, per
# https://pytorch.org/tutorials/beginner/saving_loading_models.html
torch.save(model.state_dict(), os.path.join(args.output_dir, args.model_name))
if __name__ == '__main__':
main()
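# Example local invocation (added for illustration; argument values are assumptions):
#   python script.py --model_name pytorch-peds.pt --output_dir local-outputs --n_epochs 2
# When submitted through the notebook's ScriptRunConfig, the same arguments are
# supplied via script_args and the ./outputs directory is uploaded to run history.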

View File

@@ -1,50 +0,0 @@
import random
import torch
from torchvision.transforms import functional as F
def _flip_coco_person_keypoints(kps, width):
flip_inds = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
flipped_data = kps[:, flip_inds]
flipped_data[..., 0] = width - flipped_data[..., 0]
# Maintain COCO convention that if visibility == 0, then x, y = 0
inds = flipped_data[..., 2] == 0
flipped_data[inds] = 0
return flipped_data
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
class RandomHorizontalFlip(object):
def __init__(self, prob):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
height, width = image.shape[-2:]
image = image.flip(-1)
bbox = target["boxes"]
bbox[:, [0, 2]] = width - bbox[:, [2, 0]]
target["boxes"] = bbox
if "masks" in target:
target["masks"] = target["masks"].flip(-1)
if "keypoints" in target:
keypoints = target["keypoints"]
keypoints = _flip_coco_person_keypoints(keypoints, width)
target["keypoints"] = keypoints
return image, target
class ToTensor(object):
def __call__(self, image, target):
image = F.to_tensor(image)
return image, target
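# Usage sketch (added for illustration): the same composition that
# get_transform(train=True) in script.py builds.
if __name__ == '__main__':
    from PIL import Image

    transform = Compose([ToTensor(), RandomHorizontalFlip(0.5)])
    image = Image.new('RGB', (200, 100))
    target = {'boxes': torch.zeros((0, 4), dtype=torch.float32)}
    image, target = transform(image, target)
    print(tuple(image.shape))  # (3, 100, 200)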

View File

@@ -1,326 +0,0 @@
from __future__ import print_function
from collections import defaultdict, deque
import datetime
import pickle
import time
import torch
import torch.distributed as dist
import errno
import os
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.tensor([tensor.numel()], device="cuda")
size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
if local_size != max_size:
padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
if torch.cuda.is_available():
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}',
'max mem: {memory:.0f}'
])
else:
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
])
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
def collate_fn(batch):
return tuple(zip(*batch))
def warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor):
def f(x):
if x >= warmup_iters:
return 1
alpha = float(x) / warmup_iters
return warmup_factor * (1 - alpha) + alpha
return torch.optim.lr_scheduler.LambdaLR(optimizer, f)
def mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
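# Usage sketch (added for illustration): MetricLogger smooths and prints metrics
# while iterating, as train_one_epoch() in engine.py does.
if __name__ == '__main__':
    logger = MetricLogger(delimiter='  ')
    for step in logger.log_every(range(100), print_freq=25, header='Demo:'):
        logger.update(loss=1.0 / (step + 1))
    print('final:', str(logger))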

View File

@@ -9,5 +9,4 @@ dependencies:
- keras
- tensorflow==2.0.0
- matplotlib
- azureml-dataprep
- fuse

View File

@@ -7,6 +7,5 @@ dependencies:
- tensorflow-gpu==1.13.2
- horovod==0.16.1
- matplotlib
- azureml-dataprep
- pandas
- fuse

View File

@@ -7,5 +7,4 @@ dependencies:
- keras
- tensorflow==1.14.0
- matplotlib
- azureml-dataprep
- fuse

View File

@@ -20,11 +20,11 @@ Using these samples, you will be able to do the following.
| File/folder | Description |
|-------------------|--------------------------------------------|
| [README.md](README.md) | This README file. |
| [devenv_setup.ipynb](setup/devenv_setup.ipynb) | Notebook to setup development environment for Azure ML RL |
| [cartpole_ci.ipynb](cartpole-on-compute-instance/cartpole_ci.ipynb) | Notebook to train a Cartpole playing agent on an Azure ML Compute Instance |
| [cartpole_cc.ipynb](cartpole-on-single-compute/cartpole_cc.ipynb) | Notebook to train a Cartpole playing agent on an Azure ML Compute Cluster (single node) |
| [cartpole_sc.ipynb](cartpole-on-single-compute/cartpole_sc.ipynb) | Notebook to train a Cartpole playing agent on an Azure ML Compute Cluster (single node) |
| [pong_rllib.ipynb](atari-on-distributed-compute/pong_rllib.ipynb) | Notebook to train Pong agent using RLlib on multiple compute targets |
| [minecraft.ipynb](minecraft-on-distributed-compute/minecraft.ipynb) | Notebook to train an agent to navigate through a lava maze in the Minecraft game |
## Prerequisites
@@ -111,7 +111,7 @@ contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additio
For more on SDK concepts, please refer to [notebooks](https://github.com/Azure/MachineLearningNotebooks).
**Please let us know your feedback.**
**Please let us know your [feedback](https://github.com/Azure/MachineLearningNotebooks/labels/Reinforcement%20Learning).**

View File

@@ -23,17 +23,18 @@ if __name__ == "__main__":
ray.init(address=args.ray_address)
tune.run(run_or_experiment=args.run,
config={
"env": args.env,
"num_gpus": args.config["num_gpus"],
"num_workers": args.config["num_workers"],
"callbacks": {"on_train_result": callbacks.on_train_result},
"sample_batch_size": 50,
"train_batch_size": 1000,
"num_sgd_iter": 2,
"num_data_loader_buffers": 2,
"model": {"dim": 42},
},
stop=args.stop,
local_dir='./logs')
tune.run(
run_or_experiment=args.run,
config={
"env": args.env,
"num_gpus": args.config["num_gpus"],
"num_workers": args.config["num_workers"],
"callbacks": {"on_train_result": callbacks.on_train_result},
"sample_batch_size": 50,
"train_batch_size": 1000,
"num_sgd_iter": 2,
"num_data_loader_buffers": 2,
"model": {"dim": 42},
},
stop=args.stop,
local_dir='./logs')

View File

@@ -20,8 +20,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# Azure ML Reinforcement Learning Sample - Pong problem\n",
"Azure ML Reinforcement Learning (Azure ML RL) is a managed service for running distributed RL (reinforcement learning) simulation and training using the Ray framework.\n",
"# Reinforcement Learning in Azure Machine Learning - Pong problem\n",
"Reinforcement Learning in Azure Machine Learning is a managed service for running distributed reinforcement learning training and simulation using the open source Ray framework.\n",
"This example uses Ray RLlib to train a Pong playing agent on a multi-node cluster.\n",
"\n",
"## Pong problem\n",
@@ -48,7 +48,7 @@
"source": [
"The goal here is to train an agent to win an episode of Pong game against opponent with the score of at least 18 points. An episode in Pong runs until one of the players reaches a score of 21. Episodes are a terminology that is used across all the [OpenAI gym](https://gym.openai.com/envs/Pong-v0/) environments that contains a strictly defined task.\n",
"\n",
"Training a Pong agent is a CPU intensive task and this example demonstrates the use of Azure ML RL service to train an agent faster in a distributed, parallel environment. You'll learn more about using the head and the worker compute targets to train an agent in this notebook below."
"Training a Pong agent is a compute-intensive task and this example demonstrates the use of Reinforcement Learning in Azure Machine Learning service to train an agent faster in a distributed, parallel environment. You'll learn more about using the head and the worker compute targets to train an agent in this notebook below."
]
},
{
@@ -57,7 +57,7 @@
"source": [
"## Prerequisite\n",
"\n",
"The user should have completed the [Azure ML Reinforcement Learning Sample - Setting Up Development Environment](../setup/devenv_setup.ipynb) to setup a virtual network. This virtual network will be used here for head and worker compute targets. It is highly recommended that the user should go through the [Azure ML Reinforcement Learning Sample - Cartpole Problem](../cartpole-on-single-compute/cartpole_cc.ipynb) to understand the basics of Azure ML RL and Ray RLlib used in this notebook."
"The user should have completed the [Reinforcement Learning in Azure Machine Learning - Setting Up Development Environment](../setup/devenv_setup.ipynb) to setup a virtual network. This virtual network will be used here for head and worker compute targets. It is highly recommended that the user should go through the [Reinforcement Learning in Azure Machine Learning - Cartpole Problem on Single Compute](../cartpole-on-single-compute/cartpole_sc.ipynb) to understand the basics of Reinforcement Learning in Azure Machine Learning and Ray RLlib used in this notebook."
]
},
{
@@ -69,7 +69,7 @@
"\n",
"* Connecting to a workspace to enable communication between your local machine and remote resources\n",
"* Creating an experiment to track all your runs\n",
"* Creating a remote head and worker compute target on a vnet to use for training"
"* Creating remote head and worker compute target on a virtual network to use for training"
]
},
{
@@ -88,19 +88,19 @@
"source": [
"%matplotlib inline\n",
"\n",
"# Azure ML core imports\n",
"# Azure Machine Learning core imports\n",
"import azureml.core\n",
"\n",
"# Check core SDK version number\n",
"print(\"Azure ML SDK Version: \", azureml.core.VERSION)"
"print(\"Azure Machine Learning SDK Version: \", azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Get Azure ML workspace\n",
"Get a reference to an existing Azure ML workspace."
"### Get Azure Machine Learning workspace\n",
"Get a reference to an existing Azure Machine Learning workspace."
]
},
{
@@ -119,7 +119,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create Azure ML experiment\n",
"### Create Azure Machine Learning experiment\n",
"Create an experiment to track the runs in your workspace."
]
},
@@ -140,9 +140,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Specify the name of your vnet\n",
"### Specify the name of your virtual network\n",
"\n",
"The resource group you use must contain a vnet. Specify the name of the vnet here created in the [Azure ML Reinforcement Learning Sample - Setting Up Development Environment](../setup/devenv_setup.ipynb)."
"The resource group you use must contain a virtual network. Specify the name of the virtual network here created in the [Azure Machine Learning Reinforcement Learning Sample - Setting Up Development Environment](../setup/devenv_setup.ipynb)."
]
},
{
@@ -159,9 +159,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create head computing cluster\n",
"### Create head compute target\n",
"\n",
"In this example, we show how to set up separate compute clusters for the Ray head and Ray worker nodes. First we define the head cluster with GPU for the Ray head node. One CPU of the head node will be used for the Ray head process and the rest of the CPUs will be used by the Ray worker processes."
"In this example, we show how to set up separate compute targets for the Ray head and Ray worker nodes. First we define the head cluster with GPU for the Ray head node. One CPU of the head node will be used for the Ray head process and the rest of the CPUs will be used by the Ray worker processes."
]
},
{
@@ -186,15 +186,17 @@
" if head_compute_target.provisioning_state == 'Succeeded':\n",
" print('found head compute target. just use it', head_compute_name)\n",
" else: \n",
" raise Exception('found head compute target but it is in state', head_compute_target.provisioning_state)\n",
" raise Exception(\n",
" 'found head compute target but it is in state', head_compute_target.provisioning_state)\n",
"else:\n",
" print('creating a new head compute target...')\n",
" provisioning_config = AmlCompute.provisioning_configuration(vm_size=head_vm_size,\n",
" min_nodes=head_compute_min_nodes, \n",
" max_nodes=head_compute_max_nodes,\n",
" vnet_resourcegroup_name=ws.resource_group,\n",
" vnet_name=vnet_name,\n",
" subnet_name='default')\n",
" provisioning_config = AmlCompute.provisioning_configuration(\n",
" vm_size=head_vm_size,\n",
" min_nodes=head_compute_min_nodes, \n",
" max_nodes=head_compute_max_nodes,\n",
" vnet_resourcegroup_name=ws.resource_group,\n",
" vnet_name=vnet_name,\n",
" subnet_name='default')\n",
"\n",
" # Create the cluster\n",
" head_compute_target = ComputeTarget.create(ws, head_compute_name, provisioning_config)\n",
@@ -203,7 +205,7 @@
" # If no min node count is provided it will use the scale settings for the cluster\n",
" head_compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
" \n",
" # For a more detailed view of current AmlCompute status, use get_status()\n",
" # For a more detailed view of current AmlCompute status, use get_status()\n",
" print(head_compute_target.get_status().serialize())"
]
},
@@ -211,9 +213,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create worker computing cluster\n",
"### Create worker compute target\n",
"\n",
"Now we create a compute cluster with CPUs for the additional Ray worker nodes. CPUs in these worker nodes are used by Ray worker processes. Each Ray worker node may have multiple Ray worker processes depending on CPUs on the worker node. Ray can distribute multiple worker tasks on each worker node."
"Now we create a compute target with CPUs for the additional Ray worker nodes. CPUs in these worker nodes are used by Ray worker processes. Each Ray worker node, depending on the CPUs on the node, may have multiple Ray worker processes. There can be multiple worker tasks on each worker process (core)."
]
},
{
@@ -222,7 +224,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Choose a name for your Ray worker cluster\n",
"# Choose a name for your Ray worker compute target\n",
"worker_compute_name = 'worker-cpu'\n",
"worker_compute_min_nodes = 0 \n",
"worker_compute_max_nodes = 4\n",
@@ -237,24 +239,26 @@
" if worker_compute_target.provisioning_state == 'Succeeded':\n",
" print('found worker compute target. just use it', worker_compute_name)\n",
" else: \n",
" raise Exception('found worker compute target but it is in state', head_compute_target.provisioning_state)\n",
" raise Exception(\n",
" 'found worker compute target but it is in state', head_compute_target.provisioning_state)\n",
"else:\n",
" print('creating a new worker compute target...')\n",
" provisioning_config = AmlCompute.provisioning_configuration(vm_size=worker_vm_size,\n",
" min_nodes=worker_compute_min_nodes, \n",
" max_nodes=worker_compute_max_nodes,\n",
" vnet_resourcegroup_name=ws.resource_group,\n",
" vnet_name=vnet_name,\n",
" subnet_name='default')\n",
" provisioning_config = AmlCompute.provisioning_configuration(\n",
" vm_size=worker_vm_size,\n",
" min_nodes=worker_compute_min_nodes,\n",
" max_nodes=worker_compute_max_nodes,\n",
" vnet_resourcegroup_name=ws.resource_group,\n",
" vnet_name=vnet_name,\n",
" subnet_name='default')\n",
"\n",
" # Create the cluster\n",
" # Create the compute target\n",
" worker_compute_target = ComputeTarget.create(ws, worker_compute_name, provisioning_config)\n",
" \n",
" # Can poll for a minimum number of nodes and for a specific timeout. \n",
" # If no min node count is provided it will use the scale settings for the cluster\n",
" worker_compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
" \n",
" # For a more detailed view of current AmlCompute status, use get_status()\n",
" # For a more detailed view of current AmlCompute status, use get_status()\n",
" print(worker_compute_target.get_status().serialize())"
]
},
@@ -262,12 +266,12 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train Pong Agent Using Azure ML RL\n",
"To facilitate reinforcement learning, Azure Machine Learning Python SDK provides a high level abstraction, the _ReinforcementLearningEstimator_ class, which allows users to easily construct RL run configurations for the underlying RL framework. Azure ML RL initially supports the [Ray framework](https://ray.io/) and its highly customizable [RLLib](https://ray.readthedocs.io/en/latest/rllib.html#rllib-scalable-reinforcement-learning). In this section we show how to use _ReinforcementLearningEstimator_ and Ray/RLLib framework to train a Pong playing agent.\n",
"## Train Pong Agent\n",
"To facilitate reinforcement learning, Azure Machine Learning Python SDK provides a high level abstraction, the _ReinforcementLearningEstimator_ class, which allows users to easily construct reinforcement learning run configurations for the underlying reinforcement learning framework. Reinforcement Learning in Azure Machine Learning supports the open source [Ray framework](https://ray.io/) and its highly customizable [RLLib](https://ray.readthedocs.io/en/latest/rllib.html#rllib-scalable-reinforcement-learning). In this section we show how to use _ReinforcementLearningEstimator_ and Ray/RLLib framework to train a Pong playing agent.\n",
"\n",
"\n",
"### Define worker configuration\n",
"Define a `WorkerConfiguration` using your worker compute target. We also specify the number of nodes in the worker compute target to be used for training and additional PIP packages to install on those nodes as a part of setup.\n",
"Define a `WorkerConfiguration` using your worker compute target. We specify the number of nodes in the worker compute target to be used for training and additional PIP packages to install on those nodes as a part of setup.\n",
"In this case, we define the PIP packages as dependencies for both head and worker nodes. With this setup, the game simulations will run directly on the worker compute nodes."
]
},
@@ -285,7 +289,7 @@
"# Specify the Ray worker configuration\n",
"worker_conf = WorkerConfiguration(\n",
" \n",
" # Azure ML compute cluster to run Ray workers\n",
" # Azure Machine Learning compute target to run Ray workers\n",
" compute_target=worker_compute_target, \n",
" \n",
" # Number of worker nodes\n",
@@ -305,7 +309,7 @@
"source": [
"### Create reinforcement learning estimator\n",
"\n",
"The `ReinforcementLearningEstimator` is used to submit a job to Azure Machine Learning to start the Ray experiment run. We define the training script parameters here that will be passed to estimator. \n",
"The `ReinforcementLearningEstimator` is used to submit a job to Azure Machine Learning to start the Ray experiment run. We define the training script parameters here that will be passed to the estimator. \n",
"\n",
"We specify `episode_reward_mean` to 18 as we want to stop the training as soon as the trained agent reaches an average win margin of at least 18 point over opponent over all episodes in the training epoch.\n",
"Number of Ray worker processes are defined by parameter `num_workers`. We set it to 13 as we have 13 CPUs available in our compute targets. Multiple Ray worker processes parallelizes agent training and helps in achieving our goal faster. \n",
@@ -348,7 +352,7 @@
" \"--stop\": '\\'{\"episode_reward_mean\": 18, \"time_total_s\": 3600}\\'',\n",
"}\n",
"\n",
"# RL estimator\n",
"# Reinforcement learning estimator\n",
"rl_estimator = ReinforcementLearningEstimator(\n",
" \n",
" # Location of source files\n",
@@ -361,7 +365,7 @@
" # Defined above.\n",
" script_params=script_params,\n",
" \n",
" # The Azure ML compute target set up for Ray head nodes\n",
" # The Azure Machine Learning compute target set up for Ray head nodes\n",
" compute_target=head_compute_target,\n",
" \n",
" # Pip packages\n",
@@ -370,7 +374,7 @@
" # GPU usage\n",
" use_gpu=True,\n",
" \n",
" # RL framework. Currently must be Ray.\n",
" # Reinforcement learning framework. Currently must be Ray.\n",
" rl_framework=Ray(),\n",
" \n",
" # Ray worker configuration defined above.\n",
@@ -394,23 +398,24 @@
"metadata": {},
"source": [
"### Training script\n",
"As recommended in [RLLib](https://ray.readthedocs.io/en/latest/rllib.html) documentations, we use Ray [Tune](https://ray.readthedocs.io/en/latest/tune.html) API to run training algorithm. All the RLLib built-in trainers are compatible with the Tune API. Here we use tune.run() to execute a built-in training algorithm. For convenience, down below you can see part of the entry script where we make this call.\n",
"As recommended in [RLlib](https://ray.readthedocs.io/en/latest/rllib.html) documentations, we use Ray [Tune](https://ray.readthedocs.io/en/latest/tune.html) API to run the training algorithm. All the RLlib built-in trainers are compatible with the Tune API. Here we use tune.run() to execute a built-in training algorithm. For convenience, down below you can see part of the entry script where we make this call.\n",
"\n",
"```python\n",
" tune.run(run_or_experiment=args.run,\n",
" config={\n",
" \"env\": args.env,\n",
" \"num_gpus\": args.config[\"num_gpus\"],\n",
" \"num_workers\": args.config[\"num_workers\"],\n",
" \"callbacks\": {\"on_train_result\": callbacks.on_train_result},\n",
" \"sample_batch_size\": 50,\n",
" \"train_batch_size\": 1000,\n",
" \"num_sgd_iter\": 2,\n",
" \"num_data_loader_buffers\": 2,\n",
" \"model\": {\"dim\": 42},\n",
" },\n",
" stop=args.stop,\n",
" local_dir='./logs')\n",
" tune.run(\n",
" run_or_experiment=args.run,\n",
" config={\n",
" \"env\": args.env,\n",
" \"num_gpus\": args.config[\"num_gpus\"],\n",
" \"num_workers\": args.config[\"num_workers\"],\n",
" \"callbacks\": {\"on_train_result\": callbacks.on_train_result},\n",
" \"sample_batch_size\": 50,\n",
" \"train_batch_size\": 1000,\n",
" \"num_sgd_iter\": 2,\n",
" \"num_data_loader_buffers\": 2,\n",
" \"model\": {\"dim\": 42},\n",
" },\n",
" stop=args.stop,\n",
" local_dir='./logs')\n",
"```"
]
},
@@ -437,7 +442,7 @@
"source": [
"### Monitor the run\n",
"\n",
"Azure ML provides a Jupyter widget to show the real-time status of an experiment run. You could use this widget to monitor the status of runs. The widget shows the list of two child runs, one for head compute target run and one for worker compute target run, as well. You can click on the link under Status to see the details of the child run."
"Azure Machine Learning provides a Jupyter widget to show the status of an experiment run. You could use this widget to monitor the status of the runs. The widget shows the list of two child runs, one for head compute target run and one for worker compute target run. You can click on the link under **Status** to see the details of the child run. It will also show the metrics being logged."
]
},
{
@@ -455,9 +460,29 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Stop the run\n",
"\n",
"To stop the run, call `run.cancel()`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Uncomment line below to cancel the run\n",
"# run.cancel()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Wait for completion\n",
"Wait for the run to complete before proceeding. If you want to stop the run, you may skip this and move to next section below. \n",
"\n",
"**Note: the run may take anywhere from 30 minutes to 45 minutes to complete.**"
"**Note: The run may take anywhere from 30 minutes to 45 minutes to complete.**"
]
},
{
@@ -469,24 +494,6 @@
"run.wait_for_completion()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Stop the run\n",
"\n",
"To cancel the run, call run.cancel()."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# run.cancel()"
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -539,8 +546,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"We observe that during the training over multiple episodes, the agent learn to win the Pong game against opponent with our target of 18 points in each episode of 21 points.\n",
"**Congratulations!! You have trained your Pong agent to win a game marvelously.**"
"We observe that during the training over multiple episodes, the agent learns to win the Pong game against opponent with our target of 18 points in each episode of 21 points.\n",
"**Congratulations!! You have trained your Pong agent to win a game.**"
]
},
{
@@ -570,7 +577,7 @@
"metadata": {},
"source": [
"## Next\n",
"In this example, you learnt how to solve distributed RL training problems using head and worker compute targets. This is currently the last introductory tutorial for Azure Machine Learning service's Reinforcement Learning offering. We would love to hear your feedback to build the features you need!"
"In this example, you learned how to solve distributed reinforcement learning training problems using head and worker compute targets. This was an introductory tutorial on Reinforement Learning in Azure Machine Learning service offering. We would love to hear your feedback to build the features you need!"
]
}
],
@@ -595,7 +602,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.4"
"version": "3.6.9"
},
"notice": "Copyright (c) Microsoft Corporation. All rights reserved.\u00e2\u20ac\u00afLicensed under the MIT License.\u00e2\u20ac\u00af "
},

View File

@@ -20,11 +20,11 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# Azure ML Reinforcement Learning Sample - Cartpole Problem on Compute Instance\n",
"# Reinforcement Learning in Azure Machine Learning - Cartpole Problem on Compute Instance\n",
"\n",
"Azure ML Reinforcement Learning (Azure ML RL) is a managed service for running reinforcement learning training and simulation. With Azure MLRL, data scientists can start developing RL systems on one machine, and scale to compute clusters with 100\u00e2\u20ac\u2122s of nodes if needed.\n",
"Reinforcement Learning in Azure Machine Learning is a managed service for running reinforcement learning training and simulation. With Reinforcement Learning in Azure Machine Learning, data scientists can start developing reinforcement learning systems on one machine, and scale to compute targets with 100\u00e2\u20ac\u2122s of nodes if needed.\n",
"\n",
"This example shows how to use Azure ML RL to train a Cartpole playing agent on a compute instance."
"This example shows how to use Reinforcement Learning in Azure Machine Learning to train a Cartpole playing agent on a compute instance."
]
},
{
@@ -56,7 +56,7 @@
"metadata": {},
"source": [
"### Prerequisite\n",
"The user should have completed the Azure Machine Learning Tutorial: [Get started creating your first ML experiment with the Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-1st-experiment-sdk-setup). You will need to make sure that you have a valid subscription id, a resource group and a workspace. All datastores and datasets you use should be associated with your workspace."
"The user should have completed the Azure Machine Learning Tutorial: [Get started creating your first ML experiment with the Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-1st-experiment-sdk-setup). You will need to make sure that you have a valid subscription ID, a resource group, and an Azure Machine Learning workspace. All datastores and datasets you use should be associated with your workspace."
]
},
{
@@ -75,8 +75,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Azure ML SDK \n",
"Display the Azure ML SDK version."
"### Azure Machine Learning SDK \n",
"Display the Azure Machine Learning SDK version."
]
},
{
@@ -86,15 +86,15 @@
"outputs": [],
"source": [
"import azureml.core\n",
"print(\"Azure ML SDK Version: \", azureml.core.VERSION)"
"print(\"Azure Machine Learning SDK Version: \", azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Get Azure ML workspace\n",
"Get a reference to an existing Azure ML workspace."
"### Get Azure Machine Learning workspace\n",
"Get a reference to an existing Azure Machine Learning workspace."
]
},
{
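A minimal sketch of that step, assuming a `config.json` written by `ws.write_config()` or downloaded from the Azure portal:

```python
# A minimal sketch: load an existing workspace from a local config.json.
from azureml.core import Workspace

ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, sep='\n')
```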
@@ -163,18 +163,22 @@
"source": [
"# Load current compute instance info\n",
"current_compute_instance = load_nbvm()\n",
"print(\"Current compute instance:\", current_compute_instance)\n",
"\n",
"# For this demo, let's use the current compute instance as the compute target, if available\n",
"if current_compute_instance:\n",
" print(\"Current compute instance:\", current_compute_instance)\n",
" instance_name = current_compute_instance['instance']\n",
"else:\n",
" instance_name = next(iter(ws.compute_targets))\n",
" print(\"Instance name:\", instance_name)\n",
"\n",
"compute_target = ws.compute_targets[instance_name]\n",
"\n",
"print(\"Compute target status:\")\n",
"print(compute_target.get_status().serialize())\n",
"try:\n",
" print(compute_target.get_status().serialize())\n",
"except:\n",
" print(compute_target.get_status())\n",
"\n",
"print(\"Compute target size:\")\n",
"print(compute_target.size(ws))"
@@ -184,7 +188,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create Azure ML experiment\n",
"### Create Azure Machine Learning experiment\n",
"Create an experiment to track the runs in your workspace. "
]
},
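A minimal sketch of that step (the experiment name is illustrative):

```python
# A minimal sketch: create (or get) an experiment to group related runs.
# The experiment name is illustrative, not the notebook's exact value.
from azureml.core.experiment import Experiment

experiment_name = 'CartPole-v0-CI'
exp = Experiment(workspace=ws, name=experiment_name)
```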
@@ -204,8 +208,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train Cartpole Agent Using Azure ML RL\n",
"To facilitate reinforcement learning, Azure Machine Learning Python SDK provides a high level abstraction, the _ReinforcementLearningEstimator_ class, which allows users to easily construct RL run configurations for the underlying RL framework. Azure ML RL initially supports the [Ray framework](https://ray.io/) and its highly customizable [RLlib](https://ray.readthedocs.io/en/latest/rllib.html#rllib-scalable-reinforcement-learning). In this section we show how to use _ReinforcementLearningEstimator_ and Ray/RLlib framework to train a cartpole playing agent. "
"## Train Cartpole Agent\n",
"To facilitate reinforcement learning, Azure Machine Learning Python SDK provides a high level abstraction, the _ReinforcementLearningEstimator_ class, which allows users to easily construct reinforcement learning run configurations for the underlying reinforcement learning framework. Reinforcement Learning in Azure Machine Learning supports the open source [Ray framework](https://ray.io/) and its highly customizable [RLlib](https://ray.readthedocs.io/en/latest/rllib.html#rllib-scalable-reinforcement-learning). In this section we show how to use _ReinforcementLearningEstimator_ and Ray/RLlib framework to train a cartpole playing agent. "
]
},
{
@@ -222,7 +226,7 @@
"- `entry_script`, path to your entry script relative to the source directory,\n",
"- `script_params`, constant parameters to be passed to each run of training script,\n",
"- `compute_target`, reference to the compute target in which the trainer and worker(s) jobs will be executed,\n",
"- `rl_framework`, the RL framework to be used (currently must be Ray).\n",
"- `rl_framework`, the reinforcement learning framework to be used (currently must be Ray).\n",
"\n",
"We use the `script_params` parameter to pass in general and algorithm-specific parameters to the training script.\n"
]
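Taken together, a hedged sketch of how these parameters might be assembled; the import path assumes the preview `azureml.contrib.train.rl` module, and the file names and parameter values are illustrative rather than the notebook's exact configuration, which appears in the hunks below:

```python
# A hedged sketch of a reinforcement learning estimator. The import path
# assumes the preview azureml.contrib.train.rl module; file names and
# script parameters are illustrative.
from azureml.contrib.train.rl import ReinforcementLearningEstimator, Ray

rl_estimator = ReinforcementLearningEstimator(
    source_directory='files',              # folder holding the training script
    entry_script='cartpole_training.py',   # script run on the head node
    script_params={'--run': 'PPO'},        # passed through to the entry script
    compute_target=compute_target,         # where the trainer/worker jobs run
    rl_framework=Ray()                     # currently the only supported framework
)
run = exp.submit(config=rl_estimator)
```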
@@ -273,10 +277,10 @@
" # A dictionary of arguments to pass to the training script specified in ``entry_script``\n",
" script_params=script_params,\n",
" \n",
" # The Azure ML compute target set up for Ray head nodes\n",
" # The Azure Machine Learning compute target set up for Ray head nodes\n",
" compute_target=compute_target,\n",
" \n",
" # RL framework. Currently must be Ray.\n",
" # Reinforcement learning framework. Currently must be Ray.\n",
" rl_framework=Ray()\n",
")"
]
@@ -345,11 +349,11 @@
"metadata": {},
"source": [
"### Monitor experiment\n",
"Azure ML provides a Jupyter widget to show the real-time status of an experiment run. You could use this widget to monitor status of the runs.\n",
"Azure Machine Learning provides a Jupyter widget to show the status of an experiment run. You could use this widget to monitor the status of the runs.\n",
"\n",
"Note that _ReinforcementLearningEstimator_ creates at least two runs: (a) A parent run, i.e. the run returned above, and (b) a collection of child runs. The number of the child runs depends on the configuration of the reinforcement learning estimator. In our simple scenario, configured above, only one child run will be created.\n",
"\n",
"The widget will show a list of the child runs as well. You can click on the link under **Status** to see the details of a child run."
"The widget will show a list of the child runs as well. You can click on the link under **Status** to see the details of a child run. It will also show the metrics being logged."
]
},
{
@@ -369,7 +373,7 @@
"source": [
"### Stop the run\n",
"\n",
"To cancel the run, call `training_run.cancel()`."
"To stop the run, call `training_run.cancel()`."
]
},
{
@@ -577,10 +581,10 @@
" training_artifacts_ds.as_named_input('artifacts_dataset'),\n",
" training_artifacts_ds.as_named_input('artifacts_path').as_mount()],\n",
" \n",
" # The Azure ML compute target\n",
" # The Azure Machine Learning compute target\n",
" compute_target=compute_target,\n",
" \n",
" # RL framework. Currently must be Ray.\n",
" # Reinforcement learning framework. Currently must be Ray.\n",
" rl_framework=Ray(),\n",
" \n",
" # Additional pip packages to install\n",
@@ -662,7 +666,7 @@
"metadata": {},
"source": [
"## Next\n",
"This example was about running Azure ML RL (Ray/RLlib Framework) on compute instance. Please see [Cartpole problem](../cartpole-on-single-compute/cartpole_cc.ipynb)\n",
"This example was about running Reinforcement Learning in Azure Machine Learning (Ray/RLlib Framework) on a compute instance. Please see [Cartpole Problem on Single Compute](../cartpole-on-single-compute/cartpole_sc.ipynb)\n",
"example which uses Ray RLlib to train a Cartpole playing agent on a single node remote compute.\n"
]
}

View File

@@ -13,18 +13,18 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/tutorials/how-to-use-azureml/reinforcement-learning/cartpole_on_single_compute/cartpole_cc.png)"
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/tutorials/how-to-use-azureml/reinforcement-learning/cartpole_on_single_compute/cartpole_sc.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Azure ML Reinforcement Learning Sample - Cartpole Problem\n",
"# Reinforcement Learning in Azure Machine Learning - Cartpole Problem on Single Compute\n",
"\n",
"Azure ML Reinforcement Learning (Azure ML RL) is a managed service for running reinforcement learning training and simulation. With Azure MLRL, data scientists can start developing RL systems on one machine, and scale to compute clusters with 100\u00e2\u20ac\u2122s of nodes if needed.\n",
"Reinforcement Learning in Azure Machine Learning is a managed service for running reinforcement learning training and simulation. With Reinforcement Learning in Azure Machine Learning, data scientists can start developing reinforcement learning systems on one machine, and scale to compute targets with 100\u00e2\u20ac\u2122s of nodes if needed.\n",
"\n",
"This example shows how to use Azure ML RL to train a Cartpole playing agent on a single machine. "
"This example shows how to use Reinforcement Learning in Azure Machine Learning to train a Cartpole playing agent on a single compute. "
]
},
{
@@ -56,7 +56,7 @@
"metadata": {},
"source": [
"### Prerequisite\n",
"The user should have completed the Azure Machine Learning Tutorial: [Get started creating your first ML experiment with the Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-1st-experiment-sdk-setup). You will need to make sure that you have a valid subscription id, a resource group and a workspace. All datastores and datasets you use should be associated with your workspace."
"The user should have completed the Azure Machine Learning Tutorial: [Get started creating your first ML experiment with the Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-1st-experiment-sdk-setup). You will need to make sure that you have a valid subscription ID, a resource group, and an Azure Machine Learning workspace. All datastores and datasets you use should be associated with your workspace."
]
},
{
@@ -75,8 +75,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Azure ML SDK \n",
"Display the Azure ML SDK version."
"### Azure Machine Learning SDK \n",
"Display the Azure Machine Learning SDK version."
]
},
{
@@ -87,15 +87,15 @@
"source": [
"import azureml.core\n",
"\n",
"print(\"Azure ML SDK Version: \", azureml.core.VERSION)"
"print(\"Azure Machine Learning SDK Version: \", azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Get Azure ML workspace\n",
"Get a reference to an existing Azure ML workspace."
"### Get Azure Machine Learning workspace\n",
"Get a reference to an existing Azure Machine Learning workspace."
]
},
{
@@ -118,7 +118,7 @@
"\n",
"A compute target is a designated compute resource where you run your training and simulation scripts. This location may be your local machine or a cloud-based compute resource. The code below shows how to create a cloud-based compute target. For more information see [What are compute targets in Azure Machine Learning?](https://docs.microsoft.com/en-us/azure/machine-learning/concept-compute-target)\n",
"\n",
"**Note: Creation of a compute resource can take several minutes**"
"**Note: Creation of a compute resource can take several minutes**. Please make sure to change `STANDARD_D2_V2` to a [size available in your region](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines)."
]
},
{
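As a reference for the cell this note describes, a minimal provisioning sketch (the cluster name and node counts are illustrative):

```python
# A minimal sketch: provision (or reuse) a cloud compute target.
# Cluster name, VM size, and node counts are illustrative.
from azureml.core.compute import AmlCompute, ComputeTarget
from azureml.core.compute_target import ComputeTargetException

compute_name = 'cpu-cluster-d2'
try:
    compute_target = ComputeTarget(workspace=ws, name=compute_name)
    print('Found existing compute target.')
except ComputeTargetException:
    config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',
                                                   min_nodes=0,
                                                   max_nodes=4)
    compute_target = ComputeTarget.create(ws, compute_name, config)
    compute_target.wait_for_completion(show_output=True)
```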
@@ -158,7 +158,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create Azure ML experiment\n",
"### Create Azure Machine Learning experiment\n",
"Create an experiment to track the runs in your workspace. "
]
},
@@ -178,8 +178,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train Cartpole Agent Using Azure ML RL\n",
"To facilitate reinforcement learning, Azure Machine Learning Python SDK provides a high level abstraction, the _ReinforcementLearningEstimator_ class, which allows users to easily construct RL run configurations for the underlying RL framework. Azure ML RL initially supports the [Ray framework](https://ray.io/) and its highly customizable [RLlib](https://ray.readthedocs.io/en/latest/rllib.html#rllib-scalable-reinforcement-learning). In this section we show how to use _ReinforcementLearningEstimator_ and Ray/RLlib framework to train a cartpole playing agent. "
"## Train Cartpole Agent\n",
"To facilitate reinforcement learning, Azure Machine Learning Python SDK provides a high level abstraction, the _ReinforcementLearningEstimator_ class, which allows users to easily construct reinforcement learning run configurations for the underlying reinforcement learning framework. Reinforcement Learning in Azure Machine Learning supports the open source [Ray framework](https://ray.io/) and its highly customizable [RLlib](https://ray.readthedocs.io/en/latest/rllib.html#rllib-scalable-reinforcement-learning). In this section we show how to use _ReinforcementLearningEstimator_ and Ray/RLlib framework to train a cartpole playing agent. "
]
},
{
@@ -196,7 +196,7 @@
"- `entry_script`, path to your entry script relative to the source directory,\n",
"- `script_params`, constant parameters to be passed to each run of training script,\n",
"- `compute_target`, reference to the compute target in which the trainer and worker(s) jobs will be executed,\n",
"- `rl_framework`, the RL framework to be used (currently must be Ray).\n",
"- `rl_framework`, the reinforcement learning framework to be used (currently must be Ray).\n",
"\n",
"We use the `script_params` parameter to pass in general and algorithm-specific parameters to the training script.\n"
]
@@ -249,7 +249,7 @@
" # There are two parts to this:\n",
" # 1. Use a custom docker file with proper instructions to install xvfb, ffmpeg, python-opengl\n",
" # and other dependencies. \n",
" # TODO: Add these instructions to default rl base image and drop this docker file.\n",
" # TODO: Add these instructions to default reinforcement learning base image and drop this docker file.\n",
" \n",
" with open(\"files/docker/Dockerfile\", \"r\") as f:\n",
" dockerfile=f.read()\n",
@@ -274,10 +274,10 @@
" # A dictionary of arguments to pass to the training script specified in ``entry_script``\n",
" script_params=script_params,\n",
" \n",
" # The Azure ML compute target set up for Ray head nodes\n",
" # The Azure Machine Learning compute target set up for Ray head nodes\n",
" compute_target=compute_target,\n",
" \n",
" # RL framework. Currently must be Ray.\n",
" # Reinforcement learning framework. Currently must be Ray.\n",
" rl_framework=Ray(),\n",
" \n",
" # Custom environmnet for Xvfb\n",
@@ -350,11 +350,11 @@
"source": [
"### Monitor experiment\n",
"\n",
"Azure ML provides a Jupyter widget to show the real-time status of an experiment run. You could use this widget to monitor status of the runs.\n",
"Azure Machine Learning provides a Jupyter widget to show the status of an experiment run. You could use this widget to monitor the status of the runs.\n",
"\n",
"Note that _ReinforcementLearningEstimator_ creates at least two runs: (a) A parent run, i.e. the run returned above, and (b) a collection of child runs. The number of the child runs depends on the configuration of the reinforcement learning estimator. In our simple scenario, configured above, only one child run will be created.\n",
"\n",
"The widget will show a list of the child runs as well. You can click on the link under **Status** to see the details of a child run."
"The widget will show a list of the child runs as well. You can click on the link under **Status** to see the details of a child run. It will also show the metrics being logged."
]
},
{
@@ -373,7 +373,7 @@
"metadata": {},
"source": [
"### Stop the run\n",
"To cancel the run, call `training_run.cancel()`."
"To stop the run, call `training_run.cancel()`."
]
},
{
@@ -393,7 +393,7 @@
"### Wait for completion\n",
"Wait for the run to complete before proceeding.\n",
"\n",
"**Note: The length of the run depends on the provisioning time of the compute target and may take several minutes to complete.**"
"**Note: The length of the run depends on the provisioning time of the compute target and it may take several minutes to complete.**"
]
},
{
@@ -560,18 +560,20 @@
" dir_util.mkpath(destination)\n",
" \n",
" try:\n",
" # Mount dataset and copy movies\n",
" pirnt(\"Trying mounting dataset and copying movies.\")\n",
" # Note: We assume movie paths start with '\\'\n",
" mount_context = artifacts_ds.mount()\n",
" mount_context.start()\n",
" print('Download started.')\n",
" for movie in movies:\n",
" print('Copying {} ...'.format(movie))\n",
" shutil.copy2(path.join(mount_context.mount_point, movie[1:]), destination)\n",
" mount_context.stop()\n",
" except:\n",
" print(\"Mounting error! Downloading all artifacts ...\")\n",
" artifacts_ds.download(target_path=destination, overwrite=True)\n",
" print(\"Mounting failed! Going with dataset download.\")\n",
" for i, file in enumerate(artifacts_ds.to_path()):\n",
" if file in movies:\n",
" print('Downloading {} ...'.format(file))\n",
" artifacts_ds.skip(i).take(1).download(target_path=destination, overwrite=True)\n",
" \n",
" print('Downloading movies completed!')\n",
"\n",
@@ -625,7 +627,7 @@
"print(\"Last movie:\", last_movie)\n",
"\n",
"# Download movies\n",
"training_movies_path = \"training\"\n",
"training_movies_path = path.join(\"training\", \"videos\")\n",
"download_movies(training_artifacts_ds, [first_movie, last_movie], training_movies_path)"
]
},
@@ -781,7 +783,7 @@
"# 1. Use a custom docker file with proper instructions to install xvfb, ffmpeg, python-opengl\n",
"# and other dependencies.\n",
"# Note: Even when the rendering is off pyhton-opengl is needed.\n",
"# TODO: Add these instructions to default rl base image and drop this docker file.\n",
"# TODO: Add these instructions to default reinforcement learning base image and drop this docker file.\n",
"\n",
"with open(\"files/docker/Dockerfile\", \"r\") as f:\n",
" dockerfile=f.read()\n",
@@ -811,10 +813,10 @@
" training_artifacts_ds.as_named_input('artifacts_dataset'),\n",
" training_artifacts_ds.as_named_input('artifacts_path').as_mount()],\n",
" \n",
" # The Azure ML compute target set up for Ray head nodes\n",
" # The Azure Machine Learning compute target set up for Ray head nodes\n",
" compute_target=compute_target,\n",
" \n",
" # RL framework. Currently must be Ray.\n",
" # Reinforcement learning framework. Currently must be Ray.\n",
" rl_framework=Ray(),\n",
" \n",
" # Custom environmnet for Xvfb\n",
@@ -928,7 +930,7 @@
"print(\"Last movie:\", last_movie)\n",
"\n",
"# Download last movie\n",
"rollout_movies_path = \"rollout\"\n",
"rollout_movies_path = path.join(\"rollout\", \"videos\")\n",
"download_movies(rollout_artifacts_ds, [last_movie], rollout_movies_path)\n",
"\n",
"# Look for the downloaded movie in local directory\n",
@@ -996,7 +998,7 @@
"metadata": {},
"source": [
"## Next\n",
"This example was about running Azure ML RL (Ray/RLlib Framework) on a single node. Please see [Pong problem](../atari-on-distributed-compute/pong_rllib.ipynb)\n",
"This example was about running Reinforcement Learning in Azure Machine Learning (Ray/RLlib Framework) on a single compute. Please see [Pong Problem](../atari-on-distributed-compute/pong_rllib.ipynb)\n",
"example which uses Ray RLlib to train a Pong playing agent on a multi-node cluster."
]
}

View File

@@ -0,0 +1,70 @@
FROM mcr.microsoft.com/azureml/base:openmpi3.1.2-ubuntu18.04
# Install some basic utilities
RUN apt-get update && apt-get install -y \
curl \
ca-certificates \
sudo \
cpio \
git \
bzip2 \
libx11-6 \
tmux \
htop \
gcc \
xvfb \
python-opengl \
x11-xserver-utils \
ffmpeg \
mesa-utils \
nano \
vim \
rsync \
&& rm -rf /var/lib/apt/lists/*
# Create a working directory
RUN mkdir /app
WORKDIR /app
# Install Minecraft needed libraries
RUN mkdir -p /usr/share/man/man1 && \
sudo apt-get update && \
sudo apt-get install -y \
openjdk-8-jre-headless=8u162-b12-1 \
openjdk-8-jdk-headless=8u162-b12-1 \
openjdk-8-jre=8u162-b12-1 \
openjdk-8-jdk=8u162-b12-1
# Create a Python 3.7 environment
RUN conda install conda-build \
&& conda create -y --name py37 python=3.7.3 \
&& conda clean -ya
ENV CONDA_DEFAULT_ENV=py37
# Install minerl
RUN pip install --upgrade --user minerl
RUN pip install \
pandas \
matplotlib \
numpy \
scipy \
azureml-defaults \
tensorboardX \
tensorflow==1.15rc2 \
tabulate \
dm_tree \
lz4 \
ray==0.8.3 \
ray[rllib]==0.8.3 \
ray[tune]==0.8.3
COPY patch_files/* /root/.local/lib/python3.7/site-packages/minerl/env/Malmo/Minecraft/src/main/java/com/microsoft/Malmo/Client/
# Start minerl to pre-fetch minerl files (saves time when starting minerl during training)
RUN xvfb-run -a -s "-screen 0 1400x900x24" python -c "import gym; import minerl; env = gym.make('MineRLTreechop-v0'); env.close();"
RUN pip install --index-url https://test.pypi.org/simple/ malmo && \
python -c "import malmo.minecraftbootstrap; malmo.minecraftbootstrap.download();"
ENV MALMO_XSD_PATH="/app/MalmoPlatform/Schemas"

View File

@@ -0,0 +1,939 @@
// --------------------------------------------------------------------------------------------------
// Copyright (c) 2016 Microsoft Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
// associated documentation files (the "Software"), to deal in the Software without restriction,
// including without limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or
// substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
// NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// --------------------------------------------------------------------------------------------------
package com.microsoft.Malmo.Client;
import com.microsoft.Malmo.MalmoMod;
import com.microsoft.Malmo.MissionHandlerInterfaces.IWantToQuit;
import com.microsoft.Malmo.Schemas.MissionInit;
import com.microsoft.Malmo.Utils.TCPUtils;
import net.minecraft.profiler.Profiler;
import com.microsoft.Malmo.Utils.TimeHelper;
import net.minecraftforge.common.config.Configuration;
import java.io.*;
import java.net.ServerSocket;
import java.net.Socket;
import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.Hashtable;
import com.microsoft.Malmo.Utils.TCPInputPoller;
import java.util.logging.Level;
import java.util.LinkedList;
import java.util.List;
/**
* MalmoEnvServer - service supporting OpenAI gym "environment" for multi-agent Malmo missions.
*/
public class MalmoEnvServer implements IWantToQuit {
private static Profiler profiler = new Profiler();
private static int nsteps = 0;
private static boolean debug = false;
private static String hello = "<MalmoEnv";
private class EnvState {
// Mission parameters:
String missionInit = null;
String token = null;
String experimentId = null;
int agentCount = 0;
int reset = 0;
boolean quit = false;
boolean synchronous = false;
Long seed = null;
// OpenAI gym state:
boolean done = false;
double reward = 0.0;
byte[] obs = null;
String info = "";
LinkedList<String> commands = new LinkedList<String>();
}
private static boolean envPolicy = false; // Are we configured by config policy?
// Synchronize on EnvState.
private Lock lock = new ReentrantLock();
private Condition cond = lock.newCondition();
private EnvState envState = new EnvState();
private Hashtable<String, Integer> initTokens = new Hashtable<String, Integer>();
static final long COND_WAIT_SECONDS = 3; // Max wait in seconds before timing out (and replying to RPC).
static final int BYTES_INT = 4;
static final int BYTES_DOUBLE = 8;
private static final Charset utf8 = Charset.forName("UTF-8");
// Service uses a single per-environment client connection - initiated by the remote environment.
private int port;
private TCPInputPoller missionPoller; // Used for command parsing and not actual communication.
private String version;
// AOG: From running experiments, I've found that MineRL can get stuck resetting the
// environment which causes huge delays while we wait for the Python side to time
// out and restart the Minecraft instance. Minecraft itself is normally in a recoverable
// state, but the MalmoEnvServer instance will be blocked in a tight spin loop trying
// to handle a Peek request from the Python client. To unstick things, I've added this
// flag that can be set when we know things are in a bad state to abort the peek request.
// WARNING: THIS IS ONLY TREATING THE SYMPTOM AND NOT THE ROOT CAUSE
// The reason things are getting stuck is because the player is either dying or we're
// receiving a quit request while an episode reset is in progress.
private boolean abortRequest;
public void abort() {
System.out.println("AOG: MalmoEnvServer.abort");
abortRequest = true;
}
/***
* Malmo "Env" service.
* @param port the port the service listens on.
* @param missionPoller for plugging into existing comms handling.
*/
public MalmoEnvServer(String version, int port, TCPInputPoller missionPoller) {
this.version = version;
this.missionPoller = missionPoller;
this.port = port;
// AOG - Assume we don't want to be aborting in the first place
this.abortRequest = false;
}
/** Initialize malmo env configuration. For now either on or "legacy" AgentHost protocol.*/
static public void update(Configuration configs) {
envPolicy = configs.get(MalmoMod.ENV_CONFIGS, "env", "false").getBoolean();
}
public static boolean isEnv() {
return envPolicy;
}
/**
* Start servicing the MalmoEnv protocol.
* @throws IOException
*/
public void serve() throws IOException {
ServerSocket serverSocket = new ServerSocket(port);
serverSocket.setPerformancePreferences(0,2,1);
while (true) {
try {
final Socket socket = serverSocket.accept();
socket.setTcpNoDelay(true);
Thread thread = new Thread("EnvServerSocketHandler") {
public void run() {
boolean running = false;
try {
checkHello(socket);
while (true) {
DataInputStream din = new DataInputStream(socket.getInputStream());
int hdr = din.readInt();
byte[] data = new byte[hdr];
din.readFully(data);
String command = new String(data, utf8);
if (command.startsWith("<Step")) {
profiler.startSection("root");
long start = System.nanoTime();
step(command, socket, din);
profiler.endSection();
if (nsteps % 100 == 0 && debug){
List<Profiler.Result> dat = profiler.getProfilingData("root");
for(int qq = 0; qq < dat.size(); qq++){
Profiler.Result res = dat.get(qq);
System.out.println(res.profilerName + " " + res.totalUsePercentage + " "+ res.usePercentage);
}
}
} else if (command.startsWith("<Peek")) {
peek(command, socket, din);
} else if (command.startsWith("<Init")) {
init(command, socket);
} else if (command.startsWith("<Find")) {
find(command, socket);
} else if (command.startsWith("<MissionInit")) {
if (missionInit(din, command, socket))
{
running = true;
}
} else if (command.startsWith("<Quit")) {
quit(command, socket);
profiler.profilingEnabled = false;
} else if (command.startsWith("<Exit")) {
exit(command, socket);
profiler.profilingEnabled = false;
} else if (command.startsWith("<Close")) {
close(command, socket);
profiler.profilingEnabled = false;
} else if (command.startsWith("<Status")) {
status(command, socket);
} else if (command.startsWith("<Echo")) {
command = "<Echo>" + command + "</Echo>";
data = command.getBytes(utf8);
hdr = data.length;
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
dout.writeInt(hdr);
dout.write(data, 0, hdr);
dout.flush();
} else {
throw new IOException("Unknown env service command");
}
}
} catch (IOException ioe) {
// ioe.printStackTrace();
TCPUtils.Log(Level.SEVERE, "MalmoEnv socket error: " + ioe + " (can be on disconnect)");
// System.out.println("[ERROR] " + "MalmoEnv socket error: " + ioe + " (can be on disconnect)");
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] MalmoEnv socket error");
try {
if (running) {
TCPUtils.Log(Level.INFO,"Want to quit on disconnect.");
System.out.println("[LOGTOPY] " + "Want to quit on disconnect.");
setWantToQuit();
}
socket.close();
} catch (IOException ioe2) {
}
}
}
};
thread.start();
} catch (IOException ioe) {
TCPUtils.Log(Level.SEVERE, "MalmoEnv service exits on " + ioe);
}
}
}
private void checkHello(Socket socket) throws IOException {
DataInputStream din = new DataInputStream(socket.getInputStream());
int hdr = din.readInt();
if (hdr <= 0 || hdr > hello.length() + 8) // Version number may be somewhat longer in future.
throw new IOException("Invalid MalmoEnv hello header length");
byte[] data = new byte[hdr];
din.readFully(data);
if (!new String(data).startsWith(hello + version))
throw new IOException("MalmoEnv invalid protocol or version - expected " + hello + version);
}
// Handler for <MissionInit> messages.
private boolean missionInit(DataInputStream din, String command, Socket socket) throws IOException {
String ipOriginator = socket.getInetAddress().getHostName();
int hdr;
byte[] data;
hdr = din.readInt();
data = new byte[hdr];
din.readFully(data);
String id = new String(data, utf8);
TCPUtils.Log(Level.INFO,"Mission Init" + id);
String[] token = id.split(":");
String experimentId = token[0];
int role = Integer.parseInt(token[1]);
int reset = Integer.parseInt(token[2]);
int agentCount = Integer.parseInt(token[3]);
Boolean isSynchronous = Boolean.parseBoolean(token[4]);
Long seed = null;
if(token.length > 5)
seed = Long.parseLong(token[5]);
if(isSynchronous && agentCount > 1){
throw new IOException("Synchronous mode currently does not support multiple agents.");
}
port = -1;
boolean allTokensConsumed = true;
boolean started = false;
lock.lock();
try {
if (role == 0) {
String previousToken = experimentId + ":0:" + (reset - 1);
initTokens.remove(previousToken);
String myToken = experimentId + ":0:" + reset;
if (!initTokens.containsKey(myToken)) {
TCPUtils.Log(Level.INFO,"(Pre)Start " + role + " reset " + reset);
started = startUp(command, ipOriginator, experimentId, reset, agentCount, myToken, seed, isSynchronous);
if (started)
initTokens.put(myToken, 0);
} else {
started = true; // Pre-started previously.
}
// Check that all previous tokens have been consumed. If not don't proceed to mission.
allTokensConsumed = areAllTokensConsumed(experimentId, reset, agentCount);
if (!allTokensConsumed) {
try {
cond.await(COND_WAIT_SECONDS, TimeUnit.SECONDS);
} catch (InterruptedException ie) {
}
allTokensConsumed = areAllTokensConsumed(experimentId, reset, agentCount);
}
} else {
TCPUtils.Log(Level.INFO, "Start " + role + " reset " + reset);
started = startUp(command, ipOriginator, experimentId, reset, agentCount, experimentId + ":" + role + ":" + reset, seed, isSynchronous);
}
} finally {
lock.unlock();
}
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
dout.writeInt(BYTES_INT);
dout.writeInt(allTokensConsumed && started ? 1 : 0);
dout.flush();
dout.flush();
return allTokensConsumed && started;
}
private boolean areAllTokensConsumed(String experimentId, int reset, int agentCount) {
boolean allTokensConsumed = true;
for (int i = 1; i < agentCount; i++) {
String tokenForAgent = experimentId + ":" + i + ":" + (reset - 1);
if (initTokens.containsKey(tokenForAgent)) {
TCPUtils.Log(Level.FINE,"Mission init - unconsumed " + tokenForAgent);
allTokensConsumed = false;
}
}
return allTokensConsumed;
}
private boolean startUp(String command, String ipOriginator, String experimentId, int reset, int agentCount, String myToken, Long seed, Boolean isSynchronous) throws IOException {
// Clear out mission state
envState.reward = 0.0;
envState.commands.clear();
envState.obs = null;
envState.info = "";
envState.missionInit = command;
envState.done = false;
envState.quit = false;
envState.token = myToken;
envState.experimentId = experimentId;
envState.agentCount = agentCount;
envState.reset = reset;
envState.synchronous = isSynchronous;
envState.seed = seed;
return startUpMission(command, ipOriginator);
}
private boolean startUpMission(String command, String ipOriginator) throws IOException {
if (missionPoller == null)
return false;
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(baos);
missionPoller.commandReceived(command, ipOriginator, dos);
dos.flush();
byte[] reply = baos.toByteArray();
ByteArrayInputStream bais = new ByteArrayInputStream(reply);
DataInputStream dis = new DataInputStream(bais);
int hdr = dis.readInt();
byte[] replyBytes = new byte[hdr];
dis.readFully(replyBytes);
String replyStr = new String(replyBytes);
if (replyStr.equals("MALMOOK")) {
TCPUtils.Log(Level.INFO, "MalmoEnvServer Mission starting ...");
return true;
} else if (replyStr.equals("MALMOBUSY")) {
TCPUtils.Log(Level.INFO, "MalmoEnvServer Busy - I want to quit");
this.envState.quit = true;
}
return false;
}
private static final int stepTagLength = "<Step_>".length(); // Step with option code.
private synchronized void stepSync(String command, Socket socket, DataInputStream din) throws IOException
{
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Entering synchronous step.");
nsteps += 1;
profiler.startSection("commandProcessing");
String actions = command.substring(stepTagLength, command.length() - (stepTagLength + 2));
int options = Character.getNumericValue(command.charAt(stepTagLength - 2));
boolean withInfo = options == 0 || options == 2;
// Prepare to write data to the client.
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
double reward = 0.0;
boolean done;
byte[] obs;
String info = "";
boolean sent = false;
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Acquiring lock for synchronous step.");
lock.lock();
try {
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Lock is acquired.");
done = envState.done;
// TODO Handle when the environment is done.
// Process the actions.
if (actions.contains("\n")) {
String[] cmds = actions.split("\\n");
for(String cmd : cmds) {
envState.commands.add(cmd);
}
} else {
if (!actions.isEmpty())
envState.commands.add(actions);
}
sent = true;
profiler.endSection(); //cmd
profiler.startSection("requestTick");
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Received: " + actions);
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Requesting tick.");
// Now wait to run a tick
// If synchronous mode is off then we should see if want to quit is true.
while(!TimeHelper.SyncManager.requestTick() && !done ){Thread.yield();}
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Tick request granted.");
profiler.endSection();
profiler.startSection("waitForTick");
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Waiting for tick.");
// Then wait until the tick is finished
while(!TimeHelper.SyncManager.isTickCompleted() && !done ){ Thread.yield();}
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> TICK DONE. Getting observation.");
profiler.endSection();
profiler.startSection("getObservation");
// After which, get the observations.
obs = getObservation(done);
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Observation received. Getting info.");
profiler.endSection();
profiler.startSection("getInfo");
// Pick up rewards.
reward = envState.reward;
if (withInfo) {
info = envState.info;
// if(info == null)
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> FILLING INFO: NULL");
// else
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> FILLING " + info.toString());
}
done = envState.done;
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> STATUS " + Boolean.toString(done));
envState.info = null;
envState.obs = null;
envState.reward = 0.0;
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Info received..");
profiler.endSection();
} finally {
lock.unlock();
}
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Lock released. Writing observation, info, done.");
profiler.startSection("writeObs");
dout.writeInt(obs.length);
dout.write(obs);
dout.writeInt(BYTES_DOUBLE + 2);
dout.writeDouble(reward);
dout.writeByte(done ? 1 : 0);
dout.writeByte(sent ? 1 : 0);
if (withInfo) {
byte[] infoBytes = info.getBytes(utf8);
dout.writeInt(infoBytes.length);
dout.write(infoBytes);
}
profiler.endSection(); //write obs
profiler.startSection("flush");
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Packets written. Flushing.");
dout.flush();
profiler.endSection(); // flush
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Done with step.");
}
// Handler for <Step_> messages. Single digit option code after _ specifies if turnkey and info are included in message.
private void step(String command, Socket socket, DataInputStream din) throws IOException {
if(envState.synchronous){
stepSync(command, socket, din);
}
else{
System.out.println("[ERROR] Asynchronous stepping is not supported in MineRL.");
}
}
// Handler for <Peek> messages.
private void peek(String command, Socket socket, DataInputStream din) throws IOException {
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
byte[] obs;
boolean done;
String info = "";
// AOG - As we've only seen issues with the peek request, I've focused my changes to just
// this function. Initially we want to be optimistic and assume we're not going to abort
// the request and my observations of event timings indicate that there is plenty of time
// between the peek request being received and the reset failing, so a race condition is
// unlikely.
abortRequest = false;
lock.lock();
try {
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <PEEK> Waiting for pistol to fire.");
while(!TimeHelper.SyncManager.hasServerFiredPistol() && !abortRequest){
// Now wait to run a tick
while(!TimeHelper.SyncManager.requestTick() && !abortRequest){Thread.yield();}
// Then wait until the tick is finished
while(!TimeHelper.SyncManager.isTickCompleted() && !abortRequest){ Thread.yield();}
Thread.yield();
}
if (abortRequest) {
System.out.println("AOG: Aborting peek request");
// AOG - We detect the lack of observation within our Python wrapper and throw a slightly
// different exception that bypasses MineRL's automatic clean up code. If we were to report
// 'done', MineRL detects this as a runtime error and kills the Minecraft process,
// triggering a lengthy restart. So far from testing, Minecraft itself is fine and we can
// retry the reset, it's only the tight loops above that were causing things to stall and
// timeout.
// No observation
dout.writeInt(0);
// No info
dout.writeInt(0);
// Done
dout.writeInt(1);
dout.writeByte(0);
dout.flush();
return;
}
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <PEEK> Pistol fired!.");
// Wait two ticks for the first observation from server to be propagated.
while(!TimeHelper.SyncManager.requestTick() ){Thread.yield();}
// Then wait until the tick is finished
while(!TimeHelper.SyncManager.isTickCompleted()){ Thread.yield();}
while(!TimeHelper.SyncManager.requestTick() ){Thread.yield();}
// Then wait until the tick is finished
while(!TimeHelper.SyncManager.isTickCompleted()){ Thread.yield();}
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <PEEK> Getting observation.");
obs = getObservation(false);
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <PEEK> Observation acquired.");
done = envState.done;
info = envState.info;
} finally {
lock.unlock();
}
dout.writeInt(obs.length);
dout.write(obs);
byte[] infoBytes = info.getBytes(utf8);
dout.writeInt(infoBytes.length);
dout.write(infoBytes);
dout.writeInt(1);
dout.writeByte(done ? 1 : 0);
dout.flush();
}
// Get the current observation. If none and not done wait for a short time.
public byte[] getObservation(boolean done) {
byte[] obs = envState.obs;
if (obs == null){
System.out.println("[ERROR] Video observation is null; please notify the developer.");
}
return obs;
}
// Handler for <Find> messages - used by non-zero roles to discover integrated server port from primary (role 0) service.
private final static int findTagLength = "<Find>".length();
private void find(String command, Socket socket) throws IOException {
Integer port;
lock.lock();
try {
String token = command.substring(findTagLength, command.length() - (findTagLength + 1));
TCPUtils.Log(Level.INFO, "Find token? " + token);
// Purge previous token.
String[] tokenSplits = token.split(":");
String experimentId = tokenSplits[0];
int role = Integer.parseInt(tokenSplits[1]);
int reset = Integer.parseInt(tokenSplits[2]);
String previousToken = experimentId + ":" + role + ":" + (reset - 1);
initTokens.remove(previousToken);
cond.signalAll();
// Check for next token. Wait for a short time if not already produced.
port = initTokens.get(token);
if (port == null) {
try {
cond.await(COND_WAIT_SECONDS, TimeUnit.SECONDS);
} catch (InterruptedException ie) {
}
port = initTokens.get(token);
if (port == null) {
port = 0;
TCPUtils.Log(Level.INFO,"Role " + role + " reset " + reset + " waiting for token.");
}
}
} finally {
lock.unlock();
}
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
dout.writeInt(BYTES_INT);
dout.writeInt(port);
dout.flush();
}
public boolean isSynchronous(){
return envState.synchronous;
}
// Handler for <Init> messages. These reset the service so use with care!
private void init(String command, Socket socket) throws IOException {
lock.lock();
try {
initTokens = new Hashtable<String, Integer>();
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
dout.writeInt(BYTES_INT);
dout.writeInt(1);
dout.flush();
} finally {
lock.unlock();
}
}
// Handler for <Quit> (quit mission) messages.
private void quit(String command, Socket socket) throws IOException {
lock.lock();
try {
if (!envState.done){
envState.quit = true;
}
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <PEEK> Pistol fired!.");
// Wait two ticks for the first observation from server to be propagated.
while(!TimeHelper.SyncManager.requestTick() ){Thread.yield();}
// Then wait until the tick is finished
while(!TimeHelper.SyncManager.isTickCompleted()){ Thread.yield();}
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
dout.writeInt(BYTES_INT);
dout.writeInt(envState.done ? 1 : 0);
dout.flush();
} finally {
lock.unlock();
}
}
private final static int closeTagLength = "<Close>".length();
// Handler for <Close> messages.
private void close(String command, Socket socket) throws IOException {
lock.lock();
try {
String token = command.substring(closeTagLength, command.length() - (closeTagLength + 1));
initTokens.remove(token);
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
dout.writeInt(BYTES_INT);
dout.writeInt(1);
dout.flush();
} finally {
lock.unlock();
}
}
// Handler for <Status> messages.
private void status(String command, Socket socket) throws IOException {
lock.lock();
try {
String status = "{}"; // TODO Possibly have something more interesting to report.
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
byte[] statusBytes = status.getBytes(utf8);
dout.writeInt(statusBytes.length);
dout.write(statusBytes);
dout.flush();
} finally {
lock.unlock();
}
}
// Handler for <Exit> messages. These "kill the service" temporarily so use with care!
private void exit(String command, Socket socket) throws IOException {
// lock.lock();
try {
// We may exit before we get a chance to reply.
TimeHelper.SyncManager.setSynchronous(false);
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
dout.writeInt(BYTES_INT);
dout.writeInt(1);
dout.flush();
ClientStateMachine.exitJava();
} finally {
// lock.unlock();
}
}
// Malmo client state machine interface methods:
public String getCommand() {
try {
String command = envState.commands.poll();
if (command == null)
return "";
else
return command;
} finally {
}
}
public void endMission() {
// lock.lock();
try {
// AOG - If the mission is ending, we always want to abort requests and they won't
// be able to progress to completion and will stall.
System.out.println("AOG: MalmoEnvServer.endMission");
abort();
envState.done = true;
envState.quit = false;
envState.missionInit = null;
if (envState.token != null) {
initTokens.remove(envState.token);
envState.token = null;
envState.experimentId = null;
envState.agentCount = 0;
envState.reset = 0;
// cond.signalAll();
}
// lock.unlock();
} finally {
}
}
// Record a Malmo "observation" json - as the env info since an environment "obs" is a video frame.
public void observation(String info) {
// Parsing obs as JSON would be slower but less fragile than extracting the turn_key using string search.
// lock.lock();
try {
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <OBSERVATION> Inserting: " + info);
envState.info = info;
// cond.signalAll();
} finally {
// lock.unlock();
}
}
public void addRewards(double rewards) {
// lock.lock();
try {
envState.reward += rewards;
} finally {
// lock.unlock();
}
}
public void addFrame(byte[] frame) {
// lock.lock();
try {
envState.obs = frame; // Replaces current.
// cond.signalAll();
} finally {
// lock.unlock();
}
}
public void notifyIntegrationServerStarted(int integrationServerPort) {
lock.lock();
try {
if (envState.token != null) {
TCPUtils.Log(Level.INFO,"Integration server start up - token: " + envState.token);
addTokens(integrationServerPort, envState.token, envState.experimentId, envState.agentCount, envState.reset);
cond.signalAll();
} else {
TCPUtils.Log(Level.WARNING,"No mission token on integration server start up!");
}
} finally {
lock.unlock();
}
}
private void addTokens(int integratedServerPort, String myToken, String experimentId, int agentCount, int reset) {
initTokens.put(myToken, integratedServerPort);
// Place tokens for other agents to find.
for (int i = 1; i < agentCount; i++) {
String tokenForAgent = experimentId + ":" + i + ":" + reset;
initTokens.put(tokenForAgent, integratedServerPort);
}
}
// IWantToQuit implementation.
@Override
public boolean doIWantToQuit(MissionInit missionInit) {
// lock.lock();
try {
return envState.quit;
} finally {
// lock.unlock();
}
}
public Long getSeed(){
return envState.seed;
}
private void setWantToQuit() {
// lock.lock();
try {
envState.quit = true;
} finally {
if(TimeHelper.SyncManager.isSynchronous()){
// We want to desynchronize everything.
TimeHelper.SyncManager.setSynchronous(false);
}
// lock.unlock();
}
}
@Override
public void prepare(MissionInit missionInit) {
}
@Override
public void cleanup() {
}
@Override
public String getOutcome() {
return "Env quit";
}
}
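For context on the wire format the service above implements: each message is a 4-byte big-endian length header followed by a UTF-8 payload (matching Java's `DataInputStream.readInt`/`readFully`), and a connection opens with a `<MalmoEnv` hello that includes the protocol version. A hedged Python sketch of that handshake follows; the host, port, and version values are illustrative assumptions.

```python
# A hedged sketch of the MalmoEnvServer wire protocol: every message is a
# 4-byte big-endian length header followed by a UTF-8 payload. Host, port,
# and version are illustrative assumptions, not values from this repository.
import socket
import struct

def send_message(sock, text):
    data = text.encode('utf-8')
    sock.sendall(struct.pack('>i', len(data)) + data)

def recv_exact(sock, n):
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError('socket closed while reading')
        buf += chunk
    return buf

def recv_message(sock):
    (length,) = struct.unpack('>i', recv_exact(sock, 4))
    return recv_exact(sock, length).decode('utf-8')

with socket.create_connection(('localhost', 9000)) as sock:
    send_message(sock, '<MalmoEnv0.37.0')    # hello + assumed protocol version
    send_message(sock, '<Echo>ping</Echo>')  # the server wraps and echoes this back
    print(recv_message(sock))
```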

Some files were not shown because too many files have changed in this diff.