Mirror of https://github.com/Azure/MachineLearningNotebooks.git, synced 2025-12-20 17:45:10 -05:00.

Compare commits: `azureml-sd…` vs `lostmygith…` — 59 commits.
Commits in this comparison (SHA1 only): ba741fb18d, ac0ad8d487, 5019ad6c5a, 41a2ebd2b3, 53e3283d1d, ba9c4c5465, a6c65f00ec, 95072eabc2, 12905ef254, 4cf56eee91, d345ff6c37, 560dcac0a0, 322087a58c, e255c000ab, 7871e37ec0, 58e584e7eb, 1b0d75cb45, 5c38272fb4, e026c56f19, 4aad830f1c, c1b125025a, 9f364f7638, 4beb749a76, 04fe8c4580, 498018451a, 04305e33f0, d22e76d5e0, d71c482f75, 5775f8a78f, aae823ecd8, f1126e07f9, 0e4b27a233, 0a3d5f68a1, a6fe2affcb, ce469ddf6a, 9fe459be79, 89c35c8ed6, 33168c7f5d, 1d0766bd46, 9903e56882, a039166b90, 4e4bf48013, 0a2408300a, d99c3f5470, 3f62fe7d47, 6059c1dc0c, 8e2032fcde, 824d844cd7, bb1c7db690, 8dad09a42f, db2bf8ae93, 820c09734f, a2a33c70a6, 2ff791968a, 7186127804, b01c52bfd6, 28be7bcf58, 37a9350fde, 5080053a35.

The changed hunks follow.
@@ -1,5 +1,7 @@
 # Azure Machine Learning service example notebooks
 
+> a community-driven repository of examples using mlflow for tracking can be found at https://github.com/Azure/azureml-examples
+
 This repository contains example notebooks demonstrating the [Azure Machine Learning](https://azure.microsoft.com/en-us/services/machine-learning-service/) Python SDK which allows you to build, train, deploy and manage machine learning solutions using Azure. The AML SDK allows you the choice of using local or cloud compute resources, while managing and maintaining the complete data science workflow from the cloud.
 
 
@@ -65,7 +67,7 @@ Visit following repos to see projects contributed by Azure ML users:
 - [UMass Amherst Student Samples](https://github.com/katiehouse3/microsoft-azure-ml-notebooks) - A number of end-to-end machine learning notebooks, including machine translation, image classification, and customer churn, created by students in the 696DS course at UMass Amherst.
 
 ## Data/Telemetry
-This repository collects usage data and sends it to Mircosoft to help improve our products and services. Read Microsoft's [privacy statement to learn more](https://privacy.microsoft.com/en-US/privacystatement)
+This repository collects usage data and sends it to Microsoft to help improve our products and services. Read Microsoft's [privacy statement to learn more](https://privacy.microsoft.com/en-US/privacystatement)
 
 To opt out of tracking, please go to the raw markdown or .ipynb files and remove the following line of code:
 
@@ -103,7 +103,7 @@
 "source": [
 "import azureml.core\n",
 "\n",
-"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -82,8 +82,7 @@
 "from sklearn import svm\n",
 "from sklearn.preprocessing import LabelEncoder, StandardScaler\n",
 "from sklearn.linear_model import LogisticRegression\n",
-"import pandas as pd\n",
-"import shap"
+"import pandas as pd"
 ]
 },
 {
@@ -99,8 +98,12 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"X_raw, Y = shap.datasets.adult()\n",
-"X_raw[\"Race\"].value_counts().to_dict()"
+"from sklearn.datasets import fetch_openml\n",
+"data = fetch_openml(data_id=1590, as_frame=True)\n",
+"X_raw = data.data\n",
+"Y = (data.target == '>50K') * 1\n",
+"\n",
+"X_raw[\"race\"].value_counts().to_dict()"
 ]
 },
 {
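The replacement cell above drops `shap.datasets.adult()` in favour of the OpenML copy of the same Adult census data. As a standalone sketch, the new loading logic looks like this (OpenML data id 1590 is the Adult dataset; note the lower-case `race` column name):

```python
from sklearn.datasets import fetch_openml

# OpenML dataset 1590 is the UCI Adult census dataset used throughout these fairness notebooks.
data = fetch_openml(data_id=1590, as_frame=True)
X_raw = data.data                    # features as a pandas DataFrame
Y = (data.target == '>50K') * 1      # binary label: 1 when income is above 50K

# The OpenML copy uses lower-case column names such as 'race' and 'sex'.
print(X_raw["race"].value_counts().to_dict())
```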
@@ -116,9 +119,13 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"A = X_raw[['Sex','Race']]\n",
-"X = X_raw.drop(labels=['Sex', 'Race'],axis = 1)\n",
-"X = pd.get_dummies(X)\n",
+"A = X_raw[['sex','race']]\n",
+"X = X_raw.drop(labels=['sex', 'race'],axis = 1)\n",
+"X_dummies = pd.get_dummies(X)\n",
+"\n",
+"sc = StandardScaler()\n",
+"X_scaled = sc.fit_transform(X_dummies)\n",
+"X_scaled = pd.DataFrame(X_scaled, columns=X_dummies.columns)\n",
 "\n",
 "\n",
 "le = LabelEncoder()\n",
@@ -139,7 +146,7 @@
 "outputs": [],
 "source": [
 "from sklearn.model_selection import train_test_split\n",
-"X_train, X_test, Y_train, Y_test, A_train, A_test = train_test_split(X_raw, \n",
+"X_train, X_test, Y_train, Y_test, A_train, A_test = train_test_split(X_scaled, \n",
 " Y, \n",
 " A,\n",
 " test_size = 0.2,\n",
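Pieced together, the edited preprocessing and split cells above amount to roughly the following flow (a sketch assembled from the hunks; it assumes `X_raw` and `Y` from the loading step and omits any arguments not visible in the diff):

```python
import pandas as pd
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split

# Separate the sensitive features from the model features (lower-case OpenML names).
A = X_raw[['sex', 'race']]
X = X_raw.drop(labels=['sex', 'race'], axis=1)

# One-hot encode categoricals, then standardize while keeping the column names.
X_dummies = pd.get_dummies(X)
sc = StandardScaler()
X_scaled = sc.fit_transform(X_dummies)
X_scaled = pd.DataFrame(X_scaled, columns=X_dummies.columns)

# Encode the label and split, carrying the sensitive features alongside the data.
le = LabelEncoder()
Y = le.fit_transform(Y)
X_train, X_test, Y_train, Y_test, A_train, A_test = train_test_split(
    X_scaled, Y, A, test_size=0.2)
```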
@@ -150,18 +157,7 @@
 "X_train = X_train.reset_index(drop=True)\n",
 "A_train = A_train.reset_index(drop=True)\n",
 "X_test = X_test.reset_index(drop=True)\n",
-"A_test = A_test.reset_index(drop=True)\n",
-"\n",
-"# Improve labels\n",
-"A_test.Sex.loc[(A_test['Sex'] == 0)] = 'female'\n",
-"A_test.Sex.loc[(A_test['Sex'] == 1)] = 'male'\n",
-"\n",
-"\n",
-"A_test.Race.loc[(A_test['Race'] == 0)] = 'Amer-Indian-Eskimo'\n",
-"A_test.Race.loc[(A_test['Race'] == 1)] = 'Asian-Pac-Islander'\n",
-"A_test.Race.loc[(A_test['Race'] == 2)] = 'Black'\n",
-"A_test.Race.loc[(A_test['Race'] == 3)] = 'Other'\n",
-"A_test.Race.loc[(A_test['Race'] == 4)] = 'White'"
+"A_test = A_test.reset_index(drop=True)"
 ]
 },
 {
@@ -251,7 +247,7 @@
 "outputs": [],
 "source": [
 "sweep.fit(X_train, Y_train,\n",
-" sensitive_features=A_train.Sex)\n",
+" sensitive_features=A_train.sex)\n",
 "\n",
 "predictors = sweep._predictors"
 ]
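For context on the `sweep.fit` call above: in these notebooks the sweep is a fairlearn `GridSearch` reduction. The constructor below is a sketch only (the estimator settings and `grid_size` are assumptions, not taken from the diff); what the hunk itself changes is the lower-case `sex` column passed as the sensitive feature.

```python
from fairlearn.reductions import GridSearch, DemographicParity
from sklearn.linear_model import LogisticRegression

# Sweep a family of logistic-regression models against a demographic-parity constraint.
# grid_size and the base estimator are illustrative choices.
sweep = GridSearch(LogisticRegression(solver='liblinear'),
                   constraints=DemographicParity(),
                   grid_size=71)

# Fit on the scaled features, passing the sensitive feature explicitly.
sweep.fit(X_train, Y_train, sensitive_features=A_train.sex)

# The candidate predictors produced by the sweep (a private attribute in fairlearn 0.4.6).
predictors = sweep._predictors
```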
@@ -274,9 +270,9 @@
 " classifier = lambda X: m.predict(X)\n",
 " \n",
 " error = ErrorRate()\n",
-" error.load_data(X_train, pd.Series(Y_train), sensitive_features=A_train.Sex)\n",
+" error.load_data(X_train, pd.Series(Y_train), sensitive_features=A_train.sex)\n",
 " disparity = DemographicParity()\n",
-" disparity.load_data(X_train, pd.Series(Y_train), sensitive_features=A_train.Sex)\n",
+" disparity.load_data(X_train, pd.Series(Y_train), sensitive_features=A_train.sex)\n",
 " \n",
 " errors.append(error.gamma(classifier)[0])\n",
 " disparities.append(disparity.gamma(classifier).max())\n",
@@ -440,7 +436,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"sf = { 'sex': A_test.Sex, 'race': A_test.Race }\n",
+"sf = { 'sex': A_test.sex, 'race': A_test.race }\n",
 "\n",
 "from fairlearn.metrics._group_metric_set import _create_group_metric_set\n",
 "\n",
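The `sf` dictionary above feeds fairlearn's private `_create_group_metric_set` helper, which builds the dictionary that gets uploaded as a fairness dashboard. A rough sketch of that step follows; the keyword names and the `ys_pred` placeholder are assumptions about how the notebook drives this private helper, not part of the diff.

```python
from fairlearn.metrics._group_metric_set import _create_group_metric_set

# Sensitive features for the test set, keyed by display name (lower-case columns).
sf = {'sex': A_test.sex, 'race': A_test.race}

# Predictions keyed by a model display name; 'model' here is a placeholder estimator.
ys_pred = {"unmitigated": model.predict(X_test)}

# Assemble the dashboard dictionary for a binary classification task (argument names assumed).
dash_dict = _create_group_metric_set(y_true=Y_test,
                                     predictions=ys_pred,
                                     sensitive_features=sf,
                                     prediction_type='binary_classification')
```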
contrib/fairness/fairlearn-azureml-mitigation.yml (new file, 7 lines)
@@ -0,0 +1,7 @@
+name: fairlearn-azureml-mitigation
+dependencies:
+- pip:
+  - azureml-sdk
+  - azureml-contrib-fairness
+  - fairlearn==0.4.6
+  - joblib
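This new .yml pins the packages the mitigation notebook expects (note `fairlearn==0.4.6`). If you want to materialize such a specification as an Azure ML environment, one possible sketch (the environment name is arbitrary, and the file path assumes you run from the repository root):

```python
from azureml.core import Environment

# Build an Azure ML Environment object from the conda specification added in this change.
fairness_env = Environment.from_conda_specification(
    name="fairlearn-azureml-mitigation",
    file_path="contrib/fairness/fairlearn-azureml-mitigation.yml")
```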
@@ -82,8 +82,7 @@
 "from sklearn import svm\n",
 "from sklearn.preprocessing import LabelEncoder, StandardScaler\n",
 "from sklearn.linear_model import LogisticRegression\n",
-"import pandas as pd\n",
-"import shap"
+"import pandas as pd"
 ]
 },
 {
@@ -99,7 +98,10 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"X_raw, Y = shap.datasets.adult()"
+"from sklearn.datasets import fetch_openml\n",
+"data = fetch_openml(data_id=1590, as_frame=True)\n",
+"X_raw = data.data\n",
+"Y = (data.target == '>50K') * 1"
 ]
 },
 {
@@ -115,7 +117,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(X_raw[\"Race\"].value_counts().to_dict())"
+"print(X_raw[\"race\"].value_counts().to_dict())"
 ]
 },
 {
@@ -134,9 +136,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"A = X_raw[['Sex','Race']]\n",
-"X = X_raw.drop(labels=['Sex', 'Race'],axis = 1)\n",
-"X = pd.get_dummies(X)"
+"A = X_raw[['sex','race']]\n",
+"X = X_raw.drop(labels=['sex', 'race'],axis = 1)\n",
+"X_dummies = pd.get_dummies(X)"
 ]
 },
 {
@@ -153,8 +155,8 @@
 "outputs": [],
 "source": [
 "sc = StandardScaler()\n",
-"X_scaled = sc.fit_transform(X)\n",
-"X_scaled = pd.DataFrame(X_scaled, columns=X.columns)\n",
+"X_scaled = sc.fit_transform(X_dummies)\n",
+"X_scaled = pd.DataFrame(X_scaled, columns=X_dummies.columns)\n",
 "\n",
 "le = LabelEncoder()\n",
 "Y = le.fit_transform(Y)"
@@ -185,18 +187,7 @@
 "X_train = X_train.reset_index(drop=True)\n",
 "A_train = A_train.reset_index(drop=True)\n",
 "X_test = X_test.reset_index(drop=True)\n",
-"A_test = A_test.reset_index(drop=True)\n",
-"\n",
-"# Improve labels\n",
-"A_test.Sex.loc[(A_test['Sex'] == 0)] = 'female'\n",
-"A_test.Sex.loc[(A_test['Sex'] == 1)] = 'male'\n",
-"\n",
-"\n",
-"A_test.Race.loc[(A_test['Race'] == 0)] = 'Amer-Indian-Eskimo'\n",
-"A_test.Race.loc[(A_test['Race'] == 1)] = 'Asian-Pac-Islander'\n",
-"A_test.Race.loc[(A_test['Race'] == 2)] = 'Black'\n",
-"A_test.Race.loc[(A_test['Race'] == 3)] = 'Other'\n",
-"A_test.Race.loc[(A_test['Race'] == 4)] = 'White'"
+"A_test = A_test.reset_index(drop=True)"
 ]
 },
 {
@@ -380,7 +371,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"sf = { 'Race': A_test.Race, 'Sex': A_test.Sex }\n",
+"sf = { 'Race': A_test.race, 'Sex': A_test.sex }\n",
 "\n",
 "from fairlearn.metrics._group_metric_set import _create_group_metric_set\n",
 "\n",
@@ -499,7 +490,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.6.8"
+"version": "3.6.10"
 }
 },
 "nbformat": 4,
contrib/fairness/upload-fairness-dashboard.yml (new file, 7 lines)
@@ -0,0 +1,7 @@
+name: upload-fairness-dashboard
+dependencies:
+- pip:
+  - azureml-sdk
+  - azureml-contrib-fairness
+  - fairlearn==0.4.6
+  - joblib
@@ -4,7 +4,7 @@ Learn how to use Azure Machine Learning services for experimentation and model m
 
 As a pre-requisite, run the [configuration Notebook](../configuration.ipynb) notebook first to set up your Azure ML Workspace. Then, run the notebooks in following recommended order.
 
-* [train-within-notebook](./training/train-within-notebook): Train a model hile tracking run history, and learn how to deploy the model as web service to Azure Container Instance.
+* [train-within-notebook](./training/train-within-notebook): Train a model while tracking run history, and learn how to deploy the model as web service to Azure Container Instance.
 * [train-on-local](./training/train-on-local): Learn how to submit a run to local computer and use Azure ML managed run configuration.
 * [train-on-amlcompute](./training/train-on-amlcompute): Use a 1-n node Azure ML managed compute cluster for remote runs on Azure CPU or GPU infrastructure.
 * [train-on-remote-vm](./training/train-on-remote-vm): Use Data Science Virtual Machine as a target for remote runs.
@@ -97,68 +97,96 @@ jupyter notebook
 <a name="databricks"></a>
 ## Setup using Azure Databricks
 
-**NOTE**: Please create your Azure Databricks cluster as v6.0 (high concurrency preferred) with **Python 3** (dropdown).
+**NOTE**: Please create your Azure Databricks cluster as v7.1 (high concurrency preferred) with **Python 3** (dropdown).
 **NOTE**: You should at least have contributor access to your Azure subcription to run the notebook.
-- Please remove the previous SDK version if there is any and install the latest SDK by installing **azureml-sdk[automl]** as a PyPi library in Azure Databricks workspace.
-- You can find the detail Readme instructions at [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks).
-- Download the sample notebook automl-databricks-local-01.ipynb from [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks) and import into the Azure databricks workspace.
+- You can find the detail Readme instructions at [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks/automl).
+- Download the sample notebook automl-databricks-local-01.ipynb from [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks/automl) and import into the Azure databricks workspace.
 - Attach the notebook to the cluster.
 
 <a name="samples"></a>
 # Automated ML SDK Sample Notebooks
 
-- [auto-ml-classification-credit-card-fraud.ipynb](classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb)
-- Dataset: Kaggle's [credit card fraud detection dataset](https://www.kaggle.com/mlg-ulb/creditcardfraud)
-- Simple example of using automated ML for classification to fraudulent credit card transactions
-- Uses azure compute for training
-- [auto-ml-regression.ipynb](regression/auto-ml-regression.ipynb)
-- Dataset: Hardware Performance Dataset
-- Simple example of using automated ML for regression
-- Uses azure compute for training
-- [auto-ml-regression-explanation-featurization.ipynb](regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb)
-- Dataset: Hardware Performance Dataset
-- Shows featurization and excplanation
-- Uses azure compute for training
-- [auto-ml-forecasting-energy-demand.ipynb](forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb)
-- Dataset: [NYC energy demand data](forecasting-a/nyc_energy.csv)
-- Example of using automated ML for training a forecasting model
-- [auto-ml-classification-credit-card-fraud-local.ipynb](local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb)
-- Dataset: Kaggle's [credit card fraud detection dataset](https://www.kaggle.com/mlg-ulb/creditcardfraud)
-- Simple example of using automated ML for classification to fraudulent credit card transactions
-- Uses local compute for training
-- [auto-ml-classification-bank-marketing-all-features.ipynb](classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb)
-- Dataset: UCI's [bank marketing dataset](https://www.kaggle.com/janiobachmann/bank-marketing-dataset)
-- Simple example of using automated ML for classification to predict term deposit subscriptions for a bank
-- Uses azure compute for training
-- [auto-ml-forecasting-orange-juice-sales.ipynb](forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb)
-- Dataset: [Dominick's grocery sales of orange juice](forecasting-b/dominicks_OJ.csv)
-- Example of training an automated ML forecasting model on multiple time-series
-- [auto-ml-forecasting-bike-share.ipynb](forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb)
-- Dataset: forecasting for a bike-sharing
-- Example of training an automated ML forecasting model on multiple time-series
-- [auto-ml-forecasting-function.ipynb](forecasting-forecast-function/auto-ml-forecasting-function.ipynb)
-- Example of training an automated ML forecasting model on multiple time-series
-- [auto-ml-forecasting-beer-remote.ipynb](forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb)
-- Example of training an automated ML forecasting model on multiple time-series
-- Beer Production Forecasting
-- [auto-ml-continuous-retraining.ipynb](continuous-retraining/auto-ml-continuous-retraining.ipynb)
-- Continuous retraining using Pipelines and Time-Series TabularDataset
-- [auto-ml-classification-text-dnn.ipynb](classification-text-dnn/auto-ml-classification-text-dnn.ipynb)
-- Classification with text data using deep learning in AutoML
-- AutoML highlights here include using deep neural networks (DNNs) to create embedded features from text data.
-- Depending on the compute cluster the user provides, AutoML tried out Bidirectional Encoder Representations from Transformers (BERT) when a GPU compute is used.
-- Bidirectional Long-Short Term neural network (BiLSTM) when a CPU compute is used, thereby optimizing the choice of DNN for the uesr's setup.
+## Classification
+- **Classify Credit Card Fraud**
+- Dataset: [Kaggle's credit card fraud detection dataset](https://www.kaggle.com/mlg-ulb/creditcardfraud)
+- **[Jupyter Notebook (remote run)](classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb)**
+- run the experiment remotely on AML Compute cluster
+- test the performance of the best model in the local environment
+- **[Jupyter Notebook (local run)](local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb)**
+- run experiment in the local environment
+- use Mimic Explainer for computing feature importance
+- deploy the best model along with the explainer to an Azure Kubernetes (AKS) cluster, which will compute the raw and engineered feature importances at inference time
+- **Predict Term Deposit Subscriptions in a Bank**
+- Dataset: [UCI's bank marketing dataset](https://www.kaggle.com/janiobachmann/bank-marketing-dataset)
+- **[Jupyter Notebook](classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb)**
+- run experiment remotely on AML Compute cluster to generate ONNX compatible models
+- view the featurization steps that were applied during training
+- view feature importance for the best model
+- download the best model in ONNX format and use it for inferencing using ONNXRuntime
+- deploy the best model in PKL format to Azure Container Instance (ACI)
+- **Predict Newsgroup based on Text from News Article**
+- Dataset: [20 newsgroups text dataset](https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html)
+- **[Jupyter Notebook](classification-text-dnn/auto-ml-classification-text-dnn.ipynb)**
+- AutoML highlights here include using deep neural networks (DNNs) to create embedded features from text data
+- AutoML will use Bidirectional Encoder Representations from Transformers (BERT) when a GPU compute is used
+- Bidirectional Long-Short Term neural network (BiLSTM) will be utilized when a CPU compute is used, thereby optimizing the choice of DNN
+
+## Regression
+- **Predict Performance of Hardware Parts**
+- Dataset: Hardware Performance Dataset
+- **[Jupyter Notebook](regression/auto-ml-regression.ipynb)**
+- run the experiment remotely on AML Compute cluster
+- get best trained model for a different metric than the one the experiment was optimized for
+- test the performance of the best model in the local environment
+- **[Jupyter Notebook (advanced)](regression/auto-ml-regression.ipynb)**
+- run the experiment remotely on AML Compute cluster
+- customize featurization: override column purpose within the dataset, configure transformer parameters
+- get best trained model for a different metric than the one the experiment was optimized for
+- run a model explanation experiment on the remote cluster
+- deploy the model along the explainer and run online inferencing
+
+## Time Series Forecasting
+- **Forecast Energy Demand**
+- Dataset: [NYC energy demand data](http://mis.nyiso.com/public/P-58Blist.htm)
+- **[Jupyter Notebook](forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb)**
+- run experiment remotely on AML Compute cluster
+- use lags and rolling window features
+- view the featurization steps that were applied during training
+- get the best model, use it to forecast on test data and compare the accuracy of predictions against real data
+- **Forecast Orange Juice Sales (Multi-Series)**
+- Dataset: [Dominick's grocery sales of orange juice](forecasting-orange-juice-sales/dominicks_OJ.csv)
+- **[Jupyter Notebook](forecasting-orange-juice-sales/dominicks_OJ.csv)**
+- run experiment remotely on AML Compute cluster
+- customize time-series featurization, change column purpose and override transformer hyper parameters
+- evaluate locally the performance of the generated best model
+- deploy the best model as a webservice on Azure Container Instance (ACI)
+- get online predictions from the deployed model
+- **Forecast Demand of a Bike-Sharing Service**
+- Dataset: [Bike demand data](forecasting-bike-share/bike-no.csv)
+- **[Jupyter Notebook](forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb)**
+- run experiment remotely on AML Compute cluster
+- integrate holiday features
+- run rolling forecast for test set that is longer than the forecast horizon
+- compute metrics on the predictions from the remote forecast
+- **The Forecast Function Interface**
+- Dataset: Generated for sample purposes
+- **[Jupyter Notebook](forecasting-forecast-function/auto-ml-forecasting-function.ipynb)**
+- train a forecaster using a remote AML Compute cluster
+- capabilities of forecast function (e.g. forecast farther into the horizon)
+- generate confidence intervals
+- **Forecast Beverage Production**
+- Dataset: [Monthly beer production data](forecasting-beer-remote/Beer_no_valid_split_train.csv)
+- **[Jupyter Notebook](forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb)**
+- train using a remote AML Compute cluster
+- enable the DNN learning model
+- forecast on a remote compute cluster and compare different model performance
+- **Continuous Retraining with NOAA Weather Data**
+- Dataset: [NOAA weather data from Azure Open Datasets](https://azure.microsoft.com/en-us/services/open-datasets/)
+- **[Jupyter Notebook](continuous-retraining/auto-ml-continuous-retraining.ipynb)**
+- continuously retrain a model using Pipelines and AutoML
+- create a Pipeline to upload a time series dataset to an Azure blob
+- create a Pipeline to run an AutoML experiment and register the best resulting model in the Workspace
+- publish the training pipeline created and schedule it to run daily
 
 <a name="documentation"></a>
 See [Configure automated machine learning experiments](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-auto-train) to learn how more about the the settings and features available for automated machine learning experiments.
@@ -179,7 +207,7 @@ The main code of the file must be indented so that it is under this condition.
 ## automl_setup fails
 1. On Windows, make sure that you are running automl_setup from an Anconda Prompt window rather than a regular cmd window. You can launch the "Anaconda Prompt" window by hitting the Start button and typing "Anaconda Prompt". If you don't see the application "Anaconda Prompt", you might not have conda or mini conda installed. In that case, you can install it [here](https://conda.io/miniconda.html)
 2. Check that you have conda 64-bit installed rather than 32-bit. You can check this with the command `conda info`. The `platform` should be `win-64` for Windows or `osx-64` for Mac.
-3. Check that you have conda 4.4.10 or later. You can check the version with the command `conda -V`. If you have a previous version installed, you can update it using the command: `conda update conda`.
+3. Check that you have conda 4.7.8 or later. You can check the version with the command `conda -V`. If you have a previous version installed, you can update it using the command: `conda update conda`.
 4. On Linux, if the error is `gcc: error trying to exec 'cc1plus': execvp: No such file or directory`, install build essentials using the command `sudo apt-get install build-essential`.
 5. Pass a new name as the first parameter to automl_setup so that it creates a new conda environment. You can view existing conda environments using `conda env list` and remove them with `conda env remove -n <environmentname>`.
 
@@ -6,12 +6,12 @@ dependencies:
 - python>=3.5.2,<3.6.8
 - nb_conda
 - matplotlib==2.1.0
-- numpy~=1.16.0
+- numpy==1.18.5
 - cython
 - urllib3<1.24
-- scipy==1.4.1
-- scikit-learn>=0.19.0,<=0.20.3
-- pandas>=0.22.0,<=0.23.4
+- scipy>=1.4.1,<=1.5.2
+- scikit-learn==0.22.1
+- pandas==0.25.1
 - py-xgboost<=0.90
 - conda-forge::fbprophet==0.5
 - holidays==0.9.11
@@ -20,12 +20,9 @@ dependencies:
 
 - pip:
 # Required packages for AzureML execution, history, and data preparation.
-- azureml-defaults
-- azureml-train-automl
-- azureml-train
 - azureml-widgets
-- azureml-pipeline
 - pytorch-transformers==1.0.0
 - spacy==2.1.8
 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
+- -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.17.0/validated_win32_requirements.txt [--no-deps]
 
@@ -0,0 +1,28 @@ (new file)
+name: azure_automl
+dependencies:
+# The python interpreter version.
+# Currently Azure ML only supports 3.5.2 and later.
+- pip<=19.3.1
+- python>=3.5.2,<3.6.8
+- nb_conda
+- matplotlib==2.1.0
+- numpy==1.18.5
+- cython
+- urllib3<1.24
+- scipy>=1.4.1,<=1.5.2
+- scikit-learn==0.22.1
+- pandas==0.25.1
+- py-xgboost<=0.90
+- conda-forge::fbprophet==0.5
+- holidays==0.9.11
+- pytorch::pytorch=1.4.0
+- cudatoolkit=10.1.243
+
+- pip:
+# Required packages for AzureML execution, history, and data preparation.
+  - azureml-widgets
+  - pytorch-transformers==1.0.0
+  - spacy==2.1.8
+  - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
+  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.17.0/validated_linux_requirements.txt [--no-deps]
+
@@ -7,12 +7,12 @@ dependencies:
 - python>=3.5.2,<3.6.8
 - nb_conda
 - matplotlib==2.1.0
-- numpy~=1.16.0
+- numpy==1.18.5
 - cython
 - urllib3<1.24
-- scipy==1.4.1
-- scikit-learn>=0.19.0,<=0.20.3
-- pandas>=0.22.0,<=0.23.4
+- scipy>=1.4.1,<=1.5.2
+- scikit-learn==0.22.1
+- pandas==0.25.1
 - py-xgboost<=0.90
 - conda-forge::fbprophet==0.5
 - holidays==0.9.11
@@ -21,11 +21,8 @@ dependencies:
 
 - pip:
 # Required packages for AzureML execution, history, and data preparation.
-- azureml-defaults
-- azureml-train-automl
-- azureml-train
 - azureml-widgets
-- azureml-pipeline
 - pytorch-transformers==1.0.0
 - spacy==2.1.8
 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
+- -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.17.0/validated_darwin_requirements.txt [--no-deps]
@@ -6,11 +6,22 @@ set PIP_NO_WARN_SCRIPT_LOCATION=0
 
 IF "%conda_env_name%"=="" SET conda_env_name="azure_automl"
 IF "%automl_env_file%"=="" SET automl_env_file="automl_env.yml"
+SET check_conda_version_script="check_conda_version.py"
 
 IF NOT EXIST %automl_env_file% GOTO YmlMissing
 
 IF "%CONDA_EXE%"=="" GOTO CondaMissing
 
+IF NOT EXIST %check_conda_version_script% GOTO VersionCheckMissing
+
+python "%check_conda_version_script%"
+IF errorlevel 1 GOTO ErrorExit:
+
+SET replace_version_script="replace_latest_version.ps1"
+IF EXIST %replace_version_script% (
+powershell -file %replace_version_script% %automl_env_file%
+)
+
 call conda activate %conda_env_name% 2>nul:
 
 if not errorlevel 1 (
@@ -54,6 +65,10 @@ echo If you are running an older version of Miniconda or Anaconda,
 echo you can upgrade using the command: conda update conda
 goto End
 
+:VersionCheckMissing
+echo File %check_conda_version_script% not found.
+goto End
+
 :YmlMissing
 echo File %automl_env_file% not found.
 
@@ -4,6 +4,7 @@ CONDA_ENV_NAME=$1
 AUTOML_ENV_FILE=$2
 OPTIONS=$3
 PIP_NO_WARN_SCRIPT_LOCATION=0
+CHECK_CONDA_VERSION_SCRIPT="check_conda_version.py"
 
 if [ "$CONDA_ENV_NAME" == "" ]
 then
@@ -12,7 +13,7 @@ fi
 
 if [ "$AUTOML_ENV_FILE" == "" ]
 then
-AUTOML_ENV_FILE="automl_env.yml"
+AUTOML_ENV_FILE="automl_env_linux.yml"
 fi
 
 if [ ! -f $AUTOML_ENV_FILE ]; then
@@ -20,6 +21,18 @@ if [ ! -f $AUTOML_ENV_FILE ]; then
 exit 1
 fi
 
+if [ ! -f $CHECK_CONDA_VERSION_SCRIPT ]; then
+echo "File $CHECK_CONDA_VERSION_SCRIPT not found"
+exit 1
+fi
+
+python "$CHECK_CONDA_VERSION_SCRIPT"
+if [ $? -ne 0 ]; then
+exit 1
+fi
+
+sed -i 's/AZUREML-SDK-VERSION/latest/' $AUTOML_ENV_FILE
+
 if source activate $CONDA_ENV_NAME 2> /dev/null
 then
 echo "Upgrading existing conda environment" $CONDA_ENV_NAME
@@ -4,6 +4,7 @@ CONDA_ENV_NAME=$1
 AUTOML_ENV_FILE=$2
 OPTIONS=$3
 PIP_NO_WARN_SCRIPT_LOCATION=0
+CHECK_CONDA_VERSION_SCRIPT="check_conda_version.py"
 
 if [ "$CONDA_ENV_NAME" == "" ]
 then
@@ -20,6 +21,18 @@ if [ ! -f $AUTOML_ENV_FILE ]; then
 exit 1
 fi
 
+if [ ! -f $CHECK_CONDA_VERSION_SCRIPT ]; then
+echo "File $CHECK_CONDA_VERSION_SCRIPT not found"
+exit 1
+fi
+
+python "$CHECK_CONDA_VERSION_SCRIPT"
+if [ $? -ne 0 ]; then
+exit 1
+fi
+
+sed -i '' 's/AZUREML-SDK-VERSION/latest/' $AUTOML_ENV_FILE
+
 if source activate $CONDA_ENV_NAME 2> /dev/null
 then
 echo "Upgrading existing conda environment" $CONDA_ENV_NAME
@@ -0,0 +1,26 @@ (new file; the conda version check script referenced by the setup scripts above)
+from distutils.version import LooseVersion
+import platform
+
+try:
+    import conda
+except:
+    print('Failed to import conda.')
+    print('This setup is usually run from the base conda environment.')
+    print('You can activate the base environment using the command "conda activate base"')
+    exit(1)
+
+architecture = platform.architecture()[0]
+
+if architecture != "64bit":
+    print('This setup requires 64bit Anaconda or Miniconda. Found: ' + architecture)
+    exit(1)
+
+minimumVersion = "4.7.8"
+
+versionInvalid = (LooseVersion(conda.__version__) < LooseVersion(minimumVersion))
+
+if versionInvalid:
+    print('Setup requires conda version ' + minimumVersion + ' or higher.')
+    print('You can use the command "conda update conda" to upgrade conda.')
+
+exit(versionInvalid)
@@ -89,7 +89,7 @@
 "from azureml.automl.core.featurization import FeaturizationConfig\n",
 "from azureml.core.dataset import Dataset\n",
 "from azureml.train.automl import AutoMLConfig\n",
-"from azureml.interpret._internal.explanation_client import ExplanationClient"
+"from azureml.interpret import ExplanationClient"
 ]
 },
 {
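The import above moves `ExplanationClient` from a private module to the public `azureml.interpret` namespace. A typical usage sketch (assumed, not shown in this hunk) for pulling down the explanation of the best run:

```python
from azureml.interpret import ExplanationClient

# Attach a client to the run that produced the explanation and download it.
client = ExplanationClient.from_run(best_run)
explanation = client.download_model_explanation(raw=False)

# Inspect aggregate feature importances computed during the AutoML explanation run.
print(explanation.get_feature_importance_dict())
```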
@@ -105,7 +105,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -500,11 +500,10 @@
 "source": [
 "# Wait for the best model explanation run to complete\n",
 "from azureml.core.run import Run\n",
-"model_explainability_run_id = remote_run.get_properties().get('ModelExplainRunId')\n",
+"model_explainability_run_id = remote_run.id + \"_\" + \"ModelExplain\"\n",
 "print(model_explainability_run_id)\n",
-"if model_explainability_run_id is not None:\n",
-" model_explainability_run = Run(experiment=experiment, run_id=model_explainability_run_id)\n",
-" model_explainability_run.wait_for_completion()\n",
+"model_explainability_run = Run(experiment=experiment, run_id=model_explainability_run_id)\n",
+"model_explainability_run.wait_for_completion()\n",
 "\n",
 "# Get the best run object\n",
 "best_run, fitted_model = remote_run.get_output()"
@@ -93,7 +93,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -424,15 +424,26 @@
 "source": [
 "This Credit Card fraud Detection dataset is made available under the Open Database License: http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/ and is available at: https://www.kaggle.com/mlg-ulb/creditcardfraud\n",
 "\n",
+"The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Universit\u00c3\u00a9 Libre de Bruxelles) on big data mining and fraud detection.\n",
+"More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project\n",
 "\n",
-"The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Universit\u00c3\u0192\u00c2\u00a9 Libre de Bruxelles) on big data mining and fraud detection. More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project\n",
-"Please cite the following works: \n",
-"\u00c3\u00a2\u00e2\u201a\u00ac\u00c2\u00a2\tAndrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015\n",
-"\u00c3\u00a2\u00e2\u201a\u00ac\u00c2\u00a2\tDal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. Learned lessons in credit card fraud detection from a practitioner perspective, Expert systems with applications,41,10,4915-4928,2014, Pergamon\n",
-"\u00c3\u00a2\u00e2\u201a\u00ac\u00c2\u00a2\tDal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE\n",
-"o\tDal Pozzolo, Andrea Adaptive Machine learning for credit card fraud detection ULB MLG PhD thesis (supervised by G. Bontempi)\n",
-"\u00c3\u00a2\u00e2\u201a\u00ac\u00c2\u00a2\tCarcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-A\u00c3\u0192\u00c2\u00abl; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information fusion,41, 182-194,2018,Elsevier\n",
-"\u00c3\u00a2\u00e2\u201a\u00ac\u00c2\u00a2\tCarcillo, Fabrizio; Le Borgne, Yann-A\u00c3\u0192\u00c2\u00abl; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing"
+"Please cite the following works:\n",
+"\n",
+"Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015\n",
+"\n",
+"Dal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. Learned lessons in credit card fraud detection from a practitioner perspective, Expert systems with applications,41,10,4915-4928,2014, Pergamon\n",
+"\n",
+"Dal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE\n",
+"\n",
+"Dal Pozzolo, Andrea Adaptive Machine learning for credit card fraud detection ULB MLG PhD thesis (supervised by G. Bontempi)\n",
+"\n",
+"Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-A\u00c3\u00abl; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information fusion,41, 182-194,2018,Elsevier\n",
+"\n",
+"Carcillo, Fabrizio; Le Borgne, Yann-A\u00c3\u00abl; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing\n",
+"\n",
+"Bertrand Lebichot, Yann-A\u00c3\u00abl Le Borgne, Liyun He, Frederic Obl\u00c3\u00a9, Gianluca Bontempi Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection, INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019\n",
+"\n",
+"Fabrizio Carcillo, Yann-A\u00c3\u00abl Le Borgne, Olivier Caelen, Frederic Obl\u00c3\u00a9, Gianluca Bontempi Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection Information Sciences, 2019"
 ]
 }
 ],
@@ -97,7 +97,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -151,6 +151,8 @@
 "from azureml.core.compute import ComputeTarget, AmlCompute\n",
 "from azureml.core.compute_target import ComputeTargetException\n",
 "\n",
+"num_nodes = 2\n",
+"\n",
 "# Choose a name for your cluster.\n",
 "amlcompute_cluster_name = \"dnntext-cluster\"\n",
 "\n",
@@ -163,7 +165,7 @@
 " # To use BERT (this is recommended for best performance), select a GPU such as \"STANDARD_NC6\" \n",
 " # or similar GPU option\n",
 " # available in your workspace\n",
-" max_nodes = 1)\n",
+" max_nodes = num_nodes)\n",
 " compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
 "\n",
 "compute_target.wait_for_completion(show_output=True)"
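Between the two compute hunks above, the cluster-creation cell ends up roughly like the sketch below. The `vm_size` value and the `ws` workspace object are assumptions; only `num_nodes` and `max_nodes=num_nodes` come from the diff, and the surrounding comments recommend a GPU SKU such as STANDARD_NC6 when BERT should be used.

```python
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException

num_nodes = 2
amlcompute_cluster_name = "dnntext-cluster"

try:
    # Reuse the cluster if it already exists in the workspace.
    compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)
except ComputeTargetException:
    # vm_size is illustrative; choose "STANDARD_NC6" or a similar GPU SKU to enable BERT.
    compute_config = AmlCompute.provisioning_configuration(vm_size="STANDARD_D2_V2",
                                                           max_nodes=num_nodes)
    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)

compute_target.wait_for_completion(show_output=True)
```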
@@ -270,7 +272,9 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"This step requires an Enterprise workspace to gain access to this feature. To learn more about creating an Enterprise workspace or upgrading to an Enterprise workspace from the Azure portal, please visit our [Workspace page](https://docs.microsoft.com/azure/machine-learning/service/concept-workspace#upgrade)."
+"This step requires an Enterprise workspace to gain access to this feature. To learn more about creating an Enterprise workspace or upgrading to an Enterprise workspace from the Azure portal, please visit our [Workspace page](https://docs.microsoft.com/azure/machine-learning/service/concept-workspace#upgrade).\n",
+"\n",
+"This notebook uses the blocked_models parameter to exclude some models that can take a longer time to train on some text datasets. You can choose to remove models from the blocked_models list but you may need to increase the experiment_timeout_hours parameter value to get results."
 ]
 },
 {
@@ -282,7 +286,7 @@
 "automl_settings = {\n",
 " \"experiment_timeout_minutes\": 20,\n",
 " \"primary_metric\": 'accuracy',\n",
-" \"max_concurrent_iterations\": 4, \n",
+" \"max_concurrent_iterations\": num_nodes, \n",
 " \"max_cores_per_iteration\": -1,\n",
 " \"enable_dnn\": True,\n",
 " \"enable_early_stopping\": True,\n",
@@ -297,6 +301,7 @@
 " compute_target=compute_target,\n",
 " training_data=train_dataset,\n",
 " label_column_name=target_column_name,\n",
+" blocked_models = ['LightGBM'],\n",
 " **automl_settings\n",
 " )"
 ]
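Putting the last three hunks together, the experiment configuration for the text-DNN notebook ends up roughly as follows. Only the settings visible in the diff are authoritative; the `task` argument and the overall call shape are assumptions filled in for readability.

```python
from azureml.train.automl import AutoMLConfig

num_nodes = 2

automl_settings = {
    "experiment_timeout_minutes": 20,
    "primary_metric": 'accuracy',
    # Matches the cluster size so each node can run one iteration at a time.
    "max_concurrent_iterations": num_nodes,
    "max_cores_per_iteration": -1,
    "enable_dnn": True,
    "enable_early_stopping": True,
}

automl_config = AutoMLConfig(task='classification',            # assumed; not shown in the diff
                             compute_target=compute_target,
                             training_data=train_dataset,
                             label_column_name=target_column_name,
                             blocked_models=['LightGBM'],       # skip a model that is slow on some text datasets
                             **automl_settings)
```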
@@ -1,6 +1,5 @@
 import pandas as pd
 from azureml.core import Environment
-from azureml.core.conda_dependencies import CondaDependencies
 from azureml.train.estimator import Estimator
 from azureml.core.run import Run
 
@@ -8,13 +7,7 @@ from azureml.core.run import Run
 def run_inference(test_experiment, compute_target, script_folder, train_run,
 train_dataset, test_dataset, target_column_name, model_name):
 
-train_run.download_file('outputs/conda_env_v_1_0_0.yml',
-'inference/condafile.yml')
-
-inference_env = Environment("myenv")
-inference_env.docker.enabled = True
-inference_env.python.conda_dependencies = CondaDependencies(
-conda_dependencies_file_path='inference/condafile.yml')
+inference_env = train_run.get_environment()
 
 est = Estimator(source_directory=script_folder,
 entry_script='infer.py',
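The simplification above reuses the training run's environment instead of rebuilding one from a downloaded conda file. A sketch of how that environment might then be attached to the inference `Estimator` (the extra constructor arguments and the submit call are assumptions, not part of the diff):

```python
from azureml.train.estimator import Estimator

# Reuse the exact environment of the AutoML training run for the inference run.
inference_env = train_run.get_environment()

est = Estimator(source_directory=script_folder,
                entry_script='infer.py',
                compute_target=compute_target,
                environment_definition=inference_env)

run = test_experiment.submit(est)
run.wait_for_completion(show_output=True)
```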
@@ -88,7 +88,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -190,7 +190,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from azureml.core.runconfig import CondaDependencies, DEFAULT_CPU_IMAGE, RunConfiguration\n",
+"from azureml.core.runconfig import CondaDependencies, RunConfiguration\n",
 "\n",
 "# create a new RunConfig object\n",
 "conda_run_config = RunConfiguration(framework=\"python\")\n",
@@ -199,7 +199,6 @@
 "conda_run_config.target = compute_target\n",
 "\n",
 "conda_run_config.environment.docker.enabled = True\n",
-"conda_run_config.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE\n",
 "\n",
 "cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]', 'applicationinsights', 'azureml-opendatasets', 'azureml-defaults'], \n",
 " conda_packages=['numpy==1.16.2'], \n",
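After this edit the run-configuration cell no longer pins the Docker base image and builds its environment roughly as below (assembled from the context lines; the final assignment of the dependencies onto the run configuration is the usual pattern and is assumed here):

```python
from azureml.core.runconfig import CondaDependencies, RunConfiguration

# Run configuration for the remote AML Compute target; the default Docker base image is used.
conda_run_config = RunConfiguration(framework="python")
conda_run_config.target = compute_target
conda_run_config.environment.docker.enabled = True

# Packages the training and pipeline steps need.
cd = CondaDependencies.create(
    pip_packages=['azureml-sdk[automl]', 'applicationinsights',
                  'azureml-opendatasets', 'azureml-defaults'],
    conda_packages=['numpy==1.16.2'])
conda_run_config.environment.python.conda_dependencies = cd
```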
@@ -551,7 +550,7 @@
 "metadata": {
 "authors": [
 {
-"name": "vivijay"
+"name": "anshirga"
 }
 ],
 "kernelspec": {
@@ -0,0 +1,92 @@
# Experimental Notebooks for Automated ML

Notebooks listed in this folder leverage experimental features. Namespaces or function signatures may change in future SDK releases. The notebooks published here will reflect the latest supported APIs. All of these notebooks can run on a client-only installation of the Automated ML SDK.

The client-only installation doesn't contain any of the machine learning libraries, such as scikit-learn, xgboost, or tensorflow, making it much faster to install and less likely to conflict with packages in an existing environment. However, since the ML libraries are not available locally, models cannot be downloaded and loaded directly in the client. To replace the functionality of having models locally, these notebooks also demonstrate the ModelProxy feature, which allows you to submit a predict/forecast run to the training environment.

<a name="localconda"></a>
## Setup using a Local Conda environment

To run these notebooks on your own notebook server, use these installation instructions.
The instructions below will install everything you need and then start a Jupyter notebook.
If you would like to use a lighter-weight version of the client that does not install all of the machine learning libraries locally, you can leverage the [experimental notebooks](experimental/README.md).

### 1. Install mini-conda from [here](https://conda.io/miniconda.html), choose 64-bit Python 3.7 or higher.
- **Note**: if you already have conda installed, you can keep using it, but it should be version 4.4.10 or later (as shown by: conda -V). If you have a previous version installed, you can update it using the command: conda update conda.
  There's no need to install mini-conda specifically.

### 2. Downloading the sample notebooks
- Download the sample notebooks from [GitHub](https://github.com/Azure/MachineLearningNotebooks) as a zip and extract the contents to a local directory. The automated ML sample notebooks are in the "automated-machine-learning" folder.

### 3. Setup a new conda environment
The **automl_setup_thin_client** script creates a new conda environment, installs the necessary packages, configures the widget and starts a Jupyter notebook. It takes the conda environment name as an optional parameter. The default conda environment name is azure_automl_experimental. The exact command depends on the operating system. See the specific sections below for Windows, Mac and Linux. It can take about 10 minutes to execute.

Packages installed by the **automl_setup** script:
<ul><li>python</li><li>nb_conda</li><li>matplotlib</li><li>numpy</li><li>cython</li><li>urllib3</li><li>pandas</li><li>azureml-sdk</li><li>azureml-widgets</li><li>pandas-ml</li></ul>

For more details refer to the [automl_env_thin_client.yml](./automl_env_thin_client.yml)

## Windows
Start an **Anaconda Prompt** window, cd to the **how-to-use-azureml/automated-machine-learning/experimental** folder where the sample notebooks were extracted and then run:
```
automl_setup_thin_client
```

## Mac
Install "Command line developer tools" if it is not already installed (you can use the command: `xcode-select --install`).

Start a Terminal window, cd to the **how-to-use-azureml/automated-machine-learning/experimental** folder where the sample notebooks were extracted and then run:
```
bash automl_setup_thin_client_mac.sh
```

## Linux
cd to the **how-to-use-azureml/automated-machine-learning/experimental** folder where the sample notebooks were extracted and then run:
```
bash automl_setup_thin_client_linux.sh
```

### 4. Running configuration.ipynb
- Before running any samples, you first need to run the configuration notebook. Click on the [configuration](../../configuration.ipynb) notebook.
- Execute the cells in the notebook to register the Machine Learning Services resource provider and create a workspace. (*instructions in notebook*)

### 5. Running Samples
- Please make sure you use the Python [conda env:azure_automl_experimental] kernel when trying the sample notebooks.
- Follow the instructions in the individual notebooks to explore various features in automated ML.

### 6. Starting jupyter notebook manually
To start your Jupyter notebook manually, use:
```
conda activate azure_automl
jupyter notebook
```

or on Mac or Linux:
```
source activate azure_automl
jupyter notebook
```

<a name="samples"></a>
# Automated ML SDK Sample Notebooks

- [auto-ml-regression-model-proxy.ipynb](regression-model-proxy/auto-ml-regression-model-proxy.ipynb)
  - Dataset: Hardware Performance Dataset
  - Simple example of using automated ML for regression
  - Uses Azure compute for training
  - Uses ModelProxy for submitting a prediction run to the training environment on Azure compute

<a name="documentation"></a>
See [Configure automated machine learning experiments](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-auto-train) to learn more about the settings and features available for automated machine learning experiments.

<a name="pythoncommand"></a>
# Running using python command
Jupyter notebook provides a File / Download as / Python (.py) option for saving the notebook as a Python file.
You can then run this file using the python command.
However, on Windows the file needs to be modified before it can be run.
The following condition must be added to the main code in the file:

    if __name__ == "__main__":

The main code of the file must be indented so that it is under this condition (a minimal sketch of this wrapping follows below).
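As a concrete illustration of the wrapper described above, a script exported from one of these notebooks could be restructured as follows. This is a minimal sketch, not part of the repository; `run_notebook_code()` is a hypothetical placeholder for the statements Jupyter exports from the notebook.

```python
# Hypothetical sketch of the Windows wrapper: the exported notebook statements
# are moved into a function so they only execute in the main process
# (required because child processes may be spawned during the AutoML run).
def run_notebook_code():
    # ... paste the statements exported from the notebook here ...
    pass


if __name__ == "__main__":
    run_notebook_code()
```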
@@ -0,0 +1,63 @@
|
|||||||
|
@echo off
|
||||||
|
set conda_env_name=%1
|
||||||
|
set automl_env_file=%2
|
||||||
|
set options=%3
|
||||||
|
set PIP_NO_WARN_SCRIPT_LOCATION=0
|
||||||
|
|
||||||
|
IF "%conda_env_name%"=="" SET conda_env_name="azure_automl_experimental"
|
||||||
|
IF "%automl_env_file%"=="" SET automl_env_file="automl_env.yml"
|
||||||
|
|
||||||
|
IF NOT EXIST %automl_env_file% GOTO YmlMissing
|
||||||
|
|
||||||
|
IF "%CONDA_EXE%"=="" GOTO CondaMissing
|
||||||
|
|
||||||
|
call conda activate %conda_env_name% 2>nul:
|
||||||
|
|
||||||
|
if not errorlevel 1 (
|
||||||
|
echo Upgrading existing conda environment %conda_env_name%
|
||||||
|
call pip uninstall azureml-train-automl -y -q
|
||||||
|
call conda env update --name %conda_env_name% --file %automl_env_file%
|
||||||
|
if errorlevel 1 goto ErrorExit
|
||||||
|
) else (
|
||||||
|
call conda env create -f %automl_env_file% -n %conda_env_name%
|
||||||
|
)
|
||||||
|
|
||||||
|
call conda activate %conda_env_name% 2>nul:
|
||||||
|
if errorlevel 1 goto ErrorExit
|
||||||
|
|
||||||
|
call python -m ipykernel install --user --name %conda_env_name% --display-name "Python (%conda_env_name%)"
|
||||||
|
|
||||||
|
REM azureml.widgets is now installed as part of the pip install under the conda env.
|
||||||
|
REM Removing the old user install so that the notebooks will use the latest widget.
|
||||||
|
call jupyter nbextension uninstall --user --py azureml.widgets
|
||||||
|
|
||||||
|
echo.
|
||||||
|
echo.
|
||||||
|
echo ***************************************
|
||||||
|
echo * AutoML setup completed successfully *
|
||||||
|
echo ***************************************
|
||||||
|
IF NOT "%options%"=="nolaunch" (
|
||||||
|
echo.
|
||||||
|
echo Starting jupyter notebook - please run the configuration notebook
|
||||||
|
echo.
|
||||||
|
jupyter notebook --log-level=50 --notebook-dir='..\..'
|
||||||
|
)
|
||||||
|
|
||||||
|
goto End
|
||||||
|
|
||||||
|
:CondaMissing
|
||||||
|
echo Please run this script from an Anaconda Prompt window.
|
||||||
|
echo You can start an Anaconda Prompt window by
|
||||||
|
echo typing Anaconda Prompt on the Start menu.
|
||||||
|
echo If you don't see the Anaconda Prompt app, install Miniconda.
|
||||||
|
echo If you are running an older version of Miniconda or Anaconda,
|
||||||
|
echo you can upgrade using the command: conda update conda
|
||||||
|
goto End
|
||||||
|
|
||||||
|
:YmlMissing
|
||||||
|
echo File %automl_env_file% not found.
|
||||||
|
|
||||||
|
:ErrorExit
|
||||||
|
echo Install failed
|
||||||
|
|
||||||
|
:End
|
||||||
@@ -0,0 +1,53 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
CONDA_ENV_NAME=$1
|
||||||
|
AUTOML_ENV_FILE=$2
|
||||||
|
OPTIONS=$3
|
||||||
|
PIP_NO_WARN_SCRIPT_LOCATION=0
|
||||||
|
|
||||||
|
if [ "$CONDA_ENV_NAME" == "" ]
|
||||||
|
then
|
||||||
|
CONDA_ENV_NAME="azure_automl_experimental"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$AUTOML_ENV_FILE" == "" ]
|
||||||
|
then
|
||||||
|
AUTOML_ENV_FILE="automl_env.yml"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ ! -f $AUTOML_ENV_FILE ]; then
|
||||||
|
echo "File $AUTOML_ENV_FILE not found"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if source activate $CONDA_ENV_NAME 2> /dev/null
|
||||||
|
then
|
||||||
|
echo "Upgrading existing conda environment" $CONDA_ENV_NAME
|
||||||
|
pip uninstall azureml-train-automl -y -q
|
||||||
|
conda env update --name $CONDA_ENV_NAME --file $AUTOML_ENV_FILE &&
|
||||||
|
jupyter nbextension uninstall --user --py azureml.widgets
|
||||||
|
else
|
||||||
|
conda env create -f $AUTOML_ENV_FILE -n $CONDA_ENV_NAME &&
|
||||||
|
source activate $CONDA_ENV_NAME &&
|
||||||
|
python -m ipykernel install --user --name $CONDA_ENV_NAME --display-name "Python ($CONDA_ENV_NAME)" &&
|
||||||
|
jupyter nbextension uninstall --user --py azureml.widgets &&
|
||||||
|
echo "" &&
|
||||||
|
echo "" &&
|
||||||
|
echo "***************************************" &&
|
||||||
|
echo "* AutoML setup completed successfully *" &&
|
||||||
|
echo "***************************************" &&
|
||||||
|
if [ "$OPTIONS" != "nolaunch" ]
|
||||||
|
then
|
||||||
|
echo "" &&
|
||||||
|
echo "Starting jupyter notebook - please run the configuration notebook" &&
|
||||||
|
echo "" &&
|
||||||
|
jupyter notebook --log-level=50 --notebook-dir '../..'
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ $? -gt 0 ]
|
||||||
|
then
|
||||||
|
echo "Installation failed"
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
@@ -0,0 +1,55 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
CONDA_ENV_NAME=$1
|
||||||
|
AUTOML_ENV_FILE=$2
|
||||||
|
OPTIONS=$3
|
||||||
|
PIP_NO_WARN_SCRIPT_LOCATION=0
|
||||||
|
|
||||||
|
if [ "$CONDA_ENV_NAME" == "" ]
|
||||||
|
then
|
||||||
|
CONDA_ENV_NAME="azure_automl_experimental"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$AUTOML_ENV_FILE" == "" ]
|
||||||
|
then
|
||||||
|
AUTOML_ENV_FILE="automl_env.yml"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ ! -f $AUTOML_ENV_FILE ]; then
|
||||||
|
echo "File $AUTOML_ENV_FILE not found"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if source activate $CONDA_ENV_NAME 2> /dev/null
|
||||||
|
then
|
||||||
|
echo "Upgrading existing conda environment" $CONDA_ENV_NAME
|
||||||
|
pip uninstall azureml-train-automl -y -q
|
||||||
|
conda env update --name $CONDA_ENV_NAME --file $AUTOML_ENV_FILE &&
|
||||||
|
jupyter nbextension uninstall --user --py azureml.widgets
|
||||||
|
else
|
||||||
|
conda env create -f $AUTOML_ENV_FILE -n $CONDA_ENV_NAME &&
|
||||||
|
source activate $CONDA_ENV_NAME &&
|
||||||
|
conda install lightgbm -c conda-forge -y &&
|
||||||
|
python -m ipykernel install --user --name $CONDA_ENV_NAME --display-name "Python ($CONDA_ENV_NAME)" &&
|
||||||
|
jupyter nbextension uninstall --user --py azureml.widgets &&
|
||||||
|
echo "" &&
|
||||||
|
echo "" &&
|
||||||
|
echo "***************************************" &&
|
||||||
|
echo "* AutoML setup completed successfully *" &&
|
||||||
|
echo "***************************************" &&
|
||||||
|
if [ "$OPTIONS" != "nolaunch" ]
|
||||||
|
then
|
||||||
|
echo "" &&
|
||||||
|
echo "Starting jupyter notebook - please run the configuration notebook" &&
|
||||||
|
echo "" &&
|
||||||
|
jupyter notebook --log-level=50 --notebook-dir '../..'
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ $? -gt 0 ]
|
||||||
|
then
|
||||||
|
echo "Installation failed"
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@@ -0,0 +1,20 @@
name: azure_automl_experimental
dependencies:
  # The python interpreter version.
  # Currently Azure ML only supports 3.5.2 and later.
  - pip<=19.3.1
  - python>=3.5.2,<3.8
  - nb_conda
  - matplotlib==2.1.0
  - numpy~=1.18.0
  - cython
  - urllib3<1.24
  - scikit-learn==0.22.1
  - pandas==0.25.1

  - pip:
    # Required packages for AzureML execution, history, and data preparation.
    - azureml-defaults
    - azureml-sdk
    - azureml-widgets
    - azureml-explain-model
@@ -0,0 +1,21 @@
name: azure_automl_experimental
dependencies:
  # The python interpreter version.
  # Currently Azure ML only supports 3.5.2 and later.
  - pip<=19.3.1
  - nomkl
  - python>=3.5.2,<3.8
  - nb_conda
  - matplotlib==2.1.0
  - numpy~=1.18.0
  - cython
  - urllib3<1.24
  - scikit-learn==0.22.1
  - pandas==0.25.1

  - pip:
    # Required packages for AzureML execution, history, and data preparation.
    - azureml-defaults
    - azureml-sdk
    - azureml-widgets
    - azureml-explain-model
@@ -0,0 +1,481 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||||
|
"\n",
|
||||||
|
"Licensed under the MIT License."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Automated Machine Learning\n",
|
||||||
|
"_**Regression with Aml Compute**_\n",
|
||||||
|
"\n",
|
||||||
|
"## Contents\n",
|
||||||
|
"1. [Introduction](#Introduction)\n",
|
||||||
|
"1. [Setup](#Setup)\n",
|
||||||
|
"1. [Data](#Data)\n",
|
||||||
|
"1. [Train](#Train)\n",
|
||||||
|
"1. [Results](#Results)\n",
|
||||||
|
"1. [Test](#Test)\n",
|
||||||
|
"\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Introduction\n",
|
||||||
|
"In this example we use an experimental feature, Model Proxy, to do a predict on the best generated model without downloading the model locally. The prediction will happen on same compute and environment that was used to train the model. This feature is currently in the experimental state, which means that the API is prone to changing, please make sure to run on the latest version of this notebook if you face any issues.\n",
|
||||||
|
"\n",
|
||||||
|
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n",
|
||||||
|
"\n",
|
||||||
|
"In this notebook you will learn how to:\n",
|
||||||
|
"1. Create an `Experiment` in an existing `Workspace`.\n",
|
||||||
|
"2. Configure AutoML using `AutoMLConfig`.\n",
|
||||||
|
"3. Train the model using remote compute.\n",
|
||||||
|
"4. Explore the results.\n",
|
||||||
|
"5. Test the best fitted model."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Setup\n",
|
||||||
|
"\n",
|
||||||
|
"As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import logging\n",
|
||||||
|
"\n",
|
||||||
|
"from matplotlib import pyplot as plt\n",
|
||||||
|
"import numpy as np\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
|
" \n",
|
||||||
|
"\n",
|
||||||
|
"import azureml.core\n",
|
||||||
|
"from azureml.core.experiment import Experiment\n",
|
||||||
|
"from azureml.core.workspace import Workspace\n",
|
||||||
|
"from azureml.core.dataset import Dataset\n",
|
||||||
|
"from azureml.train.automl import AutoMLConfig"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
|
||||||
|
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"ws = Workspace.from_config()\n",
|
||||||
|
"\n",
|
||||||
|
"# Choose a name for the experiment.\n",
|
||||||
|
"experiment_name = 'automl-regression-model-proxy'\n",
|
||||||
|
"\n",
|
||||||
|
"experiment = Experiment(ws, experiment_name)\n",
|
||||||
|
"\n",
|
||||||
|
"output = {}\n",
|
||||||
|
"output['Subscription ID'] = ws.subscription_id\n",
|
||||||
|
"output['Workspace'] = ws.name\n",
|
||||||
|
"output['Resource Group'] = ws.resource_group\n",
|
||||||
|
"output['Location'] = ws.location\n",
|
||||||
|
"output['Run History Name'] = experiment_name\n",
|
||||||
|
"pd.set_option('display.max_colwidth', -1)\n",
|
||||||
|
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||||
|
"outputDf.T"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Using AmlCompute\n",
|
||||||
|
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you use `AmlCompute` as your training compute resource."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||||
|
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||||
|
"\n",
|
||||||
|
"# Choose a name for your CPU cluster\n",
|
||||||
|
"cpu_cluster_name = \"reg-cluster\"\n",
|
||||||
|
"\n",
|
||||||
|
"# Verify that cluster does not exist already\n",
|
||||||
|
"try:\n",
|
||||||
|
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
||||||
|
" print('Found existing cluster, use it.')\n",
|
||||||
|
"except ComputeTargetException:\n",
|
||||||
|
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
|
||||||
|
" max_nodes=4)\n",
|
||||||
|
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
||||||
|
"\n",
|
||||||
|
"compute_target.wait_for_completion(show_output=True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Data\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Load Data\n",
|
||||||
|
"Load the hardware dataset from a csv file containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. Next, we'll split the data using random_split and extract the training data for the model. "
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/machineData.csv\"\n",
|
||||||
|
"dataset = Dataset.Tabular.from_delimited_files(data)\n",
|
||||||
|
"\n",
|
||||||
|
"# Split the dataset into train and test datasets\n",
|
||||||
|
"train_data, test_data = dataset.random_split(percentage=0.8, seed=223)\n",
|
||||||
|
"\n",
|
||||||
|
"label = \"ERP\"\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Train\n",
|
||||||
|
"\n",
|
||||||
|
"Instantiate an `AutoMLConfig` object to specify the settings and data used to run the experiment.\n",
|
||||||
|
"\n",
|
||||||
|
"|Property|Description|\n",
|
||||||
|
"|-|-|\n",
|
||||||
|
"|**task**|classification, regression or forecasting|\n",
|
||||||
|
"|**primary_metric**|This is the metric that you want to optimize. Regression supports the following primary metrics: <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>|\n",
|
||||||
|
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
||||||
|
"|**training_data**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
||||||
|
"|**label_column_name**|(sparse) array-like, shape = [n_samples, ], targets values.|\n",
|
||||||
|
"\n",
|
||||||
|
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"tags": [
|
||||||
|
"automlconfig-remarks-sample"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"automl_settings = {\n",
|
||||||
|
" \"n_cross_validations\": 3,\n",
|
||||||
|
" \"primary_metric\": 'r2_score',\n",
|
||||||
|
" \"enable_early_stopping\": True, \n",
|
||||||
|
" \"experiment_timeout_hours\": 0.3, #for real scenarios we reccommend a timeout of at least one hour \n",
|
||||||
|
" \"max_concurrent_iterations\": 4,\n",
|
||||||
|
" \"max_cores_per_iteration\": -1,\n",
|
||||||
|
" \"verbosity\": logging.INFO,\n",
|
||||||
|
"}\n",
|
||||||
|
"\n",
|
||||||
|
"automl_config = AutoMLConfig(task = 'regression',\n",
|
||||||
|
" compute_target = compute_target,\n",
|
||||||
|
" training_data = train_data,\n",
|
||||||
|
" label_column_name = label,\n",
|
||||||
|
" **automl_settings\n",
|
||||||
|
" )"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Call the `submit` method on the experiment object and pass the run configuration. Execution of remote runs is asynchronous. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"remote_run = experiment.submit(automl_config, show_output = False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# If you need to retrieve a run that already started, use the following code\n",
|
||||||
|
"#from azureml.train.automl.run import AutoMLRun\n",
|
||||||
|
"#remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"remote_run"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Results"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Widget for Monitoring Runs\n",
|
||||||
|
"\n",
|
||||||
|
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
|
||||||
|
"\n",
|
||||||
|
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.widgets import RunDetails\n",
|
||||||
|
"RunDetails(remote_run).show() "
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"remote_run.wait_for_completion()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Retrieve the Best Child Run\n",
|
||||||
|
"\n",
|
||||||
|
"Below we select the best pipeline from our iterations. The `get_best_child` method returns the best run. Overloads on `get_best_child` allow you to retrieve the best run for *any* logged metric."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"best_run = remote_run.get_best_child()\n",
|
||||||
|
"print(best_run)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Best Child Run Based on Any Other Metric\n",
|
||||||
|
"Show the run and the model that has the smallest `root_mean_squared_error` value (which turned out to be the same as the one with largest `spearman_correlation` value):"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"lookup_metric = \"root_mean_squared_error\"\n",
|
||||||
|
"best_run = remote_run.get_best_child(metric = lookup_metric)\n",
|
||||||
|
"print(best_run)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# preview the first 3 rows of the dataset\n",
|
||||||
|
"\n",
|
||||||
|
"test_data = test_data.to_pandas_dataframe()\n",
|
||||||
|
"y_test = test_data['ERP'].fillna(0)\n",
|
||||||
|
"test_data = test_data.drop('ERP', 1)\n",
|
||||||
|
"test_data = test_data.fillna(0)\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"train_data = train_data.to_pandas_dataframe()\n",
|
||||||
|
"y_train = train_data['ERP'].fillna(0)\n",
|
||||||
|
"train_data = train_data.drop('ERP', 1)\n",
|
||||||
|
"train_data = train_data.fillna(0)\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"#### Creating ModelProxy for submitting prediction runs to the training environment.\n",
|
||||||
|
"We will create a ModelProxy for the best child run, which will allow us to submit a run that does the prediction in the training environment. Unlike the local client, which can have different versions of some libraries, the training environment will have all the compatible libraries for the model already."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.train.automl.model_proxy import ModelProxy\n",
|
||||||
|
"best_model_proxy = ModelProxy(best_run)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"y_pred_train = best_model_proxy.predict(train_data).to_pandas_dataframe().values.flatten()\n",
|
||||||
|
"y_residual_train = y_train - y_pred_train\n",
|
||||||
|
"\n",
|
||||||
|
"y_pred_test = best_model_proxy.predict(test_data).to_pandas_dataframe().values.flatten()\n",
|
||||||
|
"y_residual_test = y_test - y_pred_test"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%matplotlib inline\n",
|
||||||
|
"from sklearn.metrics import mean_squared_error, r2_score\n",
|
||||||
|
"\n",
|
||||||
|
"# Set up a multi-plot chart.\n",
|
||||||
|
"f, (a0, a1) = plt.subplots(1, 2, gridspec_kw = {'width_ratios':[1, 1], 'wspace':0, 'hspace': 0})\n",
|
||||||
|
"f.suptitle('Regression Residual Values', fontsize = 18)\n",
|
||||||
|
"f.set_figheight(6)\n",
|
||||||
|
"f.set_figwidth(16)\n",
|
||||||
|
"\n",
|
||||||
|
"# Plot residual values of training set.\n",
|
||||||
|
"a0.axis([0, 360, -100, 100])\n",
|
||||||
|
"a0.plot(y_residual_train, 'bo', alpha = 0.5)\n",
|
||||||
|
"a0.plot([-10,360],[0,0], 'r-', lw = 3)\n",
|
||||||
|
"a0.text(16,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_train, y_pred_train))), fontsize = 12)\n",
|
||||||
|
"a0.text(16,140,'R2 score = {0:.2f}'.format(r2_score(y_train, y_pred_train)),fontsize = 12)\n",
|
||||||
|
"a0.set_xlabel('Training samples', fontsize = 12)\n",
|
||||||
|
"a0.set_ylabel('Residual Values', fontsize = 12)\n",
|
||||||
|
"\n",
|
||||||
|
"# Plot residual values of test set.\n",
|
||||||
|
"a1.axis([0, 90, -100, 100])\n",
|
||||||
|
"a1.plot(y_residual_test, 'bo', alpha = 0.5)\n",
|
||||||
|
"a1.plot([-10,360],[0,0], 'r-', lw = 3)\n",
|
||||||
|
"a1.text(5,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_test, y_pred_test))), fontsize = 12)\n",
|
||||||
|
"a1.text(5,140,'R2 score = {0:.2f}'.format(r2_score(y_test, y_pred_test)),fontsize = 12)\n",
|
||||||
|
"a1.set_xlabel('Test samples', fontsize = 12)\n",
|
||||||
|
"a1.set_yticklabels([])\n",
|
||||||
|
"\n",
|
||||||
|
"plt.show()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%matplotlib inline\n",
|
||||||
|
"test_pred = plt.scatter(y_test, y_pred_test, color='')\n",
|
||||||
|
"test_test = plt.scatter(y_test, y_test, color='g')\n",
|
||||||
|
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
|
||||||
|
"plt.show()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": []
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"authors": [
|
||||||
|
{
|
||||||
|
"name": "rakellam"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"categories": [
|
||||||
|
"how-to-use-azureml",
|
||||||
|
"automated-machine-learning"
|
||||||
|
],
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3.6",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python36"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.6.2"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 2
|
||||||
|
}
|
||||||
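For quick reference, the ModelProxy flow that the notebook above walks through can be condensed as follows. This is a sketch under stated assumptions, not a replacement for the notebook: it assumes a workspace config file is present, that `'<replace with your run id>'` is filled in with a completed run of the `automl-regression-model-proxy` experiment, and it reuses the same dataset split as the notebook.

```python
from azureml.core import Workspace
from azureml.core.dataset import Dataset
from azureml.core.experiment import Experiment
from azureml.train.automl.run import AutoMLRun
from azureml.train.automl.model_proxy import ModelProxy

# Reconnect to the workspace, the experiment, and an existing AutoML run.
ws = Workspace.from_config()
experiment = Experiment(ws, 'automl-regression-model-proxy')
remote_run = AutoMLRun(experiment=experiment, run_id='<replace with your run id>')

# Recreate the test split used in the notebook and drop the label column.
data = "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/machineData.csv"
dataset = Dataset.Tabular.from_delimited_files(data)
train_data, test_data = dataset.random_split(percentage=0.8, seed=223)
test_df = test_data.to_pandas_dataframe().fillna(0)
test_df = test_df.drop(columns=['ERP'])

# ModelProxy submits the prediction as a run on the training compute and
# environment, so the model never has to be loaded into the local client.
best_run = remote_run.get_best_child()
best_model_proxy = ModelProxy(best_run)
predictions = best_model_proxy.predict(test_df).to_pandas_dataframe()
print(predictions.head())
```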
@@ -0,0 +1,4 @@
name: auto-ml-regression-model-proxy
dependencies:
- pip:
  - azureml-sdk
@@ -114,7 +114,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
import argparse
|
import argparse
|
||||||
|
import os
|
||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import pandas as pd
|
import pandas as pd
|
||||||
@@ -10,6 +11,13 @@ from sklearn.metrics import mean_absolute_error, mean_squared_error
|
|||||||
from azureml.automl.runtime.shared.score import scoring, constants
|
from azureml.automl.runtime.shared.score import scoring, constants
|
||||||
from azureml.core import Run
|
from azureml.core import Run
|
||||||
|
|
||||||
|
try:
|
||||||
|
import torch
|
||||||
|
|
||||||
|
_torch_present = True
|
||||||
|
except ImportError:
|
||||||
|
_torch_present = False
|
||||||
|
|
||||||
|
|
||||||
def align_outputs(y_predicted, X_trans, X_test, y_test,
|
def align_outputs(y_predicted, X_trans, X_test, y_test,
|
||||||
predicted_column_name='predicted',
|
predicted_column_name='predicted',
|
||||||
@@ -48,7 +56,7 @@ def align_outputs(y_predicted, X_trans, X_test, y_test,
|
|||||||
# or at edges of time due to lags/rolling windows
|
# or at edges of time due to lags/rolling windows
|
||||||
clean = together[together[[target_column_name,
|
clean = together[together[[target_column_name,
|
||||||
predicted_column_name]].notnull().all(axis=1)]
|
predicted_column_name]].notnull().all(axis=1)]
|
||||||
return(clean)
|
return (clean)
|
||||||
|
|
||||||
|
|
||||||
def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
|
def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
|
||||||
@@ -83,8 +91,7 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
|
|||||||
if origin_time != X[time_column_name].min():
|
if origin_time != X[time_column_name].min():
|
||||||
# Set the context by including actuals up-to the origin time
|
# Set the context by including actuals up-to the origin time
|
||||||
test_context_expand_wind = (X[time_column_name] < origin_time)
|
test_context_expand_wind = (X[time_column_name] < origin_time)
|
||||||
context_expand_wind = (
|
context_expand_wind = (X_test_expand[time_column_name] < origin_time)
|
||||||
X_test_expand[time_column_name] < origin_time)
|
|
||||||
y_query_expand[context_expand_wind] = y[test_context_expand_wind]
|
y_query_expand[context_expand_wind] = y[test_context_expand_wind]
|
||||||
|
|
||||||
# Print some debug info
|
# Print some debug info
|
||||||
@@ -115,8 +122,7 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
|
|||||||
# Align forecast with test set for dates within
|
# Align forecast with test set for dates within
|
||||||
# the current rolling window
|
# the current rolling window
|
||||||
trans_tindex = X_trans.index.get_level_values(time_column_name)
|
trans_tindex = X_trans.index.get_level_values(time_column_name)
|
||||||
trans_roll_wind = (trans_tindex >= origin_time) & (
|
trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
|
||||||
trans_tindex < horizon_time)
|
|
||||||
test_roll_wind = expand_wind & (X[time_column_name] >= origin_time)
|
test_roll_wind = expand_wind & (X[time_column_name] >= origin_time)
|
||||||
df_list.append(align_outputs(
|
df_list.append(align_outputs(
|
||||||
y_fcst[trans_roll_wind], X_trans[trans_roll_wind],
|
y_fcst[trans_roll_wind], X_trans[trans_roll_wind],
|
||||||
@@ -155,8 +161,7 @@ def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
|
|||||||
if origin_time != X_test[time_column_name].min():
|
if origin_time != X_test[time_column_name].min():
|
||||||
# Set the context by including actuals up-to the origin time
|
# Set the context by including actuals up-to the origin time
|
||||||
test_context_expand_wind = (X_test[time_column_name] < origin_time)
|
test_context_expand_wind = (X_test[time_column_name] < origin_time)
|
||||||
context_expand_wind = (
|
context_expand_wind = (X_test_expand[time_column_name] < origin_time)
|
||||||
X_test_expand[time_column_name] < origin_time)
|
|
||||||
y_query_expand[context_expand_wind] = y_test[
|
y_query_expand[context_expand_wind] = y_test[
|
||||||
test_context_expand_wind]
|
test_context_expand_wind]
|
||||||
|
|
||||||
@@ -186,10 +191,8 @@ def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
|
|||||||
# Align forecast with test set for dates within the
|
# Align forecast with test set for dates within the
|
||||||
# current rolling window
|
# current rolling window
|
||||||
trans_tindex = X_trans.index.get_level_values(time_column_name)
|
trans_tindex = X_trans.index.get_level_values(time_column_name)
|
||||||
trans_roll_wind = (trans_tindex >= origin_time) & (
|
trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
|
||||||
trans_tindex < horizon_time)
|
test_roll_wind = expand_wind & (X_test[time_column_name] >= origin_time)
|
||||||
test_roll_wind = expand_wind & (
|
|
||||||
X_test[time_column_name] >= origin_time)
|
|
||||||
df_list.append(align_outputs(y_fcst[trans_roll_wind],
|
df_list.append(align_outputs(y_fcst[trans_roll_wind],
|
||||||
X_trans[trans_roll_wind],
|
X_trans[trans_roll_wind],
|
||||||
X_test[test_roll_wind],
|
X_test[test_roll_wind],
|
||||||
@@ -221,6 +224,10 @@ def MAPE(actual, pred):
|
|||||||
return np.mean(APE(actual_safe, pred_safe))
|
return np.mean(APE(actual_safe, pred_safe))
|
||||||
|
|
||||||
|
|
||||||
|
def map_location_cuda(storage, loc):
|
||||||
|
return storage.cuda()
|
||||||
|
|
||||||
|
|
||||||
parser = argparse.ArgumentParser()
|
parser = argparse.ArgumentParser()
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--max_horizon', type=int, dest='max_horizon',
|
'--max_horizon', type=int, dest='max_horizon',
|
||||||
@@ -238,7 +245,6 @@ parser.add_argument(
|
|||||||
'--model_path', type=str, dest='model_path',
|
'--model_path', type=str, dest='model_path',
|
||||||
default='model.pkl', help='Filename of model to be loaded')
|
default='model.pkl', help='Filename of model to be loaded')
|
||||||
|
|
||||||
|
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
max_horizon = args.max_horizon
|
max_horizon = args.max_horizon
|
||||||
target_column_name = args.target_column_name
|
target_column_name = args.target_column_name
|
||||||
@@ -246,7 +252,6 @@ time_column_name = args.time_column_name
|
|||||||
freq = args.freq
|
freq = args.freq
|
||||||
model_path = args.model_path
|
model_path = args.model_path
|
||||||
|
|
||||||
|
|
||||||
print('args passed are: ')
|
print('args passed are: ')
|
||||||
print(max_horizon)
|
print(max_horizon)
|
||||||
print(target_column_name)
|
print(target_column_name)
|
||||||
@@ -274,8 +279,19 @@ X_lookback_df = lookback_dataset.drop_columns(columns=[target_column_name])
|
|||||||
y_lookback_df = lookback_dataset.with_timestamp_columns(
|
y_lookback_df = lookback_dataset.with_timestamp_columns(
|
||||||
None).keep_columns(columns=[target_column_name])
|
None).keep_columns(columns=[target_column_name])
|
||||||
|
|
||||||
fitted_model = joblib.load(model_path)
|
_, ext = os.path.splitext(model_path)
|
||||||
|
if ext == '.pt':
|
||||||
|
# Load the fc-tcn torch model.
|
||||||
|
assert _torch_present
|
||||||
|
if torch.cuda.is_available():
|
||||||
|
map_location = map_location_cuda
|
||||||
|
else:
|
||||||
|
map_location = 'cpu'
|
||||||
|
with open(model_path, 'rb') as fh:
|
||||||
|
fitted_model = torch.load(fh, map_location=map_location)
|
||||||
|
else:
|
||||||
|
# Load the sklearn pipeline.
|
||||||
|
fitted_model = joblib.load(model_path)
|
||||||
|
|
||||||
if hasattr(fitted_model, 'get_lookback'):
|
if hasattr(fitted_model, 'get_lookback'):
|
||||||
lookback = fitted_model.get_lookback()
|
lookback = fitted_model.get_lookback()
|
||||||
|
|||||||
@@ -87,7 +87,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
|
|||||||
@@ -1,20 +1,12 @@
|
|||||||
from azureml.core import Environment
|
|
||||||
from azureml.core.conda_dependencies import CondaDependencies
|
|
||||||
from azureml.train.estimator import Estimator
|
from azureml.train.estimator import Estimator
|
||||||
from azureml.core.run import Run
|
|
||||||
|
|
||||||
|
|
||||||
def run_rolling_forecast(test_experiment, compute_target, train_run, test_dataset,
|
def run_rolling_forecast(test_experiment, compute_target, train_run, test_dataset,
|
||||||
target_column_name, inference_folder='./forecast'):
|
target_column_name, inference_folder='./forecast'):
|
||||||
condafile = inference_folder + '/condafile.yml'
|
|
||||||
train_run.download_file('outputs/model.pkl',
|
train_run.download_file('outputs/model.pkl',
|
||||||
inference_folder + '/model.pkl')
|
inference_folder + '/model.pkl')
|
||||||
train_run.download_file('outputs/conda_env_v_1_0_0.yml', condafile)
|
|
||||||
|
|
||||||
inference_env = Environment("myenv")
|
inference_env = train_run.get_environment()
|
||||||
inference_env.docker.enabled = True
|
|
||||||
inference_env.python.conda_dependencies = CondaDependencies(
|
|
||||||
conda_dependencies_file_path=condafile)
|
|
||||||
|
|
||||||
est = Estimator(source_directory=inference_folder,
|
est = Estimator(source_directory=inference_folder,
|
||||||
entry_script='forecasting_script.py',
|
entry_script='forecasting_script.py',
|
||||||
|
|||||||
@@ -97,7 +97,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
|
|||||||
@@ -94,7 +94,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
|
|||||||
@@ -82,7 +82,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -327,7 +327,7 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"The featurization customization in forecasting is an advanced feature in AutoML which allows our customers to change the default forecasting featurization behaviors and column types through `FeaturizationConfig`. The supported scenarios include,\n",
|
"The featurization customization in forecasting is an advanced feature in AutoML which allows our customers to change the default forecasting featurization behaviors and column types through `FeaturizationConfig`. The supported scenarios include,\n",
|
||||||
"1. Column purposes update: Override feature type for the specified column. Currently supports DateTime, Categorical and Numeric. This customization can be used in the scenario that the type of the column cannot correctly reflect its purpose. Some numerical columns, for instance, can be treated as Categorical columns which need to be converted to categorical while some can be treated as epoch timestamp which need to be converted to datetime. To tell our SDK to correctly preprocess these columns, a configuration need to be add with the columns and their desired types.\n",
|
"1. Column purposes update: Override feature type for the specified column. Currently supports DateTime, Categorical and Numeric. This customization can be used in the scenario that the type of the column cannot correctly reflect its purpose. Some numerical columns, for instance, can be treated as Categorical columns which need to be converted to categorical while some can be treated as epoch timestamp which need to be converted to datetime. To tell our SDK to correctly preprocess these columns, a configuration need to be add with the columns and their desired types.\n",
|
||||||
"2. Transformer parameters update: Currently supports parameter change for Imputer only. User can customize imputation methods, the supported methods are constant for target data and mean, median, most frequent and constant for training data. This customization can be used for the scenario that our customers know which imputation methods fit best to the input data. For instance, some datasets use NaN to represent 0 which the correct behavior should impute all the missing value with 0. To achieve this behavior, these columns need to be configured as constant imputation with `fill_value` 0.\n",
|
"2. Transformer parameters update: Currently supports parameter change for Imputer only. User can customize imputation methods. The supported imputing methods for target column are constant and ffill (forward fill). The supported imputing methods for feature columns are mean, median, most frequent, constant and ffill (forward fill). This customization can be used for the scenario that our customers know which imputation methods fit best to the input data. For instance, some datasets use NaN to represent 0 which the correct behavior should impute all the missing value with 0. To achieve this behavior, these columns need to be configured as constant imputation with `fill_value` 0.\n",
|
||||||
"3. Drop columns: Columns to drop from being featurized. These usually are the columns which are leaky or the columns contain no useful data.\n",
|
"3. Drop columns: Columns to drop from being featurized. These usually are the columns which are leaky or the columns contain no useful data.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"This step requires an Enterprise workspace to gain access to this feature. To learn more about creating an Enterprise workspace or upgrading to an Enterprise workspace from the Azure portal, please visit our [Workspace page.](https://docs.microsoft.com/azure/machine-learning/service/concept-workspace#upgrade)"
|
"This step requires an Enterprise workspace to gain access to this feature. To learn more about creating an Enterprise workspace or upgrading to an Enterprise workspace from the Azure portal, please visit our [Workspace page.](https://docs.microsoft.com/azure/machine-learning/service/concept-workspace#upgrade)"
|
||||||
@@ -350,7 +350,9 @@
|
|||||||
"# Fill missing values in the target column, Quantity, with zeros.\n",
|
"# Fill missing values in the target column, Quantity, with zeros.\n",
|
||||||
"featurization_config.add_transformer_params('Imputer', ['Quantity'], {\"strategy\": \"constant\", \"fill_value\": 0})\n",
|
"featurization_config.add_transformer_params('Imputer', ['Quantity'], {\"strategy\": \"constant\", \"fill_value\": 0})\n",
|
||||||
"# Fill missing values in the INCOME column with median value.\n",
|
"# Fill missing values in the INCOME column with median value.\n",
|
||||||
"featurization_config.add_transformer_params('Imputer', ['INCOME'], {\"strategy\": \"median\"})"
|
"featurization_config.add_transformer_params('Imputer', ['INCOME'], {\"strategy\": \"median\"})\n",
|
||||||
|
"# Fill missing values in the Price column with forward fill (last value carried forward).\n",
|
||||||
|
"featurization_config.add_transformer_params('Imputer', ['Price'], {\"strategy\": \"ffill\"})"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -80,7 +80,7 @@
 "from azureml.core.workspace import Workspace\n",
 "from azureml.core.dataset import Dataset\n",
 "from azureml.train.automl import AutoMLConfig\n",
-"from azureml.interpret._internal.explanation_client import ExplanationClient"
+"from azureml.interpret import ExplanationClient"
 ]
 },
 {
@@ -96,7 +96,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -562,16 +562,10 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"%%writefile score.py\n",
|
"%%writefile score.py\n",
|
||||||
"import numpy as np\n",
|
|
||||||
"import pandas as pd\n",
|
|
||||||
"import os\n",
|
|
||||||
"import pickle\n",
|
|
||||||
"import azureml.train.automl\n",
|
|
||||||
"import azureml.interpret\n",
|
|
||||||
"from azureml.train.automl.runtime.automl_explain_utilities import AutoMLExplainerSetupClass, \\\n",
|
|
||||||
" automl_setup_model_explanations\n",
|
|
||||||
"import joblib\n",
|
"import joblib\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
"from azureml.core.model import Model\n",
|
"from azureml.core.model import Model\n",
|
||||||
|
"from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations\n",
|
||||||
"\n",
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"def init():\n",
|
"def init():\n",
|
||||||
|
|||||||
@@ -98,7 +98,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -625,7 +625,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from azureml.interpret._internal.explanation_client import ExplanationClient\n",
+"from azureml.interpret import ExplanationClient\n",
 "client = ExplanationClient.from_run(automl_run)\n",
 "engineered_explanations = client.download_model_explanation(raw=False, comment='engineered explanations')\n",
 "print(engineered_explanations.get_feature_importance_dict())\n",
|
|||||||
@@ -1,14 +1,7 @@
|
|||||||
import json
|
|
||||||
import numpy as np
|
|
||||||
import pandas as pd
|
import pandas as pd
|
||||||
import os
|
|
||||||
import pickle
|
|
||||||
import azureml.train.automl
|
|
||||||
import azureml.interpret
|
|
||||||
from azureml.train.automl.runtime.automl_explain_utilities import AutoMLExplainerSetupClass, \
|
|
||||||
automl_setup_model_explanations
|
|
||||||
import joblib
|
import joblib
|
||||||
from azureml.core.model import Model
|
from azureml.core.model import Model
|
||||||
|
from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations
|
||||||
|
|
||||||
|
|
||||||
def init():
|
def init():
|
||||||
|
|||||||
@@ -1,17 +1,17 @@
|
|||||||
# Copyright (c) Microsoft. All rights reserved.
|
# Copyright (c) Microsoft. All rights reserved.
|
||||||
# Licensed under the MIT license.
|
# Licensed under the MIT license.
|
||||||
import os
|
import os
|
||||||
|
import joblib
|
||||||
|
|
||||||
from azureml.core.run import Run
|
from interpret.ext.glassbox import LGBMExplainableModel
|
||||||
|
from automl.client.core.common.constants import MODEL_PATH
|
||||||
from azureml.core.experiment import Experiment
|
from azureml.core.experiment import Experiment
|
||||||
from azureml.core.dataset import Dataset
|
from azureml.core.dataset import Dataset
|
||||||
from azureml.train.automl.runtime.automl_explain_utilities import AutoMLExplainerSetupClass, \
|
from azureml.core.run import Run
|
||||||
automl_setup_model_explanations, automl_check_model_if_explainable
|
|
||||||
from interpret.ext.glassbox import LGBMExplainableModel
|
|
||||||
from azureml.interpret.mimic_wrapper import MimicWrapper
|
from azureml.interpret.mimic_wrapper import MimicWrapper
|
||||||
from automl.client.core.common.constants import MODEL_PATH
|
|
||||||
from azureml.interpret.scoring.scoring_explainer import TreeScoringExplainer
|
from azureml.interpret.scoring.scoring_explainer import TreeScoringExplainer
|
||||||
import joblib
|
from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations, \
|
||||||
|
automl_check_model_if_explainable
|
||||||
|
|
||||||
|
|
||||||
OUTPUT_DIR = './outputs/'
|
OUTPUT_DIR = './outputs/'
|
||||||
|
|||||||
@@ -92,7 +92,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.12.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
|
@@ -1,33 +0,0 @@
Azure Databricks is a managed Spark offering on Azure, and customers already use it for advanced analytics. It provides a collaborative notebook-based environment with CPU- or GPU-based compute clusters.

In this section, you will find sample notebooks on how to use the Azure Machine Learning SDK with Azure Databricks. You can train a model using Spark MLlib and then deploy the model to ACI/AKS from within Azure Databricks. You can also use the Automated ML capability (**public preview**) of the Azure ML SDK with Azure Databricks.

- Customers who use Azure Databricks for advanced analytics can now use the same cluster to run experiments with or without automated machine learning.
- You can keep the data within the same cluster.
- You can leverage the local worker nodes with autoscale and auto-termination capabilities.
- You can use multiple cores of your Azure Databricks cluster to perform simultaneous training.
- You can further tune the model generated by automated machine learning if you choose to.
- Every run (including the best run) is available as a pipeline, which you can tune further if needed.
- The model trained using Azure Databricks can be registered in an Azure ML workspace and then deployed to Azure managed compute (ACI or AKS) using the Azure Machine Learning SDK; a minimal registration sketch follows this README.

Please follow our [Azure doc](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-environment#azure-databricks) to install the SDK in your Azure Databricks cluster before trying any of the sample notebooks.

**Single file** -
The following archive contains all the sample notebooks. You can run the notebooks after importing [DBC](Databricks_AMLSDK_1-4_6.dbc) in your Databricks workspace instead of downloading them individually.

Notebooks 1-4 have to be run sequentially and are related to an income prediction experiment based on this [dataset](https://archive.ics.uci.edu/ml/datasets/adult); they demonstrate how to prepare data, train, and operationalize a Spark ML model with the Azure ML Python SDK from within Azure Databricks.

Notebook 6 is an Automated ML sample notebook for classification.

Learn more about [how to use Azure Databricks as a development environment](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-environment#azure-databricks) for Azure Machine Learning service.

**Databricks as a Compute Target from AML Pipelines**
You can use Azure Databricks as a compute target from [Azure Machine Learning Pipelines](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-ml-pipelines). Take a look at this notebook for details: [aml-pipelines-use-databricks-as-compute-target.ipynb](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks/databricks-as-remote-compute-target/aml-pipelines-use-databricks-as-compute-target.ipynb).

For more on SDK concepts, please refer to [notebooks](https://github.com/Azure/MachineLearningNotebooks).

**Please let us know your feedback.**


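As a minimal sketch of the registration step mentioned in the list above (the model path and names are placeholders; notebooks 3-4 show the complete train, register, and deploy flow):

```python
from azureml.core import Workspace
from azureml.core.model import Model

# Load the workspace written earlier by ws.write_config().
ws = Workspace.from_config()

# Register a model folder/file produced on the Databricks cluster (placeholder path and name).
model = Model.register(workspace=ws,
                       model_path="AdultCensus_runHistory.mml",   # local path to the trained Spark ML pipeline
                       model_name="AdultCensus_runHistory.mml",   # name under which the model is registered
                       description="Spark ML model trained on Azure Databricks")
print(model.name, model.version)
```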
@@ -1,373 +0,0 @@
|
|||||||
{
|
|
||||||
"cells": [
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Azure ML & Azure Databricks notebooks by Parashar Shah.\n",
|
|
||||||
"\n",
|
|
||||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
|
||||||
"\n",
|
|
||||||
"Licensed under the MIT License."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#Model Building"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import os\n",
|
|
||||||
"import pprint\n",
|
|
||||||
"import numpy as np\n",
|
|
||||||
"\n",
|
|
||||||
"from pyspark.ml import Pipeline, PipelineModel\n",
|
|
||||||
"from pyspark.ml.feature import OneHotEncoder, StringIndexer, VectorAssembler\n",
|
|
||||||
"from pyspark.ml.classification import LogisticRegression\n",
|
|
||||||
"from pyspark.ml.evaluation import BinaryClassificationEvaluator\n",
|
|
||||||
"from pyspark.ml.tuning import CrossValidator, ParamGridBuilder"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import azureml.core\n",
|
|
||||||
"\n",
|
|
||||||
"# Check core SDK version number\n",
|
|
||||||
"print(\"SDK version:\", azureml.core.VERSION)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Set auth to be used by workspace related APIs.\n",
|
|
||||||
"# For automation or CI/CD ServicePrincipalAuthentication can be used.\n",
|
|
||||||
"# https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.authentication.serviceprincipalauthentication?view=azure-ml-py\n",
|
|
||||||
"auth = None"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# import the Workspace class and check the azureml SDK version\n",
|
|
||||||
"from azureml.core import Workspace\n",
|
|
||||||
"\n",
|
|
||||||
"ws = Workspace.from_config(auth = auth)\n",
|
|
||||||
"print('Workspace name: ' + ws.name, \n",
|
|
||||||
" 'Azure region: ' + ws.location, \n",
|
|
||||||
" 'Subscription id: ' + ws.subscription_id, \n",
|
|
||||||
" 'Resource group: ' + ws.resource_group, sep = '\\n')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#get the train and test datasets\n",
|
|
||||||
"train_data_path = \"AdultCensusIncomeTrain\"\n",
|
|
||||||
"test_data_path = \"AdultCensusIncomeTest\"\n",
|
|
||||||
"\n",
|
|
||||||
"train = spark.read.parquet(train_data_path)\n",
|
|
||||||
"test = spark.read.parquet(test_data_path)\n",
|
|
||||||
"\n",
|
|
||||||
"print(\"train: ({}, {})\".format(train.count(), len(train.columns)))\n",
|
|
||||||
"print(\"test: ({}, {})\".format(test.count(), len(test.columns)))\n",
|
|
||||||
"\n",
|
|
||||||
"train.printSchema()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#Define Model"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"label = \"income\"\n",
|
|
||||||
"dtypes = dict(train.dtypes)\n",
|
|
||||||
"dtypes.pop(label)\n",
|
|
||||||
"\n",
|
|
||||||
"si_xvars = []\n",
|
|
||||||
"ohe_xvars = []\n",
|
|
||||||
"featureCols = []\n",
|
|
||||||
"for idx,key in enumerate(dtypes):\n",
|
|
||||||
" if dtypes[key] == \"string\":\n",
|
|
||||||
" featureCol = \"-\".join([key, \"encoded\"])\n",
|
|
||||||
" featureCols.append(featureCol)\n",
|
|
||||||
" \n",
|
|
||||||
" tmpCol = \"-\".join([key, \"tmp\"])\n",
|
|
||||||
" # string-index and one-hot encode the string column\n",
|
|
||||||
" #https://spark.apache.org/docs/2.3.0/api/java/org/apache/spark/ml/feature/StringIndexer.html\n",
|
|
||||||
" #handleInvalid: Param for how to handle invalid data (unseen labels or NULL values). \n",
|
|
||||||
" #Options are 'skip' (filter out rows with invalid data), 'error' (throw an error), \n",
|
|
||||||
" #or 'keep' (put invalid data in a special additional bucket, at index numLabels). Default: \"error\"\n",
|
|
||||||
" si_xvars.append(StringIndexer(inputCol=key, outputCol=tmpCol, handleInvalid=\"skip\"))\n",
|
|
||||||
" ohe_xvars.append(OneHotEncoder(inputCol=tmpCol, outputCol=featureCol))\n",
|
|
||||||
" else:\n",
|
|
||||||
" featureCols.append(key)\n",
|
|
||||||
"\n",
|
|
||||||
"# string-index the label column into a column named \"label\"\n",
|
|
||||||
"si_label = StringIndexer(inputCol=label, outputCol='label')\n",
|
|
||||||
"\n",
|
|
||||||
"# assemble the encoded feature columns in to a column named \"features\"\n",
|
|
||||||
"assembler = VectorAssembler(inputCols=featureCols, outputCol=\"features\")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from azureml.core.run import Run\n",
|
|
||||||
"from azureml.core.experiment import Experiment\n",
|
|
||||||
"import numpy as np\n",
|
|
||||||
"import os\n",
|
|
||||||
"import shutil\n",
|
|
||||||
"\n",
|
|
||||||
"model_name = \"AdultCensus_runHistory.mml\"\n",
|
|
||||||
"model_dbfs = os.path.join(\"/dbfs\", model_name)\n",
|
|
||||||
"run_history_name = 'spark-ml-notebook'\n",
|
|
||||||
"\n",
|
|
||||||
"# start a training run by defining an experiment\n",
|
|
||||||
"myexperiment = Experiment(ws, \"Ignite_AI_Talk\")\n",
|
|
||||||
"root_run = myexperiment.start_logging()\n",
|
|
||||||
"\n",
|
|
||||||
"# Regularization Rates - \n",
|
|
||||||
"regs = [0.0001, 0.001, 0.01, 0.1]\n",
|
|
||||||
" \n",
|
|
||||||
"# try a bunch of regularization rate in a Logistic Regression model\n",
|
|
||||||
"for reg in regs:\n",
|
|
||||||
" print(\"Regularization rate: {}\".format(reg))\n",
|
|
||||||
" # create a bunch of child runs\n",
|
|
||||||
" with root_run.child_run(\"reg-\" + str(reg)) as run:\n",
|
|
||||||
" # create a new Logistic Regression model.\n",
|
|
||||||
" lr = LogisticRegression(regParam=reg)\n",
|
|
||||||
" \n",
|
|
||||||
" # put together the pipeline\n",
|
|
||||||
" pipe = Pipeline(stages=[*si_xvars, *ohe_xvars, si_label, assembler, lr])\n",
|
|
||||||
"\n",
|
|
||||||
" # train the model\n",
|
|
||||||
" model_p = pipe.fit(train)\n",
|
|
||||||
" \n",
|
|
||||||
" # make prediction\n",
|
|
||||||
" pred = model_p.transform(test)\n",
|
|
||||||
" \n",
|
|
||||||
" # evaluate. note only 2 metrics are supported out of the box by Spark ML.\n",
|
|
||||||
" bce = BinaryClassificationEvaluator(rawPredictionCol='rawPrediction')\n",
|
|
||||||
" au_roc = bce.setMetricName('areaUnderROC').evaluate(pred)\n",
|
|
||||||
" au_prc = bce.setMetricName('areaUnderPR').evaluate(pred)\n",
|
|
||||||
"\n",
|
|
||||||
" print(\"Area under ROC: {}\".format(au_roc))\n",
|
|
||||||
" print(\"Area Under PR: {}\".format(au_prc))\n",
|
|
||||||
" \n",
|
|
||||||
" # log reg, au_roc, au_prc and feature names in run history\n",
|
|
||||||
" run.log(\"reg\", reg)\n",
|
|
||||||
" run.log(\"au_roc\", au_roc)\n",
|
|
||||||
" run.log(\"au_prc\", au_prc)\n",
|
|
||||||
" run.log_list(\"columns\", train.columns)\n",
|
|
||||||
"\n",
|
|
||||||
" # save model\n",
|
|
||||||
" model_p.write().overwrite().save(model_name)\n",
|
|
||||||
" \n",
|
|
||||||
" # upload the serialized model into run history record\n",
|
|
||||||
" mdl, ext = model_name.split(\".\")\n",
|
|
||||||
" model_zip = mdl + \".zip\"\n",
|
|
||||||
" shutil.make_archive(mdl, 'zip', model_dbfs)\n",
|
|
||||||
" run.upload_file(\"outputs/\" + model_name, model_zip) \n",
|
|
||||||
" #run.upload_file(\"outputs/\" + model_name, path_or_stream = model_dbfs) #cannot deal with folders\n",
|
|
||||||
"\n",
|
|
||||||
" # now delete the serialized model from local folder since it is already uploaded to run history \n",
|
|
||||||
" shutil.rmtree(model_dbfs)\n",
|
|
||||||
" os.remove(model_zip)\n",
|
|
||||||
" \n",
|
|
||||||
"# Declare run completed\n",
|
|
||||||
"root_run.complete()\n",
|
|
||||||
"root_run_id = root_run.id\n",
|
|
||||||
"print (\"run id:\", root_run.id)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"metrics = root_run.get_metrics(recursive=True)\n",
|
|
||||||
"best_run_id = max(metrics, key = lambda k: metrics[k]['au_roc'])\n",
|
|
||||||
"print(best_run_id, metrics[best_run_id]['au_roc'], metrics[best_run_id]['reg'])"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#Get the best run\n",
|
|
||||||
"child_runs = {}\n",
|
|
||||||
"\n",
|
|
||||||
"for r in root_run.get_children():\n",
|
|
||||||
" child_runs[r.id] = r\n",
|
|
||||||
" \n",
|
|
||||||
"best_run = child_runs[best_run_id]"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#Download the model from the best run to a local folder\n",
|
|
||||||
"best_model_file_name = \"best_model.zip\"\n",
|
|
||||||
"best_run.download_file(name = 'outputs/' + model_name, output_file_path = best_model_file_name)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#Model Evaluation"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"##unzip the model to dbfs (as load() seems to require that) and load it.\n",
|
|
||||||
"if os.path.isfile(model_dbfs) or os.path.isdir(model_dbfs):\n",
|
|
||||||
" shutil.rmtree(model_dbfs)\n",
|
|
||||||
"shutil.unpack_archive(best_model_file_name, model_dbfs)\n",
|
|
||||||
"\n",
|
|
||||||
"model_p_best = PipelineModel.load(model_name)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# make prediction\n",
|
|
||||||
"pred = model_p_best.transform(test)\n",
|
|
||||||
"output = pred[['hours_per_week','age','workclass','marital_status','income','prediction']]\n",
|
|
||||||
"display(output.limit(5))"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# evaluate. note only 2 metrics are supported out of the box by Spark ML.\n",
|
|
||||||
"bce = BinaryClassificationEvaluator(rawPredictionCol='rawPrediction')\n",
|
|
||||||
"au_roc = bce.setMetricName('areaUnderROC').evaluate(pred)\n",
|
|
||||||
"au_prc = bce.setMetricName('areaUnderPR').evaluate(pred)\n",
|
|
||||||
"\n",
|
|
||||||
"print(\"Area under ROC: {}\".format(au_roc))\n",
|
|
||||||
"print(\"Area Under PR: {}\".format(au_prc))"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#Model Persistence"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"##NOTE: by default the model is saved to and loaded from /dbfs/ instead of cwd!\n",
|
|
||||||
"model_p_best.write().overwrite().save(model_name)\n",
|
|
||||||
"print(\"saved model to {}\".format(model_dbfs))"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"%sh\n",
|
|
||||||
"\n",
|
|
||||||
"ls -la /dbfs/AdultCensus_runHistory.mml/*"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"dbutils.notebook.exit(\"success\")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
""
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"authors": [
|
|
||||||
{
|
|
||||||
"name": "pasha"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"kernelspec": {
|
|
||||||
"display_name": "Python 3.6",
|
|
||||||
"language": "python",
|
|
||||||
"name": "python36"
|
|
||||||
},
|
|
||||||
"language_info": {
|
|
||||||
"codemirror_mode": {
|
|
||||||
"name": "ipython",
|
|
||||||
"version": 3
|
|
||||||
},
|
|
||||||
"file_extension": ".py",
|
|
||||||
"mimetype": "text/x-python",
|
|
||||||
"name": "python",
|
|
||||||
"nbconvert_exporter": "python",
|
|
||||||
"pygments_lexer": "ipython3",
|
|
||||||
"version": "3.6.6"
|
|
||||||
},
|
|
||||||
"name": "build-model-run-history-03",
|
|
||||||
"notebookId": 3836944406456339
|
|
||||||
},
|
|
||||||
"nbformat": 4,
|
|
||||||
"nbformat_minor": 1
|
|
||||||
}
|
|
||||||
@@ -1,320 +0,0 @@
|
|||||||
{
|
|
||||||
"cells": [
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Azure ML & Azure Databricks notebooks by Parashar Shah.\n",
|
|
||||||
"\n",
|
|
||||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
|
||||||
"\n",
|
|
||||||
"Licensed under the MIT License."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"# Register Azure Databricks trained model and deploy it to ACI\n"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Please ensure you have run all previous notebooks in sequence before running this.\n",
|
|
||||||
"\n",
|
|
||||||
"Please Register Azure Container Instance(ACI) using Azure Portal: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services#portal in your subscription before using the SDK to deploy your ML model to ACI."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import azureml.core\n",
|
|
||||||
"\n",
|
|
||||||
"# Check core SDK version number\n",
|
|
||||||
"print(\"SDK version:\", azureml.core.VERSION)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Set auth to be used by workspace related APIs.\n",
|
|
||||||
"# For automation or CI/CD ServicePrincipalAuthentication can be used.\n",
|
|
||||||
"# https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.authentication.serviceprincipalauthentication?view=azure-ml-py\n",
|
|
||||||
"auth = None"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from azureml.core import Workspace\n",
|
|
||||||
"\n",
|
|
||||||
"ws = Workspace.from_config(auth = auth)\n",
|
|
||||||
"print('Workspace name: ' + ws.name, \n",
|
|
||||||
" 'Azure region: ' + ws.location, \n",
|
|
||||||
" 'Subscription id: ' + ws.subscription_id, \n",
|
|
||||||
" 'Resource group: ' + ws.resource_group, sep = '\\n')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"##NOTE: service deployment always gets the model from the current working dir.\n",
|
|
||||||
"import os\n",
|
|
||||||
"\n",
|
|
||||||
"model_name = \"AdultCensus_runHistory.mml\" # \n",
|
|
||||||
"model_name_dbfs = os.path.join(\"/dbfs\", model_name)\n",
|
|
||||||
"\n",
|
|
||||||
"print(\"copy model from dbfs to local\")\n",
|
|
||||||
"model_local = \"file:\" + os.getcwd() + \"/\" + model_name\n",
|
|
||||||
"dbutils.fs.cp(model_name, model_local, True)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#Register the model\n",
|
|
||||||
"from azureml.core.model import Model\n",
|
|
||||||
"mymodel = Model.register(model_path = model_name, # this points to a local file\n",
|
|
||||||
" model_name = model_name, # this is the name the model is registered as, am using same name for both path and name. \n",
|
|
||||||
" description = \"ADB trained model by Parashar\",\n",
|
|
||||||
" workspace = ws)\n",
|
|
||||||
"\n",
|
|
||||||
"print(mymodel.name, mymodel.description, mymodel.version)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#%%writefile score_sparkml.py\n",
|
|
||||||
"score_sparkml = \"\"\"\n",
|
|
||||||
" \n",
|
|
||||||
"import json\n",
|
|
||||||
" \n",
|
|
||||||
"def init():\n",
|
|
||||||
" # One-time initialization of PySpark and predictive model\n",
|
|
||||||
" import pyspark\n",
|
|
||||||
" import os\n",
|
|
||||||
" from azureml.core.model import Model\n",
|
|
||||||
" from pyspark.ml import PipelineModel\n",
|
|
||||||
" \n",
|
|
||||||
" global trainedModel\n",
|
|
||||||
" global spark\n",
|
|
||||||
" \n",
|
|
||||||
" spark = pyspark.sql.SparkSession.builder.appName(\"ADB and AML notebook by Parashar\").getOrCreate()\n",
|
|
||||||
" model_name = \"{model_name}\" #interpolated\n",
|
|
||||||
" # AZUREML_MODEL_DIR is an environment variable created during deployment.\n",
|
|
||||||
" # It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)\n",
|
|
||||||
" # For multiple models, it points to the folder containing all deployed models (./azureml-models)\n",
|
|
||||||
" model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), model_name)\n",
|
|
||||||
" trainedModel = PipelineModel.load(model_path)\n",
|
|
||||||
" \n",
|
|
||||||
"def run(input_json):\n",
|
|
||||||
" if isinstance(trainedModel, Exception):\n",
|
|
||||||
" return json.dumps({{\"trainedModel\":str(trainedModel)}})\n",
|
|
||||||
" \n",
|
|
||||||
" try:\n",
|
|
||||||
" sc = spark.sparkContext\n",
|
|
||||||
" input_list = json.loads(input_json)\n",
|
|
||||||
" input_rdd = sc.parallelize(input_list)\n",
|
|
||||||
" input_df = spark.read.json(input_rdd)\n",
|
|
||||||
" \n",
|
|
||||||
" # Compute prediction\n",
|
|
||||||
" prediction = trainedModel.transform(input_df)\n",
|
|
||||||
" #result = prediction.first().prediction\n",
|
|
||||||
" predictions = prediction.collect()\n",
|
|
||||||
" \n",
|
|
||||||
" #Get each scored result\n",
|
|
||||||
" preds = [str(x['prediction']) for x in predictions]\n",
|
|
||||||
" result = \",\".join(preds)\n",
|
|
||||||
" # you can return any data type as long as it is JSON-serializable\n",
|
|
||||||
" return result.tolist()\n",
|
|
||||||
" except Exception as e:\n",
|
|
||||||
" result = str(e)\n",
|
|
||||||
" return result\n",
|
|
||||||
" \n",
|
|
||||||
"\"\"\".format(model_name=model_name)\n",
|
|
||||||
" \n",
|
|
||||||
"exec(score_sparkml)\n",
|
|
||||||
" \n",
|
|
||||||
"with open(\"score_sparkml.py\", \"w\") as file:\n",
|
|
||||||
" file.write(score_sparkml)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from azureml.core.conda_dependencies import CondaDependencies \n",
|
|
||||||
"\n",
|
|
||||||
"myacienv = CondaDependencies.create(conda_packages=['scikit-learn','numpy','pandas']) # showing how to add libs as an eg. - not needed for this model.\n",
|
|
||||||
"\n",
|
|
||||||
"with open(\"myenv.yml\",\"w\") as f:\n",
|
|
||||||
" f.write(myacienv.serialize_to_string())"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#deploy to ACI\n",
|
|
||||||
"from azureml.core.webservice import AciWebservice, Webservice\n",
|
|
||||||
"from azureml.exceptions import WebserviceException\n",
|
|
||||||
"from azureml.core.model import InferenceConfig\n",
|
|
||||||
"from azureml.core.environment import Environment\n",
|
|
||||||
"from azureml.core.conda_dependencies import CondaDependencies\n",
|
|
||||||
"\n",
|
|
||||||
"\n",
|
|
||||||
"myaci_config = AciWebservice.deploy_configuration(cpu_cores = 2, \n",
|
|
||||||
" memory_gb = 2, \n",
|
|
||||||
" tags = {'name':'Databricks Azure ML ACI'}, \n",
|
|
||||||
" description = 'This is for ADB and AML example.')\n",
|
|
||||||
"\n",
|
|
||||||
"service_name = 'aciws'\n",
|
|
||||||
"\n",
|
|
||||||
"# Remove any existing service under the same name.\n",
|
|
||||||
"try:\n",
|
|
||||||
" Webservice(ws, service_name).delete()\n",
|
|
||||||
"except WebserviceException:\n",
|
|
||||||
" pass\n",
|
|
||||||
"\n",
|
|
||||||
"myenv = Environment.get(ws, name='AzureML-PySpark-MmlSpark-0.15')\n",
|
|
||||||
"# we need to add extra packages to procured environment\n",
|
|
||||||
"# in order to deploy amended environment we need to rename it\n",
|
|
||||||
"myenv.name = 'myenv'\n",
|
|
||||||
"model_dependencies = CondaDependencies('myenv.yml')\n",
|
|
||||||
"for pip_dep in model_dependencies.pip_packages:\n",
|
|
||||||
" myenv.python.conda_dependencies.add_pip_package(pip_dep)\n",
|
|
||||||
"for conda_dep in model_dependencies.conda_packages:\n",
|
|
||||||
" myenv.python.conda_dependencies.add_conda_package(conda_dep)\n",
|
|
||||||
"inference_config = InferenceConfig(entry_script='score_sparkml.py', environment=myenv)\n",
|
|
||||||
"\n",
|
|
||||||
"myservice = Model.deploy(ws, service_name, [mymodel], inference_config, myaci_config)\n",
|
|
||||||
"myservice.wait_for_deployment(show_output=True)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"help(Webservice)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#for using the Web HTTP API \n",
|
|
||||||
"print(myservice.scoring_uri)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import json\n",
|
|
||||||
"\n",
|
|
||||||
"#get the some sample data\n",
|
|
||||||
"test_data_path = \"AdultCensusIncomeTest\"\n",
|
|
||||||
"test = spark.read.parquet(test_data_path).limit(5)\n",
|
|
||||||
"\n",
|
|
||||||
"test_json = json.dumps(test.toJSON().collect())\n",
|
|
||||||
"\n",
|
|
||||||
"print(test_json)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#using data defined above predict if income is >50K (1) or <=50K (0)\n",
|
|
||||||
"myservice.run(input_data=test_json)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#comment to not delete the web service\n",
|
|
||||||
"myservice.delete()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Deploying to other types of computes\n",
|
|
||||||
"\n",
|
|
||||||
"In order to learn how to deploy to other types of compute targets, such as AKS, please take a look at the set of notebooks in the [deployment](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/deployment) folder."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
""
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"authors": [
|
|
||||||
{
|
|
||||||
"name": "pasha"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"kernelspec": {
|
|
||||||
"display_name": "Python 3.6",
|
|
||||||
"language": "python",
|
|
||||||
"name": "python36"
|
|
||||||
},
|
|
||||||
"language_info": {
|
|
||||||
"codemirror_mode": {
|
|
||||||
"name": "ipython",
|
|
||||||
"version": 3
|
|
||||||
},
|
|
||||||
"file_extension": ".py",
|
|
||||||
"mimetype": "text/x-python",
|
|
||||||
"name": "python",
|
|
||||||
"nbconvert_exporter": "python",
|
|
||||||
"pygments_lexer": "ipython3",
|
|
||||||
"version": "3.6.8"
|
|
||||||
},
|
|
||||||
"name": "deploy-to-aci-04",
|
|
||||||
"notebookId": 3836944406456376
|
|
||||||
},
|
|
||||||
"nbformat": 4,
|
|
||||||
"nbformat_minor": 1
|
|
||||||
}
|
|
||||||
@@ -1,179 +0,0 @@
|
|||||||
{
|
|
||||||
"cells": [
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Azure ML & Azure Databricks notebooks by Parashar Shah.\n",
|
|
||||||
"\n",
|
|
||||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
|
||||||
"\n",
|
|
||||||
"Licensed under the MIT License."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#Data Ingestion"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import os\n",
|
|
||||||
"import urllib"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Download AdultCensusIncome.csv from Azure CDN. This file has 32,561 rows.\n",
|
|
||||||
"dataurl = \"https://amldockerdatasets.azureedge.net/AdultCensusIncome.csv\"\n",
|
|
||||||
"datafile = \"AdultCensusIncome.csv\"\n",
|
|
||||||
"datafile_dbfs = os.path.join(\"/dbfs\", datafile)\n",
|
|
||||||
"\n",
|
|
||||||
"if os.path.isfile(datafile_dbfs):\n",
|
|
||||||
" print(\"found {} at {}\".format(datafile, datafile_dbfs))\n",
|
|
||||||
"else:\n",
|
|
||||||
" print(\"downloading {} to {}\".format(datafile, datafile_dbfs))\n",
|
|
||||||
" urllib.request.urlretrieve(dataurl, datafile_dbfs)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Create a Spark dataframe out of the csv file.\n",
|
|
||||||
"data_all = sqlContext.read.format('csv').options(header='true', inferSchema='true', ignoreLeadingWhiteSpace='true', ignoreTrailingWhiteSpace='true').load(datafile)\n",
|
|
||||||
"print(\"({}, {})\".format(data_all.count(), len(data_all.columns)))\n",
|
|
||||||
"data_all.printSchema()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#renaming columns\n",
|
|
||||||
"columns_new = [col.replace(\"-\", \"_\") for col in data_all.columns]\n",
|
|
||||||
"data_all = data_all.toDF(*columns_new)\n",
|
|
||||||
"data_all.printSchema()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"display(data_all.limit(5))"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#Data Preparation"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Choose feature columns and the label column.\n",
|
|
||||||
"label = \"income\"\n",
|
|
||||||
"xvars = set(data_all.columns) - {label}\n",
|
|
||||||
"\n",
|
|
||||||
"print(\"label = {}\".format(label))\n",
|
|
||||||
"print(\"features = {}\".format(xvars))\n",
|
|
||||||
"\n",
|
|
||||||
"data = data_all.select([*xvars, label])\n",
|
|
||||||
"\n",
|
|
||||||
"# Split data into train and test.\n",
|
|
||||||
"train, test = data.randomSplit([0.75, 0.25], seed=123)\n",
|
|
||||||
"\n",
|
|
||||||
"print(\"train ({}, {})\".format(train.count(), len(train.columns)))\n",
|
|
||||||
"print(\"test ({}, {})\".format(test.count(), len(test.columns)))"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#Data Persistence"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Write the train and test data sets to intermediate storage\n",
|
|
||||||
"train_data_path = \"AdultCensusIncomeTrain\"\n",
|
|
||||||
"test_data_path = \"AdultCensusIncomeTest\"\n",
|
|
||||||
"\n",
|
|
||||||
"train_data_path_dbfs = os.path.join(\"/dbfs\", \"AdultCensusIncomeTrain\")\n",
|
|
||||||
"test_data_path_dbfs = os.path.join(\"/dbfs\", \"AdultCensusIncomeTest\")\n",
|
|
||||||
"\n",
|
|
||||||
"train.write.mode('overwrite').parquet(train_data_path)\n",
|
|
||||||
"test.write.mode('overwrite').parquet(test_data_path)\n",
|
|
||||||
"print(\"train and test datasets saved to {} and {}\".format(train_data_path_dbfs, test_data_path_dbfs))"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": []
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
""
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"authors": [
|
|
||||||
{
|
|
||||||
"name": "pasha"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"kernelspec": {
|
|
||||||
"display_name": "Python 3.6",
|
|
||||||
"language": "python",
|
|
||||||
"name": "python36"
|
|
||||||
},
|
|
||||||
"language_info": {
|
|
||||||
"codemirror_mode": {
|
|
||||||
"name": "ipython",
|
|
||||||
"version": 3
|
|
||||||
},
|
|
||||||
"file_extension": ".py",
|
|
||||||
"mimetype": "text/x-python",
|
|
||||||
"name": "python",
|
|
||||||
"nbconvert_exporter": "python",
|
|
||||||
"pygments_lexer": "ipython3",
|
|
||||||
"version": "3.6.6"
|
|
||||||
},
|
|
||||||
"name": "ingest-data-02",
|
|
||||||
"notebookId": 3836944406456362
|
|
||||||
},
|
|
||||||
"nbformat": 4,
|
|
||||||
"nbformat_minor": 1
|
|
||||||
}
|
|
||||||
@@ -1,183 +0,0 @@
|
|||||||
{
|
|
||||||
"cells": [
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Azure ML & Azure Databricks notebooks by Parashar Shah.\n",
|
|
||||||
"\n",
|
|
||||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
|
||||||
"\n",
|
|
||||||
"Licensed under the MIT License."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"We support installing AML SDK as library from GUI. When attaching a library follow this https://docs.databricks.com/user-guide/libraries.html and add the below string as your PyPi package. You can select the option to attach the library to all clusters or just one cluster.\n",
|
|
||||||
"\n",
|
|
||||||
"**install azureml-sdk**\n",
|
|
||||||
"* Source: Upload Python Egg or PyPi\n",
|
|
||||||
"* PyPi Name: `azureml-sdk[databricks]`\n",
|
|
||||||
"* Select Install Library"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import azureml.core\n",
|
|
||||||
"\n",
|
|
||||||
"# Check core SDK version number - based on build number of preview/master.\n",
|
|
||||||
"print(\"SDK version:\", azureml.core.VERSION)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Please specify the Azure subscription Id, resource group name, workspace name, and the region in which you want to create the Azure Machine Learning Workspace.\n",
|
|
||||||
"\n",
|
|
||||||
"You can get the value of your Azure subscription ID from the Azure Portal, and then selecting Subscriptions from the menu on the left.\n",
|
|
||||||
"\n",
|
|
||||||
"For the resource_group, use the name of the resource group that contains your Azure Databricks Workspace.\n",
|
|
||||||
"\n",
|
|
||||||
"NOTE: If you provide a resource group name that does not exist, the resource group will be automatically created. This may or may not succeed in your environment, depending on the permissions you have on your Azure Subscription."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# subscription_id = \"<your-subscription-id>\"\n",
|
|
||||||
"# resource_group = \"<your-existing-resource-group>\"\n",
|
|
||||||
"# workspace_name = \"<a-new-or-existing-workspace; it is unrelated to Databricks workspace>\"\n",
|
|
||||||
"# workspace_region = \"<your-resource group-region>\""
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Set auth to be used by workspace related APIs.\n",
|
|
||||||
"# For automation or CI/CD ServicePrincipalAuthentication can be used.\n",
|
|
||||||
"# https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.authentication.serviceprincipalauthentication?view=azure-ml-py\n",
|
|
||||||
"auth = None"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# import the Workspace class and check the azureml SDK version\n",
|
|
||||||
"# exist_ok checks if workspace exists or not.\n",
|
|
||||||
"\n",
|
|
||||||
"from azureml.core import Workspace\n",
|
|
||||||
"\n",
|
|
||||||
"ws = Workspace.create(name = workspace_name,\n",
|
|
||||||
" subscription_id = subscription_id,\n",
|
|
||||||
" resource_group = resource_group, \n",
|
|
||||||
" location = workspace_region,\n",
|
|
||||||
" auth = auth,\n",
|
|
||||||
" exist_ok=True)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#get workspace details\n",
|
|
||||||
"ws.get_details()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"ws = Workspace(workspace_name = workspace_name,\n",
|
|
||||||
" subscription_id = subscription_id,\n",
|
|
||||||
" resource_group = resource_group,\n",
|
|
||||||
" auth = auth)\n",
|
|
||||||
"\n",
|
|
||||||
"# persist the subscription id, resource group name, and workspace name in aml_config/config.json.\n",
|
|
||||||
"ws.write_config()\n",
|
|
||||||
"#if you need to give a different path/filename please use this\n",
|
|
||||||
"#write_config(path=\"/databricks/driver/aml_config/\",file_name=<alias_conf.cfg>)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"help(Workspace)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# import the Workspace class and check the azureml SDK version\n",
|
|
||||||
"from azureml.core import Workspace\n",
|
|
||||||
"\n",
|
|
||||||
"ws = Workspace.from_config(auth = auth)\n",
|
|
||||||
"#ws = Workspace.from_config(<full path>)\n",
|
|
||||||
"print('Workspace name: ' + ws.name, \n",
|
|
||||||
" 'Azure region: ' + ws.location, \n",
|
|
||||||
" 'Subscription id: ' + ws.subscription_id, \n",
|
|
||||||
" 'Resource group: ' + ws.resource_group, sep = '\\n')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
""
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"authors": [
|
|
||||||
{
|
|
||||||
"name": "pasha"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"kernelspec": {
|
|
||||||
"display_name": "Python 3.6",
|
|
||||||
"language": "python",
|
|
||||||
"name": "python36"
|
|
||||||
},
|
|
||||||
"language_info": {
|
|
||||||
"codemirror_mode": {
|
|
||||||
"name": "ipython",
|
|
||||||
"version": 3
|
|
||||||
},
|
|
||||||
"file_extension": ".py",
|
|
||||||
"mimetype": "text/x-python",
|
|
||||||
"name": "python",
|
|
||||||
"nbconvert_exporter": "python",
|
|
||||||
"pygments_lexer": "ipython3",
|
|
||||||
"version": "3.6.6"
|
|
||||||
},
|
|
||||||
"name": "installation-and-configuration-01",
|
|
||||||
"notebookId": 3688394266452835
|
|
||||||
},
|
|
||||||
"nbformat": 4,
|
|
||||||
"nbformat_minor": 1
|
|
||||||
}
|
|
||||||
how-to-use-azureml/azure-databricks/automl/README.md (new file, 70 lines)
@@ -0,0 +1,70 @@
|
|||||||
# Automated ML introduction

Automated machine learning (automated ML) builds high-quality machine learning models for you by automating model and hyperparameter selection. Bring a labelled dataset that you want to build a model for, and automated ML will give you a high-quality machine learning model that you can use for predictions.

If you are new to data science, automated ML will help you get jumpstarted by simplifying machine learning model building. It abstracts away model selection and hyperparameter selection, and in one step creates a high-quality trained model for you to use.

If you are an experienced data scientist, automated ML will help increase your productivity by intelligently performing the model and hyperparameter selection for your training, and it generates high-quality models much more quickly than manually specifying several combinations of parameters and running training jobs. Automated ML provides visibility and access to all the training jobs and the performance characteristics of the models to help you further tune the pipeline if you desire.

# Install instructions using Azure Databricks

#### For Databricks non-ML runtime 7.1 (Scala 2.12, Spark 3.0.0) and up, install the Automated Machine Learning SDK by adding and running the following command as the first cell of your notebook. This will install the AutoML dependencies specific to your notebook.

%pip install --upgrade --force-reinstall -r https://aka.ms/automl_linux_requirements.txt

#### For Databricks non-ML runtime 7.0 and lower, install the Automated Machine Learning SDK using an init script as shown below before running the notebook.

**Create the Azure Databricks cluster-scoped init script 'azureml-cluster-init.sh' as below.**

1. Create the base directory you want to store the init script in if it does not exist.
```
dbutils.fs.mkdirs("dbfs:/databricks/init/")
```

2. Create the script azureml-cluster-init.sh
```
dbutils.fs.put("/databricks/init/azureml-cluster-init.sh","""
#!/bin/bash
set -ex
/databricks/python/bin/pip install --upgrade --force-reinstall -r https://aka.ms/automl_linux_requirements.txt
""", True)
```

3. Check that the script exists.
```
display(dbutils.fs.ls("dbfs:/databricks/init/azureml-cluster-init.sh"))
```

**Install libraries to the cluster using the init script 'azureml-cluster-init.sh' created in the previous step.**

1. Configure the cluster to run the script.
   * Using the cluster configuration page
     1. On the cluster configuration page, click the Advanced Options toggle.
     1. At the bottom of the page, click the Init Scripts tab.
     1. In the Destination drop-down, select a destination type. Example: 'DBFS'
     1. Specify a path to the init script.
        ```
        dbfs:/databricks/init/azureml-cluster-init.sh
        ```
     1. Click Add
   * Using the API.
     ```
     curl -n -X POST -H 'Content-Type: application/json' -d '{
       "cluster_id": "<cluster_id>",
       "num_workers": <num_workers>,
       "spark_version": "<spark_version>",
       "node_type_id": "<node_type_id>",
       "cluster_log_conf": {
         "dbfs" : {
           "destination": "dbfs:/cluster-logs"
         }
       },
       "init_scripts": [ {
         "dbfs": {
           "destination": "dbfs:/databricks/init/azureml-cluster-init.sh"
         }
       } ]
     }' https://<databricks-instance>/api/2.0/clusters/edit
     ```
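To sanity-check the installation from a notebook attached to the cluster, a quick check such as the following can be run (a minimal sketch):

```python
import azureml.core
import azureml.train.automl  # the import fails if the AutoML dependencies are not installed on the cluster

print("Azure ML SDK version:", azureml.core.VERSION)
```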
@@ -13,12 +13,13 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"We support installing AML SDK as library from GUI. When attaching a library follow this https://docs.databricks.com/user-guide/libraries.html and add the below string as your PyPi package. You can select the option to attach the library to all clusters or just one cluster.\n",
|
"## AutoML Installation\n",
|
||||||
"\n",
|
"\n",
|
||||||
"**install azureml-sdk with Automated ML**\n",
|
"**For Databricks non ML runtime 7.1(scala 2.21, spark 3.0.0) and up, Install AML sdk by running the following command in the first cell of the notebook.**\n",
|
||||||
"* Source: Upload Python Egg or PyPi\n",
|
"\n",
|
||||||
"* PyPi Name: `azureml-sdk[automl]`\n",
|
"%pip install --upgrade --force-reinstall -r https://aka.ms/automl_linux_requirements.txt\n",
|
||||||
"* Select Install Library"
|
"\n",
|
||||||
|
"**For Databricks non ML runtime 7.0 and lower, Install AML sdk using init script as shown in [readme](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/azure-databricks/automl/README.md) before running this notebook.**\n"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -13,12 +13,13 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"We support installing AML SDK as library from GUI. When attaching a library follow this https://docs.databricks.com/user-guide/libraries.html and add the below string as your PyPi package. You can select the option to attach the library to all clusters or just one cluster.\n",
|
"## AutoML Installation\n",
|
||||||
"\n",
|
"\n",
|
||||||
"**install azureml-sdk with Automated ML**\n",
|
"**For Databricks non ML runtime 7.1(scala 2.21, spark 3.0.0) and up, Install AML sdk by running the following command in the first cell of the notebook.**\n",
|
||||||
"* Source: Upload Python Egg or PyPi\n",
|
"\n",
|
||||||
"* PyPi Name: `azureml-sdk[automl]`\n",
|
"%pip install --upgrade --force-reinstall -r https://aka.ms/automl_linux_requirements.txt\n",
|
||||||
"* Select Install Library"
|
"\n",
|
||||||
|
"**For Databricks non ML runtime 7.0 and lower, Install AML sdk using init script as shown in [readme](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/azure-databricks/automl/README.md) before running this notebook.**"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -1,719 +0,0 @@
|
|||||||
{
|
|
||||||
"cells": [
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
|
|
||||||
"Licensed under the MIT License."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"# Using Databricks as a Compute Target from Azure Machine Learning Pipeline\n",
|
|
||||||
"To use Databricks as a compute target from [Azure Machine Learning Pipeline](https://aka.ms/pl-concept), a [DatabricksStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.databricks_step.databricksstep?view=azure-ml-py) is used. This notebook demonstrates the use of DatabricksStep in Azure Machine Learning Pipeline.\n",
|
|
||||||
"\n",
|
|
||||||
"The notebook will show:\n",
|
|
||||||
"1. Running an arbitrary Databricks notebook that the customer has in Databricks workspace\n",
|
|
||||||
"2. Running an arbitrary Python script that the customer has in DBFS\n",
|
|
||||||
"3. Running an arbitrary Python script that is available on local computer (will upload to DBFS, and then run in Databricks) \n",
|
|
||||||
"4. Running a JAR job that the customer has in DBFS.\n",
|
|
||||||
"\n",
|
|
||||||
"## Before you begin:\n",
|
|
||||||
"\n",
|
|
||||||
"1. **Create an Azure Databricks workspace** in the same subscription where you have your Azure Machine Learning workspace. You will need details of this workspace later on to define DatabricksStep. [Click here](https://ms.portal.azure.com/#blade/HubsExtension/Resources/resourceType/Microsoft.Databricks%2Fworkspaces) for more information.\n",
|
|
||||||
"2. **Create PAT (access token)**: Manually create a Databricks access token at the Azure Databricks portal. See [this](https://docs.databricks.com/api/latest/authentication.html#generate-a-token) for more information.\n",
|
|
||||||
"3. **Add demo notebook to ADB**: This notebook has a sample you can use as is. Launch Azure Databricks attached to your Azure Machine Learning workspace and add a new notebook. \n",
|
|
||||||
"4. **Create/attach a Blob storage** for use from ADB"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Add demo notebook to ADB Workspace\n",
|
|
||||||
"Copy and paste the below code to create a new notebook in your ADB workspace."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"```python\n",
|
|
||||||
"# direct access\n",
|
|
||||||
"dbutils.widgets.get(\"myparam\")\n",
|
|
||||||
"p = getArgument(\"myparam\")\n",
|
|
||||||
"print (\"Param -\\'myparam':\")\n",
|
|
||||||
"print (p)\n",
|
|
||||||
"\n",
|
|
||||||
"dbutils.widgets.get(\"input\")\n",
|
|
||||||
"i = getArgument(\"input\")\n",
|
|
||||||
"print (\"Param -\\'input':\")\n",
|
|
||||||
"print (i)\n",
|
|
||||||
"\n",
|
|
||||||
"dbutils.widgets.get(\"output\")\n",
|
|
||||||
"o = getArgument(\"output\")\n",
|
|
||||||
"print (\"Param -\\'output':\")\n",
|
|
||||||
"print (o)\n",
|
|
||||||
"\n",
|
|
||||||
"n = i + \"/testdata.txt\"\n",
|
|
||||||
"df = spark.read.csv(n)\n",
|
|
||||||
"\n",
|
|
||||||
"display (df)\n",
|
|
||||||
"\n",
|
|
||||||
"data = [('value1', 'value2')]\n",
|
|
||||||
"df2 = spark.createDataFrame(data)\n",
|
|
||||||
"\n",
|
|
||||||
"z = o + \"/output.txt\"\n",
|
|
||||||
"df2.write.csv(z)\n",
|
|
||||||
"```"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Azure Machine Learning and Pipeline SDK-specific imports"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import os\n",
|
|
||||||
"import azureml.core\n",
|
|
||||||
"from azureml.core.runconfig import JarLibrary\n",
|
|
||||||
"from azureml.core.compute import ComputeTarget, DatabricksCompute\n",
|
|
||||||
"from azureml.exceptions import ComputeTargetException\n",
|
|
||||||
"from azureml.core import Workspace, Experiment\n",
|
|
||||||
"from azureml.pipeline.core import Pipeline, PipelineData\n",
|
|
||||||
"from azureml.pipeline.steps import DatabricksStep\n",
|
|
||||||
"from azureml.core.datastore import Datastore\n",
|
|
||||||
"from azureml.data.data_reference import DataReference\n",
|
|
||||||
"\n",
|
|
||||||
"# Check core SDK version number\n",
|
|
||||||
"print(\"SDK version:\", azureml.core.VERSION)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Initialize Workspace\n",
|
|
||||||
"\n",
|
|
||||||
"Initialize a workspace object from persisted configuration. Make sure the config file is present at .\\config.json"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"ws = Workspace.from_config()\n",
|
|
||||||
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Attach Databricks compute target\n",
|
|
||||||
"Next, you need to add your Databricks workspace to Azure Machine Learning as a compute target and give it a name. You will use this name to refer to your Databricks workspace compute target inside Azure Machine Learning.\n",
|
|
||||||
"\n",
|
|
||||||
"- **Resource Group** - The resource group name of your Azure Machine Learning workspace\n",
|
|
||||||
"- **Databricks Workspace Name** - The workspace name of your Azure Databricks workspace\n",
|
|
||||||
"- **Databricks Access Token** - The access token you created in ADB\n",
|
|
||||||
"\n",
|
|
||||||
"**The Databricks workspace need to be present in the same subscription as your AML workspace**"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Replace with your account info before running.\n",
|
|
||||||
" \n",
|
|
||||||
"db_compute_name=os.getenv(\"DATABRICKS_COMPUTE_NAME\", \"<my-databricks-compute-name>\") # Databricks compute name\n",
|
|
||||||
"db_resource_group=os.getenv(\"DATABRICKS_RESOURCE_GROUP\", \"<my-db-resource-group>\") # Databricks resource group\n",
|
|
||||||
"db_workspace_name=os.getenv(\"DATABRICKS_WORKSPACE_NAME\", \"<my-db-workspace-name>\") # Databricks workspace name\n",
|
|
||||||
"db_access_token=os.getenv(\"DATABRICKS_ACCESS_TOKEN\", \"<my-access-token>\") # Databricks access token\n",
|
|
||||||
" \n",
|
|
||||||
"try:\n",
|
|
||||||
" databricks_compute = DatabricksCompute(workspace=ws, name=db_compute_name)\n",
|
|
||||||
" print('Compute target {} already exists'.format(db_compute_name))\n",
|
|
||||||
"except ComputeTargetException:\n",
|
|
||||||
" print('Compute not found, will use below parameters to attach new one')\n",
|
|
||||||
" print('db_compute_name {}'.format(db_compute_name))\n",
|
|
||||||
" print('db_resource_group {}'.format(db_resource_group))\n",
|
|
||||||
" print('db_workspace_name {}'.format(db_workspace_name))\n",
|
|
||||||
" print('db_access_token {}'.format(db_access_token))\n",
|
|
||||||
" \n",
|
|
||||||
" config = DatabricksCompute.attach_configuration(\n",
|
|
||||||
" resource_group = db_resource_group,\n",
|
|
||||||
" workspace_name = db_workspace_name,\n",
|
|
||||||
" access_token= db_access_token)\n",
|
|
||||||
" databricks_compute=ComputeTarget.attach(ws, db_compute_name, config)\n",
|
|
||||||
" databricks_compute.wait_for_completion(True)\n"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Data Connections with Inputs and Outputs\n",
|
|
||||||
"The DatabricksStep supports Azure Bloband ADLS for inputs and outputs. You also will need to define a [Secrets](https://docs.azuredatabricks.net/user-guide/secrets/index.html) scope to enable authentication to external data sources such as Blob and ADLS from Databricks.\n",
|
|
||||||
"\n",
|
|
||||||
"- Databricks documentation on [Azure Blob](https://docs.azuredatabricks.net/spark/latest/data-sources/azure/azure-storage.html)\n",
|
|
||||||
"- Databricks documentation on [ADLS](https://docs.databricks.com/spark/latest/data-sources/azure/azure-datalake.html)\n",
|
|
||||||
"\n",
|
|
||||||
"### Type of Data Access\n",
|
|
||||||
"Databricks allows to interact with Azure Blob and ADLS in two ways.\n",
|
|
||||||
"- **Direct Access**: Databricks allows you to interact with Azure Blob or ADLS URIs directly. The input or output URIs will be mapped to a Databricks widget param in the Databricks notebook.\n",
|
|
||||||
"- **Mounting**: You will be supplied with additional parameters and secrets that will enable you to mount your ADLS or Azure Blob input or output location in your Databricks notebook."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### Direct Access: Python sample code\n",
|
|
||||||
"If you have a data reference named \"input\" it will represent the URI of the input and you can access it directly in the Databricks python notebook like so:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"```python\n",
|
|
||||||
"dbutils.widgets.get(\"input\")\n",
|
|
||||||
"y = getArgument(\"input\")\n",
|
|
||||||
"df = spark.read.csv(y)\n",
|
|
||||||
"```"
|
|
||||||
]
|
|
||||||
},
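{
"cell_type": "markdown",
"metadata": {},
"source": [
"Likewise, as a sketch (assuming an output data reference named \"output\", such as the one defined later in this notebook), the output URI can be fetched from its widget param and written to directly:\n",
"\n",
"```python\n",
"# The \"output\" widget param carries the output URI\n",
"dbutils.widgets.get(\"output\")\n",
"out_path = getArgument(\"output\")\n",
"\n",
"# Write the dataframe from the previous sample to the output location\n",
"df.write.mode(\"overwrite\").csv(out_path)\n",
"```"
]
},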
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### Mounting: Python sample code for Azure Blob\n",
|
|
||||||
"Given an Azure Blob data reference named \"input\" the following widget params will be made available in the Databricks notebook:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"```python\n",
|
|
||||||
"# This contains the input URI\n",
|
|
||||||
"dbutils.widgets.get(\"input\")\n",
|
|
||||||
"myinput_uri = getArgument(\"input\")\n",
|
|
||||||
"\n",
|
|
||||||
"# How to get the input datastore name inside ADB notebook\n",
|
|
||||||
"# This contains the name of a Databricks secret (in the predefined \"amlscope\" secret scope) \n",
|
|
||||||
"# that contians an access key or sas for the Azure Blob input (this name is obtained by appending \n",
|
|
||||||
"# the name of the input with \"_blob_secretname\". \n",
|
|
||||||
"dbutils.widgets.get(\"input_blob_secretname\") \n",
|
|
||||||
"myinput_blob_secretname = getArgument(\"input_blob_secretname\")\n",
|
|
||||||
"\n",
|
|
||||||
"# This contains the required configuration for mounting\n",
|
|
||||||
"dbutils.widgets.get(\"input_blob_config\")\n",
|
|
||||||
"myinput_blob_config = getArgument(\"input_blob_config\")\n",
|
|
||||||
"\n",
|
|
||||||
"# Usage\n",
|
|
||||||
"dbutils.fs.mount(\n",
|
|
||||||
" source = myinput_uri,\n",
|
|
||||||
" mount_point = \"/mnt/input\",\n",
|
|
||||||
" extra_configs = {myinput_blob_config:dbutils.secrets.get(scope = \"amlscope\", key = myinput_blob_secretname)})\n",
|
|
||||||
"```"
|
|
||||||
]
|
|
||||||
},
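{
"cell_type": "markdown",
"metadata": {},
"source": [
"Once mounted (a sketch that assumes the \"/mnt/input\" mount point used above), the data can be read through the DBFS path and the mount released when you are done:\n",
"\n",
"```python\n",
"# Read the mounted input through DBFS\n",
"df = spark.read.csv(\"/mnt/input\")\n",
"\n",
"# Release the mount point when finished\n",
"dbutils.fs.unmount(\"/mnt/input\")\n",
"```"
]
},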
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### Mounting: Python sample code for ADLS\n",
|
|
||||||
"Given an ADLS data reference named \"input\" the following widget params will be made available in the Databricks notebook:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"```python\n",
|
|
||||||
"# This contains the input URI\n",
|
|
||||||
"dbutils.widgets.get(\"input\") \n",
|
|
||||||
"myinput_uri = getArgument(\"input\")\n",
|
|
||||||
"\n",
|
|
||||||
"# This contains the client id for the service principal \n",
|
|
||||||
"# that has access to the adls input\n",
|
|
||||||
"dbutils.widgets.get(\"input_adls_clientid\") \n",
|
|
||||||
"myinput_adls_clientid = getArgument(\"input_adls_clientid\")\n",
|
|
||||||
"\n",
|
|
||||||
"# This contains the name of a Databricks secret (in the predefined \"amlscope\" secret scope) \n",
|
|
||||||
"# that contains the secret for the above mentioned service principal\n",
|
|
||||||
"dbutils.widgets.get(\"input_adls_secretname\") \n",
|
|
||||||
"myinput_adls_secretname = getArgument(\"input_adls_secretname\")\n",
|
|
||||||
"\n",
|
|
||||||
"# This contains the refresh url for the mounting configs\n",
|
|
||||||
"dbutils.widgets.get(\"input_adls_refresh_url\") \n",
|
|
||||||
"myinput_adls_refresh_url = getArgument(\"input_adls_refresh_url\")\n",
|
|
||||||
"\n",
|
|
||||||
"# Usage \n",
|
|
||||||
"configs = {\"dfs.adls.oauth2.access.token.provider.type\": \"ClientCredential\",\n",
|
|
||||||
" \"dfs.adls.oauth2.client.id\": myinput_adls_clientid,\n",
|
|
||||||
" \"dfs.adls.oauth2.credential\": dbutils.secrets.get(scope = \"amlscope\", key =myinput_adls_secretname),\n",
|
|
||||||
" \"dfs.adls.oauth2.refresh.url\": myinput_adls_refresh_url}\n",
|
|
||||||
"\n",
|
|
||||||
"dbutils.fs.mount(\n",
|
|
||||||
" source = myinput_uri,\n",
|
|
||||||
" mount_point = \"/mnt/output\",\n",
|
|
||||||
" extra_configs = configs)\n",
|
|
||||||
"```"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Use Databricks from Azure Machine Learning Pipeline\n",
|
|
||||||
"To use Databricks as a compute target from Azure Machine Learning Pipeline, a DatabricksStep is used. Let's define a datasource (via DataReference) and intermediate data (via PipelineData) to be used in DatabricksStep."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Use the default blob storage\n",
|
|
||||||
"def_blob_store = Datastore(ws, \"workspaceblobstore\")\n",
|
|
||||||
"print('Datastore {} will be used'.format(def_blob_store.name))\n",
|
|
||||||
"\n",
|
|
||||||
"# We are uploading a sample file in the local directory to be used as a datasource\n",
|
|
||||||
"def_blob_store.upload_files(files=[\"./testdata.txt\"], target_path=\"dbtest\", overwrite=False)\n",
|
|
||||||
"\n",
|
|
||||||
"step_1_input = DataReference(datastore=def_blob_store, path_on_datastore=\"dbtest\",\n",
|
|
||||||
" data_reference_name=\"input\")\n",
|
|
||||||
"\n",
|
|
||||||
"step_1_output = PipelineData(\"output\", datastore=def_blob_store)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### Add a DatabricksStep\n",
|
|
||||||
"Adds a Databricks notebook as a step in a Pipeline.\n",
|
|
||||||
"- ***name:** Name of the Module\n",
|
|
||||||
"- **inputs:** List of input connections for data consumed by this step. Fetch this inside the notebook using dbutils.widgets.get(\"input\")\n",
|
|
||||||
"- **outputs:** List of output port definitions for outputs produced by this step. Fetch this inside the notebook using dbutils.widgets.get(\"output\")\n",
|
|
||||||
"- **existing_cluster_id:** Cluster ID of an existing Interactive cluster on the Databricks workspace. If you are providing this, do not provide any of the parameters below that are used to create a new cluster such as spark_version, node_type, etc.\n",
|
|
||||||
"- **spark_version:** Version of spark for the databricks run cluster. default value: 4.0.x-scala2.11\n",
|
|
||||||
"- **node_type:** Azure vm node types for the databricks run cluster. default value: Standard_D3_v2\n",
|
|
||||||
"- **num_workers:** Specifies a static number of workers for the databricks run cluster\n",
|
|
||||||
"- **min_workers:** Specifies a min number of workers to use for auto-scaling the databricks run cluster\n",
|
|
||||||
"- **max_workers:** Specifies a max number of workers to use for auto-scaling the databricks run cluster\n",
|
|
||||||
"- **spark_env_variables:** Spark environment variables for the databricks run cluster (dictionary of {str:str}). default value: {'PYSPARK_PYTHON': '/databricks/python3/bin/python3'}\n",
|
|
||||||
"- **notebook_path:** Path to the notebook in the databricks instance. If you are providing this, do not provide python script related paramaters or JAR related parameters.\n",
|
|
||||||
"- **notebook_params:** Parameters for the databricks notebook (dictionary of {str:str}). Fetch this inside the notebook using dbutils.widgets.get(\"myparam\")\n",
|
|
||||||
"- **python_script_path:** The path to the python script in the DBFS or S3. If you are providing this, do not provide python_script_name which is used for uploading script from local machine.\n",
|
|
||||||
"- **python_script_params:** Parameters for the python script (list of str)\n",
|
|
||||||
"- **main_class_name:** The name of the entry point in a JAR module. If you are providing this, do not provide any python script or notebook related parameters.\n",
|
|
||||||
"- **jar_params:** Parameters for the JAR module (list of str)\n",
|
|
||||||
"- **python_script_name:** name of a python script on your local machine (relative to source_directory). If you are providing this do not provide python_script_path which is used to execute a remote python script; or any of the JAR or notebook related parameters.\n",
|
|
||||||
"- **source_directory:** folder that contains the script and other files\n",
|
|
||||||
"- **hash_paths:** list of paths to hash to detect a change in source_directory (script file is always hashed)\n",
|
|
||||||
"- **run_name:** Name in databricks for this run\n",
|
|
||||||
"- **timeout_seconds:** Timeout for the databricks run\n",
|
|
||||||
"- **runconfig:** Runconfig to use. Either pass runconfig or each library type as a separate parameter but do not mix the two\n",
|
|
||||||
"- **maven_libraries:** maven libraries for the databricks run\n",
|
|
||||||
"- **pypi_libraries:** pypi libraries for the databricks run\n",
|
|
||||||
"- **egg_libraries:** egg libraries for the databricks run\n",
|
|
||||||
"- **jar_libraries:** jar libraries for the databricks run\n",
|
|
||||||
"- **rcran_libraries:** rcran libraries for the databricks run\n",
|
|
||||||
"- **compute_target:** Azure Databricks compute\n",
|
|
||||||
"- **allow_reuse:** Whether the step should reuse previous results when run with the same settings/inputs\n",
|
|
||||||
"- **version:** Optional version tag to denote a change in functionality for the step\n",
|
|
||||||
"\n",
|
|
||||||
"\\* *denotes required fields* \n",
|
|
||||||
"*You must provide exactly one of num_workers or min_workers and max_workers paramaters* \n",
|
|
||||||
"*You must provide exactly one of databricks_compute or databricks_compute_name parameters*\n",
|
|
||||||
"\n",
|
|
||||||
"## Use runconfig to specify library dependencies\n",
|
|
||||||
"You can use a runconfig to specify the library dependencies for your cluster in Databricks. The runconfig will contain a databricks section as follows:\n",
|
|
||||||
"\n",
|
|
||||||
"```yaml\n",
|
|
||||||
"environment:\n",
|
|
||||||
"# Databricks details\n",
|
|
||||||
" databricks:\n",
|
|
||||||
"# List of maven libraries.\n",
|
|
||||||
" mavenLibraries:\n",
|
|
||||||
" - coordinates: org.jsoup:jsoup:1.7.1\n",
|
|
||||||
" repo: ''\n",
|
|
||||||
" exclusions:\n",
|
|
||||||
" - slf4j:slf4j\n",
|
|
||||||
" - '*:hadoop-client'\n",
|
|
||||||
"# List of PyPi libraries\n",
|
|
||||||
" pypiLibraries:\n",
|
|
||||||
" - package: beautifulsoup4\n",
|
|
||||||
" repo: ''\n",
|
|
||||||
"# List of RCran libraries\n",
|
|
||||||
" rcranLibraries:\n",
|
|
||||||
" -\n",
|
|
||||||
"# Coordinates.\n",
|
|
||||||
" package: ada\n",
|
|
||||||
"# Repo\n",
|
|
||||||
" repo: http://cran.us.r-project.org\n",
|
|
||||||
"# List of JAR libraries\n",
|
|
||||||
" jarLibraries:\n",
|
|
||||||
" -\n",
|
|
||||||
"# Coordinates.\n",
|
|
||||||
" library: dbfs:/mnt/libraries/library.jar\n",
|
|
||||||
"# List of Egg libraries\n",
|
|
||||||
" eggLibraries:\n",
|
|
||||||
" -\n",
|
|
||||||
"# Coordinates.\n",
|
|
||||||
" library: dbfs:/mnt/libraries/library.egg\n",
|
|
||||||
"```\n",
|
|
||||||
"\n",
|
|
||||||
"You can then create a RunConfiguration object using this file and pass it as the runconfig parameter to DatabricksStep.\n",
|
|
||||||
"```python\n",
|
|
||||||
"from azureml.core.runconfig import RunConfiguration\n",
|
|
||||||
"\n",
|
|
||||||
"runconfig = RunConfiguration()\n",
|
|
||||||
"runconfig.load(path='<directory_where_runconfig_is_stored>', name='<runconfig_file_name>')\n",
|
|
||||||
"```"
|
|
||||||
]
|
|
||||||
},
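{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an illustrative sketch (the cluster sizes, package, and notebook path below are placeholders, not values used elsewhere in this notebook), a step can also autoscale between min_workers and max_workers and install a PyPI package directly instead of through a runconfig:\n",
"\n",
"```python\n",
"from azureml.pipeline.steps import DatabricksStep\n",
"from azureml.core.databricks import PyPiLibrary\n",
"\n",
"db_autoscale_step = DatabricksStep(\n",
"    name=\"DBNotebookAutoscale\",\n",
"    inputs=[step_1_input],\n",
"    outputs=[step_1_output],\n",
"    min_workers=1,   # lower bound for autoscaling\n",
"    max_workers=4,   # upper bound for autoscaling\n",
"    pypi_libraries=[PyPiLibrary(package=\"scikit-learn\")],  # example library\n",
"    notebook_path=\"<my-databricks-notebook-path>\",  # placeholder\n",
"    run_name='DB_Notebook_autoscale_demo',\n",
"    compute_target=databricks_compute,\n",
"    allow_reuse=True\n",
")\n",
"```"
]
},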
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### 1. Running the demo notebook already added to the Databricks workspace\n",
|
|
||||||
"Create a notebook in the Azure Databricks workspace, and provide the path to that notebook as the value associated with the environment variable \"DATABRICKS_NOTEBOOK_PATH\". This will then set the variable\u00c2\u00a0notebook_path\u00c2\u00a0when you run the code cell below:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {
|
|
||||||
"tags": [
|
|
||||||
"databricksstep-remarks-sample"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"notebook_path=os.getenv(\"DATABRICKS_NOTEBOOK_PATH\", \"<my-databricks-notebook-path>\") # Databricks notebook path\n",
|
|
||||||
"\n",
|
|
||||||
"dbNbStep = DatabricksStep(\n",
|
|
||||||
" name=\"DBNotebookInWS\",\n",
|
|
||||||
" inputs=[step_1_input],\n",
|
|
||||||
" outputs=[step_1_output],\n",
|
|
||||||
" num_workers=1,\n",
|
|
||||||
" notebook_path=notebook_path,\n",
|
|
||||||
" notebook_params={'myparam': 'testparam'},\n",
|
|
||||||
" run_name='DB_Notebook_demo',\n",
|
|
||||||
" compute_target=databricks_compute,\n",
|
|
||||||
" allow_reuse=True\n",
|
|
||||||
")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### Build and submit the Experiment"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"steps = [dbNbStep]\n",
|
|
||||||
"pipeline = Pipeline(workspace=ws, steps=steps)\n",
|
|
||||||
"pipeline_run = Experiment(ws, 'DB_Notebook_demo').submit(pipeline)\n",
|
|
||||||
"pipeline_run.wait_for_completion()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### View Run Details"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from azureml.widgets import RunDetails\n",
|
|
||||||
"RunDetails(pipeline_run).show()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### 2. Running a Python script from DBFS\n",
|
|
||||||
"This shows how to run a Python script in DBFS. \n",
|
|
||||||
"\n",
|
|
||||||
"To complete this, you will need to first upload the Python script in your local machine to DBFS using the [CLI](https://docs.azuredatabricks.net/user-guide/dbfs-databricks-file-system.html). The CLI command is given below:\n",
|
|
||||||
"\n",
|
|
||||||
"```\n",
|
|
||||||
"dbfs cp ./train-db-dbfs.py dbfs:/train-db-dbfs.py\n",
|
|
||||||
"```\n",
|
|
||||||
"\n",
|
|
||||||
"The code in the below cell assumes that you have completed the previous step of uploading the script `train-db-dbfs.py` to the root folder in DBFS."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"python_script_path = os.getenv(\"DATABRICKS_PYTHON_SCRIPT_PATH\", \"<my-databricks-python-script-path>\") # Databricks python script path\n",
|
|
||||||
"\n",
|
|
||||||
"dbPythonInDbfsStep = DatabricksStep(\n",
|
|
||||||
" name=\"DBPythonInDBFS\",\n",
|
|
||||||
" inputs=[step_1_input],\n",
|
|
||||||
" num_workers=1,\n",
|
|
||||||
" python_script_path=python_script_path,\n",
|
|
||||||
" python_script_params={'--input_data'},\n",
|
|
||||||
" run_name='DB_Python_demo',\n",
|
|
||||||
" compute_target=databricks_compute,\n",
|
|
||||||
" allow_reuse=True\n",
|
|
||||||
")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### Build and submit the Experiment"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"steps = [dbPythonInDbfsStep]\n",
|
|
||||||
"pipeline = Pipeline(workspace=ws, steps=steps)\n",
|
|
||||||
"pipeline_run = Experiment(ws, 'DB_Python_demo').submit(pipeline)\n",
|
|
||||||
"pipeline_run.wait_for_completion()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### View Run Details"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from azureml.widgets import RunDetails\n",
|
|
||||||
"RunDetails(pipeline_run).show()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### 3. Running a Python script in Databricks that currenlty is in local computer\n",
|
|
||||||
"To run a Python script that is currently in your local computer, follow the instructions below. \n",
|
|
||||||
"\n",
|
|
||||||
"The commented out code below code assumes that you have `train-db-local.py` in the `scripts` subdirectory under the current working directory.\n",
|
|
||||||
"\n",
|
|
||||||
"In this case, the Python script will be uploaded first to DBFS, and then the script will be run in Databricks."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"python_script_name = \"train-db-local.py\"\n",
|
|
||||||
"source_directory = \".\"\n",
|
|
||||||
"\n",
|
|
||||||
"dbPythonInLocalMachineStep = DatabricksStep(\n",
|
|
||||||
" name=\"DBPythonInLocalMachine\",\n",
|
|
||||||
" inputs=[step_1_input],\n",
|
|
||||||
" num_workers=1,\n",
|
|
||||||
" python_script_name=python_script_name,\n",
|
|
||||||
" source_directory=source_directory,\n",
|
|
||||||
" run_name='DB_Python_Local_demo',\n",
|
|
||||||
" compute_target=databricks_compute,\n",
|
|
||||||
" allow_reuse=True\n",
|
|
||||||
")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### Build and submit the Experiment"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"steps = [dbPythonInLocalMachineStep]\n",
|
|
||||||
"pipeline = Pipeline(workspace=ws, steps=steps)\n",
|
|
||||||
"pipeline_run = Experiment(ws, 'DB_Python_Local_demo').submit(pipeline)\n",
|
|
||||||
"pipeline_run.wait_for_completion()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### View Run Details"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from azureml.widgets import RunDetails\n",
|
|
||||||
"RunDetails(pipeline_run).show()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"### 4. Running a JAR job that is alreay added in DBFS\n",
|
|
||||||
"To run a JAR job that is already uploaded to DBFS, follow the instructions below. You will first upload the JAR file to DBFS using the [CLI](https://docs.azuredatabricks.net/user-guide/dbfs-databricks-file-system.html).\n",
|
|
||||||
"\n",
|
|
||||||
"The commented out code in the below cell assumes that you have uploaded `train-db-dbfs.jar` to the root folder in DBFS. You can upload `train-db-dbfs.jar` to the root folder in DBFS using this commandline so you can use `jar_library_dbfs_path = \"dbfs:/train-db-dbfs.jar\"`:\n",
|
|
||||||
"\n",
|
|
||||||
"```\n",
|
|
||||||
"dbfs cp ./train-db-dbfs.jar dbfs:/train-db-dbfs.jar\n",
|
|
||||||
"```"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"main_jar_class_name = \"com.microsoft.aeva.Main\"\n",
|
|
||||||
"jar_library_dbfs_path = os.getenv(\"DATABRICKS_JAR_LIB_PATH\", \"<my-databricks-jar-lib-path>\") # Databricks jar library path\n",
|
|
||||||
"\n",
|
|
||||||
"dbJarInDbfsStep = DatabricksStep(\n",
|
|
||||||
" name=\"DBJarInDBFS\",\n",
|
|
||||||
" inputs=[step_1_input],\n",
|
|
||||||
" num_workers=1,\n",
|
|
||||||
" main_class_name=main_jar_class_name,\n",
|
|
||||||
" jar_params={'arg1', 'arg2'},\n",
|
|
||||||
" run_name='DB_JAR_demo',\n",
|
|
||||||
" jar_libraries=[JarLibrary(jar_library_dbfs_path)],\n",
|
|
||||||
" compute_target=databricks_compute,\n",
|
|
||||||
" allow_reuse=True\n",
|
|
||||||
")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### Build and submit the Experiment"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"steps = [dbJarInDbfsStep]\n",
|
|
||||||
"pipeline = Pipeline(workspace=ws, steps=steps)\n",
|
|
||||||
"pipeline_run = Experiment(ws, 'DB_JAR_demo').submit(pipeline)\n",
|
|
||||||
"pipeline_run.wait_for_completion()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"#### View Run Details"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from azureml.widgets import RunDetails\n",
|
|
||||||
"RunDetails(pipeline_run).show()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"# Next: ADLA as a Compute Target\n",
|
|
||||||
"To use ADLA as a compute target from Azure Machine Learning Pipeline, a AdlaStep is used. This [notebook](https://aka.ms/pl-adla) demonstrates the use of AdlaStep in Azure Machine Learning Pipeline."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
""
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"authors": [
|
|
||||||
{
|
|
||||||
"name": "diray"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"kernelspec": {
|
|
||||||
"display_name": "Python 3.6",
|
|
||||||
"language": "python",
|
|
||||||
"name": "python36"
|
|
||||||
},
|
|
||||||
"language_info": {
|
|
||||||
"codemirror_mode": {
|
|
||||||
"name": "ipython",
|
|
||||||
"version": 3
|
|
||||||
},
|
|
||||||
"file_extension": ".py",
|
|
||||||
"mimetype": "text/x-python",
|
|
||||||
"name": "python",
|
|
||||||
"nbconvert_exporter": "python",
|
|
||||||
"pygments_lexer": "ipython3",
|
|
||||||
"version": "3.6.2"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nbformat": 4,
|
|
||||||
"nbformat_minor": 2
|
|
||||||
}
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
Test1
|
|
||||||
@@ -1,5 +0,0 @@
|
|||||||
# Copyright (c) Microsoft. All rights reserved.
|
|
||||||
# Licensed under the MIT license.
|
|
||||||
|
|
||||||
print("In train.py")
|
|
||||||
print("As a data scientist, this is where I use my training code.")
|
|
||||||
@@ -1,5 +0,0 @@
|
|||||||
# Copyright (c) Microsoft. All rights reserved.
|
|
||||||
# Licensed under the MIT license.
|
|
||||||
|
|
||||||
print("In train.py")
|
|
||||||
print("As a data scientist, this is where I use my training code.")
|
|
||||||
@@ -1,36 +0,0 @@
|
|||||||
## Examples to get started with Azure Machine Learning SDK for R
|
|
||||||
|
|
||||||
Learn how to use Azure Machine Learning SDK for R for experimentation and model management.
|
|
||||||
|
|
||||||
As a pre-requisite, go through the [Installation](vignettes/installation.Rmd) and [Configuration](vignettes/configuration.Rmd) vignettes to first install the package and set up your Azure Machine Learning Workspace unless you are running these examples on an Azure Machine Learning compute instance. Azure Machine Learning compute instances have the Azure Machine Learning SDK pre-installed and your workspace details pre-configured.
|
|
||||||
|
|
||||||
|
|
||||||
Samples
|
|
||||||
* Deployment
|
|
||||||
* [deploy-to-aci](./samples/deployment/deploy-to-aci): Deploy a model as a web service to Azure Container Instances (ACI).
|
|
||||||
* [deploy-to-local](./samples/deployment/deploy-to-local): Deploy a model as a web service locally.
|
|
||||||
* Training
|
|
||||||
* [train-on-amlcompute](./samples/training/train-on-amlcompute): Train a model on a remote AmlCompute cluster.
|
|
||||||
* [train-on-local](./samples/training/train-on-local): Train a model locally with Docker.
|
|
||||||
|
|
||||||
Vignettes
|
|
||||||
* [deploy-to-aks](./vignettes/deploy-to-aks): Production deploy a model as a web service to Azure Kubernetes Service (AKS).
|
|
||||||
* [hyperparameter-tune-with-keras](./vignettes/hyperparameter-tune-with-keras): Hyperparameter tune a Keras model using HyperDrive, Azure ML's hyperparameter tuning functionality.
|
|
||||||
* [train-and-deploy-to-aci](./vignettes/train-and-deploy-to-aci): Train a caret model and deploy as a web service to Azure Container Instances (ACI).
|
|
||||||
* [train-with-tensorflow](./vignettes/train-with-tensorflow): Train a deep learning TensorFlow model with Azure ML.
|
|
||||||
|
|
||||||
Find more information on the [official documentation site for Azure Machine Learning SDK for R](https://azure.github.io/azureml-sdk-for-r/).
|
|
||||||
|
|
||||||
|
|
||||||
### Troubleshooting
|
|
||||||
|
|
||||||
- If the following error occurs when submitting an experiment using RStudio:
|
|
||||||
```R
|
|
||||||
Error in py_call_impl(callable, dots$args, dots$keywords) :
|
|
||||||
PermissionError: [Errno 13] Permission denied
|
|
||||||
```
|
|
||||||
Move the files for your project into a subdirectory and reset the working directory to that directory before re-submitting.
|
|
||||||
|
|
||||||
In order to submit an experiment, the Azure ML SDK must create a .zip file of the project directory to send to the service. However,
|
|
||||||
the SDK does not have permission to write into the .Rproj.user subdirectory that is automatically created during an RStudio
|
|
||||||
session. For this reason, the recommended best practice is to isolate project files into their own directory.
|
|
||||||
@@ -1,11 +0,0 @@
|
|||||||
## Azure Machine Learning samples
|
|
||||||
These samples are short code examples for using Azure Machine Learning SDK for R. If you are new to the R SDK, we recommend that you first take a look at the more detailed end-to-end [vignettes](../vignettes).
|
|
||||||
|
|
||||||
Before running a sample in RStudio, set the working directory to the folder that contains the sample script in RStudio using `setwd(dirname)` or Session -> Set Working Directory -> To Source File Location. Each vignette assumes that the data and scripts are in the current working directory.
|
|
||||||
|
|
||||||
1. [train-on-amlcompute](training/train-on-amlcompute): Train a model on a remote AmlCompute cluster.
|
|
||||||
2. [train-on-local](training/train-on-local): Train a model locally with Docker.
|
|
||||||
3. [deploy-to-aci](deployment/deploy-to-aci): Deploy a model as a web service to Azure Container Instances (ACI).
|
|
||||||
4. [deploy-to-local](deployment/deploy-to-local): Deploy a model as a web service locally.
|
|
||||||
|
|
||||||
> Before you run these samples, make sure you have an Azure Machine Learning workspace. You can follow the [configuration vignette](../vignettes/configuration.Rmd) to set up a workspace. (You do not need to do this if you are running these examples on an Azure Machine Learning compute instance).
|
|
||||||
@@ -1,59 +0,0 @@
|
|||||||
# Copyright(c) Microsoft Corporation.
|
|
||||||
# Licensed under the MIT license.
|
|
||||||
|
|
||||||
library(azuremlsdk)
|
|
||||||
library(jsonlite)
|
|
||||||
|
|
||||||
ws <- load_workspace_from_config()
|
|
||||||
|
|
||||||
# Register the model
|
|
||||||
model <- register_model(ws, model_path = "project_files/model.rds",
|
|
||||||
model_name = "model.rds")
|
|
||||||
|
|
||||||
# Create environment
|
|
||||||
r_env <- r_environment(name = "r_env")
|
|
||||||
|
|
||||||
# Create inference config
|
|
||||||
inference_config <- inference_config(
|
|
||||||
entry_script = "score.R",
|
|
||||||
source_directory = "project_files",
|
|
||||||
environment = r_env)
|
|
||||||
|
|
||||||
# Create ACI deployment config
|
|
||||||
deployment_config <- aci_webservice_deployment_config(cpu_cores = 1,
|
|
||||||
memory_gb = 1)
|
|
||||||
|
|
||||||
# Deploy the web service
|
|
||||||
service <- deploy_model(ws,
|
|
||||||
'rservice',
|
|
||||||
list(model),
|
|
||||||
inference_config,
|
|
||||||
deployment_config)
|
|
||||||
wait_for_deployment(service, show_output = TRUE)
|
|
||||||
|
|
||||||
# If you encounter any issue in deploying the webservice, please visit
|
|
||||||
# https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-troubleshoot-deployment
|
|
||||||
|
|
||||||
# Inferencing
|
|
||||||
# versicolor
|
|
||||||
plant <- data.frame(Sepal.Length = 6.4,
|
|
||||||
Sepal.Width = 2.8,
|
|
||||||
Petal.Length = 4.6,
|
|
||||||
Petal.Width = 1.8)
|
|
||||||
# setosa
|
|
||||||
plant <- data.frame(Sepal.Length = 5.1,
|
|
||||||
Sepal.Width = 3.5,
|
|
||||||
Petal.Length = 1.4,
|
|
||||||
Petal.Width = 0.2)
|
|
||||||
# virginica
|
|
||||||
plant <- data.frame(Sepal.Length = 6.7,
|
|
||||||
Sepal.Width = 3.3,
|
|
||||||
Petal.Length = 5.2,
|
|
||||||
Petal.Width = 2.3)
|
|
||||||
|
|
||||||
# Test the web service
|
|
||||||
predicted_val <- invoke_webservice(service, toJSON(plant))
|
|
||||||
predicted_val
|
|
||||||
|
|
||||||
# Delete the web service
|
|
||||||
delete_webservice(service)
|
|
||||||
Binary file not shown.
@@ -1,17 +0,0 @@
|
|||||||
# Copyright(c) Microsoft Corporation.
|
|
||||||
# Licensed under the MIT license.
|
|
||||||
|
|
||||||
library(jsonlite)
|
|
||||||
|
|
||||||
init <- function() {
|
|
||||||
model_path <- Sys.getenv("AZUREML_MODEL_DIR")
|
|
||||||
model <- readRDS(file.path(model_path, "model.rds"))
|
|
||||||
message("model is loaded")
|
|
||||||
|
|
||||||
function(data) {
|
|
||||||
plant <- as.data.frame(fromJSON(data))
|
|
||||||
prediction <- predict(model, plant)
|
|
||||||
result <- as.character(prediction)
|
|
||||||
toJSON(result)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,112 +0,0 @@
|
|||||||
# Copyright(c) Microsoft Corporation.
|
|
||||||
# Licensed under the MIT license.
|
|
||||||
|
|
||||||
# Register model and deploy locally
|
|
||||||
# This example shows how to deploy a web service in step-by-step fashion:
|
|
||||||
#
|
|
||||||
# 1) Register model
|
|
||||||
# 2) Deploy the model as a web service in a local Docker container.
|
|
||||||
# 3) Invoke web service with SDK or call web service with raw HTTP call.
|
|
||||||
# 4) Quickly test changes to your entry script by reloading the local service.
|
|
||||||
# 5) Optionally, you can also make changes to model and update the local service.
|
|
||||||
|
|
||||||
library(azuremlsdk)
|
|
||||||
library(jsonlite)
|
|
||||||
|
|
||||||
ws <- load_workspace_from_config()
|
|
||||||
|
|
||||||
# Register the model
|
|
||||||
model <- register_model(ws, model_path = "project_files/model.rds",
|
|
||||||
model_name = "model.rds")
|
|
||||||
|
|
||||||
# Create environment
|
|
||||||
r_env <- r_environment(name = "r_env")
|
|
||||||
|
|
||||||
# Create inference config
|
|
||||||
inference_config <- inference_config(
|
|
||||||
entry_script = "score.R",
|
|
||||||
source_directory = "project_files",
|
|
||||||
environment = r_env)
|
|
||||||
|
|
||||||
# Create local deployment config
|
|
||||||
local_deployment_config <- local_webservice_deployment_config()
|
|
||||||
|
|
||||||
# Deploy the web service
|
|
||||||
# NOTE:
|
|
||||||
# The Docker image runs as a Linux container. If you are running Docker for Windows, you need to ensure the Linux Engine is running:
|
|
||||||
# # PowerShell command to switch to Linux engine
|
|
||||||
# & 'C:\Program Files\Docker\Docker\DockerCli.exe' -SwitchLinuxEngine
|
|
||||||
service <- deploy_model(ws,
|
|
||||||
'rservice-local',
|
|
||||||
list(model),
|
|
||||||
inference_config,
|
|
||||||
local_deployment_config)
|
|
||||||
# Wait for deployment
|
|
||||||
wait_for_deployment(service, show_output = TRUE)
|
|
||||||
|
|
||||||
# Show the port of local service
|
|
||||||
message(service$port)
|
|
||||||
|
|
||||||
# If you encounter any issue in deploying the webservice, please visit
|
|
||||||
# https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-troubleshoot-deployment
|
|
||||||
|
|
||||||
# Inferencing
|
|
||||||
# versicolor
|
|
||||||
# plant <- data.frame(Sepal.Length = 6.4,
|
|
||||||
# Sepal.Width = 2.8,
|
|
||||||
# Petal.Length = 4.6,
|
|
||||||
# Petal.Width = 1.8)
|
|
||||||
# setosa
|
|
||||||
plant <- data.frame(Sepal.Length = 5.1,
|
|
||||||
Sepal.Width = 3.5,
|
|
||||||
Petal.Length = 1.4,
|
|
||||||
Petal.Width = 0.2)
|
|
||||||
# # virginica
|
|
||||||
# plant <- data.frame(Sepal.Length = 6.7,
|
|
||||||
# Sepal.Width = 3.3,
|
|
||||||
# Petal.Length = 5.2,
|
|
||||||
# Petal.Width = 2.3)
|
|
||||||
|
|
||||||
#Test the web service
|
|
||||||
invoke_webservice(service, toJSON(plant))
|
|
||||||
|
|
||||||
## The last few lines of the logs should have the correct prediction and should display -> R[write to console]: "setosa"
|
|
||||||
cat(gsub(pattern = "\n", replacement = " \n", x = get_webservice_logs(service)))
|
|
||||||
|
|
||||||
## Test the web service with a HTTP Raw request
|
|
||||||
#
|
|
||||||
# NOTE:
|
|
||||||
# To test the service locally, use the http://localhost:<service$port> URL
|
|
||||||
|
|
||||||
# Import the request library
|
|
||||||
library(httr)
|
|
||||||
# Get the service scoring URL from the service object, its URL is for testing locally
|
|
||||||
local_service_url <- service$scoring_uri # Same as http://localhost:<service$port>
|
|
||||||
|
|
||||||
#POST request to web service
|
|
||||||
resp <- POST(local_service_url, body = plant, encode = "json", verbose())
|
|
||||||
|
|
||||||
## The last few lines of the logs should have the correct prediction and should display -> R[write to console]: "setosa"
|
|
||||||
cat(gsub(pattern = "\n", replacement = " \n", x = get_webservice_logs(service)))
|
|
||||||
|
|
||||||
|
|
||||||
# Optional, use a new scoring script
|
|
||||||
inference_config <- inference_config(
|
|
||||||
entry_script = "score_new.R",
|
|
||||||
source_directory = "project_files",
|
|
||||||
environment = r_env)
|
|
||||||
|
|
||||||
## Then reload the service to see the changes made
|
|
||||||
reload_local_webservice_assets(service)
|
|
||||||
|
|
||||||
## Check reloaded service, you will see the last line will say "this is a new scoring script! I was reloaded"
|
|
||||||
invoke_webservice(service, toJSON(plant))
|
|
||||||
cat(gsub(pattern = "\n", replacement = " \n", x = get_webservice_logs(service)))
|
|
||||||
|
|
||||||
# Update service
|
|
||||||
# If you want to change your model(s), environment, or deployment configuration, call update() to rebuild the Docker image.
|
|
||||||
|
|
||||||
# update_local_webservice(service, models = list(NewModelObject), deployment_config = deployment_config, wait = FALSE, inference_config = inference_config)
|
|
||||||
|
|
||||||
# Delete service
|
|
||||||
delete_local_webservice(service)
|
|
||||||
Binary file not shown.
@@ -1,18 +0,0 @@
|
|||||||
# Copyright(c) Microsoft Corporation.
|
|
||||||
# Licensed under the MIT license.
|
|
||||||
|
|
||||||
library(jsonlite)
|
|
||||||
|
|
||||||
init <- function() {
|
|
||||||
model_path <- Sys.getenv("AZUREML_MODEL_DIR")
|
|
||||||
model <- readRDS(file.path(model_path, "model.rds"))
|
|
||||||
message("model is loaded")
|
|
||||||
|
|
||||||
function(data) {
|
|
||||||
plant <- as.data.frame(fromJSON(data))
|
|
||||||
prediction <- predict(model, plant)
|
|
||||||
result <- as.character(prediction)
|
|
||||||
message(result)
|
|
||||||
toJSON(result)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,19 +0,0 @@
|
|||||||
# Copyright(c) Microsoft Corporation.
|
|
||||||
# Licensed under the MIT license.
|
|
||||||
|
|
||||||
library(jsonlite)
|
|
||||||
|
|
||||||
init <- function() {
|
|
||||||
model_path <- Sys.getenv("AZUREML_MODEL_DIR")
|
|
||||||
model <- readRDS(file.path(model_path, "model.rds"))
|
|
||||||
message("model is loaded")
|
|
||||||
|
|
||||||
function(data) {
|
|
||||||
plant <- as.data.frame(fromJSON(data))
|
|
||||||
prediction <- predict(model, plant)
|
|
||||||
result <- as.character(prediction)
|
|
||||||
message(result)
|
|
||||||
message("this is a new scoring script! I was reloaded")
|
|
||||||
toJSON(result)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,34 +0,0 @@
|
|||||||
# This script loads a dataset of which the last column is supposed to be the
|
|
||||||
# class and logs the accuracy
|
|
||||||
|
|
||||||
library(azuremlsdk)
|
|
||||||
library(caret)
|
|
||||||
library(optparse)
|
|
||||||
library(datasets)
|
|
||||||
|
|
||||||
|
|
||||||
data(iris)
iris_data <- iris
|
|
||||||
summary(iris_data)
|
|
||||||
|
|
||||||
in_train <- createDataPartition(y = iris_data$Species, p = .8, list = FALSE)
|
|
||||||
train_data <- iris_data[in_train,]
|
|
||||||
test_data <- iris_data[-in_train,]
|
|
||||||
|
|
||||||
# Run algorithms using 10-fold cross validation
|
|
||||||
control <- trainControl(method = "cv", number = 10)
|
|
||||||
metric <- "Accuracy"
|
|
||||||
|
|
||||||
set.seed(7)
|
|
||||||
model <- train(Species ~ .,
|
|
||||||
data = train_data,
|
|
||||||
method = "lda",
|
|
||||||
metric = metric,
|
|
||||||
trControl = control)
|
|
||||||
predictions <- predict(model, test_data)
|
|
||||||
conf_matrix <- confusionMatrix(predictions, test_data$Species)
|
|
||||||
message(conf_matrix)
|
|
||||||
|
|
||||||
log_metric_to_run(metric, conf_matrix$overall["Accuracy"])
|
|
||||||
|
|
||||||
saveRDS(model, file = "./outputs/model.rds")
|
|
||||||
message("Model saved")
|
|
||||||
@@ -1,41 +0,0 @@
|
|||||||
# Copyright(c) Microsoft Corporation.
|
|
||||||
# Licensed under the MIT license.
|
|
||||||
|
|
||||||
# Reminder: set working directory to current file location prior to running this script
|
|
||||||
|
|
||||||
library(azuremlsdk)
|
|
||||||
|
|
||||||
ws <- load_workspace_from_config()
|
|
||||||
|
|
||||||
# Create AmlCompute cluster
|
|
||||||
cluster_name <- "r-cluster"
|
|
||||||
compute_target <- get_compute(ws, cluster_name = cluster_name)
|
|
||||||
if (is.null(compute_target)) {
|
|
||||||
vm_size <- "STANDARD_D2_V2"
|
|
||||||
compute_target <- create_aml_compute(workspace = ws,
|
|
||||||
cluster_name = cluster_name,
|
|
||||||
vm_size = vm_size,
|
|
||||||
max_nodes = 1)
|
|
||||||
|
|
||||||
wait_for_provisioning_completion(compute_target, show_output = TRUE)
|
|
||||||
}
|
|
||||||
|
|
||||||
# Define estimator
|
|
||||||
est <- estimator(source_directory = "scripts",
|
|
||||||
entry_script = "train.R",
|
|
||||||
compute_target = compute_target)
|
|
||||||
|
|
||||||
experiment_name <- "train-r-script-on-amlcompute"
|
|
||||||
exp <- experiment(ws, experiment_name)
|
|
||||||
|
|
||||||
# Submit job and display the run details
|
|
||||||
run <- submit_experiment(exp, est)
|
|
||||||
view_run_details(run)
|
|
||||||
wait_for_run_completion(run, show_output = TRUE)
|
|
||||||
|
|
||||||
# Get the run metrics
|
|
||||||
metrics <- get_run_metrics(run)
|
|
||||||
metrics
|
|
||||||
|
|
||||||
# Delete cluster
|
|
||||||
delete_compute(compute_target)
|
|
||||||
@@ -1,28 +0,0 @@
|
|||||||
# This script loads a dataset of which the last column is supposed to be the
|
|
||||||
# class and logs the accuracy
|
|
||||||
|
|
||||||
library(azuremlsdk)
|
|
||||||
library(caret)
|
|
||||||
library(datasets)
|
|
||||||
|
|
||||||
data(iris)
iris_data <- iris
|
|
||||||
summary(iris_data)
|
|
||||||
|
|
||||||
in_train <- createDataPartition(y = iris_data$Species, p = .8, list = FALSE)
|
|
||||||
train_data <- iris_data[in_train,]
|
|
||||||
test_data <- iris_data[-in_train,]
|
|
||||||
# Run algorithms using 10-fold cross validation
|
|
||||||
control <- trainControl(method = "cv", number = 10)
|
|
||||||
metric <- "Accuracy"
|
|
||||||
|
|
||||||
set.seed(7)
|
|
||||||
model <- train(Species ~ .,
|
|
||||||
data = train_data,
|
|
||||||
method = "lda",
|
|
||||||
metric = metric,
|
|
||||||
trControl = control)
|
|
||||||
predictions <- predict(model, test_data)
|
|
||||||
conf_matrix <- confusionMatrix(predictions, test_data$Species)
|
|
||||||
message(conf_matrix)
|
|
||||||
|
|
||||||
log_metric_to_run(metric, conf_matrix$overall["Accuracy"])
|
|
||||||
@@ -1,26 +0,0 @@
|
|||||||
# Copyright(c) Microsoft Corporation.
|
|
||||||
# Licensed under the MIT license.
|
|
||||||
|
|
||||||
# Reminder: set working directory to current file location prior to running this script
|
|
||||||
|
|
||||||
library(azuremlsdk)
|
|
||||||
|
|
||||||
ws <- load_workspace_from_config()
|
|
||||||
|
|
||||||
# Define estimator
|
|
||||||
est <- estimator(source_directory = "scripts",
|
|
||||||
entry_script = "train.R",
|
|
||||||
compute_target = "local")
|
|
||||||
|
|
||||||
# Initialize experiment
|
|
||||||
experiment_name <- "train-r-script-on-local"
|
|
||||||
exp <- experiment(ws, experiment_name)
|
|
||||||
|
|
||||||
# Submit job and display the run details
|
|
||||||
run <- submit_experiment(exp, est)
|
|
||||||
view_run_details(run)
|
|
||||||
wait_for_run_completion(run, show_output = TRUE)
|
|
||||||
|
|
||||||
# Get the run metrics
|
|
||||||
metrics <- get_run_metrics(run)
|
|
||||||
metrics
|
|
||||||
@@ -1,17 +0,0 @@
|
|||||||
## Azure Machine Learning vignettes
|
|
||||||
|
|
||||||
These vignettes are end-to-end tutorials for using Azure Machine Learning SDK for R.
|
|
||||||
|
|
||||||
Before running a vignette in RStudio, set the working directory to the folder that contains the vignette file (.Rmd file) in RStudio using `setwd(dirname)` or Session -> Set Working Directory -> To Source File Location. Each vignette assumes that the data and scripts are in the current working directory.
|
|
||||||
|
|
||||||
The following vignettes are included:
|
|
||||||
1. [installation](installation.Rmd): Install the Azure ML SDK for R.
|
|
||||||
2. [configuration](configuration.Rmd): Set up an Azure ML workspace.
|
|
||||||
3. [train-and-deploy-to-aci](train-and-deploy-to-aci): Train a caret model and deploy as a web service to Azure Container Instances (ACI).
|
|
||||||
4. [train-with-tensorflow](train-with-tensorflow/): Train a deep learning TensorFlow model with Azure ML.
|
|
||||||
5. [hyperparameter-tune-with-keras](hyperparameter-tune-with-keras/): Hyperparameter tune a Keras model using HyperDrive, Azure ML's hyperparameter tuning functionality.
|
|
||||||
6. [deploy-to-aks](deploy-to-aks/): Production deploy a model as a web service to Azure Kubernetes Service (AKS).
|
|
||||||
|
|
||||||
> Before you run these samples, make sure you have an Azure Machine Learning workspace. You can follow the [configuration vignette](../vignettes/configuration.Rmd) to set up a workspace. (You do not need to do this if you are running these examples on an Azure Machine Learning compute instance).
|
|
||||||
|
|
||||||
For additional examples on using the R SDK, see the [samples](../samples) folder.
|
|
||||||
@@ -1,108 +0,0 @@
|
|||||||
---
|
|
||||||
title: "Set up an Azure ML workspace"
|
|
||||||
date: "`r Sys.Date()`"
|
|
||||||
output: rmarkdown::html_vignette
|
|
||||||
vignette: >
|
|
||||||
%\VignetteIndexEntry{Set up an Azure ML workspace}
|
|
||||||
%\VignetteEngine{knitr::rmarkdown}
|
|
||||||
%\VignetteEncoding{UTF-8}
|
|
||||||
---
|
|
||||||
|
|
||||||
This tutorial gets you started with the Azure Machine Learning service by walking through the requirements and instructions for setting up a workspace, the top-level resource for Azure ML.
|
|
||||||
|
|
||||||
You do not need to run this if you are working on an Azure Machine Learning Compute Instance, as the compute instance is already associated with an existing workspace.
|
|
||||||
|
|
||||||
## What is an Azure ML workspace?
|
|
||||||
The workspace is the top-level resource for Azure ML, providing a centralized place to work with all the artifacts you create when you use Azure ML. The workspace keeps a history of all training runs, including logs, metrics, output, and a snapshot of your scripts.
|
|
||||||
|
|
||||||
When you create a new workspace, it automatically creates several Azure resources that are used by the workspace:
|
|
||||||
|
|
||||||
* Azure Container Registry: Registers docker containers that you use during training and when you deploy a model. To minimize costs, ACR is lazy-loaded until deployment images are created.
|
|
||||||
* Azure Storage account: Used as the default datastore for the workspace.
|
|
||||||
* Azure Application Insights: Stores monitoring information about your models.
|
|
||||||
* Azure Key Vault: Stores secrets that are used by compute targets and other sensitive information that's needed by the workspace.
|
|
||||||
|
|
||||||
## Setup
|
|
||||||
This section describes the steps required before you can access any Azure ML service functionality.
|
|
||||||
|
|
||||||
### Azure subscription
|
|
||||||
In order to create an Azure ML workspace, first you need access to an Azure subscription. An Azure subscription allows you to manage storage, compute, and other assets in the Azure cloud. You can [create a new subscription](https://azure.microsoft.com/en-us/free/) or access existing subscription information from the [Azure portal](https://portal.azure.com/). Later in this tutorial you will need information such as your subscription ID in order to create and access workspaces.
|
|
||||||
|
|
||||||
### Azure ML SDK installation
|
|
||||||
Follow the [installation guide](https://azure.github.io/azureml-sdk-for-r/articles/installation.html) to install **azuremlsdk** on your machine.
|
|
||||||
|
|
||||||
## Configure your workspace
|
|
||||||
### Workspace parameters
|
|
||||||
To use an Azure ML workspace, you will need to supply the following information:
|
|
||||||
|
|
||||||
* Your subscription ID
|
|
||||||
* A resource group name
|
|
||||||
* (Optional) The region that will host your workspace
|
|
||||||
* A name for your workspace
|
|
||||||
|
|
||||||
You can get your subscription ID from the [Azure portal](https://portal.azure.com/).
|
|
||||||
|
|
||||||
You will also need access to a [resource group](https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-overview#resource-groups), which organizes Azure resources and provides a default region for the resources in a group. You can see which resource groups you have access to, or create a new one, in the Azure portal. If you don't have a resource group, the `create_workspace()` method will create one for you using the name you provide.
|
|
||||||
|
|
||||||
The region to host your workspace will be used if you are creating a new workspace. You do not need to specify this if you are using an existing workspace. You can find the list of supported regions [here](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=machine-learning-service). You should pick a region that is close to your location or that contains your data.
|
|
||||||
|
|
||||||
The name for your workspace is unique within the subscription and should be descriptive enough to discern among other workspaces. The subscription may be used only by you, or it may be used by your department or your entire enterprise, so choose a name that makes sense for your situation.
|
|
||||||
|
|
||||||
The following code chunk allows you to specify your workspace parameters. It uses `Sys.getenv` to read values from environment variables, which is useful for automation. If no environment variable exists, the parameters will be set to the specified default values. Replace the default values in the code below with your default parameter values.
|
|
||||||
|
|
||||||
``` {r configure_parameters, eval=FALSE}
|
|
||||||
subscription_id <- Sys.getenv("SUBSCRIPTION_ID", unset = "<my-subscription-id>")
|
|
||||||
resource_group <- Sys.getenv("RESOURCE_GROUP", unset = "<my-resource-group>")
|
|
||||||
workspace_name <- Sys.getenv("WORKSPACE_NAME", unset = "<my-workspace-name>")
|
|
||||||
workspace_region <- Sys.getenv("WORKSPACE_REGION", unset = "eastus2")
|
|
||||||
```
|
|
||||||
|
|
||||||
### Create a new workspace
|
|
||||||
If you don't have an existing workspace and are the owner of the subscription or resource group, you can create a new workspace. If you don't have a resource group, `create_workspace()` will create one for you using the name you provide. If you don't want it to do so, set the `create_resource_group = FALSE` parameter.
|
|
||||||
|
|
||||||
Note: As with other Azure services, there are limits on certain resources (e.g. AmlCompute quota) associated with the Azure ML service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.
|
|
||||||
|
|
||||||
This cell will create an Azure ML workspace for you in a subscription, provided you have the correct permissions.
|
|
||||||
|
|
||||||
This will fail if:
|
|
||||||
|
|
||||||
* You do not have permission to create a workspace in the resource group.
|
|
||||||
* You do not have permission to create a resource group if it does not exist.
|
|
||||||
* You are not a subscription owner or contributor and no Azure ML workspaces have ever been created in this subscription.
|
|
||||||
|
|
||||||
If workspace creation fails, please work with your IT admin to provide you with the appropriate permissions or to provision the required resources.
|
|
||||||
|
|
||||||
There are additional parameters that are not shown below that can be configured when creating a workspace. Please see [`create_workspace()`](https://azure.github.io/azureml-sdk-for-r/reference/create_workspace.html) for more details.
|
|
||||||
|
|
||||||
``` {r create_workspace, eval=FALSE}
|
|
||||||
library(azuremlsdk)
|
|
||||||
|
|
||||||
ws <- create_workspace(name = workspace_name,
|
|
||||||
subscription_id = subscription_id,
|
|
||||||
resource_group = resource_group,
|
|
||||||
location = workspace_region,
|
|
||||||
exist_ok = TRUE)
|
|
||||||
```
|
|
||||||
|
|
||||||
You can write out the workspace ARM properties to a config file with [`write_workspace_config()`](https://azure.github.io/azureml-sdk-for-r/reference/write_workspace_config.html). The method provides a simple way of reusing the same workspace across multiple files or projects. Users can save the workspace details with `write_workspace_config()`, and use [`load_workspace_from_config()`](https://azure.github.io/azureml-sdk-for-r/reference/load_workspace_from_config.html) to load the same workspace in different files or projects without retyping the workspace ARM properties. The method defaults to writing out the config file to the current working directory with "config.json" as the file name. To specify a different path or file name, set the `path` and `file_name` parameters.
|
|
||||||
|
|
||||||
``` {r write_config, eval=FALSE}
|
|
||||||
write_workspace_config(ws)
|
|
||||||
```
|
|
||||||
|
|
||||||
### Access an existing workspace
|
|
||||||
You can access an existing workspace in a couple of ways. If your workspace properties were previously saved to a config file, you can load the workspace as follows:
|
|
||||||
|
|
||||||
``` {r load_config, eval=FALSE}
|
|
||||||
ws <- load_workspace_from_config()
|
|
||||||
```
|
|
||||||
|
|
||||||
If Azure ML cannot find the config file, specify the path to the config file with the `path` parameter. The method defaults to starting the search in the current directory.
|
|
||||||
|
|
||||||
You can also initialize a workspace using the [`get_workspace()`](https://azure.github.io/azureml-sdk-for-r/reference/get_workspace.html) method.
|
|
||||||
|
|
||||||
``` {r get_workspace, eval=FALSE}
|
|
||||||
ws <- get_workspace(name = workspace_name,
|
|
||||||
subscription_id = subscription_id,
|
|
||||||
resource_group = resource_group)
|
|
||||||
```
|
|
||||||
@@ -1,188 +0,0 @@
|
|||||||
---
|
|
||||||
title: "Deploy a web service to Azure Kubernetes Service"
|
|
||||||
date: "`r Sys.Date()`"
|
|
||||||
output: rmarkdown::html_vignette
|
|
||||||
vignette: >
|
|
||||||
%\VignetteIndexEntry{Deploy a web service to Azure Kubernetes Service}
|
|
||||||
%\VignetteEngine{knitr::rmarkdown}
|
|
||||||
%\VignetteEncoding{UTF-8}
|
|
||||||
---
|
|
||||||
|
|
||||||
This tutorial demonstrates how to deploy a model as a web service on [Azure Kubernetes Service](https://azure.microsoft.com/en-us/services/kubernetes-service/) (AKS). AKS is good for high-scale production deployments; use it if you need one or more of the following capabilities:
|
|
||||||
|
|
||||||
* Fast response time
|
|
||||||
* Autoscaling of the deployed service
|
|
||||||
* Hardware acceleration options such as GPU
|
|
||||||
|
|
||||||
You will learn to:
|
|
||||||
|
|
||||||
* Set up your testing environment
|
|
||||||
* Register a model
|
|
||||||
* Provision an AKS cluster
|
|
||||||
* Deploy the model to AKS
|
|
||||||
* Test the deployed service
|
|
||||||
|
|
||||||
## Prerequisites
|
|
||||||
If you don’t have access to an Azure ML workspace, follow the [setup tutorial](https://azure.github.io/azureml-sdk-for-r/articles/configuration.html) to configure and create a workspace.
|
|
||||||
|
|
||||||
## Set up your testing environment
|
|
||||||
Start by setting up your environment. This includes importing the **azuremlsdk** package and connecting to your workspace.
|
|
||||||
|
|
||||||
### Import package
|
|
||||||
```{r import_package, eval=FALSE}
|
|
||||||
library(azuremlsdk)
|
|
||||||
```
|
|
||||||
|
|
||||||
### Load your workspace
|
|
||||||
Instantiate a workspace object from your existing workspace. The following code will load the workspace details from a **config.json** file if you previously wrote one out with `write_workspace_config()`.
|
|
||||||
```{r load_workspace, eval=FALSE}
|
|
||||||
ws <- load_workspace_from_config()
|
|
||||||
```
|
|
||||||
|
|
||||||
Or, you can retrieve a workspace by directly specifying your workspace details:
|
|
||||||
```{r get_workspace, eval=FALSE}
|
|
||||||
ws <- get_workspace("<your workspace name>", "<your subscription ID>", "<your resource group>")
|
|
||||||
```
|
|
||||||
|
|
||||||
## Register the model
|
|
||||||
In this tutorial we will deploy a model that was trained in one of the [samples](https://github.com/Azure/azureml-sdk-for-r/blob/master/samples/training/train-on-amlcompute/train-on-amlcompute.R). The model was trained with the Iris dataset and can be used to determine if a flower is one of three Iris flower species (setosa, versicolor, virginica). We have provided the model file (`model.rds`) for the tutorial; it is located in the "project_files" directory of this vignette.
|
|
||||||
|
|
||||||
First, register the model to your workspace with [`register_model()`](https://azure.github.io/azureml-sdk-for-r/reference/register_model.html). A registered model can be any collection of files, but in this case the R model file is sufficient. Azure ML will use the registered model for deployment.
|
|
||||||
|
|
||||||
```{r register_model, eval=FALSE}
|
|
||||||
model <- register_model(ws,
|
|
||||||
model_path = "project_files/model.rds",
|
|
||||||
model_name = "iris_model",
|
|
||||||
description = "Predict an Iris flower type")
|
|
||||||
```
|
|
||||||
|
|
||||||
## Provision an AKS cluster
|
|
||||||
When deploying a web service to AKS, you deploy to an AKS cluster that is connected to your workspace. There are two ways to connect an AKS cluster to your workspace:
|
|
||||||
|
|
||||||
* Create the AKS cluster. The process automatically connects the cluster to the workspace.
|
|
||||||
* Attach an existing AKS cluster to your workspace. You can attach a cluster with the [`attach_aks_compute()`](https://azure.github.io/azureml-sdk-for-r/reference/attach_aks_compute.html) method.
|
|
||||||
|
|
||||||
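
If you choose to attach an existing cluster instead, the call would look roughly like the sketch below. The resource group and cluster name are placeholders, and the exact argument names should be checked against the `attach_aks_compute()` reference linked above.

``` {r attach_cluster, eval=FALSE}
# Sketch only: attach an AKS cluster that already exists in your subscription.
# "my-resource-group" and "my-existing-aks" are hypothetical values.
aks_target <- attach_aks_compute(ws,
                                 resource_group = "my-resource-group",
                                 cluster_name = "my-existing-aks")
```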

Creating or attaching an AKS cluster is a one-time process for your workspace. You can reuse this cluster for multiple deployments. If you delete the cluster or the resource group that contains it, you must create a new cluster the next time you need to deploy.

In this tutorial, we will go with the first method of provisioning a new cluster. See the [`create_aks_compute()`](https://azure.github.io/azureml-sdk-for-r/reference/create_aks_compute.html) reference for the full set of configurable parameters. If you pick custom values for the `agent_count` and `vm_size` parameters, you need to make sure `agent_count` multiplied by the number of cores in each `vm_size` VM is greater than or equal to 12 virtual CPUs.

``` {r provision_cluster, eval=FALSE}
aks_target <- create_aks_compute(ws, cluster_name = 'myakscluster')

wait_for_provisioning_completion(aks_target, show_output = TRUE)
```

The Azure ML SDK does not provide support for scaling an AKS cluster. To scale the nodes in the cluster, use the UI for your AKS cluster in the Azure portal. You can only change the node count, not the VM size of the cluster.

## Deploy as a web service
### Define the inference dependencies
To deploy a model, you need an **inference configuration**, which describes the environment needed to host the model and web service. To create an inference config, you will first need a scoring script and an Azure ML environment.

The scoring script (`entry_script`) is an R script that will take as input variable values (in JSON format) and output a prediction from your model. For this tutorial, use the provided scoring file `score.R`. The scoring script must contain an `init()` method that loads your model and returns a function that uses the model to make a prediction based on the input data. See the [documentation](https://azure.github.io/azureml-sdk-for-r/reference/inference_config.html#details) for more details.
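
As a point of reference, a condensed sketch of such a scoring script is shown below; it mirrors the provided `score.R`, which loads the registered model from the directory given by the `AZUREML_MODEL_DIR` environment variable and returns a prediction function.

``` {r scoring_sketch, eval=FALSE}
# Condensed sketch of a scoring script such as score.R
library(jsonlite)

init <- function() {
  # Azure ML sets AZUREML_MODEL_DIR to the folder holding the registered model
  model <- readRDS(file.path(Sys.getenv("AZUREML_MODEL_DIR"), "model.rds"))

  # Return the function that will be called for each scoring request
  function(data) {
    plant <- as.data.frame(fromJSON(data))
    toJSON(as.character(predict(model, plant)))
  }
}
```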

Next, define an Azure ML **environment** for your script’s package dependencies. With an environment, you specify R packages (from CRAN or elsewhere) that are needed for your script to run. You can also provide the values of environment variables that your script can reference to modify its behavior.

By default, Azure ML will build a default Docker image that includes R, the Azure ML SDK, and additional required dependencies for deployment. See the [`r_environment()`](https://azure.github.io/azureml-sdk-for-r/reference/r_environment.html) reference for the full list of dependencies that will be installed in the default container. Using the other available parameters to `r_environment()`, you can also specify additional packages to be installed at runtime, or even a custom Docker image to be used instead of the base image that will be built.

```{r create_env, eval=FALSE}
r_env <- r_environment(name = "deploy_env")
```

Now you have everything you need to create an inference config for encapsulating your scoring script and environment dependencies.

``` {r create_inference_config, eval=FALSE}
inference_config <- inference_config(
  entry_script = "score.R",
  source_directory = "project_files",
  environment = r_env)
```

### Deploy to AKS
Now, define the deployment configuration that describes the compute resources needed, for example, the number of cores and memory. See the [`aks_webservice_deployment_config()`](https://azure.github.io/azureml-sdk-for-r/reference/aks_webservice_deployment_config.html) reference for the full set of configurable parameters.

``` {r deploy_config, eval=FALSE}
aks_config <- aks_webservice_deployment_config(cpu_cores = 1, memory_gb = 1)
```

Now, deploy your model as a web service to the AKS cluster you created earlier.

```{r deploy_service, eval=FALSE}
aks_service <- deploy_model(ws,
                            'my-new-aksservice',
                            models = list(model),
                            inference_config = inference_config,
                            deployment_config = aks_config,
                            deployment_target = aks_target)

wait_for_deployment(aks_service, show_output = TRUE)
```

To inspect the logs from the deployment:
```{r get_logs, eval=FALSE}
get_webservice_logs(aks_service)
```

If you encounter any issue in deploying the web service, please visit the [troubleshooting guide](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-troubleshoot-deployment).

## Test the deployed service
Now that your model is deployed as a service, you can test the service from R using [`invoke_webservice()`](https://azure.github.io/azureml-sdk-for-r/reference/invoke_webservice.html). Provide a new set of data to predict from, convert it to JSON, and send it to the service.

``` {r test_service, eval=FALSE}
library(jsonlite)

# versicolor
plant <- data.frame(Sepal.Length = 6.4,
                    Sepal.Width = 2.8,
                    Petal.Length = 4.6,
                    Petal.Width = 1.8)

# setosa
# plant <- data.frame(Sepal.Length = 5.1,
#                     Sepal.Width = 3.5,
#                     Petal.Length = 1.4,
#                     Petal.Width = 0.2)

# virginica
# plant <- data.frame(Sepal.Length = 6.7,
#                     Sepal.Width = 3.3,
#                     Petal.Length = 5.2,
#                     Petal.Width = 2.3)

predicted_val <- invoke_webservice(aks_service, toJSON(plant))
message(predicted_val)
```

You can also get the web service’s HTTP endpoint, which accepts REST client calls. You can share this endpoint with anyone who wants to test the web service or integrate it into an application.

``` {r eval=FALSE}
aks_service$scoring_uri
```

## Web service authentication
When deploying to AKS, key-based authentication is enabled by default. You can also enable token-based authentication. Token-based authentication requires clients to use an Azure Active Directory account to request an authentication token, which is used to make requests to the deployed service.

To disable key-based auth, set the `auth_enabled = FALSE` parameter when creating the deployment configuration with [`aks_webservice_deployment_config()`](https://azure.github.io/azureml-sdk-for-r/reference/aks_webservice_deployment_config.html).
To enable token-based auth, set `token_auth_enabled = TRUE` when creating the deployment config.
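
For example, a deployment configuration that enables token-based authentication might look like the following sketch (resource sizes as above):

``` {r auth_config_sketch, eval=FALSE}
# Sketch: same resources as before, but with token-based auth enabled
aks_config <- aks_webservice_deployment_config(cpu_cores = 1,
                                               memory_gb = 1,
                                               token_auth_enabled = TRUE)
```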

### Key-based authentication
If key authentication is enabled, you can use the [`get_webservice_keys()`](https://azure.github.io/azureml-sdk-for-r/reference/get_webservice_keys.html) method to retrieve a primary and secondary authentication key. To generate a new key, use [`generate_new_webservice_key()`](https://azure.github.io/azureml-sdk-for-r/reference/generate_new_webservice_key.html).
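
A minimal sketch of retrieving and regenerating keys is shown below; the `key_type` value is an assumption, so check the function reference for the exact argument.

``` {r key_auth_sketch, eval=FALSE}
# Sketch: retrieve the current keys, then regenerate the primary key
keys <- get_webservice_keys(aks_service)
generate_new_webservice_key(aks_service, key_type = "Primary")
```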

### Token-based authentication
If token authentication is enabled, you can use the [`get_webservice_token()`](https://azure.github.io/azureml-sdk-for-r/reference/get_webservice_token.html) method to retrieve a JWT token and that token's expiration time. Make sure to request a new token after the token's expiration time.
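
For example (the shape of the returned object may vary):

``` {r token_auth_sketch, eval=FALSE}
# Sketch: request an access token for the deployed service
token_info <- get_webservice_token(aks_service)
```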

## Clean up resources
Delete the resources once you no longer need them. Do not delete any resource you plan on still using.

Delete the web service:
```{r delete_service, eval=FALSE}
delete_webservice(aks_service)
```

Delete the registered model:
```{r delete_model, eval=FALSE}
delete_model(model)
```

Delete the AKS cluster:
```{r delete_cluster, eval=FALSE}
delete_compute(aks_target)
```

Binary file not shown.

@@ -1,17 +0,0 @@
#' Copyright(c) Microsoft Corporation.
#' Licensed under the MIT license.

library(jsonlite)

init <- function() {
  model_path <- Sys.getenv("AZUREML_MODEL_DIR")
  model <- readRDS(file.path(model_path, "model.rds"))
  message("model is loaded")

  function(data) {
    plant <- as.data.frame(fromJSON(data))
    prediction <- predict(model, plant)
    result <- as.character(prediction)
    toJSON(result)
  }
}

@@ -1,242 +0,0 @@
---
title: "Hyperparameter tune a Keras model"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Hyperparameter tune a Keras model}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
---

This tutorial demonstrates how you can efficiently tune hyperparameters for a model using HyperDrive, Azure ML's hyperparameter tuning functionality. You will train a Keras model on the CIFAR10 dataset, automate hyperparameter exploration, launch parallel jobs, log your results, and find the best run.

### What are hyperparameters?

Hyperparameters are adjustable parameters you choose before training a model that control how it is trained. Learning rate, number of epochs, and batch size are all examples of hyperparameters.

Using brute-force methods to find the optimal values for parameters can be time-consuming, and poor-performing runs can result in wasted money. To avoid this, HyperDrive automates hyperparameter exploration in a time-saving and cost-effective manner by launching several parallel runs with different configurations and finding the configuration that results in best performance on your primary metric.

Let's get started with the example to see how it works!

## Prerequisites

If you don’t have access to an Azure ML workspace, follow the [setup tutorial](https://azure.github.io/azureml-sdk-for-r/articles/configuration.html) to configure and create a workspace.

## Set up development environment
The setup for your development work in this tutorial includes the following actions:

* Import required packages
* Connect to a workspace
* Create an experiment to track your runs
* Create a remote compute target to use for training

### Import **azuremlsdk** package
```{r eval=FALSE}
library(azuremlsdk)
```

### Load your workspace
Instantiate a workspace object from your existing workspace. The following code will load the workspace details from a **config.json** file if you previously wrote one out with [`write_workspace_config()`](https://azure.github.io/azureml-sdk-for-r/reference/write_workspace_config.html).
```{r load_workspace, eval=FALSE}
ws <- load_workspace_from_config()
```

Or, you can retrieve a workspace by directly specifying your workspace details:
```{r get_workspace, eval=FALSE}
ws <- get_workspace("<your workspace name>", "<your subscription ID>", "<your resource group>")
```

### Create an experiment
An Azure ML **experiment** tracks a grouping of runs, typically from the same training script. Create an experiment to track hyperparameter tuning runs for the Keras model.

```{r create_experiment, eval=FALSE}
exp <- experiment(workspace = ws, name = 'hyperdrive-cifar10')
```

If you would like to track your runs in an existing experiment, simply specify that experiment's name to the `name` parameter of `experiment()`.

### Create a compute target
By using Azure Machine Learning Compute (AmlCompute), a managed service, data scientists can train machine learning models on clusters of Azure virtual machines. In this tutorial, you create a GPU-enabled cluster as your training environment. The code below creates the compute cluster for you if it doesn't already exist in your workspace.

You may need to wait a few minutes for your compute cluster to be provisioned if it doesn't already exist.

```{r create_cluster, eval=FALSE}
cluster_name <- "gpucluster"

compute_target <- get_compute(ws, cluster_name = cluster_name)
if (is.null(compute_target)) {
  vm_size <- "STANDARD_NC6"
  compute_target <- create_aml_compute(workspace = ws,
                                       cluster_name = cluster_name,
                                       vm_size = vm_size,
                                       max_nodes = 4)

  wait_for_provisioning_completion(compute_target, show_output = TRUE)
}
```

## Prepare the training script
A training script called `cifar10_cnn.R` has been provided for you in the "project_files" directory of this tutorial.

In order to leverage HyperDrive, the training script for your model must log the relevant metrics during model training. When you configure the hyperparameter tuning run, you specify the primary metric to use for evaluating run performance. You must log this metric so it is available to the hyperparameter tuning process.

In order to log the required metrics, you need to do the following **inside the training script**:

* Import the **azuremlsdk** package
```
library(azuremlsdk)
```

* Take the hyperparameters as command-line arguments to the script. This is necessary so that when HyperDrive carries out the hyperparameter sweep, it can run the training script with different values for the hyperparameters as defined by the search space (a sketch follows this list).

* Use the [`log_metric_to_run()`](https://azure.github.io/azureml-sdk-for-r/reference/log_metric_to_run.html) function to log the hyperparameters and the primary metric.
```
log_metric_to_run("batch_size", batch_size)
...
log_metric_to_run("epochs", epochs)
...
log_metric_to_run("lr", lr)
...
log_metric_to_run("decay", decay)
...
log_metric_to_run("Loss", results[[1]])
```
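
As a sketch of the second bullet above: the provided `cifar10_cnn.R` reads its hyperparameters by positional indexing into `commandArgs()`; the flag-matching form below is one alternative way to do the same thing, assuming HyperDrive passes flag/value pairs such as `--batch_size 32`.

```
args <- commandArgs(trailingOnly = TRUE)

# Look up the value that follows each flag
batch_size <- as.numeric(args[which(args == "--batch_size") + 1])
lr <- as.numeric(args[which(args == "--lr") + 1])
```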

## Create an estimator

An Azure ML **estimator** encapsulates the run configuration information needed for executing a training script on the compute target. Azure ML runs are run as containerized jobs on the specified compute target. By default, the Docker image built for your training job will include R, the Azure ML SDK, and a set of commonly used R packages. See the full list of default packages included [here](https://azure.github.io/azureml-sdk-for-r/reference/r_environment.html). The estimator is used to define the configuration for each of the child runs that the parent HyperDrive run will kick off.

To create the estimator, define the following:

* The directory that contains your scripts needed for training (`source_directory`). All the files in this directory are uploaded to the cluster node(s) for execution. The directory must contain your training script and any additional scripts required.
* The training script that will be executed (`entry_script`).
* The compute target (`compute_target`), in this case the AmlCompute cluster you created earlier.
* Any environment dependencies required for training. Since the training script requires the Keras package, which is not included in the image by default, pass the package name to the `cran_packages` parameter to have it installed in the Docker container where the job will run. See the [`estimator()`](https://azure.github.io/azureml-sdk-for-r/reference/estimator.html) reference for the full set of configurable options.
* Set the `use_gpu = TRUE` flag so the default base GPU Docker image will be built, since the job will be run on a GPU cluster.

```{r create_estimator, eval=FALSE}
est <- estimator(source_directory = "project_files",
                 entry_script = "cifar10_cnn.R",
                 compute_target = compute_target,
                 cran_packages = c("keras"),
                 use_gpu = TRUE)
```

## Configure the HyperDrive run
To kick off hyperparameter tuning in Azure ML, you will need to configure a HyperDrive run, which will in turn launch individual child runs of the training script with the corresponding hyperparameter values.

### Define search space

In this experiment, we will use four hyperparameters: batch size, number of epochs, learning rate, and decay. In order to begin tuning, we must define the range of values we would like to explore for each hyperparameter and how they will be distributed. This is called a parameter space definition and can be created with discrete or continuous ranges.

__Discrete hyperparameters__ are specified as a choice among discrete values represented as a list.

Advanced discrete hyperparameters can also be specified using a distribution. The following distributions are supported:

* `quniform(low, high, q)`
* `qloguniform(low, high, q)`
* `qnormal(mu, sigma, q)`
* `qlognormal(mu, sigma, q)`

__Continuous hyperparameters__ are specified as a distribution over a continuous range of values. The following distributions are supported:

* `uniform(low, high)`
* `loguniform(low, high)`
* `normal(mu, sigma)`
* `lognormal(mu, sigma)`

Here, we will use the [`random_parameter_sampling()`](https://azure.github.io/azureml-sdk-for-r/reference/random_parameter_sampling.html) function to define the search space for each hyperparameter. `batch_size` and `epochs` will be chosen from discrete sets while `lr` and `decay` will be drawn from continuous distributions.

Other available sampling function options are:

* [`grid_parameter_sampling()`](https://azure.github.io/azureml-sdk-for-r/reference/grid_parameter_sampling.html)
* [`bayesian_parameter_sampling()`](https://azure.github.io/azureml-sdk-for-r/reference/bayesian_parameter_sampling.html)

```{r search_space, eval=FALSE}
sampling <- random_parameter_sampling(list(batch_size = choice(c(16, 32, 64)),
                                           epochs = choice(c(200, 350, 500)),
                                           lr = normal(0.0001, 0.005),
                                           decay = uniform(1e-6, 3e-6)))
```

### Define termination policy

To prevent resource waste, Azure ML can detect and terminate poorly performing runs. HyperDrive will do this automatically if you specify an early termination policy.

Here, you will use the [`bandit_policy()`](https://azure.github.io/azureml-sdk-for-r/reference/bandit_policy.html), which terminates any runs where the primary metric is not within the specified slack factor with respect to the best performing training run.

```{r termination_policy, eval=FALSE}
policy <- bandit_policy(slack_factor = 0.15)
```

Other termination policy options are:

* [`median_stopping_policy()`](https://azure.github.io/azureml-sdk-for-r/reference/median_stopping_policy.html)
* [`truncation_selection_policy()`](https://azure.github.io/azureml-sdk-for-r/reference/truncation_selection_policy.html)

If no policy is provided, all runs will continue to completion regardless of performance.

### Finalize configuration

Now, you can create a `HyperDriveConfig` object to define your HyperDrive run. Along with the sampling and policy definitions, you need to specify the name of the primary metric that you want to track and whether you want to maximize or minimize it. The `primary_metric_name` must correspond with the name of the primary metric you logged in your training script. `max_total_runs` specifies the total number of child runs to launch. See the [hyperdrive_config()](https://azure.github.io/azureml-sdk-for-r/reference/hyperdrive_config.html) reference for the full set of configurable parameters.

```{r create_config, eval=FALSE}
hyperdrive_config <- hyperdrive_config(hyperparameter_sampling = sampling,
                                       primary_metric_goal = primary_metric_goal("MINIMIZE"),
                                       primary_metric_name = "Loss",
                                       max_total_runs = 4,
                                       policy = policy,
                                       estimator = est)
```

## Submit the HyperDrive run

Finally, submit the experiment to run on your cluster. The parent HyperDrive run will launch the individual child runs. `submit_experiment()` will return a `HyperDriveRun` object that you will use to interface with the run. In this tutorial, since the cluster we created scales to a max of `4` nodes, all 4 child runs will be launched in parallel.

```{r submit_run, eval=FALSE}
hyperdrive_run <- submit_experiment(exp, hyperdrive_config)
```

You can view the HyperDrive run’s details as a table. Clicking the “Web View” link provided will bring you to Azure Machine Learning studio, where you can monitor the run in the UI.

```{r eval=FALSE}
view_run_details(hyperdrive_run)
```

Wait until hyperparameter tuning is complete before you run more code.

```{r eval=FALSE}
wait_for_run_completion(hyperdrive_run, show_output = TRUE)
```

## Analyse runs by performance

Finally, you can view and compare the metrics collected during all of the child runs!

```{r analyse_runs, eval=FALSE}
# Get the metrics of all the child runs
child_run_metrics <- get_child_run_metrics(hyperdrive_run)
child_run_metrics

# Get the child run objects sorted in descending order by the best primary metric
child_runs <- get_child_runs_sorted_by_primary_metric(hyperdrive_run)
child_runs

# Directly get the run object of the best performing run
best_run <- get_best_run_by_primary_metric(hyperdrive_run)

# Get the metrics of the best performing run
metrics <- get_run_metrics(best_run)
metrics
```

The `metrics` variable will include the values of the hyperparameters that resulted in the best performing run.

## Clean up resources
Delete the resources once you no longer need them. Don't delete any resource you plan to still use.

Delete the compute cluster:
```{r delete_compute, eval=FALSE}
delete_compute(compute_target)
```

@@ -1,124 +0,0 @@
#' Modified from: "https://github.com/rstudio/keras/blob/master/vignettes/
#' examples/cifar10_cnn.R"
#'
#' Train a simple deep CNN on the CIFAR10 small images dataset.
#'
#' It gets down to 0.65 test logloss in 25 epochs, and down to 0.55 after 50
#' epochs, though it is still underfitting at that point.

library(keras)
install_keras()

library(azuremlsdk)

# Parameters --------------------------------------------------------------

args <- commandArgs(trailingOnly = TRUE)

batch_size <- as.numeric(args[2])
log_metric_to_run("batch_size", batch_size)

epochs <- as.numeric(args[4])
log_metric_to_run("epochs", epochs)

lr <- as.numeric(args[6])
log_metric_to_run("lr", lr)

decay <- as.numeric(args[8])
log_metric_to_run("decay", decay)

data_augmentation <- TRUE


# Data Preparation --------------------------------------------------------

# See ?dataset_cifar10 for more info
cifar10 <- dataset_cifar10()

# Feature scale RGB values in test and train inputs
x_train <- cifar10$train$x / 255
x_test <- cifar10$test$x / 255
y_train <- to_categorical(cifar10$train$y, num_classes = 10)
y_test <- to_categorical(cifar10$test$y, num_classes = 10)


# Defining Model ----------------------------------------------------------

# Initialize sequential model
model <- keras_model_sequential()

model %>%

  # Start with hidden 2D convolutional layer being fed 32x32 pixel images
  layer_conv_2d(
    filter = 32, kernel_size = c(3, 3), padding = "same",
    input_shape = c(32, 32, 3)
  ) %>%
  layer_activation("relu") %>%

  # Second hidden layer
  layer_conv_2d(filter = 32, kernel_size = c(3, 3)) %>%
  layer_activation("relu") %>%

  # Use max pooling
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_dropout(0.25) %>%

  # 2 additional hidden 2D convolutional layers
  layer_conv_2d(filter = 32, kernel_size = c(3, 3), padding = "same") %>%
  layer_activation("relu") %>%
  layer_conv_2d(filter = 32, kernel_size = c(3, 3)) %>%
  layer_activation("relu") %>%

  # Use max pooling once more
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_dropout(0.25) %>%

  # Flatten max filtered output into feature vector
  # and feed into dense layer
  layer_flatten() %>%
  layer_dense(512) %>%
  layer_activation("relu") %>%
  layer_dropout(0.5) %>%

  # Outputs from dense layer are projected onto 10 unit output layer
  layer_dense(10) %>%
  layer_activation("softmax")

opt <- optimizer_rmsprop(lr = lr, decay = decay)

model %>%
  compile(loss = "categorical_crossentropy",
          optimizer = opt,
          metrics = "accuracy"
  )


# Training ----------------------------------------------------------------

if (!data_augmentation) {

  model %>%
    fit(x_train,
        y_train,
        batch_size = batch_size,
        epochs = epochs,
        validation_data = list(x_test, y_test),
        shuffle = TRUE
    )

} else {

  datagen <- image_data_generator(rotation_range = 20,
                                  width_shift_range = 0.2,
                                  height_shift_range = 0.2,
                                  horizontal_flip = TRUE
  )

  datagen %>% fit_image_data_generator(x_train)

  # Train the model on batches of augmented images produced by the generator
  model %>%
    fit_generator(flow_images_from_data(x_train, y_train, datagen,
                                        batch_size = batch_size),
                  steps_per_epoch = as.integer(nrow(x_train) / batch_size),
                  epochs = epochs,
                  validation_data = list(x_test, y_test))

  results <- evaluate(model, x_train, y_train, batch_size)
  log_metric_to_run("Loss", results[[1]])
  cat("Loss: ", results[[1]], "\n")
  cat("Accuracy: ", results[[2]], "\n")
}

@@ -1,100 +0,0 @@
---
title: "Install the Azure ML SDK for R"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Install the Azure ML SDK for R}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
---

This article covers the step-by-step instructions for installing the Azure ML SDK for R.

You do not need to run this if you are working on an Azure Machine Learning Compute Instance, as the compute instance already has the Azure ML SDK preinstalled.

## Install Conda

If you do not have Conda already installed on your machine, you will first need to install it, since the Azure ML R SDK uses **reticulate** to bind to the Python SDK. We recommend installing [Miniconda](https://docs.conda.io/en/latest/miniconda.html), which is a smaller, lightweight version of Anaconda. Choose the 64-bit binary for Python 3.5 or later.

## Install the **azuremlsdk** R package
You will need the **remotes** package to install **azuremlsdk**.
``` {r install_remotes, eval=FALSE}
install.packages('remotes')
```

Then, you can use the `install_cran` function to install the package from CRAN.
``` {r install_azuremlsdk, eval=FALSE}
remotes::install_cran('azuremlsdk', repos = 'https://cloud.r-project.org/')
```

If you are using R installed from CRAN, which comes with 32-bit and 64-bit binaries, you may need to specify the parameter `INSTALL_opts=c("--no-multiarch")` to only build for the current 64-bit architecture.
``` {r eval=FALSE}
remotes::install_cran('azuremlsdk', repos = 'https://cloud.r-project.org/', INSTALL_opts=c("--no-multiarch"))
```

## Install the Azure ML Python SDK
Lastly, use the **azuremlsdk** R library to install the Python SDK. By default, `azuremlsdk::install_azureml()` will install the [latest version of the Python SDK](https://pypi.org/project/azureml-sdk/) in a conda environment called `r-azureml` if reticulate < 1.14 or `r-reticulate` if reticulate ≥ 1.14.
``` {r install_pythonsdk, eval=FALSE}
azuremlsdk::install_azureml()
```

If you would like to override the default version, environment name, or Python version, you can pass in those arguments. If you would like to restart the R session after installation or delete the conda environment if it already exists and create a new environment, you can also do so:
``` {r eval=FALSE}
azuremlsdk::install_azureml(version = NULL,
                            custom_envname = "<your conda environment name>",
                            conda_python_version = "<desired python version>",
                            restart_session = TRUE,
                            remove_existing_env = TRUE)
```

## Test installation
You can confirm your installation worked by loading the library and successfully retrieving a run.
``` {r test_installation, eval=FALSE}
library(azuremlsdk)
get_current_run()
```

## Troubleshooting
- In step 3 of the installation (installing the Azure ML Python SDK), if you get SSL errors on Windows, it is due to an
outdated OpenSSL binary. Install the latest OpenSSL binaries from
[here](https://wiki.openssl.org/index.php/Binaries).

- If installation fails due to this error:

  ```R
  Error in strptime(xx, f, tz = tz) :
    (converted from warning) unable to identify current timezone 'C':
    please set environment variable 'TZ'
  In R CMD INSTALL
  Error in i.p(...) :
    (converted from warning) installation of package ‘C:/.../azureml_0.4.0.tar.gz’ had non-zero exit status
  ```

  You will need to set your time zone environment variable to GMT and restart the installation process.

  ```R
  Sys.setenv(TZ='GMT')
  ```

- If the following permission error occurs while installing in RStudio,
change your RStudio session to administrator mode, and re-run the installation command.

  ```R
  Downloading GitHub repo Azure/azureml-sdk-for-r@master
  Skipping 2 packages ahead of CRAN: reticulate, rlang
  Running `R CMD build`...

  Error: (converted from warning) invalid package
    'C:/.../file2b441bf23631'
  In R CMD INSTALL
  Error in i.p(...) :
    (converted from warning) installation of package
    ‘C:/.../file2b441bf23631’ had non-zero exit status
  In addition: Warning messages:
  1: In file(con, "r") :
    cannot open file 'C:...\file2b44144a540f': Permission denied
  2: In file(con, "r") :
    cannot open file 'C:...\file2b4463c21577': Permission denied
  ```

@@ -1,16 +0,0 @@
#' Copyright(c) Microsoft Corporation.
#' Licensed under the MIT license.

library(jsonlite)

init <- function() {
  model_path <- Sys.getenv("AZUREML_MODEL_DIR")
  model <- readRDS(file.path(model_path, "model.rds"))
  message("logistic regression model loaded")

  function(data) {
    vars <- as.data.frame(fromJSON(data))
    prediction <- as.numeric(predict(model, vars, type = "response") * 100)
    toJSON(prediction)
  }
}

@@ -1,33 +0,0 @@
#' Copyright(c) Microsoft Corporation.
#' Licensed under the MIT license.

library(azuremlsdk)
library(optparse)
library(caret)

options <- list(
  make_option(c("-d", "--data_folder"))
)

opt_parser <- OptionParser(option_list = options)
opt <- parse_args(opt_parser)

paste(opt$data_folder)

accidents <- readRDS(file.path(opt$data_folder, "accidents.Rd"))
summary(accidents)

mod <- glm(dead ~ dvcat + seatbelt + frontal + sex + ageOFocc + yearVeh + airbag + occRole, family = binomial, data = accidents)
summary(mod)
predictions <- factor(ifelse(predict(mod) > 0.1, "dead", "alive"))
conf_matrix <- confusionMatrix(predictions, accidents$dead)
message(conf_matrix)

log_metric_to_run("Accuracy", conf_matrix$overall["Accuracy"])

output_dir <- "outputs"
if (!dir.exists(output_dir)) {
  dir.create(output_dir)
}
saveRDS(mod, file = "./outputs/model.rds")
message("Model saved")

@@ -1,326 +0,0 @@
---
title: "Train and deploy your first model with Azure ML"
author: "David Smith"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Train and deploy your first model with Azure ML}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
---

In this tutorial, you learn the foundational design patterns in Azure Machine Learning. You'll train and deploy a **caret** model to predict the likelihood of a fatality in an automobile accident. After completing this tutorial, you'll have the practical knowledge of the R SDK to scale up to developing more-complex experiments and workflows.

In this tutorial, you learn the following tasks:

* Connect your workspace
* Load data and prepare for training
* Upload data to the datastore so it is available for remote training
* Create a compute resource
* Train a caret model to predict probability of fatality
* Deploy a prediction endpoint
* Test the model from R

## Prerequisites

If you don't have access to an Azure ML workspace, follow the [setup tutorial](https://azure.github.io/azureml-sdk-for-r/articles/configuration.html) to configure and create a workspace.

## Set up your development environment
The setup for your development work in this tutorial includes the following actions:

* Install required packages
* Connect to a workspace, so that your local computer can communicate with remote resources
* Create an experiment to track your runs
* Create a remote compute target to use for training

### Install required packages
This tutorial assumes you already have the Azure ML SDK installed. Go ahead and import the **azuremlsdk** package.

```{r eval=FALSE}
library(azuremlsdk)
```

The tutorial uses data from the [**DAAG** package](https://cran.r-project.org/package=DAAG). Install the package if you don't have it.

```{r eval=FALSE}
install.packages("DAAG")
```

The training and scoring scripts (`accidents.R` and `accident_predict.R`) have some additional dependencies. If you plan on running those scripts locally, make sure you have those required packages as well.
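
If you do want to run them locally, the packages the scripts load can be installed in the usual way:

```{r eval=FALSE}
# Only needed for running accidents.R / accident_predict.R locally
install.packages(c("caret", "e1071", "optparse", "jsonlite"))
```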

### Load your workspace
Instantiate a workspace object from your existing workspace. The following code will load the workspace details from the **config.json** file. You can also retrieve a workspace using [`get_workspace()`](https://azure.github.io/azureml-sdk-for-r/reference/get_workspace.html).

```{r load_workspace, eval=FALSE}
ws <- load_workspace_from_config()
```

### Create an experiment
An Azure ML experiment tracks a grouping of runs, typically from the same training script. Create an experiment to track the runs for training the caret model on the accidents data.

```{r create_experiment, eval=FALSE}
experiment_name <- "accident-logreg"
exp <- experiment(ws, experiment_name)
```

### Create a compute target
By using Azure Machine Learning Compute (AmlCompute), a managed service, data scientists can train machine learning models on clusters of Azure virtual machines. Examples include VMs with GPU support. In this tutorial, you create a single-node AmlCompute cluster as your training environment. The code below creates the compute cluster for you if it doesn't already exist in your workspace.

You may need to wait a few minutes for your compute cluster to be provisioned if it doesn't already exist.

```{r create_cluster, eval=FALSE}
cluster_name <- "rcluster"
compute_target <- get_compute(ws, cluster_name = cluster_name)
if (is.null(compute_target)) {
  vm_size <- "STANDARD_D2_V2"
  compute_target <- create_aml_compute(workspace = ws,
                                       cluster_name = cluster_name,
                                       vm_size = vm_size,
                                       max_nodes = 1)

  wait_for_provisioning_completion(compute_target, show_output = TRUE)
}
```

## Prepare data for training
This tutorial uses data from the **DAAG** package. This dataset includes data from over 25,000 car crashes in the US, with variables you can use to predict the likelihood of a fatality. First, import the data into R and transform it into a new dataframe `accidents` for analysis, and export it to an `Rdata` file.

```{r load_data, eval=FALSE}
library(DAAG)
data(nassCDS)

accidents <- na.omit(nassCDS[,c("dead","dvcat","seatbelt","frontal","sex","ageOFocc","yearVeh","airbag","occRole")])
accidents$frontal <- factor(accidents$frontal, labels=c("notfrontal","frontal"))
accidents$occRole <- factor(accidents$occRole)

saveRDS(accidents, file="accidents.Rd")
```

### Upload data to the datastore
Upload data to the cloud so that it can be accessed by your remote training environment. Each Azure ML workspace comes with a default datastore that stores the connection information to the Azure blob container that is provisioned in the storage account attached to the workspace. The following code will upload the accidents data you created above to that datastore.

```{r upload_data, eval=FALSE}
ds <- get_default_datastore(ws)

target_path <- "accidentdata"
upload_files_to_datastore(ds,
                          list("./project_files/accidents.Rd"),
                          target_path = target_path,
                          overwrite = TRUE)
```


## Train a model

For this tutorial, fit a logistic regression model on your uploaded data using your remote compute cluster. To submit a job, you need to:

* Prepare the training script
* Create an estimator
* Submit the job

### Prepare the training script
A training script called `accidents.R` has been provided for you in the "project_files" directory of this tutorial. Notice the following details **inside the training script** that have been done to leverage the Azure ML service for training (the key lines are excerpted after this list):

* The training script takes an argument `-d` to find the directory that contains the training data. When you define and submit your job later, you point to the datastore for this argument. Azure ML will mount the storage folder to the remote cluster for the training job.
* The training script logs the final accuracy as a metric to the run record in Azure ML using `log_metric_to_run()`. The Azure ML SDK provides a set of logging APIs for logging various metrics during training runs. These metrics are recorded and persisted in the experiment run record. The metrics can then be accessed at any time or viewed in the run details page in [Azure Machine Learning studio](http://ml.azure.com). See the [reference](https://azure.github.io/azureml-sdk-for-r/reference/index.html#section-training-experimentation) for the full set of logging methods `log_*()`.
* The training script saves your model into a directory named **outputs**. The `./outputs` folder receives special treatment by Azure ML. During training, files written to `./outputs` are automatically uploaded to your run record by Azure ML and persisted as artifacts. By saving the trained model to `./outputs`, you'll be able to access and retrieve your model file even after the run is over and you no longer have access to your remote training environment.
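
Abridged from the provided `accidents.R`, the pieces described above look like this (the model object `mod` and the confusion matrix `conf_matrix` are computed earlier in the script):

```{r eval=FALSE}
# Read the data folder passed on the command line via -d/--data_folder
library(optparse)
opt_parser <- OptionParser(option_list = list(make_option(c("-d", "--data_folder"))))
opt <- parse_args(opt_parser)
accidents <- readRDS(file.path(opt$data_folder, "accidents.Rd"))

# Log the accuracy metric to the run record
log_metric_to_run("Accuracy", conf_matrix$overall["Accuracy"])

# Save the trained model to ./outputs so it is persisted with the run
saveRDS(mod, file = "./outputs/model.rds")
```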

### Create an estimator

An Azure ML estimator encapsulates the run configuration information needed for executing a training script on the compute target. Azure ML runs are run as containerized jobs on the specified compute target. By default, the Docker image built for your training job will include R, the Azure ML SDK, and a set of commonly used R packages. See the full list of default packages included [here](https://azure.github.io/azureml-sdk-for-r/reference/r_environment.html).

To create the estimator, define:

* The directory that contains your scripts needed for training (`source_directory`). All the files in this directory are uploaded to the cluster node(s) for execution. The directory must contain your training script and any additional scripts required.
* The training script that will be executed (`entry_script`).
* The compute target (`compute_target`), in this case the AmlCompute cluster you created earlier.
* The parameters required from the training script (`script_params`). Azure ML will run your training script as a command-line script with `Rscript`. In this tutorial you specify one argument to the script, the data directory mounting point, which you can access with `ds$path(target_path)`.
* Any environment dependencies required for training. The default Docker image built for training already contains the three packages (`caret`, `e1071`, and `optparse`) needed in the training script. So you don't need to specify additional information. If you are using R packages that are not included by default, use the estimator's `cran_packages` parameter to add additional CRAN packages. See the [`estimator()`](https://azure.github.io/azureml-sdk-for-r/reference/estimator.html) reference for the full set of configurable options.

```{r create_estimator, eval=FALSE}
est <- estimator(source_directory = "project_files",
                 entry_script = "accidents.R",
                 script_params = list("--data_folder" = ds$path(target_path)),
                 compute_target = compute_target
                 )
```

### Submit the job on the remote cluster

Finally, submit the job to run on your cluster. `submit_experiment()` returns a Run object that you then use to interface with the run. In total, the first run takes **about 10 minutes**. But for later runs, the same Docker image is reused as long as the script dependencies don't change. In this case, the image is cached and the container startup time is much faster.

```{r submit_job, eval=FALSE}
run <- submit_experiment(exp, est)
```

You can view a table of the run's details. Clicking the "Web View" link provided will bring you to Azure Machine Learning studio, where you can monitor the run in the UI.

```{r view_run, eval=FALSE}
view_run_details(run)
```

Model training happens in the background. Wait until the model has finished training before you run more code.

```{r wait_run, eval=FALSE}
wait_for_run_completion(run, show_output = TRUE)
```

You, and colleagues with access to the workspace, can submit multiple experiments in parallel, and Azure ML will take care of scheduling the tasks on the compute cluster. You can even configure the cluster to automatically scale up to multiple nodes, and scale back when there are no more compute tasks in the queue. This configuration is a cost-effective way for teams to share compute resources.

## Retrieve training results
Once your model has finished training, you can access the artifacts of your job that were persisted to the run record, including any metrics logged and the final trained model.

### Get the logged metrics
In the training script `accidents.R`, you logged a metric from your model: the accuracy of the predictions in the training data. You can see metrics in the [studio](https://ml.azure.com), or extract them to the local session as an R list as follows:

```{r metrics, eval=FALSE}
metrics <- get_run_metrics(run)
metrics
```

If you've run multiple experiments (say, using differing variables, algorithms, or hyperparameters), you can use the metrics from each run to compare and choose the model you'll use in production.

### Get the trained model
You can retrieve the trained model and look at the results in your local R session. The following code will download the contents of the `./outputs` directory, which includes the model file.

```{r retrieve_model, eval=FALSE}
download_files_from_run(run, prefix="outputs/")
accident_model <- readRDS("project_files/outputs/model.rds")
summary(accident_model)
```

You see some factors that contribute to an increase in the estimated probability of death:

* higher impact speed
* male driver
* older occupant
* passenger

You see lower probabilities of death with:

* presence of airbags
* presence of seatbelts
* frontal collision

The vehicle year of manufacture does not have a significant effect.

You can use this model to make new predictions:

```{r manual_predict, eval=FALSE}
newdata <- data.frame( # valid values shown below
  dvcat="10-24",       # "1-9km/h" "10-24" "25-39" "40-54" "55+"
  seatbelt="none",     # "none" "belted"
  frontal="frontal",   # "notfrontal" "frontal"
  sex="f",             # "f" "m"
  ageOFocc=16,         # age in years, 16-97
  yearVeh=2002,        # year of vehicle, 1955-2003
  airbag="none",       # "none" "airbag"
  occRole="pass"       # "driver" "pass"
)

## predicted probability of death for these variables, as a percentage
as.numeric(predict(accident_model,newdata, type="response")*100)
```

## Deploy as a web service

With your model, you can predict the danger of death from a collision. Use Azure ML to deploy your model as a prediction service. In this tutorial, you will deploy the web service in [Azure Container Instances](https://docs.microsoft.com/en-us/azure/container-instances/) (ACI).

### Register the model

First, register the model you downloaded to your workspace with [`register_model()`](https://azure.github.io/azureml-sdk-for-r/reference/register_model.html). A registered model can be any collection of files, but in this case the R model object is sufficient. Azure ML will use the registered model for deployment.

```{r register_model, eval=FALSE}
model <- register_model(ws,
                        model_path = "project_files/outputs/model.rds",
                        model_name = "accidents_model",
                        description = "Predict probability of auto accident")
```

### Define the inference dependencies
To create a web service for your model, you first need to create a scoring script (`entry_script`), an R script that will take as input variable values (in JSON format) and output a prediction from your model. For this tutorial, use the provided scoring file `accident_predict.R`. The scoring script must contain an `init()` method that loads your model and returns a function that uses the model to make a prediction based on the input data. See the [documentation](https://azure.github.io/azureml-sdk-for-r/reference/inference_config.html#details) for more details.
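
Abridged from the provided `accident_predict.R`, the scoring script follows the same `init()` pattern described above:

```{r eval=FALSE}
# Abridged from accident_predict.R
library(jsonlite)

init <- function() {
  model <- readRDS(file.path(Sys.getenv("AZUREML_MODEL_DIR"), "model.rds"))

  # Return the predicted probability of death (as a percentage) for the input JSON
  function(data) {
    vars <- as.data.frame(fromJSON(data))
    toJSON(as.numeric(predict(model, vars, type = "response") * 100))
  }
}
```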

Next, define an Azure ML **environment** for your script's package dependencies. With an environment, you specify R packages (from CRAN or elsewhere) that are needed for your script to run. You can also provide the values of environment variables that your script can reference to modify its behavior. By default, Azure ML will build the same default Docker image used with the estimator for training. Since the tutorial has no special requirements, create an environment with no special attributes.

```{r create_environment, eval=FALSE}
r_env <- r_environment(name = "basic_env")
```

If you want to use your own Docker image for deployment instead, specify the `custom_docker_image` parameter. See the [`r_environment()`](https://azure.github.io/azureml-sdk-for-r/reference/r_environment.html) reference for the full set of configurable options for defining an environment.

Now you have everything you need to create an **inference config** for encapsulating your scoring script and environment dependencies.

``` {r create_inference_config, eval=FALSE}
inference_config <- inference_config(
  entry_script = "accident_predict.R",
  source_directory = "project_files",
  environment = r_env)
```

### Deploy to ACI
In this tutorial, you will deploy your service to ACI. This code provisions a single container to respond to inbound requests, which is suitable for testing and light loads. See [`aci_webservice_deployment_config()`](https://azure.github.io/azureml-sdk-for-r/reference/aci_webservice_deployment_config.html) for additional configurable options. (For production-scale deployments, you can also [deploy to Azure Kubernetes Service](https://azure.github.io/azureml-sdk-for-r/articles/deploy-to-aks/deploy-to-aks.html).)

``` {r create_aci_config, eval=FALSE}
aci_config <- aci_webservice_deployment_config(cpu_cores = 1, memory_gb = 0.5)
```

Now you deploy your model as a web service. Deployment **can take several minutes**.

```{r deploy_service, eval=FALSE}
aci_service <- deploy_model(ws,
                            'accident-pred',
                            list(model),
                            inference_config,
                            aci_config)

wait_for_deployment(aci_service, show_output = TRUE)
```

If you encounter any issue in deploying the web service, please visit the [troubleshooting guide](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-troubleshoot-deployment).

## Test the deployed service

Now that your model is deployed as a service, you can test the service from R using [`invoke_webservice()`](https://azure.github.io/azureml-sdk-for-r/reference/invoke_webservice.html). Provide a new set of data to predict from, convert it to JSON, and send it to the service.

```{r test_deployment, eval=FALSE}
library(jsonlite)

newdata <- data.frame( # valid values shown below
  dvcat="10-24",       # "1-9km/h" "10-24" "25-39" "40-54" "55+"
  seatbelt="none",     # "none" "belted"
  frontal="frontal",   # "notfrontal" "frontal"
  sex="f",             # "f" "m"
  ageOFocc=22,         # age in years, 16-97
  yearVeh=2002,        # year of vehicle, 1955-2003
  airbag="none",       # "none" "airbag"
  occRole="pass"       # "driver" "pass"
)

prob <- invoke_webservice(aci_service, toJSON(newdata))
prob
```

You can also get the web service's HTTP endpoint, which accepts REST client calls. You can share this endpoint with anyone who wants to test the web service or integrate it into an application.

```{r get_endpoint, eval=FALSE}
aci_service$scoring_uri
```

## Clean up resources

Delete the resources once you no longer need them. Don't delete any resource you plan to still use.

Delete the web service:
```{r delete_service, eval=FALSE}
delete_webservice(aci_service)
```

Delete the registered model:
```{r delete_model, eval=FALSE}
delete_model(model)
```

Delete the compute cluster:
```{r delete_compute, eval=FALSE}
delete_compute(compute_target)
```

@@ -1,62 +0,0 @@
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
# Copyright 2016 RStudio, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================


library(tensorflow)
install_tensorflow(version = "1.13.2-gpu")

library(azuremlsdk)

# Create the model
x <- tf$placeholder(tf$float32, shape(NULL, 784L))
W <- tf$Variable(tf$zeros(shape(784L, 10L)))
b <- tf$Variable(tf$zeros(shape(10L)))

y <- tf$nn$softmax(tf$matmul(x, W) + b)

# Define loss and optimizer
y_ <- tf$placeholder(tf$float32, shape(NULL, 10L))
cross_entropy <- tf$reduce_mean(-tf$reduce_sum(y_ * log(y),
                                               reduction_indices = 1L))
train_step <- tf$train$GradientDescentOptimizer(0.5)$minimize(cross_entropy)

# Create session and initialize variables
sess <- tf$Session()
sess$run(tf$global_variables_initializer())

# Load MNIST data
datasets <- tf$contrib$learn$datasets
mnist <- datasets$mnist$read_data_sets("MNIST-data", one_hot = TRUE)

# Train
for (i in 1:1000) {
  batches <- mnist$train$next_batch(100L)
  batch_xs <- batches[[1]]
  batch_ys <- batches[[2]]
  sess$run(train_step,
           feed_dict = dict(x = batch_xs, y_ = batch_ys))
}

# Test trained model
correct_prediction <- tf$equal(tf$argmax(y, 1L), tf$argmax(y_, 1L))
accuracy <- tf$reduce_mean(tf$cast(correct_prediction, tf$float32))
cat("Accuracy: ", sess$run(accuracy,
                           feed_dict = dict(x = mnist$test$images,
                                            y_ = mnist$test$labels)))

log_metric_to_run("accuracy",
                  sess$run(accuracy, feed_dict = dict(x = mnist$test$images,
                                                      y_ = mnist$test$labels)))

@@ -1,143 +0,0 @@
---
title: "Train a TensorFlow model"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Train a TensorFlow model}
  %\VignetteEngine{knitr::rmarkdown}
  \use_package{UTF-8}
---

This tutorial demonstrates how to run a TensorFlow job at scale using Azure ML. You will train a TensorFlow model to classify handwritten digits (MNIST) using a deep neural network (DNN) and log your results to the Azure ML service.

## Prerequisites
If you don't have access to an Azure ML workspace, follow the [setup tutorial](https://azure.github.io/azureml-sdk-for-r/articles/configuration.html) to configure and create a workspace.

## Set up development environment
The setup for your development work in this tutorial includes the following actions:

* Import required packages
* Connect to a workspace
* Create an experiment to track your runs
* Create a remote compute target to use for training

### Import **azuremlsdk** package
```{r eval=FALSE}
library(azuremlsdk)
```

### Load your workspace
Instantiate a workspace object from your existing workspace. The following code will load the workspace details from a **config.json** file if you previously wrote one out with [`write_workspace_config()`](https://azure.github.io/azureml-sdk-for-r/reference/write_workspace_config.html).
```{r load_workpace, eval=FALSE}
ws <- load_workspace_from_config()
```

Or, you can retrieve a workspace by directly specifying your workspace details:
```{r get_workpace, eval=FALSE}
ws <- get_workspace("<your workspace name>", "<your subscription ID>", "<your resource group>")
```

### Create an experiment
An Azure ML **experiment** tracks a grouping of runs, typically from the same training script. Create an experiment to track the runs for training the TensorFlow model on the MNIST data.

```{r create_experiment, eval=FALSE}
exp <- experiment(workspace = ws, name = "tf-mnist")
```

If you would like to track your runs in an existing experiment, simply specify that experiment's name to the `name` parameter of `experiment()`.

### Create a compute target
By using Azure Machine Learning Compute (AmlCompute), a managed service, data scientists can train machine learning models on clusters of Azure virtual machines. In this tutorial, you create a GPU-enabled cluster as your training environment. The code below creates the compute cluster for you if it doesn't already exist in your workspace.

You may need to wait a few minutes for your compute cluster to be provisioned if it doesn't already exist.

```{r create_cluster, eval=FALSE}
cluster_name <- "gpucluster"
compute_target <- get_compute(ws, cluster_name = cluster_name)
if (is.null(compute_target)) {
  vm_size <- "STANDARD_NC6"
  compute_target <- create_aml_compute(workspace = ws,
                                       cluster_name = cluster_name,
                                       vm_size = vm_size,
                                       max_nodes = 4)

  wait_for_provisioning_completion(compute_target, show_output = TRUE)
}
```

## Prepare the training script

A training script called `tf_mnist.R` has been provided for you in the "project_files" directory of this tutorial. The Azure ML SDK provides a set of logging APIs for logging various metrics during training runs. These metrics are recorded and persisted in the experiment run record, and can be accessed at any time or viewed in the run details page in [Azure Machine Learning studio](http://ml.azure.com/).

In order to collect and upload run metrics, you need to do the following **inside the training script**:

* Import the **azuremlsdk** package
```
library(azuremlsdk)
```

* Add the [`log_metric_to_run()`](https://azure.github.io/azureml-sdk-for-r/reference/log_metric_to_run.html) function to track our primary metric, "accuracy", for this experiment. If you have your own training script with several important metrics, simply create a logging call for each one within the script.
```
log_metric_to_run("accuracy",
                  sess$run(accuracy,
                           feed_dict = dict(x = mnist$test$images, y_ = mnist$test$labels)))
```

See the [reference](https://azure.github.io/azureml-sdk-for-r/reference/index.html#section-training-experimentation) for the full set of logging methods `log_*()` available from the R SDK.
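
As a quick illustration of that family, a hedged sketch you could drop into the training script (the metric names and values below are made up, and `log_list_to_run()` is assumed to be available alongside `log_metric_to_run()`):
```
# Illustrative only: log a single numeric metric and a list of per-epoch values.
log_metric_to_run("learning_rate", 0.5)
log_list_to_run("loss_per_epoch", list(2.30, 1.12, 0.68))
```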

## Create an estimator

An Azure ML **estimator** encapsulates the run configuration information needed for executing a training script on the compute target. Azure ML runs are executed as containerized jobs on the specified compute target. By default, the Docker image built for your training job will include R, the Azure ML SDK, and a set of commonly used R packages. See the full list of default packages included [here](https://azure.github.io/azureml-sdk-for-r/reference/r_environment.html).

To create the estimator, define the following:

* The directory that contains your scripts needed for training (`source_directory`). All the files in this directory are uploaded to the cluster node(s) for execution. The directory must contain your training script and any additional scripts required.
* The training script that will be executed (`entry_script`).
* The compute target (`compute_target`), in this case the AmlCompute cluster you created earlier.
* Any environment dependencies required for training. Since the training script requires the TensorFlow package, which is not included in the image by default, pass the package name to the `cran_packages` parameter to have it installed in the Docker container where the job will run. See the [`estimator()`](https://azure.github.io/azureml-sdk-for-r/reference/estimator.html) reference for the full set of configurable options.
* The `use_gpu = TRUE` flag, so the default GPU base Docker image will be built, since the job will run on a GPU cluster.

```{r create_estimator, eval=FALSE}
est <- estimator(source_directory = "project_files",
                 entry_script = "tf_mnist.R",
                 compute_target = compute_target,
                 cran_packages = c("tensorflow"),
                 use_gpu = TRUE)
```

## Submit the job

Finally, submit the job to run on your cluster. [`submit_experiment()`](https://azure.github.io/azureml-sdk-for-r/reference/submit_experiment.html) returns a `Run` object that you can then use to interface with the run.

```{r submit_job, eval=FALSE}
run <- submit_experiment(exp, est)
```

You can view the run's details as a table. Clicking the "Web View" link provided will bring you to Azure Machine Learning studio, where you can monitor the run in the UI.

```{r eval=FALSE}
view_run_details(run)
```

Model training happens in the background. Wait until the model has finished training before you run more code.

```{r eval=FALSE}
wait_for_run_completion(run, show_output = TRUE)
```

## View run metrics
Once your job has finished, you can view the metrics collected during your TensorFlow run.

```{r get_metrics, eval=FALSE}
metrics <- get_run_metrics(run)
metrics
```

## Clean up resources
Delete the resources once you no longer need them. Don't delete any resource you plan to still use.

Delete the compute cluster:
```{r delete_compute, eval=FALSE}
delete_compute(compute_target)
```

@@ -334,14 +334,27 @@
"metadata": {},
"outputs": [],
"source": [
- "# Use the default configuration (can also provide parameters to customize)\n",
- "prov_config = AksCompute.provisioning_configuration()\n",
+ "from azureml.core.compute import ComputeTarget\n",
+ "from azureml.core.compute_target import ComputeTargetException\n",
"\n",
+ "# Choose a name for your AKS cluster\n",
"aks_name = 'my-aks-9' \n",
- "# Create the cluster\n",
- "aks_target = ComputeTarget.create(workspace = ws, \n",
- " name = aks_name, \n",
- " provisioning_configuration = prov_config)"
+ "\n",
+ "# Verify that cluster does not exist already\n",
+ "try:\n",
+ " aks_target = ComputeTarget(workspace=ws, name=aks_name)\n",
+ " print('Found existing cluster, use it.')\n",
+ "except ComputeTargetException:\n",
+ " # Use the default configuration (can also provide parameters to customize)\n",
+ " prov_config = AksCompute.provisioning_configuration()\n",
+ "\n",
+ " # Create the cluster\n",
+ " aks_target = ComputeTarget.create(workspace = ws, \n",
+ " name = aks_name, \n",
+ " provisioning_configuration = prov_config)\n",
+ "\n",
+ "if aks_target.get_status() != \"Succeeded\":\n",
+ " aks_target.wait_for_completion(show_output=True)"
]
},
{
@@ -518,7 +518,7 @@
"metadata": {},
"outputs": [],
"source": [
- "from azureml.contrib.interpret.explanation.explanation_client import ExplanationClient\n",
+ "from azureml.interpret import ExplanationClient\n",
"\n",
"# Get model explanation data\n",
"client = ExplanationClient.from_run(run)\n",
@@ -6,6 +6,6 @@ dependencies:
  - interpret-community[visualization]
  - matplotlib
  - azureml-contrib-interpret
- - sklearn-pandas
+ - sklearn-pandas<2.0.0
  - azureml-dataset-runtime
  - ipywidgets
@@ -4,7 +4,7 @@
from sklearn import datasets
from sklearn.linear_model import Ridge
from interpret.ext.blackbox import TabularExplainer
- from azureml.contrib.interpret.explanation.explanation_client import ExplanationClient
+ from azureml.interpret import ExplanationClient
from sklearn.model_selection import train_test_split
from azureml.core.run import Run
import joblib
@@ -451,7 +451,7 @@
"source": [
"import azureml.core\n",
"from azureml.core import Workspace, Experiment\n",
- "from azureml.contrib.interpret.explanation.explanation_client import ExplanationClient\n",
+ "from azureml.interpret import ExplanationClient\n",
"# Check core SDK version number\n",
"print(\"SDK version:\", azureml.core.VERSION)"
]
@@ -6,5 +6,5 @@ dependencies:
  - interpret-community[visualization]
  - matplotlib
  - azureml-contrib-interpret
- - sklearn-pandas
+ - sklearn-pandas<2.0.0
  - ipywidgets
@@ -295,8 +295,7 @@
"# environment, otherwise if a model is trained or deployed in a different environment this can\n",
"# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
"azureml_pip_packages.extend(['sklearn-pandas', 'pyyaml', sklearn_dep, pandas_dep])\n",
- "run_config.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=azureml_pip_packages,\n",
- " pin_sdk_version=False)\n",
+ "run_config.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=azureml_pip_packages)\n",
"# Now submit a run on AmlCompute\n",
"from azureml.core.script_run_config import ScriptRunConfig\n",
"\n",
@@ -368,7 +367,7 @@
"outputs": [],
"source": [
"# Retrieve global explanation for visualization\n",
- "from azureml.contrib.interpret.explanation.explanation_client import ExplanationClient\n",
+ "from azureml.interpret import ExplanationClient\n",
"\n",
"# get model explanation data\n",
"client = ExplanationClient.from_run(run)\n",
@@ -460,8 +459,7 @@
"# environment, otherwise if a model is trained or deployed in a different environment this can\n",
"# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
"azureml_pip_packages.extend(['sklearn-pandas', 'pyyaml', sklearn_dep, pandas_dep])\n",
- "myenv = CondaDependencies.create(pip_packages=azureml_pip_packages,\n",
- " pin_sdk_version=False)\n",
+ "myenv = CondaDependencies.create(pip_packages=azureml_pip_packages)\n",
"\n",
"with open(\"myenv.yml\",\"w\") as f:\n",
" f.write(myenv.serialize_to_string())\n",
@@ -6,7 +6,7 @@ dependencies:
  - interpret-community[visualization]
  - matplotlib
  - azureml-contrib-interpret
- - sklearn-pandas
+ - sklearn-pandas<2.0.0
  - azureml-dataset-runtime
  - azureml-core
  - ipywidgets
@@ -15,7 +15,7 @@ from sklearn_pandas import DataFrameMapper

from azureml.core.run import Run
from interpret.ext.blackbox import TabularExplainer
- from azureml.contrib.interpret.explanation.explanation_client import ExplanationClient
+ from azureml.interpret import ExplanationClient
from azureml.interpret.scoring.scoring_explainer import LinearScoringExplainer, save

OUTPUT_DIR = './outputs/'
@@ -34,7 +34,7 @@
"| Azure Data Lake Storage Gen 1 | Yes | Yes |\n",
"| Azure Data Lake Storage Gen 2 | Yes | Yes |\n",
"| Azure SQL Database | Yes | Yes |\n",
- "| Azure Database for PostgreSQL | Yes | Yes |",
+ "| Azure Database for PostgreSQL | Yes | Yes |\n",
"| Azure Database for MySQL | Yes | Yes |"
]
},
@@ -558,7 +558,7 @@
"metadata": {
"authors": [
{
- "name": "sanpil"
+ "name": "shbijlan"
}
],
"category": "tutorial",
@@ -100,7 +100,7 @@
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# choose a name for your cluster\n",
- "cluster_name = \"cpu-cluster\"\n",
+ "cluster_name = \"amlcomp\"\n",
"\n",
"try:\n",
" cpu_cluster = ComputeTarget(workspace=ws, name=cluster_name)\n",
@@ -147,10 +147,8 @@
" 'script_params' accepts a dictionary. However 'estimator_entry_script_arguments' parameter expects arguments as\n",
" a list.\n",
"\n",
- "> Estimator object initialization involves specifying a list of DataReference objects in its 'inputs' parameter.\n",
- " In Pipelines, a step can take another step's output or DataReferences as input. So when creating an EstimatorStep,\n",
- " the parameters 'inputs' and 'outputs' need to be set explicitly and that will override 'inputs' parameter\n",
- " specified in the Estimator object.\n",
+ "> Estimator object initialization involves specifying a list of data inputs and outputs.\n",
+ " In Pipelines, a step can take another step's output as input.\n",
" \n",
"> The best practice is to use separate folders for scripts and its dependent files for each step and specify that folder as the `source_directory` for the step. This helps reduce the size of the snapshot created for the step (only the specific folder is snapshotted). Since changes in any files in the `source_directory` would trigger a re-upload of the snapshot, this helps keep the reuse of the step when there are no changes in the `source_directory` of the step."
]
@@ -166,17 +164,27 @@
"outputs": [],
"source": [
"from azureml.core import Datastore\n",
- "from azureml.data.data_reference import DataReference\n",
- "from azureml.pipeline.core import PipelineData\n",
"\n",
"def_blob_store = Datastore(ws, \"workspaceblobstore\")\n",
"\n",
- "input_data = DataReference(\n",
- " datastore=def_blob_store,\n",
- " data_reference_name=\"input_data\",\n",
- " path_on_datastore=\"20newsgroups/20news.pkl\")\n",
+ "#upload input data to workspaceblobstore\n",
+ "def_blob_store.upload_files(files=['20news.pkl'], target_path='20newsgroups')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from azureml.core import Dataset\n",
+ "from azureml.data import OutputFileDatasetConfig\n",
"\n",
- "output = PipelineData(\"output\", datastore=def_blob_store)\n",
+ "# create dataset to be used as the input to estimator step\n",
+ "input_data = Dataset.File.from_files(def_blob_store.path('20newsgroups/20news.pkl'))\n",
+ "\n",
+ "# OutputFileDatasetConfig by default write output to the default workspaceblobstore\n",
+ "output = OutputFileDatasetConfig()\n",
"\n",
"source_directory = 'estimator_train'"
]
@@ -204,10 +212,8 @@
"\n",
"- **name:** Name of the step\n",
"- **estimator:** Estimator object\n",
- "- **estimator_entry_script_arguments:** \n",
+ "- **estimator_entry_script_arguments:** A list of command-line arguments\n",
"- **runconfig_pipeline_params:** Override runconfig properties at runtime using key-value pairs each with name of the runconfig property and PipelineParameter for that property\n",
- "- **inputs:** Inputs\n",
- "- **outputs:** Output is list of PipelineData\n",
"- **compute_target:** Compute target to use \n",
"- **allow_reuse:** Whether the step should reuse previous results when run with the same settings/inputs. If this is false, a new run will always be generated for this step during pipeline execution.\n",
"- **version:** Optional version tag to denote a change in functionality for the step"
@@ -227,10 +233,8 @@
"\n",
"est_step = EstimatorStep(name=\"Estimator_Train\", \n",
" estimator=est, \n",
- " estimator_entry_script_arguments=[\"--datadir\", input_data, \"--output\", output],\n",
+ " estimator_entry_script_arguments=[\"--datadir\", input_data.as_mount(), \"--output\", output],\n",
" runconfig_pipeline_params=None, \n",
- " inputs=[input_data], \n",
- " outputs=[output], \n",
" compute_target=cpu_cluster)"
]
},
@@ -42,13 +42,13 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"import azureml.core\n",
|
"import azureml.core\n",
|
||||||
"from azureml.core import Workspace, Experiment\n",
|
"from azureml.core import Workspace, Experiment, Datastore, Dataset\n",
|
||||||
"from azureml.core.datastore import Datastore\n",
|
|
||||||
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||||
|
"from azureml.core.conda_dependencies import CondaDependencies\n",
|
||||||
|
"from azureml.core.runconfig import RunConfiguration\n",
|
||||||
"from azureml.exceptions import ComputeTargetException\n",
|
"from azureml.exceptions import ComputeTargetException\n",
|
||||||
"from azureml.data.data_reference import DataReference\n",
|
"from azureml.pipeline.steps import HyperDriveStep, HyperDriveStepRun, PythonScriptStep\n",
|
||||||
"from azureml.pipeline.steps import HyperDriveStep, HyperDriveStepRun\n",
|
"from azureml.pipeline.core import Pipeline, PipelineData, TrainingOutput\n",
|
||||||
"from azureml.pipeline.core import Pipeline, PipelineData\n",
|
|
||||||
"from azureml.train.dnn import TensorFlow\n",
|
"from azureml.train.dnn import TensorFlow\n",
|
||||||
"# from azureml.train.hyperdrive import *\n",
|
"# from azureml.train.hyperdrive import *\n",
|
||||||
"from azureml.train.hyperdrive import RandomParameterSampling, BanditPolicy, HyperDriveConfig, PrimaryMetricGoal\n",
|
"from azureml.train.hyperdrive import RandomParameterSampling, BanditPolicy, HyperDriveConfig, PrimaryMetricGoal\n",
|
||||||
@@ -179,8 +179,25 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"ds = ws.get_default_datastore()\n",
|
"datastore = ws.get_default_datastore()\n",
|
||||||
"ds.upload(src_dir='./data/mnist', target_path='mnist', overwrite=True, show_progress=True)"
|
"datastore.upload(src_dir='./data/mnist', target_path='mnist', overwrite=True, show_progress=True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Create Azure Machine Learning datasets\n",
|
||||||
|
"By creating a dataset, you create a reference to the data source location. If you applied any subsetting transformations to the dataset, they will be stored in the dataset as well. The data remains in its existing location, so no extra storage cost is incurred."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"dataset = Dataset.File.from_files(datastore.path('mnist'))"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -204,7 +221,7 @@
"metadata": {},
"outputs": [],
"source": [
- "cluster_name = \"gpu-cluster\"\n",
+ "cluster_name = \"amlcomp\"\n",
"\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cluster_name)\n",
@@ -217,7 +234,22 @@
|
|||||||
" compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n",
|
" compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n",
|
||||||
" compute_target.wait_for_completion(show_output=True, timeout_in_minutes=20)\n",
|
" compute_target.wait_for_completion(show_output=True, timeout_in_minutes=20)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"print(\"Azure Machine Learning Compute attached\")"
|
"print(\"Azure Machine Learning Compute attached\")\n",
|
||||||
|
"\n",
|
||||||
|
"cpu_cluster_name = \"cpu-cluster\"\n",
|
||||||
|
"\n",
|
||||||
|
"try:\n",
|
||||||
|
" cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
||||||
|
" print(\"Found existing cpu-cluster\")\n",
|
||||||
|
"except ComputeTargetException:\n",
|
||||||
|
" print(\"Creating new cpu-cluster\")\n",
|
||||||
|
" \n",
|
||||||
|
" compute_config = AmlCompute.provisioning_configuration(vm_size=\"STANDARD_D2_V2\",\n",
|
||||||
|
" min_nodes=0,\n",
|
||||||
|
" max_nodes=4)\n",
|
||||||
|
" cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
||||||
|
" \n",
|
||||||
|
" cpu_cluster.wait_for_completion(show_output=True)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -264,7 +296,8 @@
" compute_target=compute_target,\n",
" entry_script='tf_mnist.py', \n",
" use_gpu=True,\n",
- " framework_version='1.13')"
+ " framework_version='2.0',\n",
+ " pip_packages=['azureml-dataset-runtime[pandas,fuse]'])"
]
},
{
@@ -344,7 +377,7 @@
"## Add HyperDrive as a step of pipeline\n",
"\n",
"### Set up an input for the hyperdrive step\n",
- "Let's set up a data reference for the inputs of the hyperdrive step."
+ "You can mount the dataset to the remote compute."
]
},
{
@@ -353,9 +386,7 @@
"metadata": {},
"outputs": [],
"source": [
- "data_folder = DataReference(\n",
- " datastore=ds,\n",
- " data_reference_name=\"mnist_data\")"
+ "data_folder = dataset.as_mount()"
]
},
{
||||||
@@ -386,8 +417,16 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"metrics_output_name = 'metrics_output'\n",
|
"metrics_output_name = 'metrics_output'\n",
|
||||||
"metrics_data = PipelineData(name='metrics_data',\n",
|
"metrics_data = PipelineData(name='metrics_data',\n",
|
||||||
" datastore=ds,\n",
|
" datastore=datastore,\n",
|
||||||
" pipeline_output_name=metrics_output_name)\n",
|
" pipeline_output_name=metrics_output_name,\n",
|
||||||
|
" training_output=TrainingOutput(\"Metrics\"))\n",
|
||||||
|
"\n",
|
||||||
|
"model_output_name = 'model_output'\n",
|
||||||
|
"saved_model = PipelineData(name='saved_model',\n",
|
||||||
|
" datastore=datastore,\n",
|
||||||
|
" pipeline_output_name=model_output_name,\n",
|
||||||
|
" training_output=TrainingOutput(\"Model\",\n",
|
||||||
|
" model_file=\"outputs/model/saved_model.pb\"))\n",
|
||||||
"\n",
|
"\n",
|
||||||
"hd_step_name='hd_step01'\n",
|
"hd_step_name='hd_step01'\n",
|
||||||
"hd_step = HyperDriveStep(\n",
|
"hd_step = HyperDriveStep(\n",
|
||||||
@@ -395,7 +434,39 @@
|
|||||||
" hyperdrive_config=hd_config,\n",
|
" hyperdrive_config=hd_config,\n",
|
||||||
" estimator_entry_script_arguments=['--data-folder', data_folder],\n",
|
" estimator_entry_script_arguments=['--data-folder', data_folder],\n",
|
||||||
" inputs=[data_folder],\n",
|
" inputs=[data_folder],\n",
|
||||||
" metrics_output=metrics_data)"
|
" outputs=[metrics_data, saved_model])"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Find and register best model\n",
|
||||||
|
"When all the jobs finish, we can choose to register the model that has the highest accuracy through an additional PythonScriptStep.\n",
|
||||||
|
"\n",
|
||||||
|
"Through this additional register_model_step, we register the chosen files as a model named `tf-dnn-mnist` under the workspace for deployment."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"conda_dep = CondaDependencies()\n",
|
||||||
|
"conda_dep.add_pip_package(\"azureml-sdk\")\n",
|
||||||
|
"\n",
|
||||||
|
"rcfg = RunConfiguration(conda_dependencies=conda_dep)\n",
|
||||||
|
"\n",
|
||||||
|
"register_model_step = PythonScriptStep(script_name='register_model.py',\n",
|
||||||
|
" name=\"register_model_step01\",\n",
|
||||||
|
" inputs=[saved_model],\n",
|
||||||
|
" compute_target=cpu_cluster,\n",
|
||||||
|
" arguments=[\"--saved-model\", saved_model],\n",
|
||||||
|
" allow_reuse=True,\n",
|
||||||
|
" runconfig=rcfg)\n",
|
||||||
|
"\n",
|
||||||
|
"register_model_step.run_after(hd_step)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -411,7 +482,7 @@
"metadata": {},
"outputs": [],
"source": [
- "pipeline = Pipeline(workspace=ws, steps=[hd_step])\n",
+ "pipeline = Pipeline(workspace=ws, steps=[hd_step, register_model_step])\n",
"pipeline_run = exp.submit(pipeline)"
]
},
@@ -486,58 +557,7 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"## Find and register best model\n",
|
"For model deployment, please refer to [Training, hyperparameter tune, and deploy with TensorFlow](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb)."
|
||||||
"When all the jobs finish, we can find out the one that has the highest accuracy."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"hd_step_run = HyperDriveStepRun(step_run=pipeline_run.find_step_run(hd_step_name)[0])\n",
|
|
||||||
"best_run = hd_step_run.get_best_run_by_primary_metric()\n",
|
|
||||||
"best_run"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Now let's list the model files uploaded during the run."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(best_run.get_file_names())"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"We can then register the folder (and all files in it) as a model named `tf-dnn-mnist` under the workspace for deployment."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"model = best_run.register_model(model_name='tf-dnn-mnist', model_path='outputs/model')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"For model deployment, please refer to [Training, hyperparameter tune, and deploy with TensorFlow](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/tensorflow/deployment/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb)."
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
@@ -577,7 +597,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.6.7"
+ "version": "3.6.9"
},
"order_index": 8,
"star_tag": [
@@ -6,3 +6,4 @@ dependencies:
  - matplotlib
  - numpy
  - pandas_ml
+ - azureml-dataset-runtime[pandas,fuse]
@@ -87,7 +87,7 @@
"source": [
"## Create an Azure ML experiment\n",
"\n",
- "Let's create an experiment named \"automl-classification\" and a folder to hold the training scripts. The script runs will be recorded under the experiment in Azure."
+ "Let's create an experiment named \"showcasing-datapath\" and a folder to hold the training scripts. The script runs will be recorded under the experiment in Azure."
]
},
{
@@ -479,7 +479,7 @@
"metadata": {
"authors": [
{
- "name": "sanpil"
+ "name": "shbijlan"
}
],
"category": "tutorial",
@@ -0,0 +1,274 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
|
||||||
|
"Licensed under the MIT License."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Azure Machine Learning Pipeline with KustoStep\n",
|
||||||
|
"To use Kusto as a compute target from [Azure Machine Learning Pipeline](https://aka.ms/pl-concept), a KustoStep is used. A KustoStep enables the functionality of running Kusto queries on a target Kusto cluster in Azure ML Pipelines. Each KustoStep can target one Kusto cluster and perform multiple queries on them. This notebook demonstrates the use of KustoStep in Azure Machine Learning (AML) Pipeline.\n",
|
||||||
|
"\n",
|
||||||
|
"## Before you begin:\n",
|
||||||
|
"\n",
|
||||||
|
"1. **Have an Azure Machine Learning workspace**: You will need details of this workspace later on to define KustoStep.\n",
|
||||||
|
"2. **Have a Service Principal**: You will need a service principal and use its credentials to access your cluster. See [this](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal) for more information.\n",
|
||||||
|
"3. **Have a Blob storage**: You will need a Azure Blob storage for uploading the output of your Kusto query."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Azure Machine Learning and Pipeline SDK-specific imports"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import os\n",
|
||||||
|
"import azureml.core\n",
|
||||||
|
"from azureml.core.runconfig import JarLibrary\n",
|
||||||
|
"from azureml.core.compute import ComputeTarget, KustoCompute\n",
|
||||||
|
"from azureml.exceptions import ComputeTargetException\n",
|
||||||
|
"from azureml.core import Workspace, Experiment\n",
|
||||||
|
"from azureml.pipeline.core import Pipeline, PipelineData\n",
|
||||||
|
"from azureml.pipeline.steps import KustoStep\n",
|
||||||
|
"from azureml.core.datastore import Datastore\n",
|
||||||
|
"from azureml.data.data_reference import DataReference\n",
|
||||||
|
"\n",
|
||||||
|
"# Check core SDK version number\n",
|
||||||
|
"print(\"SDK version:\", azureml.core.VERSION)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Initialize Workspace\n",
|
||||||
|
"\n",
|
||||||
|
"Initialize a workspace object from persisted configuration. If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the [configuration Notebook](https://aka.ms/pl-config) first if you haven't."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"ws = Workspace.from_config()\n",
|
||||||
|
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Attach Kusto compute target\n",
|
||||||
|
"Next, you need to create a Kusto compute target and give it a name. You will use this name to refer to your Kusto compute target inside Azure Machine Learning. Your workspace will be associated to this Kusto compute target. You will also need to provide some credentials that will be used to enable access to your target Kusto cluster and database.\n",
|
||||||
|
"\n",
|
||||||
|
"- **Resource Group** - The resource group name of your Azure Machine Learning workspace\n",
|
||||||
|
"- **Workspace Name** - The workspace name of your Azure Machine Learning workspace\n",
|
||||||
|
"- **Resource ID** - The resource ID of your Kusto cluster\n",
|
||||||
|
"- **Tenant ID** - The tenant ID associated to your Kusto cluster\n",
|
||||||
|
"- **Application ID** - The Application ID associated to your Kusto cluster\n",
|
||||||
|
"- **Application Key** - The Application key associated to your Kusto cluster\n",
|
||||||
|
"- **Kusto Connection String** - The connection string of your Kusto cluster\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"tags": [
|
||||||
|
"sample-databrickscompute-attach"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"compute_name = \"<compute_name>\" # Name to associate with new compute in workspace\n",
|
||||||
|
"\n",
|
||||||
|
"# Account details associated to the target Kusto cluster\n",
|
||||||
|
"resource_id = \"<resource_id>\" # Resource ID of the Kusto cluster\n",
|
||||||
|
"kusto_connection_string = \"<kusto_connection_string>\" # Connection string of the Kusto cluster\n",
|
||||||
|
"application_id = \"<application_id>\" # Application ID associated to the Kusto cluster\n",
|
||||||
|
"application_key = \"<application_key>\" # Application Key associated to the Kusto cluster\n",
|
||||||
|
"tenant_id = \"<tenant_id>\" # Tenant ID associated to the Kusto cluster\n",
|
||||||
|
"\n",
|
||||||
|
"try:\n",
|
||||||
|
" kusto_compute = KustoCompute(workspace=ws, name=compute_name)\n",
|
||||||
|
" print('Compute target {} already exists'.format(compute_name))\n",
|
||||||
|
"except ComputeTargetException:\n",
|
||||||
|
" print('Compute not found, will use provided parameters to attach new one')\n",
|
||||||
|
" config = KustoCompute.attach_configuration(resource_group=ws.resource_group, workspace_name=ws.name, \n",
|
||||||
|
" resource_id=resource_id, tenant_id=tenant_id, \n",
|
||||||
|
" kusto_connection_string=kusto_connection_string, \n",
|
||||||
|
" application_id=application_id, application_key=application_key)\n",
|
||||||
|
" kusto_compute=ComputeTarget.attach(ws, compute_name, config)\n",
|
||||||
|
" kusto_compute.wait_for_completion(True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Setup output\n",
|
||||||
|
"To use Kusto as a compute target for Azure Machine Learning Pipeline, a KustoStep is used. Currently KustoStep only supports uploading results to Azure Blob store. Let's define an output datastore via PipelineData to be used in KustoStep."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.pipeline.core import PipelineParameter\n",
|
||||||
|
"\n",
|
||||||
|
"# Use the default blob storage\n",
|
||||||
|
"def_blob_store = Datastore.get(ws, \"workspaceblobstore\")\n",
|
||||||
|
"print('Datastore {} will be used'.format(def_blob_store.name))\n",
|
||||||
|
"\n",
|
||||||
|
"step_1_output = PipelineData(\"output\", datastore=def_blob_store)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Add a KustoStep to Pipeline\n",
|
||||||
|
"Adds a Kusto query as a step in a Pipeline.\n",
|
||||||
|
"- **name:** Name of the Module\n",
|
||||||
|
"- **compute_target:** Name of Kusto compute target\n",
|
||||||
|
"- **database_name:** Name of the database to perform Kusto query on\n",
|
||||||
|
"- **query_directory:** Path to folder that contains only a text file with Kusto queries (see [here](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/) for more details on Kusto queries). \n",
|
||||||
|
" - If the query is parameterized, then the text file must also include any declaration of query parameters (see [here](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/queryparametersstatement?pivots=azuredataexplorer) for more details on query parameters declaration statements). \n",
|
||||||
|
" - An example of the query text file could just contain the query \"StormEvents | count | as HowManyRecords;\", where StormEvents is the table name. \n",
|
||||||
|
" - Note. the text file should just contain the declarations and queries without quotation marks around them.\n",
|
||||||
|
"- **outputs:** Output binding to an Azure Blob Store.\n",
|
||||||
|
"- **parameter_dict (optional):** Dictionary that contains the values of parameters declared in the query text file in the **query_directory** mentioned above.\n",
|
||||||
|
" - Dictionary key is the parameter name, and dictionary value is the parameter value.\n",
|
||||||
|
" - For example, parameter_dict = {\"paramName1\": \"paramValue1\", \"paramName2\": \"paramValue2\"}\n",
|
||||||
|
"- **allow_reuse (optional):** Whether the step should reuse previous results when run with the same settings/inputs (default to False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"database_name = \"<database_name>\" # Name of the database to perform Kusto queries on\n",
|
||||||
|
"query_directory = \"<query_directory>\" # Path to folder that contains a text file with Kusto queries\n",
|
||||||
|
"\n",
|
||||||
|
"kustoStep = KustoStep(\n",
|
||||||
|
" name='KustoNotebook',\n",
|
||||||
|
" compute_target=compute_name,\n",
|
||||||
|
" database_name=database_name,\n",
|
||||||
|
" query_directory=query_directory,\n",
|
||||||
|
" output=step_1_output,\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Build and submit the Experiment"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"steps = [kustoStep]\n",
|
||||||
|
"pipeline = Pipeline(workspace=ws, steps=steps)\n",
|
||||||
|
"pipeline_run = Experiment(ws, 'Notebook_demo').submit(pipeline)\n",
|
||||||
|
"pipeline_run.wait_for_completion()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# View Run Details"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from azureml.widgets import RunDetails\n",
|
||||||
|
"RunDetails(pipeline_run).show()"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"authors": [
|
||||||
|
{
|
||||||
|
"name": "t-kachia"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"category": "tutorial",
|
||||||
|
"compute": [
|
||||||
|
"Kusto"
|
||||||
|
],
|
||||||
|
"datasets": [
|
||||||
|
"Custom"
|
||||||
|
],
|
||||||
|
"deployment": [
|
||||||
|
"None"
|
||||||
|
],
|
||||||
|
"exclude_from_index": false,
|
||||||
|
"framework": [
|
||||||
|
"Azure ML, Kusto"
|
||||||
|
],
|
||||||
|
"friendly_name": "How to use KustoStep with AML Pipelines",
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3.6",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python36"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.7.6"
|
||||||
|
},
|
||||||
|
"order_index": 5,
|
||||||
|
"star_tag": [
|
||||||
|
"featured"
|
||||||
|
],
|
||||||
|
"tags": [
|
||||||
|
"None"
|
||||||
|
],
|
||||||
|
"task": "Demonstrates the use of KustoStep"
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 2
|
||||||
|
}
|
||||||
@@ -477,7 +477,7 @@
"metadata": {
"authors": [
{
- "name": "sanpil"
+ "name": "anshirga"
}
],
"category": "tutorial",
@@ -0,0 +1,21 @@
import argparse
import json
import os
import azureml.core
from azureml.core import Workspace, Experiment, Model
from azureml.core import Run
from azureml.train.hyperdrive import HyperDriveRun
from shutil import copy2

parser = argparse.ArgumentParser()
parser.add_argument('--saved-model', type=str, dest='saved_model', help='path to saved model file')
args = parser.parse_args()

model_output_dir = './model/'

os.makedirs(model_output_dir, exist_ok=True)
copy2(args.saved_model, model_output_dir)

ws = Run.get_context().experiment.workspace

model = Model.register(workspace=ws, model_name='tf-dnn-mnist', model_path=model_output_dir)
@@ -4,34 +4,103 @@
|
|||||||
import numpy as np
|
import numpy as np
|
||||||
import argparse
|
import argparse
|
||||||
import os
|
import os
|
||||||
|
import re
|
||||||
import tensorflow as tf
|
 import tensorflow as tf
+import time
+import glob

 from azureml.core import Run
 from utils import load_data
+from tensorflow.keras import Model, layers

-print("TensorFlow version:", tf.VERSION)
+
+# Create TF Model.
+class NeuralNet(Model):
+    # Set layers.
+    def __init__(self):
+        super(NeuralNet, self).__init__()
+        # First hidden layer.
+        self.h1 = layers.Dense(n_h1, activation=tf.nn.relu)
+        # Second hidden layer.
+        self.h2 = layers.Dense(n_h2, activation=tf.nn.relu)
+        self.out = layers.Dense(n_outputs)
+
+    # Set forward pass.
+    def call(self, x, is_training=False):
+        x = self.h1(x)
+        x = self.h2(x)
+        x = self.out(x)
+        if not is_training:
+            # Apply softmax when not training.
+            x = tf.nn.softmax(x)
+        return x
+
+
+def cross_entropy_loss(y, logits):
+    # Convert labels to int 64 for tf cross-entropy function.
+    y = tf.cast(y, tf.int64)
+    # Apply softmax to logits and compute cross-entropy.
+    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
+    # Average loss across the batch.
+    return tf.reduce_mean(loss)
+
+
+# Accuracy metric.
+def accuracy(y_pred, y_true):
+    # Predicted class is the index of highest score in prediction vector (i.e. argmax).
+    correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))
+    return tf.reduce_mean(tf.cast(correct_prediction, tf.float32), axis=-1)
+
+
+# Optimization process.
+def run_optimization(x, y):
+    # Wrap computation inside a GradientTape for automatic differentiation.
+    with tf.GradientTape() as g:
+        # Forward pass.
+        logits = neural_net(x, is_training=True)
+        # Compute loss.
+        loss = cross_entropy_loss(y, logits)
+
+    # Variables to update, i.e. trainable variables.
+    trainable_variables = neural_net.trainable_variables
+
+    # Compute gradients.
+    gradients = g.gradient(loss, trainable_variables)
+
+    # Update W and b following gradients.
+    optimizer.apply_gradients(zip(gradients, trainable_variables))
+
+
+print("TensorFlow version:", tf.__version__)

 parser = argparse.ArgumentParser()
-parser.add_argument('--data-folder', type=str, dest='data_folder', help='data folder mounting point')
-parser.add_argument('--batch-size', type=int, dest='batch_size', default=50, help='mini batch size for training')
-parser.add_argument('--first-layer-neurons', type=int, dest='n_hidden_1', default=100,
+parser.add_argument('--data-folder', type=str, dest='data_folder', default='data', help='data folder mounting point')
+parser.add_argument('--batch-size', type=int, dest='batch_size', default=128, help='mini batch size for training')
+parser.add_argument('--first-layer-neurons', type=int, dest='n_hidden_1', default=128,
                     help='# of neurons in the first layer')
-parser.add_argument('--second-layer-neurons', type=int, dest='n_hidden_2', default=100,
+parser.add_argument('--second-layer-neurons', type=int, dest='n_hidden_2', default=128,
                     help='# of neurons in the second layer')
 parser.add_argument('--learning-rate', type=float, dest='learning_rate', default=0.01, help='learning rate')
+parser.add_argument('--resume-from', type=str, default=None,
+                    help='location of the model or checkpoint files from where to resume the training')
 args = parser.parse_args()

-data_folder = os.path.join(args.data_folder, 'mnist')
-print('training dataset is stored here:', data_folder)
+previous_model_location = args.resume_from
+# You can also use environment variable to get the model/checkpoint files location
+# previous_model_location = os.path.expandvars(os.getenv("AZUREML_DATAREFERENCE_MODEL_LOCATION", None))
+
+data_folder = args.data_folder
+print('Data folder:', data_folder)

+# load train and test set into numpy arrays
+# note we scale the pixel intensity values to 0-1 (by dividing it with 255.0) so the model can converge faster.
 X_train = load_data(os.path.join(data_folder, 'train-images.gz'), False) / 255.0
 X_test = load_data(os.path.join(data_folder, 'test-images.gz'), False) / 255.0

 y_train = load_data(os.path.join(data_folder, 'train-labels.gz'), True).reshape(-1)
 y_test = load_data(os.path.join(data_folder, 'test-labels.gz'), True).reshape(-1)

 print(X_train.shape, y_train.shape, X_test.shape, y_test.shape, sep='\n')

 training_set_size = X_train.shape[0]

 n_inputs = 28 * 28
@@ -39,68 +108,79 @@ n_h1 = args.n_hidden_1
 n_h2 = args.n_hidden_2
 n_outputs = 10
 learning_rate = args.learning_rate
-n_epochs = 50
+n_epochs = 20
 batch_size = args.batch_size

-with tf.name_scope('network'):
-    # construct the DNN
-    X = tf.placeholder(tf.float32, shape=(None, n_inputs), name='X')
-    y = tf.placeholder(tf.int64, shape=(None), name='y')
-    h1 = tf.layers.dense(X, n_h1, activation=tf.nn.relu, name='h1')
-    h2 = tf.layers.dense(h1, n_h2, activation=tf.nn.relu, name='h2')
-    output = tf.layers.dense(h2, n_outputs, name='output')
-
-with tf.name_scope('train'):
-    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=output)
-    loss = tf.reduce_mean(cross_entropy, name='loss')
-    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
-    train_op = optimizer.minimize(loss)
-
-with tf.name_scope('eval'):
-    correct = tf.nn.in_top_k(output, y, 1)
-    acc_op = tf.reduce_mean(tf.cast(correct, tf.float32))
-
-init = tf.global_variables_initializer()
-saver = tf.train.Saver()
+# Build neural network model.
+neural_net = NeuralNet()
+
+# Stochastic gradient descent optimizer.
+optimizer = tf.optimizers.SGD(learning_rate)

 # start an Azure ML run
 run = Run.get_context()

-with tf.Session() as sess:
-    init.run()
-    for epoch in range(n_epochs):
-
-        # randomly shuffle training set
-        indices = np.random.permutation(training_set_size)
-        X_train = X_train[indices]
-        y_train = y_train[indices]
-
-        # batch index
-        b_start = 0
-        b_end = b_start + batch_size
-        for _ in range(training_set_size // batch_size):
-            # get a batch
-            X_batch, y_batch = X_train[b_start: b_end], y_train[b_start: b_end]
-
-            # update batch index for the next batch
-            b_start = b_start + batch_size
-            b_end = min(b_start + batch_size, training_set_size)
-
-            # train
-            sess.run(train_op, feed_dict={X: X_batch, y: y_batch})
-            # evaluate training set
-            acc_train = acc_op.eval(feed_dict={X: X_batch, y: y_batch})
-            # evaluate validation set
-            acc_val = acc_op.eval(feed_dict={X: X_test, y: y_test})
-
-        # log accuracies
-        run.log('training_acc', np.float(acc_train))
-        run.log('validation_acc', np.float(acc_val))
-        print(epoch, '-- Training accuracy:', acc_train, '\b Validation accuracy:', acc_val)
-        y_hat = np.argmax(output.eval(feed_dict={X: X_test}), axis=1)
-
-    run.log('final_acc', np.float(acc_val))
-
-    os.makedirs('./outputs/model', exist_ok=True)
-    # files saved in the "./outputs" folder are automatically uploaded into run history
-    saver.save(sess, './outputs/model/mnist-tf.model')
+if previous_model_location:
+    # Restore variables from latest checkpoint.
+    checkpoint = tf.train.Checkpoint(model=neural_net, optimizer=optimizer)
+    checkpoint_file_path = tf.train.latest_checkpoint(previous_model_location)
+    checkpoint.restore(checkpoint_file_path)
+    checkpoint_filename = os.path.basename(checkpoint_file_path)
+    num_found = re.search(r'\d+', checkpoint_filename)
+    if num_found:
+        start_epoch = int(num_found.group(0))
+        print("Resuming from epoch {}".format(str(start_epoch)))
+
+start_time = time.perf_counter()
+for epoch in range(0, n_epochs):
+
+    # randomly shuffle training set
+    indices = np.random.permutation(training_set_size)
+    X_train = X_train[indices]
+    y_train = y_train[indices]
+
+    # batch index
+    b_start = 0
+    b_end = b_start + batch_size
+    for _ in range(training_set_size // batch_size):
+        # get a batch
+        X_batch, y_batch = X_train[b_start: b_end], y_train[b_start: b_end]
+
+        # update batch index for the next batch
+        b_start = b_start + batch_size
+        b_end = min(b_start + batch_size, training_set_size)
+
+        # train
+        run_optimization(X_batch, y_batch)
+
+    # evaluate training set
+    pred = neural_net(X_batch, is_training=False)
+    acc_train = accuracy(pred, y_batch)
+
+    # evaluate validation set
+    pred = neural_net(X_test, is_training=False)
+    acc_val = accuracy(pred, y_test)
+
+    # log accuracies
+    run.log('training_acc', np.float(acc_train))
+    run.log('validation_acc', np.float(acc_val))
+    print(epoch, '-- Training accuracy:', acc_train, '\b Validation accuracy:', acc_val)
+
+    # Save checkpoints in the "./outputs" folder so that they are automatically uploaded into run history.
+    checkpoint_dir = './outputs/'
+    checkpoint = tf.train.Checkpoint(model=neural_net, optimizer=optimizer)
+
+    if epoch % 2 == 0:
+        checkpoint.save(checkpoint_dir)
+
+run.log('final_acc', np.float(acc_val))
+os.makedirs('./outputs/model', exist_ok=True)
+
+# files saved in the "./outputs" folder are automatically uploaded into run history
+# this is workaround for https://github.com/tensorflow/tensorflow/issues/33913 and will be fixed once we move to >tf2.1
+neural_net._set_inputs(X_train)
+tf.saved_model.save(neural_net, './outputs/model/')
+
+stop_time = time.perf_counter()
+training_time = (stop_time - start_time) * 1000
+print("Total time in milliseconds for training: {}".format(str(training_time)))
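The rewritten script above trains eagerly with `tf.GradientTape`, writes `tf.train.Checkpoint` files to `./outputs/`, and restores them when `--resume-from` points at a previous run's output folder. The standalone sketch below is not part of the diff; it only illustrates how that save/restore round trip behaves locally, with a toy `Sequential` model assumed as a stand-in for the script's `NeuralNet` class.

```python
import os
import tensorflow as tf

# Toy stand-in for the script's NeuralNet subclass.
net = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation="relu"),
    tf.keras.layers.Dense(10),
])
net(tf.zeros([1, 784]))                       # build the variables before checkpointing

optimizer = tf.optimizers.SGD(0.01)
checkpoint = tf.train.Checkpoint(model=net, optimizer=optimizer)

checkpoint_dir = './outputs/'                 # the folder the training script writes to
os.makedirs(checkpoint_dir, exist_ok=True)
save_path = checkpoint.save(checkpoint_dir)   # writes ./outputs/-1.index, -1.data-*, checkpoint
print('saved:', save_path)

# What the --resume-from branch does with a previous run's outputs folder:
latest = tf.train.latest_checkpoint(checkpoint_dir)
checkpoint.restore(latest)
print('restored from:', latest)
```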
@@ -278,9 +278,6 @@
 "# Enable Docker\n",
 "aml_run_config.environment.docker.enabled = True\n",
 "\n",
-"# Set Docker base image to the default CPU-based image\n",
-"aml_run_config.environment.docker.base_image = \"mcr.microsoft.com/azureml/base:0.2.1\"\n",
-"\n",
 "# Use conda_dependencies.yml to create a conda environment in the Docker image for execution\n",
 "aml_run_config.environment.python.user_managed_dependencies = False\n",
 "\n",
@@ -463,8 +460,8 @@
 " name=\"Merge Taxi Data\",\n",
 " script_name=\"merge.py\", \n",
 " arguments=[\"--output_merge\", merged_data],\n",
-" inputs=[cleansed_green_data.parse_parquet_files(file_extension=None),\n",
-" cleansed_yellow_data.parse_parquet_files(file_extension=None)],\n",
+" inputs=[cleansed_green_data.parse_parquet_files(),\n",
+" cleansed_yellow_data.parse_parquet_files()],\n",
 " outputs=[merged_data],\n",
 " compute_target=aml_compute,\n",
 " runconfig=aml_run_config,\n",
@@ -500,7 +497,7 @@
 " name=\"Filter Taxi Data\",\n",
 " script_name=\"filter.py\", \n",
 " arguments=[\"--output_filter\", filtered_data],\n",
-" inputs=[merged_data.parse_parquet_files(file_extension=None)],\n",
+" inputs=[merged_data.parse_parquet_files()],\n",
 " outputs=[filtered_data],\n",
 " compute_target=aml_compute,\n",
 " runconfig = aml_run_config,\n",
@@ -536,7 +533,7 @@
 " name=\"Normalize Taxi Data\",\n",
 " script_name=\"normalize.py\", \n",
 " arguments=[\"--output_normalize\", normalized_data],\n",
-" inputs=[filtered_data.parse_parquet_files(file_extension=None)],\n",
+" inputs=[filtered_data.parse_parquet_files()],\n",
 " outputs=[normalized_data],\n",
 " compute_target=aml_compute,\n",
 " runconfig = aml_run_config,\n",
@@ -577,7 +574,7 @@
 " name=\"Transform Taxi Data\",\n",
 " script_name=\"transform.py\", \n",
 " arguments=[\"--output_transform\", transformed_data],\n",
-" inputs=[normalized_data.parse_parquet_files(file_extension=None)],\n",
+" inputs=[normalized_data.parse_parquet_files()],\n",
 " outputs=[transformed_data],\n",
 " compute_target=aml_compute,\n",
 " runconfig = aml_run_config,\n",
@@ -617,7 +614,7 @@
 " script_name=\"train_test_split.py\", \n",
 " arguments=[\"--output_split_train\", output_split_train,\n",
 " \"--output_split_test\", output_split_test],\n",
-" inputs=[transformed_data.parse_parquet_files(file_extension=None)],\n",
+" inputs=[transformed_data.parse_parquet_files()],\n",
 " outputs=[output_split_train, output_split_test],\n",
 " compute_target=aml_compute,\n",
 " runconfig = aml_run_config,\n",
@@ -693,7 +690,7 @@
 " \"n_cross_validations\": 5\n",
 "}\n",
 "\n",
-"training_dataset = output_split_train.parse_parquet_files(file_extension=None).keep_columns(['pickup_weekday','pickup_hour', 'distance','passengers', 'vendor', 'cost'])\n",
+"training_dataset = output_split_train.parse_parquet_files().keep_columns(['pickup_weekday','pickup_hour', 'distance','passengers', 'vendor', 'cost'])\n",
 "\n",
 "automl_config = AutoMLConfig(task = 'regression',\n",
 " debug_log = 'automated_ml_errors.log',\n",
@@ -777,7 +774,7 @@
 "outputs": [],
 "source": [
 "# Before we proceed we need to wait for the run to complete.\n",
-"pipeline_run.wait_for_completion()\n",
+"pipeline_run.wait_for_completion(show_output=False)\n",
 "\n",
 "# functions to download output to local and fetch as dataframe\n",
 "def get_download_path(download_path, output_name):\n",
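Each pipeline step above now calls `parse_parquet_files()` with its default file-extension handling instead of passing `file_extension=None` explicitly. A rough sketch of the underlying pattern, assuming the intermediate outputs are `PipelineData` objects promoted with `as_dataset()`; the workspace lookup, dataset name, and column list here are illustrative, not taken from the notebook:

```python
from azureml.core import Workspace
from azureml.pipeline.core import PipelineData

ws = Workspace.from_config()                  # assumes an already configured workspace
datastore = ws.get_default_datastore()

# Intermediate output of one step, promoted to a dataset so later steps can consume it.
merged_data = PipelineData("merged_taxi_data", datastore=datastore).as_dataset()

# With this change the default parquet extension is used; file_extension=None is dropped.
merged_tabular = merged_data.parse_parquet_files()
training_view = merged_tabular.keep_columns(['distance', 'cost'])
```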
@@ -29,8 +29,8 @@ print("Argument 2(output final transformed taxi data): %s" % args.output_transfo
 # use the drop_columns() function to delete the original fields as the newly generated features are preferred.
 # Rename the rest of the fields to use meaningful descriptions.

-normalized_df = normalized_df.astype({"pickup_date": 'datetime64', "dropoff_date": 'datetime64',
-                                      "pickup_time": 'datetime64', "dropoff_time": 'datetime64',
+normalized_df = normalized_df.astype({"pickup_date": 'datetime64[ns]', "dropoff_date": 'datetime64[ns]',
+                                      "pickup_time": 'datetime64[us]', "dropoff_time": 'datetime64[us]',
                                       "distance": 'float64', "cost": 'float64'})

 normalized_df["pickup_weekday"] = normalized_df["pickup_date"].dt.dayofweek
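This change spells out the `datetime64` resolution (`[ns]` for the date columns, `[us]` for the time columns) rather than using the bare `'datetime64'` alias, which newer pandas releases reject in favor of an explicit unit. A small self-contained example of the same `astype` pattern, kept at `[ns]` throughout for portability across pandas versions:

```python
import pandas as pd

df = pd.DataFrame({
    "pickup_date": ["2020-01-01", "2020-01-02"],
    "pickup_time": ["2020-01-01 08:15:00", "2020-01-02 09:30:00"],
    "distance": ["1.5", "3.2"],
})

# Explicit units make the conversion unambiguous.
df = df.astype({
    "pickup_date": "datetime64[ns]",
    "pickup_time": "datetime64[ns]",
    "distance": "float64",
})

print(df.dtypes)
print(df["pickup_date"].dt.dayofweek)   # same .dt accessor the script uses afterwards
```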
how-to-use-azureml/ml-frameworks/README.md (new file, 11 lines)
@@ -0,0 +1,11 @@
+## Training and deployment examples with ML frameworks
+These sample notebooks show you how to train and deploy models with popular machine learning frameworks using Azure Machine Learning.
+
+1. [Scikit-learn](scikit-learn): Train, hyperparameter tune and deploy scikit-learn models.
+2. [PyTorch](pytorch): Train, hyperparameter tune and deploy PyTorch models. Distributed training with PyTorch.
+3. [TensorFlow](tensorflow): Train, hyperparameter tune and deploy TensorFlow models. Distributed training with TensorFlow.
+4. [Keras](keras): Train, hyperparameter tune and deploy Keras models.
+5. [Chainer](chainer): Train, hyperparameter tune and deploy Chainer models. Distributed training with Chainer.
+6. [Fastai](fastai): Train, hyperparameter tune and deploy Fastai models.
+
+
@@ -13,7 +13,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-""
+""
 ]
 },
 {
@@ -29,7 +29,7 @@
 "metadata": {},
 "source": [
 "## Prerequisites\n",
-"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`"
+"* If you are using an Azure Machine Learning compute instance, you are all set. Otherwise, go through the [Configuration](../../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`"
 ]
 },
 {
@@ -217,8 +217,9 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"### Create a Chainer estimator\n",
-"The Azure ML SDK's Chainer estimator enables you to easily submit Chainer training jobs for both single-node and distributed runs."
+"### Create an environment\n",
+"\n",
+"In this tutorial, we will use one of the Azure ML Chainer curated environments for training."
 ]
 },
 {
@@ -227,21 +228,36 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from azureml.train.dnn import Chainer, Mpi\n",
+"from azureml.core import Environment\n",
 "\n",
-"estimator = Chainer(source_directory=project_folder,\n",
-" compute_target=compute_target,\n",
-" entry_script='train_mnist.py',\n",
-" node_count=2,\n",
-" distributed_training=Mpi(),\n",
-" use_gpu=True)"
+"chainer_env = Environment.get(ws, name='AzureML-Chainer-5.1.0-GPU')"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"The above code specifies that we will run our training script on `2` nodes, with one worker per node. In order to execute a distributed run using MPI, you must provide the argument `distributed_backend=Mpi()`. To specify `i` workers per node, you must provide the argument `distributed_backend=Mpi(process_count_per_node=i)`.Using this estimator with these settings, Chainer and its dependencies will be installed for you. However, if your script also uses other packages, make sure to install them via the `Chainer` constructor's `pip_packages` or `conda_packages` parameters."
+"### Configure your training job\n",
+"\n",
+"Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on.\n",
+"\n",
+"In order to execute a distributed run using MPI, you must create an `MpiConfiguration` object and specify it to the `distributed_job_config` parameter. The below code will configure a 2-node distributed job. If you would also like to run multiple processes per node (i.e. if your cluster SKU has multiple GPUs), additionally specify the `process_count_per_node` parameter in MpiConfiguration."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"from azureml.core import ScriptRunConfig\n",
+"from azureml.core.runconfig import MpiConfiguration\n",
+"\n",
+"src = ScriptRunConfig(source_directory=project_folder,\n",
+" script='train_mnist.py',\n",
+" compute_target=compute_target,\n",
+" environment=chainer_env,\n",
+" distributed_job_config=MpiConfiguration(node_count=2))"
 ]
 },
 {
@@ -249,7 +265,7 @@
 "metadata": {},
 "source": [
 "### Submit job\n",
-"Run your experiment by submitting your estimator object. Note that this call is asynchronous."
+"Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous."
 ]
 },
 {
@@ -258,7 +274,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"run = experiment.submit(estimator)\n",
+"run = experiment.submit(src)\n",
 "print(run)"
 ]
 },
@@ -297,6 +313,22 @@
 "name": "ninhu"
 }
 ],
+"category": "training",
+"compute": [
+"AML Compute"
+],
+"datasets": [
+"MNIST"
+],
+"deployment": [
+"None"
+],
+"exclude_from_index": false,
+"framework": [
+"Chainer"
+],
+"friendly_name": "Distributed Training with Chainer",
+"index_order": 1,
 "kernelspec": {
 "display_name": "Python 3.6",
 "language": "python",
@@ -312,28 +344,12 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.6.6"
+"version": "3.7.7"
 },
-"friendly_name": "Distributed Training with Chainer",
-"exclude_from_index": false,
-"index_order": 1,
-"category": "training",
-"task": "Use the Chainer estimator to perform distributed training",
-"datasets": [
-"MNIST"
-],
-"compute": [
-"AML Compute"
-],
-"deployment": [
-"None"
-],
-"framework": [
-"Chainer"
-],
 "tags": [
 "None"
-]
+],
+"task": "Use the Chainer estimator to perform distributed training"
 },
 "nbformat": 4,
 "nbformat_minor": 2
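The updated markdown cell notes that `process_count_per_node` can be added to `MpiConfiguration` when each node runs more than one worker (for example on a multi-GPU SKU). A hedged sketch of that variant, reusing the `project_folder`, `compute_target`, `chainer_env`, and `experiment` objects assumed to be defined earlier in the notebook:

```python
from azureml.core import ScriptRunConfig
from azureml.core.runconfig import MpiConfiguration

# 2 nodes, 2 worker processes per node.
distributed_config = MpiConfiguration(node_count=2, process_count_per_node=2)

src = ScriptRunConfig(source_directory=project_folder,
                      script='train_mnist.py',
                      compute_target=compute_target,
                      environment=chainer_env,
                      distributed_job_config=distributed_config)

run = experiment.submit(src)
print(run)
```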
Some files were not shown because too many files have changed in this diff.