Mirror of https://github.com/Azure/MachineLearningNotebooks.git (synced 2025-12-20 01:27:06 -05:00)

Compare commits: 35 commits, release_up ... lostmygith
Commit SHA1s (author and date columns were not captured): 12905ef254, 4cf56eee91, d345ff6c37, 560dcac0a0, 322087a58c, e255c000ab, 7871e37ec0, 58e584e7eb, 1b0d75cb45, 5c38272fb4, e026c56f19, 4aad830f1c, c1b125025a, 9f364f7638, 4beb749a76, 04fe8c4580, 498018451a, 04305e33f0, d22e76d5e0, d71c482f75, 5775f8a78f, aae823ecd8, f1126e07f9, 0e4b27a233, 0a3d5f68a1, a6fe2affcb, ce469ddf6a, 9fe459be79, 89c35c8ed6, 33168c7f5d, 1d0766bd46, 9903e56882, a039166b90, 4e4bf48013, 0a2408300a
@@ -1,5 +1,7 @@
 # Azure Machine Learning service example notebooks
+
+> a community-driven repository of examples using MLflow for tracking can be found at https://github.com/Azure/azureml-examples

 This repository contains example notebooks demonstrating the [Azure Machine Learning](https://azure.microsoft.com/en-us/services/machine-learning-service/) Python SDK, which allows you to build, train, deploy and manage machine learning solutions using Azure. The AML SDK gives you the choice of using local or cloud compute resources, while managing and maintaining the complete data science workflow from the cloud.

 
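For orientation, a minimal sketch of the workflow this README describes, using only SDK calls that appear verbatim elsewhere in this diff (`Workspace.from_config`, `Experiment`); the experiment name is a placeholder and a `config.json` downloaded from the Azure portal is assumed to sit in the working directory:

```python
# Minimal connection sketch; assumes a config.json from the Azure portal is present.
from azureml.core import Workspace
from azureml.core.experiment import Experiment

ws = Workspace.from_config()                       # authenticate and bind to the workspace
experiment = Experiment(ws, 'example-experiment')  # placeholder experiment name
print(ws.name, ws.resource_group, ws.location)     # quick sanity check of the binding
```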
@@ -103,7 +103,7 @@
 "source": [
 "import azureml.core\n",
 "\n",
-"print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -82,8 +82,7 @@
 "from sklearn import svm\n",
 "from sklearn.preprocessing import LabelEncoder, StandardScaler\n",
 "from sklearn.linear_model import LogisticRegression\n",
-"import pandas as pd\n",
-"import shap"
+"import pandas as pd"
 ]
 },
 {
@@ -99,8 +98,12 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"X_raw, Y = shap.datasets.adult()\n",
-"X_raw[\"Race\"].value_counts().to_dict()"
+"from sklearn.datasets import fetch_openml\n",
+"data = fetch_openml(data_id=1590, as_frame=True)\n",
+"X_raw = data.data\n",
+"Y = (data.target == '>50K') * 1\n",
+"\n",
+"X_raw[\"race\"].value_counts().to_dict()"
 ]
 },
 {
@@ -116,9 +119,13 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"A = X_raw[['Sex','Race']]\n",
-"X = X_raw.drop(labels=['Sex', 'Race'],axis = 1)\n",
-"X = pd.get_dummies(X)\n",
+"A = X_raw[['sex','race']]\n",
+"X = X_raw.drop(labels=['sex', 'race'],axis = 1)\n",
+"X_dummies = pd.get_dummies(X)\n",
+"\n",
+"sc = StandardScaler()\n",
+"X_scaled = sc.fit_transform(X_dummies)\n",
+"X_scaled = pd.DataFrame(X_scaled, columns=X_dummies.columns)\n",
 "\n",
 "\n",
 "le = LabelEncoder()\n",
@@ -139,7 +146,7 @@
 "outputs": [],
 "source": [
 "from sklearn.model_selection import train_test_split\n",
-"X_train, X_test, Y_train, Y_test, A_train, A_test = train_test_split(X_raw, \n",
+"X_train, X_test, Y_train, Y_test, A_train, A_test = train_test_split(X_scaled, \n",
 "                                                                     Y, \n",
 "                                                                     A,\n",
 "                                                                     test_size = 0.2,\n",
@@ -150,18 +157,7 @@
 "X_train = X_train.reset_index(drop=True)\n",
 "A_train = A_train.reset_index(drop=True)\n",
 "X_test = X_test.reset_index(drop=True)\n",
-"A_test = A_test.reset_index(drop=True)\n",
-"\n",
-"# Improve labels\n",
-"A_test.Sex.loc[(A_test['Sex'] == 0)] = 'female'\n",
-"A_test.Sex.loc[(A_test['Sex'] == 1)] = 'male'\n",
-"\n",
-"\n",
-"A_test.Race.loc[(A_test['Race'] == 0)] = 'Amer-Indian-Eskimo'\n",
-"A_test.Race.loc[(A_test['Race'] == 1)] = 'Asian-Pac-Islander'\n",
-"A_test.Race.loc[(A_test['Race'] == 2)] = 'Black'\n",
-"A_test.Race.loc[(A_test['Race'] == 3)] = 'Other'\n",
-"A_test.Race.loc[(A_test['Race'] == 4)] = 'White'"
+"A_test = A_test.reset_index(drop=True)"
 ]
 },
 {
@@ -251,7 +247,7 @@
 "outputs": [],
 "source": [
 "sweep.fit(X_train, Y_train,\n",
-"          sensitive_features=A_train.Sex)\n",
+"          sensitive_features=A_train.sex)\n",
 "\n",
 "predictors = sweep._predictors"
 ]
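The `sweep` object that this hunk fits is constructed earlier in the notebook, outside the hunks captured here. A hedged sketch of what that construction typically looks like against the fairlearn 0.4.6 pin declared below; the base estimator and `grid_size` are illustrative assumptions, not the committed values:

```python
# Hypothetical reconstruction of the sweep setup implied by the hunks above (fairlearn 0.4.6-era API).
from fairlearn.reductions import GridSearch, DemographicParity
from sklearn.linear_model import LogisticRegression

sweep = GridSearch(LogisticRegression(solver='liblinear'),  # assumed base estimator
                   constraints=DemographicParity(),         # the parity constraint used above
                   grid_size=70)                            # illustrative grid resolution
# sweep.fit(X_train, Y_train, sensitive_features=A_train.sex)  # as in the hunk above
```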
@@ -274,9 +270,9 @@
 "    classifier = lambda X: m.predict(X)\n",
 "    \n",
 "    error = ErrorRate()\n",
-"    error.load_data(X_train, pd.Series(Y_train), sensitive_features=A_train.Sex)\n",
+"    error.load_data(X_train, pd.Series(Y_train), sensitive_features=A_train.sex)\n",
 "    disparity = DemographicParity()\n",
-"    disparity.load_data(X_train, pd.Series(Y_train), sensitive_features=A_train.Sex)\n",
+"    disparity.load_data(X_train, pd.Series(Y_train), sensitive_features=A_train.sex)\n",
 "    \n",
 "    errors.append(error.gamma(classifier)[0])\n",
 "    disparities.append(disparity.gamma(classifier).max())\n",
@@ -440,7 +436,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"sf = { 'sex': A_test.Sex, 'race': A_test.Race }\n",
+"sf = { 'sex': A_test.sex, 'race': A_test.race }\n",
 "\n",
 "from fairlearn.metrics._group_metric_set import _create_group_metric_set\n",
 "\n",
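The hunk above stops just before the dashboard dictionary is assembled. A hedged sketch of how `_create_group_metric_set` and the upload step typically fit together in these fairness notebooks; `model`, `experiment`, and the dashboard name are illustrative assumptions:

```python
# Hypothetical continuation of the cell above (azureml-contrib-fairness, fairlearn 0.4.6 era).
from fairlearn.metrics._group_metric_set import _create_group_metric_set
from azureml.contrib.fairness import upload_dashboard_dictionary, download_dashboard_by_upload_id

ys_pred = {'unmitigated_model': model.predict(X_test)}   # assumed predictor handle(s)
dash_dict = _create_group_metric_set(y_true=Y_test,
                                     predictions=ys_pred,
                                     sensitive_features=sf,
                                     prediction_type='binary_classification')

run = experiment.start_logging()  # assumed Experiment object from earlier in the notebook
try:
    dashboard_id = upload_dashboard_dictionary(run, dash_dict,
                                               dashboard_name="fairness_dashboard")
    _ = download_dashboard_by_upload_id(run, dashboard_id)  # optional round-trip check
finally:
    run.complete()
```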
contrib/fairness/fairlearn-azureml-mitigation.yml (new file, +7 lines)
@@ -0,0 +1,7 @@
name: fairlearn-azureml-mitigation
dependencies:
- pip:
  - azureml-sdk
  - azureml-contrib-fairness
  - fairlearn==0.4.6
  - joblib
@@ -82,8 +82,7 @@
 "from sklearn import svm\n",
 "from sklearn.preprocessing import LabelEncoder, StandardScaler\n",
 "from sklearn.linear_model import LogisticRegression\n",
-"import pandas as pd\n",
-"import shap"
+"import pandas as pd"
 ]
 },
 {
@@ -99,7 +98,10 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"X_raw, Y = shap.datasets.adult()"
+"from sklearn.datasets import fetch_openml\n",
+"data = fetch_openml(data_id=1590, as_frame=True)\n",
+"X_raw = data.data\n",
+"Y = (data.target == '>50K') * 1"
 ]
 },
 {
@@ -115,7 +117,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(X_raw[\"Race\"].value_counts().to_dict())"
+"print(X_raw[\"race\"].value_counts().to_dict())"
 ]
 },
 {
@@ -134,9 +136,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"A = X_raw[['Sex','Race']]\n",
-"X = X_raw.drop(labels=['Sex', 'Race'],axis = 1)\n",
-"X = pd.get_dummies(X)"
+"A = X_raw[['sex','race']]\n",
+"X = X_raw.drop(labels=['sex', 'race'],axis = 1)\n",
+"X_dummies = pd.get_dummies(X)"
 ]
 },
 {
@@ -153,8 +155,8 @@
 "outputs": [],
 "source": [
 "sc = StandardScaler()\n",
-"X_scaled = sc.fit_transform(X)\n",
-"X_scaled = pd.DataFrame(X_scaled, columns=X.columns)\n",
+"X_scaled = sc.fit_transform(X_dummies)\n",
+"X_scaled = pd.DataFrame(X_scaled, columns=X_dummies.columns)\n",
 "\n",
 "le = LabelEncoder()\n",
 "Y = le.fit_transform(Y)"
@@ -185,18 +187,7 @@
 "X_train = X_train.reset_index(drop=True)\n",
 "A_train = A_train.reset_index(drop=True)\n",
 "X_test = X_test.reset_index(drop=True)\n",
-"A_test = A_test.reset_index(drop=True)\n",
-"\n",
-"# Improve labels\n",
-"A_test.Sex.loc[(A_test['Sex'] == 0)] = 'female'\n",
-"A_test.Sex.loc[(A_test['Sex'] == 1)] = 'male'\n",
-"\n",
-"\n",
-"A_test.Race.loc[(A_test['Race'] == 0)] = 'Amer-Indian-Eskimo'\n",
-"A_test.Race.loc[(A_test['Race'] == 1)] = 'Asian-Pac-Islander'\n",
-"A_test.Race.loc[(A_test['Race'] == 2)] = 'Black'\n",
-"A_test.Race.loc[(A_test['Race'] == 3)] = 'Other'\n",
-"A_test.Race.loc[(A_test['Race'] == 4)] = 'White'"
+"A_test = A_test.reset_index(drop=True)"
 ]
 },
 {
@@ -380,7 +371,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"sf = { 'Race': A_test.Race, 'Sex': A_test.Sex }\n",
+"sf = { 'Race': A_test.race, 'Sex': A_test.sex }\n",
 "\n",
 "from fairlearn.metrics._group_metric_set import _create_group_metric_set\n",
 "\n",
@@ -499,7 +490,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.6.8"
+"version": "3.6.10"
 }
 },
 "nbformat": 4,
contrib/fairness/upload-fairness-dashboard.yml (new file, +7 lines)
@@ -0,0 +1,7 @@
name: upload-fairness-dashboard
dependencies:
- pip:
  - azureml-sdk
  - azureml-contrib-fairness
  - fairlearn==0.4.6
  - joblib
@@ -97,62 +97,96 @@ jupyter notebook
 <a name="databricks"></a>
 ## Setup using Azure Databricks

-**NOTE**: Please create your Azure Databricks cluster as v6.0 (high concurrency preferred) with **Python 3** (dropdown).
+**NOTE**: Please create your Azure Databricks cluster as v7.1 (high concurrency preferred) with **Python 3** (dropdown).
 **NOTE**: You should have at least contributor access to your Azure subscription to run the notebook.
 - Please remove the previous SDK version if there is any, and install the latest SDK by installing **azureml-sdk[automl]** as a PyPI library in the Azure Databricks workspace (see the sketch just after this section).
-- You can find the detailed README instructions at [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks).
-- Download the sample notebook automl-databricks-local-01.ipynb from [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks) and import it into the Azure Databricks workspace.
+- You can find the detailed README instructions at [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks/automl).
+- Download the sample notebook automl-databricks-local-01.ipynb from [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks/automl) and import it into the Azure Databricks workspace.
 - Attach the notebook to the cluster.
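A minimal sketch of that install step as a Databricks notebook cell, assuming a Databricks Runtime 7.1+ cluster where the `%pip` magic is available (installing **azureml-sdk[automl]** as a cluster library through the UI works equally well):

```python
# First cell of a Databricks notebook attached to the cluster; %pip needs DBR 7.1+.
%pip install --upgrade azureml-sdk[automl]

import azureml.core
print("AzureML SDK version:", azureml.core.VERSION)  # verify the install took effect
```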
<a name="samples"></a>
|
||||
# Automated ML SDK Sample Notebooks
|
||||
|
||||
- [auto-ml-classification-credit-card-fraud.ipynb](classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb)
|
||||
- Dataset: Kaggle's [credit card fraud detection dataset](https://www.kaggle.com/mlg-ulb/creditcardfraud)
|
||||
- Simple example of using automated ML for classification to fraudulent credit card transactions
|
||||
- Uses azure compute for training
|
||||
## Classification
|
||||
- **Classify Credit Card Fraud**
|
||||
- Dataset: [Kaggle's credit card fraud detection dataset](https://www.kaggle.com/mlg-ulb/creditcardfraud)
|
||||
- **[Jupyter Notebook (remote run)](classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb)**
|
||||
- run the experiment remotely on AML Compute cluster
|
||||
- test the performance of the best model in the local environment
|
||||
- **[Jupyter Notebook (local run)](local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb)**
|
||||
- run experiment in the local environment
|
||||
- use Mimic Explainer for computing feature importance
|
||||
- deploy the best model along with the explainer to an Azure Kubernetes (AKS) cluster, which will compute the raw and engineered feature importances at inference time
|
||||
- **Predict Term Deposit Subscriptions in a Bank**
|
||||
- Dataset: [UCI's bank marketing dataset](https://www.kaggle.com/janiobachmann/bank-marketing-dataset)
|
||||
- **[Jupyter Notebook](classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb)**
|
||||
- run experiment remotely on AML Compute cluster to generate ONNX compatible models
|
||||
- view the featurization steps that were applied during training
|
||||
- view feature importance for the best model
|
||||
- download the best model in ONNX format and use it for inferencing using ONNXRuntime
|
||||
- deploy the best model in PKL format to Azure Container Instance (ACI)
|
||||
- **Predict Newsgroup based on Text from News Article**
|
||||
- Dataset: [20 newsgroups text dataset](https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html)
|
||||
- **[Jupyter Notebook](classification-text-dnn/auto-ml-classification-text-dnn.ipynb)**
|
||||
- AutoML highlights here include using deep neural networks (DNNs) to create embedded features from text data
|
||||
- AutoML will use Bidirectional Encoder Representations from Transformers (BERT) when a GPU compute is used
|
||||
- Bidirectional Long-Short Term neural network (BiLSTM) will be utilized when a CPU compute is used, thereby optimizing the choice of DNN
|
||||
|
||||
- [auto-ml-regression.ipynb](regression/auto-ml-regression.ipynb)
|
||||
## Regression
|
||||
- **Predict Performance of Hardware Parts**
|
||||
- Dataset: Hardware Performance Dataset
|
||||
- Simple example of using automated ML for regression
|
||||
- Uses azure compute for training
|
||||
- **[Jupyter Notebook](regression/auto-ml-regression.ipynb)**
|
||||
- run the experiment remotely on AML Compute cluster
|
||||
- get best trained model for a different metric than the one the experiment was optimized for
|
||||
- test the performance of the best model in the local environment
|
||||
- **[Jupyter Notebook (advanced)](regression/auto-ml-regression.ipynb)**
|
||||
- run the experiment remotely on AML Compute cluster
|
||||
- customize featurization: override column purpose within the dataset, configure transformer parameters
|
||||
- get best trained model for a different metric than the one the experiment was optimized for
|
||||
- run a model explanation experiment on the remote cluster
|
||||
- deploy the model along the explainer and run online inferencing
|
||||
|
||||
- [auto-ml-regression-explanation-featurization.ipynb](regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb)
|
||||
- Dataset: Hardware Performance Dataset
|
||||
- Shows featurization and excplanation
|
||||
- Uses azure compute for training
|
||||
|
||||
- [auto-ml-forecasting-energy-demand.ipynb](forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb)
|
||||
- Dataset: [NYC energy demand data](forecasting-a/nyc_energy.csv)
|
||||
- Example of using automated ML for training a forecasting model
|
||||
|
||||
- [auto-ml-classification-credit-card-fraud-local.ipynb](local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb)
|
||||
- Dataset: Kaggle's [credit card fraud detection dataset](https://www.kaggle.com/mlg-ulb/creditcardfraud)
|
||||
- Simple example of using automated ML for classification to fraudulent credit card transactions
|
||||
- Uses local compute for training
|
||||
|
||||
- [auto-ml-classification-bank-marketing-all-features.ipynb](classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb)
|
||||
- Dataset: UCI's [bank marketing dataset](https://www.kaggle.com/janiobachmann/bank-marketing-dataset)
|
||||
- Simple example of using automated ML for classification to predict term deposit subscriptions for a bank
|
||||
- Uses azure compute for training
|
||||
|
||||
- [auto-ml-forecasting-orange-juice-sales.ipynb](forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb)
|
||||
- Dataset: [Dominick's grocery sales of orange juice](forecasting-b/dominicks_OJ.csv)
|
||||
- Example of training an automated ML forecasting model on multiple time-series
|
||||
|
||||
- [auto-ml-forecasting-bike-share.ipynb](forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb)
|
||||
- Dataset: forecasting for a bike-sharing
|
||||
- Example of training an automated ML forecasting model on multiple time-series
|
||||
|
||||
- [auto-ml-forecasting-function.ipynb](forecasting-forecast-function/auto-ml-forecasting-function.ipynb)
|
||||
- Example of training an automated ML forecasting model on multiple time-series
|
||||
|
||||
- [auto-ml-forecasting-beer-remote.ipynb](forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb)
|
||||
- Example of training an automated ML forecasting model on multiple time-series
|
||||
- Beer Production Forecasting
|
||||
|
||||
- [auto-ml-continuous-retraining.ipynb](continuous-retraining/auto-ml-continuous-retraining.ipynb)
|
||||
- Continuous retraining using Pipelines and Time-Series TabularDataset
|
||||
## Time Series Forecasting
|
||||
- **Forecast Energy Demand**
|
||||
- Dataset: [NYC energy demand data](http://mis.nyiso.com/public/P-58Blist.htm)
|
||||
- **[Jupyter Notebook](forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb)**
|
||||
- run experiment remotely on AML Compute cluster
|
||||
- use lags and rolling window features
|
||||
- view the featurization steps that were applied during training
|
||||
- get the best model, use it to forecast on test data and compare the accuracy of predictions against real data
|
||||
- **Forecast Orange Juice Sales (Multi-Series)**
|
||||
- Dataset: [Dominick's grocery sales of orange juice](forecasting-orange-juice-sales/dominicks_OJ.csv)
|
||||
- **[Jupyter Notebook](forecasting-orange-juice-sales/dominicks_OJ.csv)**
|
||||
- run experiment remotely on AML Compute cluster
|
||||
- customize time-series featurization, change column purpose and override transformer hyper parameters
|
||||
- evaluate locally the performance of the generated best model
|
||||
- deploy the best model as a webservice on Azure Container Instance (ACI)
|
||||
- get online predictions from the deployed model
|
||||
- **Forecast Demand of a Bike-Sharing Service**
|
||||
- Dataset: [Bike demand data](forecasting-bike-share/bike-no.csv)
|
||||
- **[Jupyter Notebook](forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb)**
|
||||
- run experiment remotely on AML Compute cluster
|
||||
- integrate holiday features
|
||||
- run rolling forecast for test set that is longer than the forecast horizon
|
||||
- compute metrics on the predictions from the remote forecast
|
||||
- **The Forecast Function Interface**
|
||||
- Dataset: Generated for sample purposes
|
||||
- **[Jupyter Notebook](forecasting-forecast-function/auto-ml-forecasting-function.ipynb)**
|
||||
- train a forecaster using a remote AML Compute cluster
|
||||
- capabilities of forecast function (e.g. forecast farther into the horizon)
|
||||
- generate confidence intervals
|
||||
- **Forecast Beverage Production**
|
||||
- Dataset: [Monthly beer production data](forecasting-beer-remote/Beer_no_valid_split_train.csv)
|
||||
- **[Jupyter Notebook](forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb)**
|
||||
- train using a remote AML Compute cluster
|
||||
- enable the DNN learning model
|
||||
- forecast on a remote compute cluster and compare different model performance
|
||||
- **Continuous Retraining with NOAA Weather Data**
|
||||
- Dataset: [NOAA weather data from Azure Open Datasets](https://azure.microsoft.com/en-us/services/open-datasets/)
|
||||
- **[Jupyter Notebook](continuous-retraining/auto-ml-continuous-retraining.ipynb)**
|
||||
- continuously retrain a model using Pipelines and AutoML
|
||||
- create a Pipeline to upload a time series dataset to an Azure blob
|
||||
- create a Pipeline to run an AutoML experiment and register the best resulting model in the Workspace
|
||||
- publish the training pipeline created and schedule it to run daily
|
||||
|
||||
<a name="documentation"></a>
|
||||
See [Configure automated machine learning experiments](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-auto-train) to learn how more about the the settings and features available for automated machine learning experiments.
|
||||
@@ -173,7 +207,7 @@ The main code of the file must be indented so that it is under this condition.
 ## automl_setup fails
 1. On Windows, make sure that you are running automl_setup from an Anaconda Prompt window rather than a regular cmd window. You can launch the "Anaconda Prompt" window by hitting the Start button and typing "Anaconda Prompt". If you don't see the application "Anaconda Prompt", you might not have conda or Miniconda installed. In that case, you can install it [here](https://conda.io/miniconda.html).
 2. Check that you have 64-bit conda installed rather than 32-bit. You can check this with the command `conda info`. The `platform` should be `win-64` for Windows or `osx-64` for Mac.
-3. Check that you have conda 4.4.10 or later. You can check the version with the command `conda -V`. If you have a previous version installed, you can update it using the command: `conda update conda`.
+3. Check that you have conda 4.7.8 or later. You can check the version with the command `conda -V`. If you have a previous version installed, you can update it using the command: `conda update conda`.
 4. On Linux, if the error is `gcc: error trying to exec 'cc1plus': execvp: No such file or directory`, install build essentials using the command `sudo apt-get install build-essential`.
 5. Pass a new name as the first parameter to automl_setup so that it creates a new conda environment. You can view existing conda environments using `conda env list` and remove them with `conda env remove -n <environmentname>`.
@@ -6,10 +6,10 @@ dependencies:
 - python>=3.5.2,<3.6.8
 - nb_conda
 - matplotlib==2.1.0
-- numpy~=1.18.0
+- numpy==1.18.5
 - cython
 - urllib3<1.24
-- scipy==1.4.1
+- scipy>=1.4.1,<=1.5.2
 - scikit-learn==0.22.1
 - pandas==0.25.1
 - py-xgboost<=0.90
@@ -24,5 +24,5 @@ dependencies:
 - pytorch-transformers==1.0.0
 - spacy==2.1.8
 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-- -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.14.0/validated_win32_requirements.txt [--no-deps]
+- -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.17.0/validated_win32_requirements.txt [--no-deps]
@@ -6,10 +6,10 @@ dependencies:
 - python>=3.5.2,<3.6.8
 - nb_conda
 - matplotlib==2.1.0
-- numpy~=1.18.0
+- numpy==1.18.5
 - cython
 - urllib3<1.24
-- scipy==1.4.1
+- scipy>=1.4.1,<=1.5.2
 - scikit-learn==0.22.1
 - pandas==0.25.1
 - py-xgboost<=0.90
@@ -24,5 +24,5 @@ dependencies:
 - pytorch-transformers==1.0.0
 - spacy==2.1.8
 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-- -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.14.0/validated_linux_requirements.txt [--no-deps]
+- -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.17.0/validated_linux_requirements.txt [--no-deps]
@@ -7,10 +7,10 @@ dependencies:
 - python>=3.5.2,<3.6.8
 - nb_conda
 - matplotlib==2.1.0
-- numpy~=1.18.0
+- numpy==1.18.5
 - cython
 - urllib3<1.24
-- scipy==1.4.1
+- scipy>=1.4.1,<=1.5.2
 - scikit-learn==0.22.1
 - pandas==0.25.1
 - py-xgboost<=0.90
@@ -25,4 +25,4 @@ dependencies:
 - pytorch-transformers==1.0.0
 - spacy==2.1.8
 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-- -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.14.0/validated_darwin_requirements.txt [--no-deps]
+- -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.17.0/validated_darwin_requirements.txt [--no-deps]
@@ -6,11 +6,22 @@ set PIP_NO_WARN_SCRIPT_LOCATION=0
 IF "%conda_env_name%"=="" SET conda_env_name="azure_automl"
 IF "%automl_env_file%"=="" SET automl_env_file="automl_env.yml"
+SET check_conda_version_script="check_conda_version.py"

 IF NOT EXIST %automl_env_file% GOTO YmlMissing

 IF "%CONDA_EXE%"=="" GOTO CondaMissing

+IF NOT EXIST %check_conda_version_script% GOTO VersionCheckMissing
+
+python "%check_conda_version_script%"
+IF errorlevel 1 GOTO ErrorExit:
+
+SET replace_version_script="replace_latest_version.ps1"
+IF EXIST %replace_version_script% (
+    powershell -file %replace_version_script% %automl_env_file%
+)
+
 call conda activate %conda_env_name% 2>nul:

 if not errorlevel 1 (
@@ -54,6 +65,10 @@ echo If you are running an older version of Miniconda or Anaconda,
 echo you can upgrade using the command: conda update conda
 goto End

+:VersionCheckMissing
+echo File %check_conda_version_script% not found.
+goto End
+
 :YmlMissing
 echo File %automl_env_file% not found.
@@ -4,6 +4,7 @@ CONDA_ENV_NAME=$1
 AUTOML_ENV_FILE=$2
 OPTIONS=$3
 PIP_NO_WARN_SCRIPT_LOCATION=0
+CHECK_CONDA_VERSION_SCRIPT="check_conda_version.py"

 if [ "$CONDA_ENV_NAME" == "" ]
 then
@@ -20,6 +21,18 @@ if [ ! -f $AUTOML_ENV_FILE ]; then
     exit 1
 fi

+if [ ! -f $CHECK_CONDA_VERSION_SCRIPT ]; then
+    echo "File $CHECK_CONDA_VERSION_SCRIPT not found"
+    exit 1
+fi
+
+python "$CHECK_CONDA_VERSION_SCRIPT"
+if [ $? -ne 0 ]; then
+    exit 1
+fi
+
 sed -i 's/AZUREML-SDK-VERSION/latest/' $AUTOML_ENV_FILE

 if source activate $CONDA_ENV_NAME 2> /dev/null
 then
     echo "Upgrading existing conda environment" $CONDA_ENV_NAME
@@ -4,6 +4,7 @@ CONDA_ENV_NAME=$1
 AUTOML_ENV_FILE=$2
 OPTIONS=$3
 PIP_NO_WARN_SCRIPT_LOCATION=0
+CHECK_CONDA_VERSION_SCRIPT="check_conda_version.py"

 if [ "$CONDA_ENV_NAME" == "" ]
 then
@@ -20,6 +21,18 @@ if [ ! -f $AUTOML_ENV_FILE ]; then
     exit 1
 fi

+if [ ! -f $CHECK_CONDA_VERSION_SCRIPT ]; then
+    echo "File $CHECK_CONDA_VERSION_SCRIPT not found"
+    exit 1
+fi
+
+python "$CHECK_CONDA_VERSION_SCRIPT"
+if [ $? -ne 0 ]; then
+    exit 1
+fi
+
 sed -i '' 's/AZUREML-SDK-VERSION/latest/' $AUTOML_ENV_FILE

 if source activate $CONDA_ENV_NAME 2> /dev/null
 then
     echo "Upgrading existing conda environment" $CONDA_ENV_NAME
check_conda_version.py (new file, +26 lines)
@@ -0,0 +1,26 @@
from distutils.version import LooseVersion
import platform

try:
    import conda
except:
    print('Failed to import conda.')
    print('This setup is usually run from the base conda environment.')
    print('You can activate the base environment using the command "conda activate base"')
    exit(1)

architecture = platform.architecture()[0]

if architecture != "64bit":
    print('This setup requires 64bit Anaconda or Miniconda. Found: ' + architecture)
    exit(1)

minimumVersion = "4.7.8"

versionInvalid = (LooseVersion(conda.__version__) < LooseVersion(minimumVersion))

if versionInvalid:
    print('Setup requires conda version ' + minimumVersion + ' or higher.')
    print('You can use the command "conda update conda" to upgrade conda.')

exit(versionInvalid)
@@ -89,7 +89,7 @@
 "from azureml.automl.core.featurization import FeaturizationConfig\n",
 "from azureml.core.dataset import Dataset\n",
 "from azureml.train.automl import AutoMLConfig\n",
-"from azureml.interpret._internal.explanation_client import ExplanationClient"
+"from azureml.interpret import ExplanationClient"
 ]
 },
 {
@@ -105,7 +105,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -93,7 +93,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -424,15 +424,26 @@
 "source": [
 "This Credit Card fraud Detection dataset is made available under the Open Database License: http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/ and is available at: https://www.kaggle.com/mlg-ulb/creditcardfraud\n",
 "\n",
-"The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Université Libre de Bruxelles) on big data mining and fraud detection.\n",
-"More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project\n",
-"\n",
+"The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Université Libre de Bruxelles) on big data mining and fraud detection. More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project\n",
-"Please cite the following works:\n",
-"• Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015\n",
-"• Dal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. Learned lessons in credit card fraud detection from a practitioner perspective, Expert Systems with Applications, 41, 10, 4915-4928, 2014, Pergamon\n",
-"• Dal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE Transactions on Neural Networks and Learning Systems, 29, 8, 3784-3797, 2018, IEEE\n",
-"• Dal Pozzolo, Andrea. Adaptive Machine Learning for credit card fraud detection, ULB MLG PhD thesis (supervised by G. Bontempi)\n",
-"• Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-Aël; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information Fusion, 41, 182-194, 2018, Elsevier\n",
-"• Carcillo, Fabrizio; Le Borgne, Yann-Aël; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5, 4, 285-300, 2018, Springer International Publishing"
+"Please cite the following works:\n",
+"\n",
+"Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015\n",
+"\n",
+"Dal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. Learned lessons in credit card fraud detection from a practitioner perspective, Expert Systems with Applications, 41, 10, 4915-4928, 2014, Pergamon\n",
+"\n",
+"Dal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE Transactions on Neural Networks and Learning Systems, 29, 8, 3784-3797, 2018, IEEE\n",
+"\n",
+"Dal Pozzolo, Andrea. Adaptive Machine Learning for credit card fraud detection, ULB MLG PhD thesis (supervised by G. Bontempi)\n",
+"\n",
+"Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-Aël; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information Fusion, 41, 182-194, 2018, Elsevier\n",
+"\n",
+"Carcillo, Fabrizio; Le Borgne, Yann-Aël; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5, 4, 285-300, 2018, Springer International Publishing\n",
+"\n",
+"Bertrand Lebichot, Yann-Aël Le Borgne, Liyun He, Frederic Oblé, Gianluca Bontempi. Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection, INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019\n",
+"\n",
+"Fabrizio Carcillo, Yann-Aël Le Borgne, Olivier Caelen, Frederic Oblé, Gianluca Bontempi. Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection, Information Sciences, 2019"
 ]
 }
 ],
classification-text-dnn/auto-ml-classification-text-dnn.ipynb (new file, +592 lines)
@@ -0,0 +1,592 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated Machine Learning\n",
"_**Text Classification Using Deep Learning**_\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Data](#Data)\n",
"1. [Train](#Train)\n",
"1. [Evaluate](#Evaluate)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"This notebook demonstrates classification with text data using deep learning in AutoML.\n",
"\n",
"AutoML highlights here include using deep neural networks (DNNs) to create embedded features from text data. Depending on the compute cluster the user provides, AutoML tries out Bidirectional Encoder Representations from Transformers (BERT) when a GPU compute is used, and a Bidirectional Long Short-Term Memory neural network (BiLSTM) when a CPU compute is used, thereby optimizing the choice of DNN for the user's setup.\n",
"\n",
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
"\n",
"An Enterprise workspace is required for this notebook. To learn more about creating an Enterprise workspace or upgrading to an Enterprise workspace from the Azure portal, please visit our [Workspace page](https://docs.microsoft.com/azure/machine-learning/service/concept-workspace#upgrade).\n",
"\n",
"Notebook synopsis:\n",
"1. Creating an Experiment in an existing Workspace\n",
"2. Configuration and remote run of AutoML for a text dataset (20 Newsgroups dataset from scikit-learn) for classification\n",
"3. Registering the best model for future use\n",
"4. Evaluating the final model on a test set"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"import os\n",
"import shutil\n",
"\n",
"import pandas as pd\n",
"\n",
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core.dataset import Dataset\n",
"from azureml.core.compute import AmlCompute\n",
"from azureml.core.compute import ComputeTarget\n",
"from azureml.core.run import Run\n",
"from azureml.widgets import RunDetails\n",
"from azureml.core.model import Model\n",
"from helper import run_inference, get_result_df\n",
"from azureml.train.automl import AutoMLConfig\n",
"from sklearn.datasets import fetch_20newsgroups"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As part of the setup you have already created a <b>Workspace</b>. To run AutoML, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve, while a Run corresponds to a specific approach to the problem."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# Choose an experiment name.\n",
"experiment_name = 'automl-classification-text-dnn'\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace Name'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Set up a compute cluster\n",
"This section uses a user-provided compute cluster (named \"dnntext-cluster\" in this example). If a cluster with this name does not exist in the user's workspace, the code below will create a new cluster. You can choose the parameters of the cluster as mentioned in the comments.\n",
"\n",
"Whether you provide/select a CPU or GPU cluster, AutoML will choose the appropriate DNN for that setup - the BiLSTM or BERT text featurizer will be included in the candidate featurizers on CPU and GPU respectively. If your goal is to obtain the most accurate model, we recommend you use GPU clusters, since BERT featurizers usually outperform BiLSTM featurizers."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"num_nodes = 2\n",
"\n",
"# Choose a name for your cluster.\n",
"amlcompute_cluster_name = \"dnntext-cluster\"\n",
"\n",
"# Verify that the cluster does not exist already\n",
"try:\n",
"    compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
"    print('Found existing cluster, use it.')\n",
"except ComputeTargetException:\n",
"    compute_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_NC6\", # CPU for BiLSTM, such as \"STANDARD_D2_V2\"\n",
"                                                           # To use BERT (this is recommended for best performance), select a GPU such as \"STANDARD_NC6\"\n",
"                                                           # or similar GPU option\n",
"                                                           # available in your workspace\n",
"                                                           max_nodes = num_nodes)\n",
"    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Get data\n",
"For this notebook we will use 20 Newsgroups data from scikit-learn. We filter the data to contain four classes and take a sample as training data. Please note that for accuracy improvement, more data is needed. For this notebook we provide a small-data example so that you can use this template with your larger-sized data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data_dir = \"text-dnn-data\" # Local directory to store data\n",
"blobstore_datadir = data_dir # Blob store directory to store data in\n",
"target_column_name = 'y'\n",
"feature_column_name = 'X'\n",
"\n",
"def get_20newsgroups_data():\n",
"    '''Fetches 20 Newsgroups data from scikit-learn\n",
"    Returns them in form of pandas dataframes\n",
"    '''\n",
"    remove = ('headers', 'footers', 'quotes')\n",
"    categories = [\n",
"        'rec.sport.baseball',\n",
"        'rec.sport.hockey',\n",
"        'comp.graphics',\n",
"        'sci.space',\n",
"    ]\n",
"\n",
"    data = fetch_20newsgroups(subset = 'train', categories = categories,\n",
"                              shuffle = True, random_state = 42,\n",
"                              remove = remove)\n",
"    data = pd.DataFrame({feature_column_name: data.data, target_column_name: data.target})\n",
"\n",
"    data_train = data[:200]\n",
"    data_test = data[200:300]\n",
"\n",
"    data_train = remove_blanks_20news(data_train, feature_column_name, target_column_name)\n",
"    data_test = remove_blanks_20news(data_test, feature_column_name, target_column_name)\n",
"\n",
"    return data_train, data_test\n",
"\n",
"def remove_blanks_20news(data, feature_column_name, target_column_name):\n",
"\n",
"    data[feature_column_name] = data[feature_column_name].replace(r'\\n', ' ', regex=True).apply(lambda x: x.strip())\n",
"    data = data[data[feature_column_name] != '']\n",
"\n",
"    return data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Fetch data and upload to datastore for use in training"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data_train, data_test = get_20newsgroups_data()\n",
"\n",
"if not os.path.isdir(data_dir):\n",
"    os.mkdir(data_dir)\n",
"\n",
"train_data_fname = data_dir + '/train_data.csv'\n",
"test_data_fname = data_dir + '/test_data.csv'\n",
"\n",
"data_train.to_csv(train_data_fname, index=False)\n",
"data_test.to_csv(test_data_fname, index=False)\n",
"\n",
"datastore = ws.get_default_datastore()\n",
"datastore.upload(src_dir=data_dir, target_path=blobstore_datadir,\n",
"                 overwrite=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, blobstore_datadir + '/train_data.csv')])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prepare AutoML run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This step requires an Enterprise workspace to gain access to this feature. To learn more about creating an Enterprise workspace or upgrading to an Enterprise workspace from the Azure portal, please visit our [Workspace page](https://docs.microsoft.com/azure/machine-learning/service/concept-workspace#upgrade).\n",
"\n",
"This notebook uses the blocked_models parameter to exclude some models that can take a longer time to train on some text datasets. You can choose to remove models from the blocked_models list, but you may need to increase the experiment_timeout_hours parameter value to get results."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_settings = {\n",
"    \"experiment_timeout_minutes\": 20,\n",
"    \"primary_metric\": 'accuracy',\n",
"    \"max_concurrent_iterations\": num_nodes,\n",
"    \"max_cores_per_iteration\": -1,\n",
"    \"enable_dnn\": True,\n",
"    \"enable_early_stopping\": True,\n",
"    \"validation_size\": 0.3,\n",
"    \"verbosity\": logging.INFO,\n",
"    \"enable_voting_ensemble\": False,\n",
"    \"enable_stack_ensemble\": False,\n",
"}\n",
"\n",
"automl_config = AutoMLConfig(task = 'classification',\n",
"                             debug_log = 'automl_errors.log',\n",
"                             compute_target=compute_target,\n",
"                             training_data=train_dataset,\n",
"                             label_column_name=target_column_name,\n",
"                             blocked_models = ['LightGBM'],\n",
"                             **automl_settings\n",
"                             )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Submit AutoML Run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_run = experiment.submit(automl_config, show_output=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Displaying the run objects gives you links to the visual tools in the Azure Portal. Go try them!"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrieve the Best Model\n",
"Below we select the best model pipeline from our iterations, and use it to test on test data on the same compute cluster."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can test the model locally to get a feel of the input/output. When the model contains BERT, this step will require pytorch and pytorch-transformers installed in your local environment. The exact versions of these packages can be found in the **automl_env.yml** file located in the local copy of your MachineLearningNotebooks folder here:\n",
"MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/automl_env.yml"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = automl_run.get_output()"
]
},
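As a follow-up to the local-testing note above, a hedged sketch of what such a smoke test could look like. This cell is not part of the committed notebook; `data_test`, `feature_column_name`, and `fitted_model` come from earlier cells, and pytorch/pytorch-transformers must be installed locally when the pipeline contains BERT:

```python
# Hypothetical local smoke test of the fitted AutoML pipeline (not part of the diff).
sample = data_test[[feature_column_name]].head(5)    # a few raw text rows, training-set schema
local_preds = fitted_model.predict(sample)           # the pipeline featurizes text internally
for text, label in zip(sample[feature_column_name].str.slice(0, 40), local_preds):
    print(repr(text), '->', label)
```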
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can now see what text transformations are used to convert text data to features for this dataset, including deep learning transformations based on BiLSTM or Transformer (BERT is one implementation of a Transformer) models."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"text_transformations_used = []\n",
"for column_group in fitted_model.named_steps['datatransformer'].get_featurization_summary():\n",
"    text_transformations_used.extend(column_group['Transformations'])\n",
"text_transformations_used"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Registering the best model\n",
"We now register the best fitted model from the AutoML Run for use in future deployments."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Get result stats, extract the best model from the AutoML run, then download and register the resultant best model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"summary_df = get_result_df(automl_run)\n",
"best_dnn_run_id = summary_df['run_id'].iloc[0]\n",
"best_dnn_run = Run(experiment, best_dnn_run_id)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_dir = 'Model' # Local folder where the model will be stored temporarily\n",
"if not os.path.isdir(model_dir):\n",
"    os.mkdir(model_dir)\n",
"\n",
"best_dnn_run.download_file('outputs/model.pkl', model_dir + '/model.pkl')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Register the model in your Azure Machine Learning Workspace. If you previously registered a model, please make sure to delete it so as to replace it with this new model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Register the model\n",
"model_name = 'textDNN-20News'\n",
"model = Model.register(model_path = model_dir + '/model.pkl',\n",
"                       model_name = model_name,\n",
"                       tags=None,\n",
"                       workspace=ws)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Evaluate on Test Data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We now use the best fitted model from the AutoML Run to make predictions on the test set.\n",
"\n",
"The test set schema should match that of the training set."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, blobstore_datadir + '/test_data.csv')])\n",
"\n",
"# preview the first 3 rows of the dataset\n",
"test_dataset.take(3).to_pandas_dataframe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_experiment = Experiment(ws, experiment_name + \"_test\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"script_folder = os.path.join(os.getcwd(), 'inference')\n",
"os.makedirs(script_folder, exist_ok=True)\n",
"shutil.copy('infer.py', script_folder)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_run = run_inference(test_experiment, compute_target, script_folder, best_dnn_run,\n",
"                         train_dataset, test_dataset, target_column_name, model_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Display computed metrics"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"RunDetails(test_run).show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_run.wait_for_completion()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pd.Series(test_run.get_metrics())"
]
}
],
"metadata": {
"authors": [
{
"name": "anshirga"
}
],
"compute": [
"AML Compute"
],
"datasets": [
"None"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"None"
],
"friendly_name": "DNN Text Featurization",
"index_order": 2,
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
},
"tags": [
"None"
],
"task": "Text featurization using DNNs for classification"
},
"nbformat": 4,
"nbformat_minor": 2
}
auto-ml-classification-text-dnn.yml (new file, +4 lines)
@@ -0,0 +1,4 @@
name: auto-ml-classification-text-dnn
dependencies:
- pip:
  - azureml-sdk
@@ -0,0 +1,56 @@
import pandas as pd
from azureml.core import Environment
from azureml.train.estimator import Estimator
from azureml.core.run import Run


def run_inference(test_experiment, compute_target, script_folder, train_run,
                  train_dataset, test_dataset, target_column_name, model_name):

    # Reuse the training run's environment so inference sees the same packages.
    inference_env = train_run.get_environment()

    est = Estimator(source_directory=script_folder,
                    entry_script='infer.py',
                    script_params={
                        '--target_column_name': target_column_name,
                        '--model_name': model_name
                    },
                    inputs=[
                        train_dataset.as_named_input('train_data'),
                        test_dataset.as_named_input('test_data')
                    ],
                    compute_target=compute_target,
                    environment_definition=inference_env)

    run = test_experiment.submit(
        est, tags={
            'training_run_id': train_run.id,
            'run_algorithm': train_run.properties['run_algorithm'],
            'valid_score': train_run.properties['score'],
            'primary_metric': train_run.properties['primary_metric']
        })

    run.log("run_algorithm", run.tags['run_algorithm'])
    return run


def get_result_df(remote_run):

    # Collect all child runs that produced a score, one column per run.
    children = list(remote_run.get_children(recursive=True))
    summary_df = pd.DataFrame(index=['run_id', 'run_algorithm',
                                     'primary_metric', 'Score'])
    goal_minimize = False
    for run in children:
        if('run_algorithm' in run.properties and 'score' in run.properties):
            summary_df[run.id] = [run.id, run.properties['run_algorithm'],
                                  run.properties['primary_metric'],
                                  float(run.properties['score'])]
            if('goal' in run.properties):
                goal_minimize = run.properties['goal'].split('_')[-1] == 'min'

    # Keep the best run per algorithm, ordered by the primary metric's goal.
    summary_df = summary_df.T.sort_values(
        'Score',
        ascending=goal_minimize).drop_duplicates(['run_algorithm'])
    summary_df = summary_df.set_index('run_algorithm')

    return summary_df
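A minimal sketch of how these helpers might be wired together, assuming `remote_run` is a completed AutoML parent run and that `test_experiment`, `compute_target`, the datasets, and the registered model name already exist (all names below are illustrative):

```python
# Rank child runs per algorithm, then score the best one on the test set.
summary_df = get_result_df(remote_run)
best_run_id = summary_df['run_id'].iloc[0]
best_run = Run(remote_run.experiment, best_run_id)

test_run = run_inference(test_experiment, compute_target, './infer',
                         best_run, train_dataset, test_dataset,
                         target_column_name='label', model_name='my-model')
```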
@@ -0,0 +1,60 @@
import argparse

import numpy as np

from sklearn.externals import joblib

from azureml.automl.runtime.shared.score import scoring, constants
from azureml.core import Run
from azureml.core.model import Model


parser = argparse.ArgumentParser()
parser.add_argument(
    '--target_column_name', type=str, dest='target_column_name',
    help='Target Column Name')
parser.add_argument(
    '--model_name', type=str, dest='model_name',
    help='Name of registered model')

args = parser.parse_args()
target_column_name = args.target_column_name
model_name = args.model_name

print('args passed are: ')
print('Target column name: ', target_column_name)
print('Name of registered model: ', model_name)

model_path = Model.get_model_path(model_name)
# deserialize the model file back into a sklearn model
model = joblib.load(model_path)

run = Run.get_context()
# get input dataset by name
test_dataset = run.input_datasets['test_data']
train_dataset = run.input_datasets['train_data']

X_test_df = test_dataset.drop_columns(columns=[target_column_name]) \
    .to_pandas_dataframe()
y_test_df = test_dataset.with_timestamp_columns(None) \
    .keep_columns(columns=[target_column_name]) \
    .to_pandas_dataframe()
y_train_df = train_dataset.with_timestamp_columns(None) \
    .keep_columns(columns=[target_column_name]) \
    .to_pandas_dataframe()

predicted = model.predict_proba(X_test_df)

# Use the AutoML scoring module
class_labels = np.unique(np.concatenate((y_train_df.values, y_test_df.values)))
train_labels = model.classes_
classification_metrics = list(constants.CLASSIFICATION_SCALAR_SET)
scores = scoring.score_classification(y_test_df.values, predicted,
                                      classification_metrics,
                                      class_labels, train_labels)

print("scores:")
print(scores)

for key, value in scores.items():
    run.log(key, value)
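Note how the two scripts fit together: the names passed to `as_named_input(...)` in `run_inference` are what `run.input_datasets[...]` resolves on the remote side, and the `script_params` keys become the argparse flags. A schematic of the mapping (values are illustrative):

```python
# Submitting side (run_inference):
#   inputs=[train_dataset.as_named_input('train_data'),
#           test_dataset.as_named_input('test_data')]
#   script_params={'--target_column_name': 'label', '--model_name': 'my-model'}
#
# Remote side (infer.py):
#   run.input_datasets['train_data']  <- as_named_input('train_data')
#   args.target_column_name           <- '--target_column_name'
#   args.model_name                   <- '--model_name'
```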
@@ -88,7 +88,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -190,7 +190,7 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.runconfig import CondaDependencies, DEFAULT_CPU_IMAGE, RunConfiguration\n",
"from azureml.core.runconfig import CondaDependencies, RunConfiguration\n",
"\n",
"# create a new RunConfig object\n",
"conda_run_config = RunConfiguration(framework=\"python\")\n",
@@ -199,7 +199,6 @@
"conda_run_config.target = compute_target\n",
"\n",
"conda_run_config.environment.docker.enabled = True\n",
"conda_run_config.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE\n",
"\n",
"cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]', 'applicationinsights', 'azureml-opendatasets', 'azureml-defaults'], \n",
"                              conda_packages=['numpy==1.16.2'], \n",
@@ -551,7 +550,7 @@
"metadata": {
"authors": [
{
"name": "vivijay"
"name": "anshirga"
}
],
"kernelspec": {

@@ -17,16 +17,16 @@ There's no need to install mini-conda specifically.
- Download the sample notebooks from [GitHub](https://github.com/Azure/MachineLearningNotebooks) as zip and extract the contents to a local directory. The automated ML sample notebooks are in the "automated-machine-learning" folder.

### 3. Setup a new conda environment
The **automl_setup** script creates a new conda environment, installs the necessary packages, configures the widget and starts a jupyter notebook. It takes the conda environment name as an optional parameter. The default conda environment name is azure_automl. The exact command depends on the operating system. See the specific sections below for Windows, Mac and Linux. It can take about 10 minutes to execute.
The **automl_setup_thin_client** script creates a new conda environment, installs the necessary packages, configures the widget and starts a jupyter notebook. It takes the conda environment name as an optional parameter. The default conda environment name is azure_automl_experimental. The exact command depends on the operating system. See the specific sections below for Windows, Mac and Linux. It can take about 10 minutes to execute.

Packages installed by the **automl_setup** script:
<ul><li>python</li><li>nb_conda</li><li>matplotlib</li><li>numpy</li><li>cython</li><li>urllib3</li><li>pandas</li><li>azureml-sdk</li><li>azureml-widgets</li><li>pandas-ml</li></ul>

For more details refer to the [automl_env.yml](./automl_env.yml)
For more details refer to the [automl_env_thin_client.yml](./automl_env_thin_client.yml)
## Windows
Start an **Anaconda Prompt** window, cd to the **how-to-use-azureml/automated-machine-learning/experimental** folder where the sample notebooks were extracted and then run:
```
automl_setup
automl_setup_thin_client
```
## Mac
Install "Command line developer tools" if it is not already installed (you can use the command: `xcode-select --install`).
@@ -34,14 +34,14 @@ Install "Command line developer tools" if it is not already installed (you can u
Start a Terminal window, cd to the **how-to-use-azureml/automated-machine-learning/experimental** folder where the sample notebooks were extracted and then run:

```
bash automl_setup_mac.sh
bash automl_setup_thin_client_mac.sh
```

## Linux
cd to the **how-to-use-azureml/automated-machine-learning/experimental** folder where the sample notebooks were extracted and then run:

```
bash automl_setup_linux.sh
bash automl_setup_thin_client_linux.sh
```

### 4. Running configuration.ipynb
@@ -49,7 +49,7 @@ bash automl_setup_linux.sh
- Execute the cells in the notebook to Register Machine Learning Services Resource Provider and create a workspace. (*instructions in notebook*)

### 5. Running Samples
- Please make sure you use the Python [conda env:azure_automl] kernel when trying the sample Notebooks.
- Please make sure you use the Python [conda env:azure_automl_experimental] kernel when trying the sample Notebooks.
- Follow the instructions in the individual notebooks to explore various features in automated ML.

### 6. Starting jupyter notebook manually
@@ -71,7 +71,7 @@ jupyter notebook
<a name="samples"></a>
# Automated ML SDK Sample Notebooks

- [auto-ml-regression.ipynb](regression/auto-ml-regression.ipynb)
- [auto-ml-regression-model-proxy.ipynb](regression-model-proxy/auto-ml-regression-model-proxy.ipynb)
- Dataset: Hardware Performance Dataset
- Simple example of using automated ML for regression
- Uses azure compute for training
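After the setup script finishes, a quick way to confirm the new environment is active is to check the SDK version from a notebook running on the azure_automl_experimental kernel:

```python
import azureml.core
print("Azure ML SDK version:", azureml.core.VERSION)
```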
@@ -13,7 +13,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
""
""
]
},
{
@@ -38,7 +38,7 @@
"metadata": {},
"source": [
"## Introduction\n",
"In this example we use the Hardware Performance Dataset to showcase how you can use AutoML for a simple regression problem. The Regression goal is to predict the performance of certain combinations of hardware parts.\n",
"In this example we use an experimental feature, Model Proxy, to run predictions on the best generated model without downloading the model locally. The prediction runs on the same compute and environment that was used to train the model. This feature is currently experimental, which means the API is subject to change; please make sure to run the latest version of this notebook if you face any issues.\n",
"\n",
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n",
"\n",
@@ -92,7 +92,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -384,10 +384,10 @@
"metadata": {},
"outputs": [],
"source": [
"y_pred_train = best_model_proxy.predict(train_data).to_pandas_dataframe()\n",
"y_pred_train = best_model_proxy.predict(train_data).to_pandas_dataframe().values.flatten()\n",
"y_residual_train = y_train - y_pred_train\n",
"\n",
"y_pred_test = best_model_proxy.predict(test_data).to_pandas_dataframe()\n",
"y_pred_test = best_model_proxy.predict(test_data).to_pandas_dataframe().values.flatten()\n",
"y_residual_test = y_test - y_pred_test"
]
},
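For context on the `best_model_proxy` object used above, here is a minimal sketch of creating the proxy and predicting remotely; it assumes `remote_run` is the completed AutoML run, and since Model Proxy is experimental the exact API surface may differ by SDK version:

```python
from azureml.train.automl.model_proxy import ModelProxy

# The proxy runs predict() on the training compute/environment and hands
# back a dataset that can be materialized locally as a dataframe.
best_model_proxy = ModelProxy(remote_run.get_best_child())
y_pred_test = best_model_proxy.predict(test_data) \
    .to_pandas_dataframe().values.flatten()
```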
@@ -114,7 +114,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -1,4 +1,5 @@
import argparse
import os

import numpy as np
import pandas as pd
@@ -10,6 +11,13 @@ from sklearn.metrics import mean_absolute_error, mean_squared_error
from azureml.automl.runtime.shared.score import scoring, constants
from azureml.core import Run

try:
    import torch

    _torch_present = True
except ImportError:
    _torch_present = False


def align_outputs(y_predicted, X_trans, X_test, y_test,
                  predicted_column_name='predicted',
@@ -48,7 +56,7 @@ def align_outputs(y_predicted, X_trans, X_test, y_test,
    # or at edges of time due to lags/rolling windows
    clean = together[together[[target_column_name,
                               predicted_column_name]].notnull().all(axis=1)]
    return(clean)
    return (clean)


def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
@@ -83,8 +91,7 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
        if origin_time != X[time_column_name].min():
            # Set the context by including actuals up-to the origin time
            test_context_expand_wind = (X[time_column_name] < origin_time)
            context_expand_wind = (
                X_test_expand[time_column_name] < origin_time)
            context_expand_wind = (X_test_expand[time_column_name] < origin_time)
            y_query_expand[context_expand_wind] = y[test_context_expand_wind]

        # Print some debug info
@@ -115,8 +122,7 @@ def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
        # Align forecast with test set for dates within
        # the current rolling window
        trans_tindex = X_trans.index.get_level_values(time_column_name)
        trans_roll_wind = (trans_tindex >= origin_time) & (
            trans_tindex < horizon_time)
        trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
        test_roll_wind = expand_wind & (X[time_column_name] >= origin_time)
        df_list.append(align_outputs(
            y_fcst[trans_roll_wind], X_trans[trans_roll_wind],
@@ -155,8 +161,7 @@ def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
        if origin_time != X_test[time_column_name].min():
            # Set the context by including actuals up-to the origin time
            test_context_expand_wind = (X_test[time_column_name] < origin_time)
            context_expand_wind = (
                X_test_expand[time_column_name] < origin_time)
            context_expand_wind = (X_test_expand[time_column_name] < origin_time)
            y_query_expand[context_expand_wind] = y_test[
                test_context_expand_wind]

@@ -186,10 +191,8 @@ def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
        # Align forecast with test set for dates within the
        # current rolling window
        trans_tindex = X_trans.index.get_level_values(time_column_name)
        trans_roll_wind = (trans_tindex >= origin_time) & (
            trans_tindex < horizon_time)
        test_roll_wind = expand_wind & (
            X_test[time_column_name] >= origin_time)
        trans_roll_wind = (trans_tindex >= origin_time) & (trans_tindex < horizon_time)
        test_roll_wind = expand_wind & (X_test[time_column_name] >= origin_time)
        df_list.append(align_outputs(y_fcst[trans_roll_wind],
                                     X_trans[trans_roll_wind],
                                     X_test[test_roll_wind],
@@ -221,6 +224,10 @@ def MAPE(actual, pred):
    return np.mean(APE(actual_safe, pred_safe))


def map_location_cuda(storage, loc):
    return storage.cuda()


parser = argparse.ArgumentParser()
parser.add_argument(
    '--max_horizon', type=int, dest='max_horizon',
@@ -238,7 +245,6 @@ parser.add_argument(
    '--model_path', type=str, dest='model_path',
    default='model.pkl', help='Filename of model to be loaded')


args = parser.parse_args()
max_horizon = args.max_horizon
target_column_name = args.target_column_name
@@ -246,7 +252,6 @@ time_column_name = args.time_column_name
freq = args.freq
model_path = args.model_path


print('args passed are: ')
print(max_horizon)
print(target_column_name)
@@ -274,8 +279,19 @@ X_lookback_df = lookback_dataset.drop_columns(columns=[target_column_name])
y_lookback_df = lookback_dataset.with_timestamp_columns(
    None).keep_columns(columns=[target_column_name])

fitted_model = joblib.load(model_path)

_, ext = os.path.splitext(model_path)
if ext == '.pt':
    # Load the fc-tcn torch model.
    assert _torch_present
    if torch.cuda.is_available():
        map_location = map_location_cuda
    else:
        map_location = 'cpu'
    with open(model_path, 'rb') as fh:
        fitted_model = torch.load(fh, map_location=map_location)
else:
    # Load the sklearn pipeline.
    fitted_model = joblib.load(model_path)

if hasattr(fitted_model, 'get_lookback'):
    lookback = fitted_model.get_lookback()

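The rolling-forecast helpers above advance an origin time by `max_horizon` steps at a time, exposing actuals behind the origin as context for each window. A self-contained toy of just that windowing pattern, with illustrative dates and horizon:

```python
import pandas as pd

origin = pd.Timestamp('2020-01-01')
last = pd.Timestamp('2020-01-10')
max_horizon, freq = 3, 'D'

while origin <= last:
    horizon_time = origin + pd.tseries.frequencies.to_offset(freq) * max_horizon
    # Forecast [origin, horizon_time); actuals before `origin` would be
    # supplied as the known context, as in do_rolling_forecast above.
    print("forecast window:", origin.date(), "->", horizon_time.date())
    origin = horizon_time
```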
@@ -87,7 +87,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -1,20 +1,12 @@
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies
from azureml.train.estimator import Estimator
from azureml.core.run import Run


def run_rolling_forecast(test_experiment, compute_target, train_run, test_dataset,
                         target_column_name, inference_folder='./forecast'):
    condafile = inference_folder + '/condafile.yml'
    train_run.download_file('outputs/model.pkl',
                            inference_folder + '/model.pkl')
    train_run.download_file('outputs/conda_env_v_1_0_0.yml', condafile)

    inference_env = Environment("myenv")
    inference_env.docker.enabled = True
    inference_env.python.conda_dependencies = CondaDependencies(
        conda_dependencies_file_path=condafile)
    inference_env = train_run.get_environment()

    est = Estimator(source_directory=inference_folder,
                    entry_script='forecasting_script.py',

@@ -97,7 +97,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -94,7 +94,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -82,7 +82,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -80,7 +80,7 @@
"from azureml.core.workspace import Workspace\n",
"from azureml.core.dataset import Dataset\n",
"from azureml.train.automl import AutoMLConfig\n",
"from azureml.interpret._internal.explanation_client import ExplanationClient"
"from azureml.interpret import ExplanationClient"
]
},
{
@@ -96,7 +96,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -98,7 +98,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -625,7 +625,7 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.interpret._internal.explanation_client import ExplanationClient\n",
"from azureml.interpret import ExplanationClient\n",
"client = ExplanationClient.from_run(automl_run)\n",
"engineered_explanations = client.download_model_explanation(raw=False, comment='engineered explanations')\n",
"print(engineered_explanations.get_feature_importance_dict())\n",

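The same client can also return raw-feature explanations; a short counterpart to the engineered call above:

```python
raw_explanations = client.download_model_explanation(
    raw=True, comment='raw explanations')
print(raw_explanations.get_feature_importance_dict())
```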
@@ -92,7 +92,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.17.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -1,33 +0,0 @@
Azure Databricks is a managed Spark offering on Azure and customers already use it for advanced analytics. It provides a collaborative Notebook-based environment with CPU- or GPU-based compute clusters.

In this section, you will find sample notebooks on how to use Azure Machine Learning SDK with Azure Databricks. You can train a model using Spark MLlib and then deploy the model to ACI/AKS from within Azure Databricks. You can also use Automated ML capability (**public preview**) of Azure ML SDK with Azure Databricks.

- Customers who use Azure Databricks for advanced analytics can now use the same cluster to run experiments with or without automated machine learning.
- You can keep the data within the same cluster.
- You can leverage the local worker nodes with autoscale and auto termination capabilities.
- You can use multiple cores of your Azure Databricks cluster to perform simultaneous training.
- You can further tune the model generated by automated machine learning if you choose to.
- Every run (including the best run) is available as a pipeline, which you can tune further if needed.
- The model trained using Azure Databricks can be registered in Azure ML SDK workspace and then deployed to Azure managed compute (ACI or AKS) using the Azure Machine Learning SDK.

Please follow our [Azure doc](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-environment#azure-databricks) to install the SDK in your Azure Databricks cluster before trying any of the sample notebooks.

**Single file** -
The following archive contains all the sample notebooks. You can run the notebooks after importing [DBC](Databricks_AMLSDK_1-4_6.dbc) in your Databricks workspace instead of downloading them individually.

Notebooks 1-4 have to be run sequentially & are related to an Income prediction experiment based on this [dataset](https://archive.ics.uci.edu/ml/datasets/adult); they demonstrate how to prep data, train and operationalize a Spark ML model with the Azure ML Python SDK from within Azure Databricks.

Notebook 6 is an Automated ML sample notebook for Classification.

Learn more about [how to use Azure Databricks as a development environment](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-environment#azure-databricks) for Azure Machine Learning service.

**Databricks as a Compute Target from AML Pipelines**
You can use Azure Databricks as a compute target from [Azure Machine Learning Pipelines](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-ml-pipelines). Take a look at this notebook for details: [aml-pipelines-use-databricks-as-compute-target.ipynb](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks/databricks-as-remote-compute-target/aml-pipelines-use-databricks-as-compute-target.ipynb).

For more on SDK concepts, please refer to [notebooks](https://github.com/Azure/MachineLearningNotebooks).

**Please let us know your feedback.**



![Impressions](https://PixelServer20190423114238743.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/azure-databricks/README.png)
@@ -1,373 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Azure ML & Azure Databricks notebooks by Parashar Shah.\n",
"\n",
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#Model Building"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import pprint\n",
"import numpy as np\n",
"\n",
"from pyspark.ml import Pipeline, PipelineModel\n",
"from pyspark.ml.feature import OneHotEncoder, StringIndexer, VectorAssembler\n",
"from pyspark.ml.classification import LogisticRegression\n",
"from pyspark.ml.evaluation import BinaryClassificationEvaluator\n",
"from pyspark.ml.tuning import CrossValidator, ParamGridBuilder"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"\n",
"# Check core SDK version number\n",
"print(\"SDK version:\", azureml.core.VERSION)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set auth to be used by workspace related APIs.\n",
"# For automation or CI/CD ServicePrincipalAuthentication can be used.\n",
"# https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.authentication.serviceprincipalauthentication?view=azure-ml-py\n",
"auth = None"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# import the Workspace class and check the azureml SDK version\n",
"from azureml.core import Workspace\n",
"\n",
"ws = Workspace.from_config(auth = auth)\n",
"print('Workspace name: ' + ws.name, \n",
"      'Azure region: ' + ws.location, \n",
"      'Subscription id: ' + ws.subscription_id, \n",
"      'Resource group: ' + ws.resource_group, sep = '\\n')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#get the train and test datasets\n",
"train_data_path = \"AdultCensusIncomeTrain\"\n",
"test_data_path = \"AdultCensusIncomeTest\"\n",
"\n",
"train = spark.read.parquet(train_data_path)\n",
"test = spark.read.parquet(test_data_path)\n",
"\n",
"print(\"train: ({}, {})\".format(train.count(), len(train.columns)))\n",
"print(\"test: ({}, {})\".format(test.count(), len(test.columns)))\n",
"\n",
"train.printSchema()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#Define Model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"label = \"income\"\n",
"dtypes = dict(train.dtypes)\n",
"dtypes.pop(label)\n",
"\n",
"si_xvars = []\n",
"ohe_xvars = []\n",
"featureCols = []\n",
"for idx,key in enumerate(dtypes):\n",
"    if dtypes[key] == \"string\":\n",
"        featureCol = \"-\".join([key, \"encoded\"])\n",
"        featureCols.append(featureCol)\n",
"        \n",
"        tmpCol = \"-\".join([key, \"tmp\"])\n",
"        # string-index and one-hot encode the string column\n",
"        #https://spark.apache.org/docs/2.3.0/api/java/org/apache/spark/ml/feature/StringIndexer.html\n",
"        #handleInvalid: Param for how to handle invalid data (unseen labels or NULL values). \n",
"        #Options are 'skip' (filter out rows with invalid data), 'error' (throw an error), \n",
"        #or 'keep' (put invalid data in a special additional bucket, at index numLabels). Default: \"error\"\n",
"        si_xvars.append(StringIndexer(inputCol=key, outputCol=tmpCol, handleInvalid=\"skip\"))\n",
"        ohe_xvars.append(OneHotEncoder(inputCol=tmpCol, outputCol=featureCol))\n",
"    else:\n",
"        featureCols.append(key)\n",
"\n",
"# string-index the label column into a column named \"label\"\n",
"si_label = StringIndexer(inputCol=label, outputCol='label')\n",
"\n",
"# assemble the encoded feature columns in to a column named \"features\"\n",
"assembler = VectorAssembler(inputCols=featureCols, outputCol=\"features\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.run import Run\n",
"from azureml.core.experiment import Experiment\n",
"import numpy as np\n",
"import os\n",
"import shutil\n",
"\n",
"model_name = \"AdultCensus_runHistory.mml\"\n",
"model_dbfs = os.path.join(\"/dbfs\", model_name)\n",
"run_history_name = 'spark-ml-notebook'\n",
"\n",
"# start a training run by defining an experiment\n",
"myexperiment = Experiment(ws, \"Ignite_AI_Talk\")\n",
"root_run = myexperiment.start_logging()\n",
"\n",
"# Regularization Rates - \n",
"regs = [0.0001, 0.001, 0.01, 0.1]\n",
" \n",
"# try a bunch of regularization rate in a Logistic Regression model\n",
"for reg in regs:\n",
"    print(\"Regularization rate: {}\".format(reg))\n",
"    # create a bunch of child runs\n",
"    with root_run.child_run(\"reg-\" + str(reg)) as run:\n",
"        # create a new Logistic Regression model.\n",
"        lr = LogisticRegression(regParam=reg)\n",
"        \n",
"        # put together the pipeline\n",
"        pipe = Pipeline(stages=[*si_xvars, *ohe_xvars, si_label, assembler, lr])\n",
"\n",
"        # train the model\n",
"        model_p = pipe.fit(train)\n",
"        \n",
"        # make prediction\n",
"        pred = model_p.transform(test)\n",
"        \n",
"        # evaluate. note only 2 metrics are supported out of the box by Spark ML.\n",
"        bce = BinaryClassificationEvaluator(rawPredictionCol='rawPrediction')\n",
"        au_roc = bce.setMetricName('areaUnderROC').evaluate(pred)\n",
"        au_prc = bce.setMetricName('areaUnderPR').evaluate(pred)\n",
"\n",
"        print(\"Area under ROC: {}\".format(au_roc))\n",
"        print(\"Area Under PR: {}\".format(au_prc))\n",
"        \n",
"        # log reg, au_roc, au_prc and feature names in run history\n",
"        run.log(\"reg\", reg)\n",
"        run.log(\"au_roc\", au_roc)\n",
"        run.log(\"au_prc\", au_prc)\n",
"        run.log_list(\"columns\", train.columns)\n",
"\n",
"        # save model\n",
"        model_p.write().overwrite().save(model_name)\n",
"        \n",
"        # upload the serialized model into run history record\n",
"        mdl, ext = model_name.split(\".\")\n",
"        model_zip = mdl + \".zip\"\n",
"        shutil.make_archive(mdl, 'zip', model_dbfs)\n",
"        run.upload_file(\"outputs/\" + model_name, model_zip)\n",
"        #run.upload_file(\"outputs/\" + model_name, path_or_stream = model_dbfs) #cannot deal with folders\n",
"\n",
"        # now delete the serialized model from local folder since it is already uploaded to run history \n",
"        shutil.rmtree(model_dbfs)\n",
"        os.remove(model_zip)\n",
"        \n",
"# Declare run completed\n",
"root_run.complete()\n",
"root_run_id = root_run.id\n",
"print (\"run id:\", root_run.id)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics = root_run.get_metrics(recursive=True)\n",
"best_run_id = max(metrics, key = lambda k: metrics[k]['au_roc'])\n",
"print(best_run_id, metrics[best_run_id]['au_roc'], metrics[best_run_id]['reg'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Get the best run\n",
"child_runs = {}\n",
"\n",
"for r in root_run.get_children():\n",
"    child_runs[r.id] = r\n",
"    \n",
"best_run = child_runs[best_run_id]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Download the model from the best run to a local folder\n",
"best_model_file_name = \"best_model.zip\"\n",
"best_run.download_file(name = 'outputs/' + model_name, output_file_path = best_model_file_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#Model Evaluation"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"##unzip the model to dbfs (as load() seems to require that) and load it.\n",
"if os.path.isfile(model_dbfs) or os.path.isdir(model_dbfs):\n",
"    shutil.rmtree(model_dbfs)\n",
"shutil.unpack_archive(best_model_file_name, model_dbfs)\n",
"\n",
"model_p_best = PipelineModel.load(model_name)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# make prediction\n",
"pred = model_p_best.transform(test)\n",
"output = pred[['hours_per_week','age','workclass','marital_status','income','prediction']]\n",
"display(output.limit(5))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# evaluate. note only 2 metrics are supported out of the box by Spark ML.\n",
"bce = BinaryClassificationEvaluator(rawPredictionCol='rawPrediction')\n",
"au_roc = bce.setMetricName('areaUnderROC').evaluate(pred)\n",
"au_prc = bce.setMetricName('areaUnderPR').evaluate(pred)\n",
"\n",
"print(\"Area under ROC: {}\".format(au_roc))\n",
"print(\"Area Under PR: {}\".format(au_prc))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#Model Persistence"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"##NOTE: by default the model is saved to and loaded from /dbfs/ instead of cwd!\n",
"model_p_best.write().overwrite().save(model_name)\n",
"print(\"saved model to {}\".format(model_dbfs))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%sh\n",
"\n",
"ls -la /dbfs/AdultCensus_runHistory.mml/*"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dbutils.notebook.exit(\"success\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
}
],
"metadata": {
"authors": [
{
"name": "pasha"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
},
"name": "build-model-run-history-03",
"notebookId": 3836944406456339
},
"nbformat": 4,
"nbformat_minor": 1
}
@@ -1,320 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Azure ML & Azure Databricks notebooks by Parashar Shah.\n",
"\n",
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Register Azure Databricks trained model and deploy it to ACI\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Please ensure you have run all previous notebooks in sequence before running this.\n",
"\n",
"Please Register Azure Container Instance(ACI) using Azure Portal: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services#portal in your subscription before using the SDK to deploy your ML model to ACI."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"\n",
"# Check core SDK version number\n",
"print(\"SDK version:\", azureml.core.VERSION)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set auth to be used by workspace related APIs.\n",
"# For automation or CI/CD ServicePrincipalAuthentication can be used.\n",
"# https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.authentication.serviceprincipalauthentication?view=azure-ml-py\n",
"auth = None"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Workspace\n",
"\n",
"ws = Workspace.from_config(auth = auth)\n",
"print('Workspace name: ' + ws.name, \n",
"      'Azure region: ' + ws.location, \n",
"      'Subscription id: ' + ws.subscription_id, \n",
"      'Resource group: ' + ws.resource_group, sep = '\\n')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"##NOTE: service deployment always gets the model from the current working dir.\n",
"import os\n",
"\n",
"model_name = \"AdultCensus_runHistory.mml\" # \n",
"model_name_dbfs = os.path.join(\"/dbfs\", model_name)\n",
"\n",
"print(\"copy model from dbfs to local\")\n",
"model_local = \"file:\" + os.getcwd() + \"/\" + model_name\n",
"dbutils.fs.cp(model_name, model_local, True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Register the model\n",
"from azureml.core.model import Model\n",
"mymodel = Model.register(model_path = model_name, # this points to a local file\n",
"                         model_name = model_name, # this is the name the model is registered as, am using same name for both path and name. \n",
"                         description = \"ADB trained model by Parashar\",\n",
"                         workspace = ws)\n",
"\n",
"print(mymodel.name, mymodel.description, mymodel.version)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#%%writefile score_sparkml.py\n",
"score_sparkml = \"\"\"\n",
" \n",
"import json\n",
" \n",
"def init():\n",
"    # One-time initialization of PySpark and predictive model\n",
"    import pyspark\n",
"    import os\n",
"    from azureml.core.model import Model\n",
"    from pyspark.ml import PipelineModel\n",
" \n",
"    global trainedModel\n",
"    global spark\n",
" \n",
"    spark = pyspark.sql.SparkSession.builder.appName(\"ADB and AML notebook by Parashar\").getOrCreate()\n",
"    model_name = \"{model_name}\" #interpolated\n",
"    # AZUREML_MODEL_DIR is an environment variable created during deployment.\n",
"    # It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)\n",
"    # For multiple models, it points to the folder containing all deployed models (./azureml-models)\n",
"    model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), model_name)\n",
"    trainedModel = PipelineModel.load(model_path)\n",
" \n",
"def run(input_json):\n",
"    if isinstance(trainedModel, Exception):\n",
"        return json.dumps({{\"trainedModel\":str(trainedModel)}})\n",
" \n",
"    try:\n",
"        sc = spark.sparkContext\n",
"        input_list = json.loads(input_json)\n",
"        input_rdd = sc.parallelize(input_list)\n",
"        input_df = spark.read.json(input_rdd)\n",
" \n",
"        # Compute prediction\n",
"        prediction = trainedModel.transform(input_df)\n",
"        #result = prediction.first().prediction\n",
"        predictions = prediction.collect()\n",
" \n",
"        #Get each scored result\n",
"        preds = [str(x['prediction']) for x in predictions]\n",
"        result = \",\".join(preds)\n",
"        # you can return any data type as long as it is JSON-serializable\n",
"        return result\n",
"    except Exception as e:\n",
"        result = str(e)\n",
"        return result\n",
" \n",
"\"\"\".format(model_name=model_name)\n",
" \n",
"exec(score_sparkml)\n",
" \n",
"with open(\"score_sparkml.py\", \"w\") as file:\n",
"    file.write(score_sparkml)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.conda_dependencies import CondaDependencies \n",
"\n",
"myacienv = CondaDependencies.create(conda_packages=['scikit-learn','numpy','pandas']) # showing how to add libs as an eg. - not needed for this model.\n",
"\n",
"with open(\"myenv.yml\",\"w\") as f:\n",
"    f.write(myacienv.serialize_to_string())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#deploy to ACI\n",
"from azureml.core.webservice import AciWebservice, Webservice\n",
"from azureml.exceptions import WebserviceException\n",
"from azureml.core.model import InferenceConfig\n",
"from azureml.core.environment import Environment\n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"\n",
"\n",
"myaci_config = AciWebservice.deploy_configuration(cpu_cores = 2, \n",
"                                                  memory_gb = 2, \n",
"                                                  tags = {'name':'Databricks Azure ML ACI'}, \n",
"                                                  description = 'This is for ADB and AML example.')\n",
"\n",
"service_name = 'aciws'\n",
"\n",
"# Remove any existing service under the same name.\n",
"try:\n",
"    Webservice(ws, service_name).delete()\n",
"except WebserviceException:\n",
"    pass\n",
"\n",
"myenv = Environment.get(ws, name='AzureML-PySpark-MmlSpark-0.15')\n",
"# we need to add extra packages to procured environment\n",
"# in order to deploy amended environment we need to rename it\n",
"myenv.name = 'myenv'\n",
"model_dependencies = CondaDependencies('myenv.yml')\n",
"for pip_dep in model_dependencies.pip_packages:\n",
"    myenv.python.conda_dependencies.add_pip_package(pip_dep)\n",
"for conda_dep in model_dependencies.conda_packages:\n",
"    myenv.python.conda_dependencies.add_conda_package(conda_dep)\n",
"inference_config = InferenceConfig(entry_script='score_sparkml.py', environment=myenv)\n",
"\n",
"myservice = Model.deploy(ws, service_name, [mymodel], inference_config, myaci_config)\n",
"myservice.wait_for_deployment(show_output=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"help(Webservice)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#for using the Web HTTP API \n",
"print(myservice.scoring_uri)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"\n",
"#get some sample data\n",
"test_data_path = \"AdultCensusIncomeTest\"\n",
"test = spark.read.parquet(test_data_path).limit(5)\n",
"\n",
"test_json = json.dumps(test.toJSON().collect())\n",
"\n",
"print(test_json)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#using data defined above predict if income is >50K (1) or <=50K (0)\n",
"myservice.run(input_data=test_json)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#comment to not delete the web service\n",
"myservice.delete()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Deploying to other types of computes\n",
"\n",
"In order to learn how to deploy to other types of compute targets, such as AKS, please take a look at the set of notebooks in the [deployment](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/deployment) folder."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
}
],
"metadata": {
"authors": [
{
"name": "pasha"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
},
"name": "deploy-to-aci-04",
"notebookId": 3836944406456376
},
"nbformat": 4,
"nbformat_minor": 1
}
@@ -1,179 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Azure ML & Azure Databricks notebooks by Parashar Shah.\n",
"\n",
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#Data Ingestion"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import urllib"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Download AdultCensusIncome.csv from Azure CDN. This file has 32,561 rows.\n",
"dataurl = \"https://amldockerdatasets.azureedge.net/AdultCensusIncome.csv\"\n",
"datafile = \"AdultCensusIncome.csv\"\n",
"datafile_dbfs = os.path.join(\"/dbfs\", datafile)\n",
"\n",
"if os.path.isfile(datafile_dbfs):\n",
"    print(\"found {} at {}\".format(datafile, datafile_dbfs))\n",
"else:\n",
"    print(\"downloading {} to {}\".format(datafile, datafile_dbfs))\n",
"    urllib.request.urlretrieve(dataurl, datafile_dbfs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Create a Spark dataframe out of the csv file.\n",
"data_all = sqlContext.read.format('csv').options(header='true', inferSchema='true', ignoreLeadingWhiteSpace='true', ignoreTrailingWhiteSpace='true').load(datafile)\n",
"print(\"({}, {})\".format(data_all.count(), len(data_all.columns)))\n",
"data_all.printSchema()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#renaming columns\n",
"columns_new = [col.replace(\"-\", \"_\") for col in data_all.columns]\n",
"data_all = data_all.toDF(*columns_new)\n",
"data_all.printSchema()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"display(data_all.limit(5))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#Data Preparation"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Choose feature columns and the label column.\n",
"label = \"income\"\n",
"xvars = set(data_all.columns) - {label}\n",
"\n",
"print(\"label = {}\".format(label))\n",
"print(\"features = {}\".format(xvars))\n",
"\n",
"data = data_all.select([*xvars, label])\n",
"\n",
"# Split data into train and test.\n",
"train, test = data.randomSplit([0.75, 0.25], seed=123)\n",
"\n",
"print(\"train ({}, {})\".format(train.count(), len(train.columns)))\n",
"print(\"test ({}, {})\".format(test.count(), len(test.columns)))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#Data Persistence"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Write the train and test data sets to intermediate storage\n",
"train_data_path = \"AdultCensusIncomeTrain\"\n",
"test_data_path = \"AdultCensusIncomeTest\"\n",
"\n",
"train_data_path_dbfs = os.path.join(\"/dbfs\", \"AdultCensusIncomeTrain\")\n",
"test_data_path_dbfs = os.path.join(\"/dbfs\", \"AdultCensusIncomeTest\")\n",
"\n",
"train.write.mode('overwrite').parquet(train_data_path)\n",
"test.write.mode('overwrite').parquet(test_data_path)\n",
"print(\"train and test datasets saved to {} and {}\".format(train_data_path_dbfs, test_data_path_dbfs))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
}
],
"metadata": {
"authors": [
{
"name": "pasha"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
},
"name": "ingest-data-02",
"notebookId": 3836944406456362
},
"nbformat": 4,
"nbformat_minor": 1
}
@@ -1,183 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Azure ML & Azure Databricks notebooks by Parashar Shah.\n",
"\n",
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We support installing AML SDK as library from GUI. When attaching a library follow this https://docs.databricks.com/user-guide/libraries.html and add the below string as your PyPi package. You can select the option to attach the library to all clusters or just one cluster.\n",
"\n",
"**install azureml-sdk**\n",
"* Source: Upload Python Egg or PyPi\n",
"* PyPi Name: `azureml-sdk[databricks]`\n",
"* Select Install Library"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"\n",
"# Check core SDK version number - based on build number of preview/master.\n",
"print(\"SDK version:\", azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Please specify the Azure subscription Id, resource group name, workspace name, and the region in which you want to create the Azure Machine Learning Workspace.\n",
"\n",
"You can get the value of your Azure subscription ID from the Azure Portal, and then selecting Subscriptions from the menu on the left.\n",
"\n",
"For the resource_group, use the name of the resource group that contains your Azure Databricks Workspace.\n",
"\n",
"NOTE: If you provide a resource group name that does not exist, the resource group will be automatically created. This may or may not succeed in your environment, depending on the permissions you have on your Azure Subscription."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# subscription_id = \"<your-subscription-id>\"\n",
"# resource_group = \"<your-existing-resource-group>\"\n",
"# workspace_name = \"<a-new-or-existing-workspace; it is unrelated to Databricks workspace>\"\n",
"# workspace_region = \"<your-resource group-region>\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set auth to be used by workspace related APIs.\n",
"# For automation or CI/CD ServicePrincipalAuthentication can be used.\n",
"# https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.authentication.serviceprincipalauthentication?view=azure-ml-py\n",
"auth = None"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# import the Workspace class and check the azureml SDK version\n",
"# exist_ok checks if workspace exists or not.\n",
"\n",
"from azureml.core import Workspace\n",
"\n",
"ws = Workspace.create(name = workspace_name,\n",
"                      subscription_id = subscription_id,\n",
"                      resource_group = resource_group, \n",
"                      location = workspace_region,\n",
"                      auth = auth,\n",
"                      exist_ok=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#get workspace details\n",
"ws.get_details()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace(workspace_name = workspace_name,\n",
"               subscription_id = subscription_id,\n",
"               resource_group = resource_group,\n",
"               auth = auth)\n",
"\n",
"# persist the subscription id, resource group name, and workspace name in aml_config/config.json.\n",
"ws.write_config()\n",
"#if you need to give a different path/filename please use this\n",
"#write_config(path=\"/databricks/driver/aml_config/\",file_name=<alias_conf.cfg>)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"help(Workspace)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# import the Workspace class and check the azureml SDK version\n",
"from azureml.core import Workspace\n",
"\n",
"ws = Workspace.from_config(auth = auth)\n",
"#ws = Workspace.from_config(<full path>)\n",
"print('Workspace name: ' + ws.name, \n",
"      'Azure region: ' + ws.location, \n",
"      'Subscription id: ' + ws.subscription_id, \n",
"      'Resource group: ' + ws.resource_group, sep = '\\n')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
}
],
"metadata": {
"authors": [
{
"name": "pasha"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
},
"name": "installation-and-configuration-01",
"notebookId": 3688394266452835
},
"nbformat": 4,
"nbformat_minor": 1
}
@@ -1,9 +1,21 @@
|
||||
# Adding an init script to an Azure Databricks cluster
|
||||
# Automated ML introduction
|
||||
Automated machine learning (automated ML) builds high-quality machine learning models for you by automating model and hyperparameter selection. Bring a labeled dataset that you want to build a model for, and automated ML will give you a high-quality machine learning model that you can use for predictions.
|
||||
|
||||
The [azureml-cluster-init.sh](./azureml-cluster-init.sh) script configures the environment to
|
||||
1. Install the latest AutoML library
|
||||
|
||||
To create the Azure Databricks cluster-scoped init script
|
||||
If you are new to data science, automated ML will help you get started by simplifying machine learning model building. It abstracts away model selection and hyperparameter selection and, in one step, creates a high-quality trained model for you to use.
|
||||
|
||||
If you are an experienced data scientist, automated ML will increase your productivity by intelligently performing model and hyperparameter selection for your training, and it generates high-quality models much more quickly than manually specifying several parameter combinations and running training jobs. Automated ML provides visibility and access to all the training jobs and the performance characteristics of the models to help you further tune the pipeline if you desire.
|
||||
|
||||
# Install Instructions using Azure Databricks:
|
||||
|
||||
#### For Databricks non-ML runtime 7.1 (Scala 2.12, Spark 3.0.0) and up, install the Automated Machine Learning SDK by adding and running the following command as the first cell of your notebook. This will install the AutoML dependencies specific to your notebook.
|
||||
|
||||
%pip install --upgrade --force-reinstall -r https://aka.ms/automl_linux_requirements.txt
|
||||
|
||||
|
||||
#### For Databricks non-ML runtime 7.0 and lower, install the Automated Machine Learning SDK using an init script, as shown below, before running the notebook.
|
||||
|
||||
**Create the Azure Databricks cluster-scoped init script 'azureml-cluster-init.sh' as shown below:**
|
||||
|
||||
1. Create the base directory you want to store the init script in if it does not exist.
|
||||
```
|
||||
@@ -15,7 +27,7 @@ To create the Azure Databricks cluster-scoped init script
|
||||
dbutils.fs.put("/databricks/init/azureml-cluster-init.sh","""
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
/databricks/python/bin/pip install -r https://aka.ms/automl_linux_requirements.txt
|
||||
/databricks/python/bin/pip install --upgrade --force-reinstall -r https://aka.ms/automl_linux_requirements.txt
|
||||
""", True)
|
||||
```
|
||||
|
||||
@@ -24,6 +36,8 @@ To create the Azure Databricks cluster-scoped init script
|
||||
display(dbutils.fs.ls("dbfs:/databricks/init/azureml-cluster-init.sh"))
|
||||
```
|
||||
|
||||
**Install libraries to the cluster using the init script 'azureml-cluster-init.sh' created in the previous step:**
|
||||
|
||||
1. Configure the cluster to run the script.
|
||||
* Using the cluster configuration page
|
||||
1. On the cluster configuration page, click the Advanced Options toggle.
|
||||
|
||||
@@ -17,9 +17,9 @@
|
||||
"\n",
|
||||
"**For Databricks non ML runtime 7.1(scala 2.21, spark 3.0.0) and up, Install AML sdk by running the following command in the first cell of the notebook.**\n",
|
||||
"\n",
|
||||
"%pip install -r https://aka.ms/automl_linux_requirements.txt\n",
|
||||
"%pip install --upgrade --force-reinstall -r https://aka.ms/automl_linux_requirements.txt\n",
|
||||
"\n",
|
||||
"**For Databricks non ML runtime 7.0 and lower, Install AML sdk using init script as shown in [readme](readme.md) before running this notebook.**\n"
|
||||
"**For Databricks non ML runtime 7.0 and lower, Install AML sdk using init script as shown in [readme](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/azure-databricks/automl/README.md) before running this notebook.**\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -17,9 +17,9 @@
|
||||
"\n",
|
||||
"**For Databricks non ML runtime 7.1(scala 2.21, spark 3.0.0) and up, Install AML sdk by running the following command in the first cell of the notebook.**\n",
|
||||
"\n",
|
||||
"%pip install -r https://aka.ms/automl_linux_requirements.txt\n",
|
||||
"%pip install --upgrade --force-reinstall -r https://aka.ms/automl_linux_requirements.txt\n",
|
||||
"\n",
|
||||
"**For Databricks non ML runtime 7.0 and lower, Install AML sdk using init script as shown in [readme](readme.md) before running this notebook.**"
|
||||
"**For Databricks non ML runtime 7.0 and lower, Install AML sdk using init script as shown in [readme](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/azure-databricks/automl/README.md) before running this notebook.**"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -1,719 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Using Databricks as a Compute Target from Azure Machine Learning Pipeline\n",
|
||||
"To use Databricks as a compute target from [Azure Machine Learning Pipeline](https://aka.ms/pl-concept), a [DatabricksStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.databricks_step.databricksstep?view=azure-ml-py) is used. This notebook demonstrates the use of DatabricksStep in Azure Machine Learning Pipeline.\n",
|
||||
"\n",
|
||||
"The notebook will show:\n",
|
||||
"1. Running an arbitrary Databricks notebook that the customer has in Databricks workspace\n",
|
||||
"2. Running an arbitrary Python script that the customer has in DBFS\n",
|
||||
"3. Running an arbitrary Python script that is available on local computer (will upload to DBFS, and then run in Databricks) \n",
|
||||
"4. Running a JAR job that the customer has in DBFS.\n",
|
||||
"\n",
|
||||
"## Before you begin:\n",
|
||||
"\n",
|
||||
"1. **Create an Azure Databricks workspace** in the same subscription where you have your Azure Machine Learning workspace. You will need details of this workspace later on to define DatabricksStep. [Click here](https://ms.portal.azure.com/#blade/HubsExtension/Resources/resourceType/Microsoft.Databricks%2Fworkspaces) for more information.\n",
|
||||
"2. **Create PAT (access token)**: Manually create a Databricks access token at the Azure Databricks portal. See [this](https://docs.databricks.com/api/latest/authentication.html#generate-a-token) for more information.\n",
|
||||
"3. **Add demo notebook to ADB**: This notebook has a sample you can use as is. Launch Azure Databricks attached to your Azure Machine Learning workspace and add a new notebook. \n",
|
||||
"4. **Create/attach a Blob storage** for use from ADB"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Add demo notebook to ADB Workspace\n",
|
||||
"Copy and paste the below code to create a new notebook in your ADB workspace."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```python\n",
|
||||
"# direct access\n",
|
||||
"dbutils.widgets.get(\"myparam\")\n",
|
||||
"p = getArgument(\"myparam\")\n",
|
||||
"print (\"Param -\\'myparam':\")\n",
|
||||
"print (p)\n",
|
||||
"\n",
|
||||
"dbutils.widgets.get(\"input\")\n",
|
||||
"i = getArgument(\"input\")\n",
|
||||
"print (\"Param -\\'input':\")\n",
|
||||
"print (i)\n",
|
||||
"\n",
|
||||
"dbutils.widgets.get(\"output\")\n",
|
||||
"o = getArgument(\"output\")\n",
|
||||
"print (\"Param -\\'output':\")\n",
|
||||
"print (o)\n",
|
||||
"\n",
|
||||
"n = i + \"/testdata.txt\"\n",
|
||||
"df = spark.read.csv(n)\n",
|
||||
"\n",
|
||||
"display (df)\n",
|
||||
"\n",
|
||||
"data = [('value1', 'value2')]\n",
|
||||
"df2 = spark.createDataFrame(data)\n",
|
||||
"\n",
|
||||
"z = o + \"/output.txt\"\n",
|
||||
"df2.write.csv(z)\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Azure Machine Learning and Pipeline SDK-specific imports"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import azureml.core\n",
|
||||
"from azureml.core.runconfig import JarLibrary\n",
|
||||
"from azureml.core.compute import ComputeTarget, DatabricksCompute\n",
|
||||
"from azureml.exceptions import ComputeTargetException\n",
|
||||
"from azureml.core import Workspace, Experiment\n",
|
||||
"from azureml.pipeline.core import Pipeline, PipelineData\n",
|
||||
"from azureml.pipeline.steps import DatabricksStep\n",
|
||||
"from azureml.core.datastore import Datastore\n",
|
||||
"from azureml.data.data_reference import DataReference\n",
|
||||
"\n",
|
||||
"# Check core SDK version number\n",
|
||||
"print(\"SDK version:\", azureml.core.VERSION)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize Workspace\n",
|
||||
"\n",
|
||||
"Initialize a workspace object from persisted configuration. Make sure the config file is present at .\\config.json"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Attach Databricks compute target\n",
|
||||
"Next, you need to add your Databricks workspace to Azure Machine Learning as a compute target and give it a name. You will use this name to refer to your Databricks workspace compute target inside Azure Machine Learning.\n",
|
||||
"\n",
|
||||
"- **Resource Group** - The resource group name of your Azure Machine Learning workspace\n",
|
||||
"- **Databricks Workspace Name** - The workspace name of your Azure Databricks workspace\n",
|
||||
"- **Databricks Access Token** - The access token you created in ADB\n",
|
||||
"\n",
|
||||
"**The Databricks workspace need to be present in the same subscription as your AML workspace**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Replace with your account info before running.\n",
|
||||
" \n",
|
||||
"db_compute_name=os.getenv(\"DATABRICKS_COMPUTE_NAME\", \"<my-databricks-compute-name>\") # Databricks compute name\n",
|
||||
"db_resource_group=os.getenv(\"DATABRICKS_RESOURCE_GROUP\", \"<my-db-resource-group>\") # Databricks resource group\n",
|
||||
"db_workspace_name=os.getenv(\"DATABRICKS_WORKSPACE_NAME\", \"<my-db-workspace-name>\") # Databricks workspace name\n",
|
||||
"db_access_token=os.getenv(\"DATABRICKS_ACCESS_TOKEN\", \"<my-access-token>\") # Databricks access token\n",
|
||||
" \n",
|
||||
"try:\n",
|
||||
" databricks_compute = DatabricksCompute(workspace=ws, name=db_compute_name)\n",
|
||||
" print('Compute target {} already exists'.format(db_compute_name))\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" print('Compute not found, will use below parameters to attach new one')\n",
|
||||
" print('db_compute_name {}'.format(db_compute_name))\n",
|
||||
" print('db_resource_group {}'.format(db_resource_group))\n",
|
||||
" print('db_workspace_name {}'.format(db_workspace_name))\n",
|
||||
" print('db_access_token {}'.format(db_access_token))\n",
|
||||
" \n",
|
||||
" config = DatabricksCompute.attach_configuration(\n",
|
||||
" resource_group = db_resource_group,\n",
|
||||
" workspace_name = db_workspace_name,\n",
|
||||
" access_token= db_access_token)\n",
|
||||
" databricks_compute=ComputeTarget.attach(ws, db_compute_name, config)\n",
|
||||
" databricks_compute.wait_for_completion(True)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Data Connections with Inputs and Outputs\n",
|
||||
"The DatabricksStep supports Azure Bloband ADLS for inputs and outputs. You also will need to define a [Secrets](https://docs.azuredatabricks.net/user-guide/secrets/index.html) scope to enable authentication to external data sources such as Blob and ADLS from Databricks.\n",
|
||||
"\n",
|
||||
"- Databricks documentation on [Azure Blob](https://docs.azuredatabricks.net/spark/latest/data-sources/azure/azure-storage.html)\n",
|
||||
"- Databricks documentation on [ADLS](https://docs.databricks.com/spark/latest/data-sources/azure/azure-datalake.html)\n",
|
||||
"\n",
|
||||
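"As a minimal sketch (assuming the legacy Databricks CLI and the predefined \"amlscope\" secret scope referenced below), you could create the scope and store a storage secret like this:\n",
"\n",
"```\n",
"databricks secrets create-scope --scope amlscope\n",
"databricks secrets put --scope amlscope --key <my-storage-secret-name>\n",
"```\n",
"\n",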
"### Type of Data Access\n",
|
||||
"Databricks allows to interact with Azure Blob and ADLS in two ways.\n",
|
||||
"- **Direct Access**: Databricks allows you to interact with Azure Blob or ADLS URIs directly. The input or output URIs will be mapped to a Databricks widget param in the Databricks notebook.\n",
|
||||
"- **Mounting**: You will be supplied with additional parameters and secrets that will enable you to mount your ADLS or Azure Blob input or output location in your Databricks notebook."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Direct Access: Python sample code\n",
|
||||
"If you have a data reference named \"input\" it will represent the URI of the input and you can access it directly in the Databricks python notebook like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```python\n",
|
||||
"dbutils.widgets.get(\"input\")\n",
|
||||
"y = getArgument(\"input\")\n",
|
||||
"df = spark.read.csv(y)\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Mounting: Python sample code for Azure Blob\n",
|
||||
"Given an Azure Blob data reference named \"input\" the following widget params will be made available in the Databricks notebook:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```python\n",
|
||||
"# This contains the input URI\n",
|
||||
"dbutils.widgets.get(\"input\")\n",
|
||||
"myinput_uri = getArgument(\"input\")\n",
|
||||
"\n",
|
||||
"# How to get the input datastore name inside ADB notebook\n",
|
||||
"# This contains the name of a Databricks secret (in the predefined \"amlscope\" secret scope) \n",
|
||||
"# that contians an access key or sas for the Azure Blob input (this name is obtained by appending \n",
|
||||
"# the name of the input with \"_blob_secretname\". \n",
|
||||
"dbutils.widgets.get(\"input_blob_secretname\") \n",
|
||||
"myinput_blob_secretname = getArgument(\"input_blob_secretname\")\n",
|
||||
"\n",
|
||||
"# This contains the required configuration for mounting\n",
|
||||
"dbutils.widgets.get(\"input_blob_config\")\n",
|
||||
"myinput_blob_config = getArgument(\"input_blob_config\")\n",
|
||||
"\n",
|
||||
"# Usage\n",
|
||||
"dbutils.fs.mount(\n",
|
||||
" source = myinput_uri,\n",
|
||||
" mount_point = \"/mnt/input\",\n",
|
||||
" extra_configs = {myinput_blob_config:dbutils.secrets.get(scope = \"amlscope\", key = myinput_blob_secretname)})\n",
|
||||
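"\n",
"# When you are done with the mount, you can detach it (assuming the\n",
"# \"/mnt/input\" mount point created above):\n",
"# dbutils.fs.unmount(\"/mnt/input\")\n",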
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Mounting: Python sample code for ADLS\n",
|
||||
"Given an ADLS data reference named \"input\" the following widget params will be made available in the Databricks notebook:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```python\n",
|
||||
"# This contains the input URI\n",
|
||||
"dbutils.widgets.get(\"input\") \n",
|
||||
"myinput_uri = getArgument(\"input\")\n",
|
||||
"\n",
|
||||
"# This contains the client id for the service principal \n",
|
||||
"# that has access to the adls input\n",
|
||||
"dbutils.widgets.get(\"input_adls_clientid\") \n",
|
||||
"myinput_adls_clientid = getArgument(\"input_adls_clientid\")\n",
|
||||
"\n",
|
||||
"# This contains the name of a Databricks secret (in the predefined \"amlscope\" secret scope) \n",
|
||||
"# that contains the secret for the above mentioned service principal\n",
|
||||
"dbutils.widgets.get(\"input_adls_secretname\") \n",
|
||||
"myinput_adls_secretname = getArgument(\"input_adls_secretname\")\n",
|
||||
"\n",
|
||||
"# This contains the refresh url for the mounting configs\n",
|
||||
"dbutils.widgets.get(\"input_adls_refresh_url\") \n",
|
||||
"myinput_adls_refresh_url = getArgument(\"input_adls_refresh_url\")\n",
|
||||
"\n",
|
||||
"# Usage \n",
|
||||
"configs = {\"dfs.adls.oauth2.access.token.provider.type\": \"ClientCredential\",\n",
|
||||
" \"dfs.adls.oauth2.client.id\": myinput_adls_clientid,\n",
|
||||
" \"dfs.adls.oauth2.credential\": dbutils.secrets.get(scope = \"amlscope\", key =myinput_adls_secretname),\n",
|
||||
" \"dfs.adls.oauth2.refresh.url\": myinput_adls_refresh_url}\n",
|
||||
"\n",
|
||||
"dbutils.fs.mount(\n",
|
||||
" source = myinput_uri,\n",
|
||||
" mount_point = \"/mnt/output\",\n",
|
||||
" extra_configs = configs)\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use Databricks from Azure Machine Learning Pipeline\n",
|
||||
"To use Databricks as a compute target from Azure Machine Learning Pipeline, a DatabricksStep is used. Let's define a datasource (via DataReference) and intermediate data (via PipelineData) to be used in DatabricksStep."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Use the default blob storage\n",
|
||||
"def_blob_store = Datastore(ws, \"workspaceblobstore\")\n",
|
||||
"print('Datastore {} will be used'.format(def_blob_store.name))\n",
|
||||
"\n",
|
||||
"# We are uploading a sample file in the local directory to be used as a datasource\n",
|
||||
"def_blob_store.upload_files(files=[\"./testdata.txt\"], target_path=\"dbtest\", overwrite=False)\n",
|
||||
"\n",
|
||||
"step_1_input = DataReference(datastore=def_blob_store, path_on_datastore=\"dbtest\",\n",
|
||||
" data_reference_name=\"input\")\n",
|
||||
"\n",
|
||||
"step_1_output = PipelineData(\"output\", datastore=def_blob_store)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Add a DatabricksStep\n",
|
||||
"Adds a Databricks notebook as a step in a Pipeline.\n",
|
||||
"- ***name:** Name of the Module\n",
|
||||
"- **inputs:** List of input connections for data consumed by this step. Fetch this inside the notebook using dbutils.widgets.get(\"input\")\n",
|
||||
"- **outputs:** List of output port definitions for outputs produced by this step. Fetch this inside the notebook using dbutils.widgets.get(\"output\")\n",
|
||||
"- **existing_cluster_id:** Cluster ID of an existing Interactive cluster on the Databricks workspace. If you are providing this, do not provide any of the parameters below that are used to create a new cluster such as spark_version, node_type, etc.\n",
|
||||
"- **spark_version:** Version of spark for the databricks run cluster. default value: 4.0.x-scala2.11\n",
|
||||
"- **node_type:** Azure vm node types for the databricks run cluster. default value: Standard_D3_v2\n",
|
||||
"- **num_workers:** Specifies a static number of workers for the databricks run cluster\n",
|
||||
"- **min_workers:** Specifies a min number of workers to use for auto-scaling the databricks run cluster\n",
|
||||
"- **max_workers:** Specifies a max number of workers to use for auto-scaling the databricks run cluster\n",
|
||||
"- **spark_env_variables:** Spark environment variables for the databricks run cluster (dictionary of {str:str}). default value: {'PYSPARK_PYTHON': '/databricks/python3/bin/python3'}\n",
|
||||
"- **notebook_path:** Path to the notebook in the databricks instance. If you are providing this, do not provide python script related paramaters or JAR related parameters.\n",
|
||||
"- **notebook_params:** Parameters for the databricks notebook (dictionary of {str:str}). Fetch this inside the notebook using dbutils.widgets.get(\"myparam\")\n",
|
||||
"- **python_script_path:** The path to the python script in the DBFS or S3. If you are providing this, do not provide python_script_name which is used for uploading script from local machine.\n",
|
||||
"- **python_script_params:** Parameters for the python script (list of str)\n",
|
||||
"- **main_class_name:** The name of the entry point in a JAR module. If you are providing this, do not provide any python script or notebook related parameters.\n",
|
||||
"- **jar_params:** Parameters for the JAR module (list of str)\n",
|
||||
"- **python_script_name:** name of a python script on your local machine (relative to source_directory). If you are providing this do not provide python_script_path which is used to execute a remote python script; or any of the JAR or notebook related parameters.\n",
|
||||
"- **source_directory:** folder that contains the script and other files\n",
|
||||
"- **hash_paths:** list of paths to hash to detect a change in source_directory (script file is always hashed)\n",
|
||||
"- **run_name:** Name in databricks for this run\n",
|
||||
"- **timeout_seconds:** Timeout for the databricks run\n",
|
||||
"- **runconfig:** Runconfig to use. Either pass runconfig or each library type as a separate parameter but do not mix the two\n",
|
||||
"- **maven_libraries:** maven libraries for the databricks run\n",
|
||||
"- **pypi_libraries:** pypi libraries for the databricks run\n",
|
||||
"- **egg_libraries:** egg libraries for the databricks run\n",
|
||||
"- **jar_libraries:** jar libraries for the databricks run\n",
|
||||
"- **rcran_libraries:** rcran libraries for the databricks run\n",
|
||||
"- **compute_target:** Azure Databricks compute\n",
|
||||
"- **allow_reuse:** Whether the step should reuse previous results when run with the same settings/inputs\n",
|
||||
"- **version:** Optional version tag to denote a change in functionality for the step\n",
|
||||
"\n",
|
||||
"\\* *denotes required fields* \n",
|
||||
"*You must provide exactly one of num_workers or min_workers and max_workers paramaters* \n",
|
||||
"*You must provide exactly one of databricks_compute or databricks_compute_name parameters*\n",
|
||||
"\n",
|
||||
"## Use runconfig to specify library dependencies\n",
|
||||
"You can use a runconfig to specify the library dependencies for your cluster in Databricks. The runconfig will contain a databricks section as follows:\n",
|
||||
"\n",
|
||||
"```yaml\n",
|
||||
"environment:\n",
|
||||
"# Databricks details\n",
|
||||
" databricks:\n",
|
||||
"# List of maven libraries.\n",
|
||||
" mavenLibraries:\n",
|
||||
" - coordinates: org.jsoup:jsoup:1.7.1\n",
|
||||
" repo: ''\n",
|
||||
" exclusions:\n",
|
||||
" - slf4j:slf4j\n",
|
||||
" - '*:hadoop-client'\n",
|
||||
"# List of PyPi libraries\n",
|
||||
" pypiLibraries:\n",
|
||||
" - package: beautifulsoup4\n",
|
||||
" repo: ''\n",
|
||||
"# List of RCran libraries\n",
|
||||
" rcranLibraries:\n",
|
||||
" -\n",
|
||||
"# Coordinates.\n",
|
||||
" package: ada\n",
|
||||
"# Repo\n",
|
||||
" repo: http://cran.us.r-project.org\n",
|
||||
"# List of JAR libraries\n",
|
||||
" jarLibraries:\n",
|
||||
" -\n",
|
||||
"# Coordinates.\n",
|
||||
" library: dbfs:/mnt/libraries/library.jar\n",
|
||||
"# List of Egg libraries\n",
|
||||
" eggLibraries:\n",
|
||||
" -\n",
|
||||
"# Coordinates.\n",
|
||||
" library: dbfs:/mnt/libraries/library.egg\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"You can then create a RunConfiguration object using this file and pass it as the runconfig parameter to DatabricksStep.\n",
|
||||
"```python\n",
|
||||
"from azureml.core.runconfig import RunConfiguration\n",
|
||||
"\n",
|
||||
"runconfig = RunConfiguration()\n",
|
||||
"runconfig.load(path='<directory_where_runconfig_is_stored>', name='<runconfig_file_name>')\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 1. Running the demo notebook already added to the Databricks workspace\n",
|
||||
"Create a notebook in the Azure Databricks workspace, and provide the path to that notebook as the value associated with the environment variable \"DATABRICKS_NOTEBOOK_PATH\". This will then set the variable\u00c2\u00a0notebook_path\u00c2\u00a0when you run the code cell below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": [
|
||||
"databricksstep-remarks-sample"
|
||||
]
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"notebook_path=os.getenv(\"DATABRICKS_NOTEBOOK_PATH\", \"<my-databricks-notebook-path>\") # Databricks notebook path\n",
|
||||
"\n",
|
||||
"dbNbStep = DatabricksStep(\n",
|
||||
" name=\"DBNotebookInWS\",\n",
|
||||
" inputs=[step_1_input],\n",
|
||||
" outputs=[step_1_output],\n",
|
||||
" num_workers=1,\n",
|
||||
" notebook_path=notebook_path,\n",
|
||||
" notebook_params={'myparam': 'testparam'},\n",
|
||||
" run_name='DB_Notebook_demo',\n",
|
||||
" compute_target=databricks_compute,\n",
|
||||
" allow_reuse=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Build and submit the Experiment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"steps = [dbNbStep]\n",
|
||||
"pipeline = Pipeline(workspace=ws, steps=steps)\n",
|
||||
"pipeline_run = Experiment(ws, 'DB_Notebook_demo').submit(pipeline)\n",
|
||||
"pipeline_run.wait_for_completion()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### View Run Details"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.widgets import RunDetails\n",
|
||||
"RunDetails(pipeline_run).show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 2. Running a Python script from DBFS\n",
|
||||
"This shows how to run a Python script in DBFS. \n",
|
||||
"\n",
|
||||
"To complete this, you will need to first upload the Python script in your local machine to DBFS using the [CLI](https://docs.azuredatabricks.net/user-guide/dbfs-databricks-file-system.html). The CLI command is given below:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"dbfs cp ./train-db-dbfs.py dbfs:/train-db-dbfs.py\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"The code in the below cell assumes that you have completed the previous step of uploading the script `train-db-dbfs.py` to the root folder in DBFS."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"python_script_path = os.getenv(\"DATABRICKS_PYTHON_SCRIPT_PATH\", \"<my-databricks-python-script-path>\") # Databricks python script path\n",
|
||||
"\n",
|
||||
"dbPythonInDbfsStep = DatabricksStep(\n",
|
||||
" name=\"DBPythonInDBFS\",\n",
|
||||
" inputs=[step_1_input],\n",
|
||||
" num_workers=1,\n",
|
||||
" python_script_path=python_script_path,\n",
|
||||
" python_script_params={'--input_data'},\n",
|
||||
" run_name='DB_Python_demo',\n",
|
||||
" compute_target=databricks_compute,\n",
|
||||
" allow_reuse=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Build and submit the Experiment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"steps = [dbPythonInDbfsStep]\n",
|
||||
"pipeline = Pipeline(workspace=ws, steps=steps)\n",
|
||||
"pipeline_run = Experiment(ws, 'DB_Python_demo').submit(pipeline)\n",
|
||||
"pipeline_run.wait_for_completion()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### View Run Details"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.widgets import RunDetails\n",
|
||||
"RunDetails(pipeline_run).show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 3. Running a Python script in Databricks that currenlty is in local computer\n",
|
||||
"To run a Python script that is currently in your local computer, follow the instructions below. \n",
|
||||
"\n",
|
||||
"The commented out code below code assumes that you have `train-db-local.py` in the `scripts` subdirectory under the current working directory.\n",
|
||||
"\n",
|
||||
"In this case, the Python script will be uploaded first to DBFS, and then the script will be run in Databricks."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"python_script_name = \"train-db-local.py\"\n",
|
||||
"source_directory = \".\"\n",
|
||||
"\n",
|
||||
"dbPythonInLocalMachineStep = DatabricksStep(\n",
|
||||
" name=\"DBPythonInLocalMachine\",\n",
|
||||
" inputs=[step_1_input],\n",
|
||||
" num_workers=1,\n",
|
||||
" python_script_name=python_script_name,\n",
|
||||
" source_directory=source_directory,\n",
|
||||
" run_name='DB_Python_Local_demo',\n",
|
||||
" compute_target=databricks_compute,\n",
|
||||
" allow_reuse=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Build and submit the Experiment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"steps = [dbPythonInLocalMachineStep]\n",
|
||||
"pipeline = Pipeline(workspace=ws, steps=steps)\n",
|
||||
"pipeline_run = Experiment(ws, 'DB_Python_Local_demo').submit(pipeline)\n",
|
||||
"pipeline_run.wait_for_completion()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### View Run Details"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.widgets import RunDetails\n",
|
||||
"RunDetails(pipeline_run).show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 4. Running a JAR job that is alreay added in DBFS\n",
|
||||
"To run a JAR job that is already uploaded to DBFS, follow the instructions below. You will first upload the JAR file to DBFS using the [CLI](https://docs.azuredatabricks.net/user-guide/dbfs-databricks-file-system.html).\n",
|
||||
"\n",
|
||||
"The commented out code in the below cell assumes that you have uploaded `train-db-dbfs.jar` to the root folder in DBFS. You can upload `train-db-dbfs.jar` to the root folder in DBFS using this commandline so you can use `jar_library_dbfs_path = \"dbfs:/train-db-dbfs.jar\"`:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"dbfs cp ./train-db-dbfs.jar dbfs:/train-db-dbfs.jar\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"main_jar_class_name = \"com.microsoft.aeva.Main\"\n",
|
||||
"jar_library_dbfs_path = os.getenv(\"DATABRICKS_JAR_LIB_PATH\", \"<my-databricks-jar-lib-path>\") # Databricks jar library path\n",
|
||||
"\n",
|
||||
"dbJarInDbfsStep = DatabricksStep(\n",
|
||||
" name=\"DBJarInDBFS\",\n",
|
||||
" inputs=[step_1_input],\n",
|
||||
" num_workers=1,\n",
|
||||
" main_class_name=main_jar_class_name,\n",
|
||||
" jar_params={'arg1', 'arg2'},\n",
|
||||
" run_name='DB_JAR_demo',\n",
|
||||
" jar_libraries=[JarLibrary(jar_library_dbfs_path)],\n",
|
||||
" compute_target=databricks_compute,\n",
|
||||
" allow_reuse=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Build and submit the Experiment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"steps = [dbJarInDbfsStep]\n",
|
||||
"pipeline = Pipeline(workspace=ws, steps=steps)\n",
|
||||
"pipeline_run = Experiment(ws, 'DB_JAR_demo').submit(pipeline)\n",
|
||||
"pipeline_run.wait_for_completion()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### View Run Details"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.widgets import RunDetails\n",
|
||||
"RunDetails(pipeline_run).show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Next: ADLA as a Compute Target\n",
|
||||
"To use ADLA as a compute target from Azure Machine Learning Pipeline, a AdlaStep is used. This [notebook](https://aka.ms/pl-adla) demonstrates the use of AdlaStep in Azure Machine Learning Pipeline."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "diray"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
Test1
|
||||
@@ -1,5 +0,0 @@
|
||||
# Copyright (c) Microsoft. All rights reserved.
|
||||
# Licensed under the MIT license.
|
||||
|
||||
print("In train.py")
|
||||
print("As a data scientist, this is where I use my training code.")
|
||||
@@ -1,5 +0,0 @@
|
||||
# Copyright (c) Microsoft. All rights reserved.
|
||||
# Licensed under the MIT license.
|
||||
|
||||
print("In train.py")
|
||||
print("As a data scientist, this is where I use my training code.")
|
||||
@@ -518,7 +518,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.contrib.interpret.explanation.explanation_client import ExplanationClient\n",
|
||||
"from azureml.interpret import ExplanationClient\n",
|
||||
"\n",
|
||||
"# Get model explanation data\n",
|
||||
"client = ExplanationClient.from_run(run)\n",
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
from sklearn import datasets
|
||||
from sklearn.linear_model import Ridge
|
||||
from interpret.ext.blackbox import TabularExplainer
|
||||
from azureml.contrib.interpret.explanation.explanation_client import ExplanationClient
|
||||
from azureml.interpret import ExplanationClient
|
||||
from sklearn.model_selection import train_test_split
|
||||
from azureml.core.run import Run
|
||||
import joblib
|
||||
|
||||
@@ -451,7 +451,7 @@
|
||||
"source": [
|
||||
"import azureml.core\n",
|
||||
"from azureml.core import Workspace, Experiment\n",
|
||||
"from azureml.contrib.interpret.explanation.explanation_client import ExplanationClient\n",
|
||||
"from azureml.interpret import ExplanationClient\n",
|
||||
"# Check core SDK version number\n",
|
||||
"print(\"SDK version:\", azureml.core.VERSION)"
|
||||
]
|
||||
|
||||
@@ -295,8 +295,7 @@
|
||||
"# environment, otherwise if a model is trained or deployed in a different environment this can\n",
|
||||
"# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
|
||||
"azureml_pip_packages.extend(['sklearn-pandas', 'pyyaml', sklearn_dep, pandas_dep])\n",
|
||||
"run_config.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=azureml_pip_packages,\n",
|
||||
" pin_sdk_version=False)\n",
|
||||
"run_config.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=azureml_pip_packages)\n",
|
||||
"# Now submit a run on AmlCompute\n",
|
||||
"from azureml.core.script_run_config import ScriptRunConfig\n",
|
||||
"\n",
|
||||
@@ -368,7 +367,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Retrieve global explanation for visualization\n",
|
||||
"from azureml.contrib.interpret.explanation.explanation_client import ExplanationClient\n",
|
||||
"from azureml.interpret import ExplanationClient\n",
|
||||
"\n",
|
||||
"# get model explanation data\n",
|
||||
"client = ExplanationClient.from_run(run)\n",
|
||||
@@ -460,8 +459,7 @@
|
||||
"# environment, otherwise if a model is trained or deployed in a different environment this can\n",
|
||||
"# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
|
||||
"azureml_pip_packages.extend(['sklearn-pandas', 'pyyaml', sklearn_dep, pandas_dep])\n",
|
||||
"myenv = CondaDependencies.create(pip_packages=azureml_pip_packages,\n",
|
||||
" pin_sdk_version=False)\n",
|
||||
"myenv = CondaDependencies.create(pip_packages=azureml_pip_packages)\n",
|
||||
"\n",
|
||||
"with open(\"myenv.yml\",\"w\") as f:\n",
|
||||
" f.write(myenv.serialize_to_string())\n",
|
||||
|
||||
@@ -15,7 +15,7 @@ from sklearn_pandas import DataFrameMapper
|
||||
|
||||
from azureml.core.run import Run
|
||||
from interpret.ext.blackbox import TabularExplainer
|
||||
from azureml.contrib.interpret.explanation.explanation_client import ExplanationClient
|
||||
from azureml.interpret import ExplanationClient
|
||||
from azureml.interpret.scoring.scoring_explainer import LinearScoringExplainer, save
|
||||
|
||||
OUTPUT_DIR = './outputs/'
|
||||
|
||||
@@ -44,9 +44,11 @@
|
||||
"import azureml.core\n",
|
||||
"from azureml.core import Workspace, Experiment, Datastore, Dataset\n",
|
||||
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||
"from azureml.core.conda_dependencies import CondaDependencies\n",
|
||||
"from azureml.core.runconfig import RunConfiguration\n",
|
||||
"from azureml.exceptions import ComputeTargetException\n",
|
||||
"from azureml.pipeline.steps import HyperDriveStep, HyperDriveStepRun\n",
|
||||
"from azureml.pipeline.core import Pipeline, PipelineData\n",
|
||||
"from azureml.pipeline.steps import HyperDriveStep, HyperDriveStepRun, PythonScriptStep\n",
|
||||
"from azureml.pipeline.core import Pipeline, PipelineData, TrainingOutput\n",
|
||||
"from azureml.train.dnn import TensorFlow\n",
|
||||
"# from azureml.train.hyperdrive import *\n",
|
||||
"from azureml.train.hyperdrive import RandomParameterSampling, BanditPolicy, HyperDriveConfig, PrimaryMetricGoal\n",
|
||||
@@ -232,7 +234,22 @@
|
||||
" compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n",
|
||||
" compute_target.wait_for_completion(show_output=True, timeout_in_minutes=20)\n",
|
||||
"\n",
|
||||
"print(\"Azure Machine Learning Compute attached\")"
|
||||
"print(\"Azure Machine Learning Compute attached\")\n",
|
||||
"\n",
|
||||
"cpu_cluster_name = \"cpu-cluster\"\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
||||
" print(\"Found existing cpu-cluster\")\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" print(\"Creating new cpu-cluster\")\n",
|
||||
" \n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(vm_size=\"STANDARD_D2_V2\",\n",
|
||||
" min_nodes=0,\n",
|
||||
" max_nodes=4)\n",
|
||||
" cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
||||
" \n",
|
||||
" cpu_cluster.wait_for_completion(show_output=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -401,7 +418,15 @@
|
||||
"metrics_output_name = 'metrics_output'\n",
|
||||
"metrics_data = PipelineData(name='metrics_data',\n",
|
||||
" datastore=datastore,\n",
|
||||
" pipeline_output_name=metrics_output_name)\n",
|
||||
" pipeline_output_name=metrics_output_name,\n",
|
||||
" training_output=TrainingOutput(\"Metrics\"))\n",
|
||||
"\n",
|
||||
"model_output_name = 'model_output'\n",
|
||||
"saved_model = PipelineData(name='saved_model',\n",
|
||||
" datastore=datastore,\n",
|
||||
" pipeline_output_name=model_output_name,\n",
|
||||
" training_output=TrainingOutput(\"Model\",\n",
|
||||
" model_file=\"outputs/model/saved_model.pb\"))\n",
|
||||
"\n",
|
||||
"hd_step_name='hd_step01'\n",
|
||||
"hd_step = HyperDriveStep(\n",
|
||||
@@ -409,7 +434,39 @@
|
||||
" hyperdrive_config=hd_config,\n",
|
||||
" estimator_entry_script_arguments=['--data-folder', data_folder],\n",
|
||||
" inputs=[data_folder],\n",
|
||||
" metrics_output=metrics_data)"
|
||||
" outputs=[metrics_data, saved_model])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Find and register best model\n",
|
||||
"When all the jobs finish, we can choose to register the model that has the highest accuracy through an additional PythonScriptStep.\n",
|
||||
"\n",
|
||||
"Through this additional register_model_step, we register the chosen files as a model named `tf-dnn-mnist` under the workspace for deployment."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"conda_dep = CondaDependencies()\n",
|
||||
"conda_dep.add_pip_package(\"azureml-sdk\")\n",
|
||||
"\n",
|
||||
"rcfg = RunConfiguration(conda_dependencies=conda_dep)\n",
|
||||
"\n",
|
||||
"register_model_step = PythonScriptStep(script_name='register_model.py',\n",
|
||||
" name=\"register_model_step01\",\n",
|
||||
" inputs=[saved_model],\n",
|
||||
" compute_target=cpu_cluster,\n",
|
||||
" arguments=[\"--saved-model\", saved_model],\n",
|
||||
" allow_reuse=True,\n",
|
||||
" runconfig=rcfg)\n",
|
||||
"\n",
|
||||
"register_model_step.run_after(hd_step)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -425,7 +482,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pipeline = Pipeline(workspace=ws, steps=[hd_step])\n",
|
||||
"pipeline = Pipeline(workspace=ws, steps=[hd_step, register_model_step])\n",
|
||||
"pipeline_run = exp.submit(pipeline)"
|
||||
]
|
||||
},
|
||||
@@ -500,58 +557,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Find and register best model\n",
|
||||
"When all the jobs finish, we can find out the one that has the highest accuracy."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"hd_step_run = HyperDriveStepRun(step_run=pipeline_run.find_step_run(hd_step_name)[0])\n",
|
||||
"best_run = hd_step_run.get_best_run_by_primary_metric()\n",
|
||||
"best_run"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now let's list the model files uploaded during the run."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(best_run.get_file_names())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can then register the folder (and all files in it) as a model named `tf-dnn-mnist` under the workspace for deployment."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model = best_run.register_model(model_name='tf-dnn-mnist', model_path='outputs/model')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For model deployment, please refer to [Training, hyperparameter tune, and deploy with TensorFlow](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/tensorflow/deployment/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb)."
|
||||
"For model deployment, please refer to [Training, hyperparameter tune, and deploy with TensorFlow](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb)."
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -0,0 +1,274 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Azure Machine Learning Pipeline with KustoStep\n",
|
||||
"To use Kusto as a compute target from [Azure Machine Learning Pipeline](https://aka.ms/pl-concept), a KustoStep is used. A KustoStep enables the functionality of running Kusto queries on a target Kusto cluster in Azure ML Pipelines. Each KustoStep can target one Kusto cluster and perform multiple queries on them. This notebook demonstrates the use of KustoStep in Azure Machine Learning (AML) Pipeline.\n",
|
||||
"\n",
|
||||
"## Before you begin:\n",
|
||||
"\n",
|
||||
"1. **Have an Azure Machine Learning workspace**: You will need details of this workspace later on to define KustoStep.\n",
|
||||
"2. **Have a Service Principal**: You will need a service principal and use its credentials to access your cluster. See [this](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal) for more information.\n",
|
||||
"3. **Have a Blob storage**: You will need a Azure Blob storage for uploading the output of your Kusto query."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Azure Machine Learning and Pipeline SDK-specific imports"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import azureml.core\n",
|
||||
"from azureml.core.runconfig import JarLibrary\n",
|
||||
"from azureml.core.compute import ComputeTarget, KustoCompute\n",
|
||||
"from azureml.exceptions import ComputeTargetException\n",
|
||||
"from azureml.core import Workspace, Experiment\n",
|
||||
"from azureml.pipeline.core import Pipeline, PipelineData\n",
|
||||
"from azureml.pipeline.steps import KustoStep\n",
|
||||
"from azureml.core.datastore import Datastore\n",
|
||||
"from azureml.data.data_reference import DataReference\n",
|
||||
"\n",
|
||||
"# Check core SDK version number\n",
|
||||
"print(\"SDK version:\", azureml.core.VERSION)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize Workspace\n",
|
||||
"\n",
|
||||
"Initialize a workspace object from persisted configuration. If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the [configuration Notebook](https://aka.ms/pl-config) first if you haven't."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Attach Kusto compute target\n",
|
||||
"Next, you need to create a Kusto compute target and give it a name. You will use this name to refer to your Kusto compute target inside Azure Machine Learning. Your workspace will be associated to this Kusto compute target. You will also need to provide some credentials that will be used to enable access to your target Kusto cluster and database.\n",
|
||||
"\n",
|
||||
"- **Resource Group** - The resource group name of your Azure Machine Learning workspace\n",
|
||||
"- **Workspace Name** - The workspace name of your Azure Machine Learning workspace\n",
|
||||
"- **Resource ID** - The resource ID of your Kusto cluster\n",
|
||||
"- **Tenant ID** - The tenant ID associated to your Kusto cluster\n",
|
||||
"- **Application ID** - The Application ID associated to your Kusto cluster\n",
|
||||
"- **Application Key** - The Application key associated to your Kusto cluster\n",
|
||||
"- **Kusto Connection String** - The connection string of your Kusto cluster\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": [
|
||||
"sample-databrickscompute-attach"
|
||||
]
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"compute_name = \"<compute_name>\" # Name to associate with new compute in workspace\n",
|
||||
"\n",
|
||||
"# Account details associated to the target Kusto cluster\n",
|
||||
"resource_id = \"<resource_id>\" # Resource ID of the Kusto cluster\n",
|
||||
"kusto_connection_string = \"<kusto_connection_string>\" # Connection string of the Kusto cluster\n",
|
||||
"application_id = \"<application_id>\" # Application ID associated to the Kusto cluster\n",
|
||||
"application_key = \"<application_key>\" # Application Key associated to the Kusto cluster\n",
|
||||
"tenant_id = \"<tenant_id>\" # Tenant ID associated to the Kusto cluster\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" kusto_compute = KustoCompute(workspace=ws, name=compute_name)\n",
|
||||
" print('Compute target {} already exists'.format(compute_name))\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" print('Compute not found, will use provided parameters to attach new one')\n",
|
||||
" config = KustoCompute.attach_configuration(resource_group=ws.resource_group, workspace_name=ws.name, \n",
|
||||
" resource_id=resource_id, tenant_id=tenant_id, \n",
|
||||
" kusto_connection_string=kusto_connection_string, \n",
|
||||
" application_id=application_id, application_key=application_key)\n",
|
||||
" kusto_compute=ComputeTarget.attach(ws, compute_name, config)\n",
|
||||
" kusto_compute.wait_for_completion(True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup output\n",
|
||||
"To use Kusto as a compute target for Azure Machine Learning Pipeline, a KustoStep is used. Currently KustoStep only supports uploading results to Azure Blob store. Let's define an output datastore via PipelineData to be used in KustoStep."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.core import PipelineParameter\n",
|
||||
"\n",
|
||||
"# Use the default blob storage\n",
|
||||
"def_blob_store = Datastore.get(ws, \"workspaceblobstore\")\n",
|
||||
"print('Datastore {} will be used'.format(def_blob_store.name))\n",
|
||||
"\n",
|
||||
"step_1_output = PipelineData(\"output\", datastore=def_blob_store)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Add a KustoStep to Pipeline\n",
|
||||
"Adds a Kusto query as a step in a Pipeline.\n",
|
||||
"- **name:** Name of the Module\n",
|
||||
"- **compute_target:** Name of Kusto compute target\n",
|
||||
"- **database_name:** Name of the database to perform Kusto query on\n",
|
||||
"- **query_directory:** Path to folder that contains only a text file with Kusto queries (see [here](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/) for more details on Kusto queries). \n",
|
||||
" - If the query is parameterized, then the text file must also include any declaration of query parameters (see [here](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/queryparametersstatement?pivots=azuredataexplorer) for more details on query parameters declaration statements). \n",
|
||||
" - An example of the query text file could just contain the query \"StormEvents | count | as HowManyRecords;\", where StormEvents is the table name. \n",
|
||||
" - Note. the text file should just contain the declarations and queries without quotation marks around them.\n",
|
||||
"- **outputs:** Output binding to an Azure Blob Store.\n",
|
||||
"- **parameter_dict (optional):** Dictionary that contains the values of parameters declared in the query text file in the **query_directory** mentioned above.\n",
|
||||
" - Dictionary key is the parameter name, and dictionary value is the parameter value.\n",
|
||||
" - For example, parameter_dict = {\"paramName1\": \"paramValue1\", \"paramName2\": \"paramValue2\"}\n",
|
||||
"- **allow_reuse (optional):** Whether the step should reuse previous results when run with the same settings/inputs (default to False)"
|
||||
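"\n",
"For example, a sketch of a parameterized query file that could live in **query_directory** (the `MyParam` parameter and the `StormEvents` table are placeholders):\n",
"\n",
"```\n",
"declare query_parameters(MyParam:string);\n",
"StormEvents | where State == MyParam | count | as HowManyRecords;\n",
"```"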
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"database_name = \"<database_name>\" # Name of the database to perform Kusto queries on\n",
|
||||
"query_directory = \"<query_directory>\" # Path to folder that contains a text file with Kusto queries\n",
|
||||
"\n",
|
||||
"kustoStep = KustoStep(\n",
|
||||
" name='KustoNotebook',\n",
|
||||
" compute_target=compute_name,\n",
|
||||
" database_name=database_name,\n",
|
||||
" query_directory=query_directory,\n",
|
||||
" output=step_1_output,\n",
|
||||
")"
|
||||
]
|
||||
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Build and submit the Experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"steps = [kustoStep]\n",
"pipeline = Pipeline(workspace=ws, steps=steps)\n",
"pipeline_run = Experiment(ws, 'Notebook_demo').submit(pipeline)\n",
"pipeline_run.wait_for_completion()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# View Run Details"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"RunDetails(pipeline_run).show()"
]
}
],
"metadata": {
"authors": [
{
"name": "t-kachia"
}
],
"category": "tutorial",
"compute": [
"Kusto"
],
"datasets": [
"Custom"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"Azure ML, Kusto"
],
"friendly_name": "How to use KustoStep with AML Pipelines",
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
},
"order_index": 5,
"star_tag": [
"featured"
],
"tags": [
"None"
],
"task": "Demonstrates the use of KustoStep"
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -477,7 +477,7 @@
"metadata": {
"authors": [
{
"name": "sanpil"
"name": "anshirga"
}
],
"category": "tutorial",
@@ -0,0 +1,21 @@
import argparse
import json
import os
import azureml.core
from azureml.core import Workspace, Experiment, Model
from azureml.core import Run
from azureml.train.hyperdrive import HyperDriveRun
from shutil import copy2

parser = argparse.ArgumentParser()
parser.add_argument('--saved-model', type=str, dest='saved_model', help='path to saved model file')
args = parser.parse_args()

model_output_dir = './model/'

os.makedirs(model_output_dir, exist_ok=True)
copy2(args.saved_model, model_output_dir)

ws = Run.get_context().experiment.workspace

model = Model.register(workspace=ws, model_name='tf-dnn-mnist', model_path=model_output_dir)
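For reference, the same registration can also be done from the submitting notebook once the run completes, via `Run.register_model`. A hedged sketch, assuming the run uploaded its model folder (as the script above does) and with `<run_id>` left as a placeholder:

```python
# Hedged alternative to the script above: register from a completed run object
from azureml.core import Run

run = Run.get(ws, run_id='<run_id>')  # placeholder run id
model = run.register_model(model_name='tf-dnn-mnist', model_path='model')
```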
@@ -278,9 +278,6 @@
"# Enable Docker\n",
"aml_run_config.environment.docker.enabled = True\n",
"\n",
"# Set Docker base image to the default CPU-based image\n",
"aml_run_config.environment.docker.base_image = \"mcr.microsoft.com/azureml/base:0.2.1\"\n",
"\n",
"# Use conda_dependencies.yml to create a conda environment in the Docker image for execution\n",
"aml_run_config.environment.python.user_managed_dependencies = False\n",
"\n",
@@ -777,7 +774,7 @@
"outputs": [],
"source": [
"# Before we proceed we need to wait for the run to complete.\n",
"pipeline_run.wait_for_completion()\n",
"pipeline_run.wait_for_completion(show_output=False)\n",
"\n",
"# functions to download output to local and fetch as dataframe\n",
"def get_download_path(download_path, output_name):\n",
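The hunk ends just as `get_download_path` is defined, so its body is not shown. For orientation only, a helper of this shape typically resolves the local path of a downloaded pipeline output; this is a hedged sketch, not the notebook's actual implementation:

```python
import os

# Hedged sketch: locate a named pipeline output inside the download folder
# (the real helper's body is cut off by the diff context above)
def get_download_path(download_path, output_name):
    output_folder = os.listdir(os.path.join(download_path, 'azureml'))[0]
    return os.path.join(download_path, 'azureml', output_folder, output_name)
```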
how-to-use-azureml/ml-frameworks/README.md
Normal file
@@ -0,0 +1,11 @@
## Training and deployment examples with ML frameworks
These sample notebooks show you how to train and deploy models with popular machine learning frameworks using Azure Machine Learning.

1. [Scikit-learn](scikit-learn): Train, hyperparameter tune and deploy scikit-learn models.
2. [PyTorch](pytorch): Train, hyperparameter tune and deploy PyTorch models. Distributed training with PyTorch.
3. [TensorFlow](tensorflow): Train, hyperparameter tune and deploy TensorFlow models. Distributed training with TensorFlow.
4. [Keras](keras): Train, hyperparameter tune and deploy Keras models.
5. [Chainer](chainer): Train, hyperparameter tune and deploy Chainer models. Distributed training with Chainer.
6. [Fastai](fastai): Train, hyperparameter tune and deploy Fastai models.


@@ -13,7 +13,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
""
""
]
},
{
@@ -29,7 +29,7 @@
"metadata": {},
"source": [
"## Prerequisites\n",
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`"
"* If you are using an Azure Machine Learning compute instance, you are all set. Otherwise, go through the [Configuration](../../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`"
]
},
{
@@ -217,8 +217,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a Chainer estimator\n",
"The Azure ML SDK's Chainer estimator enables you to easily submit Chainer training jobs for both single-node and distributed runs."
"### Create an environment\n",
"\n",
"In this tutorial, we will use one of the Azure ML Chainer curated environments for training."
]
},
{
@@ -227,21 +228,36 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.dnn import Chainer, Mpi\n",
"from azureml.core import Environment\n",
"\n",
"estimator = Chainer(source_directory=project_folder,\n",
" compute_target=compute_target,\n",
" entry_script='train_mnist.py',\n",
" node_count=2,\n",
" distributed_training=Mpi(),\n",
" use_gpu=True)"
"chainer_env = Environment.get(ws, name='AzureML-Chainer-5.1.0-GPU')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The above code specifies that we will run our training script on `2` nodes, with one worker per node. In order to execute a distributed run using MPI, you must provide the argument `distributed_backend=Mpi()`. To specify `i` workers per node, you must provide the argument `distributed_backend=Mpi(process_count_per_node=i)`. Using this estimator with these settings, Chainer and its dependencies will be installed for you. However, if your script also uses other packages, make sure to install them via the `Chainer` constructor's `pip_packages` or `conda_packages` parameters."
"### Configure your training job\n",
"\n",
"Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on.\n",
"\n",
"In order to execute a distributed run using MPI, you must create an `MpiConfiguration` object and pass it to the `distributed_job_config` parameter. The below code will configure a 2-node distributed job. If you would also like to run multiple processes per node (i.e. if your cluster SKU has multiple GPUs), additionally specify the `process_count_per_node` parameter in MpiConfiguration, as in the sketch after this code cell."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import ScriptRunConfig\n",
"from azureml.core.runconfig import MpiConfiguration\n",
"\n",
"src = ScriptRunConfig(source_directory=project_folder,\n",
" script='train_mnist.py',\n",
" compute_target=compute_target,\n",
" environment=chainer_env,\n",
" distributed_job_config=MpiConfiguration(node_count=2))"
]
},
{
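The sketch referenced above: for cluster SKUs with more than one GPU per node, the same job extends to multiple worker processes per node by setting `process_count_per_node`. The counts here are assumed examples:

```python
from azureml.core.runconfig import MpiConfiguration

# Assumed example: 2 nodes with 2 worker processes each, e.g. one per GPU
distributed_config = MpiConfiguration(process_count_per_node=2, node_count=2)
```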
@@ -249,7 +265,7 @@
"metadata": {},
"source": [
"### Submit job\n",
"Run your experiment by submitting your estimator object. Note that this call is asynchronous."
"Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous."
]
},
{
@@ -258,7 +274,7 @@
"metadata": {},
"outputs": [],
"source": [
"run = experiment.submit(estimator)\n",
"run = experiment.submit(src)\n",
"print(run)"
]
},
@@ -297,6 +313,22 @@
"name": "ninhu"
}
],
"category": "training",
"compute": [
"AML Compute"
],
"datasets": [
"MNIST"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"Chainer"
],
"friendly_name": "Distributed Training with Chainer",
"index_order": 1,
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
@@ -312,28 +344,12 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
"version": "3.7.7"
},
"friendly_name": "Distributed Training with Chainer",
"exclude_from_index": false,
"index_order": 1,
"category": "training",
"task": "Use the Chainer estimator to perform distributed training",
"datasets": [
"MNIST"
],
"compute": [
"AML Compute"
],
"deployment": [
"None"
],
"framework": [
"Chainer"
],
"tags": [
"None"
]
],
"task": "Use the Chainer estimator to perform distributed training"
},
"nbformat": 4,
"nbformat_minor": 2
@@ -13,7 +13,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
""
""
]
},
{
@@ -30,7 +30,7 @@
"metadata": {},
"source": [
"## Prerequisites\n",
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`"
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`"
]
},
{
@@ -130,8 +130,7 @@
" print('Found existing compute target.')\n",
"except ComputeTargetException:\n",
" print('Creating a new compute target...')\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', \n",
" min_nodes=2,\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',\n",
" max_nodes=4)\n",
"\n",
" # create the cluster\n",
@@ -245,41 +244,68 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a Chainer estimator\n",
"The Azure ML SDK's Chainer estimator enables you to easily submit Chainer training jobs for both single-node and distributed runs. The following code will define a single-node Chainer job."
"### Create an environment\n",
"\n",
"Define a conda environment YAML file with your training script dependencies and create an Azure ML environment."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"dnn-chainer-remarks-sample"
]
},
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.dnn import Chainer\n",
"%%writefile conda_dependencies.yml\n",
"\n",
"script_params = {\n",
" '--epochs': 10,\n",
" '--batchsize': 128,\n",
" '--output_dir': './outputs'\n",
"}\n",
"channels:\n",
"- conda-forge\n",
"dependencies:\n",
"- python=3.6.2\n",
"- pip:\n",
" - azureml-defaults\n",
" - chainer==5.1.0\n",
" - cupy-cuda90==5.1.0\n",
" - mpi4py==3.0.0\n",
" - pytest"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Environment\n",
"\n",
"estimator = Chainer(source_directory=project_folder, \n",
" script_params=script_params,\n",
" compute_target=compute_target,\n",
" pip_packages=['numpy', 'pytest'],\n",
" entry_script='chainer_mnist.py',\n",
" use_gpu=True)"
"chainer_env = Environment.from_conda_specification(name = 'chainer-5.1.0-gpu', file_path = './conda_dependencies.yml')\n",
"\n",
"# Specify a GPU base image\n",
"chainer_env.docker.enabled = True\n",
"chainer_env.docker.base_image = 'mcr.microsoft.com/azureml/intelmpi2018.3-cuda9.0-cudnn7-ubuntu16.04'"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `script_params` parameter is a dictionary containing the command-line arguments to your training script `entry_script`. To leverage the Azure VM's GPU for training, we set `use_gpu=True`."
"### Configure your training job\n",
"\n",
"Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import ScriptRunConfig\n",
"\n",
"src = ScriptRunConfig(source_directory=project_folder,\n",
" script='chainer_mnist.py',\n",
" arguments=['--epochs', 10, '--batchsize', 128, '--output_dir', './outputs'],\n",
" compute_target=compute_target,\n",
" environment=chainer_env)"
]
},
{
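An environment built from a conda specification can optionally be registered to the workspace, so later runs fetch it by name rather than rebuilding it. A short sketch using the `chainer_env` defined above:

```python
# Optional: persist the environment in the workspace for reuse by name
chainer_env.register(workspace=ws)
restored_env = Environment.get(workspace=ws, name='chainer-5.1.0-gpu')
```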
@@ -287,7 +313,7 @@
"metadata": {},
"source": [
"### Submit job\n",
"Run your experiment by submitting your estimator object. Note that this call is asynchronous."
"Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous."
]
},
{
@@ -296,7 +322,7 @@
"metadata": {},
"outputs": [],
"source": [
"run = experiment.submit(estimator)"
"run = experiment.submit(src)"
]
},
{
@@ -366,13 +392,13 @@
" }\n",
")\n",
"\n",
"hyperdrive_config = HyperDriveConfig(estimator=estimator,\n",
"hyperdrive_config = HyperDriveConfig(run_config=src,\n",
" hyperparameter_sampling=param_sampling, \n",
" primary_metric_name='Accuracy',\n",
" policy=BanditPolicy(evaluation_interval=1, slack_factor=0.1, delay_evaluation=3),\n",
" primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,\n",
" max_total_runs=8,\n",
" max_concurrent_runs=4)\n"
" max_concurrent_runs=4)"
]
},
{
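A `HyperDriveConfig` built on a `run_config` is submitted like any other run configuration; a minimal sketch, assuming the `experiment` object from earlier cells of this notebook:

```python
# Submit the hyperparameter sweep and block until it finishes
hyperdrive_run = experiment.submit(hyperdrive_config)
hyperdrive_run.wait_for_completion(show_output=True)
```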
@@ -750,7 +776,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
"version": "3.7.7"
},
"tags": [
"None"
@@ -0,0 +1,371 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Train a model using a custom Docker image"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In this tutorial, learn how to use a custom Docker image when training models with Azure Machine Learning.\n",
"\n",
"The example scripts in this article are used to classify pet images by creating a convolutional neural network. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Set up the experiment\n",
"This section sets up the training experiment by initializing a workspace, creating an experiment, and uploading the training data and training scripts."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Initialize a workspace\n",
"The Azure Machine Learning workspace is the top-level resource for the service. It provides you with a centralized place to work with all the artifacts you create. In the Python SDK, you can access the workspace artifacts by creating a `workspace` object.\n",
"\n",
"Create a workspace object from the config.json file."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Workspace\n",
"\n",
"ws = Workspace.from_config()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prepare scripts\n",
"Create a directory titled `fastai-example`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"os.makedirs('fastai-example', exist_ok=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Then run the cell below to create the training script train.py in the directory."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"jupyter": {
"outputs_hidden": false,
"source_hidden": false
},
"nteract": {
"transient": {
"deleting": false
}
}
},
"outputs": [],
"source": [
"%%writefile fastai-example/train.py\n",
"\n",
"from fastai.vision.all import *\n",
"\n",
"path = untar_data(URLs.PETS)\n",
"path.ls()\n",
"\n",
"files = get_image_files(path/\"images\")\n",
"len(files)\n",
"\n",
"#(Path('/home/ashwin/.fastai/data/oxford-iiit-pet/images/yorkshire_terrier_102.jpg'),Path('/home/ashwin/.fastai/data/oxford-iiit-pet/images/great_pyrenees_102.jpg'))\n",
"\n",
"def label_func(f): return f[0].isupper()\n",
"\n",
"#To get our data ready for a model, we need to put it in a DataLoaders object. Here we have a function that labels using the file names, so we will use ImageDataLoaders.from_name_func. There are other factory methods of ImageDataLoaders that could be more suitable for your problem, so make sure to check them all in vision.data.\n",
"\n",
"dls = ImageDataLoaders.from_name_func(path, files, label_func, item_tfms=Resize(224))\n",
"\n",
"#We have passed to this function the directory we're working in, the files we grabbed, our label_func and one last piece as item_tfms: this is a Transform applied on all items of our dataset that will resize each image to 224 by 224, by using a random crop on the largest dimension to make it a square, then resizing to 224 by 224. If we didn't pass this, we would get an error later as it would be impossible to batch the items together.\n",
"\n",
"dls.show_batch()\n",
"\n",
"learn = cnn_learner(dls, resnet34, metrics=error_rate)\n",
"learn.fine_tune(1)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Define your environment\n",
"Create an environment object and enable Docker."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Environment\n",
"\n",
"fastai_env = Environment(\"fastai\")\n",
"fastai_env.docker.enabled = True"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The base image specified below supports the fast.ai library, which allows for distributed deep learning capabilities. For more information, see the [fast.ai DockerHub](https://hub.docker.com/u/fastdotai). \n",
"\n",
"When you are using your custom Docker image, you might already have your Python environment properly set up. In that case, set the `user_managed_dependencies` flag to True in order to leverage your custom image's built-in Python environment."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fastai_env.docker.base_image = \"fastdotai/fastai:latest\"\n",
"fastai_env.python.user_managed_dependencies = True"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To use an image from a private container registry that is not in your workspace, you must use `docker.base_image_registry` to specify the address of the repository as well as a username and password."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"```python\n",
"fastai_env.docker.base_image_registry.address = \"myregistry.azurecr.io\"\n",
"fastai_env.docker.base_image_registry.username = \"username\"\n",
"fastai_env.docker.base_image_registry.password = \"password\"\n",
"```"
]
},
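Rather than hard-coding the registry password in the notebook, it can be pulled from the workspace's default Key Vault at run time. A hedged sketch, assuming a secret named `registry-password` was stored there beforehand:

```python
# Hedged sketch: fetch the registry password from the workspace Key Vault
# ('registry-password' is an assumed secret name, not part of this notebook)
keyvault = ws.get_default_keyvault()
fastai_env.docker.base_image_registry.password = keyvault.get_secret(name='registry-password')
```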
{
"cell_type": "markdown",
"metadata": {},
"source": [
"It is also possible to use a custom Dockerfile. Use this approach if you need to install non-Python packages as dependencies, and remember to set the base image to None. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Specify docker steps as a string:\n",
"```python \n",
"dockerfile = r\"\"\"\n",
"FROM mcr.microsoft.com/azureml/base:intelmpi2018.3-ubuntu16.04\n",
"RUN echo \"Hello from custom container!\"\n",
"\"\"\"\n",
"```\n",
"Set base image to None, because the image is defined by dockerfile:\n",
"```python\n",
"fastai_env.docker.base_image = None\n",
"fastai_env.docker.base_dockerfile = dockerfile\n",
"```\n",
"Alternatively, load the string from a file:\n",
"```python\n",
"fastai_env.docker.base_image = None\n",
"fastai_env.docker.base_dockerfile = \"./Dockerfile\"\n",
"```"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create or attach existing AmlCompute\n",
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource.\n",
"\n",
"**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
"\n",
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# choose a name for your cluster\n",
"cluster_name = \"gpu-cluster\"\n",
"\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cluster_name)\n",
" print('Found existing compute target.')\n",
"except ComputeTargetException:\n",
" print('Creating a new compute target...')\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',\n",
" max_nodes=4)\n",
"\n",
" # create the cluster\n",
" compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n",
"\n",
" compute_target.wait_for_completion(show_output=True)\n",
"\n",
"# use get_status() to get a detailed status for the current AmlCompute\n",
"print(compute_target.get_status().serialize())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a ScriptRunConfig\n",
"This ScriptRunConfig will configure your job for execution on the desired compute target."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"jupyter": {
"outputs_hidden": false,
"source_hidden": false
},
"nteract": {
"transient": {
"deleting": false
}
}
},
"outputs": [],
"source": [
"from azureml.core import ScriptRunConfig\n",
"\n",
"fastai_config = ScriptRunConfig(source_directory='fastai-example',\n",
" script='train.py',\n",
" compute_target=compute_target,\n",
" environment=fastai_env)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit your run\n",
"When a training run is submitted using a ScriptRunConfig object, the submit method returns an object of type ScriptRun. The returned ScriptRun object gives you programmatic access to information about the training run. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"jupyter": {
"outputs_hidden": false,
"source_hidden": false
},
"nteract": {
"transient": {
"deleting": false
}
}
},
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"\n",
"run = Experiment(ws,'fastai-custom-image').submit(fastai_config)\n",
"run.wait_for_completion(show_output=True)"
]
}
],
"metadata": {
"authors": [
{
"name": "sagopal"
}
],
"category": "training",
"compute": [
"AML Compute"
],
"datasets": [
"Oxford IIIT Pet"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"Pytorch"
],
"friendly_name": "Train a model with a custom Docker image",
"index_order": 1,
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
},
"nteract": {
"version": "nteract-front-end@1.0.0"
},
"tags": [
"None"
],
"task": "Train with custom Docker image"
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -0,0 +1,5 @@
name: fastai-with-custom-docker
dependencies:
  - pip:
    - azureml-sdk
    - fastai==1.0.61
@@ -13,7 +13,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
""
""
]
},
{
@@ -33,7 +33,7 @@
"\n",
"## Prerequisite:\n",
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n",
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) to:\n",
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../../configuration.ipynb) to:\n",
" * install the AML SDK\n",
" * create a workspace and its configuration file (`config.json`)\n",
"* For local scoring test, you will also need to have `tensorflow` and `keras` installed in the current Jupyter kernel."
@@ -411,9 +411,54 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create TensorFlow estimator & add Keras\n",
"Next, we construct an `azureml.train.dnn.TensorFlow` estimator object, use the `gpu-cluster` as compute target, and pass the mount-point of the datastore to the training code as a parameter.\n",
"The TensorFlow estimator is providing a simple way of launching a TensorFlow training job on a compute target. It will automatically provide a docker image that has TensorFlow installed. In this case, we add `keras` package (for the Keras framework obviously), and `matplotlib` package for plotting a \"Loss vs. Accuracy\" chart and record it in run history."
"## Create an environment\n",
"\n",
"Define a conda environment YAML file with your training script dependencies, which include TensorFlow, Keras and matplotlib, and create an Azure ML environment."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%writefile conda_dependencies.yml\n",
"\n",
"channels:\n",
"- conda-forge\n",
"dependencies:\n",
"- python=3.6.2\n",
"- pip:\n",
" - azureml-defaults==1.13.0\n",
" - tensorflow-gpu==2.0.0\n",
" - keras<=2.3.1\n",
" - matplotlib"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Environment\n",
"\n",
"keras_env = Environment.from_conda_specification(name = 'keras-2.3.1', file_path = './conda_dependencies.yml')\n",
"\n",
"# Specify a GPU base image\n",
"keras_env.docker.enabled = True\n",
"keras_env.docker.base_image = 'mcr.microsoft.com/azureml/openmpi3.1.2-cuda10.0-cudnn7-ubuntu18.04'"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Configure the training job\n",
"\n",
"Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on.\n",
"\n",
"Note that we are specifying a DatasetConsumptionConfig for our FileDataset as an argument to the training script. Azure ML will resolve this DatasetConsumptionConfig to the mount-point of the backing datastore, which we access from the training script."
]
},
{
@@ -434,22 +479,19 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.dnn import TensorFlow\n",
"from azureml.core import ScriptRunConfig\n",
"\n",
"script_params = {\n",
" '--data-folder': dataset.as_named_input('mnist').as_mount(),\n",
" '--batch-size': 50,\n",
" '--first-layer-neurons': 300,\n",
" '--second-layer-neurons': 100,\n",
" '--learning-rate': 0.001\n",
"}\n",
"args = ['--data-folder', dataset.as_named_input('mnist').as_mount(),\n",
" '--batch-size', 50,\n",
" '--first-layer-neurons', 300,\n",
" '--second-layer-neurons', 100,\n",
" '--learning-rate', 0.001]\n",
"\n",
"est = TensorFlow(source_directory=script_folder,\n",
" script_params=script_params,\n",
" compute_target=compute_target, \n",
" entry_script='keras_mnist.py',\n",
" framework_version='2.0', \n",
" pip_packages=['keras<=2.3.1','azureml-dataset-runtime[pandas,fuse]','matplotlib'])"
"src = ScriptRunConfig(source_directory=script_folder,\n",
" script='keras_mnist.py',\n",
" arguments=args,\n",
" compute_target=compute_target,\n",
" environment=keras_env)"
]
},
{
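On the script side, `keras_mnist.py` would typically recover these values with argparse. A hedged sketch of the parsing code (the actual script is not part of this diff, and the `dest` names are assumed):

```python
# Hedged sketch of argument parsing inside keras_mnist.py
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--data-folder', type=str, dest='data_folder',
                    help='mount point of the MNIST dataset')
parser.add_argument('--batch-size', type=int, dest='batch_size', default=50)
parser.add_argument('--first-layer-neurons', type=int, dest='n_hidden_1', default=300)
parser.add_argument('--second-layer-neurons', type=int, dest='n_hidden_2', default=100)
parser.add_argument('--learning-rate', type=float, dest='learning_rate', default=0.001)
args = parser.parse_args()
```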
@@ -457,7 +499,7 @@
"metadata": {},
"source": [
"## Submit job to run\n",
"Submit the estimator to the Azure ML experiment to kick off the execution."
"Submit the ScriptRunConfig to the Azure ML experiment to kick off the execution."
]
},
{
@@ -466,7 +508,7 @@
"metadata": {},
"outputs": [],
"source": [
"run = exp.submit(est)"
"run = exp.submit(src)"
]
},
{
@@ -475,7 +517,7 @@
"source": [
"### Monitor the Run\n",
"As the Run is executed, it will go through the following stages:\n",
"1. Preparing: A docker image is created matching the Python environment specified by the TensorFlow estimator and it will be uploaded to the workspace's Azure Container Registry. This step will only happen once for each Python environment -- the container will then be cached for subsequent runs. Creating and uploading the image takes about **5 minutes**. While the job is preparing, logs are streamed to the run history and can be viewed to monitor the progress of the image creation.\n",
"1. Preparing: A docker image is created matching the Python environment specified by the Azure ML environment, and it will be uploaded to the workspace's Azure Container Registry. This step will only happen once for each Python environment -- the container will then be cached for subsequent runs. Creating and uploading the image takes about **5 minutes**. While the job is preparing, logs are streamed to the run history and can be viewed to monitor the progress of the image creation.\n",
"\n",
"2. Scaling: If the compute needs to be scaled up (i.e. the AmlCompute cluster requires more nodes to execute the run than currently available), the cluster will attempt to scale up in order to make the required amount of nodes available. Scaling typically takes about **5 minutes**.\n",
"\n",
@@ -708,7 +750,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Next, we will create a new estimator without the above parameters since they will be passed in later by Hyperdrive configuration. Note we still need to keep the `data-folder` parameter since that's not a hyperparamter we will sweep."
"Next, we will create a new ScriptRunConfig without the above arguments since they will be passed in later by our Hyperdrive configuration. Note we still need to keep the `data-folder` parameter since that's not a hyperparameter we will sweep."
]
},
{
@@ -717,12 +759,13 @@
"metadata": {},
"outputs": [],
"source": [
"est = TensorFlow(source_directory=script_folder,\n",
" script_params={'--data-folder': dataset.as_named_input('mnist').as_mount()},\n",
"args = ['--data-folder', dataset.as_named_input('mnist').as_mount()]\n",
"\n",
"src = ScriptRunConfig(source_directory=script_folder,\n",
" script='keras_mnist.py',\n",
" arguments=args,\n",
" compute_target=compute_target,\n",
" entry_script='keras_mnist.py',\n",
" framework_version='2.0',\n",
" pip_packages=['keras<=2.3.1','azureml-dataset-runtime[pandas,fuse]','matplotlib'])"
" environment=keras_env)"
]
},
{
@@ -754,11 +797,11 @@
"metadata": {},
"outputs": [],
"source": [
"hdc = HyperDriveConfig(estimator=est, \n",
" hyperparameter_sampling=ps, \n",
" policy=policy, \n",
" primary_metric_name='Accuracy', \n",
" primary_metric_goal=PrimaryMetricGoal.MAXIMIZE, \n",
"hyperdrive_config = HyperDriveConfig(run_config=src,\n",
" hyperparameter_sampling=ps,\n",
" policy=policy,\n",
" primary_metric_name='Accuracy',\n",
" primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,\n",
" max_total_runs=20,\n",
" max_concurrent_runs=4)"
]
@@ -776,7 +819,7 @@
"metadata": {},
"outputs": [],
"source": [
"hdr = exp.submit(config=hdc)"
"hyperdrive_run = exp.submit(config=hyperdrive_config)"
]
},
{
@@ -792,7 +835,7 @@
"metadata": {},
"outputs": [],
"source": [
"RunDetails(hdr).show()"
"RunDetails(hyperdrive_run).show()"
]
},
{
@@ -801,7 +844,7 @@
"metadata": {},
"outputs": [],
"source": [
"hdr.wait_for_completion(show_output=True)"
"hyperdrive_run.wait_for_completion(show_output=True)"
]
},
{
@@ -810,7 +853,7 @@
"metadata": {},
"outputs": [],
"source": [
"assert(hdr.get_status() == \"Completed\")"
"assert(hyperdrive_run.get_status() == \"Completed\")"
]
},
{
@@ -836,7 +879,7 @@
"metadata": {},
"outputs": [],
"source": [
"best_run = hdr.get_best_run_by_primary_metric()\n",
"best_run = hyperdrive_run.get_best_run_by_primary_metric()\n",
"print(best_run.get_details()['runDefinition']['arguments'])"
]
},
@@ -1179,7 +1222,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
"version": "3.7.7"
},
"tags": [
"None"
@@ -13,7 +13,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
""
""
]
},
{
@@ -29,7 +29,7 @@
"metadata": {},
"source": [
"## Prerequisites\n",
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`\n",
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`\n",
"* Review the [tutorial](../train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb) on single-node PyTorch training using Azure Machine Learning"
]
},
@@ -230,8 +230,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a PyTorch estimator\n",
"The Azure ML SDK's PyTorch estimator enables you to easily submit PyTorch training jobs for both single-node and distributed runs. For more information on the PyTorch estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-pytorch)."
"### Create an environment\n",
"\n",
"In this tutorial, we will use one of Azure ML's curated PyTorch environments for training. [Curated environments](https://docs.microsoft.com/azure/machine-learning/how-to-use-environments#use-a-curated-environment) are available in your workspace by default. Specifically, we will use the PyTorch 1.6 GPU curated environment. The curated environment includes the `torch`, `torchvision` and `horovod` packages required by the training script."
]
},
{
@@ -240,21 +241,36 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.dnn import PyTorch, Mpi\n",
"from azureml.core import Environment\n",
"\n",
"estimator = PyTorch(source_directory=project_folder,\n",
" compute_target=compute_target,\n",
" entry_script='pytorch_horovod_mnist.py',\n",
" node_count=2,\n",
" distributed_training=Mpi(),\n",
" use_gpu=True)"
"pytorch_env = Environment.get(ws, name='AzureML-PyTorch-1.6-GPU')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The above code specifies that we will run our training script on `2` nodes, with one worker per node. In order to execute a distributed run using MPI/Horovod, you must provide the argument `distributed_backend=Mpi()`. To specify `i` workers per node, you must provide the argument `distributed_backend=Mpi(process_count_per_node=i)`. Using this estimator with these settings, PyTorch, Horovod and their dependencies will be installed for you. However, if your script also uses other packages, make sure to install them via the `PyTorch` constructor's `pip_packages` or `conda_packages` parameters."
"### Configure the training job\n",
"\n",
"Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on.\n",
"\n",
"In order to execute a distributed run using MPI/Horovod, you must create an `MpiConfiguration` object and pass it to the `distributed_job_config` parameter of the ScriptRunConfig constructor. The below code will configure a 2-node distributed job running one process per node. If you would also like to run multiple processes per node (i.e. if your cluster SKU has multiple GPUs), additionally specify the `process_count_per_node` parameter in `MpiConfiguration` (the default is `1`)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import ScriptRunConfig\n",
"from azureml.core.runconfig import MpiConfiguration\n",
"\n",
"src = ScriptRunConfig(source_directory=project_folder,\n",
" script='pytorch_horovod_mnist.py',\n",
" compute_target=compute_target,\n",
" environment=pytorch_env,\n",
" distributed_job_config=MpiConfiguration(node_count=2))"
]
},
{
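The curated environment ships with `horovod`, which the training script initializes itself. A hedged sketch of what that typically looks like inside `pytorch_horovod_mnist.py` (the script body is not part of this diff):

```python
# Hedged sketch of Horovod initialization in the training script
import torch
import horovod.torch as hvd

hvd.init()  # one Horovod process per MPI worker launched by MpiConfiguration
if torch.cuda.is_available():
    torch.cuda.set_device(hvd.local_rank())  # pin each process to its own GPU
```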
@@ -262,7 +278,7 @@
"metadata": {},
"source": [
"### Submit job\n",
"Run your experiment by submitting your estimator object. Note that this call is asynchronous."
"Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous."
]
},
{
@@ -271,7 +287,7 @@
"metadata": {},
"outputs": [],
"source": [
"run = experiment.submit(estimator)\n",
"run = experiment.submit(src)\n",
"print(run)"
]
},
@@ -317,6 +333,22 @@
"name": "ninhu"
}
],
"category": "training",
"compute": [
"AML Compute"
],
"datasets": [
"MNIST"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"PyTorch"
],
"friendly_name": "Distributed PyTorch",
"index_order": 1,
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
@@ -332,28 +364,12 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
"version": "3.6.9"
},
"friendly_name": "Distributed PyTorch",
"exclude_from_index": false,
"index_order": 1,
"category": "training",
"task": "Train a model using the distributed training via Horovod",
"datasets": [
"MNIST"
],
"compute": [
"AML Compute"
],
"deployment": [
"None"
],
"framework": [
"PyTorch"
],
"tags": [
"None"
]
],
"task": "Train a model using the distributed training via Horovod"
},
"nbformat": 4,
"nbformat_minor": 2
@@ -13,15 +13,15 @@
"cell_type": "markdown",
"metadata": {},
"source": [
""
""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Distributed PyTorch \n",
"In this tutorial, you will train a PyTorch model on the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset using distributed training via Nccl/Gloo across a GPU cluster. "
"# Distributed PyTorch with DistributedDataParallel\n",
"In this tutorial, you will train a PyTorch model on the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset using distributed training with PyTorch's `DistributedDataParallel` module across a GPU cluster. "
]
},
{
@@ -29,7 +29,7 @@
"metadata": {},
"source": [
"## Prerequisites\n",
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`"
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`"
]
},
{
@@ -229,8 +229,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a PyTorch estimator(Nccl Backend)\n",
"The Azure ML SDK's PyTorch estimator enables you to easily submit PyTorch training jobs for both single-node and distributed runs. For more information on the PyTorch estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-pytorch)."
"### Create an environment\n",
"\n",
"Define a conda environment YAML file with your training script dependencies and create an Azure ML environment."
]
},
{
@@ -239,26 +240,67 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.dnn import PyTorch, Nccl\n",
"%%writefile conda_dependencies.yml\n",
"\n",
"estimator = PyTorch(source_directory=project_folder,\n",
" script_params={\"--dist-backend\" : \"nccl\",\n",
" \"--dist-url\": \"$AZ_BATCHAI_PYTORCH_INIT_METHOD\",\n",
" \"--rank\": \"$AZ_BATCHAI_TASK_INDEX\",\n",
" \"--world-size\": 2},\n",
" compute_target=compute_target,\n",
" entry_script='pytorch_mnist.py',\n",
" node_count=2,\n",
" distributed_training=Nccl(),\n",
" use_gpu=True)"
"channels:\n",
"- conda-forge\n",
"dependencies:\n",
"- python=3.6.2\n",
"- pip:\n",
" - azureml-defaults\n",
" - torch==1.6.0\n",
" - torchvision==0.7.0\n",
" - future==0.17.1"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Environment\n",
"\n",
"pytorch_env = Environment.from_conda_specification(name = 'pytorch-1.6-gpu', file_path = './conda_dependencies.yml')\n",
"\n",
"# Specify a GPU base image\n",
"pytorch_env.docker.enabled = True\n",
"pytorch_env.docker.base_image = 'mcr.microsoft.com/azureml/openmpi3.1.2-cuda10.1-cudnn7-ubuntu18.04'"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In the above code, `script_params` uses Azure ML generated `AZ_BATCHAI_PYTORCH_INIT_METHOD` for shared file-system initialization and `AZ_BATCHAI_TASK_INDEX` as rank of each worker process.\n",
"The above code specifies that we will run our training script on `2` nodes, with one worker per node. In order to execute a distributed run using Nccl, you must provide the argument `distributed_training=Nccl()`. Using this estimator with these settings, PyTorch and dependencies will be installed for you. However, if your script also uses other packages, make sure to install them via the `PyTorch` constructor's `pip_packages` or `conda_packages` parameters."
"### Configure the training job: torch.distributed with NCCL backend\n",
"\n",
"Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on.\n",
"\n",
"In order to run a distributed PyTorch job with **torch.distributed** using the NCCL backend, create a `PyTorchConfiguration` and pass it to the `distributed_job_config` parameter of the ScriptRunConfig constructor. Specify `communication_backend='Nccl'` in the PyTorchConfiguration. The below code will configure a 2-node distributed job. The NCCL backend is the recommended backend for PyTorch distributed GPU training.\n",
"\n",
"The script arguments refer to the Azure ML-set environment variables `AZ_BATCHAI_PYTORCH_INIT_METHOD` for shared file-system initialization and `AZ_BATCHAI_TASK_INDEX` for the global rank of each worker process."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import ScriptRunConfig\n",
"from azureml.core.runconfig import PyTorchConfiguration\n",
"\n",
"args = ['--dist-backend', 'nccl',\n",
" '--dist-url', '$AZ_BATCHAI_PYTORCH_INIT_METHOD',\n",
" '--rank', '$AZ_BATCHAI_TASK_INDEX',\n",
" '--world-size', 2]\n",
"\n",
"src = ScriptRunConfig(source_directory=project_folder,\n",
" script='pytorch_mnist.py',\n",
" arguments=args,\n",
" compute_target=compute_target,\n",
" environment=pytorch_env,\n",
" distributed_job_config=PyTorchConfiguration(communication_backend='Nccl', node_count=2))"
]
},
{
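Inside `pytorch_mnist.py` (not shown in this diff), arguments like these are conventionally passed straight to `torch.distributed.init_process_group`. A hedged sketch of that pattern, assuming the script parsed them into an `args` namespace:

```python
# Hedged sketch: how the training script typically consumes these arguments
import torch.distributed as dist

dist.init_process_group(backend=args.dist_backend,    # 'nccl' in this job
                        init_method=args.dist_url,    # shared file-system init
                        rank=args.rank,               # global rank of this worker
                        world_size=args.world_size)
```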
@@ -266,7 +308,7 @@
"metadata": {},
"source": [
"### Submit job\n",
"Run your experiment by submitting your estimator object. Note that this call is asynchronous."
"Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous."
]
},
{
@@ -275,7 +317,7 @@
"metadata": {},
"outputs": [],
"source": [
"run = experiment.submit(estimator)\n",
"run = experiment.submit(src)\n",
"print(run)"
]
},
@@ -318,8 +360,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a PyTorch estimator(Gloo Backend)\n",
"The Azure ML SDK's PyTorch estimator enables you to easily submit PyTorch training jobs for both single-node and distributed runs. For more information on the PyTorch estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-pytorch)."
"### Configure training job: torch.distributed with Gloo backend\n",
"\n",
"If you would instead like to use the Gloo backend for distributed training, you can do so via the following code. The Gloo backend is recommended for distributed CPU training."
]
},
{
@@ -328,28 +371,27 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.dnn import PyTorch, Gloo\n",
"from azureml.core import ScriptRunConfig\n",
"from azureml.core.runconfig import PyTorchConfiguration\n",
"\n",
"estimator = PyTorch(source_directory=project_folder,\n",
" script_params={\"--dist-backend\" : \"gloo\",\n",
" \"--dist-url\": \"$AZ_BATCHAI_PYTORCH_INIT_METHOD\",\n",
" \"--rank\": \"$AZ_BATCHAI_TASK_INDEX\",\n",
" \"--world-size\": 2},\n",
"args = ['--dist-backend', 'gloo',\n",
" '--dist-url', '$AZ_BATCHAI_PYTORCH_INIT_METHOD',\n",
" '--rank', '$AZ_BATCHAI_TASK_INDEX',\n",
" '--world-size', 2]\n",
"\n",
"src = ScriptRunConfig(source_directory=project_folder,\n",
" script='pytorch_mnist.py',\n",
" arguments=args,\n",
" compute_target=compute_target,\n",
" entry_script='pytorch_mnist.py',\n",
" node_count=2,\n",
" distributed_training=Gloo(),\n",
" use_gpu=True)"
" environment=pytorch_env,\n",
" distributed_job_config=PyTorchConfiguration(communication_backend='Gloo', node_count=2))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In the above code, `script_params` uses Azure ML generated `AZ_BATCHAI_PYTORCH_INIT_METHOD` for shared file-system initialization and `AZ_BATCHAI_TASK_INDEX` as rank of each worker process.\n",
"The above code specifies that we will run our training script on `2` nodes, with one worker per node. In order to execute a distributed run using Gloo, you must provide the argument `distributed_training=Gloo()`. Using this estimator with these settings, PyTorch and dependencies will be installed for you. However, if your script also uses other packages, make sure to install them via the `PyTorch` constructor's `pip_packages` or `conda_packages` parameters.\n",
"\n",
"Once you create the estimaotr you can follow the submit steps as shown above to submit a PyTorch run with `Gloo` backend. "
"Once you create the ScriptRunConfig, you can follow the submit steps shown above to submit a PyTorch distributed run using the Gloo backend."
]
}
],
@@ -359,6 +401,22 @@
"name": "ninhu"
}
],
"category": "training",
"compute": [
"AML Compute"
],
"datasets": [
"MNIST"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"PyTorch"
],
"friendly_name": "Distributed training with PyTorch",
"index_order": 1,
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
@@ -374,28 +432,12 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
"version": "3.6.9"
},
"friendly_name": "Distributed training with PyTorch",
"exclude_from_index": false,
"index_order": 1,
"category": "training",
"task": "Train a model using distributed training via Nccl/Gloo",
"datasets": [
"MNIST"
],
"compute": [
"AML Compute"
],
"deployment": [
"None"
],
"framework": [
"PyTorch"
],
"tags": [
"None"
]
],
"task": "Train a model using distributed training via Nccl/Gloo"
},
"nbformat": 4,
"nbformat_minor": 2
@@ -13,7 +13,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
""
""
]
},
{
@@ -32,7 +32,7 @@
"metadata": {},
"source": [
"## Prerequisites\n",
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`"
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`"
]
},
{
@@ -243,44 +243,68 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create a PyTorch estimator\n",
|
||||
"The Azure ML SDK's PyTorch estimator enables you to easily submit PyTorch training jobs for both single-node and distributed runs. For more information on the PyTorch estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-pytorch). The following code will define a single-node PyTorch job."
|
||||
"### Create an environment\n",
|
||||
"\n",
|
||||
"Define a conda environment YAML file with your training script dependencies and create an Azure ML environment."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": [
|
||||
"dnn-pytorch-remarks-sample"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.train.dnn import PyTorch\n",
|
||||
"%%writefile conda_dependencies.yml\n",
|
||||
"\n",
|
||||
"script_params = {\n",
|
||||
" '--num_epochs': 30,\n",
|
||||
" '--output_dir': './outputs'\n",
|
||||
"}\n",
|
||||
"channels:\n",
|
||||
"- conda-forge\n",
|
||||
"dependencies:\n",
|
||||
"- python=3.6.2\n",
|
||||
"- pip:\n",
|
||||
" - azureml-defaults\n",
|
||||
" - torch==1.6.0\n",
|
||||
" - torchvision==0.7.0\n",
|
||||
" - future==0.17.1\n",
|
||||
" - pillow"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Environment\n",
|
||||
"\n",
|
||||
"estimator = PyTorch(source_directory=project_folder, \n",
|
||||
" script_params=script_params,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" entry_script='pytorch_train.py',\n",
|
||||
" use_gpu=True,\n",
|
||||
" pip_packages=['pillow==5.4.1'])"
|
||||
"pytorch_env = Environment.from_conda_specification(name = 'pytorch-1.6-gpu', file_path = './conda_dependencies.yml')\n",
|
||||
"\n",
|
||||
"# Specify a GPU base image\n",
|
||||
"pytorch_env.docker.enabled = True\n",
|
||||
"pytorch_env.docker.base_image = 'mcr.microsoft.com/azureml/openmpi3.1.2-cuda10.1-cudnn7-ubuntu18.04'"
|
||||
]
|
||||
},
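As a usage note, an environment created this way can be registered to the workspace and fetched by name later; a small sketch assuming the standard `Environment.register`/`Environment.get` API:

```python
# Register the environment so later runs (and the deployment step below)
# can fetch it by name instead of rebuilding it from the YAML file.
pytorch_env.register(workspace=ws)

# In a later session:
from azureml.core import Environment
pytorch_env = Environment.get(workspace=ws, name='pytorch-1.6-gpu')
```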
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `script_params` parameter is a dictionary containing the command-line arguments to your training script `entry_script`. Please note the following:\n",
"- We passed our training data reference `ds_data` to our script's `--data_dir` argument. This will 1) mount our datastore on the remote compute and 2) provide the path to the training data `fowl_data` on our datastore.\n",
"- We specified the output directory as `./outputs`. The `outputs` directory is specially treated by Azure ML in that all the content in this directory gets uploaded to your workspace as part of your run history. The files written to this directory are therefore accessible even once your remote run is over. In this tutorial, we will save our trained model to this output directory.\n",
"### Configure the training job\n",
"\n",
"To leverage the Azure VM's GPU for training, we set `use_gpu=True`."
"Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on. The following code will configure a single-node PyTorch job."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import ScriptRunConfig\n",
"\n",
"src = ScriptRunConfig(source_directory=project_folder,\n",
"                      script='pytorch_train.py',\n",
"                      arguments=['--num_epochs', 30, '--output_dir', './outputs'],\n",
"                      compute_target=compute_target,\n",
"                      environment=pytorch_env)"
]
},
{
@@ -288,7 +312,7 @@
"metadata": {},
"source": [
"### Submit job\n",
"Run your experiment by submitting your estimator object. Note that this call is asynchronous."
"Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous."
]
},
{
@@ -297,7 +321,7 @@
"metadata": {},
"outputs": [],
"source": [
"run = experiment.submit(estimator)\n",
"run = experiment.submit(src)\n",
"print(run)"
]
},
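Since `submit` returns immediately, you can block until the run finishes while streaming its logs; a small sketch using the `run` object returned above:

```python
# Stream driver logs and wait for the run to reach a terminal state
run.wait_for_completion(show_output=True)
```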
@@ -381,7 +405,7 @@
"\n",
"early_termination_policy = BanditPolicy(slack_factor=0.15, evaluation_interval=1, delay_evaluation=10)\n",
"\n",
"hyperdrive_config = HyperDriveConfig(estimator=estimator,\n",
"hyperdrive_config = HyperDriveConfig(run_config=src,\n",
"                                     hyperparameter_sampling=param_sampling, \n",
"                                     policy=early_termination_policy,\n",
"                                     primary_metric_name='best_val_acc',\n",
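The hunk ends mid-call, so the remaining HyperDrive arguments are outside this excerpt. For orientation, a sketch of launching the sweep and retrieving the winner afterwards, assuming `hyperdrive_config` is completed as in the full notebook:

```python
# Launch the hyperparameter sweep as a parent run with one child per sampled config
hyperdrive_run = experiment.submit(hyperdrive_config)
hyperdrive_run.wait_for_completion(show_output=True)

# Select the child run that did best on the primary metric ('best_val_acc')
best_run = hyperdrive_run.get_best_run_by_primary_metric()
print(best_run.get_metrics())
```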
@@ -532,24 +556,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create environment file\n",
"Then, we will need to create an environment file (`myenv.yml`) that specifies all of the scoring script's package dependencies. This file is used to ensure that all of those dependencies are installed in the Docker image by Azure ML. In this case, we need to specify `azureml-core`, `torch` and `torchvision`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.conda_dependencies import CondaDependencies \n",
"### Define the environment\n",
"\n",
"myenv = CondaDependencies.create(pip_packages=['azureml-defaults', 'torch', 'torchvision>=0.5.0'])\n",
"\n",
"with open(\"myenv.yml\",\"w\") as f:\n",
"    f.write(myenv.serialize_to_string())\n",
"    \n",
"print(myenv.serialize_to_string())"
"Then, we will need to create an Azure ML environment that specifies all of the scoring script's package dependencies. In this tutorial, we will reuse the same environment, `pytorch_env`, that we created for training."
]
},
{
@@ -570,11 +579,8 @@
"from azureml.core.model import InferenceConfig\n",
"from azureml.core.webservice import Webservice\n",
"from azureml.core.model import Model\n",
"from azureml.core.environment import Environment\n",
"\n",
"\n",
"myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\n",
"inference_config = InferenceConfig(entry_script=\"pytorch_score.py\", environment=myenv)\n",
"inference_config = InferenceConfig(entry_script=\"pytorch_score.py\", environment=pytorch_env)\n",
"\n",
"aciconfig = AciWebservice.deploy_configuration(cpu_cores=1, \n",
"                                               memory_gb=1, \n",
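The deployment hunk is also truncated here. To make the flow concrete, a sketch of how these objects are typically wired together, assuming a registered `model` object as in the full notebook (the service name is illustrative):

```python
from azureml.core.model import Model

# Deploy the registered model behind an ACI endpoint using the
# inference_config and aciconfig objects built above.
service = Model.deploy(workspace=ws,
                       name='aci-pytorch-service',  # hypothetical name
                       models=[model],
                       inference_config=inference_config,
                       deployment_config=aciconfig)
service.wait_for_deployment(show_output=True)
print(service.scoring_uri)
```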
@@ -742,7 +748,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
"version": "3.7.7"
},
"tags": [
"None"
@@ -13,7 +13,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
""
""
]
},
{
@@ -35,7 +35,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"* Go through the [Configuration](../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML Workspace"
"* Go through the [Configuration](../../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML Workspace"
]
},
{
@@ -285,46 +285,59 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a Scikit-learn estimator"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The Azure ML SDK's Scikit-learn estimator enables you to easily submit Scikit-learn training jobs for single-node runs. The following code will define a single-node Scikit-learn job."
"### Create an environment\n",
"\n",
"Define a conda environment YAML file with your training script dependencies and create an Azure ML environment."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"sklearn-remarks-sample"
]
},
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.sklearn import SKLearn\n",
"%%writefile conda_dependencies.yml\n",
"\n",
"script_params = {\n",
"    '--kernel': 'linear',\n",
"    '--penalty': 1.0,\n",
"}\n",
"dependencies:\n",
"- python=3.6.2\n",
"- scikit-learn\n",
"- pip:\n",
"  - azureml-defaults"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Environment\n",
"\n",
"estimator = SKLearn(source_directory=project_folder, \n",
"                    script_params=script_params,\n",
"                    compute_target=compute_target,\n",
"                    entry_script='train_iris.py',\n",
"                    pip_packages=['joblib==0.13.2']\n",
"                   )"
"sklearn_env = Environment.from_conda_specification(name = 'sklearn-env', file_path = './conda_dependencies.yml')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `script_params` parameter is a dictionary containing the command-line arguments to your training script `entry_script`."
"### Configure the training job\n",
"\n",
"Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import ScriptRunConfig\n",
"\n",
"src = ScriptRunConfig(source_directory=project_folder,\n",
"                      script='train_iris.py',\n",
"                      arguments=['--kernel', 'linear', '--penalty', 1.0],\n",
"                      compute_target=compute_target,\n",
"                      environment=sklearn_env)"
]
},
{
@@ -338,7 +351,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Run your experiment by submitting your estimator object. Note that this call is asynchronous."
"Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous."
]
},
{
@@ -347,7 +360,7 @@
"metadata": {},
"outputs": [],
"source": [
"run = experiment.submit(estimator)"
"run = experiment.submit(src)"
]
},
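To watch the submitted run from the notebook, a sketch assuming the `azureml-widgets` package is installed (as these notebooks typically expect):

```python
from azureml.widgets import RunDetails

# Live-updating widget with run status, streamed logs, and logged metrics
RunDetails(run).show()

# Or block until the run reaches a terminal state
run.wait_for_completion(show_output=True)
```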
{
@@ -430,7 +443,7 @@
"    }\n",
")\n",
"\n",
"hyperdrive_config = HyperDriveConfig(estimator=estimator,\n",
"hyperdrive_config = HyperDriveConfig(run_config=src,\n",
"                                     hyperparameter_sampling=param_sampling, \n",
"                                     primary_metric_name='Accuracy',\n",
"                                     primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,\n",
@@ -593,7 +606,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
"version": "3.7.7"
},
"msauthor": "dipeck",
"tags": [
@@ -13,7 +13,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
""
""
]
},
{
@@ -30,7 +30,7 @@
"source": [
"## Prerequisites\n",
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning (AML)\n",
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) to:\n",
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../../configuration.ipynb) to:\n",
"    * install the AML SDK\n",
"    * create a workspace and its configuration file (`config.json`)\n",
"* Review the [tutorial](../train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb) on single-node TensorFlow training using the SDK"
@@ -176,8 +176,8 @@
"outputs": [],
"source": [
"dataset = dataset.register(workspace=ws,\n",
"                           name='mattmahoney dataset',\n",
"                           description='mattmahoney training and test dataset',\n",
"                           name='wikipedia-text',\n",
"                           description='Wikipedia text training and test dataset',\n",
"                           create_new_version=True)"
]
},
@@ -259,10 +259,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a TensorFlow estimator\n",
"The AML SDK's TensorFlow estimator enables you to easily submit TensorFlow training jobs for both single-node and distributed runs. For more information on the TensorFlow estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-tensorflow).\n",
"### Create an environment\n",
"\n",
"The TensorFlow estimator also takes a `framework_version` parameter -- if no version is provided, the estimator will default to the latest version supported by AzureML. Use `TensorFlow.get_supported_versions()` to get a list of all versions supported by your current SDK version or see the [SDK documentation](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.dnn?view=azure-ml-py) for the versions supported in the most current release."
"In this tutorial, we will use one of Azure ML's curated TensorFlow environments for training. [Curated environments](https://docs.microsoft.com/azure/machine-learning/how-to-use-environments#use-a-curated-environment) are available in your workspace by default. Specifically, we will use the TensorFlow 1.13 GPU curated environment."
]
},
{
@@ -271,28 +270,37 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.dnn import TensorFlow, Mpi\n",
"from azureml.core import Environment\n",
"\n",
"script_params={\n",
"    '--input_data': dataset.as_named_input('mattmahoney').as_mount(),\n",
"}\n",
"\n",
"estimator= TensorFlow(source_directory=project_folder,\n",
"                      compute_target=compute_target,\n",
"                      script_params=script_params,\n",
"                      entry_script='tf_horovod_word2vec.py',\n",
"                      node_count=2,\n",
"                      distributed_training=Mpi(),\n",
"                      framework_version='1.13', \n",
"                      use_gpu=True,\n",
"                      pip_packages=['azureml-dataset-runtime[pandas,fuse]'])"
"tf_env = Environment.get(ws, name='AzureML-TensorFlow-1.13-GPU')"
]
},
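To see which curated environments are available in your workspace before picking one, a small sketch using `Environment.list`:

```python
from azureml.core import Environment

# Curated environment names are prefixed with 'AzureML-'
envs = Environment.list(workspace=ws)
for name in envs:
    if name.startswith('AzureML-TensorFlow'):
        print(name)
```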
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The above code specifies that we will run our training script on `2` nodes, with one worker per node. In order to execute a distributed run using MPI/Horovod, you must provide the argument `distributed_backend=Mpi()`. To specify `i` workers per node, you must provide the argument `distributed_backend=Mpi(process_count_per_node=i)`. Using this estimator with these settings, TensorFlow, Horovod and their dependencies will be installed for you. However, if your script also uses other packages, make sure to install them via the `TensorFlow` constructor's `pip_packages` or `conda_packages` parameters."
"### Configure the training job\n",
"\n",
"Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on.\n",
"\n",
"In order to execute a distributed run using MPI/Horovod, you must create an `MpiConfiguration` object and pass it to the `distributed_job_config` parameter of the ScriptRunConfig constructor. The below code will configure a 2-node distributed job running one process per node. If you would also like to run multiple processes per node (i.e. if your cluster SKU has multiple GPUs), additionally specify the `process_count_per_node` parameter in `MpiConfiguration` (the default is `1`)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import ScriptRunConfig\n",
"from azureml.core.runconfig import MpiConfiguration\n",
"\n",
"src = ScriptRunConfig(source_directory=project_folder,\n",
"                      script='tf_horovod_word2vec.py',\n",
"                      arguments=['--input_data', dataset.as_mount()],\n",
"                      compute_target=compute_target,\n",
"                      environment=tf_env,\n",
"                      distributed_job_config=MpiConfiguration(node_count=2))"
]
},
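If your cluster SKU has multiple GPUs per node, the multi-process variant mentioned above would look roughly like this; a sketch assuming an SDK version whose `MpiConfiguration` constructor accepts both parameters (4 processes per node is an illustrative value for a 4-GPU SKU):

```python
from azureml.core.runconfig import MpiConfiguration

# One Horovod worker per GPU: 2 nodes x 4 processes = 8 workers in total
distr_config = MpiConfiguration(process_count_per_node=4, node_count=2)
```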
{
@@ -300,7 +308,7 @@
"metadata": {},
"source": [
"### Submit job\n",
"Run your experiment by submitting your estimator object. Note that this call is asynchronous."
"Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous."
]
},
{
@@ -309,7 +317,7 @@
"metadata": {},
"outputs": [],
"source": [
"run = experiment.submit(estimator)\n",
"run = experiment.submit(src)\n",
"print(run)\n",
"run.get_details()"
]
@@ -352,7 +360,7 @@
"metadata": {
"authors": [
{
"name": "maxluk"
"name": "minxia"
}
],
"category": "training",
@@ -386,7 +394,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.4"
"version": "3.6.9"
},
"tags": [
"None"
@@ -13,7 +13,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
""
""
]
},
{
@@ -30,7 +30,7 @@
"source": [
"## Prerequisites\n",
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning (AML)\n",
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) to:\n",
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../../configuration.ipynb) to:\n",
"    * install the AML SDK\n",
"    * create a workspace and its configuration file (`config.json`)\n",
"* Review the [tutorial](../train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb) on single-node TensorFlow training using the SDK"
@@ -205,8 +205,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a TensorFlow estimator\n",
"The AML SDK's TensorFlow estimator enables you to easily submit TensorFlow training jobs for both single-node and distributed runs. For more information on the TensorFlow estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-tensorflow)."
"### Create an environment\n",
"\n",
"In this tutorial, we will use one of Azure ML's curated TensorFlow environments for training. [Curated environments](https://docs.microsoft.com/azure/machine-learning/how-to-use-environments#use-a-curated-environment) are available in your workspace by default. Specifically, we will use the TensorFlow 1.13 GPU curated environment."
]
},
{
@@ -215,27 +216,37 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.dnn import TensorFlow, ParameterServer\n",
"from azureml.core import Environment\n",
"\n",
"script_params={\n",
"    '--num_gpus': 1,\n",
"    '--train_steps': 500\n",
"}\n",
"\n",
"estimator = TensorFlow(source_directory=project_folder,\n",
"                       compute_target=compute_target,\n",
"                       script_params=script_params,\n",
"                       entry_script='tf_mnist_replica.py',\n",
"                       node_count=2,\n",
"                       distributed_training=ParameterServer(worker_count=2),\n",
"                       use_gpu=True)"
"tf_env = Environment.get(ws, name='AzureML-TensorFlow-1.13-GPU')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The above code specifies that we will run our training script on `2` nodes, with two workers and one parameter server. In order to execute a native distributed TensorFlow run, you must provide the argument `distributed_backend=ParameterServer()`. Using this estimator with these settings, TensorFlow and its dependencies will be installed for you. However, if your script also uses other packages, make sure to install them via the `TensorFlow` constructor's `pip_packages` or `conda_packages` parameters."
"### Configure the training job\n",
"\n",
"Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on.\n",
"\n",
"In order to execute a distributed TensorFlow run with the parameter server strategy, you must create a `TensorflowConfiguration` object and pass it to the `distributed_job_config` parameter of the ScriptRunConfig constructor. The below code configures a distributed TensorFlow run with `2` workers and `1` parameter server."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import ScriptRunConfig\n",
"from azureml.core.runconfig import TensorflowConfiguration\n",
"\n",
"src = ScriptRunConfig(source_directory=project_folder,\n",
"                      script='tf_mnist_replica.py',\n",
"                      arguments=['--num_gpus', 1, '--train_steps', 500],\n",
"                      compute_target=compute_target,\n",
"                      environment=tf_env,\n",
"                      distributed_job_config=TensorflowConfiguration(worker_count=2, parameter_server_count=1))"
]
},
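With the parameter server strategy, Azure ML is expected to set the `TF_CONFIG` environment variable on each node so the script can discover its role; a minimal sketch of reading it inside the training script (standard TensorFlow practice, not code from this diff):

```python
import json
import os

# TF_CONFIG describes the cluster and this node's role, e.g.
# {'cluster': {...}, 'task': {'type': 'worker', 'index': 0}}
tf_config = json.loads(os.environ.get('TF_CONFIG', '{}'))
task = tf_config.get('task', {})
print('role:', task.get('type'), 'index:', task.get('index'))
```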
{
@@ -243,7 +254,7 @@
"metadata": {},
"source": [
"### Submit job\n",
"Run your experiment by submitting your estimator object. Note that this call is asynchronous."
"Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous."
]
},
{
@@ -252,7 +263,7 @@
"metadata": {},
"outputs": [],
"source": [
"run = experiment.submit(estimator)\n",
"run = experiment.submit(src)\n",
"print(run)"
]
},
@@ -295,9 +306,25 @@
"metadata": {
"authors": [
{
"name": "ninhu"
"name": "minxia"
}
],
"category": "training",
"compute": [
"AML Compute"
],
"datasets": [
"MNIST"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"TensorFlow"
],
"friendly_name": "Distributed TensorFlow with parameter server",
"index_order": 1,
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
@@ -313,28 +340,12 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
"version": "3.6.9"
},
"friendly_name": "Distributed TensorFlow with parameter server",
"exclude_from_index": false,
"index_order": 1,
"category": "training",
"task": "Use the TensorFlow estimator to train a model using distributed training",
"datasets": [
"MNIST"
],
"compute": [
"AML Compute"
],
"deployment": [
"None"
],
"framework": [
"TensorFlow"
],
"tags": [
"None"
]
],
"task": "Use the TensorFlow estimator to train a model using distributed training"
},
"nbformat": 4,
"nbformat_minor": 2
@@ -13,29 +13,15 @@
"cell_type": "markdown",
"metadata": {},
"source": [
""
""
]
},
{
"cell_type": "markdown",
"metadata": {
"nbpresent": {
"id": "bf74d2e9-2708-49b1-934b-e0ede342f475"
}
},
"metadata": {},
"source": [
"# Hyperparameter tuning and warm start with TensorFlow\n",
"\n",
"## Introduction\n",
"This tutorial shows how to tune the hyperparameters of a simple deep neural network using the MNIST dataset and TensorFlow on Azure Machine Learning. MNIST is a popular dataset consisting of 70,000 grayscale images. Each image is a handwritten digit of `28x28` pixels, representing a number from 0 to 9. The goal is to create a multi-class classifier to identify the digit each image represents, and deploy it as a web service in Azure.\n",
"\n",
"For more information about the MNIST dataset, please visit [Yann LeCun's website](http://yann.lecun.com/exdb/mnist/).\n",
"\n",
"## Prerequisite:\n",
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n",
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) to:\n",
"    * install the AML SDK\n",
"    * create a workspace and its configuration file (`config.json`)"
"# Warm start hyperparameter tuning\n",
"In this tutorial, you will learn how to warm start a hyperparameter tuning run from a previous tuning run."
]
},
{
@@ -423,12 +409,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create TensorFlow estimator\n",
"Next, we construct an `azureml.train.dnn.TensorFlow` estimator object, use the Batch AI cluster as the compute target, and pass the mount-point of the datastore to the training code as a parameter.\n",
"### Create an environment\n",
"\n",
"The TensorFlow estimator provides a simple way of launching a TensorFlow training job on a compute target. It will automatically provide a docker image that has TensorFlow installed -- if additional pip or conda packages are required, their names can be passed in via the `pip_packages` and `conda_packages` arguments and they will be included in the resulting docker image.\n",
"\n",
"The TensorFlow estimator also takes a `framework_version` parameter -- if no version is provided, the estimator will default to the latest version supported by AzureML. Use `TensorFlow.get_supported_versions()` to get a list of all versions supported by your current SDK version or see the [SDK documentation](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.dnn?view=azure-ml-py) for the versions supported in the most current release."
"In this tutorial, we will use one of Azure ML's curated TensorFlow environments for training. [Curated environments](https://docs.microsoft.com/azure/machine-learning/how-to-use-environments#use-a-curated-environment) are available in your workspace by default. Specifically, we will use the TensorFlow 2.0 GPU curated environment."
]
},
{
@@ -437,47 +420,38 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.environment import Environment\n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"from azureml.core import Environment\n",
"\n",
"# set up environment\n",
"env = Environment('my_env')\n",
"# ensure latest azureml-dataset-runtime and other required packages installed in the environment\n",
"cd = CondaDependencies.create(pip_packages=['keras',\n",
"                                            'azureml-sdk',\n",
"                                            'tensorflow==2.0.0',\n",
"                                            'matplotlib',\n",
"                                            'azureml-dataset-runtime[pandas,fuse]'])\n",
"\n",
"env.python.conda_dependencies = cd"
"tf_env = Environment.get(ws, name='AzureML-TensorFlow-2.0-GPU')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Configure the training job\n",
"Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"dnn-tensorflow-remarks-sample"
]
},
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.dnn import TensorFlow\n",
"from azureml.core import ScriptRunConfig\n",
"\n",
"script_params = {\n",
"    '--data-folder': dataset.as_named_input('mnist').as_mount(),\n",
"    '--batch-size': 64,\n",
"    '--first-layer-neurons': 256,\n",
"    '--second-layer-neurons': 128,\n",
"    '--learning-rate': 0.01\n",
"}\n",
"args = ['--data-folder', dataset.as_mount(),\n",
"        '--batch-size', 64,\n",
"        '--first-layer-neurons', 256,\n",
"        '--second-layer-neurons', 128,\n",
"        '--learning-rate', 0.01]\n",
"\n",
"est = TensorFlow(source_directory=script_folder,\n",
"                 script_params=script_params,\n",
"src = ScriptRunConfig(source_directory=script_folder,\n",
"                      script='tf_mnist.py',\n",
"                      arguments=args,\n",
"                      compute_target=compute_target,\n",
"                 entry_script='tf_mnist.py', \n",
"                 framework_version='2.0',\n",
"                 environment_definition= env)"
"                      environment=tf_env)"
]
},
{
@@ -485,7 +459,7 @@
"metadata": {},
"source": [
"## Submit job to run\n",
"Submit the estimator to an Azure ML experiment to kick off the execution."
"Submit the ScriptRunConfig to an Azure ML experiment to kick off the execution."
]
},
{
@@ -494,7 +468,7 @@
"metadata": {},
"outputs": [],
"source": [
"run = exp.submit(est)"
"run = exp.submit(src)"
]
},
{
@@ -546,7 +520,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Next, we will create a new estimator without the above parameters since they will be passed in later. Note we still need to keep the `data-folder` parameter since that's not a hyperparameter we will sweep."
"Next, we will create a new ScriptRunConfig without the above parameters since they will be passed in later. Note we still need to keep the `data-folder` parameter since that's not a hyperparameter we will sweep."
]
},
{
@@ -555,12 +529,13 @@
"metadata": {},
"outputs": [],
"source": [
"est = TensorFlow(source_directory=script_folder,\n",
"                 script_params={'--data-folder': dataset.as_named_input('mnist').as_mount()},\n",
"args = ['--data-folder', dataset.as_mount()]\n",
"\n",
"src = ScriptRunConfig(source_directory=script_folder,\n",
"                      script='tf_mnist.py',\n",
"                      arguments=args,\n",
"                      compute_target=compute_target,\n",
"                 entry_script='tf_mnist.py',\n",
"                 framework_version='2.0',\n",
"                 environment_definition = env)"
"                      environment=tf_env)"
]
},
{
@@ -584,7 +559,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we are ready to configure a run configuration object, and specify the primary metric `validation_acc` that's recorded in your training runs. If you go back to visit the training script, you will notice that this value is being logged after every epoch (a full batch set). We also want to tell the service that we are looking to maximize this value. We also set the number of samples to 20, and the maximal number of concurrent jobs to 4, which is the same as the number of nodes in our compute cluster."
"Now we are ready to configure a run configuration object, and specify the primary metric `validation_acc` that's recorded in your training runs. If you go back to visit the training script, you will notice that this value is being logged after every epoch (a full batch set). We also want to tell the service that we are looking to maximize this value. We also set the number of samples to 15, and the maximal number of concurrent jobs to 4, which is the same as the number of nodes in our compute cluster."
]
},
{
@@ -594,7 +569,7 @@
"outputs": [],
"source": [
"from azureml.train.hyperdrive import HyperDriveConfig, PrimaryMetricGoal\n",
"htc = HyperDriveConfig(estimator=est, \n",
"htc = HyperDriveConfig(run_config=src, \n",
"                       hyperparameter_sampling=ps, \n",
"                       policy=policy, \n",
"                       primary_metric_name='validation_acc', \n",
@@ -720,7 +695,7 @@
"source": [
"warm_start_parents_to_resume_from=[htr]\n",
"\n",
"warm_start_htc = HyperDriveConfig(estimator=est, \n",
"warm_start_htc = HyperDriveConfig(run_config=src, \n",
"                                  hyperparameter_sampling=ps, \n",
"                                  policy=policy, \n",
"                                  resume_from=warm_start_parents_to_resume_from, \n",
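The warm-start hunk is truncated as well; the warm-started sweep is submitted like any other HyperDrive configuration. A sketch assuming the notebook's `exp` experiment object:

```python
# The warm-started sweep seeds its search with the parent runs passed
# via resume_from instead of starting from scratch.
warm_start_htr = exp.submit(warm_start_htc)
warm_start_htr.wait_for_completion(show_output=True)
```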
@@ -818,7 +793,7 @@
"metadata": {},
"outputs": [],
"source": [
"resume_child_runs_htc = HyperDriveConfig(estimator=est, \n",
"resume_child_runs_htc = HyperDriveConfig(run_config=src, \n",
"                                         hyperparameter_sampling=ps, \n",
"                                         policy=policy, \n",
"                                         resume_child_runs=child_runs_to_resume, \n",
@@ -872,13 +847,6 @@
"best_resume_child_run = resume_child_runs_htr.get_best_run_by_primary_metric()\n",
"resume_child_run_model = best_resume_child_run.register_model(model_name='tf-dnn-mnist-resumed', model_path='outputs/model')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
@@ -918,7 +886,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
"version": "3.6.9"
},
"tags": [
"None"
(binary image changed: 119 KiB before and after)