Mirror of https://github.com/Azure/MachineLearningNotebooks.git, synced 2025-12-20 09:37:04 -05:00.

Compare commits: `azureml-sd`...`azureml-sd` (45 commits).
Commits: ec97207bb1, a2d20b0f47, 8180cebd75, 700ab2d782, ec9a5a061d, 467630f955, eac6b69bae, 441a5b0141, 70902df6da, 6f893ff0b4, bda592a236, 8b32e8d5ad, 54a065c698, b9718678b3, 3fa40d2c6d, 883e4a4c59, e90826b331, ac04172f6d, 8c0000beb4, 35287ab0d8, 3fe4f8b038, 1722678469, 17da7e8706, d2e7213ff3, 882cb76e8a, 37f37a46c1, 0cd1412421, c3ae9f00f6, 11b02c650c, 606048c71f, cb1c354d44, c868fff5a2, bc4e6611c4, 0a58881b70, 2544e85c5f, 7fe27501d1, 624c46e7f9, 40fbadd85c, 0c1fc25542, e8e1357229, ad44f8fa2b, ee63e759f0, b81d97ebbf, 249fb6bbb5, cda1f3e4cf
```diff
@@ -103,7 +103,7 @@
 "source": [
 "import azureml.core\n",
 "\n",
-"print(\"This notebook was created using version 1.20.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.29.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
```
```diff
@@ -254,6 +254,8 @@
 "\n",
 "Many of the sample notebooks use Azure ML managed compute (AmlCompute) to train models using a dynamically scalable pool of compute. In this section you will create default compute clusters for use by the other notebooks and any other operations you choose.\n",
 "\n",
+"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
+"\n",
 "To create a cluster, you need to specify a compute configuration that specifies the type of machine to be used and the scalability behaviors. Then you choose a name for the cluster that is unique within the workspace that can be used to address the cluster later.\n",
 "\n",
 "The cluster parameters are:\n",
```
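The cluster-creation cells themselves are not captured in this hunk; a minimal sketch of the AmlCompute provisioning the text describes, using the v1 SDK (the cluster name and VM size are illustrative assumptions, not values from the notebook):

```python
from azureml.core import Workspace
from azureml.core.compute import AmlCompute, ComputeTarget
from azureml.core.compute_target import ComputeTargetException

ws = Workspace.from_config()
cluster_name = "cpu-cluster"  # hypothetical name; must be unique within the workspace

try:
    # Reuse the cluster if it already exists in the workspace.
    compute_target = ComputeTarget(workspace=ws, name=cluster_name)
except ComputeTargetException:
    # Otherwise provision a new autoscaling cluster.
    config = AmlCompute.provisioning_configuration(
        vm_size="STANDARD_DS3_V2",  # type of machine
        min_nodes=0,                # scale down to zero when idle
        max_nodes=4)                # ceiling of the dynamic pool
    compute_target = ComputeTarget.create(ws, cluster_name, config)
    compute_target.wait_for_completion(show_output=True)
```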
```diff
@@ -36,9 +36,9 @@
 "\n",
 "<a id=\"Introduction\"></a>\n",
 "## Introduction\n",
-"This notebook shows how to use [Fairlearn (an open source fairness assessment and unfairness mitigation package)](http://fairlearn.github.io) and Azure Machine Learning Studio for a binary classification problem. This example uses the well-known adult census dataset. For the purposes of this notebook, we shall treat this as a loan decision problem. We will pretend that the label indicates whether or not each individual repaid a loan in the past. We will use the data to train a predictor to predict whether previously unseen individuals will repay a loan or not. The assumption is that the model predictions are used to decide whether an individual should be offered a loan. Its purpose is purely illustrative of a workflow including a fairness dashboard - in particular, we do **not** include a full discussion of the detailed issues which arise when considering fairness in machine learning. For such discussions, please [refer to the Fairlearn website](http://fairlearn.github.io/).\n",
+"This notebook shows how to use [Fairlearn (an open source fairness assessment and unfairness mitigation package)](http://fairlearn.org) and Azure Machine Learning Studio for a binary classification problem. This example uses the well-known adult census dataset. For the purposes of this notebook, we shall treat this as a loan decision problem. We will pretend that the label indicates whether or not each individual repaid a loan in the past. We will use the data to train a predictor to predict whether previously unseen individuals will repay a loan or not. The assumption is that the model predictions are used to decide whether an individual should be offered a loan. Its purpose is purely illustrative of a workflow including a fairness dashboard - in particular, we do **not** include a full discussion of the detailed issues which arise when considering fairness in machine learning. For such discussions, please [refer to the Fairlearn website](http://fairlearn.org/).\n",
 "\n",
-"We will apply the [grid search algorithm](https://fairlearn.github.io/master/api_reference/fairlearn.reductions.html#fairlearn.reductions.GridSearch) from the Fairlearn package using a specific notion of fairness called Demographic Parity. This produces a set of models, and we will view these in a dashboard both locally and in the Azure Machine Learning Studio.\n",
+"We will apply the [grid search algorithm](https://fairlearn.org/v0.4.6/api_reference/fairlearn.reductions.html#fairlearn.reductions.GridSearch) from the Fairlearn package using a specific notion of fairness called Demographic Parity. This produces a set of models, and we will view these in a dashboard both locally and in the Azure Machine Learning Studio.\n",
 "\n",
 "### Setup\n",
 "\n",
```
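The grid-search cells sit outside this hunk; a sketch of the Fairlearn 0.4.6 usage the text describes, assuming `X_train`, `y_train`, and the sensitive-feature frame `A_train` produced by the data-preparation cells:

```python
from fairlearn.reductions import GridSearch, DemographicParity
from sklearn.linear_model import LogisticRegression

# Sweep a grid of Lagrange multipliers for the demographic-parity constraint;
# each grid point yields one candidate model for the fairness dashboard.
sweep = GridSearch(LogisticRegression(solver="liblinear"),
                   constraints=DemographicParity(),
                   grid_size=71)
sweep.fit(X_train, y_train, sensitive_features=A_train.sex)

# Attribute name may differ slightly across fairlearn 0.4.x releases.
predictors = sweep.predictors_
```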
```diff
@@ -48,7 +48,7 @@
 "* `azureml-contrib-fairness`\n",
 "* `fairlearn==0.4.6` (v0.5.0 will work with minor modifications)\n",
 "* `joblib`\n",
 "* `shap`\n",
 "* `liac-arff`\n",
 "\n",
 "Fairlearn relies on features introduced in v0.22.1 of `scikit-learn`. If you have an older version already installed, please uncomment and run the following cell:"
 ]
```
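The upgrade cell referred to above is not part of this hunk; presumably it is a one-liner along these lines:

```python
# !pip install --upgrade "scikit-learn>=0.22.1"
```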
```diff
@@ -88,7 +88,6 @@
 "from fairlearn.widget import FairlearnDashboard\n",
 "\n",
 "from sklearn.compose import ColumnTransformer\n",
-"from sklearn.datasets import fetch_openml\n",
 "from sklearn.impute import SimpleImputer\n",
 "from sklearn.linear_model import LogisticRegression\n",
 "from sklearn.model_selection import train_test_split\n",
```
```diff
@@ -112,9 +111,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from fairness_nb_utils import fetch_openml_with_retries\n",
+"from fairness_nb_utils import fetch_census_dataset\n",
 "\n",
-"data = fetch_openml_with_retries(data_id=1590)\n",
+"data = fetch_census_dataset()\n",
 " \n",
 "# Extract the items we want\n",
 "X_raw = data.data\n",
```
```diff
@@ -137,7 +136,7 @@
 "outputs": [],
 "source": [
 "A = X_raw[['sex','race']]\n",
-"X_raw = X_raw.drop(labels=['sex', 'race'],axis = 1)"
+"X_raw = X_raw.drop(labels=['sex', 'race'], axis = 1)"
 ]
 },
 {
```

```diff
@@ -584,7 +583,7 @@
 "<a id=\"Conclusion\"></a>\n",
 "## Conclusion\n",
 "\n",
-"In this notebook we have demonstrated how to use the `GridSearch` algorithm from Fairlearn to generate a collection of models, and then present them in the fairness dashboard in Azure Machine Learning Studio. Please remember that this notebook has not attempted to discuss the many considerations which should be part of any approach to unfairness mitigation. The [Fairlearn website](http://fairlearn.github.io/) provides that discussion"
+"In this notebook we have demonstrated how to use the `GridSearch` algorithm from Fairlearn to generate a collection of models, and then present them in the fairness dashboard in Azure Machine Learning Studio. Please remember that this notebook has not attempted to discuss the many considerations which should be part of any approach to unfairness mitigation. The [Fairlearn website](http://fairlearn.org/) provides that discussion"
 ]
 },
 {
```
```diff
@@ -5,3 +5,4 @@ dependencies:
 - azureml-contrib-fairness
 - fairlearn==0.4.6
 - joblib
+- liac-arff
```
```diff
@@ -4,7 +4,13 @@
 """Utilities for azureml-contrib-fairness notebooks."""
 
+import arff
+from collections import OrderedDict
+from contextlib import closing
+import gzip
 import pandas as pd
 from sklearn.datasets import fetch_openml
+from sklearn.utils import Bunch
 import time
```
```diff
@@ -15,7 +21,7 @@ def fetch_openml_with_retries(data_id, max_retries=4, retry_delay=60):
         print("Download attempt {0} of {1}".format(i + 1, max_retries))
         try:
             data = fetch_openml(data_id=data_id, as_frame=True)
             break
-        except Exception as e:
+        except Exception as e:  # noqa: B902
             print("Download attempt failed with exception:")
             print(e)
             if i + 1 != max_retries:
```
```diff
@@ -26,3 +32,80 @@ def fetch_openml_with_retries(data_id, max_retries=4, retry_delay=60):
         raise RuntimeError("Unable to download dataset from OpenML")
 
     return data
+
+
+_categorical_columns = [
+    'workclass',
+    'education',
+    'marital-status',
+    'occupation',
+    'relationship',
+    'race',
+    'sex',
+    'native-country'
+]
+
+
+def fetch_census_dataset():
+    """Fetch the Adult Census Dataset.
+
+    This uses a particular URL for the Adult Census dataset. The code
+    is a simplified version of fetch_openml() in sklearn.
+
+    The data are copied from:
+    https://openml.org/data/v1/download/1595261.gz
+    (as of 2021-03-31)
+    """
+    try:
+        from urllib import urlretrieve
+    except ImportError:
+        from urllib.request import urlretrieve
+
+    filename = "1595261.gz"
+    data_url = "https://rainotebookscdn.blob.core.windows.net/datasets/"
+
+    remaining_attempts = 5
+    sleep_duration = 10
+    while remaining_attempts > 0:
+        try:
+            urlretrieve(data_url + filename, filename)
+
+            http_stream = gzip.GzipFile(filename=filename, mode='rb')
+
+            with closing(http_stream):
+                def _stream_generator(response):
+                    for line in response:
+                        yield line.decode('utf-8')
+
+                stream = _stream_generator(http_stream)
+                data = arff.load(stream)
+        except Exception as exc:  # noqa: B902
+            remaining_attempts -= 1
+            print("Error downloading dataset from {} ({} attempt(s) remaining)"
+                  .format(data_url, remaining_attempts))
+            print(exc)
+            time.sleep(sleep_duration)
+            sleep_duration *= 2
+            continue
+        else:
+            # dataset successfully downloaded
+            break
+    else:
+        raise Exception("Could not retrieve dataset from {}.".format(data_url))
+
+    attributes = OrderedDict(data['attributes'])
+    arff_columns = list(attributes)
+
+    raw_df = pd.DataFrame(data=data['data'], columns=arff_columns)
+
+    target_column_name = 'class'
+    target = raw_df.pop(target_column_name)
+    for col_name in _categorical_columns:
+        dtype = pd.api.types.CategoricalDtype(attributes[col_name])
+        raw_df[col_name] = raw_df[col_name].astype(dtype, copy=False)
+
+    result = Bunch()
+    result.data = raw_df
+    result.target = target
+
+    return result
```
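For reference, the notebook cells shown earlier consume this helper roughly as follows (the label binarization mirrors the usual adult-census setup and is an assumption here):

```python
data = fetch_census_dataset()

X_raw = data.data                 # features as a pandas DataFrame
y = (data.target == '>50K') * 1   # binarize the income label
```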
```diff
@@ -50,7 +50,7 @@
 "* `azureml-contrib-fairness`\n",
 "* `fairlearn==0.4.6` (should also work with v0.5.0)\n",
 "* `joblib`\n",
 "* `shap`\n",
 "* `liac-arff`\n",
 "\n",
 "Fairlearn relies on features introduced in v0.22.1 of `scikit-learn`. If you have an older version already installed, please uncomment and run the following cell:"
 ]
```

```diff
@@ -88,7 +88,6 @@
 "source": [
 "from sklearn import svm\n",
 "from sklearn.compose import ColumnTransformer\n",
-"from sklearn.datasets import fetch_openml\n",
 "from sklearn.impute import SimpleImputer\n",
 "from sklearn.linear_model import LogisticRegression\n",
 "from sklearn.model_selection import train_test_split\n",
```

```diff
@@ -110,9 +109,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from fairness_nb_utils import fetch_openml_with_retries\n",
+"from fairness_nb_utils import fetch_census_dataset\n",
 "\n",
-"data = fetch_openml_with_retries(data_id=1590)\n",
+"data = fetch_census_dataset()\n",
 " \n",
 "# Extract the items we want\n",
 "X_raw = data.data\n",
```

```diff
@@ -5,3 +5,4 @@ dependencies:
 - azureml-contrib-fairness
 - fairlearn==0.4.6
 - joblib
+- liac-arff
```
```diff
@@ -21,8 +21,8 @@ dependencies:
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.20.0
+  - azureml-widgets~=1.29.0
   - pytorch-transformers==1.0.0
   - spacy==2.1.8
   - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.20.0/validated_win32_requirements.txt [--no-deps]
+  - -r https://automlresources-prod.azureedge.net/validated-requirements/1.29.0/validated_win32_requirements.txt [--no-deps]
```

```diff
@@ -21,9 +21,8 @@ dependencies:
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.20.0
+  - azureml-widgets~=1.29.0
   - pytorch-transformers==1.0.0
   - spacy==2.1.8
   - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.20.0/validated_linux_requirements.txt [--no-deps]
-
+  - -r https://automlresources-prod.azureedge.net/validated-requirements/1.29.0/validated_linux_requirements.txt [--no-deps]
```

```diff
@@ -22,8 +22,8 @@ dependencies:
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.20.0
+  - azureml-widgets~=1.29.0
   - pytorch-transformers==1.0.0
   - spacy==2.1.8
   - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.20.0/validated_darwin_requirements.txt [--no-deps]
+  - -r https://automlresources-prod.azureedge.net/validated-requirements/1.29.0/validated_darwin_requirements.txt [--no-deps]
```
```diff
@@ -32,6 +32,7 @@ if [ $? -ne 0 ]; then
 fi
 
 sed -i '' 's/AZUREML-SDK-VERSION/latest/' $AUTOML_ENV_FILE
+brew install libomp
 
 if source activate $CONDA_ENV_NAME 2> /dev/null
 then
```
```diff
@@ -105,7 +105,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.20.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.29.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
```

```diff
@@ -165,6 +165,9 @@
 "source": [
 "## Create or Attach existing AmlCompute\n",
 "You will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
+"\n",
+"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
+"\n",
 "#### Creation of AmlCompute takes approximately 5 minutes. \n",
 "If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
 "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
```

```diff
@@ -374,15 +377,6 @@
 "remote_run = experiment.submit(automl_config, show_output = False)"
 ]
 },
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"remote_run"
-]
-},
 {
 "cell_type": "markdown",
 "metadata": {},
```
```diff
@@ -93,7 +93,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.20.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.29.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
```

```diff
@@ -127,6 +127,9 @@
 "source": [
 "## Create or Attach existing AmlCompute\n",
 "A compute target is required to execute the Automated ML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
+"\n",
+"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
+"\n",
 "#### Creation of AmlCompute takes approximately 5 minutes. \n",
 "If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
 "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
```

```diff
@@ -255,15 +258,6 @@
 "#remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
 ]
 },
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"remote_run"
-]
-},
 {
 "cell_type": "markdown",
 "metadata": {},
```
```diff
@@ -96,7 +96,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.20.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.29.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
```

```diff
@@ -138,6 +138,8 @@
 "## Set up a compute cluster\n",
 "This section uses a user-provided compute cluster (named \"dnntext-cluster\" in this example). If a cluster with this name does not exist in the user's workspace, the below code will create a new cluster. You can choose the parameters of the cluster as mentioned in the comments.\n",
 "\n",
+"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
+"\n",
 "Whether you provide/select a CPU or GPU cluster, AutoML will choose the appropriate DNN for that setup - BiLSTM or BERT text featurizer will be included in the candidate featurizers on CPU and GPU respectively. If your goal is to obtain the most accurate model, we recommend you use GPU clusters since BERT featurizers usually outperform BiLSTM featurizers."
 ]
 },
```

```diff
@@ -281,7 +283,7 @@
 "outputs": [],
 "source": [
 "automl_settings = {\n",
-" \"experiment_timeout_minutes\": 20,\n",
+" \"experiment_timeout_minutes\": 30,\n",
 " \"primary_metric\": 'accuracy',\n",
 " \"max_concurrent_iterations\": num_nodes, \n",
 " \"max_cores_per_iteration\": -1,\n",
```

```diff
@@ -319,15 +321,6 @@
 "automl_run = experiment.submit(automl_config, show_output=True)"
 ]
 },
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"automl_run"
-]
-},
 {
 "cell_type": "markdown",
 "metadata": {},
```
```diff
@@ -494,7 +487,7 @@
 "outputs": [],
 "source": [
 "test_run = run_inference(test_experiment, compute_target, script_folder, best_dnn_run,\n",
-" train_dataset, test_dataset, target_column_name, model_name)"
+" test_dataset, target_column_name, model_name)"
 ]
 },
 {
```

```diff
@@ -5,7 +5,7 @@ from azureml.core.run import Run
 
 
 def run_inference(test_experiment, compute_target, script_folder, train_run,
-                  train_dataset, test_dataset, target_column_name, model_name):
+                  test_dataset, target_column_name, model_name):
 
     inference_env = train_run.get_environment()
```

```diff
@@ -16,7 +16,6 @@ def run_inference(test_experiment, compute_target, script_folder, train_run,
             '--model_name': model_name
         },
         inputs=[
-            train_dataset.as_named_input('train_data'),
            test_dataset.as_named_input('test_data')
         ],
         compute_target=compute_target,
```
```diff
@@ -1,5 +1,6 @@
 import argparse
 
 import pandas as pd
+import numpy as np
 
 from sklearn.externals import joblib
```

```diff
@@ -32,22 +33,21 @@ model = joblib.load(model_path)
 run = Run.get_context()
 # get input dataset by name
 test_dataset = run.input_datasets['test_data']
-train_dataset = run.input_datasets['train_data']
 
 X_test_df = test_dataset.drop_columns(columns=[target_column_name]) \
     .to_pandas_dataframe()
 y_test_df = test_dataset.with_timestamp_columns(None) \
     .keep_columns(columns=[target_column_name]) \
     .to_pandas_dataframe()
-y_train_df = test_dataset.with_timestamp_columns(None) \
-    .keep_columns(columns=[target_column_name]) \
-    .to_pandas_dataframe()
 
 predicted = model.predict_proba(X_test_df)
 
 if isinstance(predicted, pd.DataFrame):
     predicted = predicted.values
 
 # Use the AutoML scoring module
-class_labels = np.unique(np.concatenate((y_train_df.values, y_test_df.values)))
+train_labels = model.classes_
+class_labels = np.unique(np.concatenate((y_test_df.values, np.reshape(train_labels, (-1, 1)))))
 classification_metrics = list(constants.CLASSIFICATION_SCALAR_SET)
 scores = scoring.score_classification(y_test_df.values, predicted,
                                       classification_metrics,
```
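The replacement derives the label set from the model's own `classes_` rather than from a training split; a toy check of the reshape-and-concatenate step (values are purely illustrative):

```python
import numpy as np

y_test_values = np.array([[0], [1], [1]])   # column vector, as .to_pandas_dataframe().values returns
train_labels = np.array([0, 1, 2])          # model.classes_ is 1-D
class_labels = np.unique(
    np.concatenate((y_test_values, np.reshape(train_labels, (-1, 1)))))
print(class_labels)  # [0 1 2]
```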
```diff
@@ -81,7 +81,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.20.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.29.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
```

```diff
@@ -141,6 +141,9 @@
 "#### Create or Attach existing AmlCompute\n",
 "\n",
 "You will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
+"\n",
+"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
+"\n",
 "#### Creation of AmlCompute takes approximately 5 minutes. \n",
 "If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
 "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
```
```diff
@@ -54,17 +54,17 @@ try:
     end_time_last_slice = ds.data_changed_time.replace(tzinfo=None)
     print("Dataset {0} last updated on {1}".format(args.ds_name,
                                                    end_time_last_slice))
-except Exception as e:
+except Exception:
     print(traceback.format_exc())
     print("Dataset with name {0} not found, registering new dataset.".format(args.ds_name))
     register_dataset = True
-    end_time_last_slice = datetime.today() - relativedelta(weeks=2)
+    end_time_last_slice = datetime.today() - relativedelta(weeks=4)
 
 end_time = datetime.utcnow()
 train_df = get_noaa_data(end_time_last_slice, end_time)
 
 if train_df.size > 0:
-    print("Received {0} rows of new data after {0}.".format(
+    print("Received {0} rows of new data after {1}.".format(
         train_df.shape[0], end_time_last_slice))
     folder_name = "{}/{:04d}/{:02d}/{:02d}/{:02d}/{:02d}/{:02d}".format(args.ds_name, end_time.year,
                                                                         end_time.month, end_time.day,
```
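A quick check of the widened look-back window (dates are purely illustrative):

```python
from datetime import datetime
from dateutil.relativedelta import relativedelta

today = datetime(2021, 6, 1)
print(today - relativedelta(weeks=4))  # 2021-05-04 00:00:00 (weeks=2 would give 2021-05-18)
```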
```diff
@@ -5,7 +5,7 @@ set options=%3
 set PIP_NO_WARN_SCRIPT_LOCATION=0
 
 IF "%conda_env_name%"=="" SET conda_env_name="azure_automl_experimental"
-IF "%automl_env_file%"=="" SET automl_env_file="automl_env.yml"
+IF "%automl_env_file%"=="" SET automl_env_file="automl_thin_client_env.yml"
 
 IF NOT EXIST %automl_env_file% GOTO YmlMissing
```

```diff
@@ -12,7 +12,7 @@ fi
 
 if [ "$AUTOML_ENV_FILE" == "" ]
 then
-    AUTOML_ENV_FILE="automl_env.yml"
+    AUTOML_ENV_FILE="automl_thin_client_env.yml"
 fi
 
 if [ ! -f $AUTOML_ENV_FILE ]; then
```

```diff
@@ -12,7 +12,7 @@ fi
 
 if [ "$AUTOML_ENV_FILE" == "" ]
 then
-    AUTOML_ENV_FILE="automl_env.yml"
+    AUTOML_ENV_FILE="automl_thin_client_env_mac.yml"
 fi
 
 if [ ! -f $AUTOML_ENV_FILE ]; then
```
```diff
@@ -5,16 +5,14 @@ dependencies:
 - pip<=19.3.1
 - python>=3.5.2,<3.8
 - nb_conda
-- matplotlib==2.1.0
-- numpy~=1.18.0
-- cython
-- urllib3<1.24
-- scikit-learn==0.22.1
-- pandas==0.25.1
+- PyJWT < 2.0.0
+- numpy==1.18.5
 
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
   - azureml-defaults
   - azureml-sdk
   - azureml-widgets
   - azureml-explain-model
   - pandas
```

```diff
@@ -6,16 +6,14 @@ dependencies:
 - nomkl
 - python>=3.5.2,<3.8
 - nb_conda
-- matplotlib==2.1.0
-- numpy~=1.18.0
-- cython
-- urllib3<1.24
-- scikit-learn==0.22.1
-- pandas==0.25.1
+- PyJWT < 2.0.0
+- numpy==1.18.5
 
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
   - azureml-defaults
   - azureml-sdk
   - azureml-widgets
   - azureml-explain-model
   - pandas
```
```diff
@@ -39,6 +39,7 @@
 "source": [
 "## Introduction\n",
 "In this example we use an experimental feature, Model Proxy, to do a predict on the best generated model without downloading the model locally. The prediction will happen on the same compute and environment that was used to train the model. This feature is currently in the experimental state, which means that the API is prone to changing; please make sure to run on the latest version of this notebook if you face any issues.\n",
+"This notebook will also leverage MLFlow for saving models, allowing for more portability of the resulting models. See https://docs.microsoft.com/en-us/azure/machine-learning/how-to-use-mlflow for more details around MLFlow in AzureML.\n",
 "\n",
 "If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n",
 "\n",
```
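In outline, the feature works as sketched below; fetching the best child run this way is an assumption about the surrounding notebook, while `ModelProxy(...).predict(...)` matches the cells shown later in this diff:

```python
from azureml.train.automl.model_proxy import ModelProxy

best_run = remote_run.get_best_child()   # assumed; the notebook may select the run differently
best_model_proxy = ModelProxy(best_run)

# Runs remotely on the training compute/environment; the model is never downloaded locally.
y_pred_test = best_model_proxy.predict(test_data)
```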
```diff
@@ -67,11 +68,8 @@
 "source": [
 "import logging\n",
 "\n",
-"from matplotlib import pyplot as plt\n",
-"import json\n",
 "import numpy as np\n",
 "import pandas as pd\n",
-" \n",
 "\n",
 "\n",
 "import azureml.core\n",
 "from azureml.core.experiment import Experiment\n",
```

```diff
@@ -93,7 +91,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.20.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.29.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
```

```diff
@@ -116,9 +114,7 @@
 "output['Resource Group'] = ws.resource_group\n",
 "output['Location'] = ws.location\n",
 "output['Run History Name'] = experiment_name\n",
-"pd.set_option('display.max_colwidth', -1)\n",
-"outputDf = pd.DataFrame(data = output, index = [''])\n",
-"outputDf.T"
+"output"
 ]
 },
 {
```

```diff
@@ -199,7 +195,6 @@
 "|**n_cross_validations**|Number of cross validation splits.|\n",
 "|**training_data**|(sparse) array-like, shape = [n_samples, n_features]|\n",
 "|**label_column_name**|(sparse) array-like, shape = [n_samples, ], targets values.|\n",
-"|**scenario**|We need to set this parameter to 'Latest' to enable some experimental features. This parameter should not be set outside of this experimental notebook.|\n",
 "\n",
 "**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
 ]
```

```diff
@@ -218,17 +213,17 @@
 " \"n_cross_validations\": 3,\n",
 " \"primary_metric\": 'r2_score',\n",
 " \"enable_early_stopping\": True, \n",
-" \"experiment_timeout_hours\": 0.3, #for real scenarios we reccommend a timeout of at least one hour \n",
+" \"experiment_timeout_hours\": 0.3, #for real scenarios we recommend a timeout of at least one hour \n",
 " \"max_concurrent_iterations\": 4,\n",
 " \"max_cores_per_iteration\": -1,\n",
 " \"verbosity\": logging.INFO,\n",
 " \"save_mlflow\": True,\n",
 "}\n",
 "\n",
 "automl_config = AutoMLConfig(task = 'regression',\n",
 " compute_target = compute_target,\n",
 " training_data = train_data,\n",
 " label_column_name = label,\n",
 " scenario='Latest',\n",
 " **automl_settings\n",
 " )"
```
```diff
@@ -276,34 +271,13 @@
 "## Results"
 ]
 },
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"#### Widget for Monitoring Runs\n",
-"\n",
-"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
-"\n",
-"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details."
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"from azureml.widgets import RunDetails\n",
-"RunDetails(remote_run).show() "
-]
-},
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
-"remote_run.wait_for_completion()"
+"remote_run.wait_for_completion(show_output=True)"
 ]
 },
 {
```
```diff
@@ -368,18 +342,12 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"# preview the first 3 rows of the dataset\n",
-"\n",
-"test_data = test_data.to_pandas_dataframe()\n",
-"y_test = test_data['ERP'].fillna(0)\n",
-"test_data = test_data.drop('ERP', 1)\n",
-"test_data = test_data.fillna(0)\n",
+"y_test = test_data.keep_columns('ERP')\n",
+"test_data = test_data.drop_columns('ERP')\n",
 "\n",
 "\n",
-"train_data = train_data.to_pandas_dataframe()\n",
-"y_train = train_data['ERP'].fillna(0)\n",
-"train_data = train_data.drop('ERP', 1)\n",
-"train_data = train_data.fillna(0)\n"
+"y_train = train_data.keep_columns('ERP')\n",
+"train_data = train_data.drop_columns('ERP')\n"
 ]
 },
 {
```

```diff
@@ -397,7 +365,16 @@
 "outputs": [],
 "source": [
 "from azureml.train.automl.model_proxy import ModelProxy\n",
-"best_model_proxy = ModelProxy(best_run)"
+"best_model_proxy = ModelProxy(best_run)\n",
+"y_pred_train = best_model_proxy.predict(train_data)\n",
+"y_pred_test = best_model_proxy.predict(test_data)"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"#### Exploring results"
+]
+},
 {
```

```diff
@@ -406,60 +383,15 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"y_pred_train = best_model_proxy.predict(train_data).to_pandas_dataframe().values.flatten()\n",
+"y_pred_train = y_pred_train.to_pandas_dataframe().values.flatten()\n",
+"y_train = y_train.to_pandas_dataframe().values.flatten()\n",
 "y_residual_train = y_train - y_pred_train\n",
 "\n",
-"y_pred_test = best_model_proxy.predict(test_data).to_pandas_dataframe().values.flatten()\n",
-"y_residual_test = y_test - y_pred_test"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"%matplotlib inline\n",
-"from sklearn.metrics import mean_squared_error, r2_score\n",
-"\n",
-"# Set up a multi-plot chart.\n",
-"f, (a0, a1) = plt.subplots(1, 2, gridspec_kw = {'width_ratios':[1, 1], 'wspace':0, 'hspace': 0})\n",
-"f.suptitle('Regression Residual Values', fontsize = 18)\n",
-"f.set_figheight(6)\n",
-"f.set_figwidth(16)\n",
-"\n",
-"# Plot residual values of training set.\n",
-"a0.axis([0, 360, -100, 100])\n",
-"a0.plot(y_residual_train, 'bo', alpha = 0.5)\n",
-"a0.plot([-10,360],[0,0], 'r-', lw = 3)\n",
-"a0.text(16,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_train, y_pred_train))), fontsize = 12)\n",
-"a0.text(16,140,'R2 score = {0:.2f}'.format(r2_score(y_train, y_pred_train)),fontsize = 12)\n",
-"a0.set_xlabel('Training samples', fontsize = 12)\n",
-"a0.set_ylabel('Residual Values', fontsize = 12)\n",
-"\n",
-"# Plot residual values of test set.\n",
-"a1.axis([0, 90, -100, 100])\n",
-"a1.plot(y_residual_test, 'bo', alpha = 0.5)\n",
-"a1.plot([-10,360],[0,0], 'r-', lw = 3)\n",
-"a1.text(5,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_test, y_pred_test))), fontsize = 12)\n",
-"a1.text(5,140,'R2 score = {0:.2f}'.format(r2_score(y_test, y_pred_test)),fontsize = 12)\n",
-"a1.set_xlabel('Test samples', fontsize = 12)\n",
-"a1.set_yticklabels([])\n",
-"\n",
-"plt.show()"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"%matplotlib inline\n",
-"test_pred = plt.scatter(y_test, y_pred_test, color='')\n",
-"test_test = plt.scatter(y_test, y_test, color='g')\n",
-"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
-"plt.show()"
+"y_pred_test = y_pred_test.to_pandas_dataframe().values.flatten()\n",
+"y_test = y_test.to_pandas_dataframe().values.flatten()\n",
+"y_residual_test = y_test - y_pred_test\n",
+"print(y_residual_train)\n",
+"print(y_residual_test)"
 ]
 },
 {
```
```diff
@@ -113,7 +113,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.20.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.29.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
```

```diff
@@ -162,7 +162,9 @@
 },
 "source": [
 "### Using AmlCompute\n",
-"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you use `AmlCompute` as your training compute resource."
+"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you use `AmlCompute` as your training compute resource.\n",
+"\n",
+"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
 ]
 },
 {
```

```diff
@@ -218,6 +220,8 @@
 "\n",
 "**Time series identifier columns** are identified by values of the columns listed `time_series_id_column_names`, for example \"store\" and \"item\" if your data has multiple time series of sales, one series for each combination of store and item sold.\n",
 "\n",
+"**Forecast frequency (freq)** This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information.\n",
+"\n",
 "This dataset has only one time series. Please see the [orange juice notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales) for an example of a multi-time series dataset."
 ]
 },
```

```diff
@@ -363,7 +367,9 @@
 "source": [
 "from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
 "forecasting_parameters = ForecastingParameters(\n",
-" time_column_name=time_column_name, forecast_horizon=forecast_horizon\n",
+" time_column_name=time_column_name,\n",
+" forecast_horizon=forecast_horizon,\n",
+" freq='MS' # Set the forecast frequency to be monthly (start of the month)\n",
 ")\n",
 "\n",
 "automl_config = AutoMLConfig(task='forecasting', \n",
```

```diff
@@ -399,8 +405,7 @@
 },
 "outputs": [],
 "source": [
-"remote_run = experiment.submit(automl_config, show_output= False)\n",
-"remote_run"
+"remote_run = experiment.submit(automl_config, show_output= True)"
 ]
 },
 {
```

```diff
@@ -417,15 +422,6 @@
 "# remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
 ]
 },
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"remote_run.wait_for_completion()"
-]
-},
 {
 "cell_type": "markdown",
 "metadata": {
```
```diff
@@ -87,7 +87,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.20.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.29.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
```

```diff
@@ -129,6 +129,9 @@
 "source": [
 "## Compute\n",
 "You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
+"\n",
+"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
+"\n",
 "#### Creation of AmlCompute takes approximately 5 minutes. \n",
 "If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
 "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
```

```diff
@@ -205,6 +208,10 @@
 "outputs": [],
 "source": [
 "dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'dataset/bike-no.csv')]).with_timestamp_columns(fine_grain_timestamp=time_column_name) \n",
+"\n",
+"# Drop the columns 'casual' and 'registered' as these columns are a breakdown of the total and therefore a leak.\n",
+"dataset = dataset.drop_columns(columns=['casual', 'registered'])\n",
+"\n",
 "dataset.take(5).to_pandas_dataframe().reset_index(drop=True)"
 ]
 },
```

```diff
@@ -251,7 +258,7 @@
 "|**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).|\n",
 "|**country_or_region_for_holidays**|The country/region used to generate holiday features. These should be ISO 3166 two-letter country/region codes (i.e. 'US', 'GB').|\n",
 "|**target_lags**|The target_lags specifies how far back we will construct the lags of the target variable.|\n",
-"|**drop_column_names**|Name(s) of columns to drop prior to modeling|"
+"|**freq**|Forecast frequency. This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information."
 ]
 },
 {
```

```diff
@@ -314,8 +321,8 @@
 " time_column_name=time_column_name,\n",
 " forecast_horizon=forecast_horizon,\n",
 " country_or_region_for_holidays='US', # set country_or_region will trigger holiday featurizer\n",
-" target_lags='auto', # use heuristic based lag setting \n",
-" drop_column_names=['casual', 'registered'] # these columns are a breakdown of the total and therefore a leak\n",
+" target_lags='auto', # use heuristic based lag setting\n",
+" freq='D' # Set the forecast frequency to be daily\n",
 ")\n",
 "\n",
 "automl_config = AutoMLConfig(task='forecasting', \n",
```

```diff
@@ -346,8 +353,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"remote_run = experiment.submit(automl_config, show_output=False)\n",
-"remote_run"
+"remote_run = experiment.submit(automl_config, show_output=False)"
 ]
 },
 {
```
```diff
@@ -97,7 +97,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.20.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.29.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
```

```diff
@@ -301,7 +301,8 @@
 "|Property|Description|\n",
 "|-|-|\n",
 "|**time_column_name**|The name of your time column.|\n",
-"|**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).|"
+"|**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).|\n",
+"|**freq**|Forecast frequency. This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information."
 ]
 },
 {
```

```diff
@@ -341,7 +342,9 @@
 "source": [
 "from azureml.automl.core.forecasting_parameters import ForecastingParameters\n",
 "forecasting_parameters = ForecastingParameters(\n",
-" time_column_name=time_column_name, forecast_horizon=forecast_horizon\n",
+" time_column_name=time_column_name,\n",
+" forecast_horizon=forecast_horizon,\n",
+" freq='H' # Set the forecast frequency to be hourly\n",
 ")\n",
 "\n",
 "automl_config = AutoMLConfig(task='forecasting', \n",
```

```diff
@@ -374,15 +377,6 @@
 "remote_run = experiment.submit(automl_config, show_output=False)"
 ]
 },
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"remote_run"
-]
-},
 {
 "cell_type": "code",
 "execution_count": null,
```
```diff
@@ -94,7 +94,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.20.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.29.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
```

```diff
@@ -263,7 +263,9 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource."
+"You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
+"\n",
+"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
 ]
 },
 {
```

```diff
@@ -302,7 +304,8 @@
 "* Set early termination to True, so the iterations through the models will stop when no improvements in accuracy score will be made.\n",
 "* Set limitations on the length of experiment run to 15 minutes.\n",
 "* Finally, we set the task to be forecasting.\n",
-"* We apply the lag lead operator to the target value i.e. we use the previous values as a predictor for the future ones."
+"* We apply the lag lead operator to the target value i.e. we use the previous values as a predictor for the future ones.\n",
+"* [Optional] Forecast frequency parameter (freq) represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information."
 ]
 },
 {
```

```diff
@@ -318,7 +321,8 @@
 " time_column_name=TIME_COLUMN_NAME,\n",
 " forecast_horizon=forecast_horizon,\n",
 " time_series_id_column_names=[ TIME_SERIES_ID_COLUMN_NAME ],\n",
-" target_lags=lags\n",
+" target_lags=lags,\n",
+" freq='H' # Set the forecast frequency to be hourly\n",
 ")"
 ]
 },
```
```diff
@@ -82,7 +82,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.20.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.29.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
```

```diff
@@ -124,6 +124,9 @@
 "source": [
 "## Compute\n",
 "You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
+"\n",
+"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
+"\n",
 "#### Creation of AmlCompute takes approximately 5 minutes. \n",
 "If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
 "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
```

```diff
@@ -169,6 +172,10 @@
 "source": [
 "time_column_name = 'WeekStarting'\n",
 "data = pd.read_csv(\"dominicks_OJ.csv\", parse_dates=[time_column_name])\n",
+"\n",
+"# Drop the columns 'logQuantity' as it is a leaky feature.\n",
+"data.drop('logQuantity', axis=1, inplace=True)\n",
+"\n",
 "data.head()"
 ]
 },
```

```diff
@@ -343,7 +350,6 @@
 "outputs": [],
 "source": [
 "featurization_config = FeaturizationConfig()\n",
-"featurization_config.drop_columns = ['logQuantity'] # 'logQuantity' is a leaky feature, so we remove it.\n",
 "# Force the CPWVOL5 feature to be numeric type.\n",
 "featurization_config.add_column_purpose('CPWVOL5', 'Numeric')\n",
 "# Fill missing values in the target column, Quantity, with zeros.\n",
```

```diff
@@ -366,7 +372,8 @@
 "|-|-|\n",
 "|**time_column_name**|The name of your time column.|\n",
 "|**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).|\n",
-"|**time_series_id_column_names**|The column names used to uniquely identify the time series in data that has multiple rows with the same timestamp. If the time series identifiers are not defined, the data set is assumed to be one time series.|"
+"|**time_series_id_column_names**|The column names used to uniquely identify the time series in data that has multiple rows with the same timestamp. If the time series identifiers are not defined, the data set is assumed to be one time series.|\n",
+"|**freq**|Forecast frequency. This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information."
 ]
 },
 {
```

```diff
@@ -419,7 +426,8 @@
 "forecasting_parameters = ForecastingParameters(\n",
 " time_column_name=time_column_name,\n",
 " forecast_horizon=n_test_periods,\n",
-" time_series_id_column_names=time_series_id_column_names\n",
+" time_series_id_column_names=time_series_id_column_names,\n",
+" freq='W-THU' # Set the forecast frequency to be weekly (start on each Thursday)\n",
 ")\n",
 "\n",
 "automl_config = AutoMLConfig(task='forecasting',\n",
```

```diff
@@ -451,8 +459,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"remote_run = experiment.submit(automl_config, show_output=False)\n",
-"remote_run"
+"remote_run = experiment.submit(automl_config, show_output=False)"
 ]
 },
 {
```
```diff
@@ -96,7 +96,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.20.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.29.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
```

```diff
@@ -215,15 +215,6 @@
 "#local_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
 ]
 },
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"local_run"
-]
-},
 {
 "cell_type": "markdown",
 "metadata": {},
```
@@ -96,7 +96,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"This notebook was created using version 1.20.0 of the Azure ML SDK\")\n",
|
||||
"print(\"This notebook was created using version 1.29.0 of the Azure ML SDK\")\n",
|
||||
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
||||
]
|
||||
},
|
||||
@@ -130,6 +130,8 @@
|
||||
"### Create or Attach existing AmlCompute\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you create `AmlCompute` as your training compute resource.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||
"\n",
|
||||
"**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||
"\n",
|
||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
@@ -305,15 +307,6 @@
|
||||
"remote_run = experiment.submit(automl_config, show_output = False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -448,7 +441,7 @@
"\n",
"### Retrieve any AutoML Model for explanations\n",
"\n",
"Below we select the some AutoML pipeline from our iterations. The `get_output` method returns the a AutoML run and the fitted model for the last invocation. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*."
"Below we select an AutoML pipeline from our iterations. The `get_output` method returns an AutoML run and the fitted model for the last invocation. Overloads on `get_output` allow you to retrieve the best run and fitted model for any logged `metric` or for a particular `iteration`."
]
},
{

@@ -457,7 +450,8 @@
"metadata": {},
"outputs": [],
"source": [
"automl_run, fitted_model = remote_run.get_output(metric='r2_score')"
"#automl_run, fitted_model = remote_run.get_output(metric='r2_score')\n",
"automl_run, fitted_model = remote_run.get_output(iteration=2)"
]
},
{

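For context, the `get_output` overloads referenced in this hunk can be exercised as follows; a minimal sketch, assuming `remote_run` is a completed AutoML parent run:

```python
# Best run and model overall (by the experiment's primary metric).
best_run, best_model = remote_run.get_output()

# Best run and model for a specific logged metric.
r2_run, r2_model = remote_run.get_output(metric='r2_score')

# Run and model from a particular iteration.
iter_run, iter_model = remote_run.get_output(iteration=2)
```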
@@ -27,7 +27,7 @@ automl_run = Run(experiment=experiment, run_id='<<run_id>>')

# Check if this AutoML model is explainable
if not automl_check_model_if_explainable(automl_run):
    raise Exception("Model explanations is currently not supported for " + automl_run.get_properties().get(
    raise Exception("Model explanations are currently not supported for " + automl_run.get_properties().get(
        'run_algorithm'))

# Download the best model from the artifact store

@@ -38,16 +38,16 @@ fitted_model = joblib.load('model.pkl')

# Get the train dataset from the workspace
train_dataset = Dataset.get_by_name(workspace=ws, name='<<train_dataset_name>>')
# Drop the lablled column to get the training set.
# Drop the labeled column to get the training set.
X_train = train_dataset.drop_columns(columns=['<<target_column_name>>'])
y_train = train_dataset.keep_columns(columns=['<<target_column_name>>'], validate=True)

# Get the train dataset from the workspace
# Get the test dataset from the workspace
test_dataset = Dataset.get_by_name(workspace=ws, name='<<test_dataset_name>>')
# Drop the lablled column to get the testing set.
# Drop the labeled column to get the testing set.
X_test = test_dataset.drop_columns(columns=['<<target_column_name>>'])

# Setup the class for explaining the AtuoML models
# Setup the class for explaining the AutoML models
automl_explainer_setup_obj = automl_setup_model_explanations(fitted_model, '<<task>>',
                                                             X=X_train, X_test=X_test,
                                                             y=y_train)

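The `keep_columns`/`drop_columns` pattern in this hunk operates lazily on the registered `TabularDataset` and only materializes data when converted to pandas. A minimal sketch of the same split, with an illustrative dataset and column name:

```python
from azureml.core import Dataset, Workspace

ws = Workspace.from_config()
ds = Dataset.get_by_name(workspace=ws, name='my_dataset')  # illustrative name

X = ds.drop_columns(columns=['target'])                 # feature columns, still lazy
y = ds.keep_columns(columns=['target'], validate=True)  # label column; validate that data exists

X_df = X.to_pandas_dataframe()  # materialize only when a DataFrame is needed
```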
@@ -92,7 +92,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.20.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.29.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -256,15 +256,6 @@
"#remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"remote_run"
]
},
{
"cell_type": "markdown",
"metadata": {},

@@ -375,18 +366,12 @@
"metadata": {},
"outputs": [],
"source": [
"# preview the first 3 rows of the dataset\n",
"\n",
"test_data = test_data.to_pandas_dataframe()\n",
"y_test = test_data['ERP'].fillna(0)\n",
"test_data = test_data.drop('ERP', 1)\n",
"test_data = test_data.fillna(0)\n",
"y_test = test_data.keep_columns('ERP').to_pandas_dataframe()\n",
"test_data = test_data.drop_columns('ERP').to_pandas_dataframe()\n",
"\n",
"\n",
"train_data = train_data.to_pandas_dataframe()\n",
"y_train = train_data['ERP'].fillna(0)\n",
"train_data = train_data.drop('ERP', 1)\n",
"train_data = train_data.fillna(0)\n"
"y_train = train_data.keep_columns('ERP').to_pandas_dataframe()\n",
"train_data = train_data.drop_columns('ERP').to_pandas_dataframe()\n"
]
},
{

@@ -396,10 +381,10 @@
"outputs": [],
"source": [
"y_pred_train = fitted_model.predict(train_data)\n",
"y_residual_train = y_train - y_pred_train\n",
"y_residual_train = y_train.values - y_pred_train\n",
"\n",
"y_pred_test = fitted_model.predict(test_data)\n",
"y_residual_test = y_test - y_pred_test"
"y_residual_test = y_test.values - y_pred_test"
]
},
{

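The switch to `y_train.values` in this hunk is about pandas alignment: once the labels come back as a DataFrame, subtracting a NumPy prediction array needs plain arrays to stay elementwise. A minimal illustrative sketch (values are made up):

```python
import numpy as np
import pandas as pd

# Labels materialized as a single-column DataFrame, predictions as an ndarray.
y_train = pd.DataFrame({'ERP': [10.0, 20.0, 30.0]})
y_pred_train = np.array([12.0, 18.0, 33.0])

# Flatten to 1-D before subtracting; an (n, 1) array minus an (n,) array would
# broadcast to (n, n) instead of giving one residual per row.
y_residual_train = y_train.values.ravel() - y_pred_train
print(y_residual_train)  # [-2.  2. -3.]
```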
84 how-to-use-azureml/azure-synapse/README.md Normal file
@@ -0,0 +1,84 @@
Azure Synapse Analytics is a limitless analytics service that brings together data integration, enterprise data warehousing, and big data analytics. It gives you the freedom to query data on your terms, using either serverless or dedicated resources, at scale. Azure Synapse brings these worlds together with a unified experience to ingest, explore, prepare, manage, and serve data for immediate BI and machine learning needs. A core offering within Azure Synapse Analytics is serverless Apache Spark pools enhanced for big data workloads.

The Synapse-in-AML integration is for customers who want to use Apache Spark in Azure Synapse Analytics to prepare data at scale in Azure ML before training their ML models. It lets customers work through the end-to-end ML lifecycle, including large-scale data preparation, model training, and deployment, within the Azure ML workspace, without having to use suboptimal tools for machine learning or switch between multiple tools for data preparation and model training. Performing all ML tasks within Azure ML reduces the time customers need to iterate on a machine learning project, which typically involves multiple rounds of data preparation and training.

In the public preview, the following capabilities are provided:

- Link an Azure Synapse Analytics workspace to an Azure Machine Learning workspace (via ARM, UI or SDK)
- Attach Apache Spark pools powered by Azure Synapse Analytics as Azure Machine Learning compute targets (via ARM, UI or SDK)
- Launch Apache Spark sessions in notebooks and perform interactive data exploration and preparation. This interactive experience leverages Apache Spark magic, and customers have session-level Conda support to install packages.
- Productionize ML pipelines by leveraging Apache Spark pools to pre-process big data

# Using Synapse in Azure Machine Learning

## Create Synapse resources

Follow these documents to create a Synapse workspace; resource-setup.sh is available to create the resources for you.

- Create from the [Portal](https://docs.microsoft.com/en-us/azure/synapse-analytics/quickstart-create-workspace)
- Create from the [CLI](https://docs.microsoft.com/en-us/azure/synapse-analytics/quickstart-create-workspace-cli)

Follow these documents to create a Synapse Spark pool:

- Create from the [Portal](https://docs.microsoft.com/en-us/azure/synapse-analytics/quickstart-create-apache-spark-pool-portal)
- Create from the [CLI](https://docs.microsoft.com/en-us/cli/azure/ext/synapse/synapse/spark/pool?view=azure-cli-latest)

## Link Synapse Workspace

Make sure you are an owner of the Synapse workspace so that you can link it into AML.
You can run resource-setup.py to link the Synapse workspace and attach the compute:

```python
from azureml.core import Workspace

ws = Workspace.from_config()

from azureml.core import LinkedService, SynapseWorkspaceLinkedServiceConfiguration

# Configuration pointing at the Synapse workspace to link.
synapse_link_config = SynapseWorkspaceLinkedServiceConfiguration(
    subscription_id="<subscription id>",
    resource_group="<resource group>",
    name="<synapse workspace name>"
)

# Register the linked service in the AML workspace.
linked_service = LinkedService.register(
    workspace=ws,
    name='<link name>',
    linked_service_config=synapse_link_config)
```

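In later sessions the registered link can be looked up rather than re-registered. A minimal sketch, assuming the link name used above:

```python
from azureml.core import LinkedService

# Retrieve the previously registered linked service by name.
linked_service = LinkedService.get(ws, '<link name>')
print(linked_service.name)
```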
## Attach Synapse Spark pool as AzureML compute

```python
from azureml.core.compute import SynapseCompute, ComputeTarget

spark_pool_name = "<spark pool name>"
attached_synapse_name = "<attached compute name>"

# Attach configuration referencing the linked Synapse workspace and pool.
attach_config = SynapseCompute.attach_configuration(
    linked_service,
    type="SynapseSpark",
    pool_name=spark_pool_name)

synapse_compute = ComputeTarget.attach(
    workspace=ws,
    name=attached_synapse_name,
    attach_configuration=attach_config)

synapse_compute.wait_for_completion()
```

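Once attached, the pool can be used as a pipeline compute target, for example to run the big-data preparation step mentioned above. A minimal sketch using `SynapseSparkStep` from `azureml-pipeline-steps`; the script name, folder, and resource sizes are illustrative:

```python
from azureml.core import Experiment
from azureml.pipeline.core import Pipeline
from azureml.pipeline.steps import SynapseSparkStep

# Spark job that runs on the attached Synapse pool (illustrative script/folder).
step = SynapseSparkStep(
    name="prep-data",
    file="prep.py",
    source_directory="./scripts",
    compute_target=attached_synapse_name,
    driver_memory="2g", driver_cores=2,
    executor_memory="2g", executor_cores=2, num_executors=2)

pipeline = Pipeline(workspace=ws, steps=[step])
run = Experiment(ws, "synapse-prep").submit(pipeline)
```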
## Set up permissions

Grant the Spark admin role to the system-assigned identity of the linked service so that users can submit experiment or pipeline runs from the AML workspace to the Synapse Spark pool.

Grant the Spark admin role to a specific user so that the user can start Spark sessions on the Synapse Spark pool.

You can get the system-assigned identity information by running:

```python
print(linked_service.system_assigned_identity_principal_id)
```

- Launch Synapse Studio for the Synapse workspace and grant the linked service MSI the "Synapse Apache Spark administrator" role.

- In the Azure portal, grant the linked service MSI the "Storage Blob Data Contributor" role on the primary ADLS Gen2 account of the Synapse workspace, in order to use the library management feature.

892 how-to-use-azureml/azure-synapse/Titanic.csv Normal file
@@ -0,0 +1,892 @@
PassengerId,Survived,Pclass,Name,Sex,Age,SibSp,Parch,Ticket,Fare,Cabin,Embarked
1,0,3,"Braund, Mr. Owen Harris",male,22,1,0,A/5 21171,7.25,,S
2,1,1,"Cumings, Mrs. John Bradley (Florence Briggs Thayer)",female,38,1,0,PC 17599,71.2833,C85,C
3,1,3,"Heikkinen, Miss. Laina",female,26,0,0,STON/O2. 3101282,7.925,,S
4,1,1,"Futrelle, Mrs. Jacques Heath (Lily May Peel)",female,35,1,0,113803,53.1,C123,S
5,0,3,"Allen, Mr. William Henry",male,35,0,0,373450,8.05,,S
... (remaining passenger rows of the 892-line file omitted)
|
||||
685,0,2,"Brown, Mr. Thomas William Solomon",male,60,1,1,29750,39,,S
|
||||
686,0,2,"Laroche, Mr. Joseph Philippe Lemercier",male,25,1,2,SC/Paris 2123,41.5792,,C
|
||||
687,0,3,"Panula, Mr. Jaako Arnold",male,14,4,1,3101295,39.6875,,S
|
||||
688,0,3,"Dakic, Mr. Branko",male,19,0,0,349228,10.1708,,S
|
||||
689,0,3,"Fischer, Mr. Eberhard Thelander",male,18,0,0,350036,7.7958,,S
|
||||
690,1,1,"Madill, Miss. Georgette Alexandra",female,15,0,1,24160,211.3375,B5,S
|
||||
691,1,1,"Dick, Mr. Albert Adrian",male,31,1,0,17474,57,B20,S
|
||||
692,1,3,"Karun, Miss. Manca",female,4,0,1,349256,13.4167,,C
|
||||
693,1,3,"Lam, Mr. Ali",male,,0,0,1601,56.4958,,S
|
||||
694,0,3,"Saad, Mr. Khalil",male,25,0,0,2672,7.225,,C
|
||||
695,0,1,"Weir, Col. John",male,60,0,0,113800,26.55,,S
|
||||
696,0,2,"Chapman, Mr. Charles Henry",male,52,0,0,248731,13.5,,S
|
||||
697,0,3,"Kelly, Mr. James",male,44,0,0,363592,8.05,,S
|
||||
698,1,3,"Mullens, Miss. Katherine ""Katie""",female,,0,0,35852,7.7333,,Q
|
||||
699,0,1,"Thayer, Mr. John Borland",male,49,1,1,17421,110.8833,C68,C
|
||||
700,0,3,"Humblen, Mr. Adolf Mathias Nicolai Olsen",male,42,0,0,348121,7.65,F G63,S
|
||||
701,1,1,"Astor, Mrs. John Jacob (Madeleine Talmadge Force)",female,18,1,0,PC 17757,227.525,C62 C64,C
|
||||
702,1,1,"Silverthorne, Mr. Spencer Victor",male,35,0,0,PC 17475,26.2875,E24,S
|
||||
703,0,3,"Barbara, Miss. Saiide",female,18,0,1,2691,14.4542,,C
|
||||
704,0,3,"Gallagher, Mr. Martin",male,25,0,0,36864,7.7417,,Q
|
||||
705,0,3,"Hansen, Mr. Henrik Juul",male,26,1,0,350025,7.8542,,S
|
||||
706,0,2,"Morley, Mr. Henry Samuel (""Mr Henry Marshall"")",male,39,0,0,250655,26,,S
|
||||
707,1,2,"Kelly, Mrs. Florence ""Fannie""",female,45,0,0,223596,13.5,,S
|
||||
708,1,1,"Calderhead, Mr. Edward Pennington",male,42,0,0,PC 17476,26.2875,E24,S
|
||||
709,1,1,"Cleaver, Miss. Alice",female,22,0,0,113781,151.55,,S
|
||||
710,1,3,"Moubarek, Master. Halim Gonios (""William George"")",male,,1,1,2661,15.2458,,C
|
||||
711,1,1,"Mayne, Mlle. Berthe Antonine (""Mrs de Villiers"")",female,24,0,0,PC 17482,49.5042,C90,C
|
||||
712,0,1,"Klaber, Mr. Herman",male,,0,0,113028,26.55,C124,S
|
||||
713,1,1,"Taylor, Mr. Elmer Zebley",male,48,1,0,19996,52,C126,S
|
||||
714,0,3,"Larsson, Mr. August Viktor",male,29,0,0,7545,9.4833,,S
|
||||
715,0,2,"Greenberg, Mr. Samuel",male,52,0,0,250647,13,,S
|
||||
716,0,3,"Soholt, Mr. Peter Andreas Lauritz Andersen",male,19,0,0,348124,7.65,F G73,S
|
||||
717,1,1,"Endres, Miss. Caroline Louise",female,38,0,0,PC 17757,227.525,C45,C
|
||||
718,1,2,"Troutt, Miss. Edwina Celia ""Winnie""",female,27,0,0,34218,10.5,E101,S
|
||||
719,0,3,"McEvoy, Mr. Michael",male,,0,0,36568,15.5,,Q
|
||||
720,0,3,"Johnson, Mr. Malkolm Joackim",male,33,0,0,347062,7.775,,S
|
||||
721,1,2,"Harper, Miss. Annie Jessie ""Nina""",female,6,0,1,248727,33,,S
|
||||
722,0,3,"Jensen, Mr. Svend Lauritz",male,17,1,0,350048,7.0542,,S
|
||||
723,0,2,"Gillespie, Mr. William Henry",male,34,0,0,12233,13,,S
|
||||
724,0,2,"Hodges, Mr. Henry Price",male,50,0,0,250643,13,,S
|
||||
725,1,1,"Chambers, Mr. Norman Campbell",male,27,1,0,113806,53.1,E8,S
|
||||
726,0,3,"Oreskovic, Mr. Luka",male,20,0,0,315094,8.6625,,S
|
||||
727,1,2,"Renouf, Mrs. Peter Henry (Lillian Jefferys)",female,30,3,0,31027,21,,S
|
||||
728,1,3,"Mannion, Miss. Margareth",female,,0,0,36866,7.7375,,Q
|
||||
729,0,2,"Bryhl, Mr. Kurt Arnold Gottfrid",male,25,1,0,236853,26,,S
|
||||
730,0,3,"Ilmakangas, Miss. Pieta Sofia",female,25,1,0,STON/O2. 3101271,7.925,,S
|
||||
731,1,1,"Allen, Miss. Elisabeth Walton",female,29,0,0,24160,211.3375,B5,S
|
||||
732,0,3,"Hassan, Mr. Houssein G N",male,11,0,0,2699,18.7875,,C
|
||||
733,0,2,"Knight, Mr. Robert J",male,,0,0,239855,0,,S
|
||||
734,0,2,"Berriman, Mr. William John",male,23,0,0,28425,13,,S
|
||||
735,0,2,"Troupiansky, Mr. Moses Aaron",male,23,0,0,233639,13,,S
|
||||
736,0,3,"Williams, Mr. Leslie",male,28.5,0,0,54636,16.1,,S
|
||||
737,0,3,"Ford, Mrs. Edward (Margaret Ann Watson)",female,48,1,3,W./C. 6608,34.375,,S
|
||||
738,1,1,"Lesurer, Mr. Gustave J",male,35,0,0,PC 17755,512.3292,B101,C
|
||||
739,0,3,"Ivanoff, Mr. Kanio",male,,0,0,349201,7.8958,,S
|
||||
740,0,3,"Nankoff, Mr. Minko",male,,0,0,349218,7.8958,,S
|
||||
741,1,1,"Hawksford, Mr. Walter James",male,,0,0,16988,30,D45,S
|
||||
742,0,1,"Cavendish, Mr. Tyrell William",male,36,1,0,19877,78.85,C46,S
|
||||
743,1,1,"Ryerson, Miss. Susan Parker ""Suzette""",female,21,2,2,PC 17608,262.375,B57 B59 B63 B66,C
|
||||
744,0,3,"McNamee, Mr. Neal",male,24,1,0,376566,16.1,,S
|
||||
745,1,3,"Stranden, Mr. Juho",male,31,0,0,STON/O 2. 3101288,7.925,,S
|
||||
746,0,1,"Crosby, Capt. Edward Gifford",male,70,1,1,WE/P 5735,71,B22,S
|
||||
747,0,3,"Abbott, Mr. Rossmore Edward",male,16,1,1,C.A. 2673,20.25,,S
|
||||
748,1,2,"Sinkkonen, Miss. Anna",female,30,0,0,250648,13,,S
|
||||
749,0,1,"Marvin, Mr. Daniel Warner",male,19,1,0,113773,53.1,D30,S
|
||||
750,0,3,"Connaghton, Mr. Michael",male,31,0,0,335097,7.75,,Q
|
||||
751,1,2,"Wells, Miss. Joan",female,4,1,1,29103,23,,S
|
||||
752,1,3,"Moor, Master. Meier",male,6,0,1,392096,12.475,E121,S
|
||||
753,0,3,"Vande Velde, Mr. Johannes Joseph",male,33,0,0,345780,9.5,,S
|
||||
754,0,3,"Jonkoff, Mr. Lalio",male,23,0,0,349204,7.8958,,S
|
||||
755,1,2,"Herman, Mrs. Samuel (Jane Laver)",female,48,1,2,220845,65,,S
|
||||
756,1,2,"Hamalainen, Master. Viljo",male,0.67,1,1,250649,14.5,,S
|
||||
757,0,3,"Carlsson, Mr. August Sigfrid",male,28,0,0,350042,7.7958,,S
|
||||
758,0,2,"Bailey, Mr. Percy Andrew",male,18,0,0,29108,11.5,,S
|
||||
759,0,3,"Theobald, Mr. Thomas Leonard",male,34,0,0,363294,8.05,,S
|
||||
760,1,1,"Rothes, the Countess. of (Lucy Noel Martha Dyer-Edwards)",female,33,0,0,110152,86.5,B77,S
|
||||
761,0,3,"Garfirth, Mr. John",male,,0,0,358585,14.5,,S
|
||||
762,0,3,"Nirva, Mr. Iisakki Antino Aijo",male,41,0,0,SOTON/O2 3101272,7.125,,S
|
||||
763,1,3,"Barah, Mr. Hanna Assi",male,20,0,0,2663,7.2292,,C
|
||||
764,1,1,"Carter, Mrs. William Ernest (Lucile Polk)",female,36,1,2,113760,120,B96 B98,S
|
||||
765,0,3,"Eklund, Mr. Hans Linus",male,16,0,0,347074,7.775,,S
|
||||
766,1,1,"Hogeboom, Mrs. John C (Anna Andrews)",female,51,1,0,13502,77.9583,D11,S
|
||||
767,0,1,"Brewe, Dr. Arthur Jackson",male,,0,0,112379,39.6,,C
|
||||
768,0,3,"Mangan, Miss. Mary",female,30.5,0,0,364850,7.75,,Q
|
||||
769,0,3,"Moran, Mr. Daniel J",male,,1,0,371110,24.15,,Q
|
||||
770,0,3,"Gronnestad, Mr. Daniel Danielsen",male,32,0,0,8471,8.3625,,S
|
||||
771,0,3,"Lievens, Mr. Rene Aime",male,24,0,0,345781,9.5,,S
|
||||
772,0,3,"Jensen, Mr. Niels Peder",male,48,0,0,350047,7.8542,,S
|
||||
773,0,2,"Mack, Mrs. (Mary)",female,57,0,0,S.O./P.P. 3,10.5,E77,S
|
||||
774,0,3,"Elias, Mr. Dibo",male,,0,0,2674,7.225,,C
|
||||
775,1,2,"Hocking, Mrs. Elizabeth (Eliza Needs)",female,54,1,3,29105,23,,S
|
||||
776,0,3,"Myhrman, Mr. Pehr Fabian Oliver Malkolm",male,18,0,0,347078,7.75,,S
|
||||
777,0,3,"Tobin, Mr. Roger",male,,0,0,383121,7.75,F38,Q
|
||||
778,1,3,"Emanuel, Miss. Virginia Ethel",female,5,0,0,364516,12.475,,S
|
||||
779,0,3,"Kilgannon, Mr. Thomas J",male,,0,0,36865,7.7375,,Q
|
||||
780,1,1,"Robert, Mrs. Edward Scott (Elisabeth Walton McMillan)",female,43,0,1,24160,211.3375,B3,S
|
||||
781,1,3,"Ayoub, Miss. Banoura",female,13,0,0,2687,7.2292,,C
|
||||
782,1,1,"Dick, Mrs. Albert Adrian (Vera Gillespie)",female,17,1,0,17474,57,B20,S
|
||||
783,0,1,"Long, Mr. Milton Clyde",male,29,0,0,113501,30,D6,S
|
||||
784,0,3,"Johnston, Mr. Andrew G",male,,1,2,W./C. 6607,23.45,,S
|
||||
785,0,3,"Ali, Mr. William",male,25,0,0,SOTON/O.Q. 3101312,7.05,,S
|
||||
786,0,3,"Harmer, Mr. Abraham (David Lishin)",male,25,0,0,374887,7.25,,S
|
||||
787,1,3,"Sjoblom, Miss. Anna Sofia",female,18,0,0,3101265,7.4958,,S
|
||||
788,0,3,"Rice, Master. George Hugh",male,8,4,1,382652,29.125,,Q
|
||||
789,1,3,"Dean, Master. Bertram Vere",male,1,1,2,C.A. 2315,20.575,,S
|
||||
790,0,1,"Guggenheim, Mr. Benjamin",male,46,0,0,PC 17593,79.2,B82 B84,C
|
||||
791,0,3,"Keane, Mr. Andrew ""Andy""",male,,0,0,12460,7.75,,Q
|
||||
792,0,2,"Gaskell, Mr. Alfred",male,16,0,0,239865,26,,S
|
||||
793,0,3,"Sage, Miss. Stella Anna",female,,8,2,CA. 2343,69.55,,S
|
||||
794,0,1,"Hoyt, Mr. William Fisher",male,,0,0,PC 17600,30.6958,,C
|
||||
795,0,3,"Dantcheff, Mr. Ristiu",male,25,0,0,349203,7.8958,,S
|
||||
796,0,2,"Otter, Mr. Richard",male,39,0,0,28213,13,,S
|
||||
797,1,1,"Leader, Dr. Alice (Farnham)",female,49,0,0,17465,25.9292,D17,S
|
||||
798,1,3,"Osman, Mrs. Mara",female,31,0,0,349244,8.6833,,S
|
||||
799,0,3,"Ibrahim Shawah, Mr. Yousseff",male,30,0,0,2685,7.2292,,C
|
||||
800,0,3,"Van Impe, Mrs. Jean Baptiste (Rosalie Paula Govaert)",female,30,1,1,345773,24.15,,S
|
||||
801,0,2,"Ponesell, Mr. Martin",male,34,0,0,250647,13,,S
|
||||
802,1,2,"Collyer, Mrs. Harvey (Charlotte Annie Tate)",female,31,1,1,C.A. 31921,26.25,,S
|
||||
803,1,1,"Carter, Master. William Thornton II",male,11,1,2,113760,120,B96 B98,S
|
||||
804,1,3,"Thomas, Master. Assad Alexander",male,0.42,0,1,2625,8.5167,,C
|
||||
805,1,3,"Hedman, Mr. Oskar Arvid",male,27,0,0,347089,6.975,,S
|
||||
806,0,3,"Johansson, Mr. Karl Johan",male,31,0,0,347063,7.775,,S
|
||||
807,0,1,"Andrews, Mr. Thomas Jr",male,39,0,0,112050,0,A36,S
|
||||
808,0,3,"Pettersson, Miss. Ellen Natalia",female,18,0,0,347087,7.775,,S
|
||||
809,0,2,"Meyer, Mr. August",male,39,0,0,248723,13,,S
|
||||
810,1,1,"Chambers, Mrs. Norman Campbell (Bertha Griggs)",female,33,1,0,113806,53.1,E8,S
|
||||
811,0,3,"Alexander, Mr. William",male,26,0,0,3474,7.8875,,S
|
||||
812,0,3,"Lester, Mr. James",male,39,0,0,A/4 48871,24.15,,S
|
||||
813,0,2,"Slemen, Mr. Richard James",male,35,0,0,28206,10.5,,S
|
||||
814,0,3,"Andersson, Miss. Ebba Iris Alfrida",female,6,4,2,347082,31.275,,S
|
||||
815,0,3,"Tomlin, Mr. Ernest Portage",male,30.5,0,0,364499,8.05,,S
|
||||
816,0,1,"Fry, Mr. Richard",male,,0,0,112058,0,B102,S
|
||||
817,0,3,"Heininen, Miss. Wendla Maria",female,23,0,0,STON/O2. 3101290,7.925,,S
|
||||
818,0,2,"Mallet, Mr. Albert",male,31,1,1,S.C./PARIS 2079,37.0042,,C
|
||||
819,0,3,"Holm, Mr. John Fredrik Alexander",male,43,0,0,C 7075,6.45,,S
|
||||
820,0,3,"Skoog, Master. Karl Thorsten",male,10,3,2,347088,27.9,,S
|
||||
821,1,1,"Hays, Mrs. Charles Melville (Clara Jennings Gregg)",female,52,1,1,12749,93.5,B69,S
|
||||
822,1,3,"Lulic, Mr. Nikola",male,27,0,0,315098,8.6625,,S
|
||||
823,0,1,"Reuchlin, Jonkheer. John George",male,38,0,0,19972,0,,S
|
||||
824,1,3,"Moor, Mrs. (Beila)",female,27,0,1,392096,12.475,E121,S
|
||||
825,0,3,"Panula, Master. Urho Abraham",male,2,4,1,3101295,39.6875,,S
|
||||
826,0,3,"Flynn, Mr. John",male,,0,0,368323,6.95,,Q
|
||||
827,0,3,"Lam, Mr. Len",male,,0,0,1601,56.4958,,S
|
||||
828,1,2,"Mallet, Master. Andre",male,1,0,2,S.C./PARIS 2079,37.0042,,C
|
||||
829,1,3,"McCormack, Mr. Thomas Joseph",male,,0,0,367228,7.75,,Q
|
||||
830,1,1,"Stone, Mrs. George Nelson (Martha Evelyn)",female,62,0,0,113572,80,B28,
|
||||
831,1,3,"Yasbeck, Mrs. Antoni (Selini Alexander)",female,15,1,0,2659,14.4542,,C
|
||||
832,1,2,"Richards, Master. George Sibley",male,0.83,1,1,29106,18.75,,S
|
||||
833,0,3,"Saad, Mr. Amin",male,,0,0,2671,7.2292,,C
|
||||
834,0,3,"Augustsson, Mr. Albert",male,23,0,0,347468,7.8542,,S
|
||||
835,0,3,"Allum, Mr. Owen George",male,18,0,0,2223,8.3,,S
|
||||
836,1,1,"Compton, Miss. Sara Rebecca",female,39,1,1,PC 17756,83.1583,E49,C
|
||||
837,0,3,"Pasic, Mr. Jakob",male,21,0,0,315097,8.6625,,S
|
||||
838,0,3,"Sirota, Mr. Maurice",male,,0,0,392092,8.05,,S
|
||||
839,1,3,"Chip, Mr. Chang",male,32,0,0,1601,56.4958,,S
|
||||
840,1,1,"Marechal, Mr. Pierre",male,,0,0,11774,29.7,C47,C
|
||||
841,0,3,"Alhomaki, Mr. Ilmari Rudolf",male,20,0,0,SOTON/O2 3101287,7.925,,S
|
||||
842,0,2,"Mudd, Mr. Thomas Charles",male,16,0,0,S.O./P.P. 3,10.5,,S
|
||||
843,1,1,"Serepeca, Miss. Augusta",female,30,0,0,113798,31,,C
|
||||
844,0,3,"Lemberopolous, Mr. Peter L",male,34.5,0,0,2683,6.4375,,C
|
||||
845,0,3,"Culumovic, Mr. Jeso",male,17,0,0,315090,8.6625,,S
|
||||
846,0,3,"Abbing, Mr. Anthony",male,42,0,0,C.A. 5547,7.55,,S
|
||||
847,0,3,"Sage, Mr. Douglas Bullen",male,,8,2,CA. 2343,69.55,,S
|
||||
848,0,3,"Markoff, Mr. Marin",male,35,0,0,349213,7.8958,,C
|
||||
849,0,2,"Harper, Rev. John",male,28,0,1,248727,33,,S
|
||||
850,1,1,"Goldenberg, Mrs. Samuel L (Edwiga Grabowska)",female,,1,0,17453,89.1042,C92,C
|
||||
851,0,3,"Andersson, Master. Sigvard Harald Elias",male,4,4,2,347082,31.275,,S
|
||||
852,0,3,"Svensson, Mr. Johan",male,74,0,0,347060,7.775,,S
|
||||
853,0,3,"Boulos, Miss. Nourelain",female,9,1,1,2678,15.2458,,C
|
||||
854,1,1,"Lines, Miss. Mary Conover",female,16,0,1,PC 17592,39.4,D28,S
|
||||
855,0,2,"Carter, Mrs. Ernest Courtenay (Lilian Hughes)",female,44,1,0,244252,26,,S
|
||||
856,1,3,"Aks, Mrs. Sam (Leah Rosen)",female,18,0,1,392091,9.35,,S
|
||||
857,1,1,"Wick, Mrs. George Dennick (Mary Hitchcock)",female,45,1,1,36928,164.8667,,S
|
||||
858,1,1,"Daly, Mr. Peter Denis ",male,51,0,0,113055,26.55,E17,S
|
||||
859,1,3,"Baclini, Mrs. Solomon (Latifa Qurban)",female,24,0,3,2666,19.2583,,C
|
||||
860,0,3,"Razi, Mr. Raihed",male,,0,0,2629,7.2292,,C
|
||||
861,0,3,"Hansen, Mr. Claus Peter",male,41,2,0,350026,14.1083,,S
|
||||
862,0,2,"Giles, Mr. Frederick Edward",male,21,1,0,28134,11.5,,S
|
||||
863,1,1,"Swift, Mrs. Frederick Joel (Margaret Welles Barron)",female,48,0,0,17466,25.9292,D17,S
|
||||
864,0,3,"Sage, Miss. Dorothy Edith ""Dolly""",female,,8,2,CA. 2343,69.55,,S
|
||||
865,0,2,"Gill, Mr. John William",male,24,0,0,233866,13,,S
|
||||
866,1,2,"Bystrom, Mrs. (Karolina)",female,42,0,0,236852,13,,S
|
||||
867,1,2,"Duran y More, Miss. Asuncion",female,27,1,0,SC/PARIS 2149,13.8583,,C
|
||||
868,0,1,"Roebling, Mr. Washington Augustus II",male,31,0,0,PC 17590,50.4958,A24,S
|
||||
869,0,3,"van Melkebeke, Mr. Philemon",male,,0,0,345777,9.5,,S
|
||||
870,1,3,"Johnson, Master. Harold Theodor",male,4,1,1,347742,11.1333,,S
|
||||
871,0,3,"Balkic, Mr. Cerin",male,26,0,0,349248,7.8958,,S
|
||||
872,1,1,"Beckwith, Mrs. Richard Leonard (Sallie Monypeny)",female,47,1,1,11751,52.5542,D35,S
|
||||
873,0,1,"Carlsson, Mr. Frans Olof",male,33,0,0,695,5,B51 B53 B55,S
|
||||
874,0,3,"Vander Cruyssen, Mr. Victor",male,47,0,0,345765,9,,S
|
||||
875,1,2,"Abelson, Mrs. Samuel (Hannah Wizosky)",female,28,1,0,P/PP 3381,24,,C
|
||||
876,1,3,"Najib, Miss. Adele Kiamie ""Jane""",female,15,0,0,2667,7.225,,C
|
||||
877,0,3,"Gustafsson, Mr. Alfred Ossian",male,20,0,0,7534,9.8458,,S
|
||||
878,0,3,"Petroff, Mr. Nedelio",male,19,0,0,349212,7.8958,,S
|
||||
879,0,3,"Laleff, Mr. Kristo",male,,0,0,349217,7.8958,,S
|
||||
880,1,1,"Potter, Mrs. Thomas Jr (Lily Alexenia Wilson)",female,56,0,1,11767,83.1583,C50,C
|
||||
881,1,2,"Shelley, Mrs. William (Imanita Parrish Hall)",female,25,0,1,230433,26,,S
|
||||
882,0,3,"Markun, Mr. Johann",male,33,0,0,349257,7.8958,,S
|
||||
883,0,3,"Dahlberg, Miss. Gerda Ulrika",female,22,0,0,7552,10.5167,,S
|
||||
884,0,2,"Banfield, Mr. Frederick James",male,28,0,0,C.A./SOTON 34068,10.5,,S
|
||||
885,0,3,"Sutehall, Mr. Henry Jr",male,25,0,0,SOTON/OQ 392076,7.05,,S
|
||||
886,0,3,"Rice, Mrs. William (Margaret Norton)",female,39,0,5,382652,29.125,,Q
|
||||
887,0,2,"Montvila, Rev. Juozas",male,27,0,0,211536,13,,S
|
||||
888,1,1,"Graham, Miss. Margaret Edith",female,19,0,0,112053,30,B42,S
|
||||
889,0,3,"Johnston, Miss. Catherine Helen ""Carrie""",female,,1,2,W./C. 6607,23.45,,S
|
||||
890,1,1,"Behr, Mr. Karl Howell",male,26,0,0,111369,30,C148,C
|
||||
891,0,3,"Dooley, Mr. Patrick",male,32,0,0,370376,7.75,,Q
|
||||
|
@@ -0,0 +1,507 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Using Synapse Spark Pool as a Compute Target from Azure Machine Learning Remote Run\n",
|
||||
"1. To use Synapse Spark Pool as a compute target from Experiment Run, [ScriptRunConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.script_run_config.scriptrunconfig?view=azure-ml-py) is used, the same as other Experiment Runs. This notebook demonstrates how to leverage ScriptRunConfig to submit an experiment run to an attached Synapse Spark cluster.\n",
|
||||
"2. To use Synapse Spark Pool as a compute target from [Azure Machine Learning Pipeline](https://aka.ms/pl-concept), a [SynapseSparkStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.synapse_spark_step.synapsesparkstep?view=azure-ml-py) is used. This notebook demonstrates how to leverage SynapseSparkStep in Azure Machine Learning Pipeline.\n",
|
||||
"\n",
|
||||
"## Before you begin:\n",
|
||||
"1. **Create an Azure Synapse workspace**, check [this] (https://docs.microsoft.com/en-us/azure/synapse-analytics/quickstart-create-workspace) for more information.\n",
|
||||
"2. **Create Spark Pool in Synapse workspace**: check [this] (https://docs.microsoft.com/en-us/azure/synapse-analytics/quickstart-create-apache-spark-pool-portal) for more information."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Azure Machine Learning and Pipeline SDK-specific imports"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import azureml.core\n",
|
||||
"from azureml.core import Workspace, Experiment\n",
|
||||
"from azureml.core import LinkedService, SynapseWorkspaceLinkedServiceConfiguration\n",
|
||||
"from azureml.core.compute import ComputeTarget, AmlCompute, SynapseCompute\n",
|
||||
"from azureml.exceptions import ComputeTargetException\n",
|
||||
"from azureml.data import HDFSOutputDatasetConfig\n",
|
||||
"from azureml.core.datastore import Datastore\n",
|
||||
"from azureml.core.runconfig import RunConfiguration\n",
|
||||
"from azureml.core.conda_dependencies import CondaDependencies\n",
|
||||
"from azureml.pipeline.core import Pipeline\n",
|
||||
"from azureml.pipeline.steps import PythonScriptStep, SynapseSparkStep\n",
|
||||
"\n",
|
||||
"# Check core SDK version number\n",
|
||||
"print(\"SDK version:\", azureml.core.VERSION)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Link Synapse workspace to AML \n",
|
||||
"You have to be an \"Owner\" of Synapse workspace resource to perform linking. You can check your role in the Azure resource management portal, if you don't have an \"Owner\" role, you can contact an \"Owner\" to link the workspaces for you."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"# Replace with your resource info before running.\n",
|
||||
"\n",
|
||||
"synapse_subscription_id=os.getenv(\"SYNAPSE_SUBSCRIPTION_ID\", \"<my-synapse-subscription-id>\")\n",
|
||||
"synapse_resource_group=os.getenv(\"SYNAPSE_RESOURCE_GROUP\", \"<my-synapse-resource-group>\")\n",
|
||||
"synapse_workspace_name=os.getenv(\"SYNAPSE_WORKSPACE_NAME\", \"<my-synapse-workspace-name>\")\n",
|
||||
"synapse_linked_service_name=os.getenv(\"SYNAPSE_LINKED_SERVICE_NAME\", \"<my-synapse-linked-service-name>\")\n",
|
||||
"\n",
|
||||
"synapse_link_config = SynapseWorkspaceLinkedServiceConfiguration(\n",
|
||||
" subscription_id=synapse_subscription_id,\n",
|
||||
" resource_group=synapse_resource_group,\n",
|
||||
" name=synapse_workspace_name\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"linked_service = LinkedService.register(\n",
|
||||
" workspace=ws,\n",
|
||||
" name=synapse_linked_service_name,\n",
|
||||
" linked_service_config=synapse_link_config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Linked service property\n",
|
||||
"\n",
|
||||
"A MSI (system_assigned_identity_principal_id) will be generated for each linked service, for example:\n",
|
||||
"\n",
|
||||
"name=synapselink,</p>\n",
|
||||
"type=Synapse, </p>\n",
|
||||
"linked_service_resource_id=/subscriptions/4faaaf21-663f-4391-96fd-47197c630979/resourceGroups/static_resources_synapse_test/providers/Microsoft.Synapse/workspaces/synapsetest2, </p>\n",
|
||||
"system_assigned_identity_principal_id=eb355d52-3806-4c5a-aec9-91447e8cfc2e </p>\n",
|
||||
"\n",
|
||||
"#### Make sure you grant \"Synapse Apache Spark Administrator\" role of the synapse workspace to the generated workspace linking MSI in Synapse studio portal before you submit job."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"linked_service"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"LinkedService.list(ws)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Attach Synapse spark pool as AML compute target"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"synapse_spark_pool_name=os.getenv(\"SYNAPSE_SPARK_POOL_NAME\", \"<my-synapse-spark-pool-name>\")\n",
|
||||
"synapse_compute_name=os.getenv(\"SYNAPSE_COMPUTE_NAME\", \"<my-synapse-compute-name>\")\n",
|
||||
"\n",
|
||||
"attach_config = SynapseCompute.attach_configuration(\n",
|
||||
" linked_service,\n",
|
||||
" type=\"SynapseSpark\",\n",
|
||||
" pool_name=synapse_spark_pool_name)\n",
|
||||
"\n",
|
||||
"synapse_compute=ComputeTarget.attach(\n",
|
||||
" workspace=ws,\n",
|
||||
" name=synapse_compute_name,\n",
|
||||
" attach_configuration=attach_config)\n",
|
||||
"\n",
|
||||
"synapse_compute.wait_for_completion()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Start an experiment run"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prepare data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Use the default blob storage\n",
|
||||
"def_blob_store = Datastore(ws, \"workspaceblobstore\")\n",
|
||||
"print('Datastore {} will be used'.format(def_blob_store.name))\n",
|
||||
"\n",
|
||||
"# We are uploading a sample file in the local directory to be used as a datasource\n",
|
||||
"file_name = \"Titanic.csv\"\n",
|
||||
"def_blob_store.upload_files(files=[\"./{}\".format(file_name)], overwrite=False)\n",
|
||||
" "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tabular dataset as input"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Dataset\n",
|
||||
"titanic_tabular_dataset = Dataset.Tabular.from_delimited_files(path=[(def_blob_store, file_name)])\n",
|
||||
"input1 = titanic_tabular_dataset.as_named_input(\"tabular_input\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## File dataset as input"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Dataset\n",
|
||||
"titanic_file_dataset = Dataset.File.from_files(path=[(def_blob_store, file_name)])\n",
|
||||
"input2 = titanic_file_dataset.as_named_input(\"file_input\").as_hdfs()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Output config: the output will be registered as a File dataset\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.data import HDFSOutputDatasetConfig\n",
|
||||
"output = HDFSOutputDatasetConfig(destination=(def_blob_store,\"test\")).register_on_complete(name=\"registered_dataset\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Dataprep script"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"os.makedirs(\"code\", exist_ok=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%writefile code/dataprep.py\n",
|
||||
"import os\n",
|
||||
"import sys\n",
|
||||
"import azureml.core\n",
|
||||
"from pyspark.sql import SparkSession\n",
|
||||
"from azureml.core import Run, Dataset\n",
|
||||
"\n",
|
||||
"print(azureml.core.VERSION)\n",
|
||||
"print(os.environ)\n",
|
||||
"\n",
|
||||
"import argparse\n",
|
||||
"parser = argparse.ArgumentParser()\n",
|
||||
"parser.add_argument(\"--tabular_input\")\n",
|
||||
"parser.add_argument(\"--file_input\")\n",
|
||||
"parser.add_argument(\"--output_dir\")\n",
|
||||
"args = parser.parse_args()\n",
|
||||
"\n",
|
||||
"# use dataset sdk to read tabular dataset\n",
|
||||
"run_context = Run.get_context()\n",
|
||||
"dataset = Dataset.get_by_id(run_context.experiment.workspace,id=args.tabular_input)\n",
|
||||
"sdf = dataset.to_spark_dataframe()\n",
|
||||
"sdf.show()\n",
|
||||
"\n",
|
||||
"# use hdfs path to read file dataset\n",
|
||||
"spark= SparkSession.builder.getOrCreate()\n",
|
||||
"sdf = spark.read.option(\"header\", \"true\").csv(args.file_input)\n",
|
||||
"sdf.show()\n",
|
||||
"\n",
|
||||
"sdf.coalesce(1).write\\\n",
|
||||
".option(\"header\", \"true\")\\\n",
|
||||
".mode(\"append\")\\\n",
|
||||
".csv(args.output_dir)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set up Conda dependency for the following Script Run"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.environment import CondaDependencies\n",
|
||||
"conda_dep = CondaDependencies()\n",
|
||||
"conda_dep.add_pip_package(\"azureml-core==1.20.0\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## How to leverage ScriptRunConfig to submit an experiment run to an attached Synapse Spark cluster"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import RunConfiguration\n",
|
||||
"from azureml.core import ScriptRunConfig \n",
|
||||
"from azureml.core import Experiment\n",
|
||||
"\n",
|
||||
"run_config = RunConfiguration(framework=\"pyspark\")\n",
|
||||
"run_config.target = synapse_compute_name\n",
|
||||
"\n",
|
||||
"run_config.spark.configuration[\"spark.driver.memory\"] = \"1g\" \n",
|
||||
"run_config.spark.configuration[\"spark.driver.cores\"] = 2 \n",
|
||||
"run_config.spark.configuration[\"spark.executor.memory\"] = \"1g\" \n",
|
||||
"run_config.spark.configuration[\"spark.executor.cores\"] = 1 \n",
|
||||
"run_config.spark.configuration[\"spark.executor.instances\"] = 1 \n",
|
||||
"\n",
|
||||
"run_config.environment.python.conda_dependencies = conda_dep\n",
|
||||
"\n",
|
||||
"script_run_config = ScriptRunConfig(source_directory = './code',\n",
|
||||
" script= 'dataprep.py',\n",
|
||||
" arguments = [\"--tabular_input\", input1, \n",
|
||||
" \"--file_input\", input2,\n",
|
||||
" \"--output_dir\", output],\n",
|
||||
" run_config = run_config) "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Experiment \n",
|
||||
"exp = Experiment(workspace=ws, name=\"synapse-spark\") \n",
|
||||
"run = exp.submit(config=script_run_config) \n",
|
||||
"run"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## How to leverage SynapseSparkStep in an AML pipeline to orchestrate data prep step on Synapse Spark and training step on AzureML compute."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Choose a name for your CPU cluster\n",
|
||||
"cpu_cluster_name = \"cpucluster\"\n",
|
||||
"\n",
|
||||
"# Verify that cluster does not exist already\n",
|
||||
"try:\n",
|
||||
" cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
||||
" print('Found existing cluster, use it.')\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
|
||||
" max_nodes=1)\n",
|
||||
" cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
||||
"\n",
|
||||
"cpu_cluster.wait_for_completion(show_output=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%writefile code/train.py\n",
|
||||
"import glob\n",
|
||||
"import os\n",
|
||||
"import sys\n",
|
||||
"from os import listdir\n",
|
||||
"from os.path import isfile, join\n",
|
||||
"\n",
|
||||
"mypath = os.environ[\"step2_input\"]\n",
|
||||
"files = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n",
|
||||
"for file in files:\n",
|
||||
" with open(join(mypath,file)) as f:\n",
|
||||
" print(f.read())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"titanic_tabular_dataset = Dataset.Tabular.from_delimited_files(path=[(def_blob_store, file_name)])\n",
|
||||
"titanic_file_dataset = Dataset.File.from_files(path=[(def_blob_store, file_name)])\n",
|
||||
"\n",
|
||||
"step1_input1 = titanic_tabular_dataset.as_named_input(\"tabular_input\")\n",
|
||||
"step1_input2 = titanic_file_dataset.as_named_input(\"file_input\").as_hdfs()\n",
|
||||
"step1_output = HDFSOutputDatasetConfig(destination=(def_blob_store,\"test\")).register_on_complete(name=\"registered_dataset\")\n",
|
||||
"\n",
|
||||
"step2_input = step1_output.as_input(\"step2_input\").as_download()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"from azureml.core.environment import Environment\n",
|
||||
"env = Environment(name=\"myenv\")\n",
|
||||
"env.python.conda_dependencies.add_pip_package(\"azureml-core==1.20.0\")\n",
|
||||
"\n",
|
||||
"step_1 = SynapseSparkStep(name = 'synapse-spark',\n",
|
||||
" file = 'dataprep.py',\n",
|
||||
" source_directory=\"./code\", \n",
|
||||
" inputs=[step1_input1, step1_input2],\n",
|
||||
" outputs=[step1_output],\n",
|
||||
" arguments = [\"--tabular_input\", step1_input1, \n",
|
||||
" \"--file_input\", step1_input2,\n",
|
||||
" \"--output_dir\", step1_output],\n",
|
||||
" compute_target = synapse_compute_name,\n",
|
||||
" driver_memory = \"7g\",\n",
|
||||
" driver_cores = 4,\n",
|
||||
" executor_memory = \"7g\",\n",
|
||||
" executor_cores = 2,\n",
|
||||
" num_executors = 1,\n",
|
||||
" environment = env)\n",
|
||||
"\n",
|
||||
"step_2 = PythonScriptStep(script_name=\"train.py\",\n",
|
||||
" arguments=[step2_input],\n",
|
||||
" inputs=[step2_input],\n",
|
||||
" compute_target=cpu_cluster_name,\n",
|
||||
" source_directory=\"./code\",\n",
|
||||
" allow_reuse=False)\n",
|
||||
"\n",
|
||||
"pipeline = Pipeline(workspace=ws, steps=[step_1, step_2])\n",
|
||||
"pipeline_run = pipeline.submit('synapse-pipeline', regenerate_outputs=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "yunzhan"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.7"
|
||||
},
|
||||
"nteract": {
|
||||
"version": "0.28.0"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -0,0 +1,327 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Interactive Spark Session on Synapse Spark Pool"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Install package"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install -U \"azureml-synapse\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For JupyterLab, please additionally run:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!jupyter lab build --minimize=False"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## PLEASE restart kernel and then refresh web page before starting spark session."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 0. How to leverage Spark Magic for interactive Spark experience"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"execution": {
|
||||
"iopub.execute_input": "2020-06-05T03:22:14.965395Z",
|
||||
"iopub.status.busy": "2020-06-05T03:22:14.965395Z",
|
||||
"iopub.status.idle": "2020-06-05T03:22:14.970398Z",
|
||||
"shell.execute_reply": "2020-06-05T03:22:14.969397Z",
|
||||
"shell.execute_reply.started": "2020-06-05T03:22:14.965395Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# show help\n",
|
||||
"%synapse ?"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 1. Start Synapse Session"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"synapse_compute_name=os.getenv(\"SYNAPSE_COMPUTE_NAME\", \"<my-synapse-compute-name>\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# use Synapse compute linked to the Compute Instance's workspace with an aml envrionment.\n",
|
||||
"# conda dependencies specified in the environment will be installed before the spark session started.\n",
|
||||
"\n",
|
||||
"%synapse start -c $synapse_compute_name -e AzureML-Minimal"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# use Synapse compute from anther workspace via its config file\n",
|
||||
"\n",
|
||||
"# %synapse start -c <compute-name> -f config.json"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# use Synapse compute from anther workspace via subscription_id, resource_group and workspace_name\n",
|
||||
"\n",
|
||||
"# %synapse start -c <compute-name> -s <subscription-id> -r <resource group> -w <workspace-name>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# start a spark session with an AML environment, \n",
|
||||
"# %synapse start -c <compute-name> -s <subscription-id> -r <resource group> -w <workspace-name> -e AzureML-Minimal"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2. Data prepration\n",
|
||||
"\n",
|
||||
"Three types of datastore are supported in synapse spark, and you have two ways to load the data.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"| Datastore Type | Data Acess |\n",
|
||||
"|--------------------|-------------------------------|\n",
|
||||
"| Blob | Credential |\n",
|
||||
"| Adlsgen1 | Credential & Credential-less |\n",
|
||||
"| Adlsgen2 | Credential & Credential-less |"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Example 1: Data loading by HDFS path"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Read data from Blob**\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"# setup access key or sas token\n",
|
||||
"\n",
|
||||
"sc._jsc.hadoopConfiguration().set(\"fs.azure.account.key.<storage account name>.blob.core.windows.net\", \"<acess key>\")\n",
|
||||
"sc._jsc.hadoopConfiguration().set(\"fs.azure.sas.<container name>.<storage account name>.blob.core.windows.net\", \"sas token\")\n",
|
||||
"\n",
|
||||
"df = spark.read.parquet(\"wasbs://<container name>@<storage account name>.blob.core.windows.net/<path>\")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"**Read data from Adlsgen1**\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"# setup service pricinpal which has access of the data\n",
|
||||
"# If no data Credential is setup, the user identity will be used to do access control\n",
|
||||
"\n",
|
||||
"sc._jsc.hadoopConfiguration().set(\"fs.adl.account.<storage account name>.oauth2.access.token.provider.type\",\"ClientCredential\")\n",
|
||||
"sc._jsc.hadoopConfiguration().set(\"fs.adl.account.<storage account name>.oauth2.client.id\", \"<client id>\")\n",
|
||||
"sc._jsc.hadoopConfiguration().set(\"fs.adl.account.<storage account name>.oauth2.credential\", \"<client secret>\")\n",
|
||||
"sc._jsc.hadoopConfiguration().set(\"fs.adl.account.<storage account name>.oauth2.refresh.url\", \"https://login.microsoftonline.com/<tenant id>/oauth2/token\")\n",
|
||||
"\n",
|
||||
"df = spark.read.csv(\"adl://<storage account name>.azuredatalakestore.net/<path>\")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"**Read data from Adlsgen2**\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"# setup service pricinpal which has access of the data\n",
|
||||
"# If no data Credential is setup, the user identity will be used to do access control\n",
|
||||
"\n",
|
||||
"sc._jsc.hadoopConfiguration().set(\"fs.azure.account.auth.type.<storage account name>.dfs.core.windows.net\",\"OAuth\")\n",
|
||||
"sc._jsc.hadoopConfiguration().set(\"fs.azure.account.oauth.provider.type.<storage account name>.dfs.core.windows.net\", \"org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider\")\n",
|
||||
"sc._jsc.hadoopConfiguration().set(\"fs.azure.account.oauth2.client.id.<storage account name>.dfs.core.windows.net\", \"<client id>\")\n",
|
||||
"sc._jsc.hadoopConfiguration().set(\"fs.azure.account.oauth2.client.secret.<storage account name>.dfs.core.windows.net\", \"<client secret>\")\n",
|
||||
"sc._jsc.hadoopConfiguration().set(\"fs.azure.account.oauth2.client.endpoint.<storage account name>.dfs.core.windows.net\", \"https://login.microsoftonline.com/<tenant id>/oauth2/token\")\n",
|
||||
"\n",
|
||||
"df = spark.read.csv(\"abfss://<container name>@<storage account>.dfs.core.windows.net/<path>\")\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"execution": {
|
||||
"iopub.execute_input": "2020-06-04T08:11:18.812276Z",
|
||||
"iopub.status.busy": "2020-06-04T08:11:18.812276Z",
|
||||
"iopub.status.idle": "2020-06-04T08:11:23.854526Z",
|
||||
"shell.execute_reply": "2020-06-04T08:11:23.853525Z",
|
||||
"shell.execute_reply.started": "2020-06-04T08:11:18.812276Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%synapse\n",
|
||||
"\n",
|
||||
"from pyspark.sql.functions import col, desc\n",
|
||||
"\n",
|
||||
"df = spark.read.option(\"header\", \"true\").csv(\"wasbs://demo@dprepdata.blob.core.windows.net/Titanic.csv\")\n",
|
||||
"df.filter(col('Survived') == 1).groupBy('Age').count().orderBy(desc('count')).show(10)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Example 2: Data loading by AML Dataset\n",
|
||||
"\n",
|
||||
"You can create tabular data by following the [guidance](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-register-datasets) and use to_spark_dataframe() to load the data.\n",
|
||||
"\n",
|
||||
"```text\n",
|
||||
"%%synapse\n",
|
||||
"\n",
|
||||
"import azureml.core\n",
|
||||
"print(azureml.core.VERSION)\n",
|
||||
"\n",
|
||||
"from azureml.core import Workspace, Dataset\n",
|
||||
"ws = Workspace.get(name='<workspace name>', subscription_id='<subscription id>', resource_group='<resource group>')\n",
|
||||
"ds = Dataset.get_by_name(ws, \"<tabular dataset name>\")\n",
|
||||
"df = ds.to_spark_dataframe()\n",
|
||||
"\n",
|
||||
"# You can do more data transformation on spark dataframe\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 3. Session Metadata\n",
|
||||
"After session started, you can check the session's metadata, find the links to Synapse portal."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%synapse meta"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 4. Stop Session\n",
|
||||
"When current session reach the status timeout, dead or any failure, you must explicitly stop it before start new one. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%synapse stop"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "yunzhan"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.7"
|
||||
},
|
||||
"nteract": {
|
||||
"version": "0.28.0"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -77,7 +77,7 @@
|
||||
"source": [
|
||||
"## Create trained model\n",
|
||||
"\n",
|
||||
"For this example, we will train a small model on scikit-learn's [diabetes dataset](https://scikit-learn.org/stable/datasets/index.html#diabetes-dataset). "
|
||||
"For this example, we will train a small model on scikit-learn's [diabetes dataset](https://scikit-learn.org/stable/datasets/toy_dataset.html#diabetes-dataset). "
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -382,13 +382,111 @@
|
||||
"source": [
|
||||
"## Update Service\n",
|
||||
"\n",
|
||||
"If you want to change your model(s), Conda dependencies, or deployment configuration, call `update()` to rebuild the Docker image.\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"local_service.update(models=[SomeOtherModelObject],\n",
|
||||
"If you want to change your model(s), Conda dependencies or deployment configuration, call `update()` to rebuild the Docker image.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"local_service.update(models=[model],\n",
|
||||
" inference_config=inference_config,\n",
|
||||
" deployment_config=local_config)\n",
|
||||
"```"
|
||||
" deployment_config=deployment_config)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Deploy model to AKS cluster based on the LocalWebservice's configuration."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# This is a one time setup for AKS Cluster. You can reuse this cluster for multiple deployments after it has been created. If you delete the cluster or the resource group that contains it, then you would have to recreate it.\n",
|
||||
"from azureml.core.compute import AksCompute, ComputeTarget\n",
|
||||
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||
"\n",
|
||||
"# Choose a name for your AKS cluster\n",
|
||||
"aks_name = 'my-aks-9' \n",
|
||||
"\n",
|
||||
"# Verify the cluster does not exist already\n",
|
||||
"try:\n",
|
||||
" aks_target = ComputeTarget(workspace=ws, name=aks_name)\n",
|
||||
" print('Found existing cluster, use it.')\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" # Use the default configuration (can also provide parameters to customize)\n",
|
||||
" prov_config = AksCompute.provisioning_configuration()\n",
|
||||
"\n",
|
||||
" # Create the cluster\n",
|
||||
" aks_target = ComputeTarget.create(workspace = ws, \n",
|
||||
" name = aks_name, \n",
|
||||
" provisioning_configuration = prov_config)\n",
|
||||
"\n",
|
||||
"if aks_target.get_status() != \"Succeeded\":\n",
|
||||
" aks_target.wait_for_completion(show_output=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.webservice import AksWebservice\n",
|
||||
"# Set the web service configuration (using default here)\n",
|
||||
"aks_config = AksWebservice.deploy_configuration()\n",
|
||||
"\n",
|
||||
"# # Enable token auth and disable (key) auth on the webservice\n",
|
||||
"# aks_config = AksWebservice.deploy_configuration(token_auth_enabled=True, auth_enabled=False)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"aks_service_name ='aks-service-1'\n",
|
||||
"\n",
|
||||
"aks_service = local_service.deploy_to_cloud(name=aks_service_name,\n",
|
||||
" deployment_config=aks_config,\n",
|
||||
" deployment_target=aks_target)\n",
|
||||
"\n",
|
||||
"aks_service.wait_for_deployment(show_output = True)\n",
|
||||
"print(aks_service.state)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Test aks service\n",
|
||||
"\n",
|
||||
"sample_input = json.dumps({\n",
|
||||
" 'data': dataset_x[0:2].tolist()\n",
|
||||
"})\n",
|
||||
"\n",
|
||||
"aks_service.run(sample_input)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Delete the service if not needed.\n",
|
||||
"aks_service.delete()"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -157,7 +157,9 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Provision the AKS Cluster\n",
|
||||
"If you already have an AKS cluster attached to this workspace, skip the step below and provide the name of the cluster."
|
||||
"If you already have an AKS cluster attached to this workspace, skip the step below and provide the name of the cluster.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -267,7 +267,9 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create AKS compute if you haven't done so."
|
||||
"### Create AKS compute if you haven't done so.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -94,6 +94,17 @@ def main():
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
|
||||
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
|
||||
# Use Azure Open Datasets for MNIST dataset
|
||||
datasets.MNIST.resources = [
|
||||
("https://azureopendatastorage.azurefd.net/mnist/train-images-idx3-ubyte.gz",
|
||||
"f68b3c2dcbeaaa9fbdd348bbdeb94873"),
|
||||
("https://azureopendatastorage.azurefd.net/mnist/train-labels-idx1-ubyte.gz",
|
||||
"d53e105ee54ea40749a09fcbcd1e9432"),
|
||||
("https://azureopendatastorage.azurefd.net/mnist/t10k-images-idx3-ubyte.gz",
|
||||
"9fb629c4189551a2d022fa330f9573f3"),
|
||||
("https://azureopendatastorage.azurefd.net/mnist/t10k-labels-idx1-ubyte.gz",
|
||||
"ec29112dd5afa0611ce80d1b7f02629c")
|
||||
]
|
||||
train_loader = torch.utils.data.DataLoader(
|
||||
datasets.MNIST('data', train=True, download=True,
|
||||
transform=transforms.Compose([transforms.ToTensor(),
|
||||
|
||||
@@ -70,16 +70,16 @@
|
||||
"\n",
|
||||
"import urllib.request\n",
|
||||
"\n",
|
||||
"onnx_model_url = \"https://www.cntk.ai/OnnxModels/emotion_ferplus/opset_7/emotion_ferplus.tar.gz\"\n",
|
||||
"onnx_model_url = \"https://github.com/onnx/models/blob/master/vision/body_analysis/emotion_ferplus/model/emotion-ferplus-7.tar.gz?raw=true\"\n",
|
||||
"\n",
|
||||
"urllib.request.urlretrieve(onnx_model_url, filename=\"emotion_ferplus.tar.gz\")\n",
|
||||
"urllib.request.urlretrieve(onnx_model_url, filename=\"emotion-ferplus-7.tar.gz\")\n",
|
||||
"\n",
|
||||
"# the ! magic command tells our jupyter notebook kernel to run the following line of \n",
|
||||
"# code from the command line instead of the notebook kernel\n",
|
||||
"\n",
|
||||
"# We use tar and xvcf to unzip the files we just retrieved from the ONNX model zoo\n",
|
||||
"\n",
|
||||
"!tar xvzf emotion_ferplus.tar.gz"
|
||||
"!tar xvzf emotion-ferplus-7.tar.gz"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -570,7 +570,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"plt.figure(figsize = (16, 6), frameon=False)\n",
|
||||
"plt.figure(figsize = (16, 6))\n",
|
||||
"plt.subplot(1, 8, 1)\n",
|
||||
"\n",
|
||||
"plt.text(x = 0, y = -30, s = \"True Label: \", fontsize = 13, color = 'black')\n",
|
||||
|
||||
@@ -70,9 +70,9 @@
|
||||
"\n",
|
||||
"import urllib.request\n",
|
||||
"\n",
|
||||
"onnx_model_url = \"https://www.cntk.ai/OnnxModels/mnist/opset_7/mnist.tar.gz\"\n",
|
||||
"onnx_model_url = \"https://github.com/onnx/models/blob/master/vision/classification/mnist/model/mnist-7.tar.gz?raw=true\"\n",
|
||||
"\n",
|
||||
"urllib.request.urlretrieve(onnx_model_url, filename=\"mnist.tar.gz\")"
|
||||
"urllib.request.urlretrieve(onnx_model_url, filename=\"mnist-7.tar.gz\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -86,7 +86,7 @@
|
||||
"\n",
|
||||
"# We use tar and xvcf to unzip the files we just retrieved from the ONNX model zoo\n",
|
||||
"\n",
|
||||
"!tar xvzf mnist.tar.gz"
|
||||
"!tar xvzf mnist-7.tar.gz"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -521,7 +521,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"plt.figure(figsize = (16, 6), frameon=False)\n",
|
||||
"plt.figure(figsize = (16, 6))\n",
|
||||
"plt.subplot(1, 8, 1)\n",
|
||||
"\n",
|
||||
"plt.text(x = 0, y = -30, s = \"True Label: \", fontsize = 13, color = 'black')\n",
|
||||
@@ -684,18 +684,7 @@
"\n",
"A convolution layer is a set of filters. Each filter is defined by a weight (**W**) matrix, and bias ($b$).\n",
"\n",
"\n",
"\n",
"These filters are scanned across the image performing the dot product between the weights and corresponding input value ($x$). The bias value is added to the output of the dot product and the resulting sum is optionally mapped through an activation function. This process is illustrated in the following animation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Image(url=\"https://www.cntk.ai/jup/cntk103d_conv2d_final.gif\", width= 200)"
"These filters are scanned across the image performing the dot product between the weights and corresponding input value ($x$). The bias value is added to the output of the dot product and the resulting sum is optionally mapped through an activation function."
]
},
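As a concrete illustration of the sentence kept by this change, one filter position reduces to a dot product plus a bias, optionally passed through an activation; a minimal NumPy sketch with toy shapes (not the model's actual weights):

    import numpy as np

    image_patch = np.random.rand(3, 3)  # one 3x3 window of the input image
    W = np.random.rand(3, 3)            # filter weights
    b = 0.1                             # filter bias

    pre_activation = np.sum(W * image_patch) + b  # dot product plus bias
    activation = np.maximum(pre_activation, 0.0)  # e.g. a ReLU activation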
{
@@ -707,24 +696,6 @@
"The MNIST model from the ONNX Model Zoo uses maxpooling to update the weights in its convolutions, summarized by the graphic below. You can see the entire workflow of our pre-trained model in the following image, with our input images and our output probabilities of each of our 10 labels. If you're interested in exploring the logic behind creating a Deep Learning model further, please look at the [training tutorial for our ONNX MNIST Convolutional Neural Network](https://github.com/Microsoft/CNTK/blob/master/Tutorials/CNTK_103D_MNIST_ConvolutionalNeuralNetwork.ipynb). "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Max-Pooling for Convolutional Neural Nets\n",
"\n",
""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Pre-Trained Model Architecture\n",
"\n",
""
]
},
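For reference, the 2x2 max-pooling described above can be sketched in NumPy (toy values chosen arbitrarily):

    import numpy as np

    feature_map = np.array([[1, 3, 2, 4],
                            [5, 6, 1, 2],
                            [7, 2, 8, 1],
                            [3, 4, 9, 5]])

    # max over each non-overlapping 2x2 window (stride 2)
    pooled = feature_map.reshape(2, 2, 2, 2).max(axis=(1, 3))
    # pooled == [[6, 4],
    #            [7, 9]]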
{
"cell_type": "code",
"execution_count": null,
@@ -211,6 +211,8 @@
"# Provision the AKS Cluster with SSL\n",
"This is a one time setup. You can reuse this cluster for multiple deployments after it has been created. If you delete the cluster or the resource group that contains it, then you would have to recreate it.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
"\n",
"See code snippet below. Check the documentation [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-secure-web-service) for more details."
]
},
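A minimal sketch of such a provisioning cell, assuming placeholder certificate files and cluster name (see the linked documentation for the authoritative options):

    from azureml.core.compute import AksCompute, ComputeTarget

    prov_config = AksCompute.provisioning_configuration()
    # attach your own SSL certificate; the paths and CNAME below are placeholders
    prov_config.enable_ssl(ssl_cert_pem_file="cert.pem",
                           ssl_key_pem_file="key.pem",
                           ssl_cname="your-cname.example.com")

    aks_target = ComputeTarget.create(workspace=ws, name="my-aks",
                                      provisioning_configuration=prov_config)
    aks_target.wait_for_completion(show_output=True)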
@@ -325,7 +325,9 @@
"metadata": {},
"source": [
"# Provision the AKS Cluster\n",
"This is a one time setup. You can reuse this cluster for multiple deployments after it has been created. If you delete the cluster or the resource group that contains it, then you would have to recreate it."
"This is a one time setup. You can reuse this cluster for multiple deployments after it has been created. If you delete the cluster or the resource group that contains it, then you would have to recreate it.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
@@ -203,6 +203,8 @@
"source": [
"### Provision a compute target\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
"\n",
"You can provision an AmlCompute resource by simply defining two parameters thanks to smart defaults. By default it autoscales from 0 nodes and provisions dedicated VMs to run your job in a container. This is useful when you want to continuously re-use the same target, debug it between jobs or simply share the resource with other users of your workspace.\n",
"\n",
"* `vm_size`: VM family of the nodes provisioned by AmlCompute. Simply choose from the supported_vmsizes() above\n",
@@ -255,11 +257,8 @@
"# Set compute target to AmlCompute target created in previous step\n",
"run_config.target = cpu_cluster.name\n",
"\n",
"# Enable Docker \n",
"run_config.environment.docker.enabled = True\n",
"\n",
"azureml_pip_packages = [\n",
" 'azureml-defaults', 'azureml-contrib-interpret', 'azureml-telemetry', 'azureml-interpret'\n",
" 'azureml-defaults', 'azureml-telemetry', 'azureml-interpret'\n",
"]\n",
"\n",
"# Note: this is to pin the scikit-learn and pandas versions to be same as notebook.\n",
@@ -3,9 +3,11 @@ dependencies:
- pip:
  - azureml-sdk
  - azureml-interpret
  - interpret-community[visualization]
  - flask
  - flask-cors
  - gevent>=1.3.6
  - jinja2
  - ipython
  - matplotlib
  - azureml-contrib-interpret
  - sklearn-pandas<2.0.0
  - azureml-dataset-runtime
  - ipywidgets
@@ -57,7 +57,7 @@
"Problem: IBM employee attrition classification with scikit-learn (run model explainer locally and upload explanation to the Azure Machine Learning Run History)\n",
"\n",
"1. Train an SVM classification model using Scikit-learn\n",
"2. Run 'explain_model' with AML Run History, which leverages run history service to store and manage the explanation data\n",
"2. Run 'explain-model-sample' with AML Run History, which leverages run history service to store and manage the explanation data\n",
"---\n",
"\n",
"Setup: If you are using Jupyter notebooks, the extensions should be installed automatically with the package.\n",
@@ -226,36 +226,6 @@
" ('classifier', SVC(C=1.0, probability=True))])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"'''\n",
"# Uncomment below if sklearn-pandas is not installed\n",
"#!pip install sklearn-pandas\n",
"from sklearn_pandas import DataFrameMapper\n",
"\n",
"# Impute, standardize the numeric features and one-hot encode the categorical features. \n",
"\n",
"\n",
"numeric_transformations = [([f], Pipeline(steps=[('imputer', SimpleImputer(strategy='median')), ('scaler', StandardScaler())])) for f in numerical]\n",
"\n",
"categorical_transformations = [([f], OneHotEncoder(handle_unknown='ignore', sparse=False)) for f in categorical]\n",
"\n",
"transformations = numeric_transformations + categorical_transformations\n",
"\n",
"# Append classifier to preprocessing pipeline.\n",
"# Now we have a full prediction pipeline.\n",
"clf = Pipeline(steps=[('preprocessor', transformations),\n",
" ('classifier', SVC(C=1.0, probability=True))]) \n",
"\n",
"\n",
"\n",
"'''"
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -475,7 +445,7 @@
"metadata": {},
"outputs": [],
"source": [
"experiment_name = 'explain_model'\n",
"experiment_name = 'explain-model-sample'\n",
"experiment = Experiment(ws, experiment_name)\n",
"run = experiment.start_logging()\n",
"client = ExplanationClient.from_run(run)"
@@ -3,7 +3,10 @@ dependencies:
- pip:
  - azureml-sdk
  - azureml-interpret
  - interpret-community[visualization]
  - flask
  - flask-cors
  - gevent>=1.3.6
  - jinja2
  - ipython
  - matplotlib
  - azureml-contrib-interpret
  - ipywidgets
@@ -166,12 +166,12 @@
"source": [
"from sklearn.model_selection import train_test_split\n",
"import joblib\n",
"from sklearn.compose import ColumnTransformer\n",
"from sklearn.preprocessing import StandardScaler, OneHotEncoder\n",
"from sklearn.impute import SimpleImputer\n",
"from sklearn.pipeline import Pipeline\n",
"from sklearn.linear_model import LogisticRegression\n",
"from sklearn.ensemble import RandomForestClassifier\n",
"from sklearn_pandas import DataFrameMapper\n",
"\n",
"from interpret.ext.blackbox import TabularExplainer\n",
"\n",
@@ -201,17 +201,23 @@
"# Store the numerical columns in a list numerical\n",
"numerical = attritionXData.columns.difference(categorical)\n",
"\n",
"numeric_transformations = [([f], Pipeline(steps=[\n",
"# We create the preprocessing pipelines for both numeric and categorical data.\n",
"numeric_transformer = Pipeline(steps=[\n",
" ('imputer', SimpleImputer(strategy='median')),\n",
" ('scaler', StandardScaler())])) for f in numerical]\n",
" ('scaler', StandardScaler())])\n",
"\n",
"categorical_transformations = [([f], OneHotEncoder(handle_unknown='ignore', sparse=False)) for f in categorical]\n",
"categorical_transformer = Pipeline(steps=[\n",
" ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),\n",
" ('onehot', OneHotEncoder(handle_unknown='ignore'))])\n",
"\n",
"transformations = numeric_transformations + categorical_transformations\n",
"transformations = ColumnTransformer(\n",
" transformers=[\n",
" ('num', numeric_transformer, numerical),\n",
" ('cat', categorical_transformer, categorical)])\n",
"\n",
"# Append classifier to preprocessing pipeline.\n",
"# Now we have a full prediction pipeline.\n",
"clf = Pipeline(steps=[('preprocessor', DataFrameMapper(transformations)),\n",
"clf = Pipeline(steps=[('preprocessor', transformations),\n",
" ('classifier', RandomForestClassifier())])\n",
"\n",
"# Split data into train and test\n",
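The hunk above swaps the DataFrameMapper-style per-column pipelines for scikit-learn's ColumnTransformer; the same pattern in a self-contained toy example:

    import pandas as pd
    from sklearn.compose import ColumnTransformer
    from sklearn.preprocessing import OneHotEncoder, StandardScaler

    df = pd.DataFrame({'age': [25, 32, 47], 'dept': ['hr', 'it', 'hr']})

    preprocessor = ColumnTransformer(transformers=[
        ('num', StandardScaler(), ['age']),
        ('cat', OneHotEncoder(handle_unknown='ignore'), ['dept'])])

    X = preprocessor.fit_transform(df)  # scaled numeric column plus one-hot columns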
@@ -323,7 +329,7 @@
"\n",
"# azureml-defaults is required to host the model as a web service.\n",
"azureml_pip_packages = [\n",
" 'azureml-defaults', 'azureml-contrib-interpret', 'azureml-core', 'azureml-telemetry',\n",
" 'azureml-defaults', 'azureml-core', 'azureml-telemetry',\n",
" 'azureml-interpret'\n",
"]\n",
" \n",
@@ -350,7 +356,7 @@
"# the submitted job is run in. Note the remote environment(s) needs to be similar to the local\n",
"# environment, otherwise if a model is trained or deployed in a different environment this can\n",
"# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
"myenv = CondaDependencies.create(pip_packages=['sklearn-pandas', 'pyyaml', sklearn_dep, pandas_dep] + azureml_pip_packages,\n",
"myenv = CondaDependencies.create(pip_packages=['pyyaml', sklearn_dep, pandas_dep] + azureml_pip_packages,\n",
" pin_sdk_version=False)\n",
"\n",
"with open(\"myenv.yml\",\"w\") as f:\n",
@@ -382,6 +388,7 @@
"from azureml.core.webservice import AciWebservice\n",
"from azureml.core.model import Model\n",
"from azureml.core.environment import Environment\n",
"from azureml.exceptions import WebserviceException\n",
"\n",
"\n",
"aciconfig = AciWebservice.deploy_configuration(cpu_cores=1, \n",
@@ -395,7 +402,12 @@
"\n",
"# Use configs and models generated above\n",
"service = Model.deploy(ws, 'model-scoring-deploy-local', [scoring_explainer_model, original_model], inference_config, aciconfig)\n",
"service.wait_for_deployment(show_output=True)"
"try:\n",
" service.wait_for_deployment(show_output=True)\n",
"except WebserviceException as e:\n",
" print(e.message)\n",
" print(service.get_logs())\n",
" raise"
]
},
{
@@ -3,8 +3,10 @@ dependencies:
- pip:
  - azureml-sdk
  - azureml-interpret
  - interpret-community[visualization]
  - flask
  - flask-cors
  - gevent>=1.3.6
  - jinja2
  - ipython
  - matplotlib
  - azureml-contrib-interpret
  - sklearn-pandas<2.0.0
  - ipywidgets
@@ -204,6 +204,8 @@
"source": [
"### Provision a compute target\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
"\n",
"You can provision an AmlCompute resource by simply defining two parameters thanks to smart defaults. By default it autoscales from 0 nodes and provisions dedicated VMs to run your job in a container. This is useful when you want to continuously re-use the same target, debug it between jobs or simply share the resource with other users of your workspace.\n",
"\n",
"* `vm_size`: VM family of the nodes provisioned by AmlCompute. Simply choose from the supported_vmsizes() above\n",
@@ -257,9 +259,6 @@
"# Set compute target to AmlCompute target created in previous step\n",
"run_config.target = cpu_cluster.name\n",
"\n",
"# Enable Docker \n",
"run_config.environment.docker.enabled = True\n",
"\n",
"# Set Docker base image to the default CPU-based image\n",
"run_config.environment.docker.base_image = DEFAULT_CPU_IMAGE\n",
"\n",
@@ -267,7 +266,7 @@
"run_config.environment.python.user_managed_dependencies = False\n",
"\n",
"azureml_pip_packages = [\n",
" 'azureml-defaults', 'azureml-contrib-interpret', 'azureml-telemetry', 'azureml-interpret'\n",
" 'azureml-defaults', 'azureml-telemetry', 'azureml-interpret'\n",
"]\n",
" \n",
"\n",
@@ -294,7 +293,7 @@
"# the submitted job is run in. Note the remote environment(s) needs to be similar to the local\n",
"# environment, otherwise if a model is trained or deployed in a different environment this can\n",
"# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
"azureml_pip_packages.extend(['sklearn-pandas', 'pyyaml', sklearn_dep, pandas_dep])\n",
"azureml_pip_packages.extend(['pyyaml', sklearn_dep, pandas_dep])\n",
"run_config.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=azureml_pip_packages)\n",
"# Now submit a run on AmlCompute\n",
"from azureml.core.script_run_config import ScriptRunConfig\n",
@@ -431,7 +430,7 @@
"\n",
"# WARNING: to install this, g++ needs to be available on the Docker image and is not by default (look at the next cell)\n",
"azureml_pip_packages = [\n",
" 'azureml-defaults', 'azureml-contrib-interpret', 'azureml-core', 'azureml-telemetry',\n",
" 'azureml-defaults', 'azureml-core', 'azureml-telemetry',\n",
" 'azureml-interpret'\n",
"]\n",
" \n",
@@ -458,7 +457,7 @@
"# the submitted job is run in. Note the remote environment(s) needs to be similar to the local\n",
"# environment, otherwise if a model is trained or deployed in a different environment this can\n",
"# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
"azureml_pip_packages.extend(['sklearn-pandas', 'pyyaml', sklearn_dep, pandas_dep])\n",
"azureml_pip_packages.extend(['pyyaml', sklearn_dep, pandas_dep])\n",
"myenv = CondaDependencies.create(pip_packages=azureml_pip_packages)\n",
"\n",
"with open(\"myenv.yml\",\"w\") as f:\n",
@@ -489,6 +488,7 @@
"from azureml.core.webservice import AciWebservice\n",
"from azureml.core.model import Model\n",
"from azureml.core.environment import Environment\n",
"from azureml.exceptions import WebserviceException\n",
"\n",
"\n",
"aciconfig = AciWebservice.deploy_configuration(cpu_cores=1, \n",
@@ -502,7 +502,12 @@
"\n",
"# Use configs and models generated above\n",
"service = Model.deploy(ws, 'model-scoring-service', [scoring_explainer_model, original_model], inference_config, aciconfig)\n",
"service.wait_for_deployment(show_output=True)"
"try:\n",
" service.wait_for_deployment(show_output=True)\n",
"except WebserviceException as e:\n",
" print(e.message)\n",
" print(service.get_logs())\n",
" raise"
]
},
{
@@ -3,10 +3,12 @@ dependencies:
- pip:
  - azureml-sdk
  - azureml-interpret
  - interpret-community[visualization]
  - flask
  - flask-cors
  - gevent>=1.3.6
  - jinja2
  - ipython
  - matplotlib
  - azureml-contrib-interpret
  - sklearn-pandas<2.0.0
  - azureml-dataset-runtime
  - azureml-core
  - ipywidgets
@@ -5,13 +5,13 @@
import os
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import joblib
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn_pandas import DataFrameMapper

from azureml.core.run import Run
from interpret.ext.blackbox import TabularExplainer
@@ -57,16 +57,22 @@ for col, value in attritionXData.iteritems():
# store the numerical columns
numerical = attritionXData.columns.difference(categorical)

numeric_transformations = [([f], Pipeline(steps=[
# We create the preprocessing pipelines for both numeric and categorical data.
numeric_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='median')),
    ('scaler', StandardScaler())])) for f in numerical]
    ('scaler', StandardScaler())])

categorical_transformations = [([f], OneHotEncoder(handle_unknown='ignore', sparse=False)) for f in categorical]
categorical_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
    ('onehot', OneHotEncoder(handle_unknown='ignore'))])

transformations = numeric_transformations + categorical_transformations
transformations = ColumnTransformer(
    transformers=[
        ('num', numeric_transformer, numerical),
        ('cat', categorical_transformer, categorical)])

# append classifier to preprocessing pipeline
clf = Pipeline(steps=[('preprocessor', DataFrameMapper(transformations)),
clf = Pipeline(steps=[('preprocessor', transformations),
                      ('classifier', LogisticRegression(solver='lbfgs'))])

# get the run this was submitted from to interact with run history
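Downstream of this hunk, the TabularExplainer imported at the top of the script is applied to the fitted pipeline, roughly as follows (a sketch: the train/test split variable names are assumptions, not shown in this excerpt):

    # explain the final estimator, letting the explainer apply the same transformations
    explainer = TabularExplainer(clf.steps[-1][1],
                                 initialization_examples=x_train,
                                 features=attritionXData.columns,
                                 transformations=transformations)
    global_explanation = explainer.explain_global(x_test)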
@@ -9,7 +9,7 @@ These notebooks below are designed to go in sequence.
4. [aml-pipelines-data-transfer.ipynb](https://aka.ms/pl-data-trans): This notebook shows how you transfer data between supported datastores.
5. [aml-pipelines-use-databricks-as-compute-target.ipynb](https://aka.ms/pl-databricks): This notebook shows how you can use Pipelines to send your compute payload to Azure Databricks.
6. [aml-pipelines-use-adla-as-compute-target.ipynb](https://aka.ms/pl-adla): This notebook shows how you can use Azure Data Lake Analytics (ADLA) as a compute target.
7. [aml-pipelines-how-to-use-estimatorstep.ipynb](https://aka.ms/pl-estimator): This notebook shows how to use the EstimatorStep.
7. [aml-pipelines-with-commandstep.ipynb](aml-pipelines-with-commandstep.ipynb): This notebook shows how to use the CommandStep.
8. [aml-pipelines-parameter-tuning-with-hyperdrive.ipynb](https://aka.ms/pl-hyperdrive): HyperDriveStep in Pipelines shows how you can do hyperparameter tuning using Pipelines.
9. [aml-pipelines-how-to-use-azurebatch-to-run-a-windows-executable.ipynb](https://aka.ms/pl-azbatch): AzureBatchStep can be used to run your custom code in an AzureBatch cluster.
10. [aml-pipelines-setup-schedule-for-a-published-pipeline.ipynb](https://aka.ms/pl-schedule): Once you publish a Pipeline, you can schedule it to trigger based on an interval or on data change in a defined datastore.
@@ -19,5 +19,6 @@ These notebooks below are designed to go in sequence.
14. [aml-pipelines-how-to-use-pipeline-drafts.ipynb](http://aka.ms/pl-pl-draft): This notebook shows how to use Pipeline Drafts. Pipeline Drafts are mutable pipelines which can be used to submit runs and create Published Pipelines.
15. [aml-pipelines-hot-to-use-modulestep.ipynb](https://aka.ms/pl-modulestep): This notebook shows how to define Module, ModuleVersion and how to use them in an AML Pipeline using ModuleStep.
16. [aml-pipelines-with-notebook-runner-step.ipynb](https://aka.ms/pl-nbrstep): This notebook shows how you can run another notebook as a step in an Azure Machine Learning Pipeline.
17. [aml-pipelines-with-commandstep-r.ipynb](aml-pipelines-with-commandstep-r.ipynb): This notebook shows how to use CommandStep to run R scripts.
@@ -22,6 +22,8 @@
"# Azure Machine Learning Pipeline with DataTransferStep\n",
"This notebook is used to demonstrate the use of DataTransferStep in an Azure Machine Learning Pipeline.\n",
"\n",
"> **Note:** In Azure Machine Learning, you can write output data directly to Azure Blob Storage, Azure Data Lake Storage Gen 1, Azure Data Lake Storage Gen 2, or Azure FileShare without going through an extra DataTransferStep. Learn how to use [OutputFileDatasetConfig](https://docs.microsoft.com/python/api/azureml-core/azureml.data.output_dataset_config.outputfiledatasetconfig?view=azure-ml-py) to achieve that with the sample notebooks [here](https://aka.ms/pipeline-with-dataset).\n",
"\n",
"In certain cases, you will need to transfer data from one data location to another. For example, your data may be in Azure SQL Database and you may want to move it to Azure Data Lake storage. Or, your data is in an ADLS account and you want to make it available in Blob storage. The built-in **DataTransferStep** class helps you transfer data in these situations.\n",
"\n",
"The below examples show how to move data between different storage types supported in Azure Machine Learning.\n",
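A minimal sketch of the OutputFileDatasetConfig pattern referenced in the note (the step script, folder, and compute names are illustrative):

    from azureml.data import OutputFileDatasetConfig
    from azureml.pipeline.steps import PythonScriptStep

    # the step writes into this location and AzureML uploads it to the default datastore
    prepared_data = OutputFileDatasetConfig(name="prepared_data")

    prep_step = PythonScriptStep(script_name="prep.py",
                                 arguments=["--output", prepared_data],
                                 compute_target=aml_compute,
                                 source_directory="scripts")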
@@ -209,6 +209,8 @@
"#### Retrieve or create an Azure Machine Learning compute\n",
"Azure Machine Learning Compute is a service for provisioning and managing clusters of Azure virtual machines for running machine learning workloads. Let's create a new Azure Machine Learning Compute in the current workspace, if it doesn't already exist. We will then run the training script on this compute target.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
"\n",
"If we could not find the compute with the given name in the previous cell, then we will create a new compute here. We will create an Azure Machine Learning Compute containing **STANDARD_D2_V2 CPU VMs**. This process is broken down into the following steps:\n",
"\n",
"1. Create the configuration\n",
@@ -341,7 +341,7 @@
"outputs": [],
"source": [
"pipeline = Pipeline(workspace=ws, steps=[step])\n",
"pipeline_run = Experiment(ws, 'azurebatch_experiment').submit(pipeline)"
"pipeline_run = Experiment(ws, 'azurebatch_sample').submit(pipeline)"
]
},
{
@@ -55,7 +55,9 @@
"metadata": {},
"source": [
"### Compute Target\n",
"Retrieve an already attached Azure Machine Learning Compute to use in the Pipeline."
"Retrieve an already attached Azure Machine Learning Compute to use in the Pipeline.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
@@ -130,7 +132,7 @@
"\n",
"pipeline_draft = PipelineDraft.create(ws, name=\"TestPipelineDraft\",\n",
" description=\"draft description\",\n",
" experiment_name=\"helloworld\",\n",
" experiment_name=\"pipeline_draft_sample\",\n",
" pipeline=pipeline,\n",
" continue_on_step_failure=True,\n",
" tags={'dev': 'true'},\n",
@@ -42,15 +42,13 @@
"outputs": [],
"source": [
"import azureml.core\n",
"from azureml.core import Workspace, Experiment, Datastore, Dataset\n",
"from azureml.core import Workspace, Environment, Experiment, Datastore, Dataset, ScriptRunConfig\n",
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"from azureml.core.runconfig import RunConfiguration\n",
"from azureml.exceptions import ComputeTargetException\n",
"from azureml.pipeline.steps import HyperDriveStep, HyperDriveStepRun, PythonScriptStep\n",
"from azureml.pipeline.core import Pipeline, PipelineData, TrainingOutput\n",
"from azureml.train.dnn import TensorFlow\n",
"# from azureml.train.hyperdrive import *\n",
"from azureml.train.hyperdrive import RandomParameterSampling, BanditPolicy, HyperDriveConfig, PrimaryMetricGoal\n",
"from azureml.train.hyperdrive import choice, loguniform\n",
"\n",
@@ -121,12 +119,17 @@
"metadata": {},
"outputs": [],
"source": [
"os.makedirs('./data/mnist', exist_ok=True)\n",
"data_folder = os.path.join(os.getcwd(), 'data/mnist')\n",
"os.makedirs(data_folder, exist_ok=True)\n",
"\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', filename = './data/mnist/train-images.gz')\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', filename = './data/mnist/train-labels.gz')\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename = './data/mnist/test-images.gz')\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename = './data/mnist/test-labels.gz')"
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/train-images-idx3-ubyte.gz',\n",
" filename=os.path.join(data_folder, 'train-images.gz'))\n",
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/train-labels-idx1-ubyte.gz',\n",
" filename=os.path.join(data_folder, 'train-labels.gz'))\n",
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-images-idx3-ubyte.gz',\n",
" filename=os.path.join(data_folder, 'test-images.gz'))\n",
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-labels-idx1-ubyte.gz',\n",
" filename=os.path.join(data_folder, 'test-labels.gz'))"
]
},
{
@@ -146,11 +149,11 @@
"from utils import load_data\n",
"\n",
"# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster.\n",
"X_train = load_data('./data/mnist/train-images.gz', False) / 255.0\n",
"y_train = load_data('./data/mnist/train-labels.gz', True).reshape(-1)\n",
"X_train = load_data(os.path.join(data_folder, 'train-images.gz'), False) / np.float32(255.0)\n",
"X_test = load_data(os.path.join(data_folder, 'test-images.gz'), False) / np.float32(255.0)\n",
"y_train = load_data(os.path.join(data_folder, 'train-labels.gz'), True).reshape(-1)\n",
"y_test = load_data(os.path.join(data_folder, 'test-labels.gz'), True).reshape(-1)\n",
"\n",
"X_test = load_data('./data/mnist/test-images.gz', False) / 255.0\n",
"y_test = load_data('./data/mnist/test-labels.gz', True).reshape(-1)\n",
"\n",
"count = 0\n",
"sample_size = 30\n",
@@ -207,6 +210,8 @@
"## Retrieve or create an Azure Machine Learning compute\n",
"Azure Machine Learning Compute is a service for provisioning and managing clusters of Azure virtual machines for running machine learning workloads. Let's create a new Azure Machine Learning Compute in the current workspace, if it doesn't already exist. We will then run the training script on this compute target.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
"\n",
"If we could not find the compute with the given name in the previous cell, then we will create a new compute here. This process is broken down into the following steps:\n",
"\n",
"1. Create the configuration\n",
@@ -277,13 +282,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create TensorFlow estimator\n",
"Next, we construct an [TensorFlow](https://docs.microsoft.com/python/api/azureml-train-core/azureml.train.dnn.tensorflow?view=azure-ml-py) estimator object.\n",
"The TensorFlow estimator is providing a simple way of launching a TensorFlow training job on a compute target. It will automatically provide a docker image that has TensorFlow installed -- if additional pip or conda packages are required, their names can be passed in via the `pip_packages` and `conda_packages` arguments and they will be included in the resulting docker.\n",
"\n",
"The TensorFlow estimator also takes a `framework_version` parameter -- if no version is provided, the estimator will default to the latest version supported by AzureML. Use `TensorFlow.get_supported_versions()` to get a list of all versions supported by your current SDK version or see the [SDK documentation](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.dnn?view=azure-ml-py) for the versions supported in the most current release."
"## Retrieve an Environment\n",
"In this tutorial, we will use one of Azure ML's curated TensorFlow environments for training. Curated environments are available in your workspace by default. Specifically, we will use the TensorFlow 2.0 GPU curated environment."
]
},
{
@@ -292,12 +292,45 @@
"metadata": {},
"outputs": [],
"source": [
"est = TensorFlow(source_directory=script_folder, \n",
"tf_env = Environment.get(ws, name='AzureML-TensorFlow-2.0-GPU')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Setup an input for the ScriptRunConfig step\n",
"You can mount the dataset to the remote compute."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data_folder = dataset.as_mount()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Configure the training job\n",
"Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, the environment to use, and the compute target to run on."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"src = ScriptRunConfig(source_directory=script_folder,\n",
" script='tf_mnist.py',\n",
" arguments=['--data-folder', data_folder],\n",
" compute_target=compute_target,\n",
" entry_script='tf_mnist.py', \n",
" use_gpu=True,\n",
" framework_version='2.0',\n",
" pip_packages=['azureml-dataset-runtime[pandas,fuse]'])"
" environment=tf_env)"
]
},
{
@@ -361,7 +394,7 @@
},
"outputs": [],
"source": [
"hd_config = HyperDriveConfig(estimator=est, \n",
"hd_config = HyperDriveConfig(run_config=src, \n",
" hyperparameter_sampling=ps,\n",
" policy=early_termination_policy,\n",
" primary_metric_name='validation_acc', \n",
@@ -370,25 +403,6 @@
" max_concurrent_runs=4)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Add HyperDrive as a step of pipeline\n",
"\n",
"### Setup an input for the hyperdrive step\n",
"You can mount the dataset to the remote compute."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data_folder = dataset.as_mount()"
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -397,7 +411,6 @@
"HyperDriveStep can be used to run a HyperDrive job as a step in a pipeline.\n",
"- **name:** Name of the step\n",
"- **hyperdrive_config:** A HyperDriveConfig that defines the configuration for this HyperDrive run\n",
"- **estimator_entry_script_arguments:** List of command-line arguments for the estimator entry script\n",
"- **inputs:** List of input port bindings\n",
"- **outputs:** List of output port bindings\n",
"- **metrics_output:** Optional value specifying the location to store HyperDrive run metrics as a JSON file\n",
@@ -432,7 +445,6 @@
"hd_step = HyperDriveStep(\n",
" name=hd_step_name,\n",
" hyperdrive_config=hd_config,\n",
" estimator_entry_script_arguments=['--data-folder', data_folder],\n",
" inputs=[data_folder],\n",
" outputs=[metrics_data, saved_model])"
]
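The `ps` and `early_termination_policy` objects passed to HyperDriveConfig above are built from the hyperdrive imports at the top of the notebook; a typical definition (parameter names and ranges here are illustrative) looks like:

    ps = RandomParameterSampling({
        '--batch-size': choice(32, 64, 128),
        '--learning-rate': loguniform(-6, -1)
    })
    early_termination_policy = BanditPolicy(evaluation_interval=2, slack_factor=0.1)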
@@ -41,14 +41,14 @@
"source": [
"import azureml.core\n",
"from azureml.core import Workspace, Datastore, Experiment, Dataset\n",
"from azureml.data import OutputFileDatasetConfig\n",
"from azureml.core.compute import AmlCompute\n",
"from azureml.core.compute import ComputeTarget\n",
"\n",
"# Check core SDK version number\n",
"print(\"SDK version:\", azureml.core.VERSION)\n",
"\n",
"from azureml.data.data_reference import DataReference\n",
"from azureml.pipeline.core import Pipeline, PipelineData\n",
"from azureml.pipeline.core import Pipeline\n",
"from azureml.pipeline.steps import PythonScriptStep\n",
"from azureml.pipeline.core.graph import PipelineParameter\n",
"\n",
@@ -68,7 +68,9 @@
"metadata": {},
"source": [
"### Compute Targets\n",
"#### Retrieve an already attached Azure Machine Learning Compute"
"#### Retrieve an already attached Azure Machine Learning Compute\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
@@ -140,9 +142,9 @@
"metadata": {},
"outputs": [],
"source": [
"# Define intermediate data using PipelineData\n",
"processed_data1 = PipelineData(\"processed_data1\", datastore=def_blob_store)\n",
"print(\"PipelineData object created\")"
"# Define intermediate data using OutputFileDatasetConfig\n",
"processed_data1 = OutputFileDatasetConfig(name=\"processed_data1\")\n",
"print(\"Output dataset object created\")"
]
},
{
@@ -170,9 +172,7 @@
"\n",
"trainStep = PythonScriptStep(\n",
" script_name=\"train.py\", \n",
" arguments=[\"--input_data\", blob_input_data, \"--output_train\", processed_data1],\n",
" inputs=[blob_input_data],\n",
" outputs=[processed_data1],\n",
" arguments=[\"--input_data\", blob_input_data.as_mount(), \"--output_train\", processed_data1],\n",
" compute_target=aml_compute, \n",
" source_directory=source_directory\n",
")\n",
@@ -195,16 +195,14 @@
"metadata": {},
"outputs": [],
"source": [
"# extractStep to use the intermediate data produced by step4\n",
"# extractStep to use the intermediate data produced by trainStep\n",
"# This step also produces an output processed_data2\n",
"processed_data2 = PipelineData(\"processed_data2\", datastore=def_blob_store)\n",
"processed_data2 = OutputFileDatasetConfig(name=\"processed_data2\")\n",
"source_directory = \"publish_run_extract\"\n",
"\n",
"extractStep = PythonScriptStep(\n",
" script_name=\"extract.py\",\n",
" arguments=[\"--input_extract\", processed_data1, \"--output_extract\", processed_data2],\n",
" inputs=[processed_data1],\n",
" outputs=[processed_data2],\n",
" arguments=[\"--input_extract\", processed_data1.as_input(), \"--output_extract\", processed_data2],\n",
" compute_target=aml_compute, \n",
" source_directory=source_directory)\n",
"print(\"extractStep created\")"
@@ -256,15 +254,17 @@
"metadata": {},
"outputs": [],
"source": [
"# Now define step6 that takes two inputs (both intermediate data), and produces an output\n",
"processed_data3 = PipelineData(\"processed_data3\", datastore=def_blob_store)\n",
"# Now define compareStep that takes two inputs (both intermediate data), and produces an output\n",
"processed_data3 = OutputFileDatasetConfig(name=\"processed_data3\")\n",
"\n",
"# You can register the output as a dataset after job completion\n",
"processed_data3 = processed_data3.register_on_complete(\"compare_result\")\n",
"\n",
"source_directory = \"publish_run_compare\"\n",
"\n",
"compareStep = PythonScriptStep(\n",
" script_name=\"compare.py\",\n",
" arguments=[\"--compare_data1\", processed_data1, \"--compare_data2\", processed_data2, \"--output_compare\", processed_data3, \"--pipeline_param\", pipeline_param],\n",
" inputs=[processed_data1, processed_data2],\n",
" outputs=[processed_data3], \n",
" arguments=[\"--compare_data1\", processed_data1.as_input(), \"--compare_data2\", processed_data2.as_input(), \"--output_compare\", processed_data3, \"--pipeline_param\", pipeline_param], \n",
" compute_target=aml_compute, \n",
" source_directory=source_directory)\n",
"print(\"compareStep created\")"
@@ -327,7 +327,7 @@
"outputs": [],
"source": [
"# submit a pipeline run\n",
"pipeline_run1 = Experiment(ws, 'Pipeline_experiment').submit(pipeline1)\n",
"pipeline_run1 = Experiment(ws, 'Pipeline_experiment_sample').submit(pipeline1)\n",
"# publish a pipeline from the submitted pipeline run\n",
"published_pipeline2 = pipeline_run1.publish_pipeline(name=\"My_New_Pipeline2\", description=\"My Published Pipeline Description\", version=\"0.1\", continue_on_step_failure=True)\n",
"published_pipeline2"
@@ -54,7 +54,9 @@
"metadata": {},
"source": [
"### Compute Targets\n",
"#### Retrieve an already attached Azure Machine Learning Compute"
"#### Retrieve an already attached Azure Machine Learning Compute\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
@@ -259,7 +261,7 @@
"\n",
"schedule = Schedule.create(workspace=ws, name=\"My_Schedule\",\n",
" pipeline_id=pub_pipeline_id, \n",
" experiment_name='Schedule_Run',\n",
" experiment_name='Schedule-run-sample',\n",
" recurrence=recurrence,\n",
" wait_for_provisioning=True,\n",
" description=\"Schedule Run\")\n",
@@ -445,7 +447,7 @@
"\n",
"schedule = Schedule.create(workspace=ws, name=\"My_Schedule\",\n",
" pipeline_id=pub_pipeline_id, \n",
" experiment_name='Schedule_Run',\n",
" experiment_name='Schedule-run-sample',\n",
" datastore=datastore,\n",
" wait_for_provisioning=True,\n",
" description=\"Schedule Run\")\n",
@@ -516,7 +518,7 @@
"\n",
"schedule = Schedule.create_for_pipeline_endpoint(workspace=ws, name=\"My_Endpoint_Schedule\",\n",
" pipeline_endpoint_id=published_pipeline_endpoint_id,\n",
" experiment_name='Schedule_Run',\n",
" experiment_name='Schedule-run-sample',\n",
" recurrence=recurrence, description=\"Schedule_Run\",\n",
" wait_for_provisioning=True)\n",
"\n",
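The `recurrence` object consumed by Schedule.create above comes from ScheduleRecurrence; a minimal sketch (the frequency and interval are illustrative):

    from azureml.pipeline.core.schedule import ScheduleRecurrence

    # trigger the published pipeline once a day
    recurrence = ScheduleRecurrence(frequency="Day", interval=1)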
@@ -78,7 +78,9 @@
"source": [
"#### Initialization, Steps to create a Pipeline\n",
"\n",
"The best practice is to use separate folders for scripts and their dependent files for each step and specify that folder as the `source_directory` for the step. This helps reduce the size of the snapshot created for the step (only the specific folder is snapshotted). Since changes in any files in the `source_directory` would trigger a re-upload of the snapshot, this helps keep the reuse of the step when there are no changes in the `source_directory` of the step."
"The best practice is to use separate folders for scripts and their dependent files for each step and specify that folder as the `source_directory` for the step. This helps reduce the size of the snapshot created for the step (only the specific folder is snapshotted). Since changes in any files in the `source_directory` would trigger a re-upload of the snapshot, this helps keep the reuse of the step when there are no changes in the `source_directory` of the step.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
@@ -553,7 +555,7 @@
"outputs": [],
"source": [
"from azureml.core import Experiment\n",
"pipeline_run = Experiment(ws, name=\"submit_from_endpoint\").submit(pipeline_endpoint_by_name, tags={'endpoint_tag': \"1\"}, pipeline_version=\"0\")"
"pipeline_run = Experiment(ws, name=\"submit_endpoint_sample\").submit(pipeline_endpoint_by_name, tags={'endpoint_tag': \"1\"}, pipeline_version=\"0\")"
]
}
],
@@ -109,7 +109,9 @@
"metadata": {},
"source": [
"## Create or Attach an AmlCompute cluster\n",
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you get the default `AmlCompute` as your training compute resource."
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you get the default `AmlCompute` as your training compute resource.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
@@ -111,7 +111,9 @@
"metadata": {},
"source": [
"## Create or Attach an AmlCompute cluster\n",
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you get the default `AmlCompute` as your training compute resource."
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you get the default `AmlCompute` as your training compute resource.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
@@ -699,12 +699,162 @@
]
},
{
"source": [
"### 5. Running a demo notebook already added to the Databricks workspace using an existing cluster\n",
"First you need to register the DBFS datastore and make sure path_on_datastore exists in the Databricks file system; you can browse the files by referring to [this guide](https://docs.azuredatabricks.net/user-guide/dbfs-databricks-file-system.html).\n",
"\n",
"Find existing_cluster_id by opening the Clusters page in the Azure Databricks UI; in the URL you will find a string joined with '-' right after \"clusters/\"."
],
"cell_type": "markdown",
"metadata": {}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"try:\n",
" dbfs_ds = Datastore.get(workspace=ws, datastore_name='dbfs_datastore')\n",
" print('DBFS Datastore already exists')\n",
"except Exception as ex:\n",
" dbfs_ds = Datastore.register_dbfs(ws, datastore_name='dbfs_datastore')\n",
"\n",
"step_1_input = DataReference(datastore=dbfs_ds, path_on_datastore=\"FileStore\", data_reference_name=\"input\")\n",
"step_1_output = PipelineData(\"output\", datastore=dbfs_ds)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dbNbWithExistingClusterStep = DatabricksStep(\n",
" name=\"DBFSReferenceWithExisting\",\n",
" inputs=[step_1_input],\n",
" outputs=[step_1_output],\n",
" notebook_path=notebook_path,\n",
" notebook_params={'myparam': 'testparam', \n",
" 'myparam2': pipeline_param},\n",
" run_name='DBFS_Reference_With_Existing',\n",
" compute_target=databricks_compute,\n",
" existing_cluster_id=\"your existing cluster id\",\n",
" allow_reuse=True\n",
")"
]
},
{
"source": [
"#### Build and submit the Experiment"
],
"cell_type": "markdown",
"metadata": {}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"steps = [dbNbWithExistingClusterStep]\n",
"pipeline = Pipeline(workspace=ws, steps=steps)\n",
"pipeline_run = Experiment(ws, 'DBFS_Reference_With_Existing').submit(pipeline)\n",
"pipeline_run.wait_for_completion()"
]
},
{
"source": [
"#### View Run Details"
],
"cell_type": "markdown",
"metadata": {}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"RunDetails(pipeline_run).show()"
]
},
{
"source": [
"### 6. Running a Python script that currently lives on the local computer in Databricks with an existing cluster\n",
"When you access Azure Blob or Data Lake storage from an existing (interactive) cluster, you need to ensure the Spark configuration is set up correctly to access this storage, and this setup may require the cluster to be restarted.\n",
"\n",
"If you set permit_cluster_restart to True, AML will check whether the Spark configuration needs to be updated and restart the cluster for you if required. This will ensure that the storage can be correctly accessed from the Databricks cluster."
],
"cell_type": "markdown",
"metadata": {}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"step_1_input = DataReference(datastore=def_blob_store, path_on_datastore=\"dbtest\",\n",
" data_reference_name=\"input\")\n",
"\n",
"dbPythonInLocalWithExistingStep = DatabricksStep(\n",
" name=\"DBPythonInLocalMachineWithExisting\",\n",
" inputs=[step_1_input],\n",
" python_script_name=python_script_name,\n",
" source_directory=source_directory,\n",
" run_name='DB_Python_Local_existing_demo',\n",
" compute_target=databricks_compute,\n",
" existing_cluster_id=\"your existing cluster id\",\n",
" allow_reuse=False,\n",
" permit_cluster_restart=True\n",
")"
]
},
{
"source": [
"#### Build and submit the Experiment"
],
"cell_type": "markdown",
"metadata": {}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"steps = [dbPythonInLocalWithExistingStep]\n",
"pipeline = Pipeline(workspace=ws, steps=steps)\n",
"pipeline_run = Experiment(ws, 'DB_Python_Local_existing_demo').submit(pipeline)\n",
"pipeline_run.wait_for_completion()"
]
},
{
"source": [
"#### View Run Details"
],
"cell_type": "markdown",
"metadata": {}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"RunDetails(pipeline_run).show()"
]
},
{
"source": [
"# Next: ADLA as a Compute Target\n",
"To use ADLA as a compute target from an Azure Machine Learning Pipeline, an AdlaStep is used. This [notebook](https://aka.ms/pl-adla) demonstrates the use of AdlaStep in an Azure Machine Learning Pipeline."
],
"cell_type": "markdown",
"metadata": {}
}
],
"metadata": {
@@ -101,7 +101,7 @@
"metadata": {},
"source": [
"## Create an Azure ML experiment\n",
"Let's create an experiment named \"automlstep-classification\" and a folder to hold the training scripts. The script runs will be recorded under the experiment in Azure.\n",
"Let's create an experiment named \"automlstep-sample\" and a folder to hold the training scripts. The script runs will be recorded under the experiment in Azure.\n",
"\n",
"The best practice is to use separate folders for scripts and their dependent files for each step and specify that folder as the `source_directory` for the step. This helps reduce the size of the snapshot created for the step (only the specific folder is snapshotted). Since changes in any files in the `source_directory` would trigger a re-upload of the snapshot, this helps keep the reuse of the step when there are no changes in the `source_directory` of the step."
]
@@ -113,7 +113,7 @@
"outputs": [],
"source": [
"# Choose a name for the run history container in the workspace.\n",
"experiment_name = 'automlstep-classification'\n",
"experiment_name = 'automlstep-sample'\n",
"project_folder = './project'\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
@@ -125,7 +125,9 @@
"metadata": {},
"source": [
"### Create or Attach an AmlCompute cluster\n",
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you get the default `AmlCompute` as your training compute resource."
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you get the default `AmlCompute` as your training compute resource.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
]
},
{
@@ -0,0 +1,345 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to use CommandStep in Azure ML Pipelines\n",
|
||||
"\n",
|
||||
"This notebook shows how to use the CommandStep with Azure Machine Learning Pipelines for running R scripts in a pipeline.\n",
|
||||
"\n",
|
||||
"The example shows training a model in R to predict probability of fatality for vehicle crashes.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Prerequisite:\n",
|
||||
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n",
|
||||
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](https://aka.ms/pl-config) to:\n",
|
||||
" * install the Azure ML SDK\n",
|
||||
" * create a workspace and its configuration file (`config.json`)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's get started. First let's import some Python libraries."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import azureml.core\n",
|
||||
"# check core SDK version number\n",
|
||||
"print(\"Azure ML SDK Version: \", azureml.core.VERSION)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize workspace\n",
|
||||
"Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Workspace\n",
|
||||
"ws = Workspace.from_config()\n",
|
||||
"print('Workspace name: ' + ws.name, \n",
|
||||
" 'Azure region: ' + ws.location, \n",
|
||||
" 'Subscription id: ' + ws.subscription_id, \n",
|
||||
" 'Resource group: ' + ws.resource_group, sep = '\\n')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create or Attach existing AmlCompute\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we could not find the cluster with the given name, then we will create a new cluster here. We will create an `AmlCompute` cluster of `STANDARD_D2_V2` CPU VMs. This process is broken down into 3 steps:\n",
|
||||
"1. create the configuration (this step is local and only takes a second)\n",
|
||||
"2. create the cluster (this step will take about **20 seconds**)\n",
|
||||
"3. provision the VMs to bring the cluster to the initial size (of 1 in this case). This step will take about **3-5 minutes** and is providing only sparse output in the process. Please make sure to wait until the call returns before moving to the next cell"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
||||
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||
"\n",
|
||||
"# choose a name for your cluster\n",
|
||||
"cluster_name = \"cpu-cluster\"\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" compute_target = ComputeTarget(workspace=ws, name=cluster_name)\n",
|
||||
" print('Found existing compute target')\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" print('Creating a new compute target...')\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2', max_nodes=4)\n",
|
||||
"\n",
|
||||
" # create the cluster\n",
|
||||
" compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n",
|
||||
"\n",
|
||||
" # can poll for a minimum number of nodes and for a specific timeout. \n",
|
||||
" # if no min node count is provided it uses the scale settings for the cluster\n",
|
||||
" compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
|
||||
"\n",
|
||||
"# use get_status() to get a detailed status for the current cluster. \n",
|
||||
"print(compute_target.get_status().serialize())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now that you have created the compute target, let's see what the workspace's `compute_targets` property returns. You should now see one entry named 'cpu-cluster' of type `AmlCompute`."
|
||||
]
|
||||
},
|
||||
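A minimal sketch of such a check (assuming the `ws` workspace object created above):

```python
# List the compute targets visible in the workspace.
# `ws.compute_targets` returns a dict mapping names to ComputeTarget objects.
for name, target in ws.compute_targets.items():
    print(name, ':', target.type)
```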
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create a CommandStep\n",
|
||||
"CommandStep adds a step to run a command in a Pipeline. For the full set of configurable options see the CommandStep [reference docs](https://docs.microsoft.com/python/api/azureml-pipeline-steps/azureml.pipeline.steps.commandstep?view=azure-ml-py).\n",
|
||||
"\n",
|
||||
"- **name:** Name of the step\n",
|
||||
"- **runconfig:** ScriptRunConfig object. You can configure a ScriptRunConfig object as you would for a standalone non-pipeline run and pass it in to this parameter. If using this option, you do not have to specify the `command`, `source_directory`, `compute_target` parameters of the CommandStep constructor as they are already defined in your ScriptRunConfig.\n",
|
||||
"- **runconfig_pipeline_params:** Override runconfig properties at runtime using key-value pairs each with name of the runconfig property and PipelineParameter for that property\n",
|
||||
"- **command:** The command to run or path of the executable/script relative to `source_directory`. It is required unless the `runconfig` parameter is specified. It can be specified with string arguments in a single string or with input/output/PipelineParameter in a list.\n",
|
||||
"- **source_directory:** A folder containing the script and other resources used in the step.\n",
|
||||
"- **compute_target:** Compute target to use \n",
|
||||
"- **allow_reuse:** Whether the step should reuse previous results when run with the same settings/inputs. If this is false, a new run will always be generated for this step during pipeline execution.\n",
|
||||
"- **version:** Optional version tag to denote a change in functionality for the step\n",
|
||||
"\n",
|
||||
"> The best practice is to use separate folders for scripts and its dependent files for each step and specify that folder as the `source_directory` for the step. This helps reduce the size of the snapshot created for the step (only the specific folder is snapshotted). Since changes in any files in the `source_directory` would trigger a re-upload of the snapshot, this helps keep the reuse of the step when there are no changes in the `source_directory` of the step."
|
||||
]
|
||||
},
|
||||
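For contrast, here is a hedged sketch of a CommandStep defined without a ScriptRunConfig, using only the parameters listed above (the command and step name are hypothetical):

```python
from azureml.pipeline.steps import CommandStep

# Hypothetical step without a runconfig: command, source_directory, and
# compute_target are passed directly to the CommandStep constructor.
echo_step = CommandStep(name='echo',
                        command=['echo Hello from a pipeline step'],
                        source_directory='commandstep_r',
                        compute_target=compute_target,
                        allow_reuse=True)
```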
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Configure environment\n",
|
||||
"\n",
|
||||
"Configure the environment for the train step. In this example we will create an environment from the Dockerfile we have included.\n",
|
||||
"\n",
|
||||
"> Azure ML currently requires Python as an implicit dependency, so Python must installed in your image even if your training script does not have this dependency."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Environment\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"src_dir = 'commandstep_r'\n",
|
||||
"\n",
|
||||
"env = Environment.from_dockerfile(name='r_env', dockerfile=os.path.join(src_dir, 'Dockerfile'))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Configure input training dataset\n",
|
||||
"\n",
|
||||
"This tutorial uses data from the US National Highway Traffic Safety Administration. This dataset includes data from over 25,000 car crashes in the US, with variables you can use to predict the likelihood of a fatality. We have included an Rdata file that includes the accidents data for analysis.\n",
|
||||
"\n",
|
||||
"Here we use the workspace's default datastore to upload the training data file (**accidents.Rd**); in practice you can use any datastore you want."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"datastore = ws.get_default_datastore()\n",
|
||||
"data_ref = datastore.upload_files(files=[os.path.join(src_dir, 'accidents.Rd')], target_path='accidentdata')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now create a FileDataset from the data, which will be used as an input to the train step."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Dataset\n",
|
||||
"dataset = Dataset.File.from_files(datastore.path('accidentdata'))\n",
|
||||
"dataset"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now create a ScriptRunConfig that configures the training run. Note that in the `command` we include the input dataset for the training data.\n",
|
||||
"\n",
|
||||
"> For detailed guidance on how to move data in pipelines for input and output data, see the documentation [Moving data into and between ML pipelines](https://docs.microsoft.com/azure/machine-learning/how-to-move-data-in-out-of-pipelines)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import ScriptRunConfig\n",
|
||||
"\n",
|
||||
"train_config = ScriptRunConfig(source_directory=src_dir,\n",
|
||||
" command=['Rscript accidents.R --data_folder', dataset.as_mount(), '--output_folder outputs'],\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" environment=env)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now create a CommandStep and pass in the ScriptRunConfig object to the `runconfig` parameter."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.steps import CommandStep\n",
|
||||
"\n",
|
||||
"train = CommandStep(name='train', runconfig=train_config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Build and Submit the Pipeline"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.core import Pipeline\n",
|
||||
"from azureml.core import Experiment\n",
|
||||
"\n",
|
||||
"pipeline = Pipeline(workspace=ws, steps=[train])\n",
|
||||
"pipeline_run = Experiment(ws, 'r-commandstep-pipeline').submit(pipeline)"
|
||||
]
|
||||
},
|
||||
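If you prefer to block the notebook until the pipeline finishes rather than watching the widget below, `PipelineRun` exposes `wait_for_completion`; a minimal sketch:

```python
# Block until the pipeline run completes, streaming log output.
pipeline_run.wait_for_completion(show_output=True)
```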
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## View Run Details"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.widgets import RunDetails\n",
|
||||
"RunDetails(pipeline_run).show()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "minxia"
|
||||
}
|
||||
],
|
||||
"category": "tutorial",
|
||||
"compute": [
|
||||
"AML Compute"
|
||||
],
|
||||
"datasets": [
|
||||
"Custom"
|
||||
],
|
||||
"deployment": [
|
||||
"None"
|
||||
],
|
||||
"exclude_from_index": false,
|
||||
"framework": [
|
||||
"Azure ML"
|
||||
],
|
||||
"friendly_name": "Azure Machine Learning Pipeline with CommandStep for R",
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.7"
|
||||
},
|
||||
"order_index": 7,
|
||||
"star_tag": [
|
||||
"None"
|
||||
],
|
||||
"tags": [
|
||||
"None"
|
||||
],
|
||||
"task": "Demonstrates the use of CommandStep for running R scripts"
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
name: distributed-pytorch-with-nccl-gloo
|
||||
name: aml-pipelines-with-commandstep-r
|
||||
dependencies:
|
||||
- pip:
|
||||
- azureml-sdk
|
||||
@@ -20,15 +20,15 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to use EstimatorStep in AML Pipeline\n",
|
||||
"# How to use CommandStep in Azure ML Pipelines\n",
|
||||
"\n",
|
||||
"This notebook shows how to use the EstimatorStep with Azure Machine Learning Pipelines. Estimator is a convenient object in Azure Machine Learning that wraps run configuration information to help simplify the tasks of specifying how a script is executed.\n",
|
||||
"This notebook shows how to use the CommandStep with Azure Machine Learning Pipelines for running commands in steps. The example shows running distributed TensorFlow training from within a pipeline.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Prerequisite:\n",
|
||||
"* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n",
|
||||
"* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](https://aka.ms/pl-config) to:\n",
|
||||
" * install the AML SDK\n",
|
||||
" * install the Azure ML SDK\n",
|
||||
" * create a workspace and its configuration file (`config.json`)"
|
||||
]
|
||||
},
|
||||
@@ -77,7 +77,9 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create or Attach existing AmlCompute\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource."
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -100,75 +102,57 @@
|
||||
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||
"\n",
|
||||
"# choose a name for your cluster\n",
|
||||
"cluster_name = \"amlcomp\"\n",
|
||||
"cluster_name = \"gpu-cluster\"\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" cpu_cluster = ComputeTarget(workspace=ws, name=cluster_name)\n",
|
||||
" gpu_cluster = ComputeTarget(workspace=ws, name=cluster_name)\n",
|
||||
" print('Found existing compute target')\n",
|
||||
"except ComputeTargetException:\n",
|
||||
" print('Creating a new compute target...')\n",
|
||||
" compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', max_nodes=4)\n",
|
||||
"\n",
|
||||
" # create the cluster\n",
|
||||
" cpu_cluster = ComputeTarget.create(ws, cluster_name, compute_config)\n",
|
||||
" gpu_cluster = ComputeTarget.create(ws, cluster_name, compute_config)\n",
|
||||
"\n",
|
||||
" # can poll for a minimum number of nodes and for a specific timeout. \n",
|
||||
" # if no min node count is provided it uses the scale settings for the cluster\n",
|
||||
" cpu_cluster.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
|
||||
" gpu_cluster.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
|
||||
"\n",
|
||||
"# use get_status() to get a detailed status for the current cluster. \n",
|
||||
"print(cpu_cluster.get_status().serialize())"
|
||||
"print(gpu_cluster.get_status().serialize())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now that you have created the compute target, let's see what the workspace's `compute_targets` property returns. You should now see one entry named 'cpu-cluster' of type `AmlCompute`."
|
||||
"Now that you have created the compute target, let's see what the workspace's `compute_targets` property returns. You should now see one entry named 'gpu-cluster' of type `AmlCompute`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use a simple script\n",
|
||||
"We have already created a simple \"hello world\" script. This is the script that we will submit through the estimator pattern. It prints a hello-world message, and if Azure ML SDK is installed, it will also logs an array of values ([Fibonacci numbers](https://en.wikipedia.org/wiki/Fibonacci_number))."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Build an Estimator object\n",
|
||||
"Estimator by default will attempt to use Docker-based execution. You can also enable Docker and let estimator pick the default CPU image supplied by Azure ML for execution. You can target an AmlCompute cluster (or any other supported compute target types). You can also customize the conda environment by adding conda and/or pip packages.\n",
|
||||
"## Create a CommandStep\n",
|
||||
"CommandStep adds a step to run a command in a Pipeline. For the full set of configurable options see the CommandStep [reference docs](https://docs.microsoft.com/python/api/azureml-pipeline-steps/azureml.pipeline.steps.commandstep?view=azure-ml-py).\n",
|
||||
"\n",
|
||||
"> Note: The arguments to the entry script used in the Estimator object should be specified as *list* using\n",
|
||||
" 'estimator_entry_script_arguments' parameter when instantiating EstimatorStep. Estimator object's parameter\n",
|
||||
" 'script_params' accepts a dictionary. However 'estimator_entry_script_arguments' parameter expects arguments as\n",
|
||||
" a list.\n",
|
||||
"- **name:** Name of the step\n",
|
||||
"- **runconfig:** ScriptRunConfig object. You can configure a ScriptRunConfig object as you would for a standalone non-pipeline run and pass it in to this parameter. If using this option, you do not have to specify the `command`, `source_directory`, `compute_target` parameters of the CommandStep constructor as they are already defined in your ScriptRunConfig.\n",
|
||||
"- **runconfig_pipeline_params:** Override runconfig properties at runtime using key-value pairs each with name of the runconfig property and PipelineParameter for that property\n",
|
||||
"- **command:** The command to run or path of the executable/script relative to `source_directory`. It is required unless the `runconfig` parameter is specified. It can be specified with string arguments in a single string or with input/output/PipelineParameter in a list.\n",
|
||||
"- **source_directory:** A folder containing the script and other resources used in the step.\n",
|
||||
"- **compute_target:** Compute target to use \n",
|
||||
"- **allow_reuse:** Whether the step should reuse previous results when run with the same settings/inputs. If this is false, a new run will always be generated for this step during pipeline execution.\n",
|
||||
"- **version:** Optional version tag to denote a change in functionality for the step\n",
|
||||
"\n",
|
||||
"> Estimator object initialization involves specifying a list of data input and output.\n",
|
||||
" In Pipelines, a step can take another step's output as input. So when creating an EstimatorStep.\n",
|
||||
" \n",
|
||||
"> The best practice is to use separate folders for scripts and its dependent files for each step and specify that folder as the `source_directory` for the step. This helps reduce the size of the snapshot created for the step (only the specific folder is snapshotted). Since changes in any files in the `source_directory` would trigger a re-upload of the snapshot, this helps keep the reuse of the step when there are no changes in the `source_directory` of the step."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": [
|
||||
"datareference-remarks-sample"
|
||||
]
|
||||
},
|
||||
"outputs": [],
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"from azureml.core import Datastore\n",
|
||||
"\n",
|
||||
"def_blob_store = Datastore(ws, \"workspaceblobstore\")\n",
|
||||
"\n",
|
||||
"#upload input data to workspaceblobstore\n",
|
||||
"def_blob_store.upload_files(files=['20news.pkl'], target_path='20newsgroups', overwrite=True)"
|
||||
"First define the environment that you want to step to run in. This example users a curated TensorFlow environment, but in practice you can configure any environment you want."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -177,46 +161,46 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Dataset\n",
|
||||
"from azureml.data import OutputFileDatasetConfig\n",
|
||||
"from azureml.core import Environment\n",
|
||||
"\n",
|
||||
"# create dataset to be used as the input to estimator step\n",
|
||||
"input_data = Dataset.File.from_files(def_blob_store.path('20newsgroups/20news.pkl'))\n",
|
||||
"\n",
|
||||
"# OutputFileDatasetConfig by default write output to the default workspaceblobstore\n",
|
||||
"output = OutputFileDatasetConfig()\n",
|
||||
"\n",
|
||||
"source_directory = 'estimator_train'"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.train.estimator import Estimator\n",
|
||||
"\n",
|
||||
"est = Estimator(source_directory=source_directory, \n",
|
||||
" compute_target=cpu_cluster, \n",
|
||||
" entry_script='dummy_train.py', \n",
|
||||
" conda_packages=['scikit-learn'])"
|
||||
"tf_env = Environment.get(ws, name='AzureML-TensorFlow-2.3-GPU')"
|
||||
]
|
||||
},
|
||||
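If you want a different curated environment, you can browse what is available; a minimal sketch assuming the `ws` workspace from above:

```python
# List curated (AzureML-prefixed) environments available in the workspace.
for name in Environment.list(workspace=ws):
    if name.startswith('AzureML-'):
        print(name)
```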
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create an EstimatorStep\n",
|
||||
"[EstimatorStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.estimator_step.estimatorstep?view=azure-ml-py) adds a step to run Estimator in a Pipeline.\n",
|
||||
"This example will first create a ScriptRunConfig object that configures the training job. Since we are running a distributed job, specify the `distributed_job_config` parameter. If you are just running a single-node job, omit that parameter.\n",
|
||||
"\n",
|
||||
"- **name:** Name of the step\n",
|
||||
"- **estimator:** Estimator object\n",
|
||||
"- **estimator_entry_script_arguments:** A list of command-line arguments\n",
|
||||
"- **runconfig_pipeline_params:** Override runconfig properties at runtime using key-value pairs each with name of the runconfig property and PipelineParameter for that property\n",
|
||||
"- **compute_target:** Compute target to use \n",
|
||||
"- **allow_reuse:** Whether the step should reuse previous results when run with the same settings/inputs. If this is false, a new run will always be generated for this step during pipeline execution.\n",
|
||||
"- **version:** Optional version tag to denote a change in functionality for the step"
|
||||
"> If you have an input dataset you want to use in this step, you can specify that as part of the command. For example, if you have a FileDataset object called `dataset` and a `--data-dir` script argument, you can do the following: `command=['python train.py --epochs 30 --data-dir', dataset.as_mount()]`.\n",
|
||||
"\n",
|
||||
"> For detailed guidance on how to move data in pipelines for input and output data, see the documentation [Moving data into and between ML pipelines](https://docs.microsoft.com/azure/machine-learning/how-to-move-data-in-out-of-pipelines)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import ScriptRunConfig\n",
|
||||
"from azureml.core.runconfig import MpiConfiguration\n",
|
||||
"\n",
|
||||
"src_dir = 'commandstep_train'\n",
|
||||
"distr_config = MpiConfiguration(node_count=2) # you can also specify the process_count_per_node parameter for multi-process-per-node training\n",
|
||||
"\n",
|
||||
"src = ScriptRunConfig(source_directory=src_dir,\n",
|
||||
" command=['python train.py --epochs 30'],\n",
|
||||
" compute_target=gpu_cluster,\n",
|
||||
" environment=tf_env,\n",
|
||||
" distributed_job_config=distr_config)"
|
||||
]
|
||||
},
|
||||
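As the comment in the cell above notes, `MpiConfiguration` also accepts `process_count_per_node` for launching multiple processes per node; a hedged variant:

```python
# Hypothetical variant: 2 nodes with 4 processes each (e.g. 4 GPUs per node).
distr_config = MpiConfiguration(process_count_per_node=4, node_count=2)
```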
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now create a CommandStep and pass in the ScriptRunConfig object to the `runconfig` parameter."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -229,20 +213,16 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.steps import EstimatorStep\n",
|
||||
"from azureml.pipeline.steps import CommandStep\n",
|
||||
"\n",
|
||||
"est_step = EstimatorStep(name=\"Estimator_Train\", \n",
|
||||
" estimator=est, \n",
|
||||
" estimator_entry_script_arguments=[\"--datadir\", input_data.as_mount(), \"--output\", output],\n",
|
||||
" runconfig_pipeline_params=None, \n",
|
||||
" compute_target=cpu_cluster)"
|
||||
"train = CommandStep(name='train-mnist', runconfig=src)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Build and Submit the Experiment"
|
||||
"## Build and Submit the Pipeline"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -253,8 +233,9 @@
|
||||
"source": [
|
||||
"from azureml.pipeline.core import Pipeline\n",
|
||||
"from azureml.core import Experiment\n",
|
||||
"pipeline = Pipeline(workspace=ws, steps=[est_step])\n",
|
||||
"pipeline_run = Experiment(ws, 'Estimator_sample').submit(pipeline)"
|
||||
"\n",
|
||||
"pipeline = Pipeline(workspace=ws, steps=[train])\n",
|
||||
"pipeline_run = Experiment(ws, 'train-commandstep-pipeline').submit(pipeline)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -295,7 +276,7 @@
|
||||
"framework": [
|
||||
"Azure ML"
|
||||
],
|
||||
"friendly_name": "Azure Machine Learning Pipeline with EstimatorStep",
|
||||
"friendly_name": "Azure Machine Learning Pipeline with CommandStep",
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
@@ -311,7 +292,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.7"
|
||||
"version": "3.7.7"
|
||||
},
|
||||
"order_index": 7,
|
||||
"star_tag": [
|
||||
@@ -320,7 +301,7 @@
|
||||
"tags": [
|
||||
"None"
|
||||
],
|
||||
"task": "Demonstrates the use of EstimatorStep"
|
||||
"task": "Demonstrates the use of CommandStep"
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
@@ -1,4 +1,4 @@
|
||||
name: day1-part2-hello-world
|
||||
name: aml-pipelines-with-commandstep
|
||||
dependencies:
|
||||
- pip:
|
||||
- azureml-sdk
|
||||
@@ -134,7 +134,9 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Retrieve or create an Aml compute\n",
|
||||
"Azure Machine Learning Compute is a service for provisioning and managing clusters of Azure virtual machines for running machine learning workloads. Let's get the default Aml Compute in the current workspace. We will then run the training script on this compute target."
|
||||
"Azure Machine Learning Compute is a service for provisioning and managing clusters of Azure virtual machines for running machine learning workloads. Let's get the default Aml Compute in the current workspace. We will then run the training script on this compute target.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -428,7 +430,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pipeline_run1 = Experiment(ws, 'Data_dependency').submit(pipeline1)\n",
|
||||
"pipeline_run1 = Experiment(ws, 'Data_dependency_sample').submit(pipeline1)\n",
|
||||
"print(\"Pipeline is submitted for execution\")"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -147,7 +147,9 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create or Attach an AmlCompute cluster\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.computetarget?view=azure-ml-py) for your remote run. In this tutorial, you get the default `AmlCompute` as your training compute resource."
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.computetarget?view=azure-ml-py) for your remote run. In this tutorial, you get the default `AmlCompute` as your training compute resource.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -0,0 +1,11 @@
|
||||
FROM rocker/tidyverse:4.0.0-ubuntu18.04
|
||||
|
||||
# Install python
|
||||
RUN apt-get update -qq && \
|
||||
apt-get install -y python3
|
||||
|
||||
# Create link for python
|
||||
RUN ln -f /usr/bin/python3 /usr/bin/python
|
||||
|
||||
# Install additional R packages
|
||||
RUN R -e "install.packages(c('optparse'), repos = 'https://cloud.r-project.org/')"
|
||||
@@ -0,0 +1,34 @@
|
||||
#' Copyright(c) Microsoft Corporation.
|
||||
#' Licensed under the MIT license.
|
||||
|
||||
library(optparse)
|
||||
|
||||
options <- list(
|
||||
make_option(c("-d", "--data_folder")),
|
||||
make_option(c("--output_folder"))
|
||||
|
||||
)
|
||||
|
||||
opt_parser <- OptionParser(option_list = options)
|
||||
opt <- parse_args(opt_parser)
|
||||
|
||||
paste(opt$data_folder)
|
||||
|
||||
accidents <- readRDS(file.path(opt$data_folder, "accidents.Rd"))
|
||||
summary(accidents)
|
||||
|
||||
mod <- glm(dead ~ dvcat + seatbelt + frontal + sex + ageOFocc + yearVeh + airbag + occRole, family=binomial, data=accidents)
|
||||
summary(mod)
|
||||
predictions <- factor(ifelse(predict(mod)>0.1, "dead","alive"))
|
||||
accuracy <- mean(predictions == accidents$dead)
|
||||
|
||||
# make directory for output dir
|
||||
output_dir = opt$output_folder
|
||||
if (!dir.exists(output_dir)){
|
||||
dir.create(output_dir)
|
||||
}
|
||||
|
||||
# save model
|
||||
model_path = file.path(output_dir, "model.rds")
|
||||
saveRDS(mod, file = model_path)
|
||||
message("Model saved")
|
||||
Binary file not shown.
@@ -0,0 +1,8 @@
|
||||
channels:
|
||||
- conda-forge
|
||||
dependencies:
|
||||
- python=3.7
|
||||
- pip:
|
||||
- azureml-defaults
|
||||
- tensorflow-gpu==2.3.0
|
||||
- horovod==0.19.5
|
||||
@@ -0,0 +1,120 @@
|
||||
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Script adapted from: https://github.com/horovod/horovod/blob/master/examples/tensorflow2_keras_mnist.py
|
||||
# ==============================================================================
|
||||
|
||||
import tensorflow as tf
|
||||
import horovod.tensorflow.keras as hvd
|
||||
|
||||
import os
|
||||
import argparse
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--learning-rate", "-lr", type=float, default=0.001)
|
||||
parser.add_argument("--epochs", type=int, default=24)
|
||||
parser.add_argument("--steps-per-epoch", type=int, default=500)
|
||||
args = parser.parse_args()
|
||||
|
||||
# Horovod: initialize Horovod.
|
||||
hvd.init()
|
||||
|
||||
# Horovod: pin GPU to be used to process local rank (one GPU per process)
|
||||
gpus = tf.config.experimental.list_physical_devices("GPU")
|
||||
for gpu in gpus:
|
||||
tf.config.experimental.set_memory_growth(gpu, True)
|
||||
if gpus:
|
||||
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], "GPU")
|
||||
|
||||
(mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data(
|
||||
path="mnist-%d.npz" % hvd.rank()
|
||||
)
|
||||
|
||||
dataset = tf.data.Dataset.from_tensor_slices(
|
||||
(
|
||||
tf.cast(mnist_images[..., tf.newaxis] / 255.0, tf.float32),
|
||||
tf.cast(mnist_labels, tf.int64),
|
||||
)
|
||||
)
|
||||
dataset = dataset.repeat().shuffle(10000).batch(128)
|
||||
|
||||
mnist_model = tf.keras.Sequential(
|
||||
[
|
||||
tf.keras.layers.Conv2D(32, [3, 3], activation="relu"),
|
||||
tf.keras.layers.Conv2D(64, [3, 3], activation="relu"),
|
||||
tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
|
||||
tf.keras.layers.Dropout(0.25),
|
||||
tf.keras.layers.Flatten(),
|
||||
tf.keras.layers.Dense(128, activation="relu"),
|
||||
tf.keras.layers.Dropout(0.5),
|
||||
tf.keras.layers.Dense(10, activation="softmax"),
|
||||
]
|
||||
)
|
||||
|
||||
# Horovod: adjust learning rate based on number of GPUs.
|
||||
scaled_lr = args.learning_rate * hvd.size()
|
||||
opt = tf.optimizers.Adam(scaled_lr)
|
||||
|
||||
# Horovod: add Horovod DistributedOptimizer.
|
||||
opt = hvd.DistributedOptimizer(opt)
|
||||
|
||||
# Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow
|
||||
# uses hvd.DistributedOptimizer() to compute gradients.
|
||||
mnist_model.compile(
|
||||
loss=tf.losses.SparseCategoricalCrossentropy(),
|
||||
optimizer=opt,
|
||||
metrics=["accuracy"],
|
||||
experimental_run_tf_function=False,
|
||||
)
|
||||
|
||||
callbacks = [
|
||||
# Horovod: broadcast initial variable states from rank 0 to all other processes.
|
||||
# This is necessary to ensure consistent initialization of all workers when
|
||||
# training is started with random weights or restored from a checkpoint.
|
||||
hvd.callbacks.BroadcastGlobalVariablesCallback(0),
|
||||
# Horovod: average metrics among workers at the end of every epoch.
|
||||
#
|
||||
# Note: This callback must be in the list before the ReduceLROnPlateau,
|
||||
# TensorBoard or other metrics-based callbacks.
|
||||
hvd.callbacks.MetricAverageCallback(),
|
||||
# Horovod: using `lr = 1.0 * hvd.size()` from the very beginning leads to worse final
|
||||
# accuracy. Scale the learning rate `lr = 1.0` ---> `lr = 1.0 * hvd.size()` during
|
||||
# the first three epochs. See https://arxiv.org/abs/1706.02677 for details.
|
||||
hvd.callbacks.LearningRateWarmupCallback(
|
||||
warmup_epochs=3, initial_lr=scaled_lr, verbose=1
|
||||
),
|
||||
]
|
||||
|
||||
# Horovod: save checkpoints only on worker 0 to prevent other workers from corrupting them.
|
||||
if hvd.rank() == 0:
|
||||
output_dir = "./outputs"
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
callbacks.append(
|
||||
tf.keras.callbacks.ModelCheckpoint(
|
||||
os.path.join(output_dir, "checkpoint-{epoch}.h5")
|
||||
)
|
||||
)
|
||||
|
||||
# Horovod: write logs on worker 0.
|
||||
verbose = 1 if hvd.rank() == 0 else 0
|
||||
|
||||
# Train the model.
|
||||
# Horovod: adjust number of steps based on number of GPUs.
|
||||
mnist_model.fit(
|
||||
dataset,
|
||||
steps_per_epoch=args.steps_per_epoch // hvd.size(),
|
||||
callbacks=callbacks,
|
||||
epochs=args.epochs,
|
||||
verbose=verbose,
|
||||
)
|
||||
@@ -1,30 +0,0 @@
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License.
|
||||
import argparse
|
||||
import os
|
||||
|
||||
print("*********************************************************")
|
||||
print("Hello Azure ML!")
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--datadir', type=str, help="data directory")
|
||||
parser.add_argument('--output', type=str, help="output")
|
||||
args = parser.parse_args()
|
||||
|
||||
print("Argument 1: %s" % args.datadir)
|
||||
print("Argument 2: %s" % args.output)
|
||||
|
||||
if not (args.output is None):
|
||||
os.makedirs(args.output, exist_ok=True)
|
||||
print("%s created" % args.output)
|
||||
|
||||
try:
|
||||
from azureml.core import Run
|
||||
run = Run.get_context()
|
||||
print("Log Fibonacci numbers.")
|
||||
run.log_list('Fibonacci numbers', [0, 1, 1, 2, 3, 5, 8, 13, 21, 34])
|
||||
run.complete()
|
||||
except:
|
||||
print("Warning: you need to install Azure ML SDK in order to log metrics.")
|
||||
|
||||
print("*********************************************************")
|
||||
@@ -22,3 +22,6 @@ print("Argument 4: %s" % args.pipeline_param)
|
||||
if not (args.output_compare is None):
|
||||
os.makedirs(args.output_compare, exist_ok=True)
|
||||
print("%s created" % args.output_compare)
|
||||
|
||||
with open(os.path.join(args.output_compare, 'compare.txt'), 'w') as fw:
|
||||
fw.write('here is the compare result')
|
||||
|
||||
@@ -19,3 +19,8 @@ print("Argument 2: %s" % args.output_extract)
|
||||
if not (args.output_extract is None):
|
||||
os.makedirs(args.output_extract, exist_ok=True)
|
||||
print("%s created" % args.output_extract)
|
||||
|
||||
with open(os.path.join(args.input_extract, '20news.pkl'), 'rb') as f:
|
||||
content = f.read()
|
||||
with open(os.path.join(args.output_extract, '20news.pkl'), 'wb') as fw:
|
||||
fw.write(content)
|
||||
|
||||
@@ -20,3 +20,8 @@ print("Argument 2: %s" % args.output_train)
|
||||
if not (args.output_train is None):
|
||||
os.makedirs(args.output_train, exist_ok=True)
|
||||
print("%s created" % args.output_train)
|
||||
|
||||
with open(os.path.join(args.input_data, '20news.pkl'), 'rb') as f:
|
||||
content = f.read()
|
||||
with open(os.path.join(args.output_train, '20news.pkl'), 'wb') as fw:
|
||||
fw.write(content)
|
||||
|
||||
@@ -225,7 +225,9 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Setup Compute\n",
|
||||
"#### Create new or use an existing compute"
|
||||
"#### Create new or use an existing compute\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -24,9 +24,9 @@
|
||||
"In this notebook, we will demonstrate how to make predictions on large quantities of data asynchronously using the ML pipelines with Azure Machine Learning. Batch inference (or batch scoring) provides cost-effective inference, with unparalleled throughput for asynchronous applications. Batch prediction pipelines can scale to perform inference on terabytes of production data. Batch prediction is optimized for high throughput, fire-and-forget predictions for a large collection of data.\n",
|
||||
"\n",
|
||||
"> **Tip**\n",
|
||||
"If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction.\n",
|
||||
"If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction.\n",
|
||||
"\n",
|
||||
"In this example will be take a digit identification model already-trained on MNIST dataset using the [AzureML training with deep learning example notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb), and run that trained model on some of the MNIST test images in batch. \n",
|
||||
"In this example will be take a digit identification model already-trained on MNIST dataset using the [AzureML training with deep learning example notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb), and run that trained model on some of the MNIST test images in batch. \n",
|
||||
"\n",
|
||||
"The input dataset used for this notebook differs from a standard MNIST dataset in that it has been converted to PNG images to demonstrate use of files as inputs to Batch Inference. A sample of PNG-converted images of the MNIST dataset were take from [this repository](https://github.com/myleott/mnist_png). \n",
|
||||
"\n",
|
||||
@@ -86,6 +86,8 @@
|
||||
"### Create or Attach existing compute resource\n",
|
||||
"By using Azure Machine Learning Compute, a managed service, data scientists can train machine learning models on clusters of Azure virtual machines. Examples include VMs with GPU support. In this tutorial, you create Azure Machine Learning Compute as your training environment. The code below creates the compute clusters for you if they don't already exist in your workspace.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||
"\n",
|
||||
"**Creation of compute takes approximately 5 minutes. If the AmlCompute with that name is already in your workspace the code will skip the creation process.**"
|
||||
]
|
||||
},
|
||||
@@ -180,8 +182,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create a FileDataset\n",
|
||||
"A [FileDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.filedataset?view=azure-ml-py) references single or multiple files in your datastores or public urls. The files can be of any format. FileDataset provides you with the ability to download or mount the files to your compute. By creating a dataset, you create a reference to the data source location. If you applied any subsetting transformations to the dataset, they will be stored in the dataset as well. The data remains in its existing location, so no extra storage cost is incurred.",
|
||||
"\n",
|
||||
"A [FileDataset](https://docs.microsoft.com/python/api/azureml-core/azureml.data.filedataset?view=azure-ml-py) references single or multiple files in your datastores or public urls. The files can be of any format. FileDataset provides you with the ability to download or mount the files to your compute. By creating a dataset, you create a reference to the data source location. If you applied any subsetting transformations to the dataset, they will be stored in the dataset as well. The data remains in its existing location, so no extra storage cost is incurred.\n",
|
||||
"You can use dataset objects as inputs. Register the datasets to the workspace if you want to reuse them later."
|
||||
]
|
||||
},
|
||||
@@ -224,7 +225,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Intermediate/Output Data\n",
|
||||
"Intermediate data (or output of a Step) is represented by [PipelineData](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelinedata?view=azure-ml-py) object. PipelineData can be produced by one step and consumed in another step by providing the PipelineData object as an output of one step and the input of one or more steps."
|
||||
"Intermediate data (or output of a Step) is represented by [PipelineData](https://docs.microsoft.com/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelinedata?view=azure-ml-py) object. PipelineData can be produced by one step and consumed in another step by providing the PipelineData object as an output of one step and the input of one or more steps."
|
||||
]
|
||||
},
|
||||
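A minimal sketch of declaring such intermediate data (the name is hypothetical):

```python
from azureml.pipeline.core import PipelineData

# Intermediate data written by one step and consumed by the next.
processed_data = PipelineData("processed_data",
                              datastore=ws.get_default_datastore())
```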
{
|
||||
@@ -276,7 +277,7 @@
|
||||
"### Register the model with Workspace\n",
|
||||
"A registered model is a logical container for one or more files that make up your model. For example, if you have a model that's stored in multiple files, you can register them as a single model in the workspace. After you register the files, you can then download or deploy the registered model and receive all the files that you registered.\n",
|
||||
"\n",
|
||||
"Using tags, you can track useful information such as the name and version of the machine learning library used to train the model. Note that tags must be alphanumeric. Learn more about registering models [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where#registermodel) "
|
||||
"Using tags, you can track useful information such as the name and version of the machine learning library used to train the model. Note that tags must be alphanumeric. Learn more about registering models [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-deploy-and-where#registermodel) "
|
||||
]
|
||||
},
|
||||
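A hedged sketch of such a registration (the path, name, and tags are hypothetical):

```python
from azureml.core.model import Model

# Register one or more model files as a single logical model, with tags.
model = Model.register(workspace=ws,
                       model_path='models/mnist',      # local file or folder
                       model_name='mnistclassifier',
                       tags={'framework': 'keras'})
```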
{
|
||||
@@ -362,7 +363,6 @@
|
||||
" \"azureml-core\", \"azureml-dataset-runtime[fuse]\"])\n",
|
||||
"batch_env = Environment(name=\"batch_environment\")\n",
|
||||
"batch_env.python.conda_dependencies = batch_conda_deps\n",
|
||||
"batch_env.docker.enabled = True\n",
|
||||
"batch_env.docker.base_image = DEFAULT_CPU_IMAGE"
|
||||
]
|
||||
},
|
||||
@@ -379,7 +379,6 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.pipeline.core import PipelineParameter\n",
|
||||
"from azureml.pipeline.steps import ParallelRunStep, ParallelRunConfig\n",
|
||||
"\n",
|
||||
"parallel_run_config = ParallelRunConfig(\n",
|
||||
|
||||
@@ -24,7 +24,7 @@
|
||||
"In this notebook, we will demonstrate how to make predictions on large quantities of data asynchronously using the ML pipelines with Azure Machine Learning. Batch inference (or batch scoring) provides cost-effective inference, with unparalleled throughput for asynchronous applications. Batch prediction pipelines can scale to perform inference on terabytes of production data. Batch prediction is optimized for high throughput, fire-and-forget predictions for a large collection of data.\n",
|
||||
"\n",
|
||||
"> **Tip**\n",
|
||||
"If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction.\n",
|
||||
"If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction.\n",
|
||||
"\n",
|
||||
"In this example we will take use a machine learning model already trained to predict different types of iris flowers and run that trained model on some of the data in a CSV file which has characteristics of different iris flowers. However, the same example can be extended to manipulating data to any embarrassingly-parallel processing through a python script.\n",
|
||||
"\n",
|
||||
@@ -84,6 +84,8 @@
|
||||
"### Create or Attach existing compute resource\n",
|
||||
"By using Azure Machine Learning Compute, a managed service, data scientists can train machine learning models on clusters of Azure virtual machines. Examples include VMs with GPU support. In this tutorial, you create Azure Machine Learning Compute as your training environment. The code below creates the compute clusters for you if they don't already exist in your workspace.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||
"\n",
|
||||
"**Creation of compute takes approximately 5 minutes. If the AmlCompute with that name is already in your workspace the code will skip the creation process.**"
|
||||
]
|
||||
},
|
||||
@@ -160,7 +162,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create a TabularDataset\n",
|
||||
"A [TabularDataSet](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) references single or multiple files which contain data in a tabular structure (ie like CSV files) in your datastores or public urls. TabularDatasets provides you with the ability to download or mount the files to your compute. By creating a dataset, you create a reference to the data source location. If you applied any subsetting transformations to the dataset, they will be stored in the dataset as well. The data remains in its existing location, so no extra storage cost is incurred.\n",
|
||||
"A [TabularDataSet](https://docs.microsoft.com/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) references single or multiple files which contain data in a tabular structure (ie like CSV files) in your datastores or public urls. TabularDatasets provides you with the ability to download or mount the files to your compute. By creating a dataset, you create a reference to the data source location. If you applied any subsetting transformations to the dataset, they will be stored in the dataset as well. The data remains in its existing location, so no extra storage cost is incurred.\n",
|
||||
"You can use dataset objects as inputs. Register the datasets to the workspace if you want to reuse them later."
|
||||
]
|
||||
},
|
||||
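A minimal sketch of creating one from a CSV file on a datastore (the path is hypothetical):

```python
from azureml.core import Dataset

# Create a TabularDataset from a delimited file on the default datastore.
datastore = ws.get_default_datastore()
iris_ds = Dataset.Tabular.from_delimited_files(path=(datastore, 'iris/iris.csv'))
```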
@@ -184,7 +186,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Intermediate/Output Data\n",
|
||||
"Intermediate data (or output of a Step) is represented by [PipelineData](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelinedata?view=azure-ml-py) object. PipelineData can be produced by one step and consumed in another step by providing the PipelineData object as an output of one step and the input of one or more steps."
|
||||
"Intermediate data (or output of a Step) is represented by [PipelineData](https://docs.microsoft.com/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelinedata?view=azure-ml-py) object. PipelineData can be produced by one step and consumed in another step by providing the PipelineData object as an output of one step and the input of one or more steps."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -311,7 +313,6 @@
|
||||
"\n",
|
||||
"predict_env = Environment(name=\"predict_environment\")\n",
|
||||
"predict_env.python.conda_dependencies = predict_conda_deps\n",
|
||||
"predict_env.docker.enabled = True\n",
|
||||
"predict_env.spark.precache_packages = False"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -81,12 +81,12 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
|
||||
"from azureml.core.datastore import Datastore\n",
|
||||
"from azureml.data.data_reference import DataReference\n",
|
||||
"from azureml.pipeline.core import Pipeline, PipelineData\n",
|
||||
"from azureml.core import Datastore, Dataset\n",
|
||||
"from azureml.pipeline.core import Pipeline\n",
|
||||
"from azureml.pipeline.steps import PythonScriptStep\n",
|
||||
"from azureml.core.runconfig import CondaDependencies, RunConfiguration\n",
|
||||
"from azureml.core.compute_target import ComputeTargetException"
|
||||
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||
"from azureml.data import OutputFileDatasetConfig"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -178,7 +178,9 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Create or use existing compute"
|
||||
"# Create or use existing compute\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -297,9 +299,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"video_name=os.getenv(\"STYLE_TRANSFER_VIDEO_NAME\", \"orangutan.mp4\") \n",
|
||||
"orangutan_video = DataReference(datastore=video_ds,\n",
|
||||
" data_reference_name=\"video\",\n",
|
||||
" path_on_datastore=video_name, mode=\"download\")"
|
||||
"orangutan_video = Dataset.File.from_files((video_ds,video_name))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -325,13 +325,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ffmpeg_audio = PipelineData(name=\"ffmpeg_audio\", datastore=default_datastore)\n",
|
||||
"processed_images = PipelineData(name=\"processed_images\", datastore=default_datastore)\n",
|
||||
"output_video = PipelineData(name=\"output_video\", datastore=default_datastore)\n",
|
||||
"ffmpeg_audio = OutputFileDatasetConfig(name=\"ffmpeg_audio\")\n",
|
||||
"processed_images = OutputFileDatasetConfig(name=\"processed_images\")\n",
|
||||
"output_video = OutputFileDatasetConfig(name=\"output_video\")\n",
|
||||
"\n",
|
||||
"ffmpeg_images_ds_name = \"ffmpeg_images_data\"\n",
|
||||
"ffmpeg_images = PipelineData(name=\"ffmpeg_images\", datastore=default_datastore)\n",
|
||||
"ffmpeg_images_file_dataset = ffmpeg_images.as_dataset()"
|
||||
"ffmpeg_images = OutputFileDatasetConfig(name=\"ffmpeg_images\")"
|
||||
]
|
||||
},
|
||||
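The `OutputFileDatasetConfig` objects above replace the older `PipelineData` pattern; a hedged sketch of how one step's output feeds the next (names are hypothetical):

```python
from azureml.data import OutputFileDatasetConfig

# Output of an upstream step...
step_output = OutputFileDatasetConfig(name="step_output")
# ...consumed by a downstream step as a named input.
step_input = step_output.as_input(name="step_input")
```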
{
|
||||
@@ -367,13 +365,10 @@
|
||||
"split_video_step = PythonScriptStep(\n",
|
||||
" name=\"split video\",\n",
|
||||
" script_name=\"process_video.py\",\n",
|
||||
" arguments=[\"--input_video\", orangutan_video,\n",
|
||||
" arguments=[\"--input_video\", orangutan_video.as_mount(),\n",
|
||||
" \"--output_audio\", ffmpeg_audio,\n",
|
||||
" \"--output_images\", ffmpeg_images_file_dataset,\n",
|
||||
" ],\n",
|
||||
" \"--output_images\", ffmpeg_images],\n",
|
||||
" compute_target=cpu_cluster,\n",
|
||||
" inputs=[orangutan_video],\n",
|
||||
" outputs=[ffmpeg_images_file_dataset, ffmpeg_audio],\n",
|
||||
" runconfig=amlcompute_run_config,\n",
|
||||
" source_directory=scripts_folder\n",
|
||||
")\n",
|
||||
@@ -381,12 +376,10 @@
|
||||
"stitch_video_step = PythonScriptStep(\n",
|
||||
" name=\"stitch\",\n",
|
||||
" script_name=\"stitch_video.py\",\n",
|
||||
" arguments=[\"--images_dir\", processed_images, \n",
|
||||
" \"--input_audio\", ffmpeg_audio, \n",
|
||||
" arguments=[\"--images_dir\", processed_images.as_input(), \n",
|
||||
" \"--input_audio\", ffmpeg_audio.as_input(), \n",
|
||||
" \"--output_dir\", output_video],\n",
|
||||
" compute_target=cpu_cluster,\n",
|
||||
" inputs=[processed_images, ffmpeg_audio],\n",
|
||||
" outputs=[output_video],\n",
|
||||
" runconfig=amlcompute_run_config,\n",
|
||||
" source_directory=scripts_folder\n",
|
||||
")"
|
||||
@@ -415,7 +408,6 @@
|
||||
"parallel_cd.add_conda_package(\"torchvision\")\n",
|
||||
"parallel_cd.add_conda_package(\"pillow<7\") # needed for torchvision==0.4.0\n",
|
||||
"parallel_cd.add_pip_package(\"azureml-core\")\n",
|
||||
"parallel_cd.add_pip_package(\"azureml-dataset-runtime[fuse]\")\n",
|
||||
"\n",
|
||||
"styleenvironment = Environment(name=\"styleenvironment\")\n",
|
||||
"styleenvironment.python.conda_dependencies=parallel_cd\n",
|
||||
@@ -457,7 +449,7 @@
|
||||
"\n",
|
||||
"distributed_style_transfer_step = ParallelRunStep(\n",
|
||||
" name=parallel_step_name,\n",
|
||||
" inputs=[ffmpeg_images_file_dataset], # Input file share/blob container/file dataset\n",
|
||||
" inputs=[ffmpeg_images], # Input file share/blob container/file dataset\n",
|
||||
" output=processed_images, # Output file share/blob container\n",
|
||||
" arguments=[\"--style\", style_param],\n",
|
||||
" parallel_run_config=parallel_run_config,\n",
|
||||
@@ -552,8 +544,8 @@
|
||||
"source": [
|
||||
"def download_video(run, target_dir=None):\n",
|
||||
" stitch_run = run.find_step_run(stitch_video_step.name)[0]\n",
|
||||
" port_data = stitch_run.get_output_data(output_video.name)\n",
|
||||
" port_data.download(target_dir, show_progress=True)"
|
||||
" port_data = stitch_run.get_details()['outputDatasets'][0]['dataset']\n",
|
||||
" port_data.download(target_dir)"
|
||||
]
|
||||
},
|
||||
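A hypothetical usage sketch, once the pipeline run (here assumed to be `pipeline_run`) has completed:

```python
# Download the stitched video produced by the 'stitch' step.
download_video(pipeline_run, target_dir='./output_video')
```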
{
|
||||
|
||||
@@ -28,6 +28,7 @@
|
||||
" 2. Azure CLI Authentication\n",
|
||||
" 3. Managed Service Identity (MSI) Authentication\n",
|
||||
" 4. Service Principal Authentication\n",
|
||||
" 5. Token Authentication\n",
|
||||
" \n",
|
||||
"The interactive authentication is suitable for local experimentation on your own computer. Azure CLI authentication is suitable if you are already using Azure CLI for managing Azure resources, and want to sign in only once. The MSI and Service Principal authentication are suitable for automated workflows, for example as part of Azure Devops build."
|
||||
]
|
||||
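For completeness, a minimal sketch of the interactive option mentioned above (the tenant id is a placeholder):

```python
from azureml.core import Workspace
from azureml.core.authentication import InteractiveLoginAuthentication

# Interactive login, optionally pinned to a specific tenant.
interactive_auth = InteractiveLoginAuthentication(tenant_id="my-tenant-id")
ws = Workspace.from_config(auth=interactive_auth)
```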
@@ -319,6 +320,66 @@
|
||||
"See [Register an application with the Microsoft identity platform](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app) quickstart for more details about application registrations. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Token Authentication\n",
|
||||
"\n",
|
||||
"When token generation and its refresh needs to be outside on AML SDK, we recommend using Token Authentication. It can be used for getting token for AML or ARM audience. Thus giving more granular control over token generated.\n",
|
||||
"\n",
|
||||
"This authentication class requires users to provide method `get_token_for_audience` which will be called to retrieve the token based on the audience passed.\n",
|
||||
"\n",
|
||||
"Audience that is passed to `get_token_for_audience` can be ARM or AML. Exact value that will be passed as audience will depend on cloud and type for audience."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.authentication import TokenAuthentication, Audience\n",
|
||||
"\n",
|
||||
"# This is a sample method to retrieve token and will be passed to TokenAuthentication\n",
|
||||
"def get_token_for_audience(audience):\n",
|
||||
" from adal import AuthenticationContext\n",
|
||||
" client_id = \"my-client-id\"\n",
|
||||
" client_secret = \"my-client-secret\"\n",
|
||||
" tenant_id = \"my-tenant-id\"\n",
|
||||
" auth_context = AuthenticationContext(\"https://login.microsoftonline.com/{}\".format(tenant_id))\n",
|
||||
" resp = auth_context.acquire_token_with_client_credentials(audience,client_id,client_secret)\n",
|
||||
" token = resp[\"accessToken\"]\n",
|
||||
" return token\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"token_auth = TokenAuthentication(get_token_for_audience=get_token_for_audience)\n",
|
||||
"\n",
|
||||
"ws = Workspace(\n",
|
||||
" subscription_id=\"my-subscription-id\",\n",
|
||||
" resource_group=\"my-ml-rg\",\n",
|
||||
" workspace_name=\"my-ml-workspace\",\n",
|
||||
" auth=token_auth\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(\"Found workspace {} at location {}\".format(ws.name, ws.location))\n",
|
||||
"\n",
|
||||
"token_aml_audience = token_auth.get_token(Audience.aml)\n",
|
||||
"token_arm_audience = token_auth.get_token(Audience.arm)\n",
|
||||
"\n",
|
||||
"# Value of audience pass to `get_token_for_audience` can be retrieved as follows:\n",
|
||||
"# aud_aml_val = token_auth.get_aml_resource_id() # For AML\n",
|
||||
"# aud_arm_val = token_auth._cloud_type.endpoints.active_directory_resource_id # For ARM\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Token authentication object can be used to retrieve token for either AML or ARM audience,\n",
|
||||
"which can be used by other clients to authenticate to AML or ARM"
|
||||
]
|
||||
},
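A short sketch of what that might look like for the ARM audience, using plain `requests`. This call is illustrative only, and it assumes the object returned by `get_token` exposes the raw string as a `.token` attribute:

import requests

arm_token = token_auth.get_token(Audience.arm).token  # assumption: AccessToken-like object
resp = requests.get(
    "https://management.azure.com/subscriptions?api-version=2020-01-01",
    headers={"Authorization": "Bearer {}".format(arm_token)})
print(resp.status_code)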
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -350,7 +411,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os, uuid\n",
|
||||
"import uuid\n",
|
||||
"\n",
|
||||
"local_secret = os.environ.get(\"LOCAL_SECRET\", default = str(uuid.uuid4())) # Use random UUID as a substitute for real secret.\n",
|
||||
"keyvault = ws.get_default_keyvault()\n",
|
||||
|
||||
@@ -98,6 +98,8 @@
|
||||
"## Create or attach existing AmlCompute\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource. Specifically, the below code creates an `STANDARD_NC6` GPU cluster that autoscales from `0` to `4` nodes.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||
"\n",
|
||||
"**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace, this code will skip the creation process.\n",
|
||||
"\n",
|
||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
|
||||
@@ -4,6 +4,8 @@ import os
|
||||
|
||||
import numpy as np
|
||||
|
||||
from utils import download_mnist
|
||||
|
||||
import chainer
|
||||
from chainer import backend
|
||||
from chainer import backends
|
||||
@@ -17,6 +19,7 @@ from chainer.training import extensions
|
||||
from chainer.dataset import concat_examples
|
||||
from chainer.backends.cuda import to_cpu
|
||||
|
||||
|
||||
from azureml.core.run import Run
|
||||
run = Run.get_context()
|
||||
|
||||
@@ -49,7 +52,7 @@ def main():
|
||||
    args = parser.parse_args()

    # Download the MNIST data if you haven't downloaded it yet
    train, test = datasets.mnist.get_mnist(withlabel=True, ndim=1)
    train, test = download_mnist()

    gpu_id = args.gpu_id
    batchsize = args.batchsize
|
||||
|
||||
@@ -2,6 +2,8 @@ import numpy as np
|
||||
import os
|
||||
import json
|
||||
|
||||
from utils import download_mnist
|
||||
|
||||
from chainer import serializers, using_config, Variable, datasets
|
||||
import chainer.functions as F
|
||||
import chainer.links as L
|
||||
@@ -41,7 +43,7 @@ def init():
|
||||
def run(input_data):
|
||||
    i = np.array(json.loads(input_data)['data'])

    _, test = datasets.get_mnist()
    _, test = download_mnist()
    x = Variable(np.asarray([test[i][0]]))
    y = model(x)
|
||||
|
||||
|
||||
@@ -45,16 +45,6 @@
|
||||
"print(\"SDK version:\", azureml.core.VERSION)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!jupyter nbextension install --py --user azureml.widgets\n",
|
||||
"!jupyter nbextension enable --py --user azureml.widgets"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -108,6 +98,8 @@
|
||||
"## Create or Attach existing AmlCompute\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||
"\n",
|
||||
"**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace, this code will skip the creation process.\n",
|
||||
"\n",
|
||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
@@ -217,7 +209,8 @@
|
||||
"import shutil\n",
|
||||
"\n",
|
||||
"shutil.copy('chainer_mnist.py', project_folder)\n",
|
||||
"shutil.copy('chainer_score.py', project_folder)"
|
||||
"shutil.copy('chainer_score.py', project_folder)\n",
|
||||
"shutil.copy('utils.py', project_folder)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -263,6 +256,7 @@
|
||||
"- python=3.6.2\n",
|
||||
"- pip:\n",
|
||||
" - azureml-defaults\n",
|
||||
" - azureml-opendatasets\n",
|
||||
" - chainer==5.1.0\n",
|
||||
" - cupy-cuda90==5.1.0\n",
|
||||
" - mpi4py==3.0.0\n",
|
||||
@@ -276,12 +270,14 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Environment\n",
|
||||
"from azureml.core.runconfig import DockerConfiguration\n",
|
||||
"\n",
|
||||
"chainer_env = Environment.from_conda_specification(name = 'chainer-5.1.0-gpu', file_path = './conda_dependencies.yml')\n",
|
||||
"\n",
|
||||
"# Specify a GPU base image\n",
|
||||
"chainer_env.docker.enabled = True\n",
|
||||
"chainer_env.docker.base_image = 'mcr.microsoft.com/azureml/intelmpi2018.3-cuda9.0-cudnn7-ubuntu16.04'"
|
||||
"chainer_env.docker.base_image = 'mcr.microsoft.com/azureml/intelmpi2018.3-cuda9.0-cudnn7-ubuntu16.04'\n",
|
||||
"\n",
|
||||
"docker_config = DockerConfiguration(use_docker=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -305,7 +301,8 @@
|
||||
" script='chainer_mnist.py',\n",
|
||||
" arguments=['--epochs', 10, '--batchsize', 128, '--output_dir', './outputs'],\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" environment=chainer_env)"
|
||||
" environment=chainer_env,\n",
|
||||
" docker_runtime_config=docker_config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -557,6 +554,7 @@
|
||||
"cd.add_conda_package('numpy')\n",
|
||||
"cd.add_pip_package('chainer==5.1.0')\n",
|
||||
"cd.add_pip_package(\"azureml-defaults\")\n",
|
||||
"cd.add_pip_package(\"azureml-opendatasets\")\n",
|
||||
"cd.save_to_file(base_directory='./', conda_file_path='myenv.yml')\n",
|
||||
"\n",
|
||||
"print(cd.serialize_to_string())"
|
||||
@@ -584,7 +582,8 @@
|
||||
"\n",
|
||||
"\n",
|
||||
"myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\n",
|
||||
"inference_config = InferenceConfig(entry_script=\"chainer_score.py\", environment=myenv)\n",
|
||||
"inference_config = InferenceConfig(entry_script=\"chainer_score.py\", environment=myenv,\n",
|
||||
" source_directory=project_folder)\n",
|
||||
"\n",
|
||||
"aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,\n",
|
||||
" auth_enabled=True, # this flag generates API keys to secure access\n",
|
||||
@@ -592,10 +591,10 @@
|
||||
" tags={'name': 'mnist', 'framework': 'Chainer'},\n",
|
||||
" description='Chainer DNN with MNIST')\n",
|
||||
"\n",
|
||||
"service = Model.deploy(workspace=ws, \n",
|
||||
" name='chainer-mnist-1', \n",
|
||||
" models=[model], \n",
|
||||
" inference_config=inference_config, \n",
|
||||
"service = Model.deploy(workspace=ws,\n",
|
||||
" name='chainer-mnist-1',\n",
|
||||
" models=[model],\n",
|
||||
" inference_config=inference_config,\n",
|
||||
" deployment_config=aciconfig)\n",
|
||||
"service.wait_for_deployment(True)\n",
|
||||
"print(service.state)\n",
|
||||
@@ -685,13 +684,16 @@
|
||||
" res = res.reshape(n_items[0], 1)\n",
|
||||
" return res\n",
|
||||
"\n",
|
||||
"os.makedirs('./data/mnist', exist_ok=True)\n",
|
||||
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename = './data/mnist/test-images.gz')\n",
|
||||
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename = './data/mnist/test-labels.gz')\n",
|
||||
"data_folder = os.path.join(os.getcwd(), 'data/mnist')\n",
|
||||
"os.makedirs(data_folder, exist_ok=True)\n",
|
||||
"\n",
|
||||
"X_test = load_data('./data/mnist/test-images.gz', False)\n",
|
||||
"y_test = load_data('./data/mnist/test-labels.gz', True).reshape(-1)\n",
|
||||
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-images-idx3-ubyte.gz',\n",
|
||||
" filename=os.path.join(data_folder, 't10k-images-idx3-ubyte.gz'))\n",
|
||||
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-labels-idx1-ubyte.gz',\n",
|
||||
" filename=os.path.join(data_folder, 't10k-labels-idx1-ubyte.gz'))\n",
|
||||
"\n",
|
||||
"X_test = load_data(os.path.join(data_folder, 't10k-images-idx3-ubyte.gz'), False) / np.float32(255.0)\n",
|
||||
"y_test = load_data(os.path.join(data_folder, 't10k-labels-idx1-ubyte.gz'), True).reshape(-1)\n",
|
||||
"\n",
|
||||
"# send a random row from the test set to score\n",
|
||||
"random_index = np.random.randint(0, len(X_test)-1)\n",
|
||||
|
||||
@@ -10,3 +10,4 @@ dependencies:
|
||||
- gzip
|
||||
- struct
|
||||
- requests
|
||||
- azureml-opendatasets
|
||||
|
||||
@@ -0,0 +1,50 @@
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

import glob
import gzip
import numpy as np
import os
import struct

from azureml.core import Dataset
from azureml.opendatasets import MNIST
from chainer.datasets import tuple_dataset


# load compressed MNIST gz files and return numpy arrays
def load_data(filename, label=False):
    with gzip.open(filename) as gz:
        struct.unpack('I', gz.read(4))  # skip the magic number
        n_items = struct.unpack('>I', gz.read(4))
        if not label:
            n_rows = struct.unpack('>I', gz.read(4))[0]
            n_cols = struct.unpack('>I', gz.read(4))[0]
            res = np.frombuffer(gz.read(n_items[0] * n_rows * n_cols), dtype=np.uint8)
            res = res.reshape(n_items[0], n_rows * n_cols)
        else:
            res = np.frombuffer(gz.read(n_items[0]), dtype=np.uint8)
            res = res.reshape(n_items[0], 1)
    return res


def download_mnist():
    data_folder = os.path.join(os.getcwd(), 'data/mnist')
    os.makedirs(data_folder, exist_ok=True)

    # download MNIST from Azure Open Datasets into the local data folder
    mnist_file_dataset = MNIST.get_file_dataset()
    mnist_file_dataset.download(data_folder, overwrite=True)

    X_train = load_data(glob.glob(os.path.join(data_folder, "**/train-images-idx3-ubyte.gz"),
                                  recursive=True)[0], False) / 255.0
    X_test = load_data(glob.glob(os.path.join(data_folder, "**/t10k-images-idx3-ubyte.gz"),
                                 recursive=True)[0], False) / 255.0
    y_train = load_data(glob.glob(os.path.join(data_folder, "**/train-labels-idx1-ubyte.gz"),
                                  recursive=True)[0], True).reshape(-1)
    y_test = load_data(glob.glob(os.path.join(data_folder, "**/t10k-labels-idx1-ubyte.gz"),
                                 recursive=True)[0], True).reshape(-1)

    # package the arrays as Chainer TupleDatasets
    train = tuple_dataset.TupleDataset(X_train.astype(np.float32), y_train.astype(np.int32))
    test = tuple_dataset.TupleDataset(X_test.astype(np.float32), y_test.astype(np.int32))

    return train, test
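As a quick sanity check, a hypothetical snippet exercising the helper above (not part of the file itself) could be:

# expect 60000 training tuples and 10000 test tuples
train, test = download_mnist()
print(len(train), len(test))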
@@ -222,6 +222,8 @@
|
||||
"### Create or attach existing AmlCompute\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||
"\n",
|
||||
"**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||
"\n",
|
||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
|
||||
@@ -272,7 +272,9 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create or Attach existing AmlCompute\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource."
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -21,7 +21,8 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Distributed PyTorch with DistributedDataParallel\n",
|
||||
"In this tutorial, you will train a PyTorch model on the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset using distributed training with PyTorch's `DistributedDataParallel` module across a GPU cluster. "
|
||||
"\n",
|
||||
"In this tutorial, you will train a PyTorch model on the [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset using distributed training with PyTorch's `DistributedDataParallel` module across a GPU cluster."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -98,6 +99,8 @@
|
||||
"## Create or attach existing AmlCompute\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource. Specifically, the below code creates an `STANDARD_NC6` GPU cluster that autoscales from `0` to `4` nodes.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||
"\n",
|
||||
"**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace, this code will skip the creation process.\n",
|
||||
"\n",
|
||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
@@ -113,7 +116,7 @@
|
||||
"from azureml.core.compute_target import ComputeTargetException\n",
|
||||
"\n",
|
||||
"# choose a name for your cluster\n",
|
||||
"cluster_name = \"gpu-cluster\"\n",
|
||||
"cluster_name = 'gpu-cluster'\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" compute_target = ComputeTarget(workspace=ws, name=cluster_name)\n",
|
||||
@@ -139,6 +142,68 @@
|
||||
"The above code creates GPU compute. If you instead want to create CPU compute, provide a different VM size to the `vm_size` parameter, such as `STANDARD_D2_V2`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prepare dataset\n",
|
||||
"\n",
|
||||
"Prepare the dataset used for training. We will first download and extract the publicly available CIFAR-10 dataset from the cs.toronto.edu website and then create an Azure ML FileDataset to use the data for training."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Download and extract CIFAR-10 data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import urllib\n",
|
||||
"import tarfile\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"url = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n",
|
||||
"filename = 'cifar-10-python.tar.gz'\n",
|
||||
"data_root = 'cifar-10'\n",
|
||||
"filepath = os.path.join(data_root, filename)\n",
|
||||
"\n",
|
||||
"if not os.path.isdir(data_root):\n",
|
||||
" os.makedirs(data_root, exist_ok=True)\n",
|
||||
" urllib.request.urlretrieve(url, filepath)\n",
|
||||
" with tarfile.open(filepath, \"r:gz\") as tar:\n",
|
||||
" tar.extractall(path=data_root)\n",
|
||||
" os.remove(filepath) # delete tar.gz file after extraction"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create Azure ML dataset\n",
|
||||
"\n",
|
||||
"The `upload_directory` method will upload the data to a datastore and create a FileDataset from it. In this tutorial we will use the workspace's default datastore."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Dataset\n",
|
||||
"\n",
|
||||
"datastore = ws.get_default_datastore()\n",
|
||||
"dataset = Dataset.File.upload_directory(\n",
|
||||
" src_dir=data_root, target=(datastore, data_root)\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -161,8 +226,6 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"project_folder = './pytorch-distr'\n",
|
||||
"os.makedirs(project_folder, exist_ok=True)"
|
||||
]
|
||||
@@ -172,26 +235,14 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Prepare training script\n",
|
||||
"Now you will need to create your training script. In this tutorial, the script for distributed training of MNIST is already provided for you at `pytorch_mnist.py`. In practice, you should be able to take any custom PyTorch training script as is and run it with Azure ML without having to modify your code.\n",
|
||||
"\n",
|
||||
"However, if you would like to use Azure ML's [metric logging](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#logging) capabilities, you will have to add a small amount of Azure ML logic inside your training script. In this example, at each logging interval, we will log the loss for that minibatch to our Azure ML run.\n",
|
||||
"\n",
|
||||
"To do so, in `pytorch_mnist.py`, we will first access the Azure ML `Run` object within the script:\n",
|
||||
"```Python\n",
|
||||
"from azureml.core.run import Run\n",
|
||||
"run = Run.get_context()\n",
|
||||
"```\n",
|
||||
"Later within the script, we log the loss metric to our run:\n",
|
||||
"```Python\n",
|
||||
"run.log('loss', losses.avg)\n",
|
||||
"```"
|
||||
"Now you will need to create your training script. In this tutorial, the script for distributed training on CIFAR-10 is already provided for you at `train.py`. In practice, you should be able to take any custom PyTorch training script as is and run it with Azure ML without having to modify your code."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Once your script is ready, copy the training script `pytorch_mnist.py` into the project directory."
|
||||
"Once your script is ready, copy the training script `train.py` into the project directory."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -202,7 +253,7 @@
|
||||
"source": [
|
||||
"import shutil\n",
|
||||
"\n",
|
||||
"shutil.copy('pytorch_mnist.py', project_folder)"
|
||||
"shutil.copy('train.py', project_folder)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -231,26 +282,7 @@
|
||||
"source": [
|
||||
"### Create an environment\n",
|
||||
"\n",
|
||||
"Define a conda environment YAML file with your training script dependencies and create an Azure ML environment."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%writefile conda_dependencies.yml\n",
|
||||
"\n",
|
||||
"channels:\n",
|
||||
"- conda-forge\n",
|
||||
"dependencies:\n",
|
||||
"- python=3.6.2\n",
|
||||
"- pip:\n",
|
||||
" - azureml-defaults\n",
|
||||
" - torch==1.6.0\n",
|
||||
" - torchvision==0.7.0\n",
|
||||
" - future==0.17.1"
|
||||
"In this tutorial, we will use one of Azure ML's curated PyTorch environments for training. [Curated environments](https://docs.microsoft.com/azure/machine-learning/how-to-use-environments#use-a-curated-environment) are available in your workspace by default. Specifically, we will use the PyTorch 1.6 GPU curated environment."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -261,24 +293,39 @@
|
||||
"source": [
|
||||
"from azureml.core import Environment\n",
|
||||
"\n",
|
||||
"pytorch_env = Environment.from_conda_specification(name = 'pytorch-1.6-gpu', file_path = './conda_dependencies.yml')\n",
|
||||
"\n",
|
||||
"# Specify a GPU base image\n",
|
||||
"pytorch_env.docker.enabled = True\n",
|
||||
"pytorch_env.docker.base_image = 'mcr.microsoft.com/azureml/openmpi3.1.2-cuda10.1-cudnn7-ubuntu18.04'"
|
||||
"pytorch_env = Environment.get(ws, name='AzureML-PyTorch-1.6-GPU')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Configure the training job: torch.distributed with NCCL backend\n",
|
||||
"### Configure the training job\n",
|
||||
"\n",
|
||||
"Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on.\n",
|
||||
"To launch a distributed PyTorch job on Azure ML, you have two options:\n",
|
||||
"\n",
|
||||
"In order to run a distributed PyTorch job with **torch.distributed** using the NCCL backend, create a `PyTorchConfiguration` and pass it to the `distributed_job_config` parameter of the ScriptRunConfig constructor. Specify `communication_backend='Nccl'` in the PyTorchConfiguration. The below code will configure a 2-node distributed job. The NCCL backend is the recommended backend for PyTorch distributed GPU training.\n",
|
||||
"1. Per-process launch - specify the total # of worker processes (typically one per GPU) you want to run, and\n",
|
||||
"Azure ML will handle launching each process.\n",
|
||||
"2. Per-node launch with [torch.distributed.launch](https://pytorch.org/docs/stable/distributed.html#launch-utility) - provide the `torch.distributed.launch` command you want to\n",
|
||||
"run on each node.\n",
|
||||
"\n",
|
||||
"The script arguments refers to the Azure ML-set environment variables `AZ_BATCHAI_PYTORCH_INIT_METHOD` for shared file-system initialization and `AZ_BATCHAI_TASK_INDEX` for the global rank of each worker process."
|
||||
"For more information, see the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-train-pytorch#distributeddataparallel).\n",
|
||||
"\n",
|
||||
"Both options are shown below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Per-process launch\n",
|
||||
"\n",
|
||||
"To use the per-process launch option in which Azure ML will handle launching each of the processes to run your training script,\n",
|
||||
"\n",
|
||||
"1. Specify the training script and arguments\n",
|
||||
"2. Create a `PyTorchConfiguration` and specify `node_count` and `process_count`. The `process_count` is the total number of processes you want to run for the job; this should typically equal the # of GPUs available on each node multiplied by the # of nodes. Since this tutorial uses the `STANDARD_NC6` SKU, which has one GPU, the total process count for a 2-node job is `2`. If you are using a SKU with >1 GPUs, adjust the `process_count` accordingly.\n",
|
||||
"\n",
|
||||
"Azure ML will set the `MASTER_ADDR`, `MASTER_PORT`, `NODE_RANK`, `WORLD_SIZE` environment variables on each node, in addition to the process-level `RANK` and `LOCAL_RANK` environment variables, that are needed for distributed PyTorch training."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -290,17 +337,61 @@
|
||||
"from azureml.core import ScriptRunConfig\n",
|
||||
"from azureml.core.runconfig import PyTorchConfiguration\n",
|
||||
"\n",
|
||||
"args = ['--dist-backend', 'nccl',\n",
|
||||
" '--dist-url', '$AZ_BATCHAI_PYTORCH_INIT_METHOD',\n",
|
||||
" '--rank', '$AZ_BATCHAI_TASK_INDEX',\n",
|
||||
" '--world-size', 2]\n",
|
||||
"# create distributed config\n",
|
||||
"distr_config = PyTorchConfiguration(process_count=2, node_count=2)\n",
|
||||
"\n",
|
||||
"# create args\n",
|
||||
"args = [\"--data-dir\", dataset.as_download(), \"--epochs\", 25]\n",
|
||||
"\n",
|
||||
"# create job config\n",
|
||||
"src = ScriptRunConfig(source_directory=project_folder,\n",
|
||||
" script='pytorch_mnist.py',\n",
|
||||
" script='train.py',\n",
|
||||
" arguments=args,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" environment=pytorch_env,\n",
|
||||
" distributed_job_config=PyTorchConfiguration(communication_backend='Nccl', node_count=2))"
|
||||
" distributed_job_config=distr_config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Per-node launch with `torch.distributed.launch`\n",
|
||||
"\n",
|
||||
"If you would instead like to use the PyTorch-provided launch utility `torch.distributed.launch` to handle launching the worker processes on each node, you can do so as well. \n",
|
||||
"\n",
|
||||
"1. Provide the launch command to the `command` parameter of ScriptRunConfig. For PyTorch jobs Azure ML will set the `MASTER_ADDR`, `MASTER_PORT`, and `NODE_RANK` environment variables on each node, so you can simply just reference those environment variables in your command. If you are using a SKU with >1 GPUs, adjust the `--nproc_per_node` argument accordingly.\n",
|
||||
"\n",
|
||||
"2. Create a `PyTorchConfiguration` and specify the `node_count`. You do not need to specify the `process_count`; by default Azure ML will launch one process per node to run the `command` you provided.\n",
|
||||
"\n",
|
||||
"Uncomment the code below to configure a job with this method."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"'''\n",
|
||||
"from azureml.core import ScriptRunConfig\n",
|
||||
"from azureml.core.runconfig import PyTorchConfiguration\n",
|
||||
"\n",
|
||||
"# create distributed config\n",
|
||||
"distr_config = PyTorchConfiguration(node_count=2)\n",
|
||||
"\n",
|
||||
"# define command\n",
|
||||
"launch_cmd = [\"python -m torch.distributed.launch --nproc_per_node 1 --nnodes 2 \" \\\n",
|
||||
" \"--node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT --use_env \" \\\n",
|
||||
" \"train.py --data-dir\", dataset.as_download(), \"--epochs 25\"]\n",
|
||||
"\n",
|
||||
"# create job config\n",
|
||||
"src = ScriptRunConfig(source_directory=project_folder,\n",
|
||||
" command=launch_cmd,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" environment=pytorch_env,\n",
|
||||
" distributed_job_config=distr_config)\n",
|
||||
"'''"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -308,7 +399,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Submit job\n",
|
||||
"Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous."
|
||||
"Run your experiment by submitting your `ScriptRunConfig` object. Note that this call is asynchronous."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -355,50 +446,12 @@
|
||||
"source": [
|
||||
"run.wait_for_completion(show_output=True) # this provides a verbose log"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Configure training job: torch.distributed with Gloo backend\n",
|
||||
"\n",
|
||||
"If you would instead like to use the Gloo backend for distributed training, you can do so via the following code. The Gloo backend is recommended for distributed CPU training."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import ScriptRunConfig\n",
|
||||
"from azureml.core.runconfig import PyTorchConfiguration\n",
|
||||
"\n",
|
||||
"args = ['--dist-backend', 'gloo',\n",
|
||||
" '--dist-url', '$AZ_BATCHAI_PYTORCH_INIT_METHOD',\n",
|
||||
" '--rank', '$AZ_BATCHAI_TASK_INDEX',\n",
|
||||
" '--world-size', 2]\n",
|
||||
"\n",
|
||||
"src = ScriptRunConfig(source_directory=project_folder,\n",
|
||||
" script='pytorch_mnist.py',\n",
|
||||
" arguments=args,\n",
|
||||
" compute_target=compute_target,\n",
|
||||
" environment=pytorch_env,\n",
|
||||
" distributed_job_config=PyTorchConfiguration(communication_backend='Gloo', node_count=2))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Once you create the ScriptRunConfig, you can follow the submit steps as shown in the previous steps to submit a PyTorch distributed run using the Gloo backend."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "ninhu"
|
||||
"name": "minxia"
|
||||
}
|
||||
],
|
||||
"category": "training",
|
||||
@@ -406,7 +459,7 @@
|
||||
"AML Compute"
|
||||
],
|
||||
"datasets": [
|
||||
"MNIST"
|
||||
"CIFAR-10"
|
||||
],
|
||||
"deployment": [
|
||||
"None"
|
||||
@@ -432,12 +485,12 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.9"
|
||||
"version": "3.7.7"
|
||||
},
|
||||
"tags": [
|
||||
"None"
|
||||
],
|
||||
"task": "Train a model using distributed training via Nccl/Gloo"
|
||||
"task": "Train a model using distributed training via PyTorch DistributedDataParallel"
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
@@ -1,4 +1,4 @@
|
||||
name: aml-pipelines-how-to-use-estimatorstep
|
||||
name: distributed-pytorch-with-distributeddataparallel
|
||||
dependencies:
|
||||
- pip:
|
||||
- azureml-sdk
|
||||
@@ -0,0 +1,238 @@
|
||||
# Copyright (c) 2017 Facebook, Inc. All rights reserved.
# BSD 3-Clause License
#
# Script adapted from:
# https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
# ==============================================================================

# imports
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import os
import argparse


# define network architecture
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.conv3 = nn.Conv2d(64, 128, 3)
        self.fc1 = nn.Linear(128 * 6 * 6, 120)
        self.dropout = nn.Dropout(p=0.2)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = x.view(-1, 128 * 6 * 6)
        x = self.dropout(F.relu(self.fc1(x)))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


def train(train_loader, model, criterion, optimizer, epoch, device, print_freq, rank):
    running_loss = 0.0
    for i, data in enumerate(train_loader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data[0].to(device), data[1].to(device)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        if i % print_freq == 0:  # print every print_freq mini-batches
            print(
                "Rank %d: [%d, %5d] loss: %.3f"
                % (rank, epoch + 1, i + 1, running_loss / print_freq)
            )
            running_loss = 0.0


def evaluate(test_loader, model, device):
    classes = (
        "plane",
        "car",
        "bird",
        "cat",
        "deer",
        "dog",
        "frog",
        "horse",
        "ship",
        "truck",
    )

    model.eval()

    correct = 0
    total = 0
    class_correct = list(0.0 for i in range(10))
    class_total = list(0.0 for i in range(10))
    with torch.no_grad():
        for data in test_loader:
            images, labels = data[0].to(device), data[1].to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            c = (predicted == labels).squeeze()
            for i in range(10):
                label = labels[i]
                class_correct[label] += c[i].item()
                class_total[label] += 1

    # print total test set accuracy
    print(
        "Accuracy of the network on the 10000 test images: %d %%"
        % (100 * correct / total)
    )

    # print test accuracy for each of the classes
    for i in range(10):
        print(
            "Accuracy of %5s : %2d %%"
            % (classes[i], 100 * class_correct[i] / class_total[i])
        )


def main(args):
    # get PyTorch environment variables
    world_size = int(os.environ["WORLD_SIZE"])
    rank = int(os.environ["RANK"])
    local_rank = int(os.environ["LOCAL_RANK"])

    distributed = world_size > 1

    # set device
    if distributed:
        device = torch.device("cuda", local_rank)
    else:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # initialize distributed process group using default env:// method
    if distributed:
        torch.distributed.init_process_group(backend="nccl")

    # define train and test dataset DataLoaders
    transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    )

    train_set = torchvision.datasets.CIFAR10(
        root=args.data_dir, train=True, download=False, transform=transform
    )

    if distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_set)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_set,
        batch_size=args.batch_size,
        shuffle=(train_sampler is None),
        num_workers=args.workers,
        sampler=train_sampler,
    )

    test_set = torchvision.datasets.CIFAR10(
        root=args.data_dir, train=False, download=False, transform=transform
    )
    test_loader = torch.utils.data.DataLoader(
        test_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers
    )

    model = Net().to(device)

    # wrap model with DDP
    if distributed:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[local_rank], output_device=local_rank
        )

    # define loss function and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(
        model.parameters(), lr=args.learning_rate, momentum=args.momentum
    )

    # train the model
    for epoch in range(args.epochs):
        print("Rank %d: Starting epoch %d" % (rank, epoch))
        if distributed:
            train_sampler.set_epoch(epoch)
        model.train()
        train(
            train_loader,
            model,
            criterion,
            optimizer,
            epoch,
            device,
            args.print_freq,
            rank,
        )

    print("Rank %d: Finished Training" % (rank))

    if not distributed or rank == 0:
        os.makedirs(args.output_dir, exist_ok=True)
        model_path = os.path.join(args.output_dir, "cifar_net.pt")
        torch.save(model.state_dict(), model_path)

        # evaluate on full test dataset
        evaluate(test_loader, model, device)


if __name__ == "__main__":
    # setup argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data-dir", type=str, help="directory containing CIFAR-10 dataset"
    )
    parser.add_argument("--epochs", default=10, type=int, help="number of epochs")
    parser.add_argument(
        "--batch-size",
        default=16,
        type=int,
        help="mini batch size for each gpu/process",
    )
    parser.add_argument(
        "--workers",
        default=2,
        type=int,
        help="number of data loading workers for each gpu/process",
    )
    parser.add_argument(
        "--learning-rate", default=0.001, type=float, help="learning rate"
    )
    parser.add_argument("--momentum", default=0.9, type=float, help="momentum")
    parser.add_argument(
        "--output-dir", default="outputs", type=str, help="directory to save model to"
    )
    parser.add_argument(
        "--print-freq",
        default=200,
        type=int,
        help="frequency of printing training statistics",
    )
    args = parser.parse_args()

    main(args)
@@ -99,6 +99,8 @@
|
||||
"## Create or attach existing AmlCompute\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource. Specifically, the below code creates an `STANDARD_NC6` GPU cluster that autoscales from `0` to `4` nodes.\n",
|
||||
"\n",
|
||||
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
||||
"\n",
|
||||
"**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace, this code will skip the creation process.\n",
|
||||
"\n",
|
||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
|
||||
@@ -51,6 +51,17 @@ if args.cuda:
|
||||
|
||||
|
||||
kwargs = {}
|
||||
# Use Azure Open Datasets for MNIST dataset
|
||||
datasets.MNIST.resources = [
    ("https://azureopendatastorage.azurefd.net/mnist/train-images-idx3-ubyte.gz",
     "f68b3c2dcbeaaa9fbdd348bbdeb94873"),
    ("https://azureopendatastorage.azurefd.net/mnist/train-labels-idx1-ubyte.gz",
     "d53e105ee54ea40749a09fcbcd1e9432"),
    ("https://azureopendatastorage.azurefd.net/mnist/t10k-images-idx3-ubyte.gz",
     "9fb629c4189551a2d022fa330f9573f3"),
    ("https://azureopendatastorage.azurefd.net/mnist/t10k-labels-idx1-ubyte.gz",
     "ec29112dd5afa0611ce80d1b7f02629c")
]
train_dataset = \
    datasets.MNIST('data-%d' % hvd.rank(), train=True, download=True,
                   transform=transforms.Compose([
|
||||
|
||||
@@ -1,209 +0,0 @@
|
||||
# Copyright (c) 2017, PyTorch contributors
# Modifications copyright (C) Microsoft Corporation
# Licensed under the BSD license
# Adapted from https://github.com/Azure/BatchAI/tree/master/recipes/PyTorch/PyTorch-GPU-Distributed-Gloo

from __future__ import print_function
import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.utils.data
import torch.utils.data.distributed
import torchvision.models as models

from azureml.core.run import Run
# get the Azure ML run object
run = Run.get_context()

# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                    help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                    help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
                    help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                    help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                    help='SGD momentum (default: 0.5)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--world-size', default=1, type=int,
                    help='number of distributed processes')
parser.add_argument('--dist-url', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--rank', default=-1, type=int,
                    help='rank of the worker')

best_prec1 = 0
args = parser.parse_args()

args.distributed = args.world_size >= 2

if args.distributed:
    dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                            world_size=args.world_size, rank=args.rank)

train_dataset = datasets.MNIST('data-%d' % args.rank, train=True, download=True,
                               transform=transforms.Compose([
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.1307,), (0.3081,))
                               ]))

if args.distributed:
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
    train_sampler = None

train_loader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=args.batch_size, shuffle=(train_sampler is None),
    num_workers=args.workers, pin_memory=True, sampler=train_sampler)


test_loader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=args.batch_size, shuffle=False,
    num_workers=args.workers, pin_memory=True)


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)


model = Net()

if not args.distributed:
    model = torch.nn.DataParallel(model).cuda()
else:
    model.cuda()
    model = torch.nn.parallel.DistributedDataParallel(model)

# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()

optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)


def train(epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        input, target = input.cuda(), target.cuda()

        # compute output
        try:
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1[0], input.size(0))
            top5.update(prec5[0], input.size(0))

            # compute gradient and do SGD step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % 10 == 0:
                run.log("loss", losses.avg)
                run.log("prec@1", "{0:.3f}".format(top1.avg))
                run.log("prec@5", "{0:.3f}".format(top5.avg))
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(epoch, i, len(train_loader),
                                                                      batch_time=batch_time, data_time=data_time,
                                                                      loss=losses, top1=top1, top5=top5))
        except:
            import sys
            print("Unexpected error:", sys.exc_info()[0])


class AverageMeter(object):
    """Computes and stores the average and current value"""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res


for epoch in range(1, args.epochs + 1):
    train(epoch)
Some files were not shown because too many files have changed in this diff.