Mirror of https://github.com/Azure/MachineLearningNotebooks.git, synced 2025-12-20 01:27:06 -05:00

Compare commits: azureml-sd...azureml-sd

10 Commits
| Author | SHA1 | Date |
|---|---|---|
| | d3f1212440 | |
| | b95a65eef4 | |
| | 2218af619f | |
| | 0401128638 | |
| | 59fcb54998 | |
| | e0ea99a6bb | |
| | b06f5ce269 | |
| | ed0ce9e895 | |
| | 71053d705b | |
| | 77f98bf75f | |
@@ -103,7 +103,7 @@
"source": [
"import azureml.core\n",
"\n",
"print(\"This notebook was created using version 1.1.5rc0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.2.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

@@ -13,7 +13,7 @@ dependencies:
- scipy>=1.0.0,<=1.1.0
- scikit-learn>=0.19.0,<=0.20.3
- pandas>=0.22.0,<=0.23.4
- py-xgboost<=0.80
- py-xgboost<=0.90
- fbprophet==0.5
- pytorch=1.1.0
- cudatoolkit=9.0

@@ -33,5 +33,6 @@ dependencies:
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz

channels:
- anaconda
- conda-forge
- pytorch
- pytorch

@@ -34,5 +34,6 @@ dependencies:
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz

channels:
- anaconda
- conda-forge
- pytorch

@@ -5,7 +5,6 @@ dependencies:
- azureml-train-automl
- azureml-widgets
- matplotlib
- interpret
- onnxruntime==1.0.0
- azureml-explain-model
- azureml-contrib-interpret
@@ -122,35 +122,22 @@
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import AmlCompute\n",
"from azureml.core.compute import ComputeTarget\n",
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your AmlCompute cluster.\n",
"amlcompute_cluster_name = \"cpu-cluster-1\"\n",
"# Choose a name for your CPU cluster\n",
"cpu_cluster_name = \"cpu-cluster-1\"\n",
"\n",
"found = False\n",
"# Check if this compute target already exists in the workspace.\n",
"cts = ws.compute_targets\n",
"if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'AmlCompute':\n",
"    found = True\n",
"    print('Found existing compute target.')\n",
"    compute_target = cts[amlcompute_cluster_name]\n",
"    \n",
"if not found:\n",
"    print('Creating a new compute target...')\n",
"    provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_DS12_V2\", # for GPU, use \"STANDARD_NC6\"\n",
"                                                                #vm_priority = 'lowpriority', # optional\n",
"                                                                max_nodes = 6)\n",
"# Verify that cluster does not exist already\n",
"try:\n",
"    compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
"    print('Found existing cluster, use it.')\n",
"except ComputeTargetException:\n",
"    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
"                                                           max_nodes=6)\n",
"    compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"\n",
"    # Create the cluster.\n",
"    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config)\n",
"    \n",
"print('Checking cluster status...')\n",
"# Can poll for a minimum number of nodes and for a specific timeout.\n",
"# If no min_node_count is provided, it will use the scale settings for the cluster.\n",
"compute_target.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20)\n",
"\n",
"# For a more detailed view of current AmlCompute status, use get_status()."
"compute_target.wait_for_completion(show_output=True)"
]
},
{
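This hunk replaces manual existence polling with the idiomatic try/except pattern. Consolidated out of the notebook JSON for reference, the new provisioning flow looks like this; a minimal sketch assuming `ws` is an existing `Workspace`:

```python
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException

cpu_cluster_name = "cpu-cluster-1"

# Reuse the cluster if it already exists; otherwise provision a new one.
try:
    compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)
    print('Found existing cluster, use it.')
except ComputeTargetException:
    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',
                                                           max_nodes=6)
    compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)

compute_target.wait_for_completion(show_output=True)
```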
@@ -5,5 +5,4 @@ dependencies:
- azureml-train-automl
- azureml-widgets
- matplotlib
- interpret
- azureml-explain-model

@@ -1,9 +1,9 @@
name: auto-ml-forecasting-beer-remote
dependencies:
- fbprophet==0.5
- py-xgboost<=0.80
- py-xgboost<=0.90
- pip:
  - azureml-sdk
  - numpy==1.16.2
  - azureml-train-automl
  - azureml-widgets
  - matplotlib

@@ -1,9 +1,9 @@
name: auto-ml-forecasting-bike-share
dependencies:
- fbprophet==0.5
- py-xgboost<=0.80
- py-xgboost<=0.90
- pip:
  - azureml-sdk
  - numpy==1.16.2
  - azureml-train-automl
  - azureml-widgets
  - matplotlib

@@ -2,9 +2,9 @@ name: auto-ml-forecasting-energy-demand
dependencies:
- pip:
  - azureml-sdk
  - numpy==1.16.2
  - azureml-train-automl
  - azureml-widgets
  - matplotlib
  - interpret
  - azureml-explain-model
  - azureml-contrib-interpret

@@ -701,7 +701,7 @@
"metadata": {
"authors": [
{
"name": "erwright, nirovins"
"name": "erwright"
}
],
"category": "tutorial",

@@ -1,9 +1,9 @@
name: auto-ml-forecasting-function
dependencies:
- fbprophet==0.5
- py-xgboost<=0.80
- py-xgboost<=0.90
- pip:
  - azureml-sdk
  - numpy==1.16.2
  - azureml-train-automl
  - azureml-widgets
  - matplotlib

@@ -1,9 +1,9 @@
name: auto-ml-forecasting-orange-juice-sales
dependencies:
- fbprophet==0.5
- py-xgboost<=0.80
- py-xgboost<=0.90
- pip:
  - azureml-sdk
  - numpy==1.16.2
  - pandas==0.23.4
  - azureml-train-automl
  - azureml-widgets
@@ -49,7 +49,9 @@
"2. Configure AutoML using `AutoMLConfig`.\n",
"3. Train the model.\n",
"4. Explore the results.\n",
"5. Test the fitted model."
"5. Visualize the model's feature importance in the Azure portal\n",
"6. Explore any model's explanation and feature importance in the Azure portal\n",
"7. Test the fitted model."
]
},
{

@@ -71,13 +73,13 @@
"\n",
"from matplotlib import pyplot as plt\n",
"import pandas as pd\n",
"import os\n",
"\n",
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core.dataset import Dataset\n",
"from azureml.train.automl import AutoMLConfig"
"from azureml.train.automl import AutoMLConfig\n",
"from azureml.explain.model._internal.explanation_client import ExplanationClient"
]
},
{
@@ -262,6 +264,133 @@
"The fitted_model is a Python object and you can read the different properties of the object.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Best Model's explanation\n",
"Retrieve the explanation from the best_run, which includes explanations for engineered features and raw features.\n",
"\n",
"#### Download engineered feature importance from artifact store\n",
"You can use ExplanationClient to download the engineered feature explanations from the artifact store of the best_run. You can also use the Azure portal URL to view the dashboard visualization of the feature importance values of the engineered features."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"client = ExplanationClient.from_run(best_run)\n",
"engineered_explanations = client.download_model_explanation(raw=False)\n",
"print(engineered_explanations.get_feature_importance_dict())\n",
"print(\"You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + best_run.get_portal_url())"
]
},
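The same client call also serves raw-feature importances; a minimal sketch, mirroring the `raw=True` usage that appears in a later hunk of this comparison:

```python
from azureml.explain.model._internal.explanation_client import ExplanationClient

client = ExplanationClient.from_run(best_run)
# raw=False (above) returns engineered-feature importances;
# raw=True maps them back onto the raw input features.
raw_explanations = client.download_model_explanation(raw=True)
print(raw_explanations.get_feature_importance_dict())
```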
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Explanations\n",
"In this section, we will show how to compute model explanations and visualize the explanations using the azureml-explain-model package. Besides retrieving an existing model explanation for an AutoML model, you can also explain your AutoML model with different test data. The following steps will allow you to compute and visualize engineered feature importance based on your test data."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Retrieve any other AutoML model from training"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_run, fitted_model = local_run.get_output(metric='accuracy')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Setup the model explanations for AutoML models\n",
"The fitted_model can generate the following, which automl_setup_model_explanations uses to produce the engineered explanations:\n",
"\n",
"1. Featurized data from train samples/test samples\n",
"2. Engineered feature name lists\n",
"3. The classes in your labeled column, in classification scenarios\n",
"\n",
"The automl_explainer_setup_obj contains all the structures from the above list."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_train = training_data.drop_columns(columns=[label_column_name])\n",
"y_train = training_data.keep_columns(columns=[label_column_name], validate=True)\n",
"X_test = validation_data.drop_columns(columns=[label_column_name])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations\n",
"\n",
"automl_explainer_setup_obj = automl_setup_model_explanations(fitted_model, X=X_train, \n",
"                                                             X_test=X_test, y=y_train, \n",
"                                                             task='classification')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Initialize the Mimic Explainer for feature importance\n",
"For explaining the AutoML models, use the MimicWrapper from the azureml.explain.model package. The MimicWrapper can be initialized with fields in automl_explainer_setup_obj, your workspace, and a LightGBM model which acts as a surrogate model to explain the AutoML model (fitted_model here). The MimicWrapper also takes the automl_run object where engineered explanations will be uploaded."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.explain.model.mimic.models.lightgbm_model import LGBMExplainableModel\n",
"from azureml.explain.model.mimic_wrapper import MimicWrapper\n",
"explainer = MimicWrapper(ws, automl_explainer_setup_obj.automl_estimator, LGBMExplainableModel, \n",
"                         init_dataset=automl_explainer_setup_obj.X_transform, run=automl_run,\n",
"                         features=automl_explainer_setup_obj.engineered_feature_names, \n",
"                         feature_maps=[automl_explainer_setup_obj.feature_map],\n",
"                         classes=automl_explainer_setup_obj.classes)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Use Mimic Explainer for computing and visualizing engineered feature importance\n",
"The explain() method in MimicWrapper can be called with the transformed test samples to get the feature importance for the generated engineered features. You can also use the Azure portal URL to view the dashboard visualization of the feature importance values of the engineered features."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"engineered_explanations = explainer.explain(['local', 'global'], eval_dataset=automl_explainer_setup_obj.X_test_transform)\n",
"print(engineered_explanations.get_feature_importance_dict())\n",
"print(\"You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + automl_run.get_portal_url())"
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -358,7 +487,7 @@
"metadata": {
"authors": [
{
"name": "tzvikei"
"name": "anumamah"
}
],
"category": "tutorial",

@@ -5,5 +5,4 @@ dependencies:
- azureml-train-automl
- azureml-widgets
- matplotlib
- interpret
- azureml-explain-model
@@ -51,8 +51,8 @@
"4. Explore the results and featurization transparency options\n",
"5. Set up remote compute for computing the model explanations for a given AutoML model.\n",
"6. Start an AzureML experiment on your remote compute to compute explanations for an AutoML model.\n",
"7. Download the feature importance for engineered features and visualize the explanations for engineered features. \n",
"8. Download the feature importance for raw features and visualize the explanations for raw features. \n"
"7. Download the feature importance for engineered features and visualize the explanations for engineered features in the Azure portal. \n",
"8. Download the feature importance for raw features and visualize the explanations for raw features in the Azure portal. \n"
]
},
{

@@ -598,38 +598,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Feature importance and explanation dashboard\n",
"In this section we describe how you can download the explanation results from the explanations experiment and visualize the feature importance for your AutoML model. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Setup for visualizing the model explanation results\n",
"For visualizing the explanation results for the *fitted_model* we need to perform the following steps:-\n",
"1. Featurize test data samples.\n",
"\n",
"The *automl_explainer_setup_obj* contains all the structures from above list. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_test = test_data.drop_columns([label]).to_pandas_dataframe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.automl.runtime.automl_explain_utilities import AutoMLExplainerSetupClass, automl_setup_model_explanations\n",
"explainer_setup_class = automl_setup_model_explanations(fitted_model, 'regression', X_test=X_test)"
"### Feature importance and visualizing explanation dashboard\n",
"In this section we describe how you can download the explanation results from the explanations experiment and visualize the feature importance for your AutoML model in the Azure portal."
]
},
{

@@ -637,7 +607,7 @@
"metadata": {},
"source": [
"#### Download engineered feature importance from artifact store\n",
"You can use *ExplanationClient* to download the engineered feature explanations from the artifact store of the *automl_run*. You can also use ExplanationDashboard to view the dash board visualization of the feature importance values of the engineered features."
"You can use *ExplanationClient* to download the engineered feature explanations from the artifact store of the *automl_run*. You can also use the Azure portal URL to view the dashboard visualization of the feature importance values of the engineered features."
]
},
{
@@ -647,11 +617,10 @@
"outputs": [],
"source": [
"from azureml.explain.model._internal.explanation_client import ExplanationClient\n",
"from interpret_community.widget import ExplanationDashboard\n",
"client = ExplanationClient.from_run(automl_run)\n",
"engineered_explanations = client.download_model_explanation(raw=False)\n",
"print(engineered_explanations.get_feature_importance_dict())\n",
"ExplanationDashboard(engineered_explanations, explainer_setup_class.automl_estimator, datasetX=explainer_setup_class.X_test_transform)"
"print(\"You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + automl_run.get_portal_url())"
]
},
{

@@ -659,7 +628,7 @@
"metadata": {},
"source": [
"#### Download raw feature importance from artifact store\n",
"You can use *ExplanationClient* to download the raw feature explanations from the artifact store of the *automl_run*. You can also use ExplanationDashboard to view the dash board visualization of the feature importance values of the raw features."
"You can use *ExplanationClient* to download the raw feature explanations from the artifact store of the *automl_run*. You can also use the Azure portal URL to view the dashboard visualization of the feature importance values of the raw features."
]
},
{

@@ -670,7 +639,7 @@
"source": [
"raw_explanations = client.download_model_explanation(raw=True)\n",
"print(raw_explanations.get_feature_importance_dict())\n",
"ExplanationDashboard(raw_explanations, explainer_setup_class.automl_pipeline, datasetX=explainer_setup_class.X_test_raw)"
"print(\"You can visualize the raw explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + automl_run.get_portal_url())"
]
},
{

@@ -803,6 +772,7 @@
"outputs": [],
"source": [
"if service.state == 'Healthy':\n",
"    X_test = test_data.drop_columns([label]).to_pandas_dataframe()\n",
"    # Serialize the first row of the test data into json\n",
"    X_test_json = X_test[:1].to_json(orient='records')\n",
"    print(X_test_json)\n",

@@ -5,7 +5,6 @@ dependencies:
- azureml-train-automl
- azureml-widgets
- matplotlib
- interpret
- azureml-explain-model
- azureml-explain-model
- azureml-contrib-interpret

@@ -2,7 +2,6 @@ name: explain-model-on-amlcompute
dependencies:
- pip:
  - azureml-sdk
  - interpret
  - azureml-interpret
  - azureml-contrib-interpret
  - sklearn-pandas

@@ -2,7 +2,6 @@ name: save-retrieve-explanations-run-history
dependencies:
- pip:
  - azureml-sdk
  - interpret
  - azureml-interpret
  - azureml-contrib-interpret
  - ipywidgets

@@ -2,7 +2,6 @@ name: train-explain-model-locally-and-deploy
dependencies:
- pip:
  - azureml-sdk
  - interpret
  - azureml-interpret
  - azureml-contrib-interpret
  - sklearn-pandas

@@ -2,7 +2,6 @@ name: train-explain-model-on-amlcompute-and-deploy
dependencies:
- pip:
  - azureml-sdk
  - interpret
  - azureml-interpret
  - azureml-contrib-interpret
  - sklearn-pandas
@@ -507,7 +507,7 @@
"metadata": {},
"source": [
"### Create myenv.yml\n",
"We also need to create an environment file so that Azure Machine Learning can install the necessary packages in the Docker image which are required by your scoring script. In this case, we need to specify conda packages `numpy` and `chainer`. Please note that you must indicate azureml-defaults with verion >= 1.0.45 as a pip dependency, because it contains the functionality needed to host the model as a web service."
"We also need to create an environment file so that Azure Machine Learning can install the necessary packages in the Docker image which are required by your scoring script. In this case, we need to specify the conda package `numpy` and pip-install `chainer`. Please note that you must indicate azureml-defaults with version >= 1.0.45 as a pip dependency, because it contains the functionality needed to host the model as a web service."
]
},
{
@@ -520,7 +520,7 @@
"\n",
"cd = CondaDependencies.create()\n",
"cd.add_conda_package('numpy')\n",
"cd.add_conda_package('chainer')\n",
"cd.add_pip_package('chainer==5.1.0')\n",
"cd.add_pip_package(\"azureml-defaults\")\n",
"cd.save_to_file(base_directory='./', conda_file_path='myenv.yml')\n",
"\n",
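Pulled out of the notebook JSON, the updated cell amounts to the following; a minimal sketch of the conda-versus-pip split this commit settles on:

```python
from azureml.core.conda_dependencies import CondaDependencies

# numpy stays a conda package; chainer becomes a pinned pip dependency.
cd = CondaDependencies.create()
cd.add_conda_package('numpy')
cd.add_pip_package('chainer==5.1.0')
cd.add_pip_package("azureml-defaults")  # >= 1.0.45 is needed to host the model as a web service
cd.save_to_file(base_directory='./', conda_file_path='myenv.yml')
```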
@@ -161,7 +161,7 @@
},
"source": [
"## Download MNIST dataset\n",
"In order to train on the MNIST dataset we will first need to download it from Yan LeCun's web site directly and save them in a `data` folder locally."
"In order to train on the MNIST dataset we will first need to download it from the azuremlopendatasets blob directly and save it in a `data` folder locally. If you want, you can download the same data directly from Yann LeCun's web site."
]
},
{
@@ -171,13 +171,17 @@
"outputs": [],
"source": [
"import urllib\n",
"data_folder = 'data'\n",
"os.makedirs(data_folder, exist_ok=True)\n",
"\n",
"os.makedirs('./data/mnist', exist_ok=True)\n",
"\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', filename = './data/mnist/train-images.gz')\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', filename = './data/mnist/train-labels.gz')\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename = './data/mnist/test-images.gz')\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename = './data/mnist/test-labels.gz')"
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/train-images-idx3-ubyte.gz',\n",
"                           filename=os.path.join(data_folder, 'train-images.gz'))\n",
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/train-labels-idx1-ubyte.gz',\n",
"                           filename=os.path.join(data_folder, 'train-labels.gz'))\n",
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-images-idx3-ubyte.gz',\n",
"                           filename=os.path.join(data_folder, 'test-images.gz'))\n",
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-labels-idx1-ubyte.gz',\n",
"                           filename=os.path.join(data_folder, 'test-labels.gz'))"
]
},
{

@@ -205,11 +209,11 @@
"from utils import load_data\n",
"\n",
"# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster.\n",
"X_train = load_data('./data/mnist/train-images.gz', False) / 255.0\n",
"y_train = load_data('./data/mnist/train-labels.gz', True).reshape(-1)\n",
"X_train = load_data(os.path.join(data_folder, 'train-images.gz'), False) / 255.0\n",
"y_train = load_data(os.path.join(data_folder, 'train-labels.gz'), True).reshape(-1)\n",
"\n",
"X_test = load_data('./data/mnist/test-images.gz', False) / 255.0\n",
"y_test = load_data('./data/mnist/test-labels.gz', True).reshape(-1)\n",
"X_test = load_data(os.path.join(data_folder, 'test-images.gz'), False) / 255.0\n",
"y_test = load_data(os.path.join(data_folder, 'test-labels.gz'), True).reshape(-1)\n",
"\n",
"count = 0\n",
"sample_size = 30\n",

@@ -239,10 +243,10 @@
"outputs": [],
"source": [
"from azureml.core.dataset import Dataset\n",
"web_paths = ['http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',\n",
"             'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',\n",
"             'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',\n",
"             'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz'\n",
"web_paths = ['https://azureopendatastorage.blob.core.windows.net/mnist/train-images-idx3-ubyte.gz',\n",
"             'https://azureopendatastorage.blob.core.windows.net/mnist/train-labels-idx1-ubyte.gz',\n",
"             'https://azureopendatastorage.blob.core.windows.net/mnist/t10k-images-idx3-ubyte.gz',\n",
"             'https://azureopendatastorage.blob.core.windows.net/mnist/t10k-labels-idx1-ubyte.gz'\n",
"            ]\n",
"dataset = Dataset.File.from_files(path = web_paths)"
]
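If you also want the files on local disk, the resulting FileDataset can be materialized directly; a minimal sketch using FileDataset.download, mirroring the download pattern a later hunk in this comparison uses for the opendatasets MNIST:

```python
import os

data_folder = os.path.join(os.getcwd(), 'data')
os.makedirs(data_folder, exist_ok=True)

# Pull the four MNIST files referenced by web_paths into the local data folder.
dataset.download(target_path=data_folder, overwrite=True)
```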
@@ -945,7 +949,7 @@
"\n",
"cd = CondaDependencies.create()\n",
"cd.add_conda_package('numpy')\n",
"cd.add_tensorflow_conda_package()\n",
"cd.add_pip_package('tensorflow==1.13.1')\n",
"cd.add_pip_package(\"azureml-defaults\")\n",
"cd.save_to_file(base_directory='./', conda_file_path='myenv.yml')\n",
"\n",

@@ -968,7 +972,6 @@
"source": [
"from azureml.core.webservice import AciWebservice\n",
"from azureml.core.model import InferenceConfig\n",
"from azureml.core.webservice import Webservice\n",
"from azureml.core.model import Model\n",
"from azureml.core.environment import Environment\n",
"\n",

@@ -171,13 +171,17 @@
"outputs": [],
"source": [
"import urllib\n",
"data_folder = 'data'\n",
"os.makedirs(data_folder, exist_ok=True)\n",
"\n",
"os.makedirs('./data/mnist', exist_ok=True)\n",
"\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', filename = './data/mnist/train-images.gz')\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', filename = './data/mnist/train-labels.gz')\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename = './data/mnist/test-images.gz')\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename = './data/mnist/test-labels.gz')"
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/train-images-idx3-ubyte.gz',\n",
"                           filename=os.path.join(data_folder, 'train-images.gz'))\n",
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/train-labels-idx1-ubyte.gz',\n",
"                           filename=os.path.join(data_folder, 'train-labels.gz'))\n",
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-images-idx3-ubyte.gz',\n",
"                           filename=os.path.join(data_folder, 'test-images.gz'))\n",
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-labels-idx1-ubyte.gz',\n",
"                           filename=os.path.join(data_folder, 'test-labels.gz'))"
]
},
{

@@ -204,13 +208,13 @@
"source": [
"from utils import load_data\n",
"\n",
"# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster.\n",
"X_train = load_data('./data/mnist/train-images.gz', False) / 255.0\n",
"y_train = load_data('./data/mnist/train-labels.gz', True).reshape(-1)\n",
"\n",
"X_test = load_data('./data/mnist/test-images.gz', False) / 255.0\n",
"y_test = load_data('./data/mnist/test-labels.gz', True).reshape(-1)\n",
"# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the model converge faster.\n",
"X_train = load_data(os.path.join(data_folder, 'train-images.gz'), False) / 255.0\n",
"X_test = load_data(os.path.join(data_folder, 'test-images.gz'), False) / 255.0\n",
"y_train = load_data(os.path.join(data_folder, 'train-labels.gz'), True).reshape(-1)\n",
"y_test = load_data(os.path.join(data_folder, 'test-labels.gz'), True).reshape(-1)\n",
"\n",
"# now let's show some randomly chosen images from the training set.\n",
"count = 0\n",
"sample_size = 30\n",
"plt.figure(figsize = (16, 6))\n",

@@ -219,8 +223,8 @@
"    plt.subplot(1, sample_size, count)\n",
"    plt.axhline('')\n",
"    plt.axvline('')\n",
"    plt.text(x = 10, y = -10, s = y_train[i], fontsize = 18)\n",
"    plt.imshow(X_train[i].reshape(28, 28), cmap = plt.cm.Greys)\n",
"    plt.text(x=10, y=-10, s=y_train[i], fontsize=18)\n",
"    plt.imshow(X_train[i].reshape(28, 28), cmap=plt.cm.Greys)\n",
"plt.show()"
]
},

@@ -100,7 +100,7 @@
"\n",
"# Check core SDK version number\n",
"\n",
"print(\"This notebook was created using SDK version 1.1.5rc0, you are currently running version\", azureml.core.VERSION)"
"print(\"This notebook was created using SDK version 1.2.0, you are currently running version\", azureml.core.VERSION)"
]
},
{

@@ -157,10 +157,14 @@
"data_folder = os.path.join(os.getcwd(), 'data')\n",
"os.makedirs(data_folder, exist_ok=True)\n",
"\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', filename=os.path.join(data_folder, 'train-images.gz'))\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', filename=os.path.join(data_folder, 'train-labels.gz'))\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename=os.path.join(data_folder, 'test-images.gz'))\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename=os.path.join(data_folder, 'test-labels.gz'))"
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/train-images-idx3-ubyte.gz',\n",
"                           filename=os.path.join(data_folder, 'train-images.gz'))\n",
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/train-labels-idx1-ubyte.gz',\n",
"                           filename=os.path.join(data_folder, 'train-labels.gz'))\n",
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-images-idx3-ubyte.gz',\n",
"                           filename=os.path.join(data_folder, 'test-images.gz'))\n",
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-labels-idx1-ubyte.gz',\n",
"                           filename=os.path.join(data_folder, 'test-labels.gz'))"
]
},
{

@@ -227,12 +231,10 @@
"outputs": [],
"source": [
"from azureml.core.dataset import Dataset\n",
"\n",
"web_paths = [\n",
"    'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',\n",
"    'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',\n",
"    'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',\n",
"    'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz'\n",
"web_paths = ['https://azureopendatastorage.blob.core.windows.net/mnist/train-images-idx3-ubyte.gz',\n",
"             'https://azureopendatastorage.blob.core.windows.net/mnist/train-labels-idx1-ubyte.gz',\n",
"             'https://azureopendatastorage.blob.core.windows.net/mnist/t10k-images-idx3-ubyte.gz',\n",
"             'https://azureopendatastorage.blob.core.windows.net/mnist/t10k-labels-idx1-ubyte.gz'\n",
"            ]\n",
"dataset = Dataset.File.from_files(path = web_paths)"
]
@@ -149,6 +149,20 @@
"                                                  ssh_port=22, \n",
"                                                  username=os.environ.get('hdiusername', '<ssh_username>'), \n",
"                                                  password=os.environ.get('hdipassword', '<my_password>'))\n",
"\n",
"# The following Azure regions do not support attaching an HDI Cluster using the public IP address of the HDI Cluster.\n",
"# Instead, use the Azure Resource Manager ID of the HDI Cluster with the resource_id parameter:\n",
"# US East\n",
"# US West 2\n",
"# US South Central\n",
"# The resource ID of the HDI Cluster can be constructed using the\n",
"# subscription ID, resource group name, and cluster name using the following string format:\n",
"# /subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.HDInsight/clusters/<cluster_name>. \n",
"# If in US East, US West 2, or US South Central, use the following instead:\n",
"# attach_config = HDInsightCompute.attach_configuration(resource_id='<resource_id>',\n",
"#                                                       ssh_port=22,\n",
"#                                                       username=os.environ.get('hdiusername', '<ssh_username>'),\n",
"#                                                       password=os.environ.get('hdipassword', '<my_password>'))\n",
"    hdi_compute = ComputeTarget.attach(workspace=ws, \n",
"                                       name='myhdi', \n",
"                                       attach_configuration=attach_config)\n",
@@ -266,7 +266,23 @@
"                                                       ssh_port=22,\n",
"                                                       username=username,\n",
"                                                       private_key_file='./.ssh/id_rsa')\n",
"    attached_dsvm_compute = ComputeTarget.attach(workspace=ws,\n",
"\n",
"\n",
"# The following Azure regions do not support attaching a virtual machine using the public IP address of the VM.\n",
"# Instead, use the Azure Resource Manager ID of the VM with the resource_id parameter:\n",
"# US East\n",
"# US West 2\n",
"# US South Central\n",
"# The resource ID of the VM can be constructed using the\n",
"# subscription ID, resource group name, and VM name using the following string format:\n",
"# /subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/virtualMachines/<vm_name>. \n",
"# If in US East, US West 2, or US South Central, use the following instead:\n",
"# attach_config = RemoteCompute.attach_configuration(resource_id='<resource_id>',\n",
"#                                                    ssh_port=22,\n",
"#                                                    username='username',\n",
"#                                                    private_key_file='./.ssh/id_rsa')\n",
"\n",
"    attached_dsvm_compute = ComputeTarget.attach(workspace=ws,\n",
"                                                 name=compute_target_name,\n",
"                                                 attach_configuration=attach_config)\n",
"    attached_dsvm_compute.wait_for_completion(show_output=True)"

@@ -23,8 +23,8 @@
"\n",
"The detailed APIs to be demoed in this script are:\n",
"- Create Tabular Dataset instance\n",
"- Assign fine timestamp column and coarse timestamp column for Tabular Dataset to activate Time Series related APIs\n",
"- Clear fine timestamp column and coarse timestamp column\n",
"- Assign timestamp column and partition timestamp column for Tabular Dataset to activate Time Series related APIs\n",
"- Clear timestamp column and partition timestamp column\n",
"- Filter in data before a specific time\n",
"- Filter in data after a specific time\n",
"- Filter in data in a specific time range\n",
@@ -157,7 +157,7 @@
"source": [
"Create Tabular Dataset instance from blob storage datapath.\n",
"\n",
"**TIP:** you can set virtual columns in the partition_format. I.e. if you partition the weather data by state and city, the path can be '/{STATE}/{CITY}/{coarse_time:yyy/MM}/data.parquet'. STATE and CITY would then appear as virtual columns in the dataset, allowing for efficient filtering by these grains. "
"**TIP:** you can set virtual columns in the partition_format. E.g., if you partition the weather data by state and city, the path can be '/{STATE}/{CITY}/{partition_time:yyyy/MM}/data.parquet'. STATE and CITY would then appear as virtual columns in the dataset, allowing for efficient filtering by these timestamps. "
]
},
{

@@ -167,14 +167,14 @@
"outputs": [],
"source": [
"datastore_path = [(dstore, dset_name + '/*/*/data.parquet')]\n",
"dataset = Dataset.Tabular.from_parquet_files(path=datastore_path, partition_format = dset_name + '/{coarse_time:yyyy/MM}/data.parquet')"
"dataset = Dataset.Tabular.from_parquet_files(path=datastore_path, partition_format = dset_name + '/{partition_time:yyyy/MM}/data.parquet')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Assign fine timestamp column for Tabular Dataset to activate Time Series related APIs. The column to be assigned should be a Date type, otherwise the assigning will fail."
"Assign timestamp column for Tabular Dataset to activate Time Series related APIs. The column to be assigned should be a Date type, otherwise the assignment will fail."
]
},
{
@@ -183,8 +183,8 @@
"metadata": {},
"outputs": [],
"source": [
"# for this demo, leave out coarse_time so fine_grain_timestamp is used\n",
"tsd = dataset.with_timestamp_columns(fine_grain_timestamp='datetime') # coarse_grain_timestamp='coarse_time')"
"# for this demo, leave out partition_time so timestamp is used\n",
"tsd = dataset.with_timestamp_columns(timestamp='datetime') # partition_timestamp='partition_time')"
]
},
{
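The rename running through these hunks is mechanical; a minimal sketch of the old and new keyword arguments to with_timestamp_columns, as reflected in this diff:

```python
# Old parameter names (being removed in these hunks):
# tsd = dataset.with_timestamp_columns(fine_grain_timestamp='datetime',
#                                      coarse_grain_timestamp='partition_time')

# New parameter names: 'timestamp' replaces fine_grain_timestamp and
# 'partition_timestamp' replaces coarse_grain_timestamp.
tsd = dataset.with_timestamp_columns(timestamp='datetime',
                                     partition_timestamp='partition_time')
```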
@@ -280,7 +280,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"**NOTE:** You must set the coarse_grain_timestamp to None to filter on the fine_grain_timestamp. The below cell will fail unless the second line is uncommented "
"**NOTE:** You must set the partition_timestamp to None to filter on the timestamp. The cell below will fail unless the second line is uncommented. "
]
},
{
@@ -290,7 +290,7 @@
"outputs": [],
"source": [
"# select data that occurs within a given time range\n",
"#tsd = tsd.with_timestamp_columns(fine_grain_timestamp='datetime', coarse_grain_timestamp=None)\n",
"#tsd = tsd.with_timestamp_columns(timestamp='datetime', partition_timestamp=None)\n",
"tsd2 = tsd.time_after(datetime(2019, 1, 2)).time_before(datetime(2019, 1, 10))\n",
"tsd2.to_pandas_dataframe().head(5)"
]

@@ -371,9 +371,7 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"metadata": {},
"outputs": [],
"source": [
"tsd2 = tsd.drop_columns(columns=['snowDepth', 'version', 'datetime'])\n",

@@ -481,7 +479,7 @@
"metadata": {},
"outputs": [],
"source": [
"tsd2 = tsd.keep_columns(columns=['snowDepth', 'datetime', 'coarse_time'], validate=False)\n",
"tsd2 = tsd.keep_columns(columns=['snowDepth', 'datetime', 'partition_time'], validate=False)\n",
"tsd2.to_pandas_dataframe().tail()"
]
},
@@ -506,9 +504,9 @@
"metadata": {},
"source": [
"Rules for resetting are:\n",
"- You cannot assign 'None' to fine_grain_timestamp while assign a valid column name to coarse_grain_timestamp because coarse_grain_timestamp is optional while fine_grain_timestamp is mandatory for Tabular time series data.\n",
"- If you assign 'None' to fine_grain_timestamp, then both fine_grain_timestamp and coarse_grain_timestamp will all be cleared.\n",
"- If you assign only 'None' to coarse_grain_timestamp, then only coarse_grain_timestamp will be cleared."
"- You cannot assign 'None' to timestamp while assigning a valid column name to partition_timestamp, because partition_timestamp is optional while timestamp is mandatory for Tabular time series data.\n",
"- If you assign 'None' to timestamp, then both timestamp and partition_timestamp will be cleared.\n",
"- If you assign only 'None' to partition_timestamp, then only partition_timestamp will be cleared."
]
},
{
@@ -519,17 +517,17 @@
"source": [
"# Illegal clearing, exception is expected.\n",
"try:\n",
"    tsd2 = tsd.with_timestamp_columns(fine_grain_timestamp=None, coarse_grain_timestamp='coarse_time')\n",
"    tsd2 = tsd.with_timestamp_columns(timestamp=None, partition_timestamp='partition_time')\n",
"except Exception as e:\n",
"    print('Cleaning not allowed because {}'.format(str(e)))\n",
"\n",
"# clear both\n",
"tsd2 = tsd.with_timestamp_columns(fine_grain_timestamp=None, coarse_grain_timestamp=None)\n",
"tsd2 = tsd.with_timestamp_columns(timestamp=None, partition_timestamp=None)\n",
"print('after clean both with None/None, timestamp columns are: {}'.format(tsd2.timestamp_columns))\n",
"\n",
"# clear coarse_grain_timestamp only and assign 'datetime' as fine timestamp column\n",
"tsd2 = tsd2.with_timestamp_columns(fine_grain_timestamp='datetime', coarse_grain_timestamp=None)\n",
"print('after clean coarse timestamp column, timestamp columns are: {}'.format(tsd2.timestamp_columns))"
"# clear partition_timestamp only and assign 'datetime' as timestamp column\n",
"tsd2 = tsd2.with_timestamp_columns(timestamp='datetime', partition_timestamp=None)\n",
"print('after clean partition timestamp column, timestamp columns are: {}'.format(tsd2.timestamp_columns))"
]
},
{

@@ -102,7 +102,7 @@
"source": [
"import azureml.core\n",
"\n",
"print(\"This notebook was created using version 1.1.5rc0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.2.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -30,7 +30,9 @@
"\n",
"## Prerequisites\n",
"\n",
"See prerequisites in the [Azure Machine Learning documentation](https://docs.microsoft.com/azure/machine-learning/service/tutorial-train-models-with-aml#prerequisites)."
"See prerequisites in the [Azure Machine Learning documentation](https://docs.microsoft.com/azure/machine-learning/service/tutorial-train-models-with-aml#prerequisites).\n",
"\n",
"On the computer running this notebook, use conda to install matplotlib, numpy, and scikit-learn=0.22.1."
]
},
{
@@ -309,7 +311,7 @@
"import glob\n",
"\n",
"from sklearn.linear_model import LogisticRegression\n",
"from sklearn.externals import joblib\n",
"import joblib\n",
"\n",
"from azureml.core import Run\n",
"from utils import load_data\n",
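This import change tracks scikit-learn 0.22.1, where sklearn.externals.joblib is deprecated in favor of the standalone joblib package. A minimal sketch of the save/load calls a training script of this shape relies on; the model path is illustrative, not taken from the diff:

```python
import joblib

# Persist the trained estimator; only the import moved, the API is unchanged.
joblib.dump(clf, 'outputs/model.pkl')  # hypothetical output path

# Reload it later for scoring.
clf = joblib.load('outputs/model.pkl')
```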
@@ -397,15 +399,20 @@
"source": [
"### Create an estimator\n",
"\n",
"An estimator object is used to submit the run. Azure Machine Learning has pre-configured estimators for common machine learning frameworks, as well as generic Estimator. Create SKLearn estimator for scikit-learn model, by specifying\n",
"An estimator object is used to submit the run. Azure Machine Learning has pre-configured estimators for common machine learning frameworks, as well as a generic Estimator. Create an estimator by specifying:\n",
"\n",
"* The name of the estimator object, `est`\n",
"* The directory that contains your scripts. All the files in this directory are uploaded into the cluster nodes for execution. \n",
"* The compute target. In this case you will use the AmlCompute you created\n",
"* The training script name, train.py\n",
"* Parameters required from the training script \n",
"* An environment that contains the libraries needed to run the script\n",
"* Parameters required from the training script. \n",
"\n",
"In this tutorial, the target is AmlCompute. All files in the script folder are uploaded into the cluster nodes for execution. The data_folder is set to use the dataset."
"In this tutorial, the target is AmlCompute. All files in the script folder are uploaded into the cluster nodes for execution. The data_folder is set to use the dataset.\n",
"\n",
"First, create the environment that contains: the scikit-learn library, azureml-dataprep required for accessing the dataset, and azureml-defaults which contains the dependencies for logging metrics. The azureml-defaults also contains the dependencies required for deploying the model as a web service later in part 2 of the tutorial.\n",
"\n",
"Once the environment is defined, register it with the Workspace to re-use it in part 2 of the tutorial."
]
},
{
@@ -418,10 +425,20 @@
"from azureml.core.conda_dependencies import CondaDependencies\n",
"\n",
"# to install required packages\n",
"env = Environment('my_env')\n",
"cd = CondaDependencies.create(pip_packages=['azureml-sdk','scikit-learn==0.22.1','azureml-dataprep[pandas,fuse]>=1.1.14'])\n",
"env = Environment('tutorial-env')\n",
"cd = CondaDependencies.create(pip_packages=['azureml-dataprep[pandas,fuse]>=1.1.14', 'azureml-defaults'], conda_packages = ['scikit-learn==0.22.1'])\n",
"\n",
"env.python.conda_dependencies = cd"
"env.python.conda_dependencies = cd\n",
"\n",
"# Register environment to re-use later\n",
"env.register(workspace = ws)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Then, create the estimator by specifying the training script, compute target and environment."
]
},
{
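Registering the environment is what lets part 2 of the tutorial pick it up again without redefining it; a minimal sketch of the retrieval side, matching the Environment.get call that appears in a later hunk of this comparison:

```python
from azureml.core.environment import Environment

# Fetch the environment registered above by name (and, optionally, a pinned version).
env = Environment.get(workspace=ws, name="tutorial-env", version="1")
```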
@@ -434,7 +451,7 @@
},
"outputs": [],
"source": [
"from azureml.train.sklearn import SKLearn\n",
"from azureml.train.estimator import Estimator\n",
"\n",
"script_params = {\n",
"    # to mount files referenced by mnist dataset\n",

@@ -442,7 +459,7 @@
"    '--regularization': 0.5\n",
"}\n",
"\n",
"est = SKLearn(source_directory=script_folder,\n",
"est = Estimator(source_directory=script_folder,\n",
"                script_params=script_params,\n",
"                compute_target=compute_target,\n",
"                environment_definition=env,\n",

@@ -667,7 +684,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
"version": "3.7.6"
},
"msauthor": "roastala"
},

@@ -159,16 +159,14 @@
"metadata": {},
"outputs": [],
"source": [
"# download test data\n",
"import os\n",
"import urllib.request\n",
"from azureml.core import Dataset\n",
"from azureml.opendatasets import MNIST\n",
"\n",
"data_folder = os.path.join(os.getcwd(), 'data')\n",
"os.makedirs(data_folder, exist_ok = True)\n",
"os.makedirs(data_folder, exist_ok=True)\n",
"\n",
"\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename=os.path.join(data_folder, 'test-images.gz'))\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename=os.path.join(data_folder, 'test-labels.gz'))"
"mnist_file_dataset = MNIST.get_file_dataset()\n",
"mnist_file_dataset.download(data_folder, overwrite=True)"
]
},
{
@@ -191,8 +189,8 @@
"\n",
"data_folder = os.path.join(os.getcwd(), 'data')\n",
"# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster\n",
"X_test = load_data(os.path.join(data_folder, 'test-images.gz'), False) / 255.0\n",
"y_test = load_data(os.path.join(data_folder, 'test-labels.gz'), True).reshape(-1)"
"X_test = load_data(os.path.join(data_folder, 't10k-images-idx3-ubyte.gz'), False) / 255.0\n",
"y_test = load_data(os.path.join(data_folder, 't10k-labels-idx1-ubyte.gz'), True).reshape(-1)"
]
},
{

@@ -348,7 +346,7 @@
"from azureml.core.conda_dependencies import CondaDependencies \n",
"\n",
"myenv = CondaDependencies()\n",
"myenv.add_pip_package(\"scikit-learn==0.22.1\")\n",
"myenv.add_conda_package(\"scikit-learn==0.22.1\")\n",
"myenv.add_pip_package(\"azureml-defaults\")\n",
"\n",
"with open(\"myenv.yml\",\"w\") as f:\n",

@@ -405,7 +403,7 @@
"metadata": {},
"source": [
"### Deploy in ACI\n",
"Estimated time to complete: **about 7-8 minutes**\n",
"Estimated time to complete: **about 2-5 minutes**\n",
"\n",
"Configure the image and deploy. The following code goes through these steps:\n",
"\n",

@@ -436,7 +434,7 @@
"from azureml.core.environment import Environment\n",
"\n",
"\n",
"myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\n",
"myenv = Environment.get(workspace=ws, name=\"tutorial-env\", version=\"1\")\n",
"inference_config = InferenceConfig(entry_script=\"score.py\", environment=myenv)\n",
"\n",
"service = Model.deploy(workspace=ws, \n",

@@ -637,7 +635,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
"version": "3.7.6"
},
"msauthor": "sgilley"
},

@@ -4,3 +4,5 @@ dependencies:
- azureml-sdk
- matplotlib
- sklearn
- pandas
- azureml-opendatasets