Compare commits

...

2 Commits

Author: vizhur
SHA1: d3f1212440
Message: update samples from Release-43 as a part of SDK release
Date: 2020-03-23 23:39:45 +00:00

Author: Harneet Virk
SHA1: b95a65eef4
Message: Merge pull request #883 from Azure/release_update_stablev2/Release-3
         update samples from Release-3 as a part of 1.2.0 SDK stable release
Date: 2020-03-23 16:21:53 -07:00
16 changed files with 35 additions and 88 deletions

View File

@@ -5,7 +5,6 @@ dependencies:
   - azureml-train-automl
   - azureml-widgets
   - matplotlib
-  - interpret
   - onnxruntime==1.0.0
   - azureml-explain-model
   - azureml-contrib-interpret

View File

@@ -122,35 +122,22 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.core.compute import AmlCompute\n", "from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute import ComputeTarget\n", "from azureml.core.compute_target import ComputeTargetException\n",
"\n", "\n",
"# Choose a name for your AmlCompute cluster.\n", "# Choose a name for your CPU cluster\n",
"amlcompute_cluster_name = \"cpu-cluster-1\"\n", "cpu_cluster_name = \"cpu-cluster-1\"\n",
"\n", "\n",
"found = False\n", "# Verify that cluster does not exist already\n",
"# Check if this compute target already exists in the workspace.\n", "try:\n",
"cts = ws.compute_targets\n", " compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
"if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'cpu-cluster-1':\n", " print('Found existing cluster, use it.')\n",
" found = True\n", "except ComputeTargetException:\n",
" print('Found existing compute target.')\n", " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',\n",
" compute_target = cts[amlcompute_cluster_name]\n",
" \n",
"if not found:\n",
" print('Creating a new compute target...')\n",
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_DS12_V2\", # for GPU, use \"STANDARD_NC6\"\n",
" #vm_priority = 'lowpriority', # optional\n",
" max_nodes=6)\n", " max_nodes=6)\n",
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"\n", "\n",
" # Create the cluster.\n", "compute_target.wait_for_completion(show_output=True)"
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config)\n",
" \n",
"print('Checking cluster status...')\n",
"# Can poll for a minimum number of nodes and for a specific timeout.\n",
"# If no min_node_count is provided, it will use the scale settings for the cluster.\n",
"compute_target.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20)\n",
"\n",
"# For a more detailed view of current AmlCompute status, use get_status()."
] ]
}, },
{ {
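
For reference, the new-side cell above reads more easily outside its JSON encoding. Below is the same provisioning pattern as plain Python, a minimal sketch that assumes an azureml.core.Workspace object ws already exists (for example from Workspace.from_config()); the cluster name and VM size are the values used in the diff.

    from azureml.core.compute import ComputeTarget, AmlCompute
    from azureml.core.compute_target import ComputeTargetException

    # Choose a name for the CPU cluster.
    cpu_cluster_name = "cpu-cluster-1"

    # Reuse the cluster if it already exists in the workspace,
    # otherwise provision a new AmlCompute cluster and wait for it.
    try:
        compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)
        print('Found existing cluster, use it.')
    except ComputeTargetException:
        compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',
                                                               max_nodes=6)
        compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)

    compute_target.wait_for_completion(show_output=True)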

View File

@@ -5,5 +5,4 @@ dependencies:
   - azureml-train-automl
   - azureml-widgets
   - matplotlib
-  - interpret
   - azureml-explain-model

View File

@@ -1,10 +1,9 @@
 name: auto-ml-forecasting-beer-remote
 dependencies:
-- fbprophet==0.5
-- numpy==1.16.2
 - py-xgboost<=0.90
 - pip:
   - azureml-sdk
+  - numpy==1.16.2
   - azureml-train-automl
   - azureml-widgets
   - matplotlib

View File

@@ -1,10 +1,9 @@
 name: auto-ml-forecasting-bike-share
 dependencies:
-- fbprophet==0.5
-- numpy==1.16.2
 - py-xgboost<=0.90
 - pip:
   - azureml-sdk
+  - numpy==1.16.2
   - azureml-train-automl
   - azureml-widgets
   - matplotlib

View File

@@ -2,9 +2,9 @@ name: auto-ml-forecasting-energy-demand
 dependencies:
 - pip:
   - azureml-sdk
+  - numpy==1.16.2
   - azureml-train-automl
   - azureml-widgets
   - matplotlib
-  - interpret
   - azureml-explain-model
   - azureml-contrib-interpret

View File

@@ -1,10 +1,9 @@
 name: auto-ml-forecasting-function
 dependencies:
-- fbprophet==0.5
-- numpy==1.16.2
 - py-xgboost<=0.90
 - pip:
   - azureml-sdk
+  - numpy==1.16.2
   - azureml-train-automl
   - azureml-widgets
   - matplotlib

View File

@@ -1,10 +1,9 @@
 name: auto-ml-forecasting-orange-juice-sales
 dependencies:
-- fbprophet==0.5
-- numpy==1.16.2
 - py-xgboost<=0.90
 - pip:
   - azureml-sdk
+  - numpy==1.16.2
   - pandas==0.23.4
   - azureml-train-automl
   - azureml-widgets

View File

@@ -49,8 +49,8 @@
"2. Configure AutoML using `AutoMLConfig`.\n", "2. Configure AutoML using `AutoMLConfig`.\n",
"3. Train the model.\n", "3. Train the model.\n",
"4. Explore the results.\n", "4. Explore the results.\n",
"5. Visualization model's feature importance in widget\n", "5. Visualization model's feature importance in azure portal\n",
"6. Explore any model's explanation\n", "6. Explore any model's explanation and explore feature importance in azure portal\n",
"7. Test the fitted model." "7. Test the fitted model."
] ]
}, },
@@ -272,7 +272,7 @@
"Retrieve the explanation from the best_run which includes explanations for engineered features and raw features.\n", "Retrieve the explanation from the best_run which includes explanations for engineered features and raw features.\n",
"\n", "\n",
"#### Download engineered feature importance from artifact store\n", "#### Download engineered feature importance from artifact store\n",
"You can use ExplanationClient to download the engineered feature explanations from the artifact store of the best_run." "You can use ExplanationClient to download the engineered feature explanations from the artifact store of the best_run. You can also use azure portal url to view the dash board visualization of the feature importance values of the engineered features."
] ]
}, },
{ {
@@ -283,7 +283,8 @@
"source": [ "source": [
"client = ExplanationClient.from_run(best_run)\n", "client = ExplanationClient.from_run(best_run)\n",
"engineered_explanations = client.download_model_explanation(raw=False)\n", "engineered_explanations = client.download_model_explanation(raw=False)\n",
"print(engineered_explanations.get_feature_importance_dict())" "print(engineered_explanations.get_feature_importance_dict())\n",
"print(\"You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + best_run.get_portal_url())"
] ]
}, },
{ {
@@ -376,7 +377,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"#### Use Mimic Explainer for computing and visualizing engineered feature importance\n", "#### Use Mimic Explainer for computing and visualizing engineered feature importance\n",
"The explain() method in MimicWrapper can be called with the transformed test samples to get the feature importance for the generated engineered features." "The explain() method in MimicWrapper can be called with the transformed test samples to get the feature importance for the generated engineered features. You can also use azure portal url to view the dash board visualization of the feature importance values of the engineered features."
] ]
}, },
{ {
@@ -386,7 +387,8 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"engineered_explanations = explainer.explain(['local', 'global'], eval_dataset=automl_explainer_setup_obj.X_test_transform)\n", "engineered_explanations = explainer.explain(['local', 'global'], eval_dataset=automl_explainer_setup_obj.X_test_transform)\n",
"print(engineered_explanations.get_feature_importance_dict())\n" "print(engineered_explanations.get_feature_importance_dict())\n",
"print(\"You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + automl_run.get_portal_url())"
] ]
}, },
{ {
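
Taken together, the changes in this file replace the widget-based view with a link to the Azure portal dashboard. The pattern added by these hunks, flattened out of the notebook JSON into plain Python, looks roughly like the sketch below; it assumes best_run is the best run retrieved earlier in the notebook from the AutoML experiment and that the azureml explain packages listed in the accompanying yml files are installed.

    from azureml.explain.model._internal.explanation_client import ExplanationClient

    # Download the engineered-feature explanations stored with the best run,
    # then print the portal URL whose 'Explanations (preview)' tab shows the dashboard.
    client = ExplanationClient.from_run(best_run)
    engineered_explanations = client.download_model_explanation(raw=False)
    print(engineered_explanations.get_feature_importance_dict())
    print("You can visualize the engineered explanations under the 'Explanations (preview)' "
          "tab in the AutoML run at:\n" + best_run.get_portal_url())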

View File

@@ -5,5 +5,4 @@ dependencies:
   - azureml-train-automl
   - azureml-widgets
   - matplotlib
-  - interpret
   - azureml-explain-model

View File

@@ -51,8 +51,8 @@
"4. Explore the results and featurization transparency options\n", "4. Explore the results and featurization transparency options\n",
"5. Setup remote compute for computing the model explanations for a given AutoML model.\n", "5. Setup remote compute for computing the model explanations for a given AutoML model.\n",
"6. Start an AzureML experiment on your remote compute to compute explanations for an AutoML model.\n", "6. Start an AzureML experiment on your remote compute to compute explanations for an AutoML model.\n",
"7. Download the feature importance for engineered features and visualize the explanations for engineered features. \n", "7. Download the feature importance for engineered features and visualize the explanations for engineered features on azure portal. \n",
"8. Download the feature importance for raw features and visualize the explanations for raw features. \n" "8. Download the feature importance for raw features and visualize the explanations for raw features on azure portal. \n"
] ]
}, },
{ {
@@ -598,38 +598,8 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"### Feature importance and explanation dashboard\n", "### Feature importance and visualizing explanation dashboard\n",
"In this section we describe how you can download the explanation results from the explanations experiment and visualize the feature importance for your AutoML model. " "In this section we describe how you can download the explanation results from the explanations experiment and visualize the feature importance for your AutoML model on the azure portal."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Setup for visualizing the model explanation results\n",
"For visualizing the explanation results for the *fitted_model* we need to perform the following steps:-\n",
"1. Featurize test data samples.\n",
"\n",
"The *automl_explainer_setup_obj* contains all the structures from above list. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_test = test_data.drop_columns([label]).to_pandas_dataframe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.automl.runtime.automl_explain_utilities import AutoMLExplainerSetupClass, automl_setup_model_explanations\n",
"explainer_setup_class = automl_setup_model_explanations(fitted_model, 'regression', X_test=X_test)"
] ]
}, },
{ {
@@ -637,7 +607,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"#### Download engineered feature importance from artifact store\n", "#### Download engineered feature importance from artifact store\n",
"You can use *ExplanationClient* to download the engineered feature explanations from the artifact store of the *automl_run*. You can also use ExplanationDashboard to view the dash board visualization of the feature importance values of the engineered features." "You can use *ExplanationClient* to download the engineered feature explanations from the artifact store of the *automl_run*. You can also use azure portal url to view the dash board visualization of the feature importance values of the engineered features."
] ]
}, },
{ {
@@ -647,11 +617,10 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"from azureml.explain.model._internal.explanation_client import ExplanationClient\n", "from azureml.explain.model._internal.explanation_client import ExplanationClient\n",
"from interpret_community.widget import ExplanationDashboard\n",
"client = ExplanationClient.from_run(automl_run)\n", "client = ExplanationClient.from_run(automl_run)\n",
"engineered_explanations = client.download_model_explanation(raw=False)\n", "engineered_explanations = client.download_model_explanation(raw=False)\n",
"print(engineered_explanations.get_feature_importance_dict())\n", "print(engineered_explanations.get_feature_importance_dict())\n",
"ExplanationDashboard(engineered_explanations, explainer_setup_class.automl_estimator, datasetX=explainer_setup_class.X_test_transform)" "print(\"You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + automl_run.get_portal_url())"
] ]
}, },
{ {
@@ -659,7 +628,7 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"#### Download raw feature importance from artifact store\n", "#### Download raw feature importance from artifact store\n",
"You can use *ExplanationClient* to download the raw feature explanations from the artifact store of the *automl_run*. You can also use ExplanationDashboard to view the dash board visualization of the feature importance values of the raw features." "You can use *ExplanationClient* to download the raw feature explanations from the artifact store of the *automl_run*. You can also use azure portal url to view the dash board visualization of the feature importance values of the raw features."
] ]
}, },
{ {
@@ -670,7 +639,7 @@
"source": [ "source": [
"raw_explanations = client.download_model_explanation(raw=True)\n", "raw_explanations = client.download_model_explanation(raw=True)\n",
"print(raw_explanations.get_feature_importance_dict())\n", "print(raw_explanations.get_feature_importance_dict())\n",
"ExplanationDashboard(raw_explanations, explainer_setup_class.automl_pipeline, datasetX=explainer_setup_class.X_test_raw)" "print(\"You can visualize the raw explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + automl_run.get_portal_url())"
] ]
}, },
{ {
@@ -803,6 +772,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"if service.state == 'Healthy':\n", "if service.state == 'Healthy':\n",
" X_test = test_data.drop_columns([label]).to_pandas_dataframe()\n",
" # Serialize the first row of the test data into json\n", " # Serialize the first row of the test data into json\n",
" X_test_json = X_test[:1].to_json(orient='records')\n", " X_test_json = X_test[:1].to_json(orient='records')\n",
" print(X_test_json)\n", " print(X_test_json)\n",

View File

@@ -5,7 +5,6 @@ dependencies:
   - azureml-train-automl
   - azureml-widgets
   - matplotlib
-  - interpret
   - azureml-explain-model
   - azureml-explain-model
   - azureml-contrib-interpret

View File

@@ -2,7 +2,6 @@ name: explain-model-on-amlcompute
 dependencies:
 - pip:
   - azureml-sdk
-  - interpret
   - azureml-interpret
   - azureml-contrib-interpret
   - sklearn-pandas

View File

@@ -2,7 +2,6 @@ name: save-retrieve-explanations-run-history
 dependencies:
 - pip:
   - azureml-sdk
-  - interpret
   - azureml-interpret
   - azureml-contrib-interpret
   - ipywidgets

View File

@@ -2,7 +2,6 @@ name: train-explain-model-locally-and-deploy
 dependencies:
 - pip:
   - azureml-sdk
-  - interpret
   - azureml-interpret
   - azureml-contrib-interpret
   - sklearn-pandas

View File

@@ -2,7 +2,6 @@ name: train-explain-model-on-amlcompute-and-deploy
 dependencies:
 - pip:
   - azureml-sdk
-  - interpret
   - azureml-interpret
   - azureml-contrib-interpret
   - sklearn-pandas