diff --git a/configuration.ipynb b/configuration.ipynb index 185b0add..761aa8fd 100644 --- a/configuration.ipynb +++ b/configuration.ipynb @@ -103,7 +103,7 @@ "source": [ "import azureml.core\n", "\n", - "print(\"This notebook was created using version 1.1.5 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.2.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/automl_env.yml b/how-to-use-azureml/automated-machine-learning/automl_env.yml index 3a77d177..569f205e 100644 --- a/how-to-use-azureml/automated-machine-learning/automl_env.yml +++ b/how-to-use-azureml/automated-machine-learning/automl_env.yml @@ -13,7 +13,7 @@ dependencies: - scipy>=1.0.0,<=1.1.0 - scikit-learn>=0.19.0,<=0.20.3 - pandas>=0.22.0,<=0.23.4 -- py-xgboost<=0.80 +- py-xgboost<=0.90 - fbprophet==0.5 - pytorch=1.1.0 - cudatoolkit=9.0 @@ -33,5 +33,6 @@ dependencies: - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz channels: +- anaconda - conda-forge -- pytorch \ No newline at end of file +- pytorch diff --git a/how-to-use-azureml/automated-machine-learning/automl_env_mac.yml b/how-to-use-azureml/automated-machine-learning/automl_env_mac.yml index bfd8b358..1027dc27 100644 --- a/how-to-use-azureml/automated-machine-learning/automl_env_mac.yml +++ b/how-to-use-azureml/automated-machine-learning/automl_env_mac.yml @@ -34,5 +34,6 @@ dependencies: - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz channels: +- anaconda - conda-forge - pytorch \ No newline at end of file diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.yml b/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.yml index b859d781..1a0a93ff 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.yml +++ b/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.yml @@ -1,10 +1,10 @@ name: auto-ml-forecasting-beer-remote dependencies: - fbprophet==0.5 -- py-xgboost<=0.80 +- numpy==1.16.2 +- py-xgboost<=0.90 - pip: - azureml-sdk - - numpy==1.16.2 - azureml-train-automl - azureml-widgets - matplotlib diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.yml b/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.yml index cde6b45d..d8ad2181 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.yml +++ b/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.yml @@ -1,10 +1,10 @@ name: auto-ml-forecasting-bike-share dependencies: - fbprophet==0.5 -- py-xgboost<=0.80 +- numpy==1.16.2 +- py-xgboost<=0.90 - pip: - azureml-sdk - - numpy==1.16.2 - azureml-train-automl - azureml-widgets - matplotlib diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.yml b/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.yml index 0c28e055..8c42187c 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.yml +++ 
b/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.yml @@ -2,7 +2,6 @@ name: auto-ml-forecasting-energy-demand dependencies: - pip: - azureml-sdk - - numpy==1.16.2 - azureml-train-automl - azureml-widgets - matplotlib diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.yml b/how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.yml index adc5dc69..a46c0195 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.yml +++ b/how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.yml @@ -1,10 +1,10 @@ name: auto-ml-forecasting-function dependencies: - fbprophet==0.5 -- py-xgboost<=0.80 +- numpy==1.16.2 +- py-xgboost<=0.90 - pip: - azureml-sdk - - numpy==1.16.2 - azureml-train-automl - azureml-widgets - matplotlib diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.yml b/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.yml index 8aa9aa5f..cec567dd 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.yml +++ b/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.yml @@ -1,10 +1,10 @@ name: auto-ml-forecasting-orange-juice-sales dependencies: - fbprophet==0.5 -- py-xgboost<=0.80 +- numpy==1.16.2 +- py-xgboost<=0.90 - pip: - azureml-sdk - - numpy==1.16.2 - pandas==0.23.4 - azureml-train-automl - azureml-widgets diff --git a/how-to-use-azureml/automated-machine-learning/local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb b/how-to-use-azureml/automated-machine-learning/local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb index 16eebfae..9d41f3a3 100644 --- a/how-to-use-azureml/automated-machine-learning/local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb +++ b/how-to-use-azureml/automated-machine-learning/local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb @@ -49,7 +49,9 @@ "2. Configure AutoML using `AutoMLConfig`.\n", "3. Train the model.\n", "4. Explore the results.\n", - "5. Test the fitted model." + "5. Visualization model's feature importance in widget\n", + "6. Explore any model's explanation\n", + "7. Test the fitted model." 
] }, { @@ -71,13 +73,13 @@ "\n", "from matplotlib import pyplot as plt\n", "import pandas as pd\n", - "import os\n", "\n", "import azureml.core\n", "from azureml.core.experiment import Experiment\n", "from azureml.core.workspace import Workspace\n", "from azureml.core.dataset import Dataset\n", - "from azureml.train.automl import AutoMLConfig" + "from azureml.train.automl import AutoMLConfig\n", + "from azureml.explain.model._internal.explanation_client import ExplanationClient" ] }, { @@ -262,6 +264,131 @@ "The fitted_model is a python object and you can read the different properties of the object.\n" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Best Model 's explanation\n", + "Retrieve the explanation from the best_run which includes explanations for engineered features and raw features.\n", + "\n", + "#### Download engineered feature importance from artifact store\n", + "You can use ExplanationClient to download the engineered feature explanations from the artifact store of the best_run." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "client = ExplanationClient.from_run(best_run)\n", + "engineered_explanations = client.download_model_explanation(raw=False)\n", + "print(engineered_explanations.get_feature_importance_dict())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Explanations\n", + "In this section, we will show how to compute model explanations and visualize the explanations using azureml-explain-model package. Besides retrieving an existing model explanation for an AutoML model, you can also explain your AutoML model with different test data. The following steps will allow you to compute and visualize engineered feature importance based on your test data." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Retrieve any other AutoML model from training" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "automl_run, fitted_model = local_run.get_output(metric='accuracy')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Setup the model explanations for AutoML models\n", + "The fitted_model can generate the following which will be used for getting the engineered explanations using automl_setup_model_explanations:-\n", + "\n", + "1. Featurized data from train samples/test samples\n", + "2. Gather engineered name lists\n", + "3. Find the classes in your labeled column in classification scenarios\n", + "\n", + "The automl_explainer_setup_obj contains all the structures from above list." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "X_train = training_data.drop_columns(columns=[label_column_name])\n", + "y_train = training_data.keep_columns(columns=[label_column_name], validate=True)\n", + "X_test = validation_data.drop_columns(columns=[label_column_name])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations\n", + "\n", + "automl_explainer_setup_obj = automl_setup_model_explanations(fitted_model, X=X_train, \n", + " X_test=X_test, y=y_train, \n", + " task='classification')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Initialize the Mimic Explainer for feature importance\n", + "For explaining the AutoML models, use the MimicWrapper from azureml.explain.model package. The MimicWrapper can be initialized with fields in automl_explainer_setup_obj, your workspace and a LightGBM model which acts as a surrogate model to explain the AutoML model (fitted_model here). The MimicWrapper also takes the automl_run object where engineered explanations will be uploaded." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.explain.model.mimic.models.lightgbm_model import LGBMExplainableModel\n", + "from azureml.explain.model.mimic_wrapper import MimicWrapper\n", + "explainer = MimicWrapper(ws, automl_explainer_setup_obj.automl_estimator, LGBMExplainableModel, \n", + " init_dataset=automl_explainer_setup_obj.X_transform, run=automl_run,\n", + " features=automl_explainer_setup_obj.engineered_feature_names, \n", + " feature_maps=[automl_explainer_setup_obj.feature_map],\n", + " classes=automl_explainer_setup_obj.classes)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Use Mimic Explainer for computing and visualizing engineered feature importance\n", + "The explain() method in MimicWrapper can be called with the transformed test samples to get the feature importance for the generated engineered features." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "engineered_explanations = explainer.explain(['local', 'global'], eval_dataset=automl_explainer_setup_obj.X_test_transform)\n", + "print(engineered_explanations.get_feature_importance_dict())\n" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -358,7 +485,7 @@ "metadata": { "authors": [ { - "name": "tzvikei" + "name": "anumamah" } ], "category": "tutorial", diff --git a/how-to-use-azureml/ml-frameworks/chainer/deployment/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.ipynb b/how-to-use-azureml/ml-frameworks/chainer/deployment/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.ipynb index 6d4c30a7..a5ad0e1a 100644 --- a/how-to-use-azureml/ml-frameworks/chainer/deployment/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.ipynb +++ b/how-to-use-azureml/ml-frameworks/chainer/deployment/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.ipynb @@ -507,7 +507,7 @@ "metadata": {}, "source": [ "### Create myenv.yml\n", - "We also need to create an environment file so that Azure Machine Learning can install the necessary packages in the Docker image which are required by your scoring script. In this case, we need to specify conda packages `numpy` and `chainer`. Please note that you must indicate azureml-defaults with verion >= 1.0.45 as a pip dependency, because it contains the functionality needed to host the model as a web service." + "We also need to create an environment file so that Azure Machine Learning can install the necessary packages in the Docker image which are required by your scoring script. In this case, we need to specify conda package `numpy` and pip install `chainer`. Please note that you must indicate azureml-defaults with verion >= 1.0.45 as a pip dependency, because it contains the functionality needed to host the model as a web service." ] }, { @@ -520,7 +520,7 @@ "\n", "cd = CondaDependencies.create()\n", "cd.add_conda_package('numpy')\n", - "cd.add_conda_package('chainer')\n", + "cd.add_pip_package('chainer==5.1.0')\n", "cd.add_pip_package(\"azureml-defaults\")\n", "cd.save_to_file(base_directory='./', conda_file_path='myenv.yml')\n", "\n", diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/deployment/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb b/how-to-use-azureml/ml-frameworks/tensorflow/deployment/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb index f35b534c..1d4f2de7 100644 --- a/how-to-use-azureml/ml-frameworks/tensorflow/deployment/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb +++ b/how-to-use-azureml/ml-frameworks/tensorflow/deployment/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb @@ -161,7 +161,7 @@ }, "source": [ "## Download MNIST dataset\n", - "In order to train on the MNIST dataset we will first need to download it from Yan LeCun's web site directly and save them in a `data` folder locally." + "In order to train on the MNIST dataset we will first need to download it from azuremlopendatasets blob directly and save them in a `data` folder locally. If you want you can directly download the same data from Yan LeCun's web site." 
] }, { @@ -171,13 +171,17 @@ "outputs": [], "source": [ "import urllib\n", + "data_folder = 'data'\n", + "os.makedirs(data_folder, exist_ok=True)\n", "\n", - "os.makedirs('./data/mnist', exist_ok=True)\n", - "\n", - "urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', filename = './data/mnist/train-images.gz')\n", - "urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', filename = './data/mnist/train-labels.gz')\n", - "urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename = './data/mnist/test-images.gz')\n", - "urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename = './data/mnist/test-labels.gz')" + "urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/train-images-idx3-ubyte.gz',\n", + " filename=os.path.join(data_folder, 'train-images.gz'))\n", + "urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/train-labels-idx1-ubyte.gz',\n", + " filename=os.path.join(data_folder, 'train-labels.gz'))\n", + "urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-images-idx3-ubyte.gz',\n", + " filename=os.path.join(data_folder, 'test-images.gz'))\n", + "urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-labels-idx1-ubyte.gz',\n", + " filename=os.path.join(data_folder, 'test-labels.gz'))" ] }, { @@ -205,11 +209,11 @@ "from utils import load_data\n", "\n", "# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster.\n", - "X_train = load_data('./data/mnist/train-images.gz', False) / 255.0\n", - "y_train = load_data('./data/mnist/train-labels.gz', True).reshape(-1)\n", + "X_train = load_data(os.path.join(data_folder, 'train-images.gz'), False) / 255.0\n", + "y_train = load_data(os.path.join(data_folder, 'train-labels.gz'), True).reshape(-1)\n", "\n", - "X_test = load_data('./data/mnist/test-images.gz', False) / 255.0\n", - "y_test = load_data('./data/mnist/test-labels.gz', True).reshape(-1)\n", + "X_test = load_data(os.path.join(data_folder, 'test-images.gz'), False) / 255.0\n", + "y_test = load_data(os.path.join(data_folder, 'test-labels.gz'), True).reshape(-1)\n", "\n", "count = 0\n", "sample_size = 30\n", @@ -239,10 +243,10 @@ "outputs": [], "source": [ "from azureml.core.dataset import Dataset\n", - "web_paths = ['http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',\n", - " 'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',\n", - " 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',\n", - " 'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz'\n", + "web_paths = ['https://azureopendatastorage.blob.core.windows.net/mnist/train-images-idx3-ubyte.gz',\n", + " 'https://azureopendatastorage.blob.core.windows.net/mnist/train-labels-idx1-ubyte.gz',\n", + " 'https://azureopendatastorage.blob.core.windows.net/mnist/t10k-images-idx3-ubyte.gz',\n", + " 'https://azureopendatastorage.blob.core.windows.net/mnist/t10k-labels-idx1-ubyte.gz'\n", " ]\n", "dataset = Dataset.File.from_files(path = web_paths)" ] @@ -945,7 +949,7 @@ "\n", "cd = CondaDependencies.create()\n", "cd.add_conda_package('numpy')\n", - "cd.add_tensorflow_conda_package()\n", + "cd.add_pip_package('tensorflow==1.13.1')\n", "cd.add_pip_package(\"azureml-defaults\")\n", "cd.save_to_file(base_directory='./', conda_file_path='myenv.yml')\n", "\n", @@ -968,7 +972,6 @@ 
"source": [ "from azureml.core.webservice import AciWebservice\n", "from azureml.core.model import InferenceConfig\n", - "from azureml.core.webservice import Webservice\n", "from azureml.core.model import Model\n", "from azureml.core.environment import Environment\n", "\n", diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/training/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb b/how-to-use-azureml/ml-frameworks/tensorflow/training/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb index 4539c436..cddc5d42 100644 --- a/how-to-use-azureml/ml-frameworks/tensorflow/training/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb +++ b/how-to-use-azureml/ml-frameworks/tensorflow/training/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb @@ -171,13 +171,17 @@ "outputs": [], "source": [ "import urllib\n", + "data_folder = 'data'\n", + "os.makedirs(data_folder, exist_ok=True)\n", "\n", - "os.makedirs('./data/mnist', exist_ok=True)\n", - "\n", - "urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', filename = './data/mnist/train-images.gz')\n", - "urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', filename = './data/mnist/train-labels.gz')\n", - "urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename = './data/mnist/test-images.gz')\n", - "urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename = './data/mnist/test-labels.gz')" + "urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/train-images-idx3-ubyte.gz',\n", + " filename=os.path.join(data_folder, 'train-images.gz'))\n", + "urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/train-labels-idx1-ubyte.gz',\n", + " filename=os.path.join(data_folder, 'train-labels.gz'))\n", + "urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-images-idx3-ubyte.gz',\n", + " filename=os.path.join(data_folder, 'test-images.gz'))\n", + "urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-labels-idx1-ubyte.gz',\n", + " filename=os.path.join(data_folder, 'test-labels.gz'))" ] }, { @@ -204,13 +208,13 @@ "source": [ "from utils import load_data\n", "\n", - "# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster.\n", - "X_train = load_data('./data/mnist/train-images.gz', False) / 255.0\n", - "y_train = load_data('./data/mnist/train-labels.gz', True).reshape(-1)\n", - "\n", - "X_test = load_data('./data/mnist/test-images.gz', False) / 255.0\n", - "y_test = load_data('./data/mnist/test-labels.gz', True).reshape(-1)\n", + "# note we also shrink the intensity values (X) from 0-255 to 0-1. 
This helps the model converge faster.\n", + "X_train = load_data(os.path.join(data_folder, 'train-images.gz'), False) / 255.0\n", + "X_test = load_data(os.path.join(data_folder, 'test-images.gz'), False) / 255.0\n", + "y_train = load_data(os.path.join(data_folder, 'train-labels.gz'), True).reshape(-1)\n", + "y_test = load_data(os.path.join(data_folder, 'test-labels.gz'), True).reshape(-1)\n", "\n", + "# now let's show some randomly chosen images from the training set.\n", "count = 0\n", "sample_size = 30\n", "plt.figure(figsize = (16, 6))\n", @@ -219,8 +223,8 @@ " plt.subplot(1, sample_size, count)\n", " plt.axhline('')\n", " plt.axvline('')\n", - " plt.text(x = 10, y = -10, s = y_train[i], fontsize = 18)\n", - " plt.imshow(X_train[i].reshape(28, 28), cmap = plt.cm.Greys)\n", + " plt.text(x=10, y=-10, s=y_train[i], fontsize=18)\n", + " plt.imshow(X_train[i].reshape(28, 28), cmap=plt.cm.Greys)\n", "plt.show()" ] }, diff --git a/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb b/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb index dcc741e3..e1a16f21 100644 --- a/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb +++ b/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb @@ -100,7 +100,7 @@ "\n", "# Check core SDK version number\n", "\n", - "print(\"This notebook was created using SDK version 1.1.5, you are currently running version\", azureml.core.VERSION)" + "print(\"This notebook was created using SDK version 1.2.0, you are currently running version\", azureml.core.VERSION)" ] }, { diff --git a/how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb b/how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb index 508ec236..a8e3bdfa 100644 --- a/how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb +++ b/how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb @@ -157,10 +157,14 @@ "data_folder = os.path.join(os.getcwd(), 'data')\n", "os.makedirs(data_folder, exist_ok=True)\n", "\n", - "urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', filename=os.path.join(data_folder, 'train-images.gz'))\n", - "urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', filename=os.path.join(data_folder, 'train-labels.gz'))\n", - "urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename=os.path.join(data_folder, 'test-images.gz'))\n", - "urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename=os.path.join(data_folder, 'test-labels.gz'))" + "urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/train-images-idx3-ubyte.gz',\n", + " filename=os.path.join(data_folder, 'train-images.gz'))\n", + "urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/train-labels-idx1-ubyte.gz',\n", + " filename=os.path.join(data_folder, 'train-labels.gz'))\n", + "urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-images-idx3-ubyte.gz',\n", + " filename=os.path.join(data_folder, 'test-images.gz'))\n", + 
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-labels-idx1-ubyte.gz',\n", + " filename=os.path.join(data_folder, 'test-labels.gz'))" ] }, { @@ -227,12 +231,10 @@ "outputs": [], "source": [ "from azureml.core.dataset import Dataset\n", - "\n", - "web_paths = [\n", - " 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',\n", - " 'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',\n", - " 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',\n", - " 'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz'\n", + "web_paths = ['https://azureopendatastorage.blob.core.windows.net/mnist/train-images-idx3-ubyte.gz',\n", + " 'https://azureopendatastorage.blob.core.windows.net/mnist/train-labels-idx1-ubyte.gz',\n", + " 'https://azureopendatastorage.blob.core.windows.net/mnist/t10k-images-idx3-ubyte.gz',\n", + " 'https://azureopendatastorage.blob.core.windows.net/mnist/t10k-labels-idx1-ubyte.gz'\n", " ]\n", "dataset = Dataset.File.from_files(path = web_paths)" ] diff --git a/how-to-use-azureml/work-with-data/datasets-tutorial/timeseries-datasets/tabular-timeseries-dataset-filtering.ipynb b/how-to-use-azureml/work-with-data/datasets-tutorial/timeseries-datasets/tabular-timeseries-dataset-filtering.ipynb index fc4cdd9e..f3269da3 100644 --- a/how-to-use-azureml/work-with-data/datasets-tutorial/timeseries-datasets/tabular-timeseries-dataset-filtering.ipynb +++ b/how-to-use-azureml/work-with-data/datasets-tutorial/timeseries-datasets/tabular-timeseries-dataset-filtering.ipynb @@ -23,8 +23,8 @@ "\n", "The detailed APIs to be demoed in this script are:\n", "- Create Tabular Dataset instance\n", - "- Assign fine timestamp column and coarse timestamp column for Tabular Dataset to activate Time Series related APIs\n", - "- Clear fine timestamp column and coarse timestamp column\n", + "- Assign timestamp column and partition timestamp column for Tabular Dataset to activate Time Series related APIs\n", + "- Clear timestamp column and partition timestamp column\n", "- Filter in data before a specific time\n", "- Filter in data after a specific time\n", "- Filter in data in a specific time range\n", @@ -157,7 +157,7 @@ "source": [ "Create Tabular Dataset instance from blob storage datapath.\n", "\n", - "**TIP:** you can set virtual columns in the partition_format. I.e. if you partition the weather data by state and city, the path can be '/{STATE}/{CITY}/{coarse_time:yyy/MM}/data.parquet'. STATE and CITY would then appear as virtual columns in the dataset, allowing for efficient filtering by these grains. " + "**TIP:** you can set virtual columns in the partition_format. I.e. if you partition the weather data by state and city, the path can be '/{STATE}/{CITY}/{partition_time:yyy/MM}/data.parquet'. STATE and CITY would then appear as virtual columns in the dataset, allowing for efficient filtering by these timestamps. " ] }, { @@ -167,14 +167,14 @@ "outputs": [], "source": [ "datastore_path = [(dstore, dset_name + '/*/*/data.parquet')]\n", - "dataset = Dataset.Tabular.from_parquet_files(path=datastore_path, partition_format = dset_name + '/{coarse_time:yyyy/MM}/data.parquet')" + "dataset = Dataset.Tabular.from_parquet_files(path=datastore_path, partition_format = dset_name + '/{partition_time:yyyy/MM}/data.parquet')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Assign fine timestamp column for Tabular Dataset to activate Time Series related APIs. 
The column to be assigned should be a Date type, otherwise the assigning will fail." + "Assign timestamp column for Tabular Dataset to activate Time Series related APIs. The column to be assigned should be a Date type, otherwise the assigning will fail." ] }, { @@ -183,8 +183,8 @@ "metadata": {}, "outputs": [], "source": [ - "# for this demo, leave out coarse_time so fine_grain_timestamp is used\n", - "tsd = dataset.with_timestamp_columns(fine_grain_timestamp='datetime') # coarse_grain_timestamp='coarse_time')" + "# for this demo, leave out partition_time so timestamp is used\n", + "tsd = dataset.with_timestamp_columns(timestamp='datetime') # partition_timestamp='partition_time')" ] }, { @@ -280,7 +280,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "**NOTE:** You must set the coarse_grain_timestamp to None to filter on the fine_grain_timestamp. The below cell will fail unless the second line is uncommented " + "**NOTE:** You must set the partition_timestamp to None to filter on the timestamp. The below cell will fail unless the second line is uncommented " ] }, { @@ -290,7 +290,7 @@ "outputs": [], "source": [ "# select data that occurs within a given time range\n", - "#tsd = tsd.with_timestamp_columns(fine_grain_timestamp='datetime', coarse_grain_timestamp=None)\n", + "#tsd = tsd.with_timestamp_columns(timestamp='datetime', partition_timestamp=None)\n", "tsd2 = tsd.time_after(datetime(2019, 1, 2)).time_before(datetime(2019, 1, 10))\n", "tsd2.to_pandas_dataframe().head(5)" ] @@ -371,9 +371,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "scrolled": true - }, + "metadata": {}, "outputs": [], "source": [ "tsd2 = tsd.drop_columns(columns=['snowDepth', 'version', 'datetime'])\n", @@ -481,7 +479,7 @@ "metadata": {}, "outputs": [], "source": [ - "tsd2 = tsd.keep_columns(columns=['snowDepth', 'datetime', 'coarse_time'], validate=False)\n", + "tsd2 = tsd.keep_columns(columns=['snowDepth', 'datetime', 'partition_time'], validate=False)\n", "tsd2.to_pandas_dataframe().tail()" ] }, @@ -506,9 +504,9 @@ "metadata": {}, "source": [ "Rules for reseting are:\n", - "- You cannot assign 'None' to fine_grain_timestamp while assign a valid column name to coarse_grain_timestamp because coarse_grain_timestamp is optional while fine_grain_timestamp is mandatory for Tabular time series data.\n", - "- If you assign 'None' to fine_grain_timestamp, then both fine_grain_timestamp and coarse_grain_timestamp will all be cleared.\n", - "- If you assign only 'None' to coarse_grain_timestamp, then only coarse_grain_timestamp will be cleared." + "- You cannot assign 'None' to timestamp while assign a valid column name to partition_timestamp because partition_timestamp is optional while timestamp is mandatory for Tabular time series data.\n", + "- If you assign 'None' to timestamp, then both timestamp and partition_timestamp will all be cleared.\n", + "- If you assign only 'None' to partition_timestamp, then only partition_timestamp will be cleared." 
] }, { @@ -519,17 +517,17 @@ "source": [ "# Illegal clearing, exception is expected.\n", "try:\n", - " tsd2 = tsd.with_timestamp_columns(fine_grain_timestamp=None, coarse_grain_timestamp='coarse_time')\n", + " tsd2 = tsd.with_timestamp_columns(timestamp=None, partition_timestamp='partition_time')\n", "except Exception as e:\n", " print('Cleaning not allowed because {}'.format(str(e)))\n", "\n", "# clear both\n", - "tsd2 = tsd.with_timestamp_columns(fine_grain_timestamp=None, coarse_grain_timestamp=None)\n", + "tsd2 = tsd.with_timestamp_columns(timestamp=None, partition_timestamp=None)\n", "print('after clean both with None/None, timestamp columns are: {}'.format(tsd2.timestamp_columns))\n", "\n", - "# clear coarse_grain_timestamp only and assign 'datetime' as fine timestamp column\n", - "tsd2 = tsd2.with_timestamp_columns(fine_grain_timestamp='datetime', coarse_grain_timestamp=None)\n", - "print('after clean coarse timestamp column, timestamp columns are: {}'.format(tsd2.timestamp_columns))" + "# clear partition_timestamp only and assign 'datetime' as timestamp column\n", + "tsd2 = tsd2.with_timestamp_columns(timestamp='datetime', partition_timestamp=None)\n", + "print('after clean partition timestamp column, timestamp columns are: {}'.format(tsd2.timestamp_columns))" ] }, { diff --git a/setup-environment/configuration.ipynb b/setup-environment/configuration.ipynb index aaab27f4..7a7312c6 100644 --- a/setup-environment/configuration.ipynb +++ b/setup-environment/configuration.ipynb @@ -102,7 +102,7 @@ "source": [ "import azureml.core\n", "\n", - "print(\"This notebook was created using version 1.1.5 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.2.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/tutorials/image-classification-mnist-data/img-classification-part2-deploy.ipynb b/tutorials/image-classification-mnist-data/img-classification-part2-deploy.ipynb index 3e734a9c..de8090f3 100644 --- a/tutorials/image-classification-mnist-data/img-classification-part2-deploy.ipynb +++ b/tutorials/image-classification-mnist-data/img-classification-part2-deploy.ipynb @@ -39,7 +39,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "tags": [ + "register model from file" + ] + }, "outputs": [], "source": [ "# If you did NOT complete the tutorial, you can instead run this cell \n", @@ -58,19 +62,7 @@ " model_name=model_name,\n", " tags={\"data\": \"mnist\", \"model\": \"classification\"},\n", " description=\"Mnist handwriting recognition\",\n", - " workspace=ws)\n", - "\n", - "from azureml.core.environment import Environment\n", - "from azureml.core.conda_dependencies import CondaDependencies\n", - "\n", - "# to install required packages\n", - "env = Environment('tutorial-env')\n", - "cd = CondaDependencies.create(pip_packages=['azureml-dataprep[pandas,fuse]>=1.1.14', 'azureml-defaults'], conda_packages = ['scikit-learn==0.22.1'])\n", - "\n", - "env.python.conda_dependencies = cd\n", - "\n", - "# Register environment to re-use later\n", - "env.register(workspace = ws)" + " workspace=ws)" ] }, { @@ -106,16 +98,190 @@ "print(\"Azure ML SDK Version: \", azureml.core.VERSION)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Retrieve the model\n", + "\n", + "You registered a model in your workspace in the previous tutorial. Now, load this workspace and download the model to your local directory." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "load workspace", + "download model" + ] + }, + "outputs": [], + "source": [ + "from azureml.core import Workspace\n", + "from azureml.core.model import Model\n", + "import os \n", + "ws = Workspace.from_config()\n", + "model=Model(ws, 'sklearn_mnist')\n", + "\n", + "model.download(target_dir=os.getcwd(), exist_ok=True)\n", + "\n", + "# verify the downloaded model file\n", + "file_path = os.path.join(os.getcwd(), \"sklearn_mnist_model.pkl\")\n", + "\n", + "os.stat(file_path)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Test model locally\n", + "\n", + "Before deploying, make sure your model is working locally by:\n", + "* Downloading the test data if you haven't already\n", + "* Loading test data\n", + "* Predicting test data\n", + "* Examining the confusion matrix" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Download test data\n", + "If you haven't already, download the test data to the **./data/** directory" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.core import Dataset\n", + "from azureml.opendatasets import MNIST\n", + "\n", + "data_folder = os.path.join(os.getcwd(), 'data')\n", + "os.makedirs(data_folder, exist_ok=True)\n", + "\n", + "mnist_file_dataset = MNIST.get_file_dataset()\n", + "mnist_file_dataset.download(data_folder, overwrite=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load test data\n", + "\n", + "Load the test data from the **./data/** directory created during the training tutorial." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from utils import load_data\n", + "import os\n", + "\n", + "data_folder = os.path.join(os.getcwd(), 'data')\n", + "# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster\n", + "X_test = load_data(os.path.join(data_folder, 't10k-images-idx3-ubyte.gz'), False) / 255.0\n", + "y_test = load_data(os.path.join(data_folder, 't10k-labels-idx1-ubyte.gz'), True).reshape(-1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Predict test data\n", + "\n", + "Feed the test dataset to the model to get predictions." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pickle\n", + "import joblib\n", + "\n", + "clf = joblib.load( os.path.join(os.getcwd(), 'sklearn_mnist_model.pkl'))\n", + "y_hat = clf.predict(X_test)\n", + "print(y_hat)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Examine the confusion matrix\n", + "\n", + "Generate a confusion matrix to see how many samples from the test set are classified correctly. Notice the mis-classified value for the incorrect predictions." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.metrics import confusion_matrix\n", + "\n", + "conf_mx = confusion_matrix(y_test, y_hat)\n", + "print(conf_mx)\n", + "print('Overall accuracy:', np.average(y_hat == y_test))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Use `matplotlib` to display the confusion matrix as a graph. In this graph, the X axis represents the actual values, and the Y axis represents the predicted values. 
The color in each grid represents the error rate. The lighter the color, the higher the error rate is. For example, many 5's are mis-classified as 3's. Hence you see a bright grid at (5,3)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# normalize the diagonal cells so that they don't overpower the rest of the cells when visualized\n", + "row_sums = conf_mx.sum(axis=1, keepdims=True)\n", + "norm_conf_mx = conf_mx / row_sums\n", + "np.fill_diagonal(norm_conf_mx, 0)\n", + "\n", + "fig = plt.figure(figsize=(8,5))\n", + "ax = fig.add_subplot(111)\n", + "cax = ax.matshow(norm_conf_mx, cmap=plt.cm.bone)\n", + "ticks = np.arange(0, 10, 1)\n", + "ax.set_xticks(ticks)\n", + "ax.set_yticks(ticks)\n", + "ax.set_xticklabels(ticks)\n", + "ax.set_yticklabels(ticks)\n", + "fig.colorbar(cax)\n", + "plt.ylabel('true labels', fontsize=14)\n", + "plt.xlabel('predicted values', fontsize=14)\n", + "plt.savefig('conf.png')\n", + "plt.show()" + ] + }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Deploy as web service\n", "\n", - "Deploy the model as a web service hosted in ACI. \n", + "Once you've tested the model and are satisfied with the results, deploy the model as a web service hosted in ACI. \n", "\n", "To build the correct environment for ACI, provide the following:\n", "* A scoring script to show how to use the model\n", + "* An environment file to show what packages need to be installed\n", "* A configuration file to build the ACI\n", "* The model you trained before\n", "\n", @@ -158,6 +324,52 @@ " return y_hat.tolist()" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create environment file\n", + "\n", + "Next, create an environment file, called myenv.yml, that specifies all of the script's package dependencies. This file is used to ensure that all of those dependencies are installed in the Docker image. This model needs `scikit-learn` and `azureml-sdk`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "set conda dependencies" + ] + }, + "outputs": [], + "source": [ + "from azureml.core.conda_dependencies import CondaDependencies \n", + "\n", + "myenv = CondaDependencies()\n", + "myenv.add_conda_package(\"scikit-learn==0.22.1\")\n", + "myenv.add_pip_package(\"azureml-defaults\")\n", + "\n", + "with open(\"myenv.yml\",\"w\") as f:\n", + " f.write(myenv.serialize_to_string())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Review the content of the `myenv.yml` file." 
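Note (reviewer aside, not part of the patch): the deployment cell further down still calls Environment.get(workspace=ws, name="tutorial-env", version="1"), so it assumes the training tutorial already registered that environment; this patch removes the fallback registration from the model-registration cell. If the environment were missing from the workspace, a rough sketch of registering it from the CondaDependencies object built above (mirroring the block this patch removes):

from azureml.core.environment import Environment

# 'tutorial-env' is the name the later Environment.get(...) call expects
env = Environment('tutorial-env')
env.python.conda_dependencies = myenv  # the CondaDependencies object created in the cell above
env.register(workspace=ws)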
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with open(\"myenv.yml\",\"r\") as f:\n", + " print(f.read())" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -220,11 +432,6 @@ "from azureml.core.webservice import Webservice\n", "from azureml.core.model import InferenceConfig\n", "from azureml.core.environment import Environment\n", - "from azureml.core import Workspace\n", - "from azureml.core.model import Model\n", - "\n", - "ws = Workspace.from_config()\n", - "model = Model(ws, 'sklearn_mnist')\n", "\n", "\n", "myenv = Environment.get(workspace=ws, name=\"tutorial-env\", version=\"1\")\n", @@ -263,148 +470,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Test the model\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Download test data\n", - "Download the test data to the **./data/** directory" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "from azureml.core import Dataset\n", - "from azureml.opendatasets import MNIST\n", - "\n", - "data_folder = os.path.join(os.getcwd(), 'data')\n", - "os.makedirs(data_folder, exist_ok=True)\n", - "\n", - "mnist_file_dataset = MNIST.get_file_dataset()\n", - "mnist_file_dataset.download(data_folder, overwrite=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Load test data\n", - "\n", - "Load the test data from the **./data/** directory created during the training tutorial." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from utils import load_data\n", - "import os\n", - "\n", - "data_folder = os.path.join(os.getcwd(), 'data')\n", - "# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster\n", - "X_test = load_data(os.path.join(data_folder, 't10k-images-idx3-ubyte.gz'), False) / 255.0\n", - "y_test = load_data(os.path.join(data_folder, 't10k-labels-idx1-ubyte.gz'), True).reshape(-1)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Predict test data\n", - "\n", - "Feed the test dataset to the model to get predictions.\n", + "## Test deployed service\n", "\n", + "Earlier you scored all the test data with the local version of the model. Now, you can test the deployed model with a random sample of 30 images from the test data. \n", "\n", "The following code goes through these steps:\n", "1. Send the data as a JSON array to the web service hosted in ACI. \n", "\n", - "1. Use the SDK's `run` API to invoke the service. You can also make raw calls using any HTTP tool such as curl." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "test = json.dumps({\"data\": X_test.tolist()})\n", - "test = bytes(test, encoding='utf8')\n", - "y_hat = service.run(input_data=test)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Examine the confusion matrix\n", - "\n", - "Generate a confusion matrix to see how many samples from the test set are classified correctly. Notice the mis-classified value for the incorrect predictions." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from sklearn.metrics import confusion_matrix\n", - "\n", - "conf_mx = confusion_matrix(y_test, y_hat)\n", - "print(conf_mx)\n", - "print('Overall accuracy:', np.average(y_hat == y_test))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Use `matplotlib` to display the confusion matrix as a graph. In this graph, the X axis represents the actual values, and the Y axis represents the predicted values. The color in each grid represents the error rate. The lighter the color, the higher the error rate is. For example, many 5's are mis-classified as 3's. Hence you see a bright grid at (5,3)." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# normalize the diagonal cells so that they don't overpower the rest of the cells when visualized\n", - "row_sums = conf_mx.sum(axis=1, keepdims=True)\n", - "norm_conf_mx = conf_mx / row_sums\n", - "np.fill_diagonal(norm_conf_mx, 0)\n", - "\n", - "fig = plt.figure(figsize=(8,5))\n", - "ax = fig.add_subplot(111)\n", - "cax = ax.matshow(norm_conf_mx, cmap=plt.cm.bone)\n", - "ticks = np.arange(0, 10, 1)\n", - "ax.set_xticks(ticks)\n", - "ax.set_yticks(ticks)\n", - "ax.set_xticklabels(ticks)\n", - "ax.set_yticklabels(ticks)\n", - "fig.colorbar(cax)\n", - "plt.ylabel('true labels', fontsize=14)\n", - "plt.xlabel('predicted values', fontsize=14)\n", - "plt.savefig('conf.png')\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Show predictions\n", - "\n", - "Test the deployed model with a random sample of 30 images from the test data. \n", - "\n", + "1. Use the SDK's `run` API to invoke the service. You can also make raw calls using any HTTP tool such as curl.\n", "\n", "1. Print the returned predictions and plot them along with the input images. Red font and inverse image (white on black) is used to highlight the misclassified samples. \n", "\n", @@ -562,7 +635,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.6" + "version": "3.7.6" }, "msauthor": "sgilley" }, diff --git a/tutorials/image-classification-mnist-data/img-classification-part2-deploy.yml b/tutorials/image-classification-mnist-data/img-classification-part2-deploy.yml index bc88852c..b3d47ee5 100644 --- a/tutorials/image-classification-mnist-data/img-classification-part2-deploy.yml +++ b/tutorials/image-classification-mnist-data/img-classification-part2-deploy.yml @@ -4,3 +4,5 @@ dependencies: - azureml-sdk - matplotlib - sklearn + - pandas + - azureml-opendatasets