Mirror of https://github.com/Azure/MachineLearningNotebooks.git
Synced 2025-12-22 10:35:12 -05:00

Compare commits: release_up...azureml-sd (14 commits)
- 15a3ca649d
- 3c4770cfe5
- 8d7de05908
- 863faae57f
- 8d3f5adcdb
- cd3394e129
- ee5d0239a3
- 388111cedc
- b86191ed7f
- 22753486de
- cf1d1dbf01
- 2e45d9800d
- a9a8de02ec
- dd8339e650
```diff
@@ -103,7 +103,7 @@
 "source": [
 "import azureml.core\n",
 "\n",
-"print(\"This notebook was created using version 1.42.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.43.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
```
```diff
@@ -6,6 +6,7 @@ dependencies:
   - fairlearn>=0.6.2
   - joblib
   - liac-arff
-  - raiwidgets~=0.18.1
+  - raiwidgets~=0.19.0
   - itsdangerous==2.0.1
   - markupsafe<2.1.0
+  - protobuf==3.20.0
```
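The `~=` ("compatible release") specifier used for the raiwidgets pin in these environment files accepts any patch release of 0.19 but excludes 0.20. A quick way to check what a specifier matches, using the `packaging` library (an illustrative snippet, not part of the commit):

```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=0.19.0")  # equivalent to >=0.19.0, ==0.19.*
print(spec.contains("0.19.5"))   # True: patch releases still match
print(spec.contains("0.20.0"))   # False: minor bumps are excluded
```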
```diff
@@ -6,6 +6,7 @@ dependencies:
   - fairlearn>=0.6.2
   - joblib
   - liac-arff
-  - raiwidgets~=0.18.1
+  - raiwidgets~=0.19.0
   - itsdangerous==2.0.1
   - markupsafe<2.1.0
+  - protobuf==3.20.0
```
```diff
@@ -13,19 +13,19 @@ dependencies:
   - pytorch::pytorch=1.4.0
   - conda-forge::fbprophet==0.7.1
   - cudatoolkit=10.1.243
-  - scipy==1.5.2
+  - scipy==1.5.3
   - notebook
   - pywin32==227
   - PySocks==1.7.1
   - Pygments==2.11.2
   - jsonschema==4.6.0
   - conda-forge::pyqt==5.12.3

   - pip:
     # Required packages for AzureML execution, history, and data preparation.
-    - azureml-widgets~=1.42.0
+    - azureml-widgets~=1.43.0
     - pytorch-transformers==1.0.0
     - spacy==2.2.4
     - pystan==2.19.1.1
     - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-    - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.42.0/validated_win32_requirements.txt [--no-deps]
+    - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.43.0/validated_win32_requirements.txt [--no-deps]
     - arch==4.14
```
```diff
@@ -14,7 +14,7 @@ dependencies:
   - numpy==1.19.5
   - cython==0.29.14
   - urllib3==1.26.7
-  - scipy>=1.4.1,<=1.5.2
+  - scipy>=1.4.1,<=1.5.3
   - scikit-learn==0.22.1
   - py-xgboost<=1.3.3
   - holidays==0.10.3
@@ -24,10 +24,10 @@ dependencies:

   - pip:
     # Required packages for AzureML execution, history, and data preparation.
-    - azureml-widgets~=1.42.0
+    - azureml-widgets~=1.43.0
     - pytorch-transformers==1.0.0
     - spacy==2.2.4
     - pystan==2.19.1.1
     - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-    - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.42.0/validated_linux_requirements.txt [--no-deps]
+    - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.43.0/validated_linux_requirements.txt [--no-deps]
     - arch==4.14
```
```diff
@@ -15,7 +15,7 @@ dependencies:
   - numpy==1.19.5
   - cython==0.29.14
   - urllib3==1.26.7
-  - scipy>=1.4.1,<=1.5.2
+  - scipy>=1.4.1,<=1.5.3
   - scikit-learn==0.22.1
   - py-xgboost<=1.3.3
   - holidays==0.10.3
@@ -25,10 +25,10 @@ dependencies:

   - pip:
     # Required packages for AzureML execution, history, and data preparation.
-    - azureml-widgets~=1.42.0
+    - azureml-widgets~=1.43.0
     - pytorch-transformers==1.0.0
     - spacy==2.2.4
     - pystan==2.19.1.1
     - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-    - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.42.0/validated_darwin_requirements.txt [--no-deps]
+    - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.43.0/validated_darwin_requirements.txt [--no-deps]
     - arch==4.14
```
```diff
@@ -9,9 +9,11 @@ dependencies:
   - PyJWT < 2.0.0
   - numpy==1.18.5
   - pywin32==227
+  - cryptography<37.0.0

   - pip:
     # Required packages for AzureML execution, history, and data preparation.
+    - azure-mgmt-core==1.3.0
     - azure-core==1.21.1
     - azure-identity==1.7.0
     - azureml-defaults
```
```diff
@@ -11,9 +11,11 @@ dependencies:
   - urllib3==1.26.7
   - PyJWT < 2.0.0
   - numpy==1.19.5
+  - cryptography<37.0.0

   - pip:
     # Required packages for AzureML execution, history, and data preparation.
+    - azure-mgmt-core==1.3.0
    - azure-core==1.21.1
     - azure-identity==1.7.0
     - azureml-defaults
```
```diff
@@ -92,7 +92,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.42.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.43.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
```
```diff
@@ -91,7 +91,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.42.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.43.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
```
```diff
@@ -149,12 +149,7 @@ def get_backtest_pipeline(
         inputs=[forecasts.as_mount()],
         outputs=[data_results],
         source_directory=PROJECT_FOLDER,
-        arguments=[
-            "--forecasts",
-            forecasts,
-            "--output-dir",
-            data_results,
-        ],
+        arguments=["--forecasts", forecasts, "--output-dir", data_results],
         runconfig=run_config,
         compute_target=compute_target,
         allow_reuse=False,
```
```diff
@@ -242,6 +242,34 @@
 ")"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"#### 2.4 Configure data with ``OutputFileDatasetConfig`` objects\n",
+"This step shows how to configure output data from a pipeline step. One use case is preprocessing data before feeding it to the training step. Intermediate data (the output of a step) is represented by an ``OutputFileDatasetConfig`` object. ``output_data`` is produced as the output of a step. Optionally, this data can be registered as a dataset by calling the ``register_on_complete`` method. If you create an ``OutputFileDatasetConfig`` in one step and use it as an input to another step, that data dependency between steps creates an implicit execution order in the pipeline.\n",
+"\n",
+"``OutputFileDatasetConfig`` objects return a directory, and by default write output to the default datastore of the workspace.\n",
+"\n",
+"Since instances of ``OutputTabularDatasetConfig`` cannot be created directly, we first create an ``OutputFileDatasetConfig`` and then use its ``read_parquet_files`` method to convert it into an ``OutputTabularDatasetConfig`` that reads the parquet files."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"from azureml.data.output_dataset_config import OutputFileDatasetConfig\n",
+"\n",
+"output_data = OutputFileDatasetConfig(\n",
+"    name=\"processed_data\", destination=(dstore, \"outputdataset/{run-id}/{output-name}\")\n",
+").as_upload()\n",
+"# output_data_dataset = output_data.register_on_complete(\n",
+"#     name='processed_data', description = 'files from prev step')\n",
+"output_data = output_data.read_parquet_files()"
+]
+},
 {
 "cell_type": "markdown",
 "metadata": {},
```
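Outside the notebook JSON, the pattern this new cell introduces looks like the following; a minimal sketch, assuming a workspace loaded from `config.json` and its default datastore (the names are illustrative):

```python
from azureml.core import Workspace
from azureml.data.output_dataset_config import OutputFileDatasetConfig

ws = Workspace.from_config()           # assumes a config.json is present
dstore = ws.get_default_datastore()

# File output of a step; uploaded to the datastore when the step finishes.
output_data = OutputFileDatasetConfig(
    name="processed_data",
    destination=(dstore, "outputdataset/{run-id}/{output-name}"),
).as_upload()

# Convert to a tabular config so a downstream step can consume it as a dataset.
tabular_output = output_data.read_parquet_files()

# A later step would take it as input, e.g. tabular_output.as_input("processed").
```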
```diff
@@ -303,6 +331,48 @@
 " print(compute_target.status.serialize())"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Configure the training run's environment\n",
+"The next step is making sure that the remote training run has all the dependencies needed by the training steps. Dependencies and the runtime context are set by creating and configuring a RunConfiguration object.\n",
+"\n",
+"The code below shows two options for handling dependencies. As presented, with ``USE_CURATED_ENV = True``, the configuration is based on a [curated environment](https://docs.microsoft.com/en-us/azure/machine-learning/resource-curated-environments). Curated environments have prebuilt Docker images in the [Microsoft Container Registry](https://hub.docker.com/publishers/microsoftowner). For more information, see [Azure Machine Learning curated environments](https://docs.microsoft.com/en-us/azure/machine-learning/resource-curated-environments).\n",
+"\n",
+"The path taken if you change ``USE_CURATED_ENV`` to False shows the pattern for explicitly setting your dependencies. In that scenario, a new custom Docker image will be created and registered in an Azure Container Registry within your resource group (see [Introduction to private Docker container registries in Azure](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-intro)). Building and registering this image can take quite a few minutes."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"from azureml.core.runconfig import RunConfiguration\n",
+"from azureml.core.conda_dependencies import CondaDependencies\n",
+"from azureml.core import Environment\n",
+"\n",
+"aml_run_config = RunConfiguration()\n",
+"aml_run_config.target = compute_target\n",
+"\n",
+"USE_CURATED_ENV = True\n",
+"if USE_CURATED_ENV:\n",
+"    curated_environment = Environment.get(\n",
+"        workspace=ws, name=\"AzureML-sklearn-0.24-ubuntu18.04-py37-cpu\"\n",
+"    )\n",
+"    aml_run_config.environment = curated_environment\n",
+"else:\n",
+"    aml_run_config.environment.python.user_managed_dependencies = False\n",
+"\n",
+"    # Add some packages relied on by data prep step\n",
+"    aml_run_config.environment.python.conda_dependencies = CondaDependencies.create(\n",
+"        conda_packages=[\"pandas\", \"scikit-learn\"],\n",
+"        pip_packages=[\"azureml-sdk\", \"azureml-dataset-runtime[fuse,pandas]\"],\n",
+"        pin_sdk_version=False,\n",
+"    )"
+]
+},
 {
 "cell_type": "markdown",
 "metadata": {},
```
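If you want to see which curated environments are available before picking one, `Environment.list` returns everything registered in the workspace; a quick sketch (the name filter is an assumption based on the usual `AzureML-` prefix):

```python
from azureml.core import Environment, Workspace

ws = Workspace.from_config()

# Curated environment names conventionally start with "AzureML-".
for name in Environment.list(workspace=ws):
    if name.startswith("AzureML-"):
        print(name)
```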
```diff
@@ -366,6 +436,46 @@
 ")"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Construct your pipeline steps\n",
+"Once you have the compute resource and environment created, you're ready to define your pipeline's steps. There are many built-in steps available via the Azure Machine Learning SDK, as you can see on the [reference documentation for the azureml.pipeline.steps package](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps?view=azure-ml-py). The most flexible class is [PythonScriptStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.python_script_step.pythonscriptstep?view=azure-ml-py), which runs a Python script.\n",
+"\n",
+"Your data preparation code is in a subdirectory (in this example, \"data_preprocessing_tabular.py\" in the directory \"./scripts\"). As part of the pipeline creation process, this directory is zipped and uploaded to the compute_target, and the step runs the script specified as the value for ``script_name``.\n",
+"\n",
+"The ``arguments`` values specify the inputs and outputs of the step. In the example below, the baseline data is the ``input_ds_small`` dataset. The script data_preprocessing_tabular.py does whatever data-transformation tasks are appropriate to the task at hand and outputs the data to ``output_data``, of type ``OutputFileDatasetConfig``. For more information, see [Moving data into and between ML pipeline steps (Python)](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-move-data-in-out-of-pipelines). The step will run on the machine defined by ``compute_target``, using the configuration ``aml_run_config``.\n",
+"\n",
+"Reuse of previous results (``allow_reuse``) is key when using pipelines in a collaborative environment, since eliminating unnecessary reruns offers agility. Reuse is the default behavior when the ``script_name``, ``inputs``, and the parameters of a step remain the same. When reuse is allowed, results from the previous run are immediately sent to the next step. If ``allow_reuse`` is set to False, a new run will always be generated for this step during pipeline execution.\n",
+"\n",
+"> Note: only partitioned ``FileDataset`` and unpartitioned ``TabularDataset`` are supported when using such output as input."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"from azureml.pipeline.steps import PythonScriptStep\n",
+"\n",
+"dataprep_source_dir = \"./scripts\"\n",
+"entry_point = \"data_preprocessing_tabular.py\"\n",
+"ds_input = input_ds_small.as_named_input(\"train_10_models\")\n",
+"\n",
+"data_prep_step = PythonScriptStep(\n",
+"    script_name=entry_point,\n",
+"    source_directory=dataprep_source_dir,\n",
+"    arguments=[\"--input\", ds_input, \"--output\", output_data],\n",
+"    compute_target=compute_target,\n",
+"    runconfig=aml_run_config,\n",
+"    allow_reuse=False,\n",
+")\n",
+"\n",
+"input_ds_small = output_data"
+]
+},
 {
 "cell_type": "markdown",
 "metadata": {},
```
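After the step is defined, it still has to be assembled into a pipeline and submitted. A minimal sketch, assuming `ws` and `data_prep_step` from the cells above and an experiment name chosen here for illustration:

```python
from azureml.core import Experiment
from azureml.pipeline.core import Pipeline

pipeline = Pipeline(workspace=ws, steps=[data_prep_step])
pipeline.validate()  # catches missing inputs/outputs before submission

run = Experiment(ws, "data-prep-pipeline").submit(pipeline)
run.wait_for_completion(show_output=True)
```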
```diff
@@ -23,11 +23,7 @@ except ImportError:


 def infer_forecasting_dataset_tcn(
-    X_test,
-    y_test,
-    model,
-    output_path,
-    output_dataset_name="results",
+    X_test, y_test, model, output_path, output_dataset_name="results"
 ):

     y_pred, df_all = model.forecast(X_test, y_test)
@@ -71,10 +67,7 @@ def get_model(model_path, model_file_name):
 def get_args():
     parser = argparse.ArgumentParser()
     parser.add_argument(
-        "--model_name",
-        type=str,
-        dest="model_name",
-        help="Model to be loaded",
+        "--model_name", type=str, dest="model_name", help="Model to be loaded"
     )

     parser.add_argument(
@@ -108,12 +101,7 @@ def get_args():
     return args


-def get_data(
-    run,
-    fitted_model,
-    target_column_name,
-    test_dataset_name,
-):
+def get_data(run, fitted_model, target_column_name, test_dataset_name):

     # get input dataset by name
     test_dataset = Dataset.get_by_name(run.experiment.workspace, test_dataset_name)
@@ -159,10 +147,7 @@ if __name__ == "__main__":
     fitted_model = get_model(model_path, model_file_name)

     X_test_df, y_test = get_data(
-        run,
-        fitted_model,
-        target_column_name,
-        test_dataset_name,
+        run, fitted_model, target_column_name, test_dataset_name
     )

     infer_forecasting_dataset_tcn(
```
```diff
@@ -513,13 +513,7 @@
 "conda_run_config.environment.docker.enabled = True\n",
 "\n",
 "# specify CondaDependencies obj\n",
-"conda_run_config.environment.python.conda_dependencies = (\n",
-"    automl_run.get_environment().python.conda_dependencies\n",
-")\n",
-"\n",
-"conda_run_config.environment.python.conda_dependencies.add_pip_package(\n",
-"    \"dotnetcore2==2.1.23\"\n",
-")"
+"conda_run_config.environment = automl_run.get_environment()"
 ]
 },
 {
@@ -648,28 +642,6 @@
 ")"
 ]
 },
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"### Create the conda dependencies for setting up the service\n",
-"We need to create the conda dependencies comprising the *azureml* packages using the training environment from the *automl_run*."
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"conda_dep = automl_run.get_environment().python.conda_dependencies\n",
-"\n",
-"with open(\"myenv.yml\", \"w\") as f:\n",
-"    f.write(conda_dep.serialize_to_string())\n",
-"with open(\"myenv.yml\", \"r\") as f:\n",
-"    print(f.read())"
-]
-},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -692,7 +664,7 @@
 "metadata": {},
 "source": [
 "### Deploy the service\n",
-"In the cell below, we deploy the service using the conda file and the scoring file from the previous steps. "
+"In the cell below, we deploy the service using the automl training environment and the scoring file from the previous steps. "
 ]
 },
 {
@@ -714,7 +686,7 @@
 " description=\"Get local explanations for Machine test data\",\n",
 ")\n",
 "\n",
-"myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\n",
+"myenv = automl_run.get_environment()\n",
 "inference_config = InferenceConfig(entry_script=\"score_explain.py\", environment=myenv)\n",
 "\n",
 "# Use configs and models generated above\n",
```
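The net effect of these four hunks is that the scoring service reuses the exact environment the model was trained with, instead of a hand-serialized conda file, which removes a class of train/serve dependency-drift bugs. A hedged sketch of the deploy call that typically follows (`ws`, `automl_run`, and `model` come from earlier in the notebook; the service name and resource sizes here are illustrative):

```python
from azureml.core.model import InferenceConfig, Model
from azureml.core.webservice import AciWebservice

myenv = automl_run.get_environment()  # the environment AutoML trained with
inference_config = InferenceConfig(entry_script="score_explain.py", environment=myenv)

aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)
service = Model.deploy(ws, "automl-explain-svc", [model], inference_config, aci_config)
service.wait_for_deployment(show_output=True)
```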
```diff
@@ -69,17 +69,19 @@
 "# ONNX Model Zoo and save it in the same folder as this tutorial\n",
 "\n",
 "import urllib.request\n",
+"import os\n",
 "\n",
 "onnx_model_url = \"https://github.com/onnx/models/blob/main/vision/body_analysis/emotion_ferplus/model/emotion-ferplus-7.tar.gz?raw=true\"\n",
 "\n",
 "urllib.request.urlretrieve(onnx_model_url, filename=\"emotion-ferplus-7.tar.gz\")\n",
+"os.mkdir(\"emotion_ferplus\")\n",
 "\n",
 "# the ! magic command tells our jupyter notebook kernel to run the following line of \n",
 "# code from the command line instead of the notebook kernel\n",
 "\n",
 "# We use tar and xvzf to unzip the files we just retrieved from the ONNX model zoo\n",
 "\n",
-"!tar xvzf emotion-ferplus-7.tar.gz"
+"!tar xvzf emotion-ferplus-7.tar.gz -C emotion_ferplus"
 ]
 },
 {
@@ -130,7 +132,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"model_dir = \"emotion_ferplus\" # replace this with the location of your model files\n",
+"model_dir = \"emotion_ferplus/model\" # replace this with the location of your model files\n",
 "\n",
 "# leave as is if it's in the same folder as this notebook"
 ]
@@ -496,13 +498,12 @@
 "\n",
 "# to use parsers to read in our model/data\n",
 "import json\n",
 "import os\n",
 "\n",
 "test_inputs = []\n",
 "test_outputs = []\n",
 "\n",
-"# read in 3 testing images from .pb files\n",
-"test_data_size = 3\n",
+"# read in 1 testing image from .pb files\n",
+"test_data_size = 1\n",
 "\n",
 "for num in np.arange(test_data_size):\n",
 "    input_test_data = os.path.join(model_dir, 'test_data_set_{0}'.format(num), 'input_0.pb')\n",
@@ -533,7 +534,7 @@
 },
 "source": [
 "### Show some sample images\n",
-"We use `matplotlib` to plot 3 test images from the dataset."
+"We use `matplotlib` to plot 1 test image from the dataset."
 ]
 },
 {
@@ -547,7 +548,7 @@
 "outputs": [],
 "source": [
 "plt.figure(figsize = (20, 20))\n",
-"for test_image in np.arange(3):\n",
+"for test_image in np.arange(test_data_size):\n",
 "    test_inputs[test_image].reshape(1, 64, 64)\n",
 "    plt.subplot(1, 8, test_image+1)\n",
 "    plt.axhline('')\n",
```
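The `-C` flag tells tar to extract into the named directory instead of the notebook folder, which is why the `os.mkdir` call is added first. The same thing in pure Python, if you prefer to avoid shelling out (an illustrative equivalent, not part of the commit):

```python
import os
import tarfile

os.makedirs("emotion_ferplus", exist_ok=True)
with tarfile.open("emotion-ferplus-7.tar.gz", "r:gz") as archive:
    # Mirrors: !tar xvzf emotion-ferplus-7.tar.gz -C emotion_ferplus
    archive.extractall(path="emotion_ferplus")
```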
```diff
@@ -69,10 +69,12 @@
 "# ONNX Model Zoo and save it in the same folder as this tutorial\n",
 "\n",
 "import urllib.request\n",
+"import os\n",
 "\n",
 "onnx_model_url = \"https://github.com/onnx/models/blob/main/vision/classification/mnist/model/mnist-7.tar.gz?raw=true\"\n",
 "\n",
-"urllib.request.urlretrieve(onnx_model_url, filename=\"mnist-7.tar.gz\")"
+"urllib.request.urlretrieve(onnx_model_url, filename=\"mnist-7.tar.gz\")\n",
+"os.mkdir(\"mnist\")"
 ]
 },
 {
@@ -86,7 +88,7 @@
 "\n",
 "# We use tar and xvzf to unzip the files we just retrieved from the ONNX model zoo\n",
 "\n",
-"!tar xvzf mnist-7.tar.gz"
+"!tar xvzf mnist-7.tar.gz -C mnist"
 ]
 },
 {
@@ -137,7 +139,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"model_dir = \"mnist\" # replace this with the location of your model files\n",
+"model_dir = \"mnist/model\" # replace this with the location of your model files\n",
 "\n",
 "# leave as is if it's in the same folder as this notebook"
 ]
@@ -447,13 +449,12 @@
 "\n",
 "# to use parsers to read in our model/data\n",
 "import json\n",
 "import os\n",
 "\n",
 "test_inputs = []\n",
 "test_outputs = []\n",
 "\n",
-"# read in 3 testing images from .pb files\n",
-"test_data_size = 3\n",
+"# read in 1 testing image from .pb files\n",
+"test_data_size = 1\n",
 "\n",
 "for i in np.arange(test_data_size):\n",
 "    input_test_data = os.path.join(model_dir, 'test_data_set_{0}'.format(i), 'input_0.pb')\n",
@@ -486,7 +487,7 @@
 },
 "source": [
 "### Show some sample images\n",
-"We use `matplotlib` to plot 3 test images from the dataset."
+"We use `matplotlib` to plot 1 test image from the dataset."
 ]
 },
 {
@@ -500,7 +501,7 @@
 "outputs": [],
 "source": [
 "plt.figure(figsize = (16, 6))\n",
-"for test_image in np.arange(3):\n",
+"for test_image in np.arange(test_data_size):\n",
 "    plt.subplot(1, 15, test_image+1)\n",
 "    plt.axhline('')\n",
 "    plt.axvline('')\n",
```
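Both ONNX notebooks load their test tensors from the `.pb` files that ship inside the model archives. For reference, the usual way to decode one of those files with the `onnx` package looks like this (a sketch; the path assumes the extraction layout above):

```python
import os

import onnx
from onnx import numpy_helper

model_dir = "mnist/model"
pb_path = os.path.join(model_dir, "test_data_set_0", "input_0.pb")

tensor = onnx.TensorProto()
with open(pb_path, "rb") as f:
    tensor.ParseFromString(f.read())  # each .pb file is a serialized TensorProto

test_input = numpy_helper.to_array(tensor)
print(test_input.shape, test_input.dtype)
```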
```diff
@@ -2,6 +2,8 @@
 # Licensed under the MIT license.

 from azureml.core.run import Run
+from azureml.interpret import ExplanationClient
+from interpret_community.adapter import ExplanationAdapter
 import joblib
 import os
 import shap
@@ -11,9 +13,11 @@ OUTPUT_DIR = './outputs/'
 os.makedirs(OUTPUT_DIR, exist_ok=True)

 run = Run.get_context()
+client = ExplanationClient.from_run(run)

 # get a dataset on income prediction
 X, y = shap.datasets.adult()
+features = X.columns.values

 # train an XGBoost model (but any other tree model type should work)
 model = xgboost.XGBClassifier()
@@ -26,6 +30,12 @@ shap_values = explainer(X_shap)
 print("computed shap values:")
 print(shap_values)

+# Use the explanation adapter to convert the importances into an interpret-community
+# style explanation which can be uploaded to AzureML or visualized in the
+# ExplanationDashboard widget
+adapter = ExplanationAdapter(features, classification=True)
+global_explanation = adapter.create_global(shap_values.values, X_shap, expected_values=shap_values.base_values)
+
 # write X_shap out as a pickle file for later visualization
 x_shap_pkl = 'x_shap.pkl'
 with open(x_shap_pkl, 'wb') as file:
@@ -42,3 +52,8 @@ with open(model_file_name, 'wb') as file:
 run.upload_file('xgboost_model.pkl', os.path.join('./outputs/', model_file_name))
 original_model = run.register_model(model_name='xgboost_with_gpu_tree_explainer',
                                     model_path='xgboost_model.pkl')
+
+# Uploading model explanation data for storage or visualization in webUX
+# The explanation can then be downloaded on any compute
+comment = 'Global explanation on classification model trained on adult census income dataset'
+client.upload_model_explanation(global_explanation, comment=comment, model_id=original_model.id)
```
```diff
@@ -106,7 +106,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.42.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.43.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
```
```diff
@@ -225,36 +225,73 @@
 "\n",
 "from azureml.core import Environment\n",
 "\n",
-"environment_name = \"shap-gpu-tree\"\n",
-"\n",
+"environment_name = \"shapgpu\"\n",
 "env = Environment(environment_name)\n",
 "\n",
 "env.docker.enabled = True\n",
 "env.docker.base_image = None\n",
-"env.docker.base_dockerfile = \"\"\"\n",
-"FROM rapidsai/rapidsai:cuda10.0-devel-ubuntu18.04\n",
 "\n",
 "\n",
+"# Note: this is to pin the pandas and xgboost versions to be same as notebook.\n",
+"# In production scenario user would choose their dependencies\n",
+"import pkg_resources\n",
+"available_packages = pkg_resources.working_set\n",
+"xgboost_ver = None\n",
+"pandas_ver = None\n",
+"for dist in list(available_packages):\n",
+"    if dist.key == 'xgboost':\n",
+"        xgboost_ver = dist.version\n",
+"    elif dist.key == 'pandas':\n",
+"        pandas_ver = dist.version\n",
+"xgboost_dep = 'xgboost'\n",
+"pandas_dep = 'pandas'\n",
+"if pandas_ver:\n",
+"    pandas_dep = 'pandas=={}'.format(pandas_ver)\n",
+"if xgboost_dep:\n",
+"    xgboost_dep = 'xgboost=={}'.format(xgboost_ver)\n",
+"\n",
+"# Note: we build shap at commit 690245 for Tesla K80 GPUs\n",
+"env.docker.base_dockerfile = f\"\"\"\n",
+"FROM nvidia/cuda:10.2-devel-ubuntu18.04\n",
+"ENV PATH=\"/root/miniconda3/bin:${{PATH}}\"\n",
+"ARG PATH=\"/root/miniconda3/bin:${{PATH}}\"\n",
 "RUN apt-get update && \\\n",
 "apt-get install -y fuse && \\\n",
 "apt-get install -y build-essential && \\\n",
 "apt-get install -y python3-dev && \\\n",
-"source activate rapids && \\\n",
+"apt-get install -y wget && \\\n",
+"apt-get install -y git && \\\n",
+"rm -rf /var/lib/apt/lists/* && \\\n",
+"wget \\\n",
+"https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \\\n",
+"mkdir /root/.conda && \\\n",
+"bash Miniconda3-latest-Linux-x86_64.sh -b && \\\n",
+"rm -f Miniconda3-latest-Linux-x86_64.sh && \\\n",
+"conda init bash && \\\n",
+". ~/.bashrc && \\\n",
+"conda create -n shapgpu python=3.7 && \\\n",
+"conda activate shapgpu && \\\n",
 "apt-get install -y g++ && \\\n",
 "printenv && \\\n",
 "echo \"which nvcc: \" && \\\n",
 "which nvcc && \\\n",
+"pip install numpy==1.20.3 && \\\n",
 "pip install azureml-defaults && \\\n",
 "pip install azureml-telemetry && \\\n",
 "pip install azureml-interpret && \\\n",
+"pip install {pandas_dep} && \\\n",
 "cd /usr/local/src && \\\n",
-"git clone https://github.com/slundberg/shap && \\\n",
+"git clone https://github.com/slundberg/shap.git --single-branch && \\\n",
 "cd shap && \\\n",
+"git reset --hard 690245c6ab043edf40cfce3d8438a62e29ab599f && \\\n",
 "mkdir build && \\\n",
 "python setup.py install --user && \\\n",
-"pip uninstall -y xgboost && \\\n",
-"rm /conda/envs/rapids/lib/libxgboost.so && \\\n",
-"pip install xgboost==1.4.2\n",
+"pip install {xgboost_dep} \\\n",
 "\"\"\"\n",
 "\n",
 "env.python.user_managed_dependencies = True\n",
+"env.python.interpreter_path = '/root/miniconda3/envs/shapgpu/bin/python'\n",
 "\n",
 "from azureml.core import Run\n",
 "from azureml.core import ScriptRunConfig\n",
```
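Two observations on the new pin-detection cell. First, it tests `if xgboost_dep:` (a string that is always truthy) where `if xgboost_ver:` was presumably intended, so a missing xgboost would render the pin as `xgboost==None`. Second, `pkg_resources` is deprecated in newer setuptools; if you ever port this cell, the standard-library equivalent is `importlib.metadata` (a sketch, not part of the commit):

```python
from importlib.metadata import PackageNotFoundError, version


def pin(package: str) -> str:
    """Return 'pkg==X.Y.Z' for an installed package, or just 'pkg' if absent."""
    try:
        return f"{package}=={version(package)}"
    except PackageNotFoundError:
        return package


pandas_dep = pin("pandas")
xgboost_dep = pin("xgboost")
```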
```diff
@@ -266,6 +303,176 @@
 "run = experiment.submit(config=src)\n",
 "run"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run)."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"%%time\n",
+"# Shows output of the run on stdout.\n",
+"run.wait_for_completion(show_output=True)"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"run.get_metrics()"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Download\n",
+"1. Download model explanation data."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"from azureml.interpret import ExplanationClient\n",
+"\n",
+"# Get model explanation data\n",
+"client = ExplanationClient.from_run(run)\n",
+"global_explanation = client.download_model_explanation()\n",
+"local_importance_values = global_explanation.local_importance_values\n",
+"expected_values = global_explanation.expected_values"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"# Get the top k (e.g., 4) most important features with their importance values\n",
+"global_explanation_topk = client.download_model_explanation(top_k=4)\n",
+"global_importance_values = global_explanation_topk.get_ranked_global_values()\n",
+"global_importance_names = global_explanation_topk.get_ranked_global_names()"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"print('global importance values: {}'.format(global_importance_values))\n",
+"print('global importance names: {}'.format(global_importance_names))"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"2. Download model file."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"# Retrieve model for visualization and deployment\n",
+"from azureml.core.model import Model\n",
+"import joblib\n",
+"original_model = Model(ws, 'xgboost_with_gpu_tree_explainer')\n",
+"model_path = original_model.download(exist_ok=True)\n",
+"original_model = joblib.load(model_path)"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"3. Download test dataset."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"# Retrieve x_test for visualization\n",
+"x_test_path = './x_shap_adult_census.pkl'\n",
+"run.download_file('x_shap_adult_census.pkl', output_file_path=x_test_path)"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"x_test = joblib.load('x_shap_adult_census.pkl')"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Visualize\n",
+"Load the visualization dashboard"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"from raiwidgets import ExplanationDashboard"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"from interpret_community.common.model_wrapper import wrap_model\n",
+"from interpret_community.dataset.dataset_wrapper import DatasetWrapper\n",
+"# note we need to wrap the XGBoost model to output predictions and probabilities in the scikit-learn format\n",
+"class WrappedXGBoostModel(object):\n",
+"    \"\"\"A class for wrapping an XGBoost model to output integer predicted classes.\"\"\"\n",
+"\n",
+"    def __init__(self, model):\n",
+"        self.model = model\n",
+"\n",
+"    def predict(self, dataset):\n",
+"        return self.model.predict(dataset).astype(int)\n",
+"\n",
+"    def predict_proba(self, dataset):\n",
+"        return self.model.predict_proba(dataset)\n",
+"\n",
+"wrapped_model = WrappedXGBoostModel(wrap_model(original_model, DatasetWrapper(x_test), model_task='classification'))"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"ExplanationDashboard(global_explanation, wrapped_model, dataset=x_test)"
+]
+}
 ],
 "metadata": {
```
```diff
@@ -1,5 +1,18 @@
 name: train-explain-model-gpu-tree-explainer
 dependencies:
+  - py-xgboost==1.3.3
   - pip:
     - azureml-sdk
     - azureml-interpret
+    - flask
+    - flask-cors
+    - gevent>=1.3.6
+    - jinja2
+    - ipython
+    - matplotlib
+    - ipywidgets
+    - raiwidgets~=0.19.0
+    - itsdangerous==2.0.1
+    - markupsafe<2.1.0
+    - scipy>=1.5.3
+    - protobuf==3.20.0
```
```diff
@@ -11,6 +11,8 @@ dependencies:
   - matplotlib
   - azureml-dataset-runtime
   - ipywidgets
-  - raiwidgets~=0.18.1
+  - raiwidgets~=0.19.0
   - itsdangerous==2.0.1
   - markupsafe<2.1.0
+  - scipy>=1.5.3
+  - protobuf==3.20.0
```
```diff
@@ -10,7 +10,9 @@ dependencies:
   - ipython
   - matplotlib
   - ipywidgets
-  - raiwidgets~=0.18.1
+  - raiwidgets~=0.19.0
   - packaging>=20.9
   - itsdangerous==2.0.1
   - markupsafe<2.1.0
+  - scipy>=1.5.3
+  - protobuf==3.20.0
```
```diff
@@ -18,7 +18,9 @@ def init():
     original_model_path = Model.get_model_path('local_deploy_model')
     scoring_explainer_path = Model.get_model_path('IBM_attrition_explainer')

+    # Load the original model into the environment
     original_model = joblib.load(original_model_path)
+    # Load the scoring explainer into the environment
     scoring_explainer = joblib.load(scoring_explainer_path)
```
```diff
@@ -29,5 +31,15 @@ def run(raw_data):
     predictions = original_model.predict(data)
     # Retrieve model explanations
     local_importance_values = scoring_explainer.explain(data)
+    # Retrieve the feature names, which we may want to return to the user.
+    # Note: you can also get the raw_features and engineered_features
+    # by calling scoring_explainer.raw_features and
+    # scoring_explainer.engineered_features but you may need to pass
+    # the raw or engineered feature names in the ScoringExplainer
+    # constructor, depending on if you are using feature maps or
+    # transformations on the original explainer.
+    features = scoring_explainer.features
     # You can return any data type as long as it is JSON-serializable
-    return {'predictions': predictions.tolist(), 'local_importance_values': local_importance_values}
+    return {'predictions': predictions.tolist(),
+            'local_importance_values': local_importance_values,
+            'features': features}
```
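With the extra `features` key in place, a caller can line local importances up against column names. A hedged sketch of invoking the deployed service (the payload shape follows the score script above; the sample row is hypothetical and must match your schema, and for classification explainers the importance array may carry an extra per-class dimension):

```python
import json

import requests

scoring_uri = service.scoring_uri  # from the deployed Webservice object
sample = [[41, 2, 3, 1, 0]]        # hypothetical feature row

resp = requests.post(
    scoring_uri,
    data=json.dumps({"data": sample}),
    headers={"Content-Type": "application/json"},
)
result = resp.json()

# Pair each feature name with its local importance for the first prediction.
for name, value in zip(result["features"], result["local_importance_values"][0]):
    print(f"{name}: {value}")
```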
```diff
@@ -10,7 +10,9 @@ dependencies:
   - ipython
   - matplotlib
   - ipywidgets
-  - raiwidgets~=0.18.1
+  - raiwidgets~=0.19.0
   - packaging>=20.9
   - itsdangerous==2.0.1
   - markupsafe<2.1.0
+  - scipy>=1.5.3
+  - protobuf==3.20.0
```
```diff
@@ -12,6 +12,8 @@ dependencies:
   - azureml-dataset-runtime
   - azureml-core
   - ipywidgets
-  - raiwidgets~=0.18.1
+  - raiwidgets~=0.19.0
   - itsdangerous==2.0.1
   - markupsafe<2.1.0
+  - scipy>=1.5.3
+  - protobuf==3.20.0
```
```diff
@@ -3,3 +3,4 @@ dependencies:
   - pip:
     - azureml-sdk
     - azureml-widgets
+    - protobuf==3.20.0
```
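The `protobuf==3.20.x` pin that recurs across these environment files and Dockerfiles guards against protobuf 4.x, whose runtime rejects code generated by older protoc and fails at import time with a descriptor-creation TypeError. A quick way to confirm which runtime an environment actually resolved (illustrative):

```python
import google.protobuf

# Anything in the 3.20 series stays compatible with pre-4.x generated code.
print(google.protobuf.__version__)
```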
```diff
@@ -361,7 +361,7 @@
 "\n",
 "batch_conda_deps = CondaDependencies.create(python_version=\"3.7\",\n",
 "                                            conda_packages=['pip==20.2.4'],\n",
-"                                            pip_packages=[\"tensorflow==1.15.2\", \"pillow\", \n",
+"                                            pip_packages=[\"tensorflow==1.15.2\", \"pillow\", \"protobuf==3.20.1\",\n",
 "                                            \"azureml-core\", \"azureml-dataset-runtime[fuse]\"])\n",
 "batch_env = Environment(name=\"batch_environment\")\n",
 "batch_env.python.conda_dependencies = batch_conda_deps\n",
```
```diff
@@ -437,7 +437,8 @@
 "  - azureml-defaults\n",
 "  - tensorflow-gpu==2.0.0\n",
 "  - keras<=2.3.1\n",
-"  - matplotlib"
+"  - matplotlib\n",
+"  - protobuf==3.20.1"
 ]
 },
 {
```
```diff
@@ -989,6 +990,7 @@
 "cd.add_conda_package('h5py<=2.10.0')\n",
 "cd.add_conda_package('keras<=2.3.1')\n",
 "cd.add_pip_package(\"azureml-defaults\")\n",
+"cd.add_pip_package(\"protobuf==3.20.1\")\n",
 "cd.save_to_file(base_directory='./', conda_file_path='myenv.yml')\n",
 "\n",
 "print(cd.serialize_to_string())"
```
```diff
@@ -943,6 +943,7 @@
 "cd.add_conda_package('numpy')\n",
 "cd.add_pip_package('tensorflow==2.2.0')\n",
 "cd.add_pip_package(\"azureml-defaults\")\n",
+"cd.add_pip_package(\"protobuf==3.20.1\")\n",
 "cd.save_to_file(base_directory='./', conda_file_path='myenv.yml')\n",
 "\n",
 "print(cd.serialize_to_string())"
```
```diff
@@ -11,6 +11,8 @@ RUN pip install azureml-core
 RUN pip install ray==0.8.7
 RUN pip install ray[rllib,tune,serve]==0.8.7
 RUN pip install tensorflow==1.14.0
+RUN pip install 'msrest<0.7.0'
+RUN pip install protobuf==3.20.0

 RUN apt-get update
 RUN apt-get install -y jq
@@ -37,8 +37,7 @@ RUN pip install gym[atari]==0.19.0
 RUN pip install gym[accept-rom-license]==0.19.0

 # Install pip dependencies
-RUN HOROVOD_WITH_TENSORFLOW=1 \
-    pip install 'matplotlib>=3.3,<3.4' \
+RUN pip install 'matplotlib>=3.3,<3.4' \
     'psutil>=5.8,<5.9' \
     'tqdm>=4.59,<4.60' \
     'pandas>=1.1,<1.2' \
@@ -70,6 +69,9 @@ RUN pip install --no-cache-dir \
 # This is required for ray 0.8.7
 RUN pip install -U aiohttp==3.7.4

+RUN pip install 'msrest<0.7.0'
+RUN pip install protobuf==3.20.0
+
 # This is needed for mpi to locate libpython
 ENV LD_LIBRARY_PATH $AZUREML_CONDA_ENVIRONMENT_PATH/lib:$LD_LIBRARY_PATH
```
```diff
@@ -93,7 +93,7 @@
 "source": [
 "%matplotlib inline\n",
 "\n",
-"# Azure Machine Learning Core imports\n",
+"# Azure Machine Learning core imports\n",
 "import azureml.core\n",
 "\n",
 "# Check core SDK version number\n",
```
```diff
@@ -12,6 +12,7 @@ RUN pip install azureml-dataset-runtime
 RUN pip install ray==0.8.7
 RUN pip install ray[rllib,tune,serve]==0.8.7
 RUN pip install tensorflow==1.14.0
+RUN pip install 'msrest<0.7.0'

 RUN apt-get update
 RUN apt-get install -y jq
```
```diff
@@ -8,8 +8,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
     rm -rf /var/lib/apt/lists/* && \
     rm -rf /usr/share/man/*

-RUN conda install -y conda=4.12.0 python=3.7 && conda clean -ay
-RUN pip install ray-on-aml==0.1.6 & \
+RUN conda install -y conda=4.13.0 python=3.7 && conda clean -ay
+RUN pip install ray-on-aml==0.2.1 & \
     pip install --no-cache-dir \
     azureml-defaults \
     azureml-dataset-runtime[fuse,pandas] \
@@ -30,5 +30,9 @@ RUN pip install ray-on-aml==0.1.6 & \
     conda install -y -c conda-forge x264='1!152.20180717' ffmpeg=4.0.2 && \
     conda install -c anaconda opencv

+RUN pip install protobuf==3.20.0
+
 RUN pip install --upgrade ray==0.8.3 \
     ray[rllib,dashboard,tune]==0.8.3

+RUN pip install 'msrest<0.7.0'
```
```diff
@@ -28,7 +28,11 @@ RUN cd multiagent-particle-envs && \

 RUN pip3 install ray-on-aml==0.1.6

+RUN pip install protobuf==3.20.0
+
 RUN pip3 install --upgrade \
     ray==0.8.7 \
     ray[rllib]==0.8.7 \
     ray[tune]==0.8.7

+RUN pip install 'msrest<0.7.0'
```
```diff
@@ -8,8 +8,9 @@ dependencies:
   - matplotlib
   - azureml-dataset-runtime
   - ipywidgets
-  - raiwidgets~=0.18.1
+  - raiwidgets~=0.19.0
   - liac-arff
   - packaging>=20.9
   - itsdangerous==2.0.1
   - markupsafe<2.1.0
+  - protobuf==3.20.0
```
```diff
@@ -43,6 +43,7 @@
 "    1. Logging numeric metrics\n",
 "    1. Logging vectors\n",
 "    1. Logging tables\n",
+"    1. Logging when additional Metric Names are required\n",
 "    1. Uploading files\n",
 "1. [Analyzing results](#Analyzing-results)\n",
 "    1. Tagging a run\n",
@@ -100,7 +101,7 @@
 "\n",
 "# Check core SDK version number\n",
 "\n",
-"print(\"This notebook was created using SDK version 1.42.0, you are currently running version\", azureml.core.VERSION)"
+"print(\"This notebook was created using SDK version 1.43.0, you are currently running version\", azureml.core.VERSION)"
 ]
 },
 {
@@ -367,7 +368,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"### Logging for when more Metric Names are required\n",
+"### Logging when additional Metric Names are required\n",
 "\n",
 "Limits on logging are internally enforced to ensure a smooth experience, however these can sometimes be limiting, particularly in terms of the limit on metric names.\n",
 "\n",
```
```diff
@@ -102,7 +102,7 @@
 "source": [
 "import azureml.core\n",
 "\n",
-"print(\"This notebook was created using version 1.42.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.43.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
```
```diff
@@ -5,17 +5,19 @@ import os
 import argparse
 import datetime
 import time
-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 from math import ceil
 import numpy as np
 import sys
 import shutil
-from tensorflow.contrib.slim.python.slim.nets import inception_v3
+import subprocess
+import tf_slim

 from azureml.core import Run
 from azureml.core.model import Model
 from azureml.core.dataset import Dataset

-slim = tf.contrib.slim
+slim = tf_slim

 image_size = 299
 num_channel = 3
@@ -32,16 +34,18 @@ def get_class_label_dict(labels_dir):

 def init():
     global g_tf_sess, probabilities, label_dict, input_images
+    subprocess.run(["git", "clone", "https://github.com/tensorflow/models/"])
+    sys.path.append("./models/research/slim")

     parser = argparse.ArgumentParser(description="Start a tensorflow model serving")
     parser.add_argument('--model_name', dest="model_name", required=True)
     parser.add_argument('--labels_dir', dest="labels_dir", required=True)
     args, _ = parser.parse_known_args()

+    from nets import inception_v3, inception_utils
     label_dict = get_class_label_dict(args.labels_dir)
     classes_num = len(label_dict)

-    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
+    tf.disable_v2_behavior()
+    with slim.arg_scope(inception_utils.inception_arg_scope()):
         input_images = tf.placeholder(tf.float32, [1, image_size, image_size, num_channel])
         logits, _ = inception_v3.inception_v3(input_images,
                                               num_classes=classes_num,
```
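This hunk is the standard TF1-to-TF2 migration recipe: `tensorflow.contrib.slim` no longer exists in TensorFlow 2.x, so the script switches to the standalone `tf_slim` package and runs its graph-mode code under `tensorflow.compat.v1` with v2 behavior disabled. The core pattern, reduced to a self-contained sketch:

```python
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim  # tf_slim replaces tf.contrib.slim (e.g., slim.arg_scope)

tf.disable_v2_behavior()  # restore graph mode: placeholders, Sessions, etc.

x = tf.placeholder(tf.float32, [None, 299, 299, 3])
shape_op = tf.shape(x)

with tf.Session() as sess:
    print(sess.run(shape_op, feed_dict={x: np.zeros((1, 299, 299, 3))}))
```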
```diff
@@ -247,7 +247,7 @@
 "    config = AmlCompute.provisioning_configuration(vm_size=\"STANDARD_NC6\",\n",
 "                                                   vm_priority=\"lowpriority\", \n",
 "                                                   min_nodes=0, \n",
-"                                                   max_nodes=1)\n",
+"                                                   max_nodes=2)\n",
 "\n",
 "    compute_target = ComputeTarget.create(workspace=ws, name=compute_name, provisioning_configuration=config)\n",
 "    compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)"
@@ -305,9 +305,10 @@
 "from azureml.core.conda_dependencies import CondaDependencies\n",
 "from azureml.core.runconfig import DEFAULT_GPU_IMAGE\n",
 "\n",
-"cd = CondaDependencies.create(python_version=\"3.7\",\n",
+"cd = CondaDependencies.create(python_version=\"3.8\",\n",
 "                              conda_packages=['pip==20.2.4'],\n",
-"                              pip_packages=[\"tensorflow-gpu==1.15.2\",\n",
+"                              pip_packages=[\"tensorflow-gpu==2.3.0\",\n",
+"                              \"tf_slim==1.1.0\", \"protobuf==3.20.1\",\n",
 "                              \"azureml-core\", \"azureml-dataset-runtime[fuse]\"])\n",
 "\n",
 "env = Environment(name=\"parallelenv\")\n",
```