Mirror of https://github.com/Azure/MachineLearningNotebooks.git (synced 2025-12-20 09:37:04 -05:00)

Compare commits: minxia/dis...release_up (10 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 883e4a4c59 | |
| | e90826b331 | |
| | ac04172f6d | |
| | 8c0000beb4 | |
| | 35287ab0d8 | |
| | 3fe4f8b038 | |
| | 1722678469 | |
| | 17da7e8706 | |
| | d2e7213ff3 | |
| | 882cb76e8a | |
@@ -103,7 +103,7 @@
 "source": [
 "import azureml.core\n",
 "\n",
-"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -21,9 +21,8 @@ dependencies:
 
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.23.0
+  - azureml-widgets~=1.24.0
   - pytorch-transformers==1.0.0
   - spacy==2.1.8
   - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.23.0/validated_win32_requirements.txt [--no-deps]
+  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.24.0/validated_win32_requirements.txt [--no-deps]
-  - PyJWT < 2.0.0

@@ -21,10 +21,8 @@ dependencies:
 
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.23.0
+  - azureml-widgets~=1.24.0
   - pytorch-transformers==1.0.0
   - spacy==2.1.8
   - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.23.0/validated_linux_requirements.txt [--no-deps]
+  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.24.0/validated_linux_requirements.txt [--no-deps]
-  - PyJWT < 2.0.0
-

@@ -22,9 +22,8 @@ dependencies:
 
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.23.0
+  - azureml-widgets~=1.24.0
   - pytorch-transformers==1.0.0
   - spacy==2.1.8
   - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.23.0/validated_darwin_requirements.txt [--no-deps]
+  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.24.0/validated_darwin_requirements.txt [--no-deps]
-  - PyJWT < 2.0.0

@@ -105,7 +105,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -0,0 +1,4 @@
name: auto-ml-classification-bank-marketing-all-features
dependencies:
- pip:
  - azureml-sdk

@@ -93,7 +93,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -0,0 +1,4 @@
name: auto-ml-classification-credit-card-fraud
dependencies:
- pip:
  - azureml-sdk

@@ -96,7 +96,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -0,0 +1,4 @@
name: auto-ml-classification-text-dnn
dependencies:
- pip:
  - azureml-sdk

@@ -81,7 +81,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -0,0 +1,4 @@
name: auto-ml-continuous-retraining
dependencies:
- pip:
  - azureml-sdk
@@ -5,7 +5,7 @@ set options=%3
 set PIP_NO_WARN_SCRIPT_LOCATION=0
 
 IF "%conda_env_name%"=="" SET conda_env_name="azure_automl_experimental"
-IF "%automl_env_file%"=="" SET automl_env_file="automl_env.yml"
+IF "%automl_env_file%"=="" SET automl_env_file="automl_thin_client_env.yml"
 
 IF NOT EXIST %automl_env_file% GOTO YmlMissing
 

@@ -12,7 +12,7 @@ fi
 
 if [ "$AUTOML_ENV_FILE" == "" ]
 then
-  AUTOML_ENV_FILE="automl_env.yml"
+  AUTOML_ENV_FILE="automl_thin_client_env.yml"
 fi
 
 if [ ! -f $AUTOML_ENV_FILE ]; then

@@ -12,7 +12,7 @@ fi
 
 if [ "$AUTOML_ENV_FILE" == "" ]
 then
-  AUTOML_ENV_FILE="automl_env.yml"
+  AUTOML_ENV_FILE="automl_thin_client_env_mac.yml"
 fi
 
 if [ ! -f $AUTOML_ENV_FILE ]; then

@@ -7,6 +7,8 @@ dependencies:
 - nb_conda
 - cython
 - urllib3<1.24
+- PyJWT < 2.0.0
+- numpy==1.18.5
 
 - pip:
   # Required packages for AzureML execution, history, and data preparation.

@@ -14,4 +16,3 @@ dependencies:
   - azureml-sdk
   - azureml-widgets
   - pandas
-  - PyJWT < 2.0.0

@@ -8,6 +8,8 @@ dependencies:
 - nb_conda
 - cython
 - urllib3<1.24
+- PyJWT < 2.0.0
+- numpy==1.18.5
 
 - pip:
   # Required packages for AzureML execution, history, and data preparation.

@@ -15,4 +17,3 @@ dependencies:
   - azureml-sdk
   - azureml-widgets
   - pandas
-  - PyJWT < 2.0.0
@@ -90,7 +90,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -194,7 +194,6 @@
 "|**n_cross_validations**|Number of cross validation splits.|\n",
 "|**training_data**|(sparse) array-like, shape = [n_samples, n_features]|\n",
 "|**label_column_name**|(sparse) array-like, shape = [n_samples, ], targets values.|\n",
-"|**scenario**|We need to set this parameter to 'Latest' to enable some experimental features. This parameter should not be set outside of this experimental notebook.|\n",
 "\n",
 "**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
 ]

@@ -223,7 +222,6 @@
 " compute_target = compute_target,\n",
 " training_data = train_data,\n",
 " label_column_name = label,\n",
-" scenario='Latest',\n",
 " **automl_settings\n",
 " )"
 ]

@@ -0,0 +1,4 @@
name: auto-ml-regression-model-proxy
dependencies:
- pip:
  - azureml-sdk

@@ -113,7 +113,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-beer-remote
dependencies:
- pip:
  - azureml-sdk

@@ -87,7 +87,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-bike-share
dependencies:
- pip:
  - azureml-sdk

@@ -97,7 +97,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-energy-demand
dependencies:
- pip:
  - azureml-sdk

@@ -94,7 +94,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-function
dependencies:
- pip:
  - azureml-sdk

@@ -82,7 +82,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-orange-juice-sales
dependencies:
- pip:
  - azureml-sdk

@@ -96,7 +96,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -0,0 +1,4 @@
name: auto-ml-classification-credit-card-fraud-local
dependencies:
- pip:
  - azureml-sdk

@@ -96,7 +96,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -0,0 +1,4 @@
name: auto-ml-regression-explanation-featurization
dependencies:
- pip:
  - azureml-sdk

@@ -92,7 +92,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -0,0 +1,4 @@
name: auto-ml-regression
dependencies:
- pip:
  - azureml-sdk
how-to-use-azureml/azure-synapse/README.md (new file, 84 lines)
@@ -0,0 +1,84 @@
Azure Synapse Analytics is a limitless analytics service that brings together data integration, enterprise data warehousing, and big data analytics. It gives you the freedom to query data on your terms, using either serverless or dedicated resources, at scale. Azure Synapse brings these worlds together with a unified experience to ingest, explore, prepare, manage, and serve data for immediate BI and machine learning needs. A core offering within Azure Synapse Analytics is serverless Apache Spark pools enhanced for big data workloads.

The Synapse integration in Azure ML is for customers who want to use Apache Spark in Azure Synapse Analytics to prepare data at scale in Azure ML before training their ML models. It lets customers work through the end-to-end ML lifecycle, including large-scale data preparation, model training, and deployment, within the Azure ML workspace, without having to use suboptimal tools for machine learning or switch between multiple tools for data preparation and model training. Performing all ML tasks within Azure ML reduces the time needed to iterate on a machine learning project, which typically involves multiple rounds of data preparation and training.

The public preview provides the following capabilities:

- Link an Azure Synapse Analytics workspace to an Azure Machine Learning workspace (via ARM, UI, or SDK)
- Attach Apache Spark pools powered by Azure Synapse Analytics as Azure Machine Learning compute targets (via ARM, UI, or SDK)
- Launch Apache Spark sessions in notebooks and perform interactive data exploration and preparation. This interactive experience leverages Apache Spark magics, with session-level Conda support to install packages (a sketch of the session magics follows this list)
- Productionize ML pipelines by leveraging Apache Spark pools to pre-process big data
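As a minimal sketch of the interactive experience: the magic names below come from the azureml-synapse preview package, and the compute alias and data path are placeholders, not values from this repository:

```
%synapse start -c <attached compute name>

%%synapse
# This cell body executes remotely inside the Synapse Spark session.
df = spark.read.csv("wasbs://<container>@<account>.blob.core.windows.net/sample.csv", header=True)
df.show(5)

%synapse stop
```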
# Using Synapse in Azure Machine Learning

## Create Synapse resources

Follow these documents to create a Synapse workspace; the resource-setup.sh script is also available to create the resources.

- Create from [Portal](https://docs.microsoft.com/en-us/azure/synapse-analytics/quickstart-create-workspace)
- Create from [CLI](https://docs.microsoft.com/en-us/azure/synapse-analytics/quickstart-create-workspace-cli)

Follow these documents to create a Synapse Spark pool.

- Create from [Portal](https://docs.microsoft.com/en-us/azure/synapse-analytics/quickstart-create-apache-spark-pool-portal)
- Create from [CLI](https://docs.microsoft.com/en-us/cli/azure/ext/synapse/synapse/spark/pool?view=azure-cli-latest)

## Link the Synapse workspace

Make sure you are an owner of the Synapse workspace so that you can link it to the AML workspace.
You can run resource-setup.py to link the Synapse workspace and attach the compute.

```python
from azureml.core import Workspace

ws = Workspace.from_config()

from azureml.core import LinkedService, SynapseWorkspaceLinkedServiceConfiguration

synapse_link_config = SynapseWorkspaceLinkedServiceConfiguration(
    subscription_id="<subscription id>",
    resource_group="<resource group>",
    name="<synapse workspace name>"
)

linked_service = LinkedService.register(
    workspace=ws,
    name='<link name>',
    linked_service_config=synapse_link_config)
```

## Attach the Synapse Spark pool as AzureML compute

```python
from azureml.core.compute import SynapseCompute, ComputeTarget

spark_pool_name = "<spark pool name>"
attached_synapse_name = "<attached compute name>"

attach_config = SynapseCompute.attach_configuration(
    linked_service,
    type="SynapseSpark",
    pool_name=spark_pool_name)

synapse_compute = ComputeTarget.attach(
    workspace=ws,
    name=attached_synapse_name,
    attach_configuration=attach_config)

synapse_compute.wait_for_completion()
```

## Set up permission

Grant the Spark admin role to the system-assigned identity of the linked service so that users can submit experiment or pipeline runs from the AML workspace to the Synapse Spark pool.

Grant the Spark admin role to specific users so that they can start Spark sessions against the Synapse Spark pool.

You can get the system-assigned identity information by running:

```python
print(linked_service.system_assigned_identity_principal_id)
```

- Launch Synapse Studio for the Synapse workspace and grant the linked service MSI the "Synapse Apache Spark administrator" role.
- In the Azure portal, grant the linked service MSI the "Storage Blob Data Contributor" role on the primary ADLS Gen2 account of the Synapse workspace to use the library management feature.
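With the roles above granted, a run can be submitted from the AML workspace to the attached pool. Below is a minimal sketch of a pipeline data-preparation step, assuming the public-preview SynapseSparkStep API from the azureml-pipeline-steps package; the script name, source folder, experiment name, and resource sizes are illustrative placeholders:

```python
from azureml.core import Experiment
from azureml.pipeline.core import Pipeline
from azureml.pipeline.steps import SynapseSparkStep

# 'dataprep.py' and './code' are hypothetical placeholders; tune the driver
# and executor sizes to the workload.
step = SynapseSparkStep(name="synapse-spark-prep",
                        file="dataprep.py",
                        source_directory="./code",
                        compute_target=attached_synapse_name,  # attached above
                        driver_memory="2g",
                        driver_cores=2,
                        executor_memory="2g",
                        executor_cores=2,
                        num_executors=2)

# Submit the single-step pipeline and wait for the Spark job to finish.
pipeline = Pipeline(workspace=ws, steps=[step])
run = Experiment(ws, "synapse-spark-prep").submit(pipeline)
run.wait_for_completion()
```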
@@ -0,0 +1,6 @@
name: multi-model-register-and-deploy
dependencies:
- pip:
  - azureml-sdk
  - numpy
  - scikit-learn

@@ -0,0 +1,6 @@
name: model-register-and-deploy
dependencies:
- pip:
  - azureml-sdk
  - numpy
  - scikit-learn

@@ -0,0 +1,4 @@
name: deploy-aks-with-controlled-rollout
dependencies:
- pip:
  - azureml-sdk

@@ -0,0 +1,4 @@
name: enable-app-insights-in-production-service
dependencies:
- pip:
  - azureml-sdk
@@ -94,6 +94,17 @@ def main():
     os.makedirs(output_dir, exist_ok=True)
 
     kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
+    # Use Azure Open Datasets for MNIST dataset
+    datasets.MNIST.resources = [
+        ("https://azureopendatastorage.azurefd.net/mnist/train-images-idx3-ubyte.gz",
+         "f68b3c2dcbeaaa9fbdd348bbdeb94873"),
+        ("https://azureopendatastorage.azurefd.net/mnist/train-labels-idx1-ubyte.gz",
+         "d53e105ee54ea40749a09fcbcd1e9432"),
+        ("https://azureopendatastorage.azurefd.net/mnist/t10k-images-idx3-ubyte.gz",
+         "9fb629c4189551a2d022fa330f9573f3"),
+        ("https://azureopendatastorage.azurefd.net/mnist/t10k-labels-idx1-ubyte.gz",
+         "ec29112dd5afa0611ce80d1b7f02629c")
+    ]
     train_loader = torch.utils.data.DataLoader(
         datasets.MNIST('data', train=True, download=True,
                        transform=transforms.Compose([transforms.ToTensor(),

@@ -0,0 +1,8 @@
name: onnx-convert-aml-deploy-tinyyolo
dependencies:
- pip:
  - azureml-sdk
  - numpy
  - git+https://github.com/apple/coremltools@v2.1
  - onnx<1.7.0
  - onnxmltools

@@ -0,0 +1,9 @@
name: onnx-inference-facial-expression-recognition-deploy
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets
  - matplotlib
  - numpy
  - onnx<1.7.0
  - opencv-python-headless

@@ -0,0 +1,9 @@
name: onnx-inference-mnist-deploy
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets
  - matplotlib
  - numpy
  - onnx<1.7.0
  - opencv-python-headless

@@ -0,0 +1,4 @@
name: onnx-model-register-and-deploy
dependencies:
- pip:
  - azureml-sdk

@@ -0,0 +1,4 @@
name: onnx-modelzoo-aml-deploy-resnet50
dependencies:
- pip:
  - azureml-sdk

@@ -0,0 +1,5 @@
name: onnx-train-pytorch-aml-deploy-mnist
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets

@@ -0,0 +1,5 @@
name: production-deploy-to-aks-gpu
dependencies:
- pip:
  - azureml-sdk
  - tensorflow

@@ -0,0 +1,8 @@
name: production-deploy-to-aks-ssl
dependencies:
- pip:
  - azureml-sdk
  - matplotlib
  - tqdm
  - scipy
  - sklearn

@@ -0,0 +1,8 @@
name: production-deploy-to-aks
dependencies:
- pip:
  - azureml-sdk
  - matplotlib
  - tqdm
  - scipy
  - sklearn

@@ -0,0 +1,4 @@
name: model-register-and-deploy-spark
dependencies:
- pip:
  - azureml-sdk

@@ -0,0 +1,13 @@
name: explain-model-on-amlcompute
dependencies:
- pip:
  - azureml-sdk
  - azureml-interpret
  - flask
  - flask-cors
  - gevent>=1.3.6
  - jinja2
  - ipython
  - matplotlib
  - azureml-dataset-runtime
  - ipywidgets

@@ -226,36 +226,6 @@
 " ('classifier', SVC(C=1.0, probability=True))])"
 ]
 },
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"'''\n",
-"# Uncomment below if sklearn-pandas is not installed\n",
-"#!pip install sklearn-pandas\n",
-"from sklearn_pandas import DataFrameMapper\n",
-"\n",
-"# Impute, standardize the numeric features and one-hot encode the categorical features. \n",
-"\n",
-"\n",
-"numeric_transformations = [([f], Pipeline(steps=[('imputer', SimpleImputer(strategy='median')), ('scaler', StandardScaler())])) for f in numerical]\n",
-"\n",
-"categorical_transformations = [([f], OneHotEncoder(handle_unknown='ignore', sparse=False)) for f in categorical]\n",
-"\n",
-"transformations = numeric_transformations + categorical_transformations\n",
-"\n",
-"# Append classifier to preprocessing pipeline.\n",
-"# Now we have a full prediction pipeline.\n",
-"clf = Pipeline(steps=[('preprocessor', transformations),\n",
-" ('classifier', SVC(C=1.0, probability=True))]) \n",
-"\n",
-"\n",
-"\n",
-"'''"
-]
-},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -0,0 +1,12 @@
name: save-retrieve-explanations-run-history
dependencies:
- pip:
  - azureml-sdk
  - azureml-interpret
  - flask
  - flask-cors
  - gevent>=1.3.6
  - jinja2
  - ipython
  - matplotlib
  - ipywidgets

@@ -166,12 +166,12 @@
 "source": [
 "from sklearn.model_selection import train_test_split\n",
 "import joblib\n",
+"from sklearn.compose import ColumnTransformer\n",
 "from sklearn.preprocessing import StandardScaler, OneHotEncoder\n",
 "from sklearn.impute import SimpleImputer\n",
 "from sklearn.pipeline import Pipeline\n",
 "from sklearn.linear_model import LogisticRegression\n",
 "from sklearn.ensemble import RandomForestClassifier\n",
-"from sklearn_pandas import DataFrameMapper\n",
 "\n",
 "from interpret.ext.blackbox import TabularExplainer\n",
 "\n",

@@ -201,17 +201,23 @@
 "# Store the numerical columns in a list numerical\n",
 "numerical = attritionXData.columns.difference(categorical)\n",
 "\n",
-"numeric_transformations = [([f], Pipeline(steps=[\n",
+"# We create the preprocessing pipelines for both numeric and categorical data.\n",
+"numeric_transformer = Pipeline(steps=[\n",
 " ('imputer', SimpleImputer(strategy='median')),\n",
-" ('scaler', StandardScaler())])) for f in numerical]\n",
+" ('scaler', StandardScaler())])\n",
 "\n",
-"categorical_transformations = [([f], OneHotEncoder(handle_unknown='ignore', sparse=False)) for f in categorical]\n",
+"categorical_transformer = Pipeline(steps=[\n",
+" ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),\n",
+" ('onehot', OneHotEncoder(handle_unknown='ignore'))])\n",
 "\n",
-"transformations = numeric_transformations + categorical_transformations\n",
+"transformations = ColumnTransformer(\n",
+" transformers=[\n",
+" ('num', numeric_transformer, numerical),\n",
+" ('cat', categorical_transformer, categorical)])\n",
 "\n",
 "# Append classifier to preprocessing pipeline.\n",
 "# Now we have a full prediction pipeline.\n",
-"clf = Pipeline(steps=[('preprocessor', DataFrameMapper(transformations)),\n",
+"clf = Pipeline(steps=[('preprocessor', transformations),\n",
 " ('classifier', RandomForestClassifier())])\n",
 "\n",
 "# Split data into train and test\n",

@@ -350,7 +356,7 @@
 "# the submitted job is run in. Note the remote environment(s) needs to be similar to the local\n",
 "# environment, otherwise if a model is trained or deployed in a different environment this can\n",
 "# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
-"myenv = CondaDependencies.create(pip_packages=['sklearn-pandas', 'pyyaml', sklearn_dep, pandas_dep] + azureml_pip_packages,\n",
+"myenv = CondaDependencies.create(pip_packages=['pyyaml', sklearn_dep, pandas_dep] + azureml_pip_packages,\n",
 " pin_sdk_version=False)\n",
 "\n",
 "with open(\"myenv.yml\",\"w\") as f:\n",

@@ -0,0 +1,12 @@
name: train-explain-model-locally-and-deploy
dependencies:
- pip:
  - azureml-sdk
  - azureml-interpret
  - flask
  - flask-cors
  - gevent>=1.3.6
  - jinja2
  - ipython
  - matplotlib
  - ipywidgets
@@ -294,7 +294,7 @@
 "# the submitted job is run in. Note the remote environment(s) needs to be similar to the local\n",
 "# environment, otherwise if a model is trained or deployed in a different environment this can\n",
 "# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
-"azureml_pip_packages.extend(['sklearn-pandas', 'pyyaml', sklearn_dep, pandas_dep])\n",
+"azureml_pip_packages.extend(['pyyaml', sklearn_dep, pandas_dep])\n",
 "run_config.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=azureml_pip_packages)\n",
 "# Now submit a run on AmlCompute\n",
 "from azureml.core.script_run_config import ScriptRunConfig\n",

@@ -458,7 +458,7 @@
 "# the submitted job is run in. Note the remote environment(s) needs to be similar to the local\n",
 "# environment, otherwise if a model is trained or deployed in a different environment this can\n",
 "# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
-"azureml_pip_packages.extend(['sklearn-pandas', 'pyyaml', sklearn_dep, pandas_dep])\n",
+"azureml_pip_packages.extend(['pyyaml', sklearn_dep, pandas_dep])\n",
 "myenv = CondaDependencies.create(pip_packages=azureml_pip_packages)\n",
 "\n",
 "with open(\"myenv.yml\",\"w\") as f:\n",

@@ -0,0 +1,14 @@
name: train-explain-model-on-amlcompute-and-deploy
dependencies:
- pip:
  - azureml-sdk
  - azureml-interpret
  - flask
  - flask-cors
  - gevent>=1.3.6
  - jinja2
  - ipython
  - matplotlib
  - azureml-dataset-runtime
  - azureml-core
  - ipywidgets

@@ -5,13 +5,13 @@
 import os
 import pandas as pd
 import zipfile
-from sklearn.model_selection import train_test_split
 import joblib
+from sklearn.compose import ColumnTransformer
+from sklearn.model_selection import train_test_split
 from sklearn.preprocessing import StandardScaler, OneHotEncoder
 from sklearn.impute import SimpleImputer
 from sklearn.pipeline import Pipeline
 from sklearn.linear_model import LogisticRegression
-from sklearn_pandas import DataFrameMapper
 
 from azureml.core.run import Run
 from interpret.ext.blackbox import TabularExplainer

@@ -57,16 +57,22 @@ for col, value in attritionXData.iteritems():
 # store the numerical columns
 numerical = attritionXData.columns.difference(categorical)
 
-numeric_transformations = [([f], Pipeline(steps=[
+# We create the preprocessing pipelines for both numeric and categorical data.
+numeric_transformer = Pipeline(steps=[
     ('imputer', SimpleImputer(strategy='median')),
-    ('scaler', StandardScaler())])) for f in numerical]
+    ('scaler', StandardScaler())])
 
-categorical_transformations = [([f], OneHotEncoder(handle_unknown='ignore', sparse=False)) for f in categorical]
+categorical_transformer = Pipeline(steps=[
+    ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
+    ('onehot', OneHotEncoder(handle_unknown='ignore'))])
 
-transformations = numeric_transformations + categorical_transformations
+transformations = ColumnTransformer(
+    transformers=[
+        ('num', numeric_transformer, numerical),
+        ('cat', categorical_transformer, categorical)])
 
 # append classifier to preprocessing pipeline
-clf = Pipeline(steps=[('preprocessor', DataFrameMapper(transformations)),
+clf = Pipeline(steps=[('preprocessor', transformations),
                       ('classifier', LogisticRegression(solver='lbfgs'))])
 
 # get the run this was submitted from to interact with run history

@@ -0,0 +1,5 @@
name: aml-pipelines-data-transfer
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets

@@ -0,0 +1,5 @@
name: aml-pipelines-getting-started
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets

@@ -0,0 +1,5 @@
name: aml-pipelines-how-to-use-modulestep
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets

@@ -0,0 +1,5 @@
name: aml-pipelines-how-to-use-pipeline-drafts
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets
@@ -121,12 +121,17 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"os.makedirs('./data/mnist', exist_ok=True)\n",
+"data_folder = os.path.join(os.getcwd(), 'data/mnist')\n",
+"os.makedirs(data_folder, exist_ok=True)\n",
 "\n",
-"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', filename = './data/mnist/train-images.gz')\n",
-"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', filename = './data/mnist/train-labels.gz')\n",
-"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename = './data/mnist/test-images.gz')\n",
-"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename = './data/mnist/test-labels.gz')"
+"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/train-images-idx3-ubyte.gz',\n",
+" filename=os.path.join(data_folder, 'train-images-idx3-ubyte.gz'))\n",
+"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/train-labels-idx1-ubyte.gz',\n",
+" filename=os.path.join(data_folder, 'train-labels-idx1-ubyte.gz'))\n",
+"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-images-idx3-ubyte.gz',\n",
+" filename=os.path.join(data_folder, 't10k-images-idx3-ubyte.gz'))\n",
+"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-labels-idx1-ubyte.gz',\n",
+" filename=os.path.join(data_folder, 't10k-labels-idx1-ubyte.gz'))"
 ]
 },
 {

@@ -146,11 +151,11 @@
 "from utils import load_data\n",
 "\n",
 "# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster.\n",
-"X_train = load_data('./data/mnist/train-images.gz', False) / 255.0\n",
-"y_train = load_data('./data/mnist/train-labels.gz', True).reshape(-1)\n",
+"X_train = load_data(os.path.join(data_folder, 'train-images-idx3-ubyte.gz'), False) / np.float32(255.0)\n",
+"X_test = load_data(os.path.join(data_folder, 't10k-images-idx3-ubyte.gz'), False) / np.float32(255.0)\n",
+"y_train = load_data(os.path.join(data_folder, 'train-labels-idx1-ubyte.gz'), True).reshape(-1)\n",
+"y_test = load_data(os.path.join(data_folder, 't10k-labels-idx1-ubyte.gz'), True).reshape(-1)\n",
 "\n",
-"X_test = load_data('./data/mnist/test-images.gz', False) / 255.0\n",
-"y_test = load_data('./data/mnist/test-labels.gz', True).reshape(-1)\n",
 "\n",
 "count = 0\n",
 "sample_size = 30\n",

@@ -0,0 +1,9 @@
name: aml-pipelines-parameter-tuning-with-hyperdrive
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets
  - matplotlib
  - numpy
  - pandas_ml
  - azureml-dataset-runtime[pandas,fuse]

@@ -0,0 +1,6 @@
name: aml-pipelines-publish-and-run-using-rest-endpoint
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets
  - requests

@@ -0,0 +1,5 @@
name: aml-pipelines-setup-schedule-for-a-published-pipeline
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets

@@ -0,0 +1,6 @@
name: aml-pipelines-setup-versioned-pipeline-endpoints
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets
  - requests

@@ -0,0 +1,5 @@
name: aml-pipelines-showcasing-datapath-and-pipelineparameter
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets

@@ -0,0 +1,5 @@
name: aml-pipelines-showcasing-dataset-and-pipelineparameter
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets

@@ -0,0 +1,4 @@
name: aml-pipelines-with-automated-machine-learning-step
dependencies:
- pip:
  - azureml-sdk

@@ -0,0 +1,5 @@
name: aml-pipelines-with-commandstep-r
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets

@@ -0,0 +1,5 @@
name: aml-pipelines-with-commandstep
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets

@@ -0,0 +1,5 @@
name: aml-pipelines-with-data-dependency-steps
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets

@@ -0,0 +1,6 @@
name: aml-pipelines-with-notebook-runner-step
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets
  - azureml-contrib-notebook

@@ -0,0 +1,10 @@
name: nyc-taxi-data-regression-model-building
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets
  - azureml-opendatasets
  - azureml-train-automl
  - matplotlib
  - pandas
  - pyarrow

@@ -0,0 +1,7 @@
name: file-dataset-image-inference-mnist
dependencies:
- pip:
  - azureml-sdk
  - azureml-pipeline-steps
  - azureml-widgets
  - pandas

@@ -0,0 +1,7 @@
name: tabular-dataset-inference-iris
dependencies:
- pip:
  - azureml-sdk
  - azureml-pipeline-steps
  - azureml-widgets
  - pandas

@@ -0,0 +1,7 @@
name: pipeline-style-transfer-parallel-run
dependencies:
- pip:
  - azureml-sdk
  - azureml-pipeline-steps
  - azureml-widgets
  - requests

@@ -0,0 +1,5 @@
name: distributed-chainer
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets
@@ -4,6 +4,8 @@ import os
 
 import numpy as np
 
+from utils import download_mnist
+
 import chainer
 from chainer import backend
 from chainer import backends

@@ -17,6 +19,7 @@ from chainer.training import extensions
 from chainer.dataset import concat_examples
 from chainer.backends.cuda import to_cpu
 
+
 from azureml.core.run import Run
 run = Run.get_context()
 

@@ -49,7 +52,7 @@ def main():
     args = parser.parse_args()
 
     # Download the MNIST data if you haven't downloaded it yet
-    train, test = datasets.mnist.get_mnist(withlabel=True, ndim=1)
+    train, test = download_mnist()
 
     gpu_id = args.gpu_id
     batchsize = args.batchsize

@@ -2,6 +2,8 @@ import numpy as np
 import os
 import json
 
+from utils import download_mnist
+
 from chainer import serializers, using_config, Variable, datasets
 import chainer.functions as F
 import chainer.links as L

@@ -41,7 +43,7 @@ def init():
 def run(input_data):
     i = np.array(json.loads(input_data)['data'])
 
-    _, test = datasets.get_mnist()
+    _, test = download_mnist()
     x = Variable(np.asarray([test[i][0]]))
     y = model(x)
 

@@ -217,7 +217,8 @@
 "import shutil\n",
 "\n",
 "shutil.copy('chainer_mnist.py', project_folder)\n",
-"shutil.copy('chainer_score.py', project_folder)"
+"shutil.copy('chainer_score.py', project_folder)\n",
+"shutil.copy('utils.py', project_folder)"
 ]
 },
 {

@@ -263,6 +264,7 @@
 "- python=3.6.2\n",
 "- pip:\n",
 " - azureml-defaults\n",
+" - azureml-opendatasets\n",
 " - chainer==5.1.0\n",
 " - cupy-cuda90==5.1.0\n",
 " - mpi4py==3.0.0\n",

@@ -557,6 +559,7 @@
 "cd.add_conda_package('numpy')\n",
 "cd.add_pip_package('chainer==5.1.0')\n",
 "cd.add_pip_package(\"azureml-defaults\")\n",
+"cd.add_pip_package(\"azureml-opendatasets\")\n",
 "cd.save_to_file(base_directory='./', conda_file_path='myenv.yml')\n",
 "\n",
 "print(cd.serialize_to_string())"

@@ -584,7 +587,8 @@
 "\n",
 "\n",
 "myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\n",
-"inference_config = InferenceConfig(entry_script=\"chainer_score.py\", environment=myenv)\n",
+"inference_config = InferenceConfig(entry_script=\"chainer_score.py\", environment=myenv,\n",
+" source_directory=project_folder)\n",
 "\n",
 "aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,\n",
 " auth_enabled=True, # this flag generates API keys to secure access\n",

@@ -685,13 +689,16 @@
 " res = res.reshape(n_items[0], 1)\n",
 " return res\n",
 "\n",
-"os.makedirs('./data/mnist', exist_ok=True)\n",
-"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename = './data/mnist/test-images.gz')\n",
-"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename = './data/mnist/test-labels.gz')\n",
+"data_folder = os.path.join(os.getcwd(), 'data/mnist')\n",
+"os.makedirs(data_folder, exist_ok=True)\n",
 "\n",
-"X_test = load_data('./data/mnist/test-images.gz', False)\n",
-"y_test = load_data('./data/mnist/test-labels.gz', True).reshape(-1)\n",
+"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-images-idx3-ubyte.gz',\n",
+" filename=os.path.join(data_folder, 't10k-images-idx3-ubyte.gz'))\n",
+"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-labels-idx1-ubyte.gz',\n",
+" filename=os.path.join(data_folder, 't10k-labels-idx1-ubyte.gz'))\n",
 "\n",
+"X_test = load_data(os.path.join(data_folder, 't10k-images-idx3-ubyte.gz'), False) / np.float32(255.0)\n",
+"y_test = load_data(os.path.join(data_folder, 't10k-labels-idx1-ubyte.gz'), True).reshape(-1)\n",
 "\n",
 "# send a random row from the test set to score\n",
 "random_index = np.random.randint(0, len(X_test)-1)\n",

@@ -0,0 +1,13 @@
name: train-hyperparameter-tune-deploy-with-chainer
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets
  - numpy
  - matplotlib
  - json
  - urllib
  - gzip
  - struct
  - requests
  - azureml-opendatasets
@@ -0,0 +1,50 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

import glob
import gzip
import numpy as np
import os
import struct

from azureml.core import Dataset
from azureml.opendatasets import MNIST
from chainer.datasets import tuple_dataset


# load compressed MNIST gz files and return numpy arrays
def load_data(filename, label=False):
    with gzip.open(filename) as gz:
        struct.unpack('I', gz.read(4))
        n_items = struct.unpack('>I', gz.read(4))
        if not label:
            n_rows = struct.unpack('>I', gz.read(4))[0]
            n_cols = struct.unpack('>I', gz.read(4))[0]
            res = np.frombuffer(gz.read(n_items[0] * n_rows * n_cols), dtype=np.uint8)
            res = res.reshape(n_items[0], n_rows * n_cols)
        else:
            res = np.frombuffer(gz.read(n_items[0]), dtype=np.uint8)
            res = res.reshape(n_items[0], 1)
    return res


def download_mnist():
    data_folder = os.path.join(os.getcwd(), 'data/mnist')
    os.makedirs(data_folder, exist_ok=True)

    mnist_file_dataset = MNIST.get_file_dataset()
    mnist_file_dataset.download(data_folder, overwrite=True)

    X_train = load_data(glob.glob(os.path.join(data_folder, "**/train-images-idx3-ubyte.gz"),
                                  recursive=True)[0], False) / 255.0
    X_test = load_data(glob.glob(os.path.join(data_folder, "**/t10k-images-idx3-ubyte.gz"),
                                 recursive=True)[0], False) / 255.0
    y_train = load_data(glob.glob(os.path.join(data_folder, "**/train-labels-idx1-ubyte.gz"),
                                  recursive=True)[0], True).reshape(-1)
    y_test = load_data(glob.glob(os.path.join(data_folder, "**/t10k-labels-idx1-ubyte.gz"),
                                 recursive=True)[0], True).reshape(-1)

    train = tuple_dataset.TupleDataset(X_train.astype(np.float32), y_train.astype(np.int32))
    test = tuple_dataset.TupleDataset(X_test.astype(np.float32), y_test.astype(np.int32))

    return train, test
@@ -0,0 +1,5 @@
name: fastai-with-custom-docker
dependencies:
- pip:
  - azureml-sdk
  - fastai==1.0.61

@@ -0,0 +1,8 @@
name: train-hyperparameter-tune-deploy-with-keras
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets
  - tensorflow
  - keras<=2.3.1
  - matplotlib

@@ -0,0 +1,5 @@
name: distributed-pytorch-with-distributeddataparallel
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets

@@ -0,0 +1,5 @@
name: distributed-pytorch-with-horovod
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets

@@ -51,6 +51,17 @@ if args.cuda:
 
 
 kwargs = {}
+# Use Azure Open Datasets for MNIST dataset
+datasets.MNIST.resources = [
+    ("https://azureopendatastorage.azurefd.net/mnist/train-images-idx3-ubyte.gz",
+     "f68b3c2dcbeaaa9fbdd348bbdeb94873"),
+    ("https://azureopendatastorage.azurefd.net/mnist/train-labels-idx1-ubyte.gz",
+     "d53e105ee54ea40749a09fcbcd1e9432"),
+    ("https://azureopendatastorage.azurefd.net/mnist/t10k-images-idx3-ubyte.gz",
+     "9fb629c4189551a2d022fa330f9573f3"),
+    ("https://azureopendatastorage.azurefd.net/mnist/t10k-labels-idx1-ubyte.gz",
+     "ec29112dd5afa0611ce80d1b7f02629c")
+]
 train_dataset = \
     datasets.MNIST('data-%d' % hvd.rank(), train=True, download=True,
                    transform=transforms.Compose([

@@ -0,0 +1,10 @@
name: train-hyperparameter-tune-deploy-with-pytorch
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets
  - pillow==5.4.1
  - matplotlib
  - numpy==1.19.3
  - https://download.pytorch.org/whl/cpu/torch-1.6.0%2Bcpu-cp36-cp36m-win_amd64.whl
  - https://download.pytorch.org/whl/cpu/torchvision-0.7.0%2Bcpu-cp36-cp36m-win_amd64.whl

@@ -0,0 +1,6 @@
name: train-hyperparameter-tune-deploy-with-sklearn
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets
  - numpy

@@ -0,0 +1,11 @@
name: distributed-tensorflow-with-horovod
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets
  - keras
  - tensorflow-gpu==1.13.2
  - horovod==0.19.1
  - matplotlib
  - pandas
  - fuse

@@ -0,0 +1,5 @@
name: distributed-tensorflow-with-parameter-server
dependencies:
- pip:
  - azureml-sdk
  - azureml-widgets

@@ -0,0 +1,12 @@
name: train-hyperparameter-tune-deploy-with-tensorflow
dependencies:
- numpy
- matplotlib
- pip:
  - azureml-sdk
  - azureml-widgets
  - pandas
  - keras
  - tensorflow==2.0.0
  - matplotlib
  - fuse
@@ -102,6 +102,17 @@ torch.manual_seed(args.seed)
 device = torch.device("cuda" if use_cuda else "cpu")
 
 kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
+# Use Azure Open Datasets for MNIST dataset
+datasets.MNIST.resources = [
+    ("https://azureopendatastorage.azurefd.net/mnist/train-images-idx3-ubyte.gz",
+     "f68b3c2dcbeaaa9fbdd348bbdeb94873"),
+    ("https://azureopendatastorage.azurefd.net/mnist/train-labels-idx1-ubyte.gz",
+     "d53e105ee54ea40749a09fcbcd1e9432"),
+    ("https://azureopendatastorage.azurefd.net/mnist/t10k-images-idx3-ubyte.gz",
+     "9fb629c4189551a2d022fa330f9573f3"),
+    ("https://azureopendatastorage.azurefd.net/mnist/t10k-labels-idx1-ubyte.gz",
+     "ec29112dd5afa0611ce80d1b7f02629c")
+]
 train_loader = torch.utils.data.DataLoader(
     datasets.MNIST('../data', train=True, download=True,
                    transform=transforms.Compose([

@@ -332,6 +332,18 @@
 "import random\n",
 "import numpy as np\n",
 "\n",
+"# Use Azure Open Datasets for MNIST dataset\n",
+"datasets.MNIST.resources = [\n",
+" (\"https://azureopendatastorage.azurefd.net/mnist/train-images-idx3-ubyte.gz\",\n",
+" \"f68b3c2dcbeaaa9fbdd348bbdeb94873\"),\n",
+" (\"https://azureopendatastorage.azurefd.net/mnist/train-labels-idx1-ubyte.gz\",\n",
+" \"d53e105ee54ea40749a09fcbcd1e9432\"),\n",
+" (\"https://azureopendatastorage.azurefd.net/mnist/t10k-images-idx3-ubyte.gz\",\n",
+" \"9fb629c4189551a2d022fa330f9573f3\"),\n",
+" (\"https://azureopendatastorage.azurefd.net/mnist/t10k-labels-idx1-ubyte.gz\",\n",
+" \"ec29112dd5afa0611ce80d1b7f02629c\")\n",
+"]\n",
+"\n",
 "test_data = datasets.MNIST('../data', train=False, transform=transforms.Compose([\n",
 " transforms.ToTensor(),\n",
 " transforms.Normalize((0.1307,), (0.3081,))]))\n",

@@ -0,0 +1,8 @@
name: pong_rllib
dependencies:
- pip:
  - azureml-sdk
  - azureml-contrib-reinforcementlearning
  - azureml-widgets
  - matplotlib
  - azure-mgmt-network==12.0.0

@@ -0,0 +1,6 @@
name: cartpole_ci
dependencies:
- pip:
  - azureml-sdk
  - azureml-contrib-reinforcementlearning
  - azureml-widgets

@@ -0,0 +1,6 @@
name: cartpole_sc
dependencies:
- pip:
  - azureml-sdk
  - azureml-contrib-reinforcementlearning
  - azureml-widgets
Some files were not shown because too many files have changed in this diff.