Compare commits

...

6 Commits

Author SHA1 Message Date
amlrelsa-ms
35287ab0d8 update samples from Release-91 as a part of SDK release 2021-03-09 05:36:08 +00:00
Harneet Virk
3fe4f8b038 Merge pull request #1375 from Azure/release_update/Release-90
update samples from Release-90 as a part of SDK release
2021-03-01 09:15:14 -08:00
amlrelsa-ms
1722678469 update samples from Release-90 as a part of SDK release 2021-03-01 17:13:25 +00:00
Harneet Virk
17da7e8706 Merge pull request #1364 from Azure/release_update/Release-89
update samples from Release-89 as a part of SDK release
2021-02-23 17:27:27 -08:00
amlrelsa-ms
d2e7213ff3 update samples from Release-89 as a part of SDK release 2021-02-24 01:26:17 +00:00
mx-iao
882cb76e8a Merge pull request #1361 from Azure/minxia/distr-pytorch
Update distributed pytorch example
2021-02-23 12:07:20 -08:00
134 changed files with 1249 additions and 9740 deletions

View File

@@ -103,7 +103,7 @@
"source": [ "source": [
"import azureml.core\n", "import azureml.core\n",
"\n", "\n",
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -21,9 +21,8 @@ dependencies:
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.23.0
+  - azureml-widgets~=1.24.0
   - pytorch-transformers==1.0.0
   - spacy==2.1.8
   - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.23.0/validated_win32_requirements.txt [--no-deps]
+  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.24.0/validated_win32_requirements.txt [--no-deps]
-  - PyJWT < 2.0.0

View File

@@ -21,10 +21,8 @@ dependencies:
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.23.0
+  - azureml-widgets~=1.24.0
   - pytorch-transformers==1.0.0
   - spacy==2.1.8
   - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.23.0/validated_linux_requirements.txt [--no-deps]
+  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.24.0/validated_linux_requirements.txt [--no-deps]
-  - PyJWT < 2.0.0

View File

@@ -22,9 +22,8 @@ dependencies:
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.23.0
+  - azureml-widgets~=1.24.0
   - pytorch-transformers==1.0.0
   - spacy==2.1.8
   - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.23.0/validated_darwin_requirements.txt [--no-deps]
+  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.24.0/validated_darwin_requirements.txt [--no-deps]
-  - PyJWT < 2.0.0

View File

@@ -105,7 +105,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -0,0 +1,4 @@
name: auto-ml-classification-bank-marketing-all-features
dependencies:
- pip:
- azureml-sdk

View File

@@ -93,7 +93,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -0,0 +1,4 @@
name: auto-ml-classification-credit-card-fraud
dependencies:
- pip:
- azureml-sdk

View File

@@ -96,7 +96,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -0,0 +1,4 @@
name: auto-ml-classification-text-dnn
dependencies:
- pip:
- azureml-sdk

View File

@@ -81,7 +81,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -0,0 +1,4 @@
name: auto-ml-continuous-retraining
dependencies:
- pip:
- azureml-sdk

View File

@@ -5,7 +5,7 @@ set options=%3
 set PIP_NO_WARN_SCRIPT_LOCATION=0
 IF "%conda_env_name%"=="" SET conda_env_name="azure_automl_experimental"
-IF "%automl_env_file%"=="" SET automl_env_file="automl_env.yml"
+IF "%automl_env_file%"=="" SET automl_env_file="automl_thin_client_env.yml"
 IF NOT EXIST %automl_env_file% GOTO YmlMissing

View File

@@ -12,7 +12,7 @@ fi
if [ "$AUTOML_ENV_FILE" == "" ] if [ "$AUTOML_ENV_FILE" == "" ]
then then
AUTOML_ENV_FILE="automl_env.yml" AUTOML_ENV_FILE="automl_thin_client_env.yml"
fi fi
if [ ! -f $AUTOML_ENV_FILE ]; then if [ ! -f $AUTOML_ENV_FILE ]; then

View File

@@ -12,7 +12,7 @@ fi
if [ "$AUTOML_ENV_FILE" == "" ] if [ "$AUTOML_ENV_FILE" == "" ]
then then
AUTOML_ENV_FILE="automl_env.yml" AUTOML_ENV_FILE="automl_thin_client_env_mac.yml"
fi fi
if [ ! -f $AUTOML_ENV_FILE ]; then if [ ! -f $AUTOML_ENV_FILE ]; then

View File

@@ -7,6 +7,8 @@ dependencies:
 - nb_conda
 - cython
 - urllib3<1.24
+- PyJWT < 2.0.0
+- numpy==1.18.5
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
@@ -14,4 +16,3 @@ dependencies:
   - azureml-sdk
   - azureml-widgets
   - pandas
-  - PyJWT < 2.0.0

View File

@@ -8,6 +8,8 @@ dependencies:
 - nb_conda
 - cython
 - urllib3<1.24
+- PyJWT < 2.0.0
+- numpy==1.18.5
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
@@ -15,4 +17,3 @@ dependencies:
   - azureml-sdk
   - azureml-widgets
   - pandas
-  - PyJWT < 2.0.0

View File

@@ -90,7 +90,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },
@@ -194,7 +194,6 @@
"|**n_cross_validations**|Number of cross validation splits.|\n", "|**n_cross_validations**|Number of cross validation splits.|\n",
"|**training_data**|(sparse) array-like, shape = [n_samples, n_features]|\n", "|**training_data**|(sparse) array-like, shape = [n_samples, n_features]|\n",
"|**label_column_name**|(sparse) array-like, shape = [n_samples, ], targets values.|\n", "|**label_column_name**|(sparse) array-like, shape = [n_samples, ], targets values.|\n",
"|**scenario**|We need to set this parameter to 'Latest' to enable some experimental features. This parameter should not be set outside of this experimental notebook.|\n",
"\n", "\n",
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)" "**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
] ]
@@ -223,7 +222,6 @@
" compute_target = compute_target,\n", " compute_target = compute_target,\n",
" training_data = train_data,\n", " training_data = train_data,\n",
" label_column_name = label,\n", " label_column_name = label,\n",
" scenario='Latest',\n",
" **automl_settings\n", " **automl_settings\n",
" )" " )"
] ]

View File

@@ -0,0 +1,4 @@
name: auto-ml-regression-model-proxy
dependencies:
- pip:
- azureml-sdk

View File

@@ -113,7 +113,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-beer-remote
dependencies:
- pip:
- azureml-sdk

View File

@@ -87,7 +87,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-bike-share
dependencies:
- pip:
- azureml-sdk

View File

@@ -97,7 +97,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-energy-demand
dependencies:
- pip:
- azureml-sdk

View File

@@ -94,7 +94,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-function
dependencies:
- pip:
- azureml-sdk

View File

@@ -82,7 +82,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -0,0 +1,4 @@
name: auto-ml-forecasting-orange-juice-sales
dependencies:
- pip:
- azureml-sdk

View File

@@ -96,7 +96,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -0,0 +1,4 @@
name: auto-ml-classification-credit-card-fraud-local
dependencies:
- pip:
- azureml-sdk

View File

@@ -96,7 +96,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -0,0 +1,4 @@
name: auto-ml-regression-explanation-featurization
dependencies:
- pip:
- azureml-sdk

View File

@@ -92,7 +92,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.24.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -0,0 +1,4 @@
name: auto-ml-regression
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,84 @@
Azure Synapse Analytics is a limitless analytics service that brings together data integration, enterprise data warehousing, and big data analytics. It gives you the freedom to query data on your terms, using either serverless or dedicated resources, at scale. Azure Synapse brings these worlds together with a unified experience to ingest, explore, prepare, manage, and serve data for immediate BI and machine learning needs. A core offering within Azure Synapse Analytics is serverless Apache Spark pools, enhanced for big data workloads.

The Synapse-in-AML integration is for customers who want to use Apache Spark in Azure Synapse Analytics to prepare data at scale in Azure ML before training their ML models. This lets customers work on their end-to-end ML lifecycle, including large-scale data preparation, model training, and deployment, within the Azure ML workspace, without having to use suboptimal tools for machine learning or switch between multiple tools for data preparation and model training. The ability to perform all ML tasks within Azure ML reduces the time customers need to iterate on a machine learning project, which typically includes multiple rounds of data preparation and training.
In the public preview, the following capabilities are provided:

- Link an Azure Synapse Analytics workspace to an Azure Machine Learning workspace (via ARM, UI, or SDK)
- Attach Apache Spark pools powered by Azure Synapse Analytics as Azure Machine Learning compute targets (via ARM, UI, or SDK)
- Launch Apache Spark sessions in notebooks and perform interactive data exploration and preparation. This interactive experience leverages Apache Spark magic, with session-level Conda support for installing packages (see the sketch after this list)
- Productionize ML pipelines by leveraging Apache Spark pools to pre-process big data
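
For the interactive session bullet above, here is a rough sketch of what a notebook session looks like. It assumes the `azureml-synapse` package is installed; the magic names and the `-c` flag are taken from the public preview and may change, so treat them as assumptions:

```python
# %synapse magics come from the azureml-synapse package (public preview).
# '-c' names the attached Synapse compute; the flag spelling is an assumption.
%synapse start -c <attached compute name>

# Cells starting with %%synapse execute remotely in the Spark session,
# so 'spark' below refers to the session running on the Synapse pool.
%%synapse
df = spark.read.csv("<path to data>", header=True)
df.show(5)

# Stop the session to release the pool when you are done.
%synapse stop
```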
# Using Synapse in Azure Machine Learning

## Create Synapse resources

Follow these documents to create a Synapse workspace (a resource-setup.sh script is also available to create the resources):

- Create from the [Portal](https://docs.microsoft.com/en-us/azure/synapse-analytics/quickstart-create-workspace)
- Create from the [CLI](https://docs.microsoft.com/en-us/azure/synapse-analytics/quickstart-create-workspace-cli)

Follow these documents to create a Synapse Spark pool:

- Create from the [Portal](https://docs.microsoft.com/en-us/azure/synapse-analytics/quickstart-create-apache-spark-pool-portal)
- Create from the [CLI](https://docs.microsoft.com/en-us/cli/azure/ext/synapse/synapse/spark/pool?view=azure-cli-latest)

## Link Synapse workspace

Make sure you are an owner of the Synapse workspace so that you can link it into AML. You can run resource-setup.py to link the Synapse workspace and attach the compute, or use the SDK:
```python
from azureml.core import Workspace, LinkedService, SynapseWorkspaceLinkedServiceConfiguration

ws = Workspace.from_config()

synapse_link_config = SynapseWorkspaceLinkedServiceConfiguration(
    subscription_id="<subscription id>",
    resource_group="<resource group>",
    name="<synapse workspace name>"
)

linked_service = LinkedService.register(
    workspace=ws,
    name='<link name>',
    linked_service_config=synapse_link_config)
```
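
Once registered, the link can be looked up again in later sessions rather than re-registered; a small sketch, assuming the standard `LinkedService.list`/`LinkedService.get` accessors in azureml-core:

```python
from azureml.core import LinkedService

# Enumerate links registered in the workspace, then fetch one by name.
for service in LinkedService.list(ws):
    print(service.name)

linked_service = LinkedService.get(ws, '<link name>')
```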
## Attach Synapse Spark pool as an AzureML compute
```python
from azureml.core.compute import SynapseCompute, ComputeTarget

spark_pool_name = "<spark pool name>"
attached_synapse_name = "<attached compute name>"

attach_config = SynapseCompute.attach_configuration(
    linked_service,
    type="SynapseSpark",
    pool_name=spark_pool_name)

synapse_compute = ComputeTarget.attach(
    workspace=ws,
    name=attached_synapse_name,
    attach_configuration=attach_config)

synapse_compute.wait_for_completion()
```
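
`ComputeTarget.attach` is asynchronous, so `wait_for_completion()` blocks until the pool appears as an attached compute. In later sessions the compute can be fetched by name with the standard azureml-core pattern instead of re-attaching:

```python
from azureml.core.compute import ComputeTarget

# Look up the previously attached Synapse Spark pool by its AzureML name.
synapse_compute = ComputeTarget(workspace=ws, name=attached_synapse_name)
print(synapse_compute.provisioning_state)
```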
## Set up permissions

Grant the Spark admin role to the system-assigned identity of the linked service so that experiment runs or pipeline runs can be submitted from the AML workspace to the Synapse Spark pool. Grant the Spark admin role to a specific user so that that user can start Spark sessions against the Synapse Spark pool.

You can get the system-assigned identity information by running:
```python
print(linked_service.system_assigned_identity_principal_id)
```
- Launch Synapse Studio for the Synapse workspace and grant the linked service MSI the "Synapse Apache Spark administrator" role.
- In the Azure portal, grant the linked service MSI the "Storage Blob Data Contributor" role on the primary ADLS Gen2 account of the Synapse workspace in order to use the library management feature.
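
With permissions in place, the attached pool can also back the pipeline scenario listed earlier. Below is a minimal sketch of a Spark data-preparation step; it assumes the public-preview `SynapseSparkStep` from azureml-pipeline-steps, and the script name, folder, and sizing values are placeholders:

```python
from azureml.core import Experiment, Workspace
from azureml.pipeline.core import Pipeline
from azureml.pipeline.steps import SynapseSparkStep

ws = Workspace.from_config()

# One pipeline step that runs dataprep.py on the attached Synapse Spark pool.
step = SynapseSparkStep(
    name="dataprep",
    file="dataprep.py",                        # entry script (placeholder)
    source_directory="./scripts",              # folder uploaded to the pool (placeholder)
    compute_target="<attached compute name>",
    driver_memory="2g", driver_cores=2,
    executor_memory="2g", executor_cores=2,
    num_executors=2)

pipeline = Pipeline(workspace=ws, steps=[step])
run = Experiment(ws, "synapse-dataprep").submit(pipeline)
run.wait_for_completion(show_output=True)
```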

View File

@@ -0,0 +1,6 @@
name: multi-model-register-and-deploy
dependencies:
- pip:
- azureml-sdk
- numpy
- scikit-learn

View File

@@ -0,0 +1,6 @@
name: model-register-and-deploy
dependencies:
- pip:
- azureml-sdk
- numpy
- scikit-learn

View File

@@ -0,0 +1,4 @@
name: deploy-aks-with-controlled-rollout
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,4 @@
name: enable-app-insights-in-production-service
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,8 @@
name: onnx-convert-aml-deploy-tinyyolo
dependencies:
- pip:
- azureml-sdk
- numpy
- git+https://github.com/apple/coremltools@v2.1
- onnx<1.7.0
- onnxmltools

View File

@@ -0,0 +1,9 @@
name: onnx-inference-facial-expression-recognition-deploy
dependencies:
- pip:
- azureml-sdk
- azureml-widgets
- matplotlib
- numpy
- onnx<1.7.0
- opencv-python-headless

View File

@@ -0,0 +1,9 @@
name: onnx-inference-mnist-deploy
dependencies:
- pip:
- azureml-sdk
- azureml-widgets
- matplotlib
- numpy
- onnx<1.7.0
- opencv-python-headless

View File

@@ -0,0 +1,4 @@
name: onnx-model-register-and-deploy
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,4 @@
name: onnx-modelzoo-aml-deploy-resnet50
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,5 @@
name: onnx-train-pytorch-aml-deploy-mnist
dependencies:
- pip:
- azureml-sdk
- azureml-widgets

View File

@@ -0,0 +1,5 @@
name: production-deploy-to-aks-gpu
dependencies:
- pip:
- azureml-sdk
- tensorflow

View File

@@ -0,0 +1,8 @@
name: production-deploy-to-aks-ssl
dependencies:
- pip:
- azureml-sdk
- matplotlib
- tqdm
- scipy
- sklearn

View File

@@ -0,0 +1,8 @@
name: production-deploy-to-aks
dependencies:
- pip:
- azureml-sdk
- matplotlib
- tqdm
- scipy
- sklearn

View File

@@ -0,0 +1,4 @@
name: model-register-and-deploy-spark
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,9 @@
name: explain-model-on-amlcompute
dependencies:
- pip:
- azureml-sdk
- azureml-interpret
- interpret-community[visualization]
- matplotlib
- azureml-dataset-runtime
- ipywidgets

View File

@@ -226,36 +226,6 @@
" ('classifier', SVC(C=1.0, probability=True))])" " ('classifier', SVC(C=1.0, probability=True))])"
] ]
}, },
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"'''\n",
"# Uncomment below if sklearn-pandas is not installed\n",
"#!pip install sklearn-pandas\n",
"from sklearn_pandas import DataFrameMapper\n",
"\n",
"# Impute, standardize the numeric features and one-hot encode the categorical features. \n",
"\n",
"\n",
"numeric_transformations = [([f], Pipeline(steps=[('imputer', SimpleImputer(strategy='median')), ('scaler', StandardScaler())])) for f in numerical]\n",
"\n",
"categorical_transformations = [([f], OneHotEncoder(handle_unknown='ignore', sparse=False)) for f in categorical]\n",
"\n",
"transformations = numeric_transformations + categorical_transformations\n",
"\n",
"# Append classifier to preprocessing pipeline.\n",
"# Now we have a full prediction pipeline.\n",
"clf = Pipeline(steps=[('preprocessor', transformations),\n",
" ('classifier', SVC(C=1.0, probability=True))]) \n",
"\n",
"\n",
"\n",
"'''"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},

View File

@@ -0,0 +1,8 @@
name: save-retrieve-explanations-run-history
dependencies:
- pip:
- azureml-sdk
- azureml-interpret
- interpret-community[visualization]
- matplotlib
- ipywidgets

View File

@@ -166,12 +166,12 @@
"source": [ "source": [
"from sklearn.model_selection import train_test_split\n", "from sklearn.model_selection import train_test_split\n",
"import joblib\n", "import joblib\n",
"from sklearn.compose import ColumnTransformer\n",
"from sklearn.preprocessing import StandardScaler, OneHotEncoder\n", "from sklearn.preprocessing import StandardScaler, OneHotEncoder\n",
"from sklearn.impute import SimpleImputer\n", "from sklearn.impute import SimpleImputer\n",
"from sklearn.pipeline import Pipeline\n", "from sklearn.pipeline import Pipeline\n",
"from sklearn.linear_model import LogisticRegression\n", "from sklearn.linear_model import LogisticRegression\n",
"from sklearn.ensemble import RandomForestClassifier\n", "from sklearn.ensemble import RandomForestClassifier\n",
"from sklearn_pandas import DataFrameMapper\n",
"\n", "\n",
"from interpret.ext.blackbox import TabularExplainer\n", "from interpret.ext.blackbox import TabularExplainer\n",
"\n", "\n",
@@ -201,17 +201,23 @@
"# Store the numerical columns in a list numerical\n", "# Store the numerical columns in a list numerical\n",
"numerical = attritionXData.columns.difference(categorical)\n", "numerical = attritionXData.columns.difference(categorical)\n",
"\n", "\n",
"numeric_transformations = [([f], Pipeline(steps=[\n", "# We create the preprocessing pipelines for both numeric and categorical data.\n",
"numeric_transformer = Pipeline(steps=[\n",
" ('imputer', SimpleImputer(strategy='median')),\n", " ('imputer', SimpleImputer(strategy='median')),\n",
" ('scaler', StandardScaler())])) for f in numerical]\n", " ('scaler', StandardScaler())])\n",
"\n", "\n",
"categorical_transformations = [([f], OneHotEncoder(handle_unknown='ignore', sparse=False)) for f in categorical]\n", "categorical_transformer = Pipeline(steps=[\n",
" ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),\n",
" ('onehot', OneHotEncoder(handle_unknown='ignore'))])\n",
"\n", "\n",
"transformations = numeric_transformations + categorical_transformations\n", "transformations = ColumnTransformer(\n",
" transformers=[\n",
" ('num', numeric_transformer, numerical),\n",
" ('cat', categorical_transformer, categorical)])\n",
"\n", "\n",
"# Append classifier to preprocessing pipeline.\n", "# Append classifier to preprocessing pipeline.\n",
"# Now we have a full prediction pipeline.\n", "# Now we have a full prediction pipeline.\n",
"clf = Pipeline(steps=[('preprocessor', DataFrameMapper(transformations)),\n", "clf = Pipeline(steps=[('preprocessor', transformations),\n",
" ('classifier', RandomForestClassifier())])\n", " ('classifier', RandomForestClassifier())])\n",
"\n", "\n",
"# Split data into train and test\n", "# Split data into train and test\n",
@@ -350,7 +356,7 @@
"# the submitted job is run in. Note the remote environment(s) needs to be similar to the local\n", "# the submitted job is run in. Note the remote environment(s) needs to be similar to the local\n",
"# environment, otherwise if a model is trained or deployed in a different environment this can\n", "# environment, otherwise if a model is trained or deployed in a different environment this can\n",
"# cause errors. Please take extra care when specifying your dependencies in a production environment.\n", "# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
"myenv = CondaDependencies.create(pip_packages=['sklearn-pandas', 'pyyaml', sklearn_dep, pandas_dep] + azureml_pip_packages,\n", "myenv = CondaDependencies.create(pip_packages=['pyyaml', sklearn_dep, pandas_dep] + azureml_pip_packages,\n",
" pin_sdk_version=False)\n", " pin_sdk_version=False)\n",
"\n", "\n",
"with open(\"myenv.yml\",\"w\") as f:\n", "with open(\"myenv.yml\",\"w\") as f:\n",

View File

@@ -0,0 +1,8 @@
name: train-explain-model-locally-and-deploy
dependencies:
- pip:
- azureml-sdk
- azureml-interpret
- interpret-community[visualization]
- matplotlib
- ipywidgets

View File

@@ -294,7 +294,7 @@
"# the submitted job is run in. Note the remote environment(s) needs to be similar to the local\n", "# the submitted job is run in. Note the remote environment(s) needs to be similar to the local\n",
"# environment, otherwise if a model is trained or deployed in a different environment this can\n", "# environment, otherwise if a model is trained or deployed in a different environment this can\n",
"# cause errors. Please take extra care when specifying your dependencies in a production environment.\n", "# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
"azureml_pip_packages.extend(['sklearn-pandas', 'pyyaml', sklearn_dep, pandas_dep])\n", "azureml_pip_packages.extend(['pyyaml', sklearn_dep, pandas_dep])\n",
"run_config.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=azureml_pip_packages)\n", "run_config.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=azureml_pip_packages)\n",
"# Now submit a run on AmlCompute\n", "# Now submit a run on AmlCompute\n",
"from azureml.core.script_run_config import ScriptRunConfig\n", "from azureml.core.script_run_config import ScriptRunConfig\n",
@@ -458,7 +458,7 @@
"# the submitted job is run in. Note the remote environment(s) needs to be similar to the local\n", "# the submitted job is run in. Note the remote environment(s) needs to be similar to the local\n",
"# environment, otherwise if a model is trained or deployed in a different environment this can\n", "# environment, otherwise if a model is trained or deployed in a different environment this can\n",
"# cause errors. Please take extra care when specifying your dependencies in a production environment.\n", "# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
"azureml_pip_packages.extend(['sklearn-pandas', 'pyyaml', sklearn_dep, pandas_dep])\n", "azureml_pip_packages.extend(['pyyaml', sklearn_dep, pandas_dep])\n",
"myenv = CondaDependencies.create(pip_packages=azureml_pip_packages)\n", "myenv = CondaDependencies.create(pip_packages=azureml_pip_packages)\n",
"\n", "\n",
"with open(\"myenv.yml\",\"w\") as f:\n", "with open(\"myenv.yml\",\"w\") as f:\n",

View File

@@ -0,0 +1,10 @@
name: train-explain-model-on-amlcompute-and-deploy
dependencies:
- pip:
- azureml-sdk
- azureml-interpret
- interpret-community[visualization]
- matplotlib
- azureml-dataset-runtime
- azureml-core
- ipywidgets

View File

@@ -5,13 +5,13 @@
 import os
 import pandas as pd
 import zipfile
-from sklearn.model_selection import train_test_split
 import joblib
+from sklearn.compose import ColumnTransformer
+from sklearn.model_selection import train_test_split
 from sklearn.preprocessing import StandardScaler, OneHotEncoder
 from sklearn.impute import SimpleImputer
 from sklearn.pipeline import Pipeline
 from sklearn.linear_model import LogisticRegression
-from sklearn_pandas import DataFrameMapper
 from azureml.core.run import Run
 from interpret.ext.blackbox import TabularExplainer
@@ -57,16 +57,22 @@ for col, value in attritionXData.iteritems():
 # store the numerical columns
 numerical = attritionXData.columns.difference(categorical)
-numeric_transformations = [([f], Pipeline(steps=[
+# We create the preprocessing pipelines for both numeric and categorical data.
+numeric_transformer = Pipeline(steps=[
     ('imputer', SimpleImputer(strategy='median')),
-    ('scaler', StandardScaler())])) for f in numerical]
+    ('scaler', StandardScaler())])
-categorical_transformations = [([f], OneHotEncoder(handle_unknown='ignore', sparse=False)) for f in categorical]
+categorical_transformer = Pipeline(steps=[
+    ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
+    ('onehot', OneHotEncoder(handle_unknown='ignore'))])
-transformations = numeric_transformations + categorical_transformations
+transformations = ColumnTransformer(
+    transformers=[
+        ('num', numeric_transformer, numerical),
+        ('cat', categorical_transformer, categorical)])
 # append classifier to preprocessing pipeline
-clf = Pipeline(steps=[('preprocessor', DataFrameMapper(transformations)),
+clf = Pipeline(steps=[('preprocessor', transformations),
                       ('classifier', LogisticRegression(solver='lbfgs'))])
 # get the run this was submitted from to interact with run history

View File

@@ -0,0 +1,5 @@
name: aml-pipelines-data-transfer
dependencies:
- pip:
- azureml-sdk
- azureml-widgets

View File

@@ -0,0 +1,5 @@
name: aml-pipelines-getting-started
dependencies:
- pip:
- azureml-sdk
- azureml-widgets

View File

@@ -0,0 +1,5 @@
name: aml-pipelines-how-to-use-modulestep
dependencies:
- pip:
- azureml-sdk
- azureml-widgets

View File

@@ -0,0 +1,5 @@
name: aml-pipelines-how-to-use-pipeline-drafts
dependencies:
- pip:
- azureml-sdk
- azureml-widgets

View File

@@ -121,12 +121,17 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"os.makedirs('./data/mnist', exist_ok=True)\n", "data_folder = os.path.join(os.getcwd(), 'data/mnist')\n",
"os.makedirs(data_folder, exist_ok=True)\n",
"\n", "\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', filename = './data/mnist/train-images.gz')\n", "urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/train-images-idx3-ubyte.gz',\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', filename = './data/mnist/train-labels.gz')\n", " filename=os.path.join(data_folder, 'train-images-idx3-ubyte.gz'))\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename = './data/mnist/test-images.gz')\n", "urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/train-labels-idx1-ubyte.gz',\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename = './data/mnist/test-labels.gz')" " filename=os.path.join(data_folder, 'train-labels-idx1-ubyte.gz'))\n",
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-images-idx3-ubyte.gz',\n",
" filename=os.path.join(data_folder, 't10k-images-idx3-ubyte.gz'))\n",
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-labels-idx1-ubyte.gz',\n",
" filename=os.path.join(data_folder, 't10k-labels-idx1-ubyte.gz'))"
] ]
}, },
{ {
@@ -146,11 +151,11 @@
"from utils import load_data\n", "from utils import load_data\n",
"\n", "\n",
"# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster.\n", "# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster.\n",
"X_train = load_data('./data/mnist/train-images.gz', False) / 255.0\n", "X_train = load_data(os.path.join(data_folder, 'train-images-idx3-ubyte.gz'), False) / np.float32(255.0)\n",
"y_train = load_data('./data/mnist/train-labels.gz', True).reshape(-1)\n", "X_test = load_data(os.path.join(data_folder, 't10k-images-idx3-ubyte.gz'), False) / np.float32(255.0)\n",
"y_train = load_data(os.path.join(data_folder, 'train-labels-idx1-ubyte.gz'), True).reshape(-1)\n",
"y_test = load_data(os.path.join(data_folder, 't10k-labels-idx1-ubyte.gz'), True).reshape(-1)\n",
"\n", "\n",
"X_test = load_data('./data/mnist/test-images.gz', False) / 255.0\n",
"y_test = load_data('./data/mnist/test-labels.gz', True).reshape(-1)\n",
"\n", "\n",
"count = 0\n", "count = 0\n",
"sample_size = 30\n", "sample_size = 30\n",

View File

@@ -0,0 +1,9 @@
name: aml-pipelines-parameter-tuning-with-hyperdrive
dependencies:
- pip:
- azureml-sdk
- azureml-widgets
- matplotlib
- numpy
- pandas_ml
- azureml-dataset-runtime[pandas,fuse]

View File

@@ -0,0 +1,6 @@
name: aml-pipelines-publish-and-run-using-rest-endpoint
dependencies:
- pip:
- azureml-sdk
- azureml-widgets
- requests

View File

@@ -0,0 +1,5 @@
name: aml-pipelines-setup-schedule-for-a-published-pipeline
dependencies:
- pip:
- azureml-sdk
- azureml-widgets

View File

@@ -0,0 +1,6 @@
name: aml-pipelines-setup-versioned-pipeline-endpoints
dependencies:
- pip:
- azureml-sdk
- azureml-widgets
- requests

View File

@@ -0,0 +1,5 @@
name: aml-pipelines-showcasing-datapath-and-pipelineparameter
dependencies:
- pip:
- azureml-sdk
- azureml-widgets

View File

@@ -0,0 +1,5 @@
name: aml-pipelines-showcasing-dataset-and-pipelineparameter
dependencies:
- pip:
- azureml-sdk
- azureml-widgets

View File

@@ -0,0 +1,4 @@
name: aml-pipelines-with-automated-machine-learning-step
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,5 @@
name: aml-pipelines-with-commandstep-r
dependencies:
- pip:
- azureml-sdk
- azureml-widgets

View File

@@ -0,0 +1,5 @@
name: aml-pipelines-with-commandstep
dependencies:
- pip:
- azureml-sdk
- azureml-widgets

View File

@@ -0,0 +1,5 @@
name: aml-pipelines-with-data-dependency-steps
dependencies:
- pip:
- azureml-sdk
- azureml-widgets

View File

@@ -0,0 +1,6 @@
name: aml-pipelines-with-notebook-runner-step
dependencies:
- pip:
- azureml-sdk
- azureml-widgets
- azureml-contrib-notebook

View File

@@ -0,0 +1,10 @@
name: nyc-taxi-data-regression-model-building
dependencies:
- pip:
- azureml-sdk
- azureml-widgets
- azureml-opendatasets
- azureml-train-automl
- matplotlib
- pandas
- pyarrow

View File

@@ -0,0 +1,7 @@
name: file-dataset-image-inference-mnist
dependencies:
- pip:
- azureml-sdk
- azureml-pipeline-steps
- azureml-widgets
- pandas

View File

@@ -0,0 +1,7 @@
name: tabular-dataset-inference-iris
dependencies:
- pip:
- azureml-sdk
- azureml-pipeline-steps
- azureml-widgets
- pandas

View File

@@ -0,0 +1,7 @@
name: pipeline-style-transfer-parallel-run
dependencies:
- pip:
- azureml-sdk
- azureml-pipeline-steps
- azureml-widgets
- requests

View File

@@ -0,0 +1,5 @@
name: distributed-chainer
dependencies:
- pip:
- azureml-sdk
- azureml-widgets

View File

@@ -4,6 +4,8 @@ import os
 import numpy as np
+from utils import download_mnist
 import chainer
 from chainer import backend
 from chainer import backends
@@ -17,6 +19,7 @@ from chainer.training import extensions
 from chainer.dataset import concat_examples
 from chainer.backends.cuda import to_cpu
 from azureml.core.run import Run
 run = Run.get_context()
@@ -49,7 +52,7 @@ def main():
     args = parser.parse_args()
     # Download the MNIST data if you haven't downloaded it yet
-    train, test = datasets.mnist.get_mnist(withlabel=True, ndim=1)
+    train, test = download_mnist()
     gpu_id = args.gpu_id
     batchsize = args.batchsize

View File

@@ -2,6 +2,8 @@ import numpy as np
 import os
 import json
+from utils import download_mnist
 from chainer import serializers, using_config, Variable, datasets
 import chainer.functions as F
 import chainer.links as L
@@ -41,7 +43,7 @@ def init():
 def run(input_data):
     i = np.array(json.loads(input_data)['data'])
-    _, test = datasets.get_mnist()
+    _, test = download_mnist()
     x = Variable(np.asarray([test[i][0]]))
     y = model(x)

View File

@@ -217,7 +217,8 @@
"import shutil\n", "import shutil\n",
"\n", "\n",
"shutil.copy('chainer_mnist.py', project_folder)\n", "shutil.copy('chainer_mnist.py', project_folder)\n",
"shutil.copy('chainer_score.py', project_folder)" "shutil.copy('chainer_score.py', project_folder)\n",
"shutil.copy('utils.py', project_folder)"
] ]
}, },
{ {
@@ -263,6 +264,7 @@
"- python=3.6.2\n", "- python=3.6.2\n",
"- pip:\n", "- pip:\n",
" - azureml-defaults\n", " - azureml-defaults\n",
" - azureml-opendatasets\n",
" - chainer==5.1.0\n", " - chainer==5.1.0\n",
" - cupy-cuda90==5.1.0\n", " - cupy-cuda90==5.1.0\n",
" - mpi4py==3.0.0\n", " - mpi4py==3.0.0\n",
@@ -557,6 +559,7 @@
"cd.add_conda_package('numpy')\n", "cd.add_conda_package('numpy')\n",
"cd.add_pip_package('chainer==5.1.0')\n", "cd.add_pip_package('chainer==5.1.0')\n",
"cd.add_pip_package(\"azureml-defaults\")\n", "cd.add_pip_package(\"azureml-defaults\")\n",
"cd.add_pip_package(\"azureml-opendatasets\")\n",
"cd.save_to_file(base_directory='./', conda_file_path='myenv.yml')\n", "cd.save_to_file(base_directory='./', conda_file_path='myenv.yml')\n",
"\n", "\n",
"print(cd.serialize_to_string())" "print(cd.serialize_to_string())"
@@ -584,7 +587,8 @@
"\n", "\n",
"\n", "\n",
"myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\n", "myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\n",
"inference_config = InferenceConfig(entry_script=\"chainer_score.py\", environment=myenv)\n", "inference_config = InferenceConfig(entry_script=\"chainer_score.py\", environment=myenv,\n",
" source_directory=project_folder)\n",
"\n", "\n",
"aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,\n", "aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,\n",
" auth_enabled=True, # this flag generates API keys to secure access\n", " auth_enabled=True, # this flag generates API keys to secure access\n",
@@ -592,10 +596,10 @@
" tags={'name': 'mnist', 'framework': 'Chainer'},\n", " tags={'name': 'mnist', 'framework': 'Chainer'},\n",
" description='Chainer DNN with MNIST')\n", " description='Chainer DNN with MNIST')\n",
"\n", "\n",
"service = Model.deploy(workspace=ws, \n", "service = Model.deploy(workspace=ws,\n",
" name='chainer-mnist-1', \n", " name='chainer-mnist-1',\n",
" models=[model], \n", " models=[model],\n",
" inference_config=inference_config, \n", " inference_config=inference_config,\n",
" deployment_config=aciconfig)\n", " deployment_config=aciconfig)\n",
"service.wait_for_deployment(True)\n", "service.wait_for_deployment(True)\n",
"print(service.state)\n", "print(service.state)\n",
@@ -685,13 +689,16 @@
" res = res.reshape(n_items[0], 1)\n", " res = res.reshape(n_items[0], 1)\n",
" return res\n", " return res\n",
"\n", "\n",
"os.makedirs('./data/mnist', exist_ok=True)\n", "data_folder = os.path.join(os.getcwd(), 'data/mnist')\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename = './data/mnist/test-images.gz')\n", "os.makedirs(data_folder, exist_ok=True)\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename = './data/mnist/test-labels.gz')\n",
"\n", "\n",
"X_test = load_data('./data/mnist/test-images.gz', False)\n", "urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-images-idx3-ubyte.gz',\n",
"y_test = load_data('./data/mnist/test-labels.gz', True).reshape(-1)\n", " filename=os.path.join(data_folder, 't10k-images-idx3-ubyte.gz'))\n",
"urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-labels-idx1-ubyte.gz',\n",
" filename=os.path.join(data_folder, 't10k-labels-idx1-ubyte.gz'))\n",
"\n", "\n",
"X_test = load_data(os.path.join(data_folder, 't10k-images-idx3-ubyte.gz'), False) / np.float32(255.0)\n",
"y_test = load_data(os.path.join(data_folder, 't10k-labels-idx1-ubyte.gz'), True).reshape(-1)\n",
"\n", "\n",
"# send a random row from the test set to score\n", "# send a random row from the test set to score\n",
"random_index = np.random.randint(0, len(X_test)-1)\n", "random_index = np.random.randint(0, len(X_test)-1)\n",

View File

@@ -0,0 +1,13 @@
name: train-hyperparameter-tune-deploy-with-chainer
dependencies:
- pip:
- azureml-sdk
- azureml-widgets
- numpy
- matplotlib
- json
- urllib
- gzip
- struct
- requests
- azureml-opendatasets

View File

@@ -0,0 +1,50 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import glob
import gzip
import numpy as np
import os
import struct
from azureml.core import Dataset
from azureml.opendatasets import MNIST
from chainer.datasets import tuple_dataset
# load compressed MNIST gz files and return numpy arrays
def load_data(filename, label=False):
with gzip.open(filename) as gz:
struct.unpack('I', gz.read(4))
n_items = struct.unpack('>I', gz.read(4))
if not label:
n_rows = struct.unpack('>I', gz.read(4))[0]
n_cols = struct.unpack('>I', gz.read(4))[0]
res = np.frombuffer(gz.read(n_items[0] * n_rows * n_cols), dtype=np.uint8)
res = res.reshape(n_items[0], n_rows * n_cols)
else:
res = np.frombuffer(gz.read(n_items[0]), dtype=np.uint8)
res = res.reshape(n_items[0], 1)
return res
def download_mnist():
data_folder = os.path.join(os.getcwd(), 'data/mnist')
os.makedirs(data_folder, exist_ok=True)
mnist_file_dataset = MNIST.get_file_dataset()
mnist_file_dataset.download(data_folder, overwrite=True)
X_train = load_data(glob.glob(os.path.join(data_folder, "**/train-images-idx3-ubyte.gz"),
recursive=True)[0], False) / 255.0
X_test = load_data(glob.glob(os.path.join(data_folder, "**/t10k-images-idx3-ubyte.gz"),
recursive=True)[0], False) / 255.0
y_train = load_data(glob.glob(os.path.join(data_folder, "**/train-labels-idx1-ubyte.gz"),
recursive=True)[0], True).reshape(-1)
y_test = load_data(glob.glob(os.path.join(data_folder, "**/t10k-labels-idx1-ubyte.gz"),
recursive=True)[0], True).reshape(-1)
train = tuple_dataset.TupleDataset(X_train.astype(np.float32), y_train.astype(np.int32))
test = tuple_dataset.TupleDataset(X_test.astype(np.float32), y_test.astype(np.int32))
return train, test
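
For reference, a minimal sketch of how the training and scoring scripts above consume this helper; it assumes azureml-opendatasets and chainer are installed locally:

```python
# Hypothetical local smoke test of the download_mnist helper above.
from utils import download_mnist

train, test = download_mnist()   # chainer TupleDatasets of (float32 pixels, int32 label)
x0, y0 = train[0]
print(x0.shape, y0)              # -> (784,) and a digit label in 0-9
print(len(train), len(test))     # -> 60000 10000
```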

View File

@@ -0,0 +1,5 @@
name: fastai-with-custom-docker
dependencies:
- pip:
- azureml-sdk
- fastai==1.0.61

View File

@@ -0,0 +1,8 @@
name: train-hyperparameter-tune-deploy-with-keras
dependencies:
- pip:
- azureml-sdk
- azureml-widgets
- tensorflow
- keras<=2.3.1
- matplotlib

View File

@@ -0,0 +1,5 @@
name: distributed-pytorch-with-distributeddataparallel
dependencies:
- pip:
- azureml-sdk
- azureml-widgets

View File

@@ -0,0 +1,5 @@
name: distributed-pytorch-with-horovod
dependencies:
- pip:
- azureml-sdk
- azureml-widgets

View File

@@ -0,0 +1,10 @@
name: train-hyperparameter-tune-deploy-with-pytorch
dependencies:
- pip:
- azureml-sdk
- azureml-widgets
- pillow==5.4.1
- matplotlib
- numpy==1.19.3
- https://download.pytorch.org/whl/cpu/torch-1.6.0%2Bcpu-cp36-cp36m-win_amd64.whl
- https://download.pytorch.org/whl/cpu/torchvision-0.7.0%2Bcpu-cp36-cp36m-win_amd64.whl

View File

@@ -0,0 +1,6 @@
name: train-hyperparameter-tune-deploy-with-sklearn
dependencies:
- pip:
- azureml-sdk
- azureml-widgets
- numpy

View File

@@ -0,0 +1,11 @@
name: distributed-tensorflow-with-horovod
dependencies:
- pip:
- azureml-sdk
- azureml-widgets
- keras
- tensorflow-gpu==1.13.2
- horovod==0.19.1
- matplotlib
- pandas
- fuse

View File

@@ -0,0 +1,5 @@
name: distributed-tensorflow-with-parameter-server
dependencies:
- pip:
- azureml-sdk
- azureml-widgets

View File

@@ -0,0 +1,12 @@
name: train-hyperparameter-tune-deploy-with-tensorflow
dependencies:
- numpy
- matplotlib
- pip:
- azureml-sdk
- azureml-widgets
- pandas
- keras
- tensorflow==2.0.0
- matplotlib
- fuse

View File

@@ -0,0 +1,8 @@
name: pong_rllib
dependencies:
- pip:
- azureml-sdk
- azureml-contrib-reinforcementlearning
- azureml-widgets
- matplotlib
- azure-mgmt-network==12.0.0

View File

@@ -0,0 +1,6 @@
name: cartpole_ci
dependencies:
- pip:
- azureml-sdk
- azureml-contrib-reinforcementlearning
- azureml-widgets

View File

@@ -0,0 +1,6 @@
name: cartpole_sc
dependencies:
- pip:
- azureml-sdk
- azureml-contrib-reinforcementlearning
- azureml-widgets

View File

@@ -1,70 +0,0 @@
FROM mcr.microsoft.com/azureml/base:openmpi3.1.2-ubuntu18.04
# Install some basic utilities
RUN apt-get update && apt-get install -y \
curl \
ca-certificates \
sudo \
cpio \
git \
bzip2 \
libx11-6 \
tmux \
htop \
gcc \
xvfb \
python-opengl \
x11-xserver-utils \
ffmpeg \
mesa-utils \
nano \
vim \
rsync \
&& rm -rf /var/lib/apt/lists/*
# Create a working directory
RUN mkdir /app
WORKDIR /app
# Install Minecraft needed libraries
RUN mkdir -p /usr/share/man/man1 && \
sudo apt-get update && \
sudo apt-get install -y \
openjdk-8-jre-headless=8u162-b12-1 \
openjdk-8-jdk-headless=8u162-b12-1 \
openjdk-8-jre=8u162-b12-1 \
openjdk-8-jdk=8u162-b12-1
# Create a Python 3.7 environment
RUN conda install conda-build \
&& conda create -y --name py37 python=3.7.3 \
&& conda clean -ya
ENV CONDA_DEFAULT_ENV=py37
# Install minerl
RUN pip install --upgrade --user minerl
RUN pip install \
pandas \
matplotlib \
numpy \
scipy \
azureml-defaults \
tensorboardX \
tensorflow==1.15rc2 \
tabulate \
dm_tree \
lz4 \
ray==0.8.3 \
ray[rllib]==0.8.3 \
ray[tune]==0.8.3
COPY patch_files/* /root/.local/lib/python3.7/site-packages/minerl/env/Malmo/Minecraft/src/main/java/com/microsoft/Malmo/Client/
# Start minerl to pre-fetch minerl files (saves time when starting minerl during training)
RUN xvfb-run -a -s "-screen 0 1400x900x24" python -c "import gym; import minerl; env = gym.make('MineRLTreechop-v0'); env.close();"
RUN pip install --index-url https://test.pypi.org/simple/ malmo && \
python -c "import malmo.minecraftbootstrap; malmo.minecraftbootstrap.download();"
ENV MALMO_XSD_PATH="/app/MalmoPlatform/Schemas"

View File

@@ -1,939 +0,0 @@
// --------------------------------------------------------------------------------------------------
// Copyright (c) 2016 Microsoft Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
// associated documentation files (the "Software"), to deal in the Software without restriction,
// including without limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or
// substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
// NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// --------------------------------------------------------------------------------------------------
package com.microsoft.Malmo.Client;
import com.microsoft.Malmo.MalmoMod;
import com.microsoft.Malmo.MissionHandlerInterfaces.IWantToQuit;
import com.microsoft.Malmo.Schemas.MissionInit;
import com.microsoft.Malmo.Utils.TCPUtils;
import net.minecraft.profiler.Profiler;
import com.microsoft.Malmo.Utils.TimeHelper;
import net.minecraftforge.common.config.Configuration;
import java.io.*;
import java.net.ServerSocket;
import java.net.Socket;
import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.Hashtable;
import com.microsoft.Malmo.Utils.TCPInputPoller;
import java.util.logging.Level;
import java.util.LinkedList;
import java.util.List;
/**
* MalmoEnvServer - service supporting OpenAI gym "environment" for multi-agent Malmo missions.
*/
public class MalmoEnvServer implements IWantToQuit {
private static Profiler profiler = new Profiler();
private static int nsteps = 0;
private static boolean debug = false;
private static String hello = "<MalmoEnv" ;
private class EnvState {
// Mission parameters:
String missionInit = null;
String token = null;
String experimentId = null;
int agentCount = 0;
int reset = 0;
boolean quit = false;
boolean synchronous = false;
Long seed = null;
// OpenAI gym state:
boolean done = false;
double reward = 0.0;
byte[] obs = null;
String info = "";
LinkedList<String> commands = new LinkedList<String>();
}
private static boolean envPolicy = false; // Are we configured by config policy?
// Synchronize on EnvState
private Lock lock = new ReentrantLock();
private Condition cond = lock.newCondition();
private EnvState envState = new EnvState();
private Hashtable<String, Integer> initTokens = new Hashtable<String, Integer>();
static final long COND_WAIT_SECONDS = 3; // Max wait in seconds before timing out (and replying to RPC).
static final int BYTES_INT = 4;
static final int BYTES_DOUBLE = 8;
private static final Charset utf8 = Charset.forName("UTF-8");
// Service uses a single per-environment client connection - initiated by the remote environment.
private int port;
private TCPInputPoller missionPoller; // Used for command parsing and not actual communication.
private String version;
// AOG: From running experiments, I've found that MineRL can get stuck resetting the
// environment which causes huge delays while we wait for the Python side to time
// out and restart the Minecraft instance. Minecraft itself is normally in a recoverable
// state, but the MalmoEnvServer instance will be blocked in a tight spin loop trying
// to handle a Peek request from the Python client. To unstick things, I've added this
// flag that can be set when we know things are in a bad state to abort the peek request.
// WARNING: THIS IS ONLY TREATING THE SYMPTOM AND NOT THE ROOT CAUSE
// The reason things are getting stuck is because the player is either dying or we're
// receiving a quit request while an episode reset is in progress.
private boolean abortRequest;
public void abort() {
System.out.println("AOG: MalmoEnvServer.abort");
abortRequest = true;
}
/***
* Malmo "Env" service.
* @param port the port the service listens on.
* @param missionPoller for plugging into existing comms handling.
*/
public MalmoEnvServer(String version, int port, TCPInputPoller missionPoller) {
this.version = version;
this.missionPoller = missionPoller;
this.port = port;
// AOG - Assume we don't want to be aborting in the first place
this.abortRequest = false;
}
/** Initialize malmo env configuration. For now either on or "legacy" AgentHost protocol.*/
static public void update(Configuration configs) {
envPolicy = configs.get(MalmoMod.ENV_CONFIGS, "env", "false").getBoolean();
}
public static boolean isEnv() {
return envPolicy;
}
/**
* Start servicing the MalmoEnv protocol.
* @throws IOException
*/
public void serve() throws IOException {
ServerSocket serverSocket = new ServerSocket(port);
serverSocket.setPerformancePreferences(0,2,1);
while (true) {
try {
final Socket socket = serverSocket.accept();
socket.setTcpNoDelay(true);
Thread thread = new Thread("EnvServerSocketHandler") {
public void run() {
boolean running = false;
try {
checkHello(socket);
while (true) {
DataInputStream din = new DataInputStream(socket.getInputStream());
int hdr = din.readInt();
byte[] data = new byte[hdr];
din.readFully(data);
String command = new String(data, utf8);
if (command.startsWith("<Step")) {
profiler.startSection("root");
long start = System.nanoTime();
step(command, socket, din);
profiler.endSection();
if (nsteps % 100 == 0 && debug){
List<Profiler.Result> dat = profiler.getProfilingData("root");
for(int qq = 0; qq < dat.size(); qq++){
Profiler.Result res = dat.get(qq);
System.out.println(res.profilerName + " " + res.totalUsePercentage + " "+ res.usePercentage);
}
}
} else if (command.startsWith("<Peek")) {
peek(command, socket, din);
} else if (command.startsWith("<Init")) {
init(command, socket);
} else if (command.startsWith("<Find")) {
find(command, socket);
} else if (command.startsWith("<MissionInit")) {
if (missionInit(din, command, socket))
{
running = true;
}
} else if (command.startsWith("<Quit")) {
quit(command, socket);
profiler.profilingEnabled = false;
} else if (command.startsWith("<Exit")) {
exit(command, socket);
profiler.profilingEnabled = false;
} else if (command.startsWith("<Close")) {
close(command, socket);
profiler.profilingEnabled = false;
} else if (command.startsWith("<Status")) {
status(command, socket);
} else if (command.startsWith("<Echo")) {
command = "<Echo>" + command + "</Echo>";
data = command.getBytes(utf8);
hdr = data.length;
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
dout.writeInt(hdr);
dout.write(data, 0, hdr);
dout.flush();
} else {
throw new IOException("Unknown env service command");
}
}
} catch (IOException ioe) {
// ioe.printStackTrace();
TCPUtils.Log(Level.SEVERE, "MalmoEnv socket error: " + ioe + " (can be on disconnect)");
// System.out.println("[ERROR] " + "MalmoEnv socket error: " + ioe + " (can be on disconnect)");
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] MalmoEnv socket error");
try {
if (running) {
TCPUtils.Log(Level.INFO,"Want to quit on disconnect.");
System.out.println("[LOGTOPY] " + "Want to quit on disconnect.");
setWantToQuit();
}
socket.close();
} catch (IOException ioe2) {
}
}
}
};
thread.start();
} catch (IOException ioe) {
TCPUtils.Log(Level.SEVERE, "MalmoEnv service exits on " + ioe);
}
}
}
private void checkHello(Socket socket) throws IOException {
DataInputStream din = new DataInputStream(socket.getInputStream());
int hdr = din.readInt();
if (hdr <= 0 || hdr > hello.length() + 8) // Version number may be somewhat longer in future.
throw new IOException("Invalid MalmoEnv hello header length");
byte[] data = new byte[hdr];
din.readFully(data);
if (!new String(data).startsWith(hello + version))
throw new IOException("MalmoEnv invalid protocol or version - expected " + hello + version);
}
// Handler for <MissionInit> messages.
private boolean missionInit(DataInputStream din, String command, Socket socket) throws IOException {
String ipOriginator = socket.getInetAddress().getHostName();
int hdr;
byte[] data;
hdr = din.readInt();
data = new byte[hdr];
din.readFully(data);
String id = new String(data, utf8);
TCPUtils.Log(Level.INFO,"Mission Init" + id);
String[] token = id.split(":");
String experimentId = token[0];
int role = Integer.parseInt(token[1]);
int reset = Integer.parseInt(token[2]);
int agentCount = Integer.parseInt(token[3]);
Boolean isSynchronous = Boolean.parseBoolean(token[4]);
Long seed = null;
if(token.length > 5)
seed = Long.parseLong(token[5]);
if(isSynchronous && agentCount > 1){
throw new IOException("Synchronous mode currently does not support multiple agents.");
}
port = -1;
boolean allTokensConsumed = true;
boolean started = false;
lock.lock();
try {
if (role == 0) {
String previousToken = experimentId + ":0:" + (reset - 1);
initTokens.remove(previousToken);
String myToken = experimentId + ":0:" + reset;
if (!initTokens.containsKey(myToken)) {
TCPUtils.Log(Level.INFO,"(Pre)Start " + role + " reset " + reset);
started = startUp(command, ipOriginator, experimentId, reset, agentCount, myToken, seed, isSynchronous);
if (started)
initTokens.put(myToken, 0);
} else {
started = true; // Pre-started previously.
}
// Check that all previous tokens have been consumed. If not, don't proceed to the mission.
allTokensConsumed = areAllTokensConsumed(experimentId, reset, agentCount);
if (!allTokensConsumed) {
try {
cond.await(COND_WAIT_SECONDS, TimeUnit.SECONDS);
} catch (InterruptedException ie) {
}
allTokensConsumed = areAllTokensConsumed(experimentId, reset, agentCount);
}
} else {
TCPUtils.Log(Level.INFO, "Start " + role + " reset " + reset);
started = startUp(command, ipOriginator, experimentId, reset, agentCount, experimentId + ":" + role + ":" + reset, seed, isSynchronous);
}
} finally {
lock.unlock();
}
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
dout.writeInt(BYTES_INT);
dout.writeInt(allTokensConsumed && started ? 1 : 0);
dout.flush();
return allTokensConsumed && started;
}
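// Illustrative sketch only: the colon-separated id parsed above has the form
// experimentId:role:reset:agentCount:isSynchronous[:seed]. All values below are
// hypothetical placeholders.
private String exampleMissionInitId() {
String experimentId = "exp-1";
int role = 0, reset = 1, agentCount = 1;
boolean isSynchronous = true;
long seed = 42L; // the optional sixth token
return experimentId + ":" + role + ":" + reset + ":" + agentCount + ":" + isSynchronous + ":" + seed;
}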
private boolean areAllTokensConsumed(String experimentId, int reset, int agentCount) {
boolean allTokensConsumed = true;
for (int i = 1; i < agentCount; i++) {
String tokenForAgent = experimentId + ":" + i + ":" + (reset - 1);
if (initTokens.containsKey(tokenForAgent)) {
TCPUtils.Log(Level.FINE,"Mission init - unconsumed " + tokenForAgent);
allTokensConsumed = false;
}
}
return allTokensConsumed;
}
private boolean startUp(String command, String ipOriginator, String experimentId, int reset, int agentCount, String myToken, Long seed, Boolean isSynchronous) throws IOException {
// Clear out mission state
envState.reward = 0.0;
envState.commands.clear();
envState.obs = null;
envState.info = "";
envState.missionInit = command;
envState.done = false;
envState.quit = false;
envState.token = myToken;
envState.experimentId = experimentId;
envState.agentCount = agentCount;
envState.reset = reset;
envState.synchronous = isSynchronous;
envState.seed = seed;
return startUpMission(command, ipOriginator);
}
private boolean startUpMission(String command, String ipOriginator) throws IOException {
if (missionPoller == null)
return false;
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(baos);
missionPoller.commandReceived(command, ipOriginator, dos);
dos.flush();
byte[] reply = baos.toByteArray();
ByteArrayInputStream bais = new ByteArrayInputStream(reply);
DataInputStream dis = new DataInputStream(bais);
int hdr = dis.readInt();
byte[] replyBytes = new byte[hdr];
dis.readFully(replyBytes);
String replyStr = new String(replyBytes, utf8);
if (replyStr.equals("MALMOOK")) {
TCPUtils.Log(Level.INFO, "MalmoEnvServer Mission starting ...");
return true;
} else if (replyStr.equals("MALMOBUSY")) {
TCPUtils.Log(Level.INFO, "MalmoEnvServer Busy - I want to quit");
this.envState.quit = true;
}
return false;
}
private static final int stepTagLength = "<Step_>".length(); // Step with option code.
private synchronized void stepSync(String command, Socket socket, DataInputStream din) throws IOException
{
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Entering synchronous step.");
nsteps += 1;
profiler.startSection("commandProcessing");
String actions = command.substring(stepTagLength, command.length() - (stepTagLength + 2));
int options = Character.getNumericValue(command.charAt(stepTagLength - 2));
boolean withInfo = options == 0 || options == 2;
// Prepare to write data to the client.
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
double reward = 0.0;
boolean done;
byte[] obs;
String info = "";
boolean sent = false;
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Acquiring lock for synchronous step.");
lock.lock();
try {
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Lock is acquired.");
done = envState.done;
// TODO Handle when the environment is done.
// Process the actions.
if (actions.contains("\n")) {
String[] cmds = actions.split("\\n");
for(String cmd : cmds) {
envState.commands.add(cmd);
}
} else {
if (!actions.isEmpty())
envState.commands.add(actions);
}
sent = true;
profiler.endSection(); //cmd
profiler.startSection("requestTick");
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Received: " + actions);
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Requesting tick.");
// Now wait to run a tick
// If synchronous mode is off then we should check whether the want-to-quit flag is set.
while(!TimeHelper.SyncManager.requestTick() && !done ){Thread.yield();}
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Tick request granted.");
profiler.endSection();
profiler.startSection("waitForTick");
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Waiting for tick.");
// Then wait until the tick is finished
while(!TimeHelper.SyncManager.isTickCompleted() && !done ){ Thread.yield();}
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> TICK DONE. Getting observation.");
profiler.endSection();
profiler.startSection("getObservation");
// After which, get the observations.
obs = getObservation(done);
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Observation received. Getting info.");
profiler.endSection();
profiler.startSection("getInfo");
// Pick up rewards.
reward = envState.reward;
if (withInfo) {
info = envState.info;
// if(info == null)
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> FILLING INFO: NULL");
// else
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> FILLING " + info.toString());
}
done = envState.done;
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> STATUS " + Boolean.toString(done));
envState.info = null;
envState.obs = null;
envState.reward = 0.0;
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Info received..");
profiler.endSection();
} finally {
lock.unlock();
}
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Lock released. Writing observation, info, done.");
profiler.startSection("writeObs");
dout.writeInt(obs.length);
dout.write(obs);
dout.writeInt(BYTES_DOUBLE + 2);
dout.writeDouble(reward);
dout.writeByte(done ? 1 : 0);
dout.writeByte(sent ? 1 : 0);
if (withInfo) {
byte[] infoBytes = info.getBytes(utf8);
dout.writeInt(infoBytes.length);
dout.write(infoBytes);
}
profiler.endSection(); //write obs
profiler.startSection("flush");
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Packets written. Flushing.");
dout.flush();
profiler.endSection(); // flush
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <STEP> Done with step.");
}
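// Illustrative sketch only: how a client would consume the step reply written
// above - an observation block, a reward/done/sent block, and an info block
// when the option code asked for info.
private void exampleReadStepReply(DataInputStream din, boolean withInfo) throws IOException {
byte[] obs = new byte[din.readInt()];
din.readFully(obs);
din.readInt(); // header: BYTES_DOUBLE + 2
double reward = din.readDouble();
boolean done = din.readByte() != 0;
boolean sent = din.readByte() != 0;
if (withInfo) {
byte[] infoBytes = new byte[din.readInt()];
din.readFully(infoBytes);
String info = new String(infoBytes, utf8);
}
}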
// Handler for <Step_> messages. The single-digit option code after the underscore specifies whether the turn key and info are included in the message.
private void step(String command, Socket socket, DataInputStream din) throws IOException {
if(envState.synchronous){
stepSync(command, socket, din);
}
else{
System.out.println("[ERROR] Asynchronous stepping is not supported in MineRL.");
}
}
// Handler for <Peek> messages.
private void peek(String command, Socket socket, DataInputStream din) throws IOException {
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
byte[] obs;
boolean done;
String info = "";
// AOG - As we've only seen issues with the peek request, I've focused my changes on just
// this function. Initially we want to be optimistic and assume we're not going to abort
// the request; my observations of event timings indicate that there is plenty of time
// between the peek request being received and the reset failing, so a race condition is
// unlikely.
abortRequest = false;
lock.lock();
try {
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <PEEK> Waiting for pistol to fire.");
while(!TimeHelper.SyncManager.hasServerFiredPistol() && !abortRequest){
// Now wait to run a tick
while(!TimeHelper.SyncManager.requestTick() && !abortRequest){Thread.yield();}
// Then wait until the tick is finished
while(!TimeHelper.SyncManager.isTickCompleted() && !abortRequest){ Thread.yield();}
Thread.yield();
}
if (abortRequest) {
System.out.println("AOG: Aborting peek request");
// AOG - We detect the lack of observation within our Python wrapper and throw a slightly
// different exception that bypasses MineRL's automatic clean-up code. If we were to report
// 'done', MineRL would detect this as a runtime error and kill the Minecraft process,
// triggering a lengthy restart. So far from testing, Minecraft itself is fine and we can
// retry the reset; it's only the tight loops above that were causing things to stall and
// time out.
// No observation
dout.writeInt(0);
// No info
dout.writeInt(0);
// Done flag: header, then a byte deliberately left at 0 (not done) so the
// Python wrapper can retry the reset instead of restarting Minecraft.
dout.writeInt(1);
dout.writeByte(0);
dout.flush();
return;
}
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <PEEK> Pistol fired!.");
// Wait two ticks for the first observation from server to be propagated.
while(!TimeHelper.SyncManager.requestTick() ){Thread.yield();}
// Then wait until the tick is finished
while(!TimeHelper.SyncManager.isTickCompleted()){ Thread.yield();}
while(!TimeHelper.SyncManager.requestTick() ){Thread.yield();}
// Then wait until the tick is finished
while(!TimeHelper.SyncManager.isTickCompleted()){ Thread.yield();}
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <PEEK> Getting observation.");
obs = getObservation(false);
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <PEEK> Observation acquired.");
done = envState.done;
info = envState.info;
} finally {
lock.unlock();
}
dout.writeInt(obs.length);
dout.write(obs);
byte[] infoBytes = info.getBytes(utf8);
dout.writeInt(infoBytes.length);
dout.write(infoBytes);
dout.writeInt(1);
dout.writeByte(done ? 1 : 0);
dout.flush();
}
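// Illustrative sketch only: how a client would consume the peek reply written
// above. On the abort path both the observation and the info are zero-length
// and the done byte is 0.
private boolean exampleReadPeekReply(DataInputStream din) throws IOException {
byte[] obs = new byte[din.readInt()];
din.readFully(obs);
byte[] infoBytes = new byte[din.readInt()];
din.readFully(infoBytes);
din.readInt(); // header for the done byte
return din.readByte() != 0; // done
}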
// Get the current observation; logs an error if none has been produced.
public byte[] getObservation(boolean done) {
byte[] obs = envState.obs;
if (obs == null){
System.out.println("[ERROR] Video observation is null; please notify the developer.");
}
return obs;
}
// Handler for <Find> messages - used by non-zero roles to discover integrated server port from primary (role 0) service.
private final static int findTagLength = "<Find>".length();
private void find(String command, Socket socket) throws IOException {
Integer port;
lock.lock();
try {
String token = command.substring(findTagLength, command.length() - (findTagLength + 1));
TCPUtils.Log(Level.INFO, "Find token? " + token);
// Purge previous token.
String[] tokenSplits = token.split(":");
String experimentId = tokenSplits[0];
int role = Integer.parseInt(tokenSplits[1]);
int reset = Integer.parseInt(tokenSplits[2]);
String previousToken = experimentId + ":" + role + ":" + (reset - 1);
initTokens.remove(previousToken);
cond.signalAll();
// Check for next token. Wait for a short time if not already produced.
port = initTokens.get(token);
if (port == null) {
try {
cond.await(COND_WAIT_SECONDS, TimeUnit.SECONDS);
} catch (InterruptedException ie) {
}
port = initTokens.get(token);
if (port == null) {
port = 0;
TCPUtils.Log(Level.INFO,"Role " + role + " reset " + reset + " waiting for token.");
}
}
} finally {
lock.unlock();
}
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
dout.writeInt(BYTES_INT);
dout.writeInt(port);
dout.flush();
}
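// Illustrative sketch only: how a non-zero role would discover the integrated
// server port from the primary (role 0) service. The token is a hypothetical
// placeholder; a reply of 0 means no port yet, so the caller should retry.
private int exampleFindPort(Socket socket) throws IOException {
byte[] msg = "<Find>exp-1:1:1</Find>".getBytes(utf8);
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
dout.writeInt(msg.length);
dout.write(msg);
dout.flush();
DataInputStream din = new DataInputStream(socket.getInputStream());
din.readInt(); // header: BYTES_INT
return din.readInt();
}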
public boolean isSynchronous(){
return envState.synchronous;
}
// Handler for <Init> messages. These reset the service so use with care!
private void init(String command, Socket socket) throws IOException {
lock.lock();
try {
initTokens = new Hashtable<String, Integer>();
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
dout.writeInt(BYTES_INT);
dout.writeInt(1);
dout.flush();
} finally {
lock.unlock();
}
}
// Handler for <Quit> (quit mission) messages.
private void quit(String command, Socket socket) throws IOException {
lock.lock();
try {
if (!envState.done){
envState.quit = true;
}
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <PEEK> Pistol fired!.");
// Wait two ticks for the first observation from server to be propagated.
while(!TimeHelper.SyncManager.requestTick() ){Thread.yield();}
// Then wait until the tick is finished
while(!TimeHelper.SyncManager.isTickCompleted()){ Thread.yield();}
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
dout.writeInt(BYTES_INT);
dout.writeInt(envState.done ? 1 : 0);
dout.flush();
} finally {
lock.unlock();
}
}
private final static int closeTagLength = "<Close>".length();
// Handler for <Close> messages.
private void close(String command, Socket socket) throws IOException {
lock.lock();
try {
String token = command.substring(closeTagLength, command.length() - (closeTagLength + 1));
initTokens.remove(token);
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
dout.writeInt(BYTES_INT);
dout.writeInt(1);
dout.flush();
} finally {
lock.unlock();
}
}
// Handler for <Status> messages.
private void status(String command, Socket socket) throws IOException {
lock.lock();
try {
String status = "{}"; // TODO Possibly have something more interesting to report.
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
byte[] statusBytes = status.getBytes(utf8);
dout.writeInt(statusBytes.length);
dout.write(statusBytes);
dout.flush();
} finally {
lock.unlock();
}
}
// Handler for <Exit> messages. These "kill the service" temporarily so use with care!
private void exit(String command, Socket socket) throws IOException {
// lock.lock();
try {
// We may exit before we get a chance to reply.
TimeHelper.SyncManager.setSynchronous(false);
DataOutputStream dout = new DataOutputStream(socket.getOutputStream());
dout.writeInt(BYTES_INT);
dout.writeInt(1);
dout.flush();
ClientStateMachine.exitJava();
} finally {
// lock.unlock();
}
}
// Malmo client state machine interface methods:
public String getCommand() {
try {
String command = envState.commands.poll();
if (command == null)
return "";
else
return command;
} finally {
}
}
public void endMission() {
// lock.lock();
try {
// AOG - If the mission is ending, we always want to abort requests, as they
// won't be able to progress to completion and would otherwise stall.
System.out.println("AOG: MalmoEnvServer.endMission");
abort();
envState.done = true;
envState.quit = false;
envState.missionInit = null;
if (envState.token != null) {
initTokens.remove(envState.token);
envState.token = null;
envState.experimentId = null;
envState.agentCount = 0;
envState.reset = 0;
// cond.signalAll();
}
// lock.unlock();
} finally {
}
}
// Record a Malmo "observation" json - as the env info since an environment "obs" is a video frame.
public void observation(String info) {
// Parsing obs as JSON would be slower but less fragile than extracting the turn_key using string search.
// lock.lock();
try {
// TimeHelper.SyncManager.debugLog("[MALMO_ENV_SERVER] <OBSERVATION> Inserting: " + info);
envState.info = info;
// cond.signalAll();
} finally {
// lock.unlock();
}
}
public void addRewards(double rewards) {
// lock.lock();
try {
envState.reward += rewards;
} finally {
// lock.unlock();
}
}
public void addFrame(byte[] frame) {
// lock.lock();
try {
envState.obs = frame; // Replaces current.
// cond.signalAll();
} finally {
// lock.unlock();
}
}
public void notifyIntegrationServerStarted(int integrationServerPort) {
lock.lock();
try {
if (envState.token != null) {
TCPUtils.Log(Level.INFO,"Integration server start up - token: " + envState.token);
addTokens(integrationServerPort, envState.token, envState.experimentId, envState.agentCount, envState.reset);
cond.signalAll();
} else {
TCPUtils.Log(Level.WARNING,"No mission token on integration server start up!");
}
} finally {
lock.unlock();
}
}
private void addTokens(int integratedServerPort, String myToken, String experimentId, int agentCount, int reset) {
initTokens.put(myToken, integratedServerPort);
// Place tokens for other agents to find.
for (int i = 1; i < agentCount; i++) {
String tokenForAgent = experimentId + ":" + i + ":" + reset;
initTokens.put(tokenForAgent, integratedServerPort);
}
}
// IWantToQuit implementation.
@Override
public boolean doIWantToQuit(MissionInit missionInit) {
// lock.lock();
try {
return envState.quit;
} finally {
// lock.unlock();
}
}
public Long getSeed(){
return envState.seed;
}
private void setWantToQuit() {
// lock.lock();
try {
envState.quit = true;
} finally {
if(TimeHelper.SyncManager.isSynchronous()){
// We want to desynchronize everything.
TimeHelper.SyncManager.setSynchronous(false);
}
// lock.unlock();
}
}
@Override
public void prepare(MissionInit missionInit) {
}
@Override
public void cleanup() {
}
@Override
public String getOutcome() {
return "Env quit";
}
}
@@ -1,78 +0,0 @@
FROM mcr.microsoft.com/azureml/base-gpu:openmpi3.1.2-cuda10.0-cudnn7-ubuntu18.04
# Install some basic utilities
RUN apt-get update && apt-get install -y \
curl \
ca-certificates \
sudo \
cpio \
git \
bzip2 \
libx11-6 \
tmux \
htop \
gcc \
xvfb \
python-opengl \
x11-xserver-utils \
ffmpeg \
mesa-utils \
nano \
vim \
rsync \
&& rm -rf /var/lib/apt/lists/*
# Create a working directory
RUN mkdir /app
WORKDIR /app
# Create a Python 3.7 environment
RUN conda install conda-build \
&& conda create -y --name py37 python=3.7.3 \
&& conda clean -ya
ENV CONDA_DEFAULT_ENV=py37
# Install Minecraft needed libraries
RUN mkdir -p /usr/share/man/man1 && \
sudo apt-get update && \
sudo apt-get install -y \
openjdk-8-jre-headless=8u162-b12-1 \
openjdk-8-jdk-headless=8u162-b12-1 \
openjdk-8-jre=8u162-b12-1 \
openjdk-8-jdk=8u162-b12-1
RUN pip install --upgrade --user minerl
# PyTorch with CUDA 10 installation
RUN conda install -y -c pytorch \
cuda100=1.0 \
magma-cuda100=2.4.0 \
"pytorch=1.1.0=py3.7_cuda10.0.130_cudnn7.5.1_0" \
torchvision=0.3.0 \
&& conda clean -ya
RUN pip install \
pandas \
matplotlib \
numpy \
scipy \
azureml-defaults \
tensorboardX \
tensorflow-gpu==1.15rc2 \
GPUtil \
tabulate \
dm_tree \
lz4 \
ray==0.8.3 \
ray[rllib]==0.8.3 \
ray[tune]==0.8.3
COPY patch_files/* /root/.local/lib/python3.7/site-packages/minerl/env/Malmo/Minecraft/src/main/java/com/microsoft/Malmo/Client/
# Start minerl to pre-fetch minerl files (saves time when starting minerl during training)
RUN xvfb-run -a -s "-screen 0 1400x900x24" python -c "import gym; import minerl; env = gym.make('MineRLTreechop-v0'); env.close();"
RUN pip install --index-url https://test.pypi.org/simple/ malmo && \
python -c "import malmo.minecraftbootstrap; malmo.minecraftbootstrap.download();"
ENV MALMO_XSD_PATH="/app/MalmoPlatform/Schemas"

Some files were not shown because too many files have changed in this diff.