diff --git a/configuration.ipynb b/configuration.ipynb index 08e8e159..45dc345b 100644 --- a/configuration.ipynb +++ b/configuration.ipynb @@ -103,7 +103,7 @@ "source": [ "import azureml.core\n", "\n", - "print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.15.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/contrib/fairness/fairlearn-azureml-mitigation.yml b/contrib/fairness/fairlearn-azureml-mitigation.yml new file mode 100644 index 00000000..0e23d674 --- /dev/null +++ b/contrib/fairness/fairlearn-azureml-mitigation.yml @@ -0,0 +1,8 @@ +name: fairlearn-azureml-mitigation +dependencies: +- pip: + - azureml-sdk + - azureml-contrib-fairness + - fairlearn==0.4.6 + - joblib + - shap diff --git a/contrib/fairness/upload-fairness-dashboard.yml b/contrib/fairness/upload-fairness-dashboard.yml new file mode 100644 index 00000000..8317e795 --- /dev/null +++ b/contrib/fairness/upload-fairness-dashboard.yml @@ -0,0 +1,8 @@ +name: upload-fairness-dashboard +dependencies: +- pip: + - azureml-sdk + - azureml-contrib-fairness + - fairlearn==0.4.6 + - joblib + - shap diff --git a/how-to-use-azureml/automated-machine-learning/automl_env.yml b/how-to-use-azureml/automated-machine-learning/automl_env.yml index d5d525c5..b336c0eb 100644 --- a/how-to-use-azureml/automated-machine-learning/automl_env.yml +++ b/how-to-use-azureml/automated-machine-learning/automl_env.yml @@ -24,5 +24,5 @@ dependencies: - pytorch-transformers==1.0.0 - spacy==2.1.8 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz - - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.14.0/validated_win32_requirements.txt [--no-deps] + - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.15.0/validated_win32_requirements.txt [--no-deps] diff --git a/how-to-use-azureml/automated-machine-learning/automl_env_linux.yml b/how-to-use-azureml/automated-machine-learning/automl_env_linux.yml index fb49a7a8..66742c0d 100644 --- a/how-to-use-azureml/automated-machine-learning/automl_env_linux.yml +++ b/how-to-use-azureml/automated-machine-learning/automl_env_linux.yml @@ -24,5 +24,5 @@ dependencies: - pytorch-transformers==1.0.0 - spacy==2.1.8 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz - - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.14.0/validated_linux_requirements.txt [--no-deps] + - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.15.0/validated_linux_requirements.txt [--no-deps] diff --git a/how-to-use-azureml/automated-machine-learning/automl_env_mac.yml b/how-to-use-azureml/automated-machine-learning/automl_env_mac.yml index b86d7c38..e3cf95bf 100644 --- a/how-to-use-azureml/automated-machine-learning/automl_env_mac.yml +++ b/how-to-use-azureml/automated-machine-learning/automl_env_mac.yml @@ -25,4 +25,4 @@ dependencies: - pytorch-transformers==1.0.0 - spacy==2.1.8 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz - - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.14.0/validated_darwin_requirements.txt [--no-deps] + - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.15.0/validated_darwin_requirements.txt [--no-deps] diff --git 
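Editorial aside: the hunks above and below are largely mechanical — the SDK version the notebooks advertise moves from 1.14.0 to 1.15.0, and the validated-requirements URLs move to the matching 1.15.0 folder. If a stronger guard than the two `print` statements were ever wanted, a comparison along these lines would flag an installed SDK older than the one a notebook targets. This is an illustrative sketch only, not part of this diff; it assumes the `packaging` library is available alongside `azureml-core`.

```python
from packaging.version import Version

import azureml.core

# The version these notebooks are pinned to in this diff.
NOTEBOOK_SDK_VERSION = "1.15.0"

installed = Version(azureml.core.VERSION)
target = Version(NOTEBOOK_SDK_VERSION)

print("This notebook was created using version", NOTEBOOK_SDK_VERSION, "of the Azure ML SDK")
print("You are currently using version", installed, "of the Azure ML SDK")

if installed < target:
    # An older SDK may lack APIs the notebooks now rely on,
    # e.g. the public azureml.interpret.ExplanationClient import below.
    print("Warning: your SDK is older than the notebook target; "
          "consider `pip install --upgrade azureml-sdk`.")
```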
a/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb b/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb index a2e40335..786ebc8c 100644 --- a/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb +++ b/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb @@ -89,7 +89,7 @@ "from azureml.automl.core.featurization import FeaturizationConfig\n", "from azureml.core.dataset import Dataset\n", "from azureml.train.automl import AutoMLConfig\n", - "from azureml.interpret._internal.explanation_client import ExplanationClient" + "from azureml.interpret import ExplanationClient" ] }, { @@ -105,7 +105,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.15.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb b/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb index 04e90748..288dc622 100644 --- a/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb +++ b/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb @@ -93,7 +93,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.15.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb b/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb index 6974cc87..1b61bc74 100644 --- a/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb +++ b/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb @@ -88,7 +88,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.15.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, @@ -190,7 +190,7 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.core.runconfig import CondaDependencies, DEFAULT_CPU_IMAGE, RunConfiguration\n", + "from azureml.core.runconfig import CondaDependencies, RunConfiguration\n", "\n", "# create a new RunConfig object\n", "conda_run_config = RunConfiguration(framework=\"python\")\n", @@ -199,7 +199,6 @@ "conda_run_config.target = compute_target\n", "\n", "conda_run_config.environment.docker.enabled = True\n", - "conda_run_config.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE\n", 
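Editorial aside: the import rewrite in this file (and repeated in the notebooks below) moves `ExplanationClient` off the private `azureml.interpret._internal.explanation_client` module onto the public `azureml.interpret` surface; call sites are unchanged. A minimal sketch of the usage pattern these notebooks follow, mirroring the regression-explanation-featurization hunk later in this diff, where `automl_run` is assumed to be a completed AutoML run created in earlier cells:

```python
from azureml.interpret import ExplanationClient  # public import path as of this change

# `automl_run` is assumed to exist from earlier cells in the notebook.
client = ExplanationClient.from_run(automl_run)

# Fetch the engineered-feature explanation uploaded during training
# and print the per-feature importance values.
engineered_explanations = client.download_model_explanation(raw=False)
print(engineered_explanations.get_feature_importance_dict())
```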
"\n", "cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]', 'applicationinsights', 'azureml-opendatasets', 'azureml-defaults'], \n", " conda_packages=['numpy==1.16.2'], \n", diff --git a/how-to-use-azureml/automated-machine-learning/experimental/README.md b/how-to-use-azureml/automated-machine-learning/experimental/README.md index 8c3ef2eb..566f9999 100644 --- a/how-to-use-azureml/automated-machine-learning/experimental/README.md +++ b/how-to-use-azureml/automated-machine-learning/experimental/README.md @@ -17,16 +17,16 @@ There's no need to install mini-conda specifically. - Download the sample notebooks from [GitHub](https://github.com/Azure/MachineLearningNotebooks) as zip and extract the contents to a local directory. The automated ML sample notebooks are in the "automated-machine-learning" folder. ### 3. Setup a new conda environment -The **automl_setup** script creates a new conda environment, installs the necessary packages, configures the widget and starts a jupyter notebook. It takes the conda environment name as an optional parameter. The default conda environment name is azure_automl. The exact command depends on the operating system. See the specific sections below for Windows, Mac and Linux. It can take about 10 minutes to execute. +The **automl_setup_thin_client** script creates a new conda environment, installs the necessary packages, configures the widget and starts a jupyter notebook. It takes the conda environment name as an optional parameter. The default conda environment name is azure_automl_experimental. The exact command depends on the operating system. See the specific sections below for Windows, Mac and Linux. It can take about 10 minutes to execute. Packages installed by the **automl_setup** script: -For more details refer to the [automl_env.yml](./automl_env.yml) +For more details refer to the [automl_env_thin_client.yml](./automl_env_thin_client.yml) ## Windows Start an **Anaconda Prompt** window, cd to the **how-to-use-azureml/automated-machine-learning/experimental** folder where the sample notebooks were extracted and then run: ``` -automl_setup +automl_setup_thin_client ``` ## Mac Install "Command line developer tools" if it is not already installed (you can use the command: `xcode-select --install`). @@ -34,14 +34,14 @@ Install "Command line developer tools" if it is not already installed (you can u Start a Terminal windows, cd to the **how-to-use-azureml/automated-machine-learning/experimental** folder where the sample notebooks were extracted and then run: ``` -bash automl_setup_mac.sh +bash automl_setup_thin_client_mac.sh ``` ## Linux cd to the **how-to-use-azureml/automated-machine-learning/experimental** folder where the sample notebooks were extracted and then run: ``` -bash automl_setup_linux.sh +bash automl_setup_thin_client_linux.sh ``` ### 4. Running configuration.ipynb @@ -49,7 +49,7 @@ bash automl_setup_linux.sh - Execute the cells in the notebook to Register Machine Learning Services Resource Provider and create a workspace. (*instructions in notebook*) ### 5. Running Samples -- Please make sure you use the Python [conda env:azure_automl] kernel when trying the sample Notebooks. +- Please make sure you use the Python [conda env:azure_automl_experimental] kernel when trying the sample Notebooks. - Follow the instructions in the individual notebooks to explore various features in automated ML. ### 6. 
Starting jupyter notebook manually @@ -71,7 +71,7 @@ jupyter notebook # Automated ML SDK Sample Notebooks -- [auto-ml-regression.ipynb](regression/auto-ml-regression.ipynb) +- [auto-ml-regression-model-proxy.ipynb](regression-model-proxy/auto-ml-regression-model-proxy.ipynb) - Dataset: Hardware Performance Dataset - Simple example of using automated ML for regression - Uses azure compute for training diff --git a/how-to-use-azureml/automated-machine-learning/experimental/automl_setup.cmd b/how-to-use-azureml/automated-machine-learning/experimental/automl_setup_thin_client.cmd similarity index 100% rename from how-to-use-azureml/automated-machine-learning/experimental/automl_setup.cmd rename to how-to-use-azureml/automated-machine-learning/experimental/automl_setup_thin_client.cmd diff --git a/how-to-use-azureml/automated-machine-learning/experimental/automl_setup_linux.sh b/how-to-use-azureml/automated-machine-learning/experimental/automl_setup_thin_client_linux.sh similarity index 100% rename from how-to-use-azureml/automated-machine-learning/experimental/automl_setup_linux.sh rename to how-to-use-azureml/automated-machine-learning/experimental/automl_setup_thin_client_linux.sh diff --git a/how-to-use-azureml/automated-machine-learning/experimental/automl_setup_mac.sh b/how-to-use-azureml/automated-machine-learning/experimental/automl_setup_thin_client_mac.sh similarity index 100% rename from how-to-use-azureml/automated-machine-learning/experimental/automl_setup_mac.sh rename to how-to-use-azureml/automated-machine-learning/experimental/automl_setup_thin_client_mac.sh diff --git a/how-to-use-azureml/automated-machine-learning/experimental/automl_env.yml b/how-to-use-azureml/automated-machine-learning/experimental/automl_thin_client_env.yml similarity index 100% rename from how-to-use-azureml/automated-machine-learning/experimental/automl_env.yml rename to how-to-use-azureml/automated-machine-learning/experimental/automl_thin_client_env.yml diff --git a/how-to-use-azureml/automated-machine-learning/experimental/automl_env_mac.yml b/how-to-use-azureml/automated-machine-learning/experimental/automl_thin_client_env_mac.yml similarity index 100% rename from how-to-use-azureml/automated-machine-learning/experimental/automl_env_mac.yml rename to how-to-use-azureml/automated-machine-learning/experimental/automl_thin_client_env_mac.yml diff --git a/how-to-use-azureml/automated-machine-learning/experimental/regression/auto-ml-regression-model-proxy.ipynb b/how-to-use-azureml/automated-machine-learning/experimental/regression-model-proxy/auto-ml-regression-model-proxy.ipynb similarity index 96% rename from how-to-use-azureml/automated-machine-learning/experimental/regression/auto-ml-regression-model-proxy.ipynb rename to how-to-use-azureml/automated-machine-learning/experimental/regression-model-proxy/auto-ml-regression-model-proxy.ipynb index 2669e811..8f6433ce 100644 --- a/how-to-use-azureml/automated-machine-learning/experimental/regression/auto-ml-regression-model-proxy.ipynb +++ b/how-to-use-azureml/automated-machine-learning/experimental/regression-model-proxy/auto-ml-regression-model-proxy.ipynb @@ -13,7 +13,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/experimental/regression/auto-ml-regression.png)" + 
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/experimental/regression-model-proxy/auto-ml-regression-model-proxy.png)" ] }, { @@ -38,7 +38,7 @@ "metadata": {}, "source": [ "## Introduction\n", - "In this example we use the Hardware Performance Dataset to showcase how you can use AutoML for a simple regression problem. The Regression goal is to predict the performance of certain combinations of hardware parts.\n", + "In this example we use an experimental feature, Model Proxy, to do a predict on the best generated model without downloading the model locally. The prediction will happen on same compute and environment that was used to train the model. This feature is currently in the experimental state, which means that the API is prone to changing, please make sure to run on the latest version of this notebook if you face any issues.\n", "\n", "If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n", "\n", @@ -92,7 +92,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.15.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, @@ -384,10 +384,10 @@ "metadata": {}, "outputs": [], "source": [ - "y_pred_train = best_model_proxy.predict(train_data).to_pandas_dataframe()\n", + "y_pred_train = best_model_proxy.predict(train_data).to_pandas_dataframe().values.flatten()\n", "y_residual_train = y_train - y_pred_train\n", "\n", - "y_pred_test = best_model_proxy.predict(test_data).to_pandas_dataframe()\n", + "y_pred_test = best_model_proxy.predict(test_data).to_pandas_dataframe().values.flatten()\n", "y_residual_test = y_test - y_pred_test" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/experimental/regression/auto-ml-regression-model-proxy.yml b/how-to-use-azureml/automated-machine-learning/experimental/regression-model-proxy/auto-ml-regression-model-proxy.yml similarity index 100% rename from how-to-use-azureml/automated-machine-learning/experimental/regression/auto-ml-regression-model-proxy.yml rename to how-to-use-azureml/automated-machine-learning/experimental/regression-model-proxy/auto-ml-regression-model-proxy.yml diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb index 511d4ed7..b0c4dd12 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb @@ -114,7 +114,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.15.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb 
b/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb index 59a9be7b..7d892d51 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb @@ -87,7 +87,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.15.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/run_forecast.py b/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/run_forecast.py index a4529a05..a9b7ccdc 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/run_forecast.py +++ b/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/run_forecast.py @@ -1,20 +1,12 @@ -from azureml.core import Environment -from azureml.core.conda_dependencies import CondaDependencies from azureml.train.estimator import Estimator -from azureml.core.run import Run def run_rolling_forecast(test_experiment, compute_target, train_run, test_dataset, target_column_name, inference_folder='./forecast'): - condafile = inference_folder + '/condafile.yml' train_run.download_file('outputs/model.pkl', inference_folder + '/model.pkl') - train_run.download_file('outputs/conda_env_v_1_0_0.yml', condafile) - inference_env = Environment("myenv") - inference_env.docker.enabled = True - inference_env.python.conda_dependencies = CondaDependencies( - conda_dependencies_file_path=condafile) + inference_env = train_run.get_environment() est = Estimator(source_directory=inference_folder, entry_script='forecasting_script.py', diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb index ebb2885b..99db8038 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb @@ -97,7 +97,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.15.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.ipynb index 580747cc..d74563e5 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.ipynb @@ -94,7 +94,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.15.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", 
azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb index 12f2d2c8..598f41e3 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb @@ -82,7 +82,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.15.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb b/how-to-use-azureml/automated-machine-learning/local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb index 8dd488ec..26390b40 100644 --- a/how-to-use-azureml/automated-machine-learning/local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb +++ b/how-to-use-azureml/automated-machine-learning/local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb @@ -80,7 +80,7 @@ "from azureml.core.workspace import Workspace\n", "from azureml.core.dataset import Dataset\n", "from azureml.train.automl import AutoMLConfig\n", - "from azureml.interpret._internal.explanation_client import ExplanationClient" + "from azureml.interpret import ExplanationClient" ] }, { @@ -96,7 +96,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.15.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb b/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb index 9b2f9e0e..afdbc5dc 100644 --- a/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb +++ b/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb @@ -98,7 +98,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.15.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, @@ -625,7 +625,7 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.interpret._internal.explanation_client import ExplanationClient\n", + "from azureml.interpret import ExplanationClient\n", "client = ExplanationClient.from_run(automl_run)\n", "engineered_explanations = client.download_model_explanation(raw=False, comment='engineered explanations')\n", "print(engineered_explanations.get_feature_importance_dict())\n", diff --git 
a/how-to-use-azureml/automated-machine-learning/regression/auto-ml-regression.ipynb b/how-to-use-azureml/automated-machine-learning/regression/auto-ml-regression.ipynb index d7b27df4..d153fa4d 100644 --- a/how-to-use-azureml/automated-machine-learning/regression/auto-ml-regression.ipynb +++ b/how-to-use-azureml/automated-machine-learning/regression/auto-ml-regression.ipynb @@ -92,7 +92,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.15.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/azure-databricks/README.md b/how-to-use-azureml/azure-databricks/README.md deleted file mode 100644 index 4749c0c6..00000000 --- a/how-to-use-azureml/azure-databricks/README.md +++ /dev/null @@ -1,33 +0,0 @@ -Azure Databricks is a managed Spark offering on Azure and customers already use it for advanced analytics. It provides a collaborative Notebook based environment with CPU or GPU based compute cluster. - -In this section, you will find sample notebooks on how to use Azure Machine Learning SDK with Azure Databricks. You can train a model using Spark MLlib and then deploy the model to ACI/AKS from within Azure Databricks. You can also use Automated ML capability (**public preview**) of Azure ML SDK with Azure Databricks. - -- Customers who use Azure Databricks for advanced analytics can now use the same cluster to run experiments with or without automated machine learning. -- You can keep the data within the same cluster. -- You can leverage the local worker nodes with autoscale and auto termination capabilities. -- You can use multiple cores of your Azure Databricks cluster to perform simultenous training. -- You can further tune the model generated by automated machine learning if you chose to. -- Every run (including the best run) is available as a pipeline, which you can tune further if needed. -- The model trained using Azure Databricks can be registered in Azure ML SDK workspace and then deployed to Azure managed compute (ACI or AKS) using the Azure Machine learning SDK. - -Please follow our [Azure doc](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-environment#azure-databricks) to install the sdk in your Azure Databricks cluster before trying any of the sample notebooks. - -**Single file** - -The following archive contains all the sample notebooks. You can the run notebooks after importing [DBC](Databricks_AMLSDK_1-4_6.dbc) in your Databricks workspace instead of downloading individually. - -Notebooks 1-4 have to be run sequentially & are related to Income prediction experiment based on this [dataset](https://archive.ics.uci.edu/ml/datasets/adult) and demonstrate how to data prep, train and operationalize a Spark ML model with Azure ML Python SDK from within Azure Databricks. - -Notebook 6 is an Automated ML sample notebook for Classification. - -Learn more about [how to use Azure Databricks as a development environment](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-environment#azure-databricks) for Azure Machine Learning service. - -**Databricks as a Compute Target from AML Pipelines** -You can use Azure Databricks as a compute target from [Azure Machine Learning Pipelines](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-ml-pipelines). 
Take a look at this notebook for details: [aml-pipelines-use-databricks-as-compute-target.ipynb](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks/databricks-as-remote-compute-target/aml-pipelines-use-databricks-as-compute-target.ipynb). - -For more on SDK concepts, please refer to [notebooks](https://github.com/Azure/MachineLearningNotebooks). - -**Please let us know your feedback.** - - - -![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/azure-databricks/README.png) \ No newline at end of file diff --git a/how-to-use-azureml/azure-databricks/amlsdk/build-model-run-history-03.ipynb b/how-to-use-azureml/azure-databricks/amlsdk/build-model-run-history-03.ipynb deleted file mode 100644 index 6584e388..00000000 --- a/how-to-use-azureml/azure-databricks/amlsdk/build-model-run-history-03.ipynb +++ /dev/null @@ -1,373 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Azure ML & Azure Databricks notebooks by Parashar Shah.\n", - "\n", - "Copyright (c) Microsoft Corporation. All rights reserved.\n", - "\n", - "Licensed under the MIT License." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#Model Building" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import pprint\n", - "import numpy as np\n", - "\n", - "from pyspark.ml import Pipeline, PipelineModel\n", - "from pyspark.ml.feature import OneHotEncoder, StringIndexer, VectorAssembler\n", - "from pyspark.ml.classification import LogisticRegression\n", - "from pyspark.ml.evaluation import BinaryClassificationEvaluator\n", - "from pyspark.ml.tuning import CrossValidator, ParamGridBuilder" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import azureml.core\n", - "\n", - "# Check core SDK version number\n", - "print(\"SDK version:\", azureml.core.VERSION)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Set auth to be used by workspace related APIs.\n", - "# For automation or CI/CD ServicePrincipalAuthentication can be used.\n", - "# https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.authentication.serviceprincipalauthentication?view=azure-ml-py\n", - "auth = None" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# import the Workspace class and check the azureml SDK version\n", - "from azureml.core import Workspace\n", - "\n", - "ws = Workspace.from_config(auth = auth)\n", - "print('Workspace name: ' + ws.name, \n", - " 'Azure region: ' + ws.location, \n", - " 'Subscription id: ' + ws.subscription_id, \n", - " 'Resource group: ' + ws.resource_group, sep = '\\n')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#get the train and test datasets\n", - "train_data_path = \"AdultCensusIncomeTrain\"\n", - "test_data_path = \"AdultCensusIncomeTest\"\n", - "\n", - "train = spark.read.parquet(train_data_path)\n", - "test = spark.read.parquet(test_data_path)\n", - "\n", - "print(\"train: ({}, {})\".format(train.count(), len(train.columns)))\n", - "print(\"test: ({}, {})\".format(test.count(), len(test.columns)))\n", - "\n", - "train.printSchema()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, 
- "source": [ - "#Define Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "label = \"income\"\n", - "dtypes = dict(train.dtypes)\n", - "dtypes.pop(label)\n", - "\n", - "si_xvars = []\n", - "ohe_xvars = []\n", - "featureCols = []\n", - "for idx,key in enumerate(dtypes):\n", - " if dtypes[key] == \"string\":\n", - " featureCol = \"-\".join([key, \"encoded\"])\n", - " featureCols.append(featureCol)\n", - " \n", - " tmpCol = \"-\".join([key, \"tmp\"])\n", - " # string-index and one-hot encode the string column\n", - " #https://spark.apache.org/docs/2.3.0/api/java/org/apache/spark/ml/feature/StringIndexer.html\n", - " #handleInvalid: Param for how to handle invalid data (unseen labels or NULL values). \n", - " #Options are 'skip' (filter out rows with invalid data), 'error' (throw an error), \n", - " #or 'keep' (put invalid data in a special additional bucket, at index numLabels). Default: \"error\"\n", - " si_xvars.append(StringIndexer(inputCol=key, outputCol=tmpCol, handleInvalid=\"skip\"))\n", - " ohe_xvars.append(OneHotEncoder(inputCol=tmpCol, outputCol=featureCol))\n", - " else:\n", - " featureCols.append(key)\n", - "\n", - "# string-index the label column into a column named \"label\"\n", - "si_label = StringIndexer(inputCol=label, outputCol='label')\n", - "\n", - "# assemble the encoded feature columns in to a column named \"features\"\n", - "assembler = VectorAssembler(inputCols=featureCols, outputCol=\"features\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from azureml.core.run import Run\n", - "from azureml.core.experiment import Experiment\n", - "import numpy as np\n", - "import os\n", - "import shutil\n", - "\n", - "model_name = \"AdultCensus_runHistory.mml\"\n", - "model_dbfs = os.path.join(\"/dbfs\", model_name)\n", - "run_history_name = 'spark-ml-notebook'\n", - "\n", - "# start a training run by defining an experiment\n", - "myexperiment = Experiment(ws, \"Ignite_AI_Talk\")\n", - "root_run = myexperiment.start_logging()\n", - "\n", - "# Regularization Rates - \n", - "regs = [0.0001, 0.001, 0.01, 0.1]\n", - " \n", - "# try a bunch of regularization rate in a Logistic Regression model\n", - "for reg in regs:\n", - " print(\"Regularization rate: {}\".format(reg))\n", - " # create a bunch of child runs\n", - " with root_run.child_run(\"reg-\" + str(reg)) as run:\n", - " # create a new Logistic Regression model.\n", - " lr = LogisticRegression(regParam=reg)\n", - " \n", - " # put together the pipeline\n", - " pipe = Pipeline(stages=[*si_xvars, *ohe_xvars, si_label, assembler, lr])\n", - "\n", - " # train the model\n", - " model_p = pipe.fit(train)\n", - " \n", - " # make prediction\n", - " pred = model_p.transform(test)\n", - " \n", - " # evaluate. 
note only 2 metrics are supported out of the box by Spark ML.\n", - " bce = BinaryClassificationEvaluator(rawPredictionCol='rawPrediction')\n", - " au_roc = bce.setMetricName('areaUnderROC').evaluate(pred)\n", - " au_prc = bce.setMetricName('areaUnderPR').evaluate(pred)\n", - "\n", - " print(\"Area under ROC: {}\".format(au_roc))\n", - " print(\"Area Under PR: {}\".format(au_prc))\n", - " \n", - " # log reg, au_roc, au_prc and feature names in run history\n", - " run.log(\"reg\", reg)\n", - " run.log(\"au_roc\", au_roc)\n", - " run.log(\"au_prc\", au_prc)\n", - " run.log_list(\"columns\", train.columns)\n", - "\n", - " # save model\n", - " model_p.write().overwrite().save(model_name)\n", - " \n", - " # upload the serialized model into run history record\n", - " mdl, ext = model_name.split(\".\")\n", - " model_zip = mdl + \".zip\"\n", - " shutil.make_archive(mdl, 'zip', model_dbfs)\n", - " run.upload_file(\"outputs/\" + model_name, model_zip) \n", - " #run.upload_file(\"outputs/\" + model_name, path_or_stream = model_dbfs) #cannot deal with folders\n", - "\n", - " # now delete the serialized model from local folder since it is already uploaded to run history \n", - " shutil.rmtree(model_dbfs)\n", - " os.remove(model_zip)\n", - " \n", - "# Declare run completed\n", - "root_run.complete()\n", - "root_run_id = root_run.id\n", - "print (\"run id:\", root_run.id)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "metrics = root_run.get_metrics(recursive=True)\n", - "best_run_id = max(metrics, key = lambda k: metrics[k]['au_roc'])\n", - "print(best_run_id, metrics[best_run_id]['au_roc'], metrics[best_run_id]['reg'])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#Get the best run\n", - "child_runs = {}\n", - "\n", - "for r in root_run.get_children():\n", - " child_runs[r.id] = r\n", - " \n", - "best_run = child_runs[best_run_id]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#Download the model from the best run to a local folder\n", - "best_model_file_name = \"best_model.zip\"\n", - "best_run.download_file(name = 'outputs/' + model_name, output_file_path = best_model_file_name)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#Model Evaluation" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "##unzip the model to dbfs (as load() seems to require that) and load it.\n", - "if os.path.isfile(model_dbfs) or os.path.isdir(model_dbfs):\n", - " shutil.rmtree(model_dbfs)\n", - "shutil.unpack_archive(best_model_file_name, model_dbfs)\n", - "\n", - "model_p_best = PipelineModel.load(model_name)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# make prediction\n", - "pred = model_p_best.transform(test)\n", - "output = pred[['hours_per_week','age','workclass','marital_status','income','prediction']]\n", - "display(output.limit(5))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# evaluate. 
note only 2 metrics are supported out of the box by Spark ML.\n", - "bce = BinaryClassificationEvaluator(rawPredictionCol='rawPrediction')\n", - "au_roc = bce.setMetricName('areaUnderROC').evaluate(pred)\n", - "au_prc = bce.setMetricName('areaUnderPR').evaluate(pred)\n", - "\n", - "print(\"Area under ROC: {}\".format(au_roc))\n", - "print(\"Area Under PR: {}\".format(au_prc))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#Model Persistence" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "##NOTE: by default the model is saved to and loaded from /dbfs/ instead of cwd!\n", - "model_p_best.write().overwrite().save(model_name)\n", - "print(\"saved model to {}\".format(model_dbfs))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%sh\n", - "\n", - "ls -la /dbfs/AdultCensus_runHistory.mml/*" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dbutils.notebook.exit(\"success\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/azure-databricks/amlsdk/build-model-run-history-03.png)" - ] - } - ], - "metadata": { - "authors": [ - { - "name": "pasha" - } - ], - "kernelspec": { - "display_name": "Python 3.6", - "language": "python", - "name": "python36" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.6" - }, - "name": "build-model-run-history-03", - "notebookId": 3836944406456339 - }, - "nbformat": 4, - "nbformat_minor": 1 -} \ No newline at end of file diff --git a/how-to-use-azureml/azure-databricks/amlsdk/deploy-to-aci-04.ipynb b/how-to-use-azureml/azure-databricks/amlsdk/deploy-to-aci-04.ipynb deleted file mode 100644 index 910e6eef..00000000 --- a/how-to-use-azureml/azure-databricks/amlsdk/deploy-to-aci-04.ipynb +++ /dev/null @@ -1,320 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Azure ML & Azure Databricks notebooks by Parashar Shah.\n", - "\n", - "Copyright (c) Microsoft Corporation. All rights reserved.\n", - "\n", - "Licensed under the MIT License." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Register Azure Databricks trained model and deploy it to ACI\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Please ensure you have run all previous notebooks in sequence before running this.\n", - "\n", - "Please Register Azure Container Instance(ACI) using Azure Portal: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services#portal in your subscription before using the SDK to deploy your ML model to ACI." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import azureml.core\n", - "\n", - "# Check core SDK version number\n", - "print(\"SDK version:\", azureml.core.VERSION)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Set auth to be used by workspace related APIs.\n", - "# For automation or CI/CD ServicePrincipalAuthentication can be used.\n", - "# https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.authentication.serviceprincipalauthentication?view=azure-ml-py\n", - "auth = None" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from azureml.core import Workspace\n", - "\n", - "ws = Workspace.from_config(auth = auth)\n", - "print('Workspace name: ' + ws.name, \n", - " 'Azure region: ' + ws.location, \n", - " 'Subscription id: ' + ws.subscription_id, \n", - " 'Resource group: ' + ws.resource_group, sep = '\\n')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "##NOTE: service deployment always gets the model from the current working dir.\n", - "import os\n", - "\n", - "model_name = \"AdultCensus_runHistory.mml\" # \n", - "model_name_dbfs = os.path.join(\"/dbfs\", model_name)\n", - "\n", - "print(\"copy model from dbfs to local\")\n", - "model_local = \"file:\" + os.getcwd() + \"/\" + model_name\n", - "dbutils.fs.cp(model_name, model_local, True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#Register the model\n", - "from azureml.core.model import Model\n", - "mymodel = Model.register(model_path = model_name, # this points to a local file\n", - " model_name = model_name, # this is the name the model is registered as, am using same name for both path and name. 
\n", - " description = \"ADB trained model by Parashar\",\n", - " workspace = ws)\n", - "\n", - "print(mymodel.name, mymodel.description, mymodel.version)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#%%writefile score_sparkml.py\n", - "score_sparkml = \"\"\"\n", - " \n", - "import json\n", - " \n", - "def init():\n", - " # One-time initialization of PySpark and predictive model\n", - " import pyspark\n", - " import os\n", - " from azureml.core.model import Model\n", - " from pyspark.ml import PipelineModel\n", - " \n", - " global trainedModel\n", - " global spark\n", - " \n", - " spark = pyspark.sql.SparkSession.builder.appName(\"ADB and AML notebook by Parashar\").getOrCreate()\n", - " model_name = \"{model_name}\" #interpolated\n", - " # AZUREML_MODEL_DIR is an environment variable created during deployment.\n", - " # It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)\n", - " # For multiple models, it points to the folder containing all deployed models (./azureml-models)\n", - " model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), model_name)\n", - " trainedModel = PipelineModel.load(model_path)\n", - " \n", - "def run(input_json):\n", - " if isinstance(trainedModel, Exception):\n", - " return json.dumps({{\"trainedModel\":str(trainedModel)}})\n", - " \n", - " try:\n", - " sc = spark.sparkContext\n", - " input_list = json.loads(input_json)\n", - " input_rdd = sc.parallelize(input_list)\n", - " input_df = spark.read.json(input_rdd)\n", - " \n", - " # Compute prediction\n", - " prediction = trainedModel.transform(input_df)\n", - " #result = prediction.first().prediction\n", - " predictions = prediction.collect()\n", - " \n", - " #Get each scored result\n", - " preds = [str(x['prediction']) for x in predictions]\n", - " result = \",\".join(preds)\n", - " # you can return any data type as long as it is JSON-serializable\n", - " return result.tolist()\n", - " except Exception as e:\n", - " result = str(e)\n", - " return result\n", - " \n", - "\"\"\".format(model_name=model_name)\n", - " \n", - "exec(score_sparkml)\n", - " \n", - "with open(\"score_sparkml.py\", \"w\") as file:\n", - " file.write(score_sparkml)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from azureml.core.conda_dependencies import CondaDependencies \n", - "\n", - "myacienv = CondaDependencies.create(conda_packages=['scikit-learn','numpy','pandas']) # showing how to add libs as an eg. 
- not needed for this model.\n", - "\n", - "with open(\"myenv.yml\",\"w\") as f:\n", - " f.write(myacienv.serialize_to_string())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#deploy to ACI\n", - "from azureml.core.webservice import AciWebservice, Webservice\n", - "from azureml.exceptions import WebserviceException\n", - "from azureml.core.model import InferenceConfig\n", - "from azureml.core.environment import Environment\n", - "from azureml.core.conda_dependencies import CondaDependencies\n", - "\n", - "\n", - "myaci_config = AciWebservice.deploy_configuration(cpu_cores = 2, \n", - " memory_gb = 2, \n", - " tags = {'name':'Databricks Azure ML ACI'}, \n", - " description = 'This is for ADB and AML example.')\n", - "\n", - "service_name = 'aciws'\n", - "\n", - "# Remove any existing service under the same name.\n", - "try:\n", - " Webservice(ws, service_name).delete()\n", - "except WebserviceException:\n", - " pass\n", - "\n", - "myenv = Environment.get(ws, name='AzureML-PySpark-MmlSpark-0.15')\n", - "# we need to add extra packages to procured environment\n", - "# in order to deploy amended environment we need to rename it\n", - "myenv.name = 'myenv'\n", - "model_dependencies = CondaDependencies('myenv.yml')\n", - "for pip_dep in model_dependencies.pip_packages:\n", - " myenv.python.conda_dependencies.add_pip_package(pip_dep)\n", - "for conda_dep in model_dependencies.conda_packages:\n", - " myenv.python.conda_dependencies.add_conda_package(conda_dep)\n", - "inference_config = InferenceConfig(entry_script='score_sparkml.py', environment=myenv)\n", - "\n", - "myservice = Model.deploy(ws, service_name, [mymodel], inference_config, myaci_config)\n", - "myservice.wait_for_deployment(show_output=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "help(Webservice)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#for using the Web HTTP API \n", - "print(myservice.scoring_uri)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "\n", - "#get the some sample data\n", - "test_data_path = \"AdultCensusIncomeTest\"\n", - "test = spark.read.parquet(test_data_path).limit(5)\n", - "\n", - "test_json = json.dumps(test.toJSON().collect())\n", - "\n", - "print(test_json)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#using data defined above predict if income is >50K (1) or <=50K (0)\n", - "myservice.run(input_data=test_json)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#comment to not delete the web service\n", - "myservice.delete()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Deploying to other types of computes\n", - "\n", - "In order to learn how to deploy to other types of compute targets, such as AKS, please take a look at the set of notebooks in the [deployment](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/deployment) folder." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/azure-databricks/amlsdk/deploy-to-aci-04.png)" - ] - } - ], - "metadata": { - "authors": [ - { - "name": "pasha" - } - ], - "kernelspec": { - "display_name": "Python 3.6", - "language": "python", - "name": "python36" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - }, - "name": "deploy-to-aci-04", - "notebookId": 3836944406456376 - }, - "nbformat": 4, - "nbformat_minor": 1 -} \ No newline at end of file diff --git a/how-to-use-azureml/azure-databricks/amlsdk/ingest-data-02.ipynb b/how-to-use-azureml/azure-databricks/amlsdk/ingest-data-02.ipynb deleted file mode 100644 index ee2996cf..00000000 --- a/how-to-use-azureml/azure-databricks/amlsdk/ingest-data-02.ipynb +++ /dev/null @@ -1,179 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Azure ML & Azure Databricks notebooks by Parashar Shah.\n", - "\n", - "Copyright (c) Microsoft Corporation. All rights reserved.\n", - "\n", - "Licensed under the MIT License." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#Data Ingestion" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import urllib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Download AdultCensusIncome.csv from Azure CDN. This file has 32,561 rows.\n", - "dataurl = \"https://amldockerdatasets.azureedge.net/AdultCensusIncome.csv\"\n", - "datafile = \"AdultCensusIncome.csv\"\n", - "datafile_dbfs = os.path.join(\"/dbfs\", datafile)\n", - "\n", - "if os.path.isfile(datafile_dbfs):\n", - " print(\"found {} at {}\".format(datafile, datafile_dbfs))\n", - "else:\n", - " print(\"downloading {} to {}\".format(datafile, datafile_dbfs))\n", - " urllib.request.urlretrieve(dataurl, datafile_dbfs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Create a Spark dataframe out of the csv file.\n", - "data_all = sqlContext.read.format('csv').options(header='true', inferSchema='true', ignoreLeadingWhiteSpace='true', ignoreTrailingWhiteSpace='true').load(datafile)\n", - "print(\"({}, {})\".format(data_all.count(), len(data_all.columns)))\n", - "data_all.printSchema()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#renaming columns\n", - "columns_new = [col.replace(\"-\", \"_\") for col in data_all.columns]\n", - "data_all = data_all.toDF(*columns_new)\n", - "data_all.printSchema()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "display(data_all.limit(5))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#Data Preparation" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Choose feature columns and the label column.\n", - "label = \"income\"\n", - "xvars = set(data_all.columns) - {label}\n", - "\n", - "print(\"label = {}\".format(label))\n", - "print(\"features = {}\".format(xvars))\n", - "\n", - 
"data = data_all.select([*xvars, label])\n", - "\n", - "# Split data into train and test.\n", - "train, test = data.randomSplit([0.75, 0.25], seed=123)\n", - "\n", - "print(\"train ({}, {})\".format(train.count(), len(train.columns)))\n", - "print(\"test ({}, {})\".format(test.count(), len(test.columns)))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#Data Persistence" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Write the train and test data sets to intermediate storage\n", - "train_data_path = \"AdultCensusIncomeTrain\"\n", - "test_data_path = \"AdultCensusIncomeTest\"\n", - "\n", - "train_data_path_dbfs = os.path.join(\"/dbfs\", \"AdultCensusIncomeTrain\")\n", - "test_data_path_dbfs = os.path.join(\"/dbfs\", \"AdultCensusIncomeTest\")\n", - "\n", - "train.write.mode('overwrite').parquet(train_data_path)\n", - "test.write.mode('overwrite').parquet(test_data_path)\n", - "print(\"train and test datasets saved to {} and {}\".format(train_data_path_dbfs, test_data_path_dbfs))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/azure-databricks/amlsdk/ingest-data-02.png)" - ] - } - ], - "metadata": { - "authors": [ - { - "name": "pasha" - } - ], - "kernelspec": { - "display_name": "Python 3.6", - "language": "python", - "name": "python36" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.6" - }, - "name": "ingest-data-02", - "notebookId": 3836944406456362 - }, - "nbformat": 4, - "nbformat_minor": 1 -} \ No newline at end of file diff --git a/how-to-use-azureml/azure-databricks/amlsdk/installation-and-configuration-01.ipynb b/how-to-use-azureml/azure-databricks/amlsdk/installation-and-configuration-01.ipynb deleted file mode 100644 index 1db74aa5..00000000 --- a/how-to-use-azureml/azure-databricks/amlsdk/installation-and-configuration-01.ipynb +++ /dev/null @@ -1,183 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Azure ML & Azure Databricks notebooks by Parashar Shah.\n", - "\n", - "Copyright (c) Microsoft Corporation. All rights reserved.\n", - "\n", - "Licensed under the MIT License." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We support installing AML SDK as library from GUI. When attaching a library follow this https://docs.databricks.com/user-guide/libraries.html and add the below string as your PyPi package. 
You can select the option to attach the library to all clusters or just one cluster.\n", - "\n", - "**install azureml-sdk**\n", - "* Source: Upload Python Egg or PyPi\n", - "* PyPi Name: `azureml-sdk[databricks]`\n", - "* Select Install Library" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import azureml.core\n", - "\n", - "# Check core SDK version number - based on build number of preview/master.\n", - "print(\"SDK version:\", azureml.core.VERSION)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Please specify the Azure subscription Id, resource group name, workspace name, and the region in which you want to create the Azure Machine Learning Workspace.\n", - "\n", - "You can get the value of your Azure subscription ID from the Azure Portal, and then selecting Subscriptions from the menu on the left.\n", - "\n", - "For the resource_group, use the name of the resource group that contains your Azure Databricks Workspace.\n", - "\n", - "NOTE: If you provide a resource group name that does not exist, the resource group will be automatically created. This may or may not succeed in your environment, depending on the permissions you have on your Azure Subscription." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# subscription_id = \"\"\n", - "# resource_group = \"\"\n", - "# workspace_name = \"\"\n", - "# workspace_region = \"\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Set auth to be used by workspace related APIs.\n", - "# For automation or CI/CD ServicePrincipalAuthentication can be used.\n", - "# https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.authentication.serviceprincipalauthentication?view=azure-ml-py\n", - "auth = None" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# import the Workspace class and check the azureml SDK version\n", - "# exist_ok checks if workspace exists or not.\n", - "\n", - "from azureml.core import Workspace\n", - "\n", - "ws = Workspace.create(name = workspace_name,\n", - " subscription_id = subscription_id,\n", - " resource_group = resource_group, \n", - " location = workspace_region,\n", - " auth = auth,\n", - " exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#get workspace details\n", - "ws.get_details()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ws = Workspace(workspace_name = workspace_name,\n", - " subscription_id = subscription_id,\n", - " resource_group = resource_group,\n", - " auth = auth)\n", - "\n", - "# persist the subscription id, resource group name, and workspace name in aml_config/config.json.\n", - "ws.write_config()\n", - "#if you need to give a different path/filename please use this\n", - "#write_config(path=\"/databricks/driver/aml_config/\",file_name=)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "help(Workspace)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# import the Workspace class and check the azureml SDK version\n", - "from azureml.core import Workspace\n", - "\n", - "ws = Workspace.from_config(auth = auth)\n", - "#ws 
= Workspace.from_config()\n", - "print('Workspace name: ' + ws.name, \n", - " 'Azure region: ' + ws.location, \n", - " 'Subscription id: ' + ws.subscription_id, \n", - " 'Resource group: ' + ws.resource_group, sep = '\\n')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/azure-databricks/amlsdk/installation-and-configuration-01.png)" - ] - } - ], - "metadata": { - "authors": [ - { - "name": "pasha" - } - ], - "kernelspec": { - "display_name": "Python 3.6", - "language": "python", - "name": "python36" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.6" - }, - "name": "installation-and-configuration-01", - "notebookId": 3688394266452835 - }, - "nbformat": 4, - "nbformat_minor": 1 -} \ No newline at end of file diff --git a/how-to-use-azureml/azure-databricks/databricks-as-remote-compute-target/aml-pipelines-use-databricks-as-compute-target.ipynb b/how-to-use-azureml/azure-databricks/databricks-as-remote-compute-target/aml-pipelines-use-databricks-as-compute-target.ipynb deleted file mode 100644 index 51a46fc7..00000000 --- a/how-to-use-azureml/azure-databricks/databricks-as-remote-compute-target/aml-pipelines-use-databricks-as-compute-target.ipynb +++ /dev/null @@ -1,719 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Copyright (c) Microsoft Corporation. All rights reserved. \n", - "Licensed under the MIT License." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Using Databricks as a Compute Target from Azure Machine Learning Pipeline\n", - "To use Databricks as a compute target from [Azure Machine Learning Pipeline](https://aka.ms/pl-concept), a [DatabricksStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.databricks_step.databricksstep?view=azure-ml-py) is used. This notebook demonstrates the use of DatabricksStep in Azure Machine Learning Pipeline.\n", - "\n", - "The notebook will show:\n", - "1. Running an arbitrary Databricks notebook that the customer has in their Databricks workspace\n", - "2. Running an arbitrary Python script that the customer has in DBFS\n", - "3. Running an arbitrary Python script that is available on the local computer (it will be uploaded to DBFS, and then run in Databricks)\n", - "4. Running a JAR job that the customer has in DBFS.\n", - "\n", - "## Before you begin:\n", - "\n", - "1. **Create an Azure Databricks workspace** in the same subscription where you have your Azure Machine Learning workspace. You will need details of this workspace later on to define DatabricksStep. [Click here](https://ms.portal.azure.com/#blade/HubsExtension/Resources/resourceType/Microsoft.Databricks%2Fworkspaces) for more information.\n", - "2. **Create PAT (access token)**: Manually create a Databricks access token at the Azure Databricks portal. See [this](https://docs.databricks.com/api/latest/authentication.html#generate-a-token) for more information.\n", - "3. **Add demo notebook to ADB**: This notebook has a sample you can use as is. Launch Azure Databricks attached to your Azure Machine Learning workspace and add a new notebook. \n", - "4. 
**Create/attach a Blob storage account** for use from ADB" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Add demo notebook to ADB Workspace\n", - "Copy and paste the code below to create a new notebook in your ADB workspace." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```python\n", - "# direct access\n", - "dbutils.widgets.get(\"myparam\")\n", - "p = getArgument(\"myparam\")\n", - "print(\"Param -\\'myparam':\")\n", - "print(p)\n", - "\n", - "dbutils.widgets.get(\"input\")\n", - "i = getArgument(\"input\")\n", - "print(\"Param -\\'input':\")\n", - "print(i)\n", - "\n", - "dbutils.widgets.get(\"output\")\n", - "o = getArgument(\"output\")\n", - "print(\"Param -\\'output':\")\n", - "print(o)\n", - "\n", - "n = i + \"/testdata.txt\"\n", - "df = spark.read.csv(n)\n", - "\n", - "display(df)\n", - "\n", - "data = [('value1', 'value2')]\n", - "df2 = spark.createDataFrame(data)\n", - "\n", - "z = o + \"/output.txt\"\n", - "df2.write.csv(z)\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Azure Machine Learning and Pipeline SDK-specific imports" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import azureml.core\n", - "from azureml.core.runconfig import JarLibrary\n", - "from azureml.core.compute import ComputeTarget, DatabricksCompute\n", - "from azureml.exceptions import ComputeTargetException\n", - "from azureml.core import Workspace, Experiment\n", - "from azureml.pipeline.core import Pipeline, PipelineData\n", - "from azureml.pipeline.steps import DatabricksStep\n", - "from azureml.core.datastore import Datastore\n", - "from azureml.data.data_reference import DataReference\n", - "\n", - "# Check core SDK version number\n", - "print(\"SDK version:\", azureml.core.VERSION)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Initialize Workspace\n", - "\n", - "Initialize a workspace object from persisted configuration. Make sure the config file is present at .\config.json." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ws = Workspace.from_config()\n", - "print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Attach Databricks compute target\n", - "Next, you need to add your Databricks workspace to Azure Machine Learning as a compute target and give it a name. 
You will use this name to refer to your Databricks workspace compute target inside Azure Machine Learning.\n", - "\n", - "- **Resource Group** - The resource group name of your Azure Machine Learning workspace\n", - "- **Databricks Workspace Name** - The workspace name of your Azure Databricks workspace\n", - "- **Databricks Access Token** - The access token you created in ADB\n", - "\n", - "**The Databricks workspace needs to be present in the same subscription as your AML workspace**" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Replace with your account info before running.\n", - " \n", - "db_compute_name=os.getenv(\"DATABRICKS_COMPUTE_NAME\", \"\") # Databricks compute name\n", - "db_resource_group=os.getenv(\"DATABRICKS_RESOURCE_GROUP\", \"\") # Databricks resource group\n", - "db_workspace_name=os.getenv(\"DATABRICKS_WORKSPACE_NAME\", \"\") # Databricks workspace name\n", - "db_access_token=os.getenv(\"DATABRICKS_ACCESS_TOKEN\", \"\") # Databricks access token\n", - " \n", - "try:\n", - " databricks_compute = DatabricksCompute(workspace=ws, name=db_compute_name)\n", - " print('Compute target {} already exists'.format(db_compute_name))\n", - "except ComputeTargetException:\n", - " print('Compute not found, will use the parameters below to attach a new one')\n", - " print('db_compute_name {}'.format(db_compute_name))\n", - " print('db_resource_group {}'.format(db_resource_group))\n", - " print('db_workspace_name {}'.format(db_workspace_name))\n", - " print('db_access_token {}'.format(db_access_token))\n", - " \n", - " config = DatabricksCompute.attach_configuration(\n", - " resource_group = db_resource_group,\n", - " workspace_name = db_workspace_name,\n", - " access_token= db_access_token)\n", - " databricks_compute=ComputeTarget.attach(ws, db_compute_name, config)\n", - " databricks_compute.wait_for_completion(True)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Data Connections with Inputs and Outputs\n", - "The DatabricksStep supports Azure Blob and ADLS for inputs and outputs. You will also need to define a [Secrets](https://docs.azuredatabricks.net/user-guide/secrets/index.html) scope to enable authentication to external data sources such as Blob and ADLS from Databricks.\n", - "\n", - "- Databricks documentation on [Azure Blob](https://docs.azuredatabricks.net/spark/latest/data-sources/azure/azure-storage.html)\n", - "- Databricks documentation on [ADLS](https://docs.databricks.com/spark/latest/data-sources/azure/azure-datalake.html)\n", - "\n", - "### Type of Data Access\n", - "Databricks allows you to interact with Azure Blob and ADLS in two ways.\n", - "- **Direct Access**: Databricks allows you to interact with Azure Blob or ADLS URIs directly. The input or output URIs will be mapped to a Databricks widget param in the Databricks notebook.\n", - "- **Mounting**: You will be supplied with additional parameters and secrets that will enable you to mount your ADLS or Azure Blob input or output location in your Databricks notebook."
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Direct Access: Python sample code\n", - "If you have a data reference named \"input\", it will represent the URI of the input and you can access it directly in the Databricks Python notebook like so:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```python\n", - "dbutils.widgets.get(\"input\")\n", - "y = getArgument(\"input\")\n", - "df = spark.read.csv(y)\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Mounting: Python sample code for Azure Blob\n", - "Given an Azure Blob data reference named \"input\", the following widget params will be made available in the Databricks notebook:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```python\n", - "# This contains the input URI\n", - "dbutils.widgets.get(\"input\")\n", - "myinput_uri = getArgument(\"input\")\n", - "\n", - "# How to get the input datastore name inside the ADB notebook\n", - "# This contains the name of a Databricks secret (in the predefined \"amlscope\" secret scope) \n", - "# that contains an access key or SAS for the Azure Blob input (this name is obtained by appending \n", - "# \"_blob_secretname\" to the name of the input). \n", - "dbutils.widgets.get(\"input_blob_secretname\") \n", - "myinput_blob_secretname = getArgument(\"input_blob_secretname\")\n", - "\n", - "# This contains the required configuration for mounting\n", - "dbutils.widgets.get(\"input_blob_config\")\n", - "myinput_blob_config = getArgument(\"input_blob_config\")\n", - "\n", - "# Usage\n", - "dbutils.fs.mount(\n", - " source = myinput_uri,\n", - " mount_point = \"/mnt/input\",\n", - " extra_configs = {myinput_blob_config:dbutils.secrets.get(scope = \"amlscope\", key = myinput_blob_secretname)})\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Mounting: Python sample code for ADLS\n", - "Given an ADLS data reference named \"input\", the following widget params will be made available in the Databricks notebook:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```python\n", - "# This contains the input URI\n", - "dbutils.widgets.get(\"input\") \n", - "myinput_uri = getArgument(\"input\")\n", - "\n", - "# This contains the client ID for the service principal \n", - "# that has access to the ADLS input\n", - "dbutils.widgets.get(\"input_adls_clientid\") \n", - "myinput_adls_clientid = getArgument(\"input_adls_clientid\")\n", - "\n", - "# This contains the name of a Databricks secret (in the predefined \"amlscope\" secret scope) \n", - "# that contains the secret for the above-mentioned service principal\n", - "dbutils.widgets.get(\"input_adls_secretname\") \n", - "myinput_adls_secretname = getArgument(\"input_adls_secretname\")\n", - "\n", - "# This contains the refresh URL for the mounting configs\n", - "dbutils.widgets.get(\"input_adls_refresh_url\") \n", - "myinput_adls_refresh_url = getArgument(\"input_adls_refresh_url\")\n", - "\n", - "# Usage \n", - "configs = {\"dfs.adls.oauth2.access.token.provider.type\": \"ClientCredential\",\n", - " \"dfs.adls.oauth2.client.id\": myinput_adls_clientid,\n", - " \"dfs.adls.oauth2.credential\": dbutils.secrets.get(scope = \"amlscope\", key = myinput_adls_secretname),\n", - " \"dfs.adls.oauth2.refresh.url\": myinput_adls_refresh_url}\n", - "\n", - "dbutils.fs.mount(\n", - " source = myinput_uri,\n", - " mount_point = \"/mnt/input\",\n", - " extra_configs = configs)\n", - "```" - 
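Once mounted, the location behaves like a regular DBFS path. The snippet below is a minimal sketch, assuming the `/mnt/input` mount point used in the samples above, for verifying a mount and cleaning it up afterwards so that re-running the notebook does not fail because the mount point is already in use:

```python
# List the active mounts to verify that the mount succeeded.
for mount in dbutils.fs.mounts():
    print(mount.mountPoint, '->', mount.source)

# Unmount when you are done; a rerun of dbutils.fs.mount fails
# if the mount point is still in use.
dbutils.fs.unmount("/mnt/input")
```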
] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Use Databricks from Azure Machine Learning Pipeline\n", - "To use Databricks as a compute target from Azure Machine Learning Pipeline, a DatabricksStep is used. Let's define a datasource (via DataReference) and intermediate data (via PipelineData) to be used in DatabricksStep." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Use the default blob storage\n", - "def_blob_store = Datastore(ws, \"workspaceblobstore\")\n", - "print('Datastore {} will be used'.format(def_blob_store.name))\n", - "\n", - "# We are uploading a sample file from the local directory to be used as a datasource\n", - "def_blob_store.upload_files(files=[\"./testdata.txt\"], target_path=\"dbtest\", overwrite=False)\n", - "\n", - "step_1_input = DataReference(datastore=def_blob_store, path_on_datastore=\"dbtest\",\n", - " data_reference_name=\"input\")\n", - "\n", - "step_1_output = PipelineData(\"output\", datastore=def_blob_store)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Add a DatabricksStep\n", - "Adds a Databricks notebook as a step in a Pipeline.\n", - "- ***name:** Name of the module\n", - "- **inputs:** List of input connections for data consumed by this step. Fetch this inside the notebook using dbutils.widgets.get(\"input\")\n", - "- **outputs:** List of output port definitions for outputs produced by this step. Fetch this inside the notebook using dbutils.widgets.get(\"output\")\n", - "- **existing_cluster_id:** Cluster ID of an existing interactive cluster on the Databricks workspace. If you are providing this, do not provide any of the parameters below that are used to create a new cluster, such as spark_version, node_type, etc.\n", - "- **spark_version:** Version of Spark for the Databricks run cluster. Default value: 4.0.x-scala2.11\n", - "- **node_type:** Azure VM node type for the Databricks run cluster. Default value: Standard_D3_v2\n", - "- **num_workers:** Specifies a static number of workers for the Databricks run cluster\n", - "- **min_workers:** Specifies a min number of workers to use for auto-scaling the Databricks run cluster\n", - "- **max_workers:** Specifies a max number of workers to use for auto-scaling the Databricks run cluster\n", - "- **spark_env_variables:** Spark environment variables for the Databricks run cluster (dictionary of {str:str}). Default value: {'PYSPARK_PYTHON': '/databricks/python3/bin/python3'}\n", - "- **notebook_path:** Path to the notebook in the Databricks instance. If you are providing this, do not provide Python script related parameters or JAR related parameters.\n", - "- **notebook_params:** Parameters for the Databricks notebook (dictionary of {str:str}). Fetch this inside the notebook using dbutils.widgets.get(\"myparam\")\n", - "- **python_script_path:** The path to the Python script in DBFS or S3. If you are providing this, do not provide python_script_name, which is used for uploading a script from the local machine.\n", - "- **python_script_params:** Parameters for the Python script (list of str)\n", - "- **main_class_name:** The name of the entry point in a JAR module. If you are providing this, do not provide any Python script or notebook related parameters.\n", - "- **jar_params:** Parameters for the JAR module (list of str)\n", - "- **python_script_name:** Name of a Python script on your local machine (relative to source_directory). 
If you are providing this, do not provide python_script_path, which is used to execute a remote Python script, or any of the JAR or notebook related parameters.\n", - "- **source_directory:** Folder that contains the script and other files\n", - "- **hash_paths:** List of paths to hash to detect a change in source_directory (the script file is always hashed)\n", - "- **run_name:** Name in Databricks for this run\n", - "- **timeout_seconds:** Timeout for the Databricks run\n", - "- **runconfig:** Runconfig to use. Either pass runconfig or each library type as a separate parameter, but do not mix the two\n", - "- **maven_libraries:** Maven libraries for the Databricks run\n", - "- **pypi_libraries:** PyPi libraries for the Databricks run\n", - "- **egg_libraries:** Egg libraries for the Databricks run\n", - "- **jar_libraries:** JAR libraries for the Databricks run\n", - "- **rcran_libraries:** RCran libraries for the Databricks run\n", - "- **compute_target:** Azure Databricks compute\n", - "- **allow_reuse:** Whether the step should reuse previous results when run with the same settings/inputs\n", - "- **version:** Optional version tag to denote a change in functionality for the step\n", - "\n", - "\\* *denotes required fields* \n", - "*You must provide exactly one of the num_workers or min_workers and max_workers parameters* \n", - "*You must provide exactly one of the databricks_compute or databricks_compute_name parameters*\n", - "\n", - "## Use runconfig to specify library dependencies\n", - "You can use a runconfig to specify the library dependencies for your cluster in Databricks. The runconfig will contain a databricks section as follows:\n", - "\n", - "```yaml\n", - "environment:\n", - "# Databricks details\n", - " databricks:\n", - "# List of maven libraries.\n", - " mavenLibraries:\n", - " - coordinates: org.jsoup:jsoup:1.7.1\n", - " repo: ''\n", - " exclusions:\n", - " - slf4j:slf4j\n", - " - '*:hadoop-client'\n", - "# List of PyPi libraries\n", - " pypiLibraries:\n", - " - package: beautifulsoup4\n", - " repo: ''\n", - "# List of RCran libraries\n", - " rcranLibraries:\n", - " -\n", - "# Coordinates.\n", - " package: ada\n", - "# Repo\n", - " repo: http://cran.us.r-project.org\n", - "# List of JAR libraries\n", - " jarLibraries:\n", - " -\n", - "# Coordinates.\n", - " library: dbfs:/mnt/libraries/library.jar\n", - "# List of Egg libraries\n", - " eggLibraries:\n", - " -\n", - "# Coordinates.\n", - " library: dbfs:/mnt/libraries/library.egg\n", - "```\n", - "\n", - "You can then create a RunConfiguration object using this file and pass it as the runconfig parameter to DatabricksStep.\n", - "```python\n", - "from azureml.core.runconfig import RunConfiguration\n", - "\n", - "# RunConfiguration.load is a static method; it returns the loaded configuration.\n", - "runconfig = RunConfiguration.load(path='', name='')\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 1. Running the demo notebook already added to the Databricks workspace\n", - "Create a notebook in the Azure Databricks workspace, and provide the path to that notebook as the value associated with the environment variable \"DATABRICKS_NOTEBOOK_PATH\". 
This will then set the variable `notebook_path` when you run the code cell below:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "databricksstep-remarks-sample" - ] - }, - "outputs": [], - "source": [ - "notebook_path=os.getenv(\"DATABRICKS_NOTEBOOK_PATH\", \"\") # Databricks notebook path\n", - "\n", - "dbNbStep = DatabricksStep(\n", - " name=\"DBNotebookInWS\",\n", - " inputs=[step_1_input],\n", - " outputs=[step_1_output],\n", - " num_workers=1,\n", - " notebook_path=notebook_path,\n", - " notebook_params={'myparam': 'testparam'},\n", - " run_name='DB_Notebook_demo',\n", - " compute_target=databricks_compute,\n", - " allow_reuse=True\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Build and submit the Experiment" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "steps = [dbNbStep]\n", - "pipeline = Pipeline(workspace=ws, steps=steps)\n", - "pipeline_run = Experiment(ws, 'DB_Notebook_demo').submit(pipeline)\n", - "pipeline_run.wait_for_completion()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### View Run Details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from azureml.widgets import RunDetails\n", - "RunDetails(pipeline_run).show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 2. Running a Python script from DBFS\n", - "This shows how to run a Python script that is stored in DBFS.\n", - "\n", - "To complete this, you will first need to upload the Python script from your local machine to DBFS using the [CLI](https://docs.azuredatabricks.net/user-guide/dbfs-databricks-file-system.html). The CLI command is given below:\n", - "\n", - "```\n", - "dbfs cp ./train-db-dbfs.py dbfs:/train-db-dbfs.py\n", - "```\n", - "\n", - "The code in the cell below assumes that you have completed the previous step of uploading the script `train-db-dbfs.py` to the root folder in DBFS." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "python_script_path = os.getenv(\"DATABRICKS_PYTHON_SCRIPT_PATH\", \"\") # Databricks python script path\n", - "\n", - "dbPythonInDbfsStep = DatabricksStep(\n", - " name=\"DBPythonInDBFS\",\n", - " inputs=[step_1_input],\n", - " num_workers=1,\n", - " python_script_path=python_script_path,\n", - " python_script_params=['--input_data'],\n", - " run_name='DB_Python_demo',\n", - " compute_target=databricks_compute,\n", - " allow_reuse=True\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Build and submit the Experiment" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "steps = [dbPythonInDbfsStep]\n", - "pipeline = Pipeline(workspace=ws, steps=steps)\n", - "pipeline_run = Experiment(ws, 'DB_Python_demo').submit(pipeline)\n", - "pipeline_run.wait_for_completion()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### View Run Details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from azureml.widgets import RunDetails\n", - "RunDetails(pipeline_run).show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 3. 
Running a Python script in Databricks that is currently on the local computer\n", - "To run a Python script that is currently on your local computer, follow the instructions below. \n", - "\n", - "The code in the cell below assumes that you have `train-db-local.py` in the current working directory (the `source_directory`).\n", - "\n", - "In this case, the Python script will first be uploaded to DBFS, and then the script will be run in Databricks." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "python_script_name = \"train-db-local.py\"\n", - "source_directory = \".\"\n", - "\n", - "dbPythonInLocalMachineStep = DatabricksStep(\n", - " name=\"DBPythonInLocalMachine\",\n", - " inputs=[step_1_input],\n", - " num_workers=1,\n", - " python_script_name=python_script_name,\n", - " source_directory=source_directory,\n", - " run_name='DB_Python_Local_demo',\n", - " compute_target=databricks_compute,\n", - " allow_reuse=True\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Build and submit the Experiment" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "steps = [dbPythonInLocalMachineStep]\n", - "pipeline = Pipeline(workspace=ws, steps=steps)\n", - "pipeline_run = Experiment(ws, 'DB_Python_Local_demo').submit(pipeline)\n", - "pipeline_run.wait_for_completion()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### View Run Details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from azureml.widgets import RunDetails\n", - "RunDetails(pipeline_run).show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 4. Running a JAR job that is already added in DBFS\n", - "To run a JAR job that is already uploaded to DBFS, follow the instructions below. You will first upload the JAR file to DBFS using the [CLI](https://docs.azuredatabricks.net/user-guide/dbfs-databricks-file-system.html).\n", - "\n", - "The code in the cell below assumes that you have uploaded `train-db-dbfs.jar` to the root folder in DBFS. 
You can upload `train-db-dbfs.jar` to the root folder in DBFS using the following command line, so that you can use `jar_library_dbfs_path = \"dbfs:/train-db-dbfs.jar\"`:\n", - "\n", - "```\n", - "dbfs cp ./train-db-dbfs.jar dbfs:/train-db-dbfs.jar\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "main_jar_class_name = \"com.microsoft.aeva.Main\"\n", - "jar_library_dbfs_path = os.getenv(\"DATABRICKS_JAR_LIB_PATH\", \"\") # Databricks jar library path\n", - "\n", - "dbJarInDbfsStep = DatabricksStep(\n", - " name=\"DBJarInDBFS\",\n", - " inputs=[step_1_input],\n", - " num_workers=1,\n", - " main_class_name=main_jar_class_name,\n", - " jar_params=['arg1', 'arg2'],\n", - " run_name='DB_JAR_demo',\n", - " jar_libraries=[JarLibrary(jar_library_dbfs_path)],\n", - " compute_target=databricks_compute,\n", - " allow_reuse=True\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Build and submit the Experiment" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "steps = [dbJarInDbfsStep]\n", - "pipeline = Pipeline(workspace=ws, steps=steps)\n", - "pipeline_run = Experiment(ws, 'DB_JAR_demo').submit(pipeline)\n", - "pipeline_run.wait_for_completion()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### View Run Details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from azureml.widgets import RunDetails\n", - "RunDetails(pipeline_run).show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Next: ADLA as a Compute Target\n", - "To use ADLA as a compute target from Azure Machine Learning Pipeline, an AdlaStep is used. This [notebook](https://aka.ms/pl-adla) demonstrates the use of AdlaStep in Azure Machine Learning Pipeline." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/azure-databricks/databricks-as-remote-compute-target/aml-pipelines-use-databricks-as-compute-target.png)" - ] - } - ], - "metadata": { - "authors": [ - { - "name": "diray" - } - ], - "kernelspec": { - "display_name": "Python 3.6", - "language": "python", - "name": "python36" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.2" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} \ No newline at end of file diff --git a/how-to-use-azureml/azure-databricks/databricks-as-remote-compute-target/testdata.txt b/how-to-use-azureml/azure-databricks/databricks-as-remote-compute-target/testdata.txt deleted file mode 100644 index 2069d6e5..00000000 --- a/how-to-use-azureml/azure-databricks/databricks-as-remote-compute-target/testdata.txt +++ /dev/null @@ -1 +0,0 @@ -Test1 diff --git a/how-to-use-azureml/azure-databricks/databricks-as-remote-compute-target/train-db-dbfs.py b/how-to-use-azureml/azure-databricks/databricks-as-remote-compute-target/train-db-dbfs.py deleted file mode 100644 index 99b511af..00000000 --- a/how-to-use-azureml/azure-databricks/databricks-as-remote-compute-target/train-db-dbfs.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. -# Licensed under the MIT license. 
- -print("In train.py") -print("As a data scientist, this is where I use my training code.") diff --git a/how-to-use-azureml/azure-databricks/databricks-as-remote-compute-target/train-db-local.py b/how-to-use-azureml/azure-databricks/databricks-as-remote-compute-target/train-db-local.py deleted file mode 100644 index 99b511af..00000000 --- a/how-to-use-azureml/azure-databricks/databricks-as-remote-compute-target/train-db-local.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. -# Licensed under the MIT license. - -print("In train.py") -print("As a data scientist, this is where I use my training code.") diff --git a/how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.ipynb b/how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.ipynb index 54687a68..e907716a 100644 --- a/how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.ipynb +++ b/how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.ipynb @@ -518,7 +518,7 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.contrib.interpret.explanation.explanation_client import ExplanationClient\n", + "from azureml.interpret import ExplanationClient\n", "\n", "# Get model explanation data\n", "client = ExplanationClient.from_run(run)\n", diff --git a/how-to-use-azureml/explain-model/azure-integration/remote-explanation/train_explain.py b/how-to-use-azureml/explain-model/azure-integration/remote-explanation/train_explain.py index c3dff4e6..4b2879c0 100644 --- a/how-to-use-azureml/explain-model/azure-integration/remote-explanation/train_explain.py +++ b/how-to-use-azureml/explain-model/azure-integration/remote-explanation/train_explain.py @@ -4,7 +4,7 @@ from sklearn import datasets from sklearn.linear_model import Ridge from interpret.ext.blackbox import TabularExplainer -from azureml.contrib.interpret.explanation.explanation_client import ExplanationClient +from azureml.interpret import ExplanationClient from sklearn.model_selection import train_test_split from azureml.core.run import Run import joblib diff --git a/how-to-use-azureml/explain-model/azure-integration/run-history/save-retrieve-explanations-run-history.ipynb b/how-to-use-azureml/explain-model/azure-integration/run-history/save-retrieve-explanations-run-history.ipynb index 678484bf..11d291ff 100644 --- a/how-to-use-azureml/explain-model/azure-integration/run-history/save-retrieve-explanations-run-history.ipynb +++ b/how-to-use-azureml/explain-model/azure-integration/run-history/save-retrieve-explanations-run-history.ipynb @@ -451,7 +451,7 @@ "source": [ "import azureml.core\n", "from azureml.core import Workspace, Experiment\n", - "from azureml.contrib.interpret.explanation.explanation_client import ExplanationClient\n", + "from azureml.interpret import ExplanationClient\n", "# Check core SDK version number\n", "print(\"SDK version:\", azureml.core.VERSION)" ] diff --git a/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.ipynb b/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.ipynb index cce857ab..5c56654e 100644 --- a/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.ipynb +++ b/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.ipynb @@ 
-368,7 +368,7 @@ "outputs": [], "source": [ "# Retrieve global explanation for visualization\n", - "from azureml.contrib.interpret.explanation.explanation_client import ExplanationClient\n", + "from azureml.interpret import ExplanationClient\n", "\n", "# get model explanation data\n", "client = ExplanationClient.from_run(run)\n", diff --git a/how-to-use-azureml/explain-model/azure-integration/scoring-time/train_explain.py b/how-to-use-azureml/explain-model/azure-integration/scoring-time/train_explain.py index f3629b98..b0d96bf4 100644 --- a/how-to-use-azureml/explain-model/azure-integration/scoring-time/train_explain.py +++ b/how-to-use-azureml/explain-model/azure-integration/scoring-time/train_explain.py @@ -15,7 +15,7 @@ from sklearn_pandas import DataFrameMapper from azureml.core.run import Run from interpret.ext.blackbox import TabularExplainer -from azureml.contrib.interpret.explanation.explanation_client import ExplanationClient +from azureml.interpret import ExplanationClient from azureml.interpret.scoring.scoring_explainer import LinearScoringExplainer, save OUTPUT_DIR = './outputs/' diff --git a/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb b/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb index 94e43a6a..c1ae0a81 100644 --- a/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb @@ -278,9 +278,6 @@ "# Enable Docker\n", "aml_run_config.environment.docker.enabled = True\n", "\n", - "# Set Docker base image to the default CPU-based image\n", - "aml_run_config.environment.docker.base_image = \"mcr.microsoft.com/azureml/base:0.2.1\"\n", - "\n", "# Use conda_dependencies.yml to create a conda environment in the Docker image for execution\n", "aml_run_config.environment.python.user_managed_dependencies = False\n", "\n", diff --git a/how-to-use-azureml/ml-frameworks/README.md b/how-to-use-azureml/ml-frameworks/README.md new file mode 100644 index 00000000..9498afaa --- /dev/null +++ b/how-to-use-azureml/ml-frameworks/README.md @@ -0,0 +1,10 @@ +## Training and deployment examples with ML frameworks +These sample notebooks show you how to train and deploy models with popular machine learning frameworks using Azure Machine Learning. + +1. [Scikit-learn](scikit-learn): Train, hyperparameter tune and deploy scikit-learn models. +2. [PyTorch](pytorch): Train, hyperparameter tune and deploy PyTorch models. Distributed training with PyTorch. +3. [TensorFlow](tensorflow): Train, hyperparameter tune and deploy TensorFlow models. Distributed training with TensorFlow. +4. [Keras](keras): Train, hyperparameter tune and deploy Keras models. +5. [Chainer](chainer): Train, hyperparameter tune and deploy Chainer models. Distributed training with Chainer. 
+ + ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/README.png) diff --git a/how-to-use-azureml/ml-frameworks/chainer/training/distributed-chainer/distributed-chainer.ipynb b/how-to-use-azureml/ml-frameworks/chainer/distributed-chainer/distributed-chainer.ipynb similarity index 83% rename from how-to-use-azureml/ml-frameworks/chainer/training/distributed-chainer/distributed-chainer.ipynb rename to how-to-use-azureml/ml-frameworks/chainer/distributed-chainer/distributed-chainer.ipynb index 542d5a73..25f8eaa2 100644 --- a/how-to-use-azureml/ml-frameworks/chainer/training/distributed-chainer/distributed-chainer.ipynb +++ b/how-to-use-azureml/ml-frameworks/chainer/distributed-chainer/distributed-chainer.ipynb @@ -13,7 +13,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/chainer/training/distributed-chainer/distributed-chainer.png)" + "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/chainer/distributed-chainer/distributed-chainer.png)" ] }, { @@ -29,7 +29,7 @@ "metadata": {}, "source": [ "## Prerequisites\n", - "* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`" + "* If you are using an Azure Machine Learning compute instance, you are all set. Otherwise, go through the [Configuration](../../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`" ] }, { @@ -217,8 +217,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create a Chainer estimator\n", - "The Azure ML SDK's Chainer estimator enables you to easily submit Chainer training jobs for both single-node and distributed runs." + "### Create an environment\n", + "\n", + "In this tutorial, we will use one of the Azure ML Chainer curated environments for training." ] }, { @@ -227,21 +228,36 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.train.dnn import Chainer, Mpi\n", + "from azureml.core import Environment\n", "\n", - "estimator = Chainer(source_directory=project_folder,\n", - " compute_target=compute_target,\n", - " entry_script='train_mnist.py',\n", - " node_count=2,\n", - " distributed_training=Mpi(),\n", - " use_gpu=True)" + "chainer_env = Environment.get(ws, name='AzureML-Chainer-5.1.0-GPU')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The above code specifies that we will run our training script on `2` nodes, with one worker per node. In order to execute a distributed run using MPI, you must provide the argument `distributed_backend=Mpi()`. To specify `i` workers per node, you must provide the argument `distributed_backend=Mpi(process_count_per_node=i)`.Using this estimator with these settings, Chainer and its dependencies will be installed for you. However, if your script also uses other packages, make sure to install them via the `Chainer` constructor's `pip_packages` or `conda_packages` parameters." 
+ "### Configure your training job\n", + "\n", + "Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on.\n", + "\n", + "In order to execute a distributed run using MPI, you must create an `MpiConfiguration` object and specify it to the `distributed_job_config` parameter. The below code will configure a 2-node distributed job. If you would also like to run multiple processes per node (i.e. if your cluster SKU has multiple GPUs), additionally specify the `process_count_per_node` parameter in MpiConfiguration." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.core import ScriptRunConfig\n", + "from azureml.core.runconfig import MpiConfiguration\n", + "\n", + "src = ScriptRunConfig(source_directory=project_folder,\n", + " script='train_mnist.py',\n", + " compute_target=compute_target,\n", + " environment=chainer_env,\n", + " distributed_job_config=MpiConfiguration(node_count=2))" ] }, { @@ -249,7 +265,7 @@ "metadata": {}, "source": [ "### Submit job\n", - "Run your experiment by submitting your estimator object. Note that this call is asynchronous." + "Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous." ] }, { @@ -258,7 +274,7 @@ "metadata": {}, "outputs": [], "source": [ - "run = experiment.submit(estimator)\n", + "run = experiment.submit(src)\n", "print(run)" ] }, @@ -297,6 +313,22 @@ "name": "ninhu" } ], + "category": "training", + "compute": [ + "AML Compute" + ], + "datasets": [ + "MNIST" + ], + "deployment": [ + "None" + ], + "exclude_from_index": false, + "framework": [ + "Chainer" + ], + "friendly_name": "Distributed Training with Chainer", + "index_order": 1, "kernelspec": { "display_name": "Python 3.6", "language": "python", @@ -312,28 +344,12 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.6" + "version": "3.7.7" }, - "friendly_name": "Distributed Training with Chainer", - "exclude_from_index": false, - "index_order": 1, - "category": "training", - "task": "Use the Chainer estimator to perform distributed training", - "datasets": [ - "MNIST" - ], - "compute": [ - "AML Compute" - ], - "deployment": [ - "None" - ], - "framework": [ - "Chainer" - ], "tags": [ "None" - ] + ], + "task": "Use the Chainer estimator to perform distributed training" }, "nbformat": 4, "nbformat_minor": 2 diff --git a/how-to-use-azureml/ml-frameworks/chainer/training/distributed-chainer/distributed-chainer.yml b/how-to-use-azureml/ml-frameworks/chainer/distributed-chainer/distributed-chainer.yml similarity index 100% rename from how-to-use-azureml/ml-frameworks/chainer/training/distributed-chainer/distributed-chainer.yml rename to how-to-use-azureml/ml-frameworks/chainer/distributed-chainer/distributed-chainer.yml diff --git a/how-to-use-azureml/ml-frameworks/chainer/training/distributed-chainer/train_mnist.py b/how-to-use-azureml/ml-frameworks/chainer/distributed-chainer/train_mnist.py similarity index 100% rename from how-to-use-azureml/ml-frameworks/chainer/training/distributed-chainer/train_mnist.py rename to how-to-use-azureml/ml-frameworks/chainer/distributed-chainer/train_mnist.py diff --git a/how-to-use-azureml/ml-frameworks/chainer/deployment/train-hyperparameter-tune-deploy-with-chainer/chainer_mnist.py 
b/how-to-use-azureml/ml-frameworks/chainer/train-hyperparameter-tune-deploy-with-chainer/chainer_mnist.py similarity index 100% rename from how-to-use-azureml/ml-frameworks/chainer/deployment/train-hyperparameter-tune-deploy-with-chainer/chainer_mnist.py rename to how-to-use-azureml/ml-frameworks/chainer/train-hyperparameter-tune-deploy-with-chainer/chainer_mnist.py diff --git a/how-to-use-azureml/ml-frameworks/chainer/deployment/train-hyperparameter-tune-deploy-with-chainer/chainer_score.py b/how-to-use-azureml/ml-frameworks/chainer/train-hyperparameter-tune-deploy-with-chainer/chainer_score.py similarity index 100% rename from how-to-use-azureml/ml-frameworks/chainer/deployment/train-hyperparameter-tune-deploy-with-chainer/chainer_score.py rename to how-to-use-azureml/ml-frameworks/chainer/train-hyperparameter-tune-deploy-with-chainer/chainer_score.py diff --git a/how-to-use-azureml/ml-frameworks/chainer/deployment/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.ipynb b/how-to-use-azureml/ml-frameworks/chainer/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.ipynb similarity index 92% rename from how-to-use-azureml/ml-frameworks/chainer/deployment/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.ipynb rename to how-to-use-azureml/ml-frameworks/chainer/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.ipynb index 94ce595e..cdc2db91 100644 --- a/how-to-use-azureml/ml-frameworks/chainer/deployment/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.ipynb +++ b/how-to-use-azureml/ml-frameworks/chainer/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.ipynb @@ -13,7 +13,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/chainer/deployment/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.png)" + "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/chainer/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.png)" ] }, { @@ -30,7 +30,7 @@ "metadata": {}, "source": [ "## Prerequisites\n", - "* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`" + "* If you are using an Azure Machine Learning Notebook VM, you are all set. 
Otherwise, go through the [Configuration](../../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`" ] }, { @@ -130,8 +130,7 @@ " print('Found existing compute target.')\n", "except ComputeTargetException:\n", " print('Creating a new compute target...')\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', \n", - " min_nodes=2,\n", + " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',\n", " max_nodes=4)\n", "\n", " # create the cluster\n", @@ -245,41 +244,68 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create a Chainer estimator\n", - "The Azure ML SDK's Chainer estimator enables you to easily submit Chainer training jobs for both single-node and distributed runs. The following code will define a single-node Chainer job." + "### Create an environment\n", + "\n", + "Define a conda environment YAML file with your training script dependencies and create an Azure ML environment." ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [ - "dnn-chainer-remarks-sample" - ] - }, + "metadata": {}, "outputs": [], "source": [ - "from azureml.train.dnn import Chainer\n", + "%%writefile conda_dependencies.yml\n", "\n", - "script_params = {\n", - " '--epochs': 10,\n", - " '--batchsize': 128,\n", - " '--output_dir': './outputs'\n", - "}\n", + "channels:\n", + "- conda-forge\n", + "dependencies:\n", + "- python=3.6.2\n", + "- pip:\n", + " - azureml-defaults\n", + " - chainer==5.1.0\n", + " - cupy-cuda90==5.1.0\n", + " - mpi4py==3.0.0\n", + " - pytest" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.core import Environment\n", "\n", - "estimator = Chainer(source_directory=project_folder, \n", - " script_params=script_params,\n", - " compute_target=compute_target,\n", - " pip_packages=['numpy', 'pytest'],\n", - " entry_script='chainer_mnist.py',\n", - " use_gpu=True)" + "chainer_env = Environment.from_conda_specification(name = 'chainer-5.1.0-gpu', file_path = './conda_dependencies.yml')\n", + "\n", + "# Specify a GPU base image\n", + "chainer_env.docker.enabled = True\n", + "chainer_env.docker.base_image = 'mcr.microsoft.com/azureml/intelmpi2018.3-cuda9.0-cudnn7-ubuntu16.04'" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The `script_params` parameter is a dictionary containing the command-line arguments to your training script `entry_script`. To leverage the Azure VM's GPU for training, we set `use_gpu=True`." + "### Configure your training job\n", + "\n", + "Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.core import ScriptRunConfig\n", + "\n", + "src = ScriptRunConfig(source_directory=project_folder,\n", + " script='chainer_mnist.py',\n", + " arguments=['--epochs', 10, '--batchsize', 128, '--output_dir', './outputs'],\n", + " compute_target=compute_target,\n", + " environment=chainer_env)" ] }, { @@ -287,7 +313,7 @@ "metadata": {}, "source": [ "### Submit job\n", - "Run your experiment by submitting your estimator object. Note that this call is asynchronous." + "Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous." 
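Because submission is asynchronous, `experiment.submit` returns the run handle immediately. A minimal sketch, assuming the `run` object returned by `experiment.submit(src)` in the next cell, for blocking until training finishes (useful in a CI job, for example):

```python
# Block until the run completes, streaming driver logs to stdout.
run.wait_for_completion(show_output=True)
```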
] }, { @@ -296,7 +322,7 @@ "metadata": {}, "outputs": [], "source": [ - "run = experiment.submit(estimator)" + "run = experiment.submit(src)" ] }, { @@ -366,13 +392,13 @@ " }\n", ")\n", "\n", - "hyperdrive_config = HyperDriveConfig(estimator=estimator,\n", + "hyperdrive_config = HyperDriveConfig(run_config=src,\n", " hyperparameter_sampling=param_sampling, \n", " primary_metric_name='Accuracy',\n", " policy=BanditPolicy(evaluation_interval=1, slack_factor=0.1, delay_evaluation=3),\n", " primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,\n", " max_total_runs=8,\n", - " max_concurrent_runs=4)\n" + " max_concurrent_runs=4)" ] }, { @@ -750,7 +776,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.6" + "version": "3.7.7" }, "tags": [ "None" diff --git a/how-to-use-azureml/ml-frameworks/chainer/deployment/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.yml b/how-to-use-azureml/ml-frameworks/chainer/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.yml similarity index 100% rename from how-to-use-azureml/ml-frameworks/chainer/deployment/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.yml rename to how-to-use-azureml/ml-frameworks/chainer/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.yml diff --git a/how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/keras_mnist.py b/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/keras_mnist.py similarity index 100% rename from how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/keras_mnist.py rename to how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/keras_mnist.py diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/deployment/train-hyperparameter-tune-deploy-with-tensorflow/nn.png b/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/nn.png similarity index 100% rename from how-to-use-azureml/ml-frameworks/tensorflow/deployment/train-hyperparameter-tune-deploy-with-tensorflow/nn.png rename to how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/nn.png diff --git a/how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb b/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb similarity index 90% rename from how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb rename to how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb index 85d5cd54..d5c5abc6 100644 --- a/how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb +++ b/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb @@ -13,7 +13,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - 
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.png)" + "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.png)" ] }, { @@ -33,7 +33,7 @@ "\n", "## Prerequisite:\n", "* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n", - "* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) to:\n", + "* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../../configuration.ipynb) to:\n", " * install the AML SDK\n", " * create a workspace and its configuration file (`config.json`)\n", "* For local scoring test, you will also need to have `tensorflow` and `keras` installed in the current Jupyter kernel." @@ -411,9 +411,54 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Create TensorFlow estimator & add Keras\n", - "Next, we construct an `azureml.train.dnn.TensorFlow` estimator object, use the `gpu-cluster` as compute target, and pass the mount-point of the datastore to the training code as a parameter.\n", - "The TensorFlow estimator is providing a simple way of launching a TensorFlow training job on a compute target. It will automatically provide a docker image that has TensorFlow installed. In this case, we add `keras` package (for the Keras framework obviously), and `matplotlib` package for plotting a \"Loss vs. Accuracy\" chart and record it in run history." + "## Create an environment\n", + "\n", + "Define a conda environment YAML file with your training script dependencies, which include TensorFlow, Keras and matplotlib, and create an Azure ML environment." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%writefile conda_dependencies.yml\n", + "\n", + "channels:\n", + "- conda-forge\n", + "dependencies:\n", + "- python=3.6.2\n", + "- pip:\n", + " - azureml-defaults==1.13.0\n", + " - tensorflow-gpu==2.0.0\n", + " - keras<=2.3.1\n", + " - matplotlib" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.core import Environment\n", + "\n", + "keras_env = Environment.from_conda_specification(name = 'keras-2.3.1', file_path = './conda_dependencies.yml')\n", + "\n", + "# Specify a GPU base image\n", + "keras_env.docker.enabled = True\n", + "keras_env.docker.base_image = 'mcr.microsoft.com/azureml/openmpi3.1.2-cuda10.0-cudnn7-ubuntu18.04'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Configure the training job\n", + "\n", + "Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on.\n", + "\n", + "Note that we are specifying a DatasetConsumptionConfig for our FileDataset as an argument to the training script. 
Azure ML will resolve this DatasetConsumptionConfig to the mount-point of the backing datastore, which we access from the training script." ] }, { @@ -434,22 +479,19 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.train.dnn import TensorFlow\n", + "from azureml.core import ScriptRunConfig\n", "\n", - "script_params = {\n", - " '--data-folder': dataset.as_named_input('mnist').as_mount(),\n", - " '--batch-size': 50,\n", - " '--first-layer-neurons': 300,\n", - " '--second-layer-neurons': 100,\n", - " '--learning-rate': 0.001\n", - "}\n", + "args = ['--data-folder', dataset.as_named_input('mnist').as_mount(),\n", + " '--batch-size', 50,\n", + " '--first-layer-neurons', 300,\n", + " '--second-layer-neurons', 100,\n", + " '--learning-rate', 0.001]\n", "\n", - "est = TensorFlow(source_directory=script_folder,\n", - " script_params=script_params,\n", - " compute_target=compute_target, \n", - " entry_script='keras_mnist.py',\n", - " framework_version='2.0', \n", - " pip_packages=['keras<=2.3.1','azureml-dataset-runtime[pandas,fuse]','matplotlib'])" + "src = ScriptRunConfig(source_directory=script_folder,\n", + " script='keras_mnist.py',\n", + " arguments=args,\n", + " compute_target=compute_target,\n", + " environment=keras_env)" ] }, { @@ -457,7 +499,7 @@ "metadata": {}, "source": [ "## Submit job to run\n", - "Submit the estimator to the Azure ML experiment to kick off the execution." + "Submit the ScriptRunConfig to the Azure ML experiment to kick off the execution." ] }, { @@ -466,7 +508,7 @@ "metadata": {}, "outputs": [], "source": [ - "run = exp.submit(est)" + "run = exp.submit(src)" ] }, { @@ -475,7 +517,7 @@ "source": [ "### Monitor the Run\n", "As the Run is executed, it will go through the following stages:\n", - "1. Preparing: A docker image is created matching the Python environment specified by the TensorFlow estimator and it will be uploaded to the workspace's Azure Container Registry. This step will only happen once for each Python environment -- the container will then be cached for subsequent runs. Creating and uploading the image takes about **5 minutes**. While the job is preparing, logs are streamed to the run history and can be viewed to monitor the progress of the image creation.\n", + "1. Preparing: A docker image is created matching the Python environment specified by the Azure ML environment, and it will be uploaded to the workspace's Azure Container Registry. This step will only happen once for each Python environment -- the container will then be cached for subsequent runs. Creating and uploading the image takes about **5 minutes**. While the job is preparing, logs are streamed to the run history and can be viewed to monitor the progress of the image creation.\n", "\n", "2. Scaling: If the compute needs to be scaled up (i.e. the AmlCompute cluster requires more nodes to execute the run than currently available), the cluster will attempt to scale up in order to make the required amount of nodes available. Scaling typically takes about **5 minutes**.\n", "\n", @@ -708,7 +750,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Next, we will create a new estimator without the above parameters since they will be passed in later by Hyperdrive configuration. Note we still need to keep the `data-folder` parameter since that's not a hyperparamter we will sweep." + "Next, we will create a new ScriptRunConfig without the above arguments since they will be passed in later by our Hyperdrive configuration. 
Note we still need to keep the `data-folder` parameter since that's not a hyperparameter we will sweep." ] }, { @@ -717,12 +759,13 @@ "metadata": {}, "outputs": [], "source": [ - "est = TensorFlow(source_directory=script_folder,\n", - " script_params={'--data-folder': dataset.as_named_input('mnist').as_mount()},\n", - " compute_target=compute_target,\n", - " entry_script='keras_mnist.py',\n", - " framework_version='2.0',\n", - " pip_packages=['keras<=2.3.1','azureml-dataset-runtime[pandas,fuse]','matplotlib'])" + "args = ['--data-folder', dataset.as_named_input('mnist').as_mount()]\n", + "\n", + "src = ScriptRunConfig(source_directory=script_folder,\n", + " script='keras_mnist.py',\n", + " arguments=args,\n", + " compute_target=compute_target,\n", + " environment=keras_env)" ] }, { @@ -754,13 +797,13 @@ "metadata": {}, "outputs": [], "source": [ - "hdc = HyperDriveConfig(estimator=est, \n", - " hyperparameter_sampling=ps, \n", - " policy=policy, \n", - " primary_metric_name='Accuracy', \n", - " primary_metric_goal=PrimaryMetricGoal.MAXIMIZE, \n", - " max_total_runs=20,\n", - " max_concurrent_runs=4)" + "hyperdrive_config = HyperDriveConfig(run_config=src,\n", + " hyperparameter_sampling=ps,\n", + " policy=policy,\n", + " primary_metric_name='Accuracy',\n", + " primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,\n", + " max_total_runs=20,\n", + " max_concurrent_runs=4)" ] }, { @@ -776,7 +819,7 @@ "metadata": {}, "outputs": [], "source": [ - "hdr = exp.submit(config=hdc)" + "hyperdrive_run = exp.submit(config=hyperdrive_config)" ] }, { @@ -792,7 +835,7 @@ "metadata": {}, "outputs": [], "source": [ - "RunDetails(hdr).show()" + "RunDetails(hyperdrive_run).show()" ] }, { @@ -801,7 +844,7 @@ "metadata": {}, "outputs": [], "source": [ - "hdr.wait_for_completion(show_output=True)" + "hyperdrive_run.wait_for_completion(show_output=True)" ] }, { @@ -810,7 +853,7 @@ "metadata": {}, "outputs": [], "source": [ - "assert(hdr.get_status() == \"Completed\")" + "assert(hyperdrive_run.get_status() == \"Completed\")" ] }, { @@ -836,7 +879,7 @@ "metadata": {}, "outputs": [], "source": [ - "best_run = hdr.get_best_run_by_primary_metric()\n", + "best_run = hyperdrive_run.get_best_run_by_primary_metric()\n", "print(best_run.get_details()['runDefinition']['arguments'])" ] }, @@ -1179,7 +1222,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.9" + "version": "3.7.7" }, "tags": [ "None" diff --git a/how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.yml b/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.yml similarity index 100% rename from how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.yml rename to how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.yml diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/deployment/train-hyperparameter-tune-deploy-with-tensorflow/utils.py b/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/utils.py similarity index 100% rename from how-to-use-azureml/ml-frameworks/tensorflow/deployment/train-hyperparameter-tune-deploy-with-tensorflow/utils.py rename to how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/utils.py diff --git 
a/how-to-use-azureml/ml-frameworks/pytorch/training/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb b/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb similarity index 82% rename from how-to-use-azureml/ml-frameworks/pytorch/training/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb rename to how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb index f711b27f..2562d0ce 100644 --- a/how-to-use-azureml/ml-frameworks/pytorch/training/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb +++ b/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb @@ -13,7 +13,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/pytorch/training/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.png)" + "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.png)" ] }, { @@ -29,7 +29,7 @@ "metadata": {}, "source": [ "## Prerequisites\n", - "* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`\n", + "* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`\n", "* Review the [tutorial](../train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb) on single-node PyTorch training using Azure Machine Learning" ] }, @@ -230,8 +230,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create a PyTorch estimator\n", - "The Azure ML SDK's PyTorch estimator enables you to easily submit PyTorch training jobs for both single-node and distributed runs. For more information on the PyTorch estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-pytorch)." + "### Create an environment\n", + "\n", + "In this tutorial, we will use one of Azure ML's curated PyTorch environments for training. [Curated environments](https://docs.microsoft.com/azure/machine-learning/how-to-use-environments#use-a-curated-environment) are available in your workspace by default. Specifically, we will use the PyTorch 1.6 GPU curated environment. The curated environment includes the `torch`, `torchvision` and `horovod` packages required by the training script." 
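As an aside on the curated-environment step above: if you want to check which curated environments your workspace exposes before picking one, a sketch like the following should work (it assumes `ws` is the `Workspace` object created earlier in the notebook; `Environment.list` returns a dictionary keyed by environment name):

```python
from azureml.core import Environment

# Curated environment names are prefixed with 'AzureML-'
envs = Environment.list(workspace=ws)
for name in envs:
    if name.startswith('AzureML-'):
        print(name)
```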
] }, { @@ -240,21 +241,36 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.train.dnn import PyTorch, Mpi\n", + "from azureml.core import Environment\n", "\n", - "estimator = PyTorch(source_directory=project_folder,\n", - " compute_target=compute_target,\n", - " entry_script='pytorch_horovod_mnist.py',\n", - " node_count=2,\n", - " distributed_training=Mpi(),\n", - " use_gpu=True)" + "pytorch_env = Environment.get(ws, name='AzureML-PyTorch-1.6-GPU')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The above code specifies that we will run our training script on `2` nodes, with one worker per node. In order to execute a distributed run using MPI/Horovod, you must provide the argument `distributed_backend=Mpi()`. To specify `i` workers per node, you must provide the argument `distributed_backend=Mpi(process_count_per_node=i)`. Using this estimator with these settings, PyTorch, Horovod and their dependencies will be installed for you. However, if your script also uses other packages, make sure to install them via the `PyTorch` constructor's `pip_packages` or `conda_packages` parameters." + "### Configure the training job\n", + "\n", + "Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on.\n", + "\n", + "In order to execute a distributed run using MPI/Horovod, you must create an `MpiConfiguration` object and pass it to the `distributed_job_config` parameter of the ScriptRunConfig constructor. The below code will configure a 2-node distributed job running one process per node. If you would also like to run multiple processes per node (i.e. if your cluster SKU has multiple GPUs), additionally specify the `process_count_per_node` parameter in `MpiConfiguration` (the default is `1`)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.core import ScriptRunConfig\n", + "from azureml.core.runconfig import MpiConfiguration\n", + "\n", + "src = ScriptRunConfig(source_directory=project_folder,\n", + " script='pytorch_horovod_mnist.py',\n", + " compute_target=compute_target,\n", + " environment=pytorch_env,\n", + " distributed_job_config=MpiConfiguration(node_count=2))" ] }, { @@ -262,7 +278,7 @@ "metadata": {}, "source": [ "### Submit job\n", - "Run your experiment by submitting your estimator object. Note that this call is asynchronous." + "Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous." 
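To make the `process_count_per_node` note above concrete, a hypothetical configuration for a 2-node cluster whose SKU has four GPUs per node could look like this (the cluster shape is an assumption for illustration):

```python
from azureml.core.runconfig import MpiConfiguration

# 2 nodes x 4 processes per node = 8 Horovod worker processes in total
distr_config = MpiConfiguration(process_count_per_node=4, node_count=2)
```

Passing `distr_config` as the `distributed_job_config` then launches one worker per GPU rather than one per node.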
] }, { @@ -271,7 +287,7 @@ "metadata": {}, "outputs": [], "source": [ - "run = experiment.submit(estimator)\n", + "run = experiment.submit(src)\n", "print(run)" ] }, @@ -317,6 +333,22 @@ "name": "ninhu" } ], + "category": "training", + "compute": [ + "AML Compute" + ], + "datasets": [ + "MNIST" + ], + "deployment": [ + "None" + ], + "exclude_from_index": false, + "framework": [ + "PyTorch" + ], + "friendly_name": "Distributed PyTorch", + "index_order": 1, "kernelspec": { "display_name": "Python 3.6", "language": "python", @@ -332,28 +364,12 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.6" + "version": "3.6.9" }, - "friendly_name": "Distributed PyTorch", - "exclude_from_index": false, - "index_order": 1, - "category": "training", - "task": "Train a model using the distributed training via Horovod", - "datasets": [ - "MNIST" - ], - "compute": [ - "AML Compute" - ], - "deployment": [ - "None" - ], - "framework": [ - "PyTorch" - ], "tags": [ "None" - ] + ], + "task": "Train a model using distributed training via Horovod" }, "nbformat": 4, "nbformat_minor": 2 diff --git a/how-to-use-azureml/ml-frameworks/pytorch/training/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.yml b/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.yml similarity index 100% rename from how-to-use-azureml/ml-frameworks/pytorch/training/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.yml rename to how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.yml diff --git a/how-to-use-azureml/ml-frameworks/pytorch/training/distributed-pytorch-with-horovod/pytorch_horovod_mnist.py b/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/pytorch_horovod_mnist.py similarity index 100% rename from how-to-use-azureml/ml-frameworks/pytorch/training/distributed-pytorch-with-horovod/pytorch_horovod_mnist.py rename to how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/pytorch_horovod_mnist.py diff --git a/how-to-use-azureml/ml-frameworks/pytorch/training/distributed-pytorch-with-nccl-gloo/distributed-pytorch-with-nccl-gloo.ipynb b/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-nccl-gloo/distributed-pytorch-with-nccl-gloo.ipynb similarity index 71% rename from how-to-use-azureml/ml-frameworks/pytorch/training/distributed-pytorch-with-nccl-gloo/distributed-pytorch-with-nccl-gloo.ipynb rename to how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-nccl-gloo/distributed-pytorch-with-nccl-gloo.ipynb index 91ab32c7..55c15863 100644 --- a/how-to-use-azureml/ml-frameworks/pytorch/training/distributed-pytorch-with-nccl-gloo/distributed-pytorch-with-nccl-gloo.ipynb +++ b/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-nccl-gloo/distributed-pytorch-with-nccl-gloo.ipynb @@ -13,15 +13,15 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/training-with-deep-learning/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.png)" + "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-nccl-gloo/distributed-pytorch-with-nccl-gloo.png)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "# 
Distributed PyTorch \n", - "In this tutorial, you will train a PyTorch model on the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset using distributed training via Nccl/Gloo across a GPU cluster. " + "# Distributed PyTorch with DistributedDataParallel\n", + "In this tutorial, you will train a PyTorch model on the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset using distributed training with PyTorch's `DistributedDataParallel` module across a GPU cluster. " ] }, { @@ -29,7 +29,7 @@ "metadata": {}, "source": [ "## Prerequisites\n", - "* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`" + "* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`" ] }, { @@ -229,8 +229,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create a PyTorch estimator(Nccl Backend)\n", - "The Azure ML SDK's PyTorch estimator enables you to easily submit PyTorch training jobs for both single-node and distributed runs. For more information on the PyTorch estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-pytorch)." + "### Create an environment\n", + "\n", + "Define a conda environment YAML file with your training script dependencies and create an Azure ML environment." ] }, { @@ -239,26 +240,67 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.train.dnn import PyTorch, Nccl\n", + "%%writefile conda_dependencies.yml\n", "\n", - "estimator = PyTorch(source_directory=project_folder,\n", - " script_params={\"--dist-backend\" : \"nccl\",\n", - " \"--dist-url\": \"$AZ_BATCHAI_PYTORCH_INIT_METHOD\",\n", - " \"--rank\": \"$AZ_BATCHAI_TASK_INDEX\",\n", - " \"--world-size\": 2},\n", - " compute_target=compute_target,\n", - " entry_script='pytorch_mnist.py',\n", - " node_count=2,\n", - " distributed_training=Nccl(),\n", - " use_gpu=True)" + "channels:\n", + "- conda-forge\n", + "dependencies:\n", + "- python=3.6.2\n", + "- pip:\n", + " - azureml-defaults\n", + " - torch==1.6.0\n", + " - torchvision==0.7.0\n", + " - future==0.17.1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.core import Environment\n", + "\n", + "pytorch_env = Environment.from_conda_specification(name = 'pytorch-1.6-gpu', file_path = './conda_dependencies.yml')\n", + "\n", + "# Specify a GPU base image\n", + "pytorch_env.docker.enabled = True\n", + "pytorch_env.docker.base_image = 'mcr.microsoft.com/azureml/openmpi3.1.2-cuda10.1-cudnn7-ubuntu18.04'" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "In the above code, `script_params` uses Azure ML generated `AZ_BATCHAI_PYTORCH_INIT_METHOD` for shared file-system initialization and `AZ_BATCHAI_TASK_INDEX` as rank of each worker process.\n", - "The above code specifies that we will run our training script on `2` nodes, with one worker per node. In order to execute a distributed run using Nccl, you must provide the argument `distributed_training=Nccl()`. Using this estimator with these settings, PyTorch and dependencies will be installed for you. 
However, if your script also uses other packages, make sure to install them via the `PyTorch` constructor's `pip_packages` or `conda_packages` parameters." + "### Configure the training job: torch.distributed with NCCL backend\n", + "\n", + "Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on.\n", + "\n", + "In order to run a distributed PyTorch job with **torch.distributed** using the NCCL backend, create a `PyTorchConfiguration` and pass it to the `distributed_job_config` parameter of the ScriptRunConfig constructor. Specify `communication_backend='Nccl'` in the PyTorchConfiguration. The below code will configure a 2-node distributed job. The NCCL backend is the recommended backend for PyTorch distributed GPU training.\n", + "\n", + "The script arguments refer to the Azure ML-set environment variables `AZ_BATCHAI_PYTORCH_INIT_METHOD` for shared file-system initialization and `AZ_BATCHAI_TASK_INDEX` for the global rank of each worker process." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.core import ScriptRunConfig\n", + "from azureml.core.runconfig import PyTorchConfiguration\n", + "\n", + "args = ['--dist-backend', 'nccl',\n", + " '--dist-url', '$AZ_BATCHAI_PYTORCH_INIT_METHOD',\n", + " '--rank', '$AZ_BATCHAI_TASK_INDEX',\n", + " '--world-size', 2]\n", + "\n", + "src = ScriptRunConfig(source_directory=project_folder,\n", + " script='pytorch_mnist.py',\n", + " arguments=args,\n", + " compute_target=compute_target,\n", + " environment=pytorch_env,\n", + " distributed_job_config=PyTorchConfiguration(communication_backend='Nccl', node_count=2))" ] }, { @@ -266,7 +308,7 @@ "metadata": {}, "source": [ "### Submit job\n", - "Run your experiment by submitting your estimator object. Note that this call is asynchronous." + "Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous." ] }, { @@ -275,7 +317,7 @@ "metadata": {}, "outputs": [], "source": [ - "run = experiment.submit(estimator)\n", + "run = experiment.submit(src)\n", "print(run)" ] }, @@ -318,8 +360,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create a PyTorch estimator(Gloo Backend)\n", - "The Azure ML SDK's PyTorch estimator enables you to easily submit PyTorch training jobs for both single-node and distributed runs. For more information on the PyTorch estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-pytorch)." + "### Configure training job: torch.distributed with Gloo backend\n", + "\n", + "If you would instead like to use the Gloo backend for distributed training, you can do so via the following code. The Gloo backend is recommended for distributed CPU training."
] }, { @@ -328,28 +371,27 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.train.dnn import PyTorch, Gloo\n", + "from azureml.core import ScriptRunConfig\n", + "from azureml.core.runconfig import PyTorchConfiguration\n", "\n", - "estimator = PyTorch(source_directory=project_folder,\n", - " script_params={\"--dist-backend\" : \"gloo\",\n", - " \"--dist-url\": \"$AZ_BATCHAI_PYTORCH_INIT_METHOD\",\n", - " \"--rank\": \"$AZ_BATCHAI_TASK_INDEX\",\n", - " \"--world-size\": 2},\n", - " compute_target=compute_target,\n", - " entry_script='pytorch_mnist.py',\n", - " node_count=2,\n", - " distributed_training=Gloo(),\n", - " use_gpu=True)" + "args = ['--dist-backend', 'gloo',\n", + " '--dist-url', '$AZ_BATCHAI_PYTORCH_INIT_METHOD',\n", + " '--rank', '$AZ_BATCHAI_TASK_INDEX',\n", + " '--world-size', 2]\n", + "\n", + "src = ScriptRunConfig(source_directory=project_folder,\n", + " script='pytorch_mnist.py',\n", + " arguments=args,\n", + " compute_target=compute_target,\n", + " environment=pytorch_env,\n", + " distributed_job_config=PyTorchConfiguration(communication_backend='Gloo', node_count=2))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "In the above code, `script_params` uses Azure ML generated `AZ_BATCHAI_PYTORCH_INIT_METHOD` for shared file-system initialization and `AZ_BATCHAI_TASK_INDEX` as rank of each worker process.\n", - "The above code specifies that we will run our training script on `2` nodes, with one worker per node. In order to execute a distributed run using Gloo, you must provide the argument `distributed_training=Gloo()`. Using this estimator with these settings, PyTorch and dependencies will be installed for you. However, if your script also uses other packages, make sure to install them via the `PyTorch` constructor's `pip_packages` or `conda_packages` parameters.\n", - "\n", - "Once you create the estimaotr you can follow the submit steps as shown above to submit a PyTorch run with `Gloo` backend. " + "Once you create the ScriptRunConfig, you can follow the same submission steps shown above to submit a PyTorch distributed run using the Gloo backend."
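For context, here is a minimal sketch of how a training script such as `pytorch_mnist.py` might consume the arguments passed above; the argument names mirror the ScriptRunConfig arguments, but the parsing code itself is an assumption rather than the repository's actual script:

```python
import argparse

import torch.distributed as dist

parser = argparse.ArgumentParser()
parser.add_argument('--dist-backend', type=str, default='gloo')
parser.add_argument('--dist-url', type=str)
parser.add_argument('--rank', type=int)
parser.add_argument('--world-size', type=int)
args = parser.parse_args()

# Azure ML resolves $AZ_BATCHAI_PYTORCH_INIT_METHOD and $AZ_BATCHAI_TASK_INDEX before
# launch, so the script receives ordinary string/integer values here
dist.init_process_group(backend=args.dist_backend,
                        init_method=args.dist_url,
                        rank=args.rank,
                        world_size=args.world_size)
```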
] } ], @@ -359,6 +401,22 @@ "name": "ninhu" } ], + "category": "training", + "compute": [ + "AML Compute" + ], + "datasets": [ + "MNIST" + ], + "deployment": [ + "None" + ], + "exclude_from_index": false, + "framework": [ + "PyTorch" + ], + "friendly_name": "Distributed training with PyTorch", + "index_order": 1, "kernelspec": { "display_name": "Python 3.6", "language": "python", @@ -374,28 +432,12 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.8" + "version": "3.6.9" }, - "friendly_name": "Distributed training with PyTorch", - "exclude_from_index": false, - "index_order": 1, - "category": "training", - "task": "Train a model using distributed training via Nccl/Gloo", - "datasets": [ - "MNIST" - ], - "compute": [ - "AML Compute" - ], - "deployment": [ - "None" - ], - "framework": [ - "PyTorch" - ], "tags": [ "None" - ] + ], + "task": "Train a model using distributed training via Nccl/Gloo" }, "nbformat": 4, "nbformat_minor": 2 diff --git a/how-to-use-azureml/ml-frameworks/pytorch/training/distributed-pytorch-with-nccl-gloo/distributed-pytorch-with-nccl-gloo.yml b/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-nccl-gloo/distributed-pytorch-with-nccl-gloo.yml similarity index 100% rename from how-to-use-azureml/ml-frameworks/pytorch/training/distributed-pytorch-with-nccl-gloo/distributed-pytorch-with-nccl-gloo.yml rename to how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-nccl-gloo/distributed-pytorch-with-nccl-gloo.yml diff --git a/how-to-use-azureml/ml-frameworks/pytorch/training/distributed-pytorch-with-nccl-gloo/pytorch_mnist.py b/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-nccl-gloo/pytorch_mnist.py similarity index 100% rename from how-to-use-azureml/ml-frameworks/pytorch/training/distributed-pytorch-with-nccl-gloo/pytorch_mnist.py rename to how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-nccl-gloo/pytorch_mnist.py diff --git a/how-to-use-azureml/ml-frameworks/pytorch/deployment/train-hyperparameter-tune-deploy-with-pytorch/pytorch_score.py b/how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/pytorch_score.py similarity index 100% rename from how-to-use-azureml/ml-frameworks/pytorch/deployment/train-hyperparameter-tune-deploy-with-pytorch/pytorch_score.py rename to how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/pytorch_score.py diff --git a/how-to-use-azureml/ml-frameworks/pytorch/deployment/train-hyperparameter-tune-deploy-with-pytorch/pytorch_train.py b/how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/pytorch_train.py similarity index 100% rename from how-to-use-azureml/ml-frameworks/pytorch/deployment/train-hyperparameter-tune-deploy-with-pytorch/pytorch_train.py rename to how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/pytorch_train.py diff --git a/how-to-use-azureml/ml-frameworks/pytorch/deployment/train-hyperparameter-tune-deploy-with-pytorch/test_img.jpg b/how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/test_img.jpg similarity index 100% rename from how-to-use-azureml/ml-frameworks/pytorch/deployment/train-hyperparameter-tune-deploy-with-pytorch/test_img.jpg rename to how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/test_img.jpg diff --git 
a/how-to-use-azureml/ml-frameworks/pytorch/deployment/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb b/how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb similarity index 89% rename from how-to-use-azureml/ml-frameworks/pytorch/deployment/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb rename to how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb index 936fb627..43db5fa4 100644 --- a/how-to-use-azureml/ml-frameworks/pytorch/deployment/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb +++ b/how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb @@ -13,7 +13,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/pytorch/deployment/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.png)" + "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.png)" ] }, { @@ -32,7 +32,7 @@ "metadata": {}, "source": [ "## Prerequisites\n", - "* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`" + "* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`" ] }, { @@ -243,44 +243,68 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create a PyTorch estimator\n", - "The Azure ML SDK's PyTorch estimator enables you to easily submit PyTorch training jobs for both single-node and distributed runs. For more information on the PyTorch estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-pytorch). The following code will define a single-node PyTorch job." + "### Create an environment\n", + "\n", + "Define a conda environment YAML file with your training script dependencies and create an Azure ML environment." 
] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [ - "dnn-pytorch-remarks-sample" - ] - }, + "metadata": {}, "outputs": [], "source": [ - "from azureml.train.dnn import PyTorch\n", + "%%writefile conda_dependencies.yml\n", "\n", - "script_params = {\n", - " '--num_epochs': 30,\n", - " '--output_dir': './outputs'\n", - "}\n", + "channels:\n", + "- conda-forge\n", + "dependencies:\n", + "- python=3.6.2\n", + "- pip:\n", + " - azureml-defaults\n", + " - torch==1.6.0\n", + " - torchvision==0.7.0\n", + " - future==0.17.1\n", + " - pillow" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.core import Environment\n", "\n", - "estimator = PyTorch(source_directory=project_folder, \n", - " script_params=script_params,\n", - " compute_target=compute_target,\n", - " entry_script='pytorch_train.py',\n", - " use_gpu=True,\n", - " pip_packages=['pillow==5.4.1'])" + "pytorch_env = Environment.from_conda_specification(name = 'pytorch-1.6-gpu', file_path = './conda_dependencies.yml')\n", + "\n", + "# Specify a GPU base image\n", + "pytorch_env.docker.enabled = True\n", + "pytorch_env.docker.base_image = 'mcr.microsoft.com/azureml/openmpi3.1.2-cuda10.1-cudnn7-ubuntu18.04'" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The `script_params` parameter is a dictionary containing the command-line arguments to your training script `entry_script`. Please note the following:\n", - "- We passed our training data reference `ds_data` to our script's `--data_dir` argument. This will 1) mount our datastore on the remote compute and 2) provide the path to the training data `fowl_data` on our datastore.\n", - "- We specified the output directory as `./outputs`. The `outputs` directory is specially treated by Azure ML in that all the content in this directory gets uploaded to your workspace as part of your run history. The files written to this directory are therefore accessible even once your remote run is over. In this tutorial, we will save our trained model to this output directory.\n", + "### Configure the training job\n", "\n", - "To leverage the Azure VM's GPU for training, we set `use_gpu=True`." + "Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on. The following code will configure a single-node PyTorch job." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.core import ScriptRunConfig\n", + "\n", + "src = ScriptRunConfig(source_directory=project_folder,\n", + " script='pytorch_train.py',\n", + " arguments=['--num_epochs', 30, '--output_dir', './outputs'],\n", + " compute_target=compute_target,\n", + " environment=pytorch_env)" ] }, { @@ -288,7 +312,7 @@ "metadata": {}, "source": [ "### Submit job\n", - "Run your experiment by submitting your estimator object. Note that this call is asynchronous." + "Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous." 
] }, { @@ -297,7 +321,7 @@ "metadata": {}, "outputs": [], "source": [ - "run = experiment.submit(estimator)\n", + "run = experiment.submit(src)\n", "print(run)" ] }, @@ -381,7 +405,7 @@ "\n", "early_termination_policy = BanditPolicy(slack_factor=0.15, evaluation_interval=1, delay_evaluation=10)\n", "\n", - "hyperdrive_config = HyperDriveConfig(estimator=estimator,\n", + "hyperdrive_config = HyperDriveConfig(run_config=src,\n", " hyperparameter_sampling=param_sampling, \n", " policy=early_termination_policy,\n", " primary_metric_name='best_val_acc',\n", @@ -532,24 +556,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create environment file\n", - "Then, we will need to create an environment file (`myenv.yml`) that specifies all of the scoring script's package dependencies. This file is used to ensure that all of those dependencies are installed in the Docker image by Azure ML. In this case, we need to specify `azureml-core`, `torch` and `torchvision`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from azureml.core.conda_dependencies import CondaDependencies \n", + "### Define the environment\n", "\n", - "myenv = CondaDependencies.create(pip_packages=['azureml-defaults', 'torch', 'torchvision>=0.5.0'])\n", - "\n", - "with open(\"myenv.yml\",\"w\") as f:\n", - " f.write(myenv.serialize_to_string())\n", - " \n", - "print(myenv.serialize_to_string())" + "Then, we will need to create an Azure ML environment that specifies all of the scoring script's package dependencies. In this tutorial, we will reuse the same environment, `pytorch_env`, that we created for training." ] }, { @@ -570,11 +579,8 @@ "from azureml.core.model import InferenceConfig\n", "from azureml.core.webservice import Webservice\n", "from azureml.core.model import Model\n", - "from azureml.core.environment import Environment\n", "\n", - "\n", - "myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\n", - "inference_config = InferenceConfig(entry_script=\"pytorch_score.py\", environment=myenv)\n", + "inference_config = InferenceConfig(entry_script=\"pytorch_score.py\", environment=pytorch_env)\n", "\n", "aciconfig = AciWebservice.deploy_configuration(cpu_cores=1, \n", " memory_gb=1, \n", @@ -742,7 +748,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.6" + "version": "3.7.7" }, "tags": [ "None" diff --git a/how-to-use-azureml/ml-frameworks/pytorch/deployment/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.yml b/how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.yml similarity index 100% rename from how-to-use-azureml/ml-frameworks/pytorch/deployment/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.yml rename to how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.yml diff --git a/how-to-use-azureml/ml-frameworks/scikit-learn/training/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.ipynb b/how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.ipynb similarity index 91% rename from 
how-to-use-azureml/ml-frameworks/scikit-learn/training/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.ipynb rename to how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.ipynb index 6f80d18d..43e5cc08 100644 --- a/how-to-use-azureml/ml-frameworks/scikit-learn/training/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.ipynb +++ b/how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.ipynb @@ -13,7 +13,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/scikit-learn/training/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.png)" + "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.png)" ] }, { @@ -35,7 +35,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "* Go through the [Configuration](../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML Workspace" + "* Go through the [Configuration](../../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML Workspace" ] }, { @@ -285,46 +285,59 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create a Scikit-learn estimator" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The Azure ML SDK's Scikit-learn estimator enables you to easily submit Scikit-learn training jobs for single-node runs. The following code will define a single-node Scikit-learn job." + "### Create an environment\n", + "\n", + "Define a conda environment YAML file with your training script dependencies and create an Azure ML environment." ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [ - "sklearn-remarks-sample" - ] - }, + "metadata": {}, "outputs": [], "source": [ - "from azureml.train.sklearn import SKLearn\n", + "%%writefile conda_dependencies.yml\n", "\n", - "script_params = {\n", - " '--kernel': 'linear',\n", - " '--penalty': 1.0,\n", - "}\n", + "dependencies:\n", + "- python=3.6.2\n", + "- scikit-learn\n", + "- pip:\n", + " - azureml-defaults" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.core import Environment\n", "\n", - "estimator = SKLearn(source_directory=project_folder, \n", - " script_params=script_params,\n", - " compute_target=compute_target,\n", - " entry_script='train_iris.py',\n", - " pip_packages=['joblib==0.13.2']\n", - " )" + "sklearn_env = Environment.from_conda_specification(name = 'sklearn-env', file_path = './conda_dependencies.yml')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The `script_params` parameter is a dictionary containing the command-line arguments to your training script `entry_script`." + "### Configure the training job\n", + "\n", + "Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.core import ScriptRunConfig\n", + "\n", + "src = ScriptRunConfig(source_directory=project_folder,\n", + " script='train_iris.py',\n", + " arguments=['--kernel', 'linear', '--penalty', 1.0],\n", + " compute_target=compute_target,\n", + " environment=sklearn_env)" ] }, { @@ -338,7 +351,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Run your experiment by submitting your estimator object. Note that this call is asynchronous." + "Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous." ] }, { @@ -347,7 +360,7 @@ "metadata": {}, "outputs": [], "source": [ - "run = experiment.submit(estimator)" + "run = experiment.submit(src)" ] }, { @@ -430,7 +443,7 @@ " }\n", ")\n", "\n", - "hyperdrive_config = HyperDriveConfig(estimator=estimator,\n", + "hyperdrive_config = HyperDriveConfig(run_config=src,\n", " hyperparameter_sampling=param_sampling, \n", " primary_metric_name='Accuracy',\n", " primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,\n", @@ -593,7 +606,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.6" + "version": "3.7.7" }, "msauthor": "dipeck", "tags": [ diff --git a/how-to-use-azureml/ml-frameworks/scikit-learn/training/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.yml b/how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.yml similarity index 100% rename from how-to-use-azureml/ml-frameworks/scikit-learn/training/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.yml rename to how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.yml diff --git a/how-to-use-azureml/ml-frameworks/scikit-learn/training/train-hyperparameter-tune-deploy-with-sklearn/train_iris.py b/how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train_iris.py similarity index 100% rename from how-to-use-azureml/ml-frameworks/scikit-learn/training/train-hyperparameter-tune-deploy-with-sklearn/train_iris.py rename to how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train_iris.py diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/training/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.ipynb b/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.ipynb similarity index 82% rename from how-to-use-azureml/ml-frameworks/tensorflow/training/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.ipynb rename to how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.ipynb index b2055716..bd204bdf 100644 --- a/how-to-use-azureml/ml-frameworks/tensorflow/training/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.ipynb +++ b/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.ipynb @@ -13,7 +13,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/training/manage-runs/manage-runs.png)" + 
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.png)" ] }, { @@ -30,7 +30,7 @@ "source": [ "## Prerequisites\n", "* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning (AML)\n", - "* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) to:\n", + "* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../../configuration.ipynb) to:\n", " * install the AML SDK\n", " * create a workspace and its configuration file (`config.json`)\n", "* Review the [tutorial](../train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb) on single-node TensorFlow training using the SDK" @@ -176,8 +176,8 @@ "outputs": [], "source": [ "dataset = dataset.register(workspace=ws,\n", - " name='mattmahoney dataset',\n", - " description='mattmahoney training and test dataset',\n", + " name='wikipedia-text',\n", + " description='Wikipedia text training and test dataset',\n", " create_new_version=True)" ] }, @@ -259,10 +259,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create a TensorFlow estimator\n", - "The AML SDK's TensorFlow estimator enables you to easily submit TensorFlow training jobs for both single-node and distributed runs. For more information on the TensorFlow estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-tensorflow).\n", + "### Create an environment\n", "\n", - "The TensorFlow estimator also takes a `framework_version` parameter -- if no version is provided, the estimator will default to the latest version supported by AzureML. Use `TensorFlow.get_supported_versions()` to get a list of all versions supported by your current SDK version or see the [SDK documentation](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.dnn?view=azure-ml-py) for the versions supported in the most current release." + "In this tutorial, we will use one of Azure ML's curated TensorFlow environments for training. [Curated environments](https://docs.microsoft.com/azure/machine-learning/how-to-use-environments#use-a-curated-environment) are available in your workspace by default. Specifically, we will use the TensorFlow 1.13 GPU curated environment." ] }, { @@ -271,28 +270,37 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.train.dnn import TensorFlow, Mpi\n", + "from azureml.core import Environment\n", "\n", - "script_params={\n", - " '--input_data': dataset.as_named_input('mattmahoney').as_mount(),\n", - "}\n", - "\n", - "estimator= TensorFlow(source_directory=project_folder,\n", - " compute_target=compute_target,\n", - " script_params=script_params,\n", - " entry_script='tf_horovod_word2vec.py',\n", - " node_count=2,\n", - " distributed_training=Mpi(),\n", - " framework_version='1.13', \n", - " use_gpu=True,\n", - " pip_packages=['azureml-dataset-runtime[pandas,fuse]'])" + "tf_env = Environment.get(ws, name='AzureML-TensorFlow-1.13-GPU')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The above code specifies that we will run our training script on `2` nodes, with one worker per node. 
In order to execute a distributed run using MPI/Horovod, you must provide the argument `distributed_backend=Mpi()`. To specify `i` workers per node, you must provide the argument `distributed_backend=Mpi(process_count_per_node=i)`. Using this estimator with these settings, TensorFlow, Horovod and their dependencies will be installed for you. However, if your script also uses other packages, make sure to install them via the `TensorFlow` constructor's `pip_packages` or `conda_packages` parameters." + "### Configure the training job\n", + "\n", + "Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on.\n", + "\n", + "In order to execute a distributed run using MPI/Horovod, you must create an `MpiConfiguration` object and pass it to the `distributed_job_config` parameter of the ScriptRunConfig constructor. The below code will configure a 2-node distributed job running one process per node. If you would also like to run multiple processes per node (i.e. if your cluster SKU has multiple GPUs), additionally specify the `process_count_per_node` parameter in `MpiConfiguration` (the default is `1`)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.core import ScriptRunConfig\n", + "from azureml.core.runconfig import MpiConfiguration\n", + "\n", + "src = ScriptRunConfig(source_directory=project_folder,\n", + " script='tf_horovod_word2vec.py',\n", + " arguments=['--input_data', dataset.as_mount()],\n", + " compute_target=compute_target,\n", + " environment=tf_env,\n", + " distributed_job_config=MpiConfiguration(node_count=2))" ] }, { @@ -300,7 +308,7 @@ "metadata": {}, "source": [ "### Submit job\n", - "Run your experiment by submitting your estimator object. Note that this call is asynchronous." + "Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous." 
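As an aside, a minimal sketch (an assumption, not the repository's actual code) of how `tf_horovod_word2vec.py` might pick up the mount point that Azure ML substitutes for `dataset.as_mount()`:

```python
import argparse

parser = argparse.ArgumentParser()
# Azure ML rewrites the as_mount() argument into the dataset's mount path on the compute target
parser.add_argument('--input_data', type=str, help='mount point of the input dataset')
args, _ = parser.parse_known_args()

print('Dataset mounted at:', args.input_data)
```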
] }, { @@ -309,7 +317,7 @@ "metadata": {}, "outputs": [], "source": [ - "run = experiment.submit(estimator)\n", + "run = experiment.submit(src)\n", "print(run)\n", "run.get_details()" ] @@ -352,7 +360,7 @@ "metadata": { "authors": [ { - "name": "maxluk" + "name": "minxia" } ], "category": "training", @@ -386,7 +394,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.4" + "version": "3.6.9" }, "tags": [ "None" diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/training/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.yml b/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.yml similarity index 100% rename from how-to-use-azureml/ml-frameworks/tensorflow/training/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.yml rename to how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.yml diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/training/distributed-tensorflow-with-horovod/tf_horovod_word2vec.py b/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-horovod/tf_horovod_word2vec.py similarity index 100% rename from how-to-use-azureml/ml-frameworks/tensorflow/training/distributed-tensorflow-with-horovod/tf_horovod_word2vec.py rename to how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-horovod/tf_horovod_word2vec.py diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/training/distributed-tensorflow-with-parameter-server/distributed-tensorflow-with-parameter-server.ipynb b/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-parameter-server/distributed-tensorflow-with-parameter-server.ipynb similarity index 81% rename from how-to-use-azureml/ml-frameworks/tensorflow/training/distributed-tensorflow-with-parameter-server/distributed-tensorflow-with-parameter-server.ipynb rename to how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-parameter-server/distributed-tensorflow-with-parameter-server.ipynb index b1d7058b..b274a127 100644 --- a/how-to-use-azureml/ml-frameworks/tensorflow/training/distributed-tensorflow-with-parameter-server/distributed-tensorflow-with-parameter-server.ipynb +++ b/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-parameter-server/distributed-tensorflow-with-parameter-server.ipynb @@ -13,7 +13,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/tensorflow/training/distributed-tensorflow-with-parameter-server/distributed-tensorflow-with-parameter-server.png)" + "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-parameter-server/distributed-tensorflow-with-parameter-server.png)" ] }, { @@ -30,7 +30,7 @@ "source": [ "## Prerequisites\n", "* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning (AML)\n", - "* If you are using an Azure Machine Learning Notebook VM, you are all set. 
Otherwise, go through the [configuration notebook](../../../configuration.ipynb) to:\n", + "* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../../configuration.ipynb) to:\n", " * install the AML SDK\n", " * create a workspace and its configuration file (`config.json`)\n", "* Review the [tutorial](../train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb) on single-node TensorFlow training using the SDK" @@ -205,8 +205,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create a TensorFlow estimator\n", - "The AML SDK's TensorFlow estimator enables you to easily submit TensorFlow training jobs for both single-node and distributed runs. For more information on the TensorFlow estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-tensorflow)." + "### Create an environment\n", + "\n", + "In this tutorial, we will use one of Azure ML's curated TensorFlow environments for training. [Curated environments](https://docs.microsoft.com/azure/machine-learning/how-to-use-environments#use-a-curated-environment) are available in your workspace by default. Specifically, we will use the TensorFlow 1.13 GPU curated environment." ] }, { @@ -215,27 +216,37 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.train.dnn import TensorFlow, ParameterServer\n", + "from azureml.core import Environment\n", "\n", - "script_params={\n", - " '--num_gpus': 1,\n", - " '--train_steps': 500\n", - "}\n", - "\n", - "estimator = TensorFlow(source_directory=project_folder,\n", - " compute_target=compute_target,\n", - " script_params=script_params,\n", - " entry_script='tf_mnist_replica.py',\n", - " node_count=2,\n", - " distributed_training=ParameterServer(worker_count=2),\n", - " use_gpu=True)" + "tf_env = Environment.get(ws, name='AzureML-TensorFlow-1.13-GPU')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The above code specifies that we will run our training script on `2` nodes, with two workers and one parameter server. In order to execute a native distributed TensorFlow run, you must provide the argument `distributed_backend=ParameterServer()`. Using this estimator with these settings, TensorFlow and its dependencies will be installed for you. However, if your script also uses other packages, make sure to install them via the `TensorFlow` constructor's `pip_packages` or `conda_packages` parameters." + "### Configure the training job\n", + "\n", + "Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on.\n", + "\n", + "In order to execute a distributed TensorFlow run with the parameter server strategy, you must create a `TensorflowConfiguration` object and pass it to the `distributed_job_config` parameter of the ScriptRunConfig constructor. The below code configures a distributed TensorFlow run with `2` workers and `1` parameter server." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.core import ScriptRunConfig\n", + "from azureml.core.runconfig import TensorflowConfiguration\n", + "\n", + "src = ScriptRunConfig(source_directory=project_folder,\n", + " script='tf_mnist_replica.py',\n", + " arguments=['--num_gpus', 1, '--train_steps', 500],\n", + " compute_target=compute_target,\n", + " environment=tf_env,\n", + " distributed_job_config=TensorflowConfiguration(worker_count=2, parameter_server_count=1))" ] }, { @@ -243,7 +254,7 @@ "metadata": {}, "source": [ "### Submit job\n", - "Run your experiment by submitting your estimator object. Note that this call is asynchronous." + "Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous." ] }, { @@ -252,7 +263,7 @@ "metadata": {}, "outputs": [], "source": [ - "run = experiment.submit(estimator)\n", + "run = experiment.submit(src)\n", "print(run)" ] }, @@ -295,9 +306,25 @@ "metadata": { "authors": [ { - "name": "ninhu" + "name": "minxia" } ], + "category": "training", + "compute": [ + "AML Compute" + ], + "datasets": [ + "MNIST" + ], + "deployment": [ + "None" + ], + "exclude_from_index": false, + "framework": [ + "TensorFlow" + ], + "friendly_name": "Distributed TensorFlow with parameter server", + "index_order": 1, "kernelspec": { "display_name": "Python 3.6", "language": "python", @@ -313,28 +340,12 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.6" + "version": "3.6.9" }, - "friendly_name": "Distributed TensorFlow with parameter server", - "exclude_from_index": false, - "index_order": 1, - "category": "training", - "task": "Use the TensorFlow estimator to train a model using distributed training", - "datasets": [ - "MNIST" - ], - "compute": [ - "AML Compute" - ], - "deployment": [ - "None" - ], - "framework": [ - "TensorFlow" - ], "tags": [ "None" - ] + ], + "task": "Train a model using distributed TensorFlow training with a parameter server" }, "nbformat": 4, "nbformat_minor": 2 diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/training/distributed-tensorflow-with-parameter-server/distributed-tensorflow-with-parameter-server.yml b/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-parameter-server/distributed-tensorflow-with-parameter-server.yml similarity index 100% rename from how-to-use-azureml/ml-frameworks/tensorflow/training/distributed-tensorflow-with-parameter-server/distributed-tensorflow-with-parameter-server.yml rename to how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-parameter-server/distributed-tensorflow-with-parameter-server.yml diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/training/distributed-tensorflow-with-parameter-server/tf_mnist_replica.py b/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-parameter-server/tf_mnist_replica.py similarity index 100% rename from how-to-use-azureml/ml-frameworks/tensorflow/training/distributed-tensorflow-with-parameter-server/tf_mnist_replica.py rename to how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-parameter-server/tf_mnist_replica.py diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/training/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb 
b/how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb similarity index 85% rename from how-to-use-azureml/ml-frameworks/tensorflow/training/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb rename to how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb index 00cc3209..9874744e 100644 --- a/how-to-use-azureml/ml-frameworks/tensorflow/training/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb +++ b/how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb @@ -13,29 +13,15 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.png)" + "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.png)" ] }, { "cell_type": "markdown", - "metadata": { - "nbpresent": { - "id": "bf74d2e9-2708-49b1-934b-e0ede342f475" - } - }, + "metadata": {}, "source": [ - "# Hyperparameter tuning and warm start with TensorFlow\n", - "\n", - "## Introduction\n", - "This tutorial shows how to tune the hyperparameters of a simple deep neural network using the MNIST dataset and TensorFlow on Azure Machine Learning. MNIST is a popular dataset consisting of 70,000 grayscale images. Each image is a handwritten digit of `28x28` pixels, representing number from 0 to 9. The goal is to create a multi-class classifier to identify the digit each image represents, and deploy it as a web service in Azure.\n", - "\n", - "For more information about the MNIST dataset, please visit [Yan LeCun's website](http://yann.lecun.com/exdb/mnist/).\n", - "\n", - "## Prerequisite:\n", - "* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n", - "* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) to:\n", - " * install the AML SDK\n", - " * create a workspace and its configuration file (`config.json`)" + "# Warm start hyperparameter tuning\n", + "In this tutorial, you will learn how to warm start a hyperparameter tuning run from a previous tuning run." ] }, { @@ -423,12 +409,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Create TensorFlow estimator\n", - "Next, we construct an `azureml.train.dnn.TensorFlow` estimator object, use the Batch AI cluster as compute target, and pass the mount-point of the datastore to the training code as a parameter.\n", + "### Create an environment\n", "\n", - "The TensorFlow estimator is providing a simple way of launching a TensorFlow training job on a compute target. 
It will automatically provide a docker image that has TensorFlow installed -- if additional pip or conda packages are required, their names can be passed in via the `pip_packages` and `conda_packages` arguments and they will be included in the resulting docker.\n", - "\n", - "The TensorFlow estimator also takes a `framework_version` parameter -- if no version is provided, the estimator will default to the latest version supported by AzureML. Use `TensorFlow.get_supported_versions()` to get a list of all versions supported by your current SDK version or see the [SDK documentation](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.dnn?view=azure-ml-py) for the versions supported in the most current release." + "In this tutorial, we will use one of Azure ML's curated TensorFlow environments for training. [Curated environments](https://docs.microsoft.com/azure/machine-learning/how-to-use-environments#use-a-curated-environment) are available in your workspace by default. Specifically, we will use the TensorFlow 2.0 GPU curated environment." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "from azureml.core.environment import Environment\n", - "from azureml.core.conda_dependencies import CondaDependencies\n", + "from azureml.core import Environment\n", "\n", - "# set up environment\\n\n", - "env = Environment('my_env')\n", - "# ensure latest azureml-dataset-runtime and other required packages installed in the environment\n", - "cd = CondaDependencies.create(pip_packages=['keras',\n", - " 'azureml-sdk',\n", - " 'tensorflow==2.0.0',\n", - " 'matplotlib',\n", - " 'azureml-dataset-runtime[pandas,fuse]'])\n", - "\n", - "env.python.conda_dependencies = cd" + "tf_env = Environment.get(ws, name='AzureML-TensorFlow-2.0-GPU')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Configure the training job\n", + "Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on." ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [ - "dnn-tensorflow-remarks-sample" - ] - }, + "metadata": {}, "outputs": [], "source": [ - "from azureml.train.dnn import TensorFlow\n", + "from azureml.core import ScriptRunConfig\n", "\n", - "script_params = {\n", - " '--data-folder': dataset.as_named_input('mnist').as_mount(),\n", - " '--batch-size': 64,\n", - " '--first-layer-neurons': 256,\n", - " '--second-layer-neurons': 128,\n", - " '--learning-rate': 0.01\n", - "}\n", + "args = ['--data-folder', dataset.as_mount(),\n", + " '--batch-size', 64,\n", + " '--first-layer-neurons', 256,\n", + " '--second-layer-neurons', 128,\n", + " '--learning-rate', 0.01]\n", "\n", - "est = TensorFlow(source_directory=script_folder,\n", - " script_params=script_params,\n", - " compute_target=compute_target,\n", - " entry_script='tf_mnist.py', \n", - " framework_version='2.0',\n", - " environment_definition= env)" + "src = ScriptRunConfig(source_directory=script_folder,\n", + " script='tf_mnist.py',\n", + " arguments=args,\n", + " compute_target=compute_target,\n", + " environment=tf_env)" ] }, { @@ -485,7 +459,7 @@ "metadata": {}, "source": [ "## Submit job to run\n", - "Submit the estimator to an Azure ML experiment to kick off the execution." + "Submit the ScriptRunConfig to an Azure ML experiment to kick off the execution." 
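Since submission is asynchronous, a minimal sketch of how the returned run can be tracked to completion (assuming `exp` and `src` are defined as in the cells above):

```python
# Submit the ScriptRunConfig; this returns immediately with a Run object.
run = exp.submit(src)

# Block until the run finishes, streaming driver logs to the notebook.
run.wait_for_completion(show_output=True)

# Inspect the metrics logged by the training script (e.g. validation_acc).
print(run.get_metrics())
```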
] }, { @@ -494,7 +468,7 @@ "metadata": {}, "outputs": [], "source": [ - "run = exp.submit(est)" + "run = exp.submit(src)" ] }, { @@ -546,7 +520,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Next, we will create a new estimator without the above parameters since they will be passed in later. Note we still need to keep the `data-folder` parameter since that's not a hyperparamter we will sweep." + "Next, we will create a new ScriptRunConfig without the above parameters since they will be passed in later. Note we still need to keep the `data-folder` parameter since that's not a hyperparameter we will sweep." ] }, { @@ -555,12 +529,13 @@ "metadata": {}, "outputs": [], "source": [ - "est = TensorFlow(source_directory=script_folder,\n", - " script_params={'--data-folder': dataset.as_named_input('mnist').as_mount()},\n", - " compute_target=compute_target,\n", - " entry_script='tf_mnist.py',\n", - " framework_version='2.0',\n", - " environment_definition = env)" + "args = ['--data-folder', dataset.as_mount()]\n", + "\n", + "src = ScriptRunConfig(source_directory=script_folder,\n", + " script='tf_mnist.py',\n", + " arguments=args,\n", + " compute_target=compute_target,\n", + " environment=tf_env)" ] }, { @@ -584,7 +559,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now we are ready to configure a run configuration object, and specify the primary metric `validation_acc` that's recorded in your training runs. If you go back to visit the training script, you will notice that this value is being logged after every epoch (a full batch set). We also want to tell the service that we are looking to maximizing this value. We also set the number of samples to 20, and maximal concurrent job to 4, which is the same as the number of nodes in our computer cluster." + "Now we are ready to configure a run configuration object, and specify the primary metric `validation_acc` that's recorded in your training runs. If you go back to visit the training script, you will notice that this value is being logged after every epoch (a full batch set). We also want to tell the service that we are looking to maximize this value. We also set the number of samples to 15, and the maximum number of concurrent jobs to 4, which is the same as the number of nodes in our compute cluster." ] }, { @@ -594,7 +569,7 @@ "outputs": [], "source": [ "from azureml.train.hyperdrive import HyperDriveConfig, PrimaryMetricGoal\n", - "htc = HyperDriveConfig(estimator=est, \n", + "htc = HyperDriveConfig(run_config=src, \n", " hyperparameter_sampling=ps, \n", " policy=policy, \n", " primary_metric_name='validation_acc', \n", @@ -720,7 +695,7 @@ "source": [ "warm_start_parents_to_resume_from=[htr]\n", "\n", - "warm_start_htc = HyperDriveConfig(estimator=est, \n", + "warm_start_htc = HyperDriveConfig(run_config=src, \n", " hyperparameter_sampling=ps, \n", " policy=policy, \n", " resume_from=warm_start_parents_to_resume_from, \n", @@ -818,7 +793,7 @@ "metadata": {}, "outputs": [], "source": [ - "resume_child_runs_htc = HyperDriveConfig(estimator=est, \n", + "resume_child_runs_htc = HyperDriveConfig(run_config=src, \n", " hyperparameter_sampling=ps, \n", " policy=policy, \n", " resume_child_runs=child_runs_to_resume, \n", @@ -834,7 +809,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - " We can use the run history widget to show the progress of this resumed run. 
Be patient as this might take a while to complete." ] }, { @@ -872,13 +847,6 @@ "best_resume_child_run = resume_child_runs_htr.get_best_run_by_primary_metric()\n", "resume_child_run_model = best_resume_child_run.register_model(model_name='tf-dnn-mnist-resumed', model_path='outputs/model')" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { @@ -918,7 +886,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.8" + "version": "3.6.9" }, "tags": [ "None" diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/training/hyperparameter-tune-and-warm-start-with-tensorflow/nn.png b/how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/nn.png similarity index 100% rename from how-to-use-azureml/ml-frameworks/tensorflow/training/hyperparameter-tune-and-warm-start-with-tensorflow/nn.png rename to how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/nn.png diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/training/hyperparameter-tune-and-warm-start-with-tensorflow/tf_mnist.py b/how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/tf_mnist.py similarity index 100% rename from how-to-use-azureml/ml-frameworks/tensorflow/training/hyperparameter-tune-and-warm-start-with-tensorflow/tf_mnist.py rename to how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/tf_mnist.py diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/training/hyperparameter-tune-and-warm-start-with-tensorflow/utils.py b/how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/utils.py similarity index 100% rename from how-to-use-azureml/ml-frameworks/tensorflow/training/hyperparameter-tune-and-warm-start-with-tensorflow/utils.py rename to how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/utils.py diff --git a/how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/nn.png b/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/nn.png similarity index 100% rename from how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/nn.png rename to how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/nn.png diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/deployment/train-hyperparameter-tune-deploy-with-tensorflow/tf_mnist.py b/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/tf_mnist.py similarity index 100% rename from how-to-use-azureml/ml-frameworks/tensorflow/deployment/train-hyperparameter-tune-deploy-with-tensorflow/tf_mnist.py rename to how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/tf_mnist.py diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/deployment/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb b/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb similarity index 92% rename from how-to-use-azureml/ml-frameworks/tensorflow/deployment/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb rename to 
how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb index 6f382ce6..37d0785d 100644 --- a/how-to-use-azureml/ml-frameworks/tensorflow/deployment/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb +++ b/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb @@ -13,7 +13,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.png)" + "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.png)" ] }, { @@ -33,7 +33,7 @@ "\n", "## Prerequisite:\n", "* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning\n", - "* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) to:\n", + "* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../../configuration.ipynb) to:\n", " * install the AML SDK\n", " * create a workspace and its configuration file (`config.json`)" ] @@ -425,41 +425,50 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Create TensorFlow estimator\n", - "Next, we construct an `azureml.train.dnn.TensorFlow` estimator object, use the Batch AI cluster as compute target, and pass the mount-point of the datastore to the training code as a parameter.\n", + "## Create an environment\n", "\n", - "The TensorFlow estimator is providing a simple way of launching a TensorFlow training job on a compute target. It will automatically provide a docker image that has TensorFlow installed -- if additional pip or conda packages are required, their names can be passed in via the `pip_packages` and `conda_packages` arguments and they will be included in the resulting docker.\n", - "\n", - "The TensorFlow estimator also takes a `framework_version` parameter -- if no version is provided, the estimator will default to the latest version supported by AzureML. Use `TensorFlow.get_supported_versions()` to get a list of all versions supported by your current SDK version or see the [SDK documentation](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.dnn?view=azure-ml-py) for the versions supported in the most current release." + "In this tutorial, we will use one of Azure ML's curated TensorFlow environments for training. [Curated environments](https://docs.microsoft.com/azure/machine-learning/how-to-use-environments#use-a-curated-environment) are available in your workspace by default. Specifically, we will use the TensorFlow 2.0 GPU curated environment." 
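As a quick sanity check, the curated environments registered in the workspace can be listed before picking one -- a minimal sketch, assuming `ws` holds the workspace object:

```python
from azureml.core import Environment

# Curated environment names are prefixed with "AzureML-";
# print the TensorFlow ones to see which versions are available.
envs = Environment.list(workspace=ws)
for name in sorted(envs):
    if name.startswith('AzureML-TensorFlow'):
        print(name)
```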
] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [ - "dnn-tensorflow-remarks-sample" - ] - }, + "metadata": {}, "outputs": [], "source": [ - "from azureml.train.dnn import TensorFlow\n", + "from azureml.core import Environment\n", "\n", - "script_params = {\n", - " '--data-folder': dataset.as_named_input('mnist').as_mount(),\n", - " '--batch-size': 64,\n", - " '--first-layer-neurons': 256,\n", - " '--second-layer-neurons': 128,\n", - " '--learning-rate': 0.01\n", - "}\n", + "tf_env = Environment.get(ws, name='AzureML-TensorFlow-2.0-GPU')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Configure the training job\n", "\n", - "est = TensorFlow(source_directory=script_folder,\n", - " script_params=script_params,\n", - " compute_target=compute_target,\n", - " entry_script='tf_mnist.py',\n", - " use_gpu=True,\n", - " framework_version='2.0',\n", - " pip_packages=['azureml-dataset-runtime[pandas,fuse]'])" + "Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.core import ScriptRunConfig\n", + "\n", + "args = ['--data-folder', dataset.as_named_input('mnist').as_mount(),\n", + " '--batch-size', 64,\n", + " '--first-layer-neurons', 256,\n", + " '--second-layer-neurons', 128,\n", + " '--learning-rate', 0.01]\n", + "\n", + "src = ScriptRunConfig(source_directory=script_folder,\n", + " script='tf_mnist.py',\n", + " arguments=args,\n", + " compute_target=compute_target,\n", + " environment=tf_env)" ] }, { @@ -467,7 +476,7 @@ "metadata": {}, "source": [ "## Submit job to run\n", - "Submit the estimator to an Azure ML experiment to kick off the execution." + "Submit the ScriptRunConfig to an Azure ML experiment to kick off the execution." ] }, { @@ -476,7 +485,7 @@ "metadata": {}, "outputs": [], "source": [ - "run = exp.submit(est)" + "run = exp.submit(src)" ] }, { @@ -485,7 +494,7 @@ "source": [ "### Monitor the Run \n", "As the Run is executed, it will go through the following stages:\n", - "1. Preparing: A docker image is created matching the Python environment specified by the TensorFlow estimator and it will be uploaded to the workspace's Azure Container Registry. This step will only happen once for each Python environment -- the container will then be cached for subsequent runs. Creating and uploading the image takes about **5 minutes**. While the job is preparing, logs are streamed to the run history and can be viewed to monitor the progress of the image creation.\n", + "1. Preparing: A docker image is created based on the specifications of the Azure ML environment and it will be uploaded to the workspace's Azure Container Registry. This step will only happen once for each Python environment -- the container will then be cached for subsequent runs. Creating and uploading the image takes about **5 minutes**. While the job is preparing, logs are streamed to the run history and can be viewed to monitor the progress of the image creation.\n", "\n", "2. Scaling: If the compute needs to be scaled up (i.e. the Batch AI cluster requires more nodes to execute the run than currently available), the cluster will attempt to scale up in order to make the required amount of nodes available. 
Scaling typically takes about **5 minutes**.\n", "\n", @@ -701,7 +710,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Next, we will create a new estimator without the above parameters since they will be passed in later. Note we still need to keep the `data-folder` parameter since that's not a hyperparamter we will sweep." + "Next, we will create a new ScriptRunConfig without the above parameters since they will be passed in later. Note we still need to keep the `data-folder` parameter since that's not a hyperparameter we will sweep." ] }, { @@ -710,13 +719,13 @@ "metadata": {}, "outputs": [], "source": [ - "est = TensorFlow(source_directory=script_folder,\n", - " script_params={'--data-folder': dataset.as_named_input('mnist').as_mount()},\n", - " compute_target=compute_target,\n", - " entry_script='tf_mnist.py',\n", - " framework_version='2.0',\n", - " use_gpu=True,\n", - " pip_packages=['azureml-dataset-runtime[pandas,fuse]'])" + "args = ['--data-folder', dataset.as_named_input('mnist').as_mount()]\n", + "\n", + "src = ScriptRunConfig(source_directory=script_folder,\n", + " script='tf_mnist.py',\n", + " arguments=args,\n", + " compute_target=compute_target,\n", + " environment=tf_env)" ] }, { @@ -748,7 +757,7 @@ "metadata": {}, "outputs": [], "source": [ - "htc = HyperDriveConfig(estimator=est, \n", + "htc = HyperDriveConfig(run_config=src, \n", " hyperparameter_sampling=ps, \n", " policy=policy, \n", " primary_metric_name='validation_acc', \n", @@ -1160,7 +1169,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.4" + "version": "3.6.9" }, "tags": [ "None" diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/deployment/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.yml b/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.yml similarity index 100% rename from how-to-use-azureml/ml-frameworks/tensorflow/deployment/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.yml rename to how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.yml diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/training/train-tensorflow-resume-training/utils.py b/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/utils.py similarity index 100% rename from how-to-use-azureml/ml-frameworks/tensorflow/training/train-tensorflow-resume-training/utils.py rename to how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/utils.py diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/training/train-tensorflow-resume-training/tf_mnist_with_checkpoint.py b/how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/tf_mnist_with_checkpoint.py similarity index 100% rename from how-to-use-azureml/ml-frameworks/tensorflow/training/train-tensorflow-resume-training/tf_mnist_with_checkpoint.py rename to how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/tf_mnist_with_checkpoint.py diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/training/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb b/how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb similarity index 81% rename from 
how-to-use-azureml/ml-frameworks/tensorflow/training/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb rename to how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb index 5071bd83..c3bcd342 100644 --- a/how-to-use-azureml/ml-frameworks/tensorflow/training/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb +++ b/how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb @@ -13,7 +13,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/tensorflow/training/train-tensorflow-resume-training/train-tensorflow-resume-training.png)" + "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/train-tensorflow-resume-training.png)" ] }, { @@ -30,7 +30,7 @@ "source": [ "## Prerequisites\n", "* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning (AML)\n", - "* Go through the [configuration notebook](../../../configuration.ipynb) to:\n", + "* Go through the [configuration notebook](../../../../configuration.ipynb) to:\n", " * install the AML SDK\n", " * create a workspace and its configuration file (`config.json`)\n", "* Review the [tutorial](../train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb) on single-node TensorFlow training using the SDK" @@ -277,10 +277,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create a TensorFlow estimator\n", - "The AML SDK's TensorFlow estimator enables you to easily submit TensorFlow training jobs for both single-node and distributed runs. For more information on the TensorFlow estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-tensorflow).\n", + "### Create an environment\n", "\n", - "The TensorFlow estimator also takes a `framework_version` parameter -- if no version is provided, the estimator will default to the latest version supported by AzureML. Use `TensorFlow.get_supported_versions()` to get a list of all versions supported by your current SDK version or see the [SDK documentation](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.dnn?view=azure-ml-py) for the versions supported in the most current release." + "In this tutorial, we will use one of Azure ML's curated TensorFlow environments for training. [Curated environments](https://docs.microsoft.com/azure/machine-learning/how-to-use-environments#use-a-curated-environment) are available in your workspace by default. Specifically, we will use the TensorFlow 1.13 GPU curated environment." 
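To verify which TensorFlow build a curated environment actually pins, its conda specification can be printed -- a sketch, assuming `ws` holds the workspace object:

```python
from azureml.core import Environment

tf_env = Environment.get(ws, name='AzureML-TensorFlow-1.13-GPU')

# Dump the environment's conda specification to check the pinned packages.
print(tf_env.python.conda_dependencies.serialize_to_string())
```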
] }, { @@ -289,25 +288,35 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.train.dnn import TensorFlow\n", + "from azureml.core import Environment\n", "\n", - "script_params={\n", - " '--data-folder': dataset.as_named_input('mnist').as_mount()\n", - "}\n", - "\n", - "estimator= TensorFlow(source_directory=script_folder,\n", - " compute_target=compute_target,\n", - " script_params=script_params,\n", - " entry_script='tf_mnist_with_checkpoint.py',\n", - " use_gpu=True,\n", - " pip_packages=['azureml-dataset-runtime[pandas,fuse]'])" + "tf_env = Environment.get(ws, name='AzureML-TensorFlow-1.13-GPU')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "In the above code, we passed our training data reference `ds_data` to our script's `--data-folder` argument. This will 1) mount our datastore on the remote compute and 2) provide the path to the data zip file on our datastore." + "### Configure the training job\n", + "\n", + "Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.core import ScriptRunConfig\n", + "\n", + "args = ['--data-folder', dataset.as_mount()]\n", + "\n", + "src = ScriptRunConfig(source_directory=script_folder,\n", + " script='tf_mnist_with_checkpoint.py',\n", + " arguments=args,\n", + " compute_target=compute_target,\n", + " environment=tf_env)" ] }, { @@ -315,7 +324,7 @@ "metadata": {}, "source": [ "### Submit job\n", - "### Run your experiment by submitting your estimator object. Note that this call is asynchronous." + "Run your experiment by submitting your ScriptRunConfig object. Note that this call is asynchronous." ] }, { @@ -324,7 +333,7 @@ "metadata": {}, "outputs": [], "source": [ - "run = experiment.submit(estimator)\n", + "run = experiment.submit(src)\n", "print(run)" ] }, @@ -366,7 +375,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Now let's resume the training from the above run" + "# Now let's resume training from the above run" ] }, { @@ -374,7 +383,7 @@ "metadata": {}, "source": [ "First, we will get the DataPath to the outputs directory of the above run which\n", - "contains the checkpoint files and/or model" + "contains the checkpoint files. We will create a DataReference from this DataPath and specify the compute binding as mount mode; this will tell Azure ML to mount the checkpoint files on the compute target for the run." ] }, { @@ -383,14 +392,17 @@ "metadata": {}, "outputs": [], "source": [ - "model_location = run._get_outputs_datapath()" + "from azureml.data.datapath import DataPathComputeBinding\n", + "\n", + "checkpoint_path = run._get_outputs_datapath()\n", + "checkpoint_data_ref = checkpoint_path.create_data_reference(datapath_compute_binding=DataPathComputeBinding(mode=\"mount\"))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Now, we will create a new TensorFlow estimator and pass in the model location. On passing 'resume_from' parameter, a new entry in script_params is created with key as 'resume_from' and value as the model/checkpoint files location and the location gets automatically mounted on the compute target." + "Now, we will create a new ScriptRunConfig and append the additional `'--resume-from'` argument with the corresponding checkpoint location to the `arguments` parameter." 
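For illustration only, a TF 1.x training script could consume such a flag roughly as in the following sketch (hypothetical -- the actual restore logic lives in `tf_mnist_with_checkpoint.py`):

```python
import argparse
import tensorflow as tf

parser = argparse.ArgumentParser()
parser.add_argument('--resume-from', type=str, default=None,
                    help="mounted outputs directory of a previous run")
args, _ = parser.parse_known_args()

# Stand-in for the real model graph: a single global step variable.
global_step = tf.Variable(0, name='global_step', trainable=False)
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    if args.resume_from:
        ckpt = tf.train.latest_checkpoint(args.resume_from)
        if ckpt:
            # Restore weights and step counter from the previous run.
            saver.restore(sess, ckpt)
    print('resuming at step', sess.run(global_step))
```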
] }, { @@ -399,19 +411,16 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.train.dnn import TensorFlow\n", + "from azureml.core import ScriptRunConfig\n", "\n", - "script_params={\n", - " '--data-folder': dataset.as_named_input('mnist').as_mount()\n", - "}\n", + "args = ['--data-folder', dataset.as_mount(),\n", + " '--resume-from', str(checkpoint_data_ref)]\n", "\n", - "estimator2 = TensorFlow(source_directory=script_folder,\n", - " compute_target=compute_target,\n", - " script_params=script_params,\n", - " entry_script='tf_mnist_with_checkpoint.py',\n", - " resume_from=model_location,\n", - " use_gpu=True,\n", - " pip_packages=['azureml-dataset-runtime[pandas,fuse]'])" + "src = ScriptRunConfig(source_directory=script_folder,\n", + " script='tf_mnist_with_checkpoint.py',\n", + " arguments=args,\n", + " compute_target=compute_target,\n", + " environment=tf_env)" ] }, { @@ -427,8 +436,8 @@ "metadata": {}, "outputs": [], "source": [ - "run2 = experiment.submit(estimator2)\n", - "print(run2)" + "resumed_run = experiment.submit(src)\n", + "print(resumed_run)" ] }, { @@ -437,7 +446,7 @@ "metadata": {}, "outputs": [], "source": [ - "run2.wait_for_completion(show_output=True)" + "resumed_run.wait_for_completion(show_output=True)" ] } ], @@ -478,7 +487,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.4" + "version": "3.6.9" }, "msauthor": "hesuri", "tags": [ diff --git a/how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/utils.py b/how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/utils.py similarity index 100% rename from how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/utils.py rename to how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/utils.py diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/training/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.yml b/how-to-use-azureml/ml-frameworks/tensorflow/training/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.yml deleted file mode 100644 index cd40fc99..00000000 --- a/how-to-use-azureml/ml-frameworks/tensorflow/training/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.yml +++ /dev/null @@ -1,12 +0,0 @@ -name: hyperparameter-tune-and-warm-start-with-tensorflow -dependencies: -- numpy -- matplotlib -- pip: - - azureml-sdk - - azureml-widgets - - pandas - - keras - - tensorflow - - matplotlib - - fuse diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/training/train-tensorflow-resume-training/train-tensorflow-resume-training.yml b/how-to-use-azureml/ml-frameworks/tensorflow/training/train-tensorflow-resume-training/train-tensorflow-resume-training.yml deleted file mode 100644 index e62efebc..00000000 --- a/how-to-use-azureml/ml-frameworks/tensorflow/training/train-tensorflow-resume-training/train-tensorflow-resume-training.yml +++ /dev/null @@ -1,10 +0,0 @@ -name: train-tensorflow-resume-training -dependencies: -- pip: - - azureml-sdk - - azureml-widgets - - pandas - - keras - - tensorflow==1.14.0 - - matplotlib - - fuse diff --git a/how-to-use-azureml/reinforcement-learning/cartpole-on-compute-instance/cartpole_ci.ipynb b/how-to-use-azureml/reinforcement-learning/cartpole-on-compute-instance/cartpole_ci.ipynb index 2f77840f..27e840b8 100644 --- 
a/how-to-use-azureml/reinforcement-learning/cartpole-on-compute-instance/cartpole_ci.ipynb +++ b/how-to-use-azureml/reinforcement-learning/cartpole-on-compute-instance/cartpole_ci.ipynb @@ -133,7 +133,7 @@ " if not os.path.isfile(\"/mnt/azmnt/.nbvm\"):\n", " return None\n", " with open(\"/mnt/azmnt/.nbvm\", 'r') as nbvm_file:\n", - " return {key:value for (key, value) in line.strip().split('=') for line in nbvm_file}\n" + " return {key: value for key, value in (line.strip().split('=', 1) for line in nbvm_file if '=' in line)}\n" ] }, { diff --git a/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb b/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb index 514f0a08..c0cd1e50 100644 --- a/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb +++ b/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb @@ -100,7 +100,7 @@ "\n", "# Check core SDK version number\n", "\n", - "print(\"This notebook was created using SDK version 1.14.0, you are currently running version\", azureml.core.VERSION)" + "print(\"This notebook was created using SDK version 1.15.0, you are currently running version\", azureml.core.VERSION)" ] }, { @@ -378,7 +378,7 @@ "metadata": {}, "outputs": [], "source": [ - "file_name = 'outputs/myfile.txt'\n", + "file_name = 'logging-api/myfile.txt'\n", "\n", "with open(file_name, \"w\") as f:\n", " f.write('This is an output file that will be uploaded.\\n')\n", diff --git a/how-to-use-azureml/training-with-deep-learning/export-run-history-to-tensorboard/export-run-history-to-tensorboard.ipynb b/how-to-use-azureml/track-and-monitor-experiments/tensorboard/export-run-history-to-tensorboard/export-run-history-to-tensorboard.ipynb similarity index 91% rename from how-to-use-azureml/training-with-deep-learning/export-run-history-to-tensorboard/export-run-history-to-tensorboard.ipynb rename to how-to-use-azureml/track-and-monitor-experiments/tensorboard/export-run-history-to-tensorboard/export-run-history-to-tensorboard.ipynb index b350103c..7dcf5a8c 100644 --- a/how-to-use-azureml/training-with-deep-learning/export-run-history-to-tensorboard/export-run-history-to-tensorboard.ipynb +++ b/how-to-use-azureml/track-and-monitor-experiments/tensorboard/export-run-history-to-tensorboard/export-run-history-to-tensorboard.ipynb @@ -9,24 +9,6 @@ "Licensed under the MIT License." ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/training-with-deep-learning/export-run-history-to-tensorboard/export-run-history-to-tensorboard.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Export Run History as Tensorboard logs\n", - "\n", - "1. Run some training and log some metrics into Run History\n", - "2. Export the run history to some directory as Tensorboard logs\n", - "3. 
Launch a local Tensorboard to view the run history" - ] - }, { "cell_type": "markdown", "metadata": {}, diff --git a/how-to-use-azureml/training-with-deep-learning/export-run-history-to-tensorboard/export-run-history-to-tensorboard.yml b/how-to-use-azureml/track-and-monitor-experiments/tensorboard/export-run-history-to-tensorboard/export-run-history-to-tensorboard.yml similarity index 100% rename from how-to-use-azureml/training-with-deep-learning/export-run-history-to-tensorboard/export-run-history-to-tensorboard.yml rename to how-to-use-azureml/track-and-monitor-experiments/tensorboard/export-run-history-to-tensorboard/export-run-history-to-tensorboard.yml diff --git a/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard.ipynb b/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard/tensorboard.ipynb similarity index 100% rename from how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard.ipynb rename to how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard/tensorboard.ipynb diff --git a/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard.yml b/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard/tensorboard.yml similarity index 100% rename from how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard.yml rename to how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard/tensorboard.yml diff --git a/how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-projects-local/train-projects-local.yml b/how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-projects-local/train-projects-local.yml deleted file mode 100644 index f69f333f..00000000 --- a/how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-projects-local/train-projects-local.yml +++ /dev/null @@ -1,7 +0,0 @@ -name: train-projects-local -dependencies: -- scikit-learn -- pip: - - azureml-sdk - - scikit-learn - - azureml-mlflow diff --git a/how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-projects-remote/train-projects-remote.yml b/how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-projects-remote/train-projects-remote.yml deleted file mode 100644 index 708058f3..00000000 --- a/how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-projects-remote/train-projects-remote.yml +++ /dev/null @@ -1,7 +0,0 @@ -name: train-projects-remote -dependencies: -- scikit-learn -- pip: - - azureml-sdk - - scikit-learn - - azureml-mlflow diff --git a/how-to-use-azureml/training-with-deep-learning/README.md b/how-to-use-azureml/training-with-deep-learning/README.md deleted file mode 100644 index 5090a228..00000000 --- a/how-to-use-azureml/training-with-deep-learning/README.md +++ /dev/null @@ -1,8 +0,0 @@ -## Azure Machine Learning service training examples - -These examples show you: - -1. [Train using Keras and tune hyperparameters using Hyperdrive](train-hyperparameter-tune-deploy-with-keras) -2. 
[Export run history records to Tensorboard](export-run-history-to-tensorboard) - - ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/training-with-deep-learning/README.png) diff --git a/how-to-use-azureml/training/train-on-local/train-on-local.ipynb b/how-to-use-azureml/training/train-on-local/train-on-local.ipynb index db330960..478f7857 100644 --- a/how-to-use-azureml/training/train-on-local/train-on-local.ipynb +++ b/how-to-use-azureml/training/train-on-local/train-on-local.ipynb @@ -699,7 +699,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.7" + "version": "3.6.5" }, "tags": [ "None" @@ -707,5 +707,5 @@ "task": "Train a model locally" }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } \ No newline at end of file diff --git a/how-to-use-azureml/work-with-data/datasets-tutorial/labeled-datasets/labeled-datasets.ipynb b/how-to-use-azureml/work-with-data/datasets-tutorial/labeled-datasets/labeled-datasets.ipynb index a0f9fcd8..3ae1ec69 100644 --- a/how-to-use-azureml/work-with-data/datasets-tutorial/labeled-datasets/labeled-datasets.ipynb +++ b/how-to-use-azureml/work-with-data/datasets-tutorial/labeled-datasets/labeled-datasets.ipynb @@ -271,7 +271,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Configure Estimator for training\n", + "### Configure training job\n", "\n", "You can ask the system to build a conda environment based on your dependency specification. Once the environment is built, and if you don't change your dependencies, it will be reused in subsequent runs." ] @@ -296,14 +296,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "An estimator object is used to submit the run. Azure Machine Learning has pre-configured estimators for common machine learning frameworks, as well as generic Estimator. Create a generic estimator for by specifying\n", + "A ScriptRunConfig object is used to submit the run. Create a ScriptRunConfig by specifying\n", "\n", - "* The name of the estimator object, `est`\n", "* The directory that contains your scripts. All the files in this directory are uploaded into the cluster nodes for execution. \n", "* The training script name, train.py\n", "* The input dataset for training\n", "* The compute target. In this case you will use the AmlCompute you created\n", - "* The environment definition for the experiment" + "* The environment for the experiment" ] }, { @@ -312,13 +311,13 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.train.estimator import Estimator\n", + "from azureml.core import ScriptRunConfig\n", "\n", - "est = Estimator(source_directory=script_folder, \n", - " entry_script='train.py',\n", - " inputs=[crack_labels.as_named_input('crack_labels')],\n", - " compute_target=compute_target,\n", - " environment_definition= conda_env)" + "src = ScriptRunConfig(source_directory=script_folder,\n", + " script='train.py',\n", + " arguments=[crack_labels.as_named_input('crack_labels')],\n", + " compute_target=compute_target,\n", + " environment=conda_env)" ] }, { @@ -327,7 +326,7 @@ "source": [ "### Submit job to run\n", "\n", - "Submit the estimator to the Azure ML experiment to kick off the execution." + "Submit the ScriptRunConfig to the Azure ML experiment to kick off the execution." 
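Inside `train.py`, the dataset passed above can then be looked up by the name given to `as_named_input()` -- a sketch under that assumption (the repo's actual script may load the data differently):

```python
# train.py (sketch): retrieve the labeled dataset from the run context.
from azureml.core import Run

run = Run.get_context()

# 'crack_labels' matches the name used with as_named_input() above.
labeled_ds = run.input_datasets['crack_labels']

# Labeled datasets can be materialized as a pandas dataframe for training.
df = labeled_ds.to_pandas_dataframe()
print(df.head())
```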
] }, { @@ -336,7 +335,7 @@ "metadata": {}, "outputs": [], "source": [ - "run = exp.submit(est)" + "run = exp.submit(src)" ] }, { diff --git a/how-to-use-azureml/work-with-data/datasets-tutorial/scriptrun-with-data-input-output/how-to-use-scriptrun.ipynb b/how-to-use-azureml/work-with-data/datasets-tutorial/scriptrun-with-data-input-output/how-to-use-scriptrun.ipynb index a34f9aeb..9b361f1b 100644 --- a/how-to-use-azureml/work-with-data/datasets-tutorial/scriptrun-with-data-input-output/how-to-use-scriptrun.ipynb +++ b/how-to-use-azureml/work-with-data/datasets-tutorial/scriptrun-with-data-input-output/how-to-use-scriptrun.ipynb @@ -20,9 +20,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# How to use ScriptRun with data input and output\n", + "# How to configure a training run with data input and output\n", "\n", - "This notebook shows how to use [ScriptRun](https://docs.microsoft.com/python/api/azureml-core/azureml.core.script_run.scriptrun?view=azure-ml-py) with input and output. A run submitted with ScriptRunConfig represents a single trial in an experiment. Submitting the run returns a ScriptRun object, which can be used to monitor the asynchronous execution of the run, log metrics and store output of the run, and analyze results and access artifacts generated by the run.\n", + "This notebook shows how to use [ScriptRunConfig](https://docs.microsoft.com/python/api/azureml-core/azureml.core.scriptrunconfig?view=azure-ml-py) with input and output. A run submitted with ScriptRunConfig represents a single trial in an experiment. Submitting the run returns a ScriptRun object, which can be used to monitor the asynchronous execution of the run, log metrics and store output of the run, and analyze results and access artifacts generated by the run.\n", "\n", "\n", "## Prerequisite:\n", @@ -66,7 +66,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "If we could not find the cluster with the given name, then we will create a new cluster here. We will create an `AmlCompute` cluster of `STANDARD_NC6` GPU VMs. This process is broken down into 3 steps:\n", + "If we could not find the cluster with the given name, then we will create a new cluster here. We will create an `AmlCompute` cluster of `STANDARD_D2_V2` CPU VMs. This process is broken down into 3 steps:\n", "1. create the configuration (this step is local and only takes a second)\n", "2. create the cluster (this step will take about **20 seconds**)\n", "3. provision the VMs to bring the cluster to the initial size (of 1 in this case). This step will take about **3-5 minutes** and is providing only sparse output in the process. 
Please make sure to wait until the call returns before moving to the next cell" @@ -82,31 +82,31 @@ "from azureml.core.compute_target import ComputeTargetException\n", "\n", "# choose a name for your cluster\n", - "cluster_name = \"amlcomp\"\n", + "cluster_name = \"cpu-cluster\"\n", "\n", "try:\n", - " cpu_cluster = ComputeTarget(workspace=ws, name=cluster_name)\n", + " compute_target = ComputeTarget(workspace=ws, name=cluster_name)\n", " print('Found existing compute target')\n", "except ComputeTargetException:\n", " print('Creating a new compute target...')\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', max_nodes=4)\n", + " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2', max_nodes=4)\n", "\n", " # create the cluster\n", - " cpu_cluster = ComputeTarget.create(ws, cluster_name, compute_config)\n", + " compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n", "\n", " # can poll for a minimum number of nodes and for a specific timeout. \n", " # if no min node count is provided it uses the scale settings for the cluster\n", - " cpu_cluster.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n", + " compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n", "\n", "# use get_status() to get a detailed status for the current cluster. \n", - "print(cpu_cluster.get_status().serialize())" + "print(compute_target.get_status().serialize())" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Now that you have created the compute target, let's see what the workspace's `compute_targets` property returns. You should now see one entry named 'mlc' of type `AmlCompute`." + "Now that you have created the compute target, let's see what the workspace's `compute_targets` property returns. You should now see one entry named 'cpu-cluster' of type `AmlCompute`." 
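A one-liner sketch of that check, assuming `ws` is the workspace handle:

```python
# Enumerate the workspace's registered compute targets and their types.
for name, target in ws.compute_targets.items():
    print(name, '->', target.type)
```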
] }, { @@ -192,7 +192,7 @@ "\n", "input_data = Dataset.File.from_files(def_blob_store.path('script-run/iris.csv')).as_named_input('input').as_mount()\n", "\n", - "# output is configured to write the result back to def_blob_store, under\"may_sample/outputdataset\" folder\n", + "# output is configured to write the result back to def_blob_store, under \"sample/outputdataset\" folder\n", "# learn more about options to configure the output, run 'help(OutputFileDatasetConfig)'\n", "output = OutputFileDatasetConfig(destination=(def_blob_store, 'sample/outputdataset'))" ] @@ -223,13 +223,9 @@ "src = ScriptRunConfig(source_directory=source_directory, \n", " script='dummy_train.py', \n", " # to mount the dataset on the remote compute and pass the mounted path as an argument to the training script\n", - " arguments =[input_data, output])\n", - "\n", - "src.run_config.framework = 'python'\n", - "src.run_config.target = cpu_cluster.name\n", - "\n", - "# Set environment\n", - "src.run_config.environment = myenv" + " arguments =[input_data, output],\n", + " compute_target=compute_target,\n", + " environment=myenv)" ] }, { diff --git a/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.ipynb b/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.ipynb index b23b19b8..bdcf5ec3 100644 --- a/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.ipynb +++ b/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.ipynb @@ -268,19 +268,53 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Configure and use datasets as the input to Estimator" + "### Create an environment\n", + "\n", + "Define a conda environment YAML file with your training script dependencies and create an Azure ML environment." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%writefile conda_dependencies.yml\n", + "\n", + "dependencies:\n", + "- python=3.6.2\n", + "- scikit-learn\n", + "- pip:\n", + " - azureml-defaults\n", + " - packaging" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.core import Environment\n", + "\n", + "sklearn_env = Environment.from_conda_specification(name = 'sklearn-env', file_path = './conda_dependencies.yml')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "An estimator is a configuration object you submit to Azure Machine Learning to instruct how to set up the remote environment. Azure Machine Learning has pre-configured estimators for common machine learning frameworks, as well as generic Estimator. Create a SKLearn estimator by specifying:\n", - "\n", - "* The name of the estimator object, `est`\n", + "### Configure training run" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A ScriptRunConfig object specifies the configuration details of your training job, including your training script, environment to use, and the compute target to run on. Specify the following in your script run configuration:\n", "* The directory that contains your scripts. All the files in this directory are uploaded into the cluster nodes for execution. \n", - "* The training script name, train_titanic.py\n", - "* The input dataset for training. `as_named_input()` is required so that the input dataset can be referenced by the assigned name in your training script. 
\n", + "* The training script name, train_iris.py\n", + "* The input dataset for training, passed as an argument to your training script. `as_named_input()` is required so that the input dataset can be referenced by the assigned name in your training script. \n", "* The compute target. In this case you will use the AmlCompute you created\n", "* The environment definition for the experiment" ] @@ -291,14 +325,13 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.train.sklearn import SKLearn\n", + "from azureml.core import ScriptRunConfig\n", "\n", - "est = SKLearn(source_directory=script_folder, \n", - " entry_script='train_iris.py', \n", - " # pass dataset object as an input with name 'titanic'\n", - " inputs=[dataset.as_named_input('iris')],\n", - " pip_packages=['azureml-dataset-runtime[fuse]', 'packaging'],\n", - " compute_target=compute_target) " + "src = ScriptRunConfig(source_directory=script_folder,\n", + " script='train_iris.py',\n", + " arguments=[dataset.as_named_input('iris')],\n", + " compute_target=compute_target,\n", + " environment=sklearn_env)" ] }, { @@ -306,7 +339,7 @@ "metadata": {}, "source": [ "### Submit job to run\n", - "Submit the estimator to the Azure ML experiment to kick off the execution." + "Submit the ScriptRunConfig to the Azure ML experiment to kick off the execution." ] }, { @@ -315,7 +348,7 @@ "metadata": {}, "outputs": [], "source": [ - "run = exp.submit(est)" + "run = exp.submit(src)" ] }, { @@ -420,6 +453,7 @@ "\n", "import os\n", "import glob\n", + "import argparse\n", "\n", "from azureml.core.run import Run\n", "from sklearn.linear_model import Ridge\n", @@ -435,10 +469,15 @@ "\n", "import numpy as np\n", "\n", + "parser = argparse.ArgumentParser()\n", + "parser.add_argument('--data-folder', type=str, help='training dataset')\n", + "args = parser.parse_args()\n", + "\n", "os.makedirs('./outputs', exist_ok=True)\n", "\n", + "base_path = args.data_folder\n", + "\n", "run = Run.get_context()\n", - "base_path = run.input_datasets['diabetes']\n", "\n", "X = np.load(glob.glob(os.path.join(base_path, '**/features.npy'), recursive=True)[0])\n", "y = np.load(glob.glob(os.path.join(base_path, '**/labels.npy'), recursive=True)[0])\n", @@ -479,23 +518,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "You can ask the system to build a conda environment based on your dependency specification. Once the environment is built, and if you don't change your dependencies, it will be reused in subsequent runs." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from azureml.core import Environment\n", - "from azureml.core.conda_dependencies import CondaDependencies\n", + "Now configure your run. We will reuse the same `sklearn_env` environment from the previous run. Once the environment is built, and if you don't change your dependencies, it will be reused in subsequent runs. \n", "\n", - "conda_env = Environment('conda-env')\n", - "conda_env.python.conda_dependencies = CondaDependencies.create(pip_packages=['azureml-core',\n", - " 'azureml-dataset-runtime[pandas,fuse]',\n", - " 'scikit-learn',\n", - " 'packaging'])" + "We will pass in the DatasetConsumptionConfig of our FileDataset to the `'--data-folder'` argument of the script. Azure ML will resolve this to mount point of the data on the compute target, which we parse in the training script." 
 ]
 },
 {
@@ -509,11 +534,9 @@
 "src = ScriptRunConfig(source_directory=script_folder, \n",
 " script='train_diabetes.py', \n",
 " # to mount the dataset on the remote compute and pass the mounted path as an argument to the training script\n",
- " arguments =[dataset.as_named_input('diabetes').as_mount()])\n",
- "\n",
- "src.run_config.framework = 'python'\n",
- "src.run_config.environment = conda_env\n",
- "src.run_config.target = compute_target.name"
+ " arguments=['--data-folder', dataset.as_mount()],\n",
+ " compute_target=compute_target,\n",
+ " environment=sklearn_env)"
 ]
 },
 {
diff --git a/index.md b/index.md
index f1ac1234..98451e69 100644
--- a/index.md
+++ b/index.md
@@ -54,25 +54,25 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an
 |Title| Task | Dataset | Training Compute | Deployment Target | ML Framework | Tags |
 |:----|:-----|:-------:|:----------------:|:-----------------:|:------------:|:------------:|
-| [Train a model with hyperparameter tuning](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/chainer/deployment/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.ipynb) | Train a Convolutional Neural Network (CNN) | MNIST | AML Compute | Azure Container Instance | Chainer | None |
-| [Distributed Training with Chainer](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/chainer/training/distributed-chainer/distributed-chainer.ipynb) | Use the Chainer estimator to perform distributed training | MNIST | AML Compute | None | Chainer | None |
-| [Training with hyperparameter tuning using PyTorch](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/pytorch/deployment/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb) | Train an image classification model using transfer learning with the PyTorch estimator | ImageNet | AML Compute | Azure Container Instance | PyTorch | None |
-| [Distributed PyTorch](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/pytorch/training/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb) | Train a model using the distributed training via Horovod | MNIST | AML Compute | None | PyTorch | None |
-| [Distributed training with PyTorch](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/pytorch/training/distributed-pytorch-with-nccl-gloo/distributed-pytorch-with-nccl-gloo.ipynb) | Train a model using distributed training via Nccl/Gloo | MNIST | AML Compute | None | PyTorch | None |
-| [Training and hyperparameter tuning with Scikit-learn](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/scikit-learn/training/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.ipynb) | Train a support vector machine (SVM) to perform classification | Iris | AML Compute | None | Scikit-learn | None |
-| [Training and hyperparameter tuning using the TensorFlow estimator](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/tensorflow/deployment/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb) | Train a deep neural network | MNIST | AML Compute | Azure Container Instance | TensorFlow | None |
-| [Distributed training using TensorFlow with Horovod](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/tensorflow/training/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.ipynb) | Use the TensorFlow estimator to train a word2vec model | None | AML Compute | None | TensorFlow | None |
-| [Distributed TensorFlow with parameter server](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/tensorflow/training/distributed-tensorflow-with-parameter-server/distributed-tensorflow-with-parameter-server.ipynb) | Use the TensorFlow estimator to train a model using distributed training | MNIST | AML Compute | None | TensorFlow | None |
-| [Hyperparameter tuning and warm start using the TensorFlow estimator](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/tensorflow/training/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb) | Train a deep neural network | MNIST | AML Compute | Azure Container Instance | TensorFlow | None |
-| [Resuming a model](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/tensorflow/training/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb) | Resume a model in TensorFlow from a previously submitted run | MNIST | AML Compute | None | TensorFlow | None |
+| [Distributed Training with Chainer](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/chainer/distributed-chainer/distributed-chainer.ipynb) | Use the Chainer estimator to perform distributed training | MNIST | AML Compute | None | Chainer | None |
+| [Train a model with hyperparameter tuning](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/chainer/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.ipynb) | Train a Convolutional Neural Network (CNN) | MNIST | AML Compute | Azure Container Instance | Chainer | None |
+| [Train a DNN using hyperparameter tuning and deploying with Keras](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb) | Create a multi-class classifier | MNIST | AML Compute | Azure Container Instance | TensorFlow | None |
+| [Distributed PyTorch](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb) | Train a model using distributed training via Horovod | MNIST | AML Compute | None | PyTorch | None |
+| [Distributed training with PyTorch](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-nccl-gloo/distributed-pytorch-with-nccl-gloo.ipynb) | Train a model using distributed training via NCCL/Gloo | MNIST | AML Compute | None | PyTorch | None |
+| [Training with hyperparameter tuning using PyTorch](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb) | Train an image classification model using transfer learning with the PyTorch estimator | ImageNet | AML Compute | Azure Container Instance | PyTorch | None |
+| [Training and hyperparameter tuning with Scikit-learn](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.ipynb) | Train a support vector machine (SVM) to perform classification | Iris | AML Compute | None | Scikit-learn | None |
+| [Distributed training using TensorFlow with Horovod](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.ipynb) | Use the TensorFlow estimator to train a word2vec model | None | AML Compute | None | TensorFlow | None |
+| [Distributed TensorFlow with parameter server](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-parameter-server/distributed-tensorflow-with-parameter-server.ipynb) | Use the TensorFlow estimator to train a model using distributed training | MNIST | AML Compute | None | TensorFlow | None |
+| [Hyperparameter tuning and warm start using the TensorFlow estimator](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb) | Train a deep neural network | MNIST | AML Compute | Azure Container Instance | TensorFlow | None |
+| [Training and hyperparameter tuning using the TensorFlow estimator](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb) | Train a deep neural network | MNIST | AML Compute | Azure Container Instance | TensorFlow | None |
+| [Resuming a model](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb) | Resume a model in TensorFlow from a previously submitted run | MNIST | AML Compute | None | TensorFlow | None |
+| [Using Tensorboard](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/tensorboard/export-run-history-to-tensorboard/export-run-history-to-tensorboard.ipynb) | Export the run history as Tensorboard logs | None | None | None | TensorFlow | None |
 | [Training in Spark](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training/train-in-spark/train-in-spark.ipynb) | Submitting a run on a Spark cluster | None | HDI cluster | None | PySpark | None |
 | [Train on Azure Machine Learning Compute](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb) | Submit a run on Azure Machine Learning Compute. | Diabetes | AML Compute | None | None | None |
 | [Train on local compute](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training/train-on-local/train-on-local.ipynb) | Train a model locally | Diabetes | Local | None | None | None |
 | [Train in a remote Linux virtual machine](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training/train-on-remote-vm/train-on-remote-vm.ipynb) | Configure and execute a run | Diabetes | Data Science Virtual Machine | None | None | None |
-| [Using Tensorboard](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training-with-deep-learning/export-run-history-to-tensorboard/export-run-history-to-tensorboard.ipynb) | Export the run history as Tensorboard logs | None | None | None | TensorFlow | None |
-| [Train a DNN using hyperparameter tuning and deploying with Keras](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb) | Create a multi-class classifier | MNIST | AML Compute | Azure Container Instance | TensorFlow | None |
 | [Managing your training runs](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/manage-runs/manage-runs.ipynb) | Monitor and complete runs | None | Local | None | None | None |
-| [Tensorboard integration with run history](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard.ipynb) | Run a TensorFlow job and view its Tensorboard output live | None | Local, DSVM, AML Compute | None | TensorFlow | None |
+| [Tensorboard integration with run history](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard/tensorboard.ipynb) | Run a TensorFlow job and view its Tensorboard output live | None | Local, DSVM, AML Compute | None | TensorFlow | None |
 | [Use MLflow with AML for a local training run](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-local/train-local.ipynb) | Use MLflow tracking APIs together with Azure Machine Learning for storing your metrics and artifacts | Diabetes | Local | None | None | None |
 | [Use MLflow with AML for a remote training run](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-remote/train-remote.ipynb) | Use MLflow tracking APIs together with AML for storing your metrics and artifacts | Diabetes | AML Compute | None | None | None |
@@ -100,17 +100,12 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an
 | [upload-fairness-dashboard](https://github.com/Azure/MachineLearningNotebooks/blob/master//contrib/fairness/upload-fairness-dashboard.ipynb) | | | | | | |
 | [azure-ml-with-nvidia-rapids](https://github.com/Azure/MachineLearningNotebooks/blob/master//contrib/RAPIDS/azure-ml-with-nvidia-rapids.ipynb) | | | | | | |
 | [auto-ml-continuous-retraining](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb) | | | | | | |
-| [auto-ml-regression-model-proxy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/experimental/regression/auto-ml-regression-model-proxy.ipynb) | | | | | | |
+| [auto-ml-regression-model-proxy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/experimental/regression-model-proxy/auto-ml-regression-model-proxy.ipynb) | | | | | | |
 | [auto-ml-forecasting-beer-remote](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb) | | | | | | |
 | [auto-ml-forecasting-energy-demand](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb) | | | | | | |
 | [auto-ml-regression](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/regression/auto-ml-regression.ipynb) | | | | | | |
-| [build-model-run-history-03](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-databricks/amlsdk/build-model-run-history-03.ipynb) | | | | | | |
-| [deploy-to-aci-04](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-databricks/amlsdk/deploy-to-aci-04.ipynb) | | | | | | |
-| [ingest-data-02](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-databricks/amlsdk/ingest-data-02.ipynb) | | | | | | |
-| [installation-and-configuration-01](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-databricks/amlsdk/installation-and-configuration-01.ipynb) | | | | | | |
 | [automl-databricks-local-01](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-databricks/automl/automl-databricks-local-01.ipynb) | | | | | | |
 | [automl-databricks-local-with-deployment](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-databricks/automl/automl-databricks-local-with-deployment.ipynb) | | | | | | |
-| [aml-pipelines-use-databricks-as-compute-target](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-databricks/databricks-as-remote-compute-target/aml-pipelines-use-databricks-as-compute-target.ipynb) | | | | | | |
 | [multi-model-register-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/deploy-multi-model/multi-model-register-and-deploy.ipynb) | | | | | | |
 | [register-model-deploy-local-advanced](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/deploy-to-local/register-model-deploy-local-advanced.ipynb) | | | | | | |
 | [enable-app-insights-in-production-service](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb) | | | | | | |
diff --git a/setup-environment/configuration.ipynb b/setup-environment/configuration.ipynb
index dceaba67..3e285e2a 100644
--- a/setup-environment/configuration.ipynb
+++ b/setup-environment/configuration.ipynb
@@ -102,7 +102,7 @@
 "source": [
 "import azureml.core\n",
 "\n",
- "print(\"This notebook was created using version 1.14.0 of the Azure ML SDK\")\n",
+ "print(\"This notebook was created using version 1.15.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/tutorials/README.md b/tutorials/README.md index 4d6ee70c..adb137e6 100644 --- a/tutorials/README.md +++ b/tutorials/README.md @@ -16,6 +16,7 @@ The following tutorials are intended to provide an introductory overview of Azur | Tutorial | Description | Notebook | Task | Framework | | --- | --- | --- | --- | --- | +| [Get Started (day1)](https://docs.microsoft.com/azure/machine-learning/tutorial-1st-experiment-sdk-setup-local) | Learn the fundamental concepts of Azure Machine Learning to help onboard your existing code to Azure Machine Learning. This tutorial focuses heavily on submitting machine learning jobs to scalable cloud-based compute clusters. | [get-started-day1](get-started-day1/day1-part1-setup.ipynb) | Learn Azure Machine Learning Concepts | PyTorch | [Train your first ML Model](https://docs.microsoft.com/azure/machine-learning/tutorial-1st-experiment-sdk-train) | Learn the foundational design patterns in Azure Machine Learning and train a scikit-learn model based on a diabetes data set. | [tutorial-quickstart-train-model.ipynb](create-first-ml-experiment/tutorial-1st-experiment-sdk-train.ipynb) | Regression | Scikit-Learn | [Train an image classification model](https://docs.microsoft.com/azure/machine-learning/tutorial-train-models-with-aml) | Train a scikit-learn image classification model. | [img-classification-part1-training.ipynb](image-classification-mnist-data/img-classification-part1-training.ipynb) | Image Classification | Scikit-Learn | [Deploy an image classification model](https://docs.microsoft.com/azure/machine-learning/tutorial-deploy-models-with-aml) | Deploy a scikit-learn image classification model to Azure Container Instances. | [img-classification-part2-deploy.ipynb](image-classification-mnist-data/img-classification-part2-deploy.ipynb) | Image Classification | Scikit-Learn diff --git a/tutorials/image-classification-mnist-data/img-classification-part1-training.ipynb b/tutorials/image-classification-mnist-data/img-classification-part1-training.ipynb index 1f95948a..c969a52a 100644 --- a/tutorials/image-classification-mnist-data/img-classification-part1-training.ipynb +++ b/tutorials/image-classification-mnist-data/img-classification-part1-training.ipynb @@ -272,7 +272,7 @@ "For this task, you submit the job to run on the remote training cluster you set up earlier. To submit a job you:\n", "* Create a directory\n", "* Create a training script\n", - "* Create an estimator object\n", + "* Create a script run configuration\n", "* Submit the job \n", "\n", "### Create a directory\n", @@ -400,16 +400,15 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create an estimator\n", + "### Configure the training job\n", "\n", - "An estimator object is used to submit the run. Azure Machine Learning has pre-configured estimators for common machine learning frameworks, as well as generic Estimator. Create an estimator by specifying\n", + "Create a ScriptRunConfig object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on. Configure the ScriptRunConfig by specifying:\n", "\n", - "* The name of the estimator object, `est`\n", "* The directory that contains your scripts. All the files in this directory are uploaded into the cluster nodes for execution. \n", "* The compute target. 
In this case you will use the AmlCompute you created\n", "* The training script name, train.py\n", "* An environment that contains the libraries needed to run the script\n", - "* Parameters required from the training script. \n", + "* Arguments required from the training script. \n", "\n", "In this tutorial, the target is AmlCompute. All files in the script folder are uploaded into the cluster nodes for execution. The data_folder is set to use the dataset.\n", "\n", @@ -441,7 +440,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Then, create the estimator by specifying the training script, compute target and environment." + "Then, create the ScriptRunConfig by specifying the training script, compute target and environment." ] }, { @@ -454,19 +453,15 @@ }, "outputs": [], "source": [ - "from azureml.train.estimator import Estimator\n", + "from azureml.core import ScriptRunConfig\n", "\n", - "script_params = {\n", - " # to mount files referenced by mnist dataset\n", - " '--data-folder': mnist_file_dataset.as_named_input('mnist_opendataset').as_mount(),\n", - " '--regularization': 0.5\n", - "}\n", + "args = ['--data-folder', mnist_file_dataset.as_mount(), '--regularization', 0.5]\n", "\n", - "est = Estimator(source_directory=script_folder,\n", - " script_params=script_params,\n", - " compute_target=compute_target,\n", - " environment_definition=env,\n", - " entry_script='train.py')" + "src = ScriptRunConfig(source_directory=script_folder,\n", + " script='train.py', \n", + " arguments=args,\n", + " compute_target=compute_target,\n", + " environment=env)" ] }, { @@ -475,7 +470,7 @@ "source": [ "### Submit the job to the cluster\n", "\n", - "Run the experiment by submitting the estimator object. And you can navigate to Azure portal to monitor the run." + "Run the experiment by submitting the ScriptRunConfig object. And you can navigate to Azure portal to monitor the run." ] }, { @@ -490,7 +485,7 @@ }, "outputs": [], "source": [ - "run = exp.submit(config=est)\n", + "run = exp.submit(config=src)\n", "run" ] }, @@ -502,11 +497,11 @@ "\n", "## Monitor a remote run\n", "\n", - "In total, the first run takes **approximately 10 minutes**. But for subsequent runs, as long as the dependencies (`conda_packages` parameter in the above estimator constructor) don't change, the same image is reused and hence the container start up time is much faster.\n", + "In total, the first run takes **approximately 10 minutes**. But for subsequent runs, as long as the dependencies in the Azure ML environment don't change, the same image is reused and hence the container start up time is much faster.\n", "\n", "Here is what's happening while you wait:\n", "\n", - "- **Image creation**: A Docker image is created matching the Python environment specified by the estimator. The image is built and stored in the ACR (Azure Container Registry) associated with your workspace. Image creation and uploading takes **about 5 minutes**. \n", + "- **Image creation**: A Docker image is created matching the Python environment specified by the Azure ML environment. The image is built and stored in the ACR (Azure Container Registry) associated with your workspace. Image creation and uploading takes **about 5 minutes**. \n", "\n", " This stage happens once for each Python environment since the container is cached for subsequent runs. During image creation, logs are streamed to the run history. 
You can monitor the image creation progress using these logs.\n", "\n", @@ -687,7 +682,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.6" + "version": "3.6.9" }, "msauthor": "roastala", "network_required": false
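The recurring change across the notebook hunks in this diff is the move from `Estimator`-based submission to `ScriptRunConfig`, visible in both the `train_diabetes` and MNIST training notebooks. For reference, here is a minimal end-to-end sketch of the new submission pattern. It assumes a workspace `config.json` on disk and an already-provisioned compute cluster; the folder, cluster, environment, and experiment names below are illustrative, not taken from the notebooks.

```python
from azureml.core import Environment, Experiment, ScriptRunConfig, Workspace

# Connect to the workspace; assumes a config.json written by Workspace.write_config().
ws = Workspace.from_config()

# A single Environment object replaces the estimator's conda_packages /
# environment_definition parameters ("AzureML-Tutorial" is an illustrative
# curated-environment name; any registered environment works).
env = Environment.get(workspace=ws, name="AzureML-Tutorial")

# The estimator's script_params dict becomes a flat arguments list.
src = ScriptRunConfig(source_directory="./scripts",      # illustrative folder
                      script="train.py",
                      arguments=["--regularization", 0.5],
                      compute_target="cpu-cluster",      # illustrative cluster name
                      environment=env)

# Submit the configured run and stream logs until it completes.
run = Experiment(workspace=ws, name="scriptrunconfig-demo").submit(src)
run.wait_for_completion(show_output=True)
```

Note how `entry_script` becomes `script`, and the old `src.run_config.*` assignments (framework, environment, target) collapse into `ScriptRunConfig` constructor arguments, as the `train_diabetes` hunk above shows.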