diff --git a/configuration.ipynb b/configuration.ipynb index 5fb17e42..f55db70a 100644 --- a/configuration.ipynb +++ b/configuration.ipynb @@ -103,7 +103,7 @@ "source": [ "import azureml.core\n", "\n", - "print(\"This notebook was created using version 1.27.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.28.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, @@ -254,6 +254,8 @@ "\n", "Many of the sample notebooks use Azure ML managed compute (AmlCompute) to train models using a dynamically scalable pool of compute. In this section you will create default compute clusters for use by the other notebooks and any other operations you choose.\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "To create a cluster, you need to define a compute configuration that specifies the type of machine to be used and the scalability behaviors. Then you choose a name for the cluster, unique within the workspace, that can be used to address the cluster later.\n", "\n", "The cluster parameters are:\n", diff --git a/how-to-use-azureml/automated-machine-learning/automl_env.yml b/how-to-use-azureml/automated-machine-learning/automl_env.yml index b0dfac07..d4fdbd81 100644 --- a/how-to-use-azureml/automated-machine-learning/automl_env.yml +++ b/how-to-use-azureml/automated-machine-learning/automl_env.yml @@ -21,8 +21,8 @@ dependencies: - pip: # Required packages for AzureML execution, history, and data preparation. - - azureml-widgets~=1.27.0 + - azureml-widgets~=1.28.0 - pytorch-transformers==1.0.0 - spacy==2.1.8 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz - - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.27.0/validated_win32_requirements.txt [--no-deps] + - -r https://automlresources-prod.azureedge.net/validated-requirements/1.28.0/validated_win32_requirements.txt [--no-deps] diff --git a/how-to-use-azureml/automated-machine-learning/automl_env_linux.yml b/how-to-use-azureml/automated-machine-learning/automl_env_linux.yml index 61aec3da..a1393cfb 100644 --- a/how-to-use-azureml/automated-machine-learning/automl_env_linux.yml +++ b/how-to-use-azureml/automated-machine-learning/automl_env_linux.yml @@ -21,8 +21,8 @@ dependencies: - pip: # Required packages for AzureML execution, history, and data preparation. - - azureml-widgets~=1.27.0 + - azureml-widgets~=1.28.0 - pytorch-transformers==1.0.0 - spacy==2.1.8 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz - - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.27.0/validated_linux_requirements.txt [--no-deps] + - -r https://automlresources-prod.azureedge.net/validated-requirements/1.28.0/validated_linux_requirements.txt [--no-deps] diff --git a/how-to-use-azureml/automated-machine-learning/automl_env_mac.yml b/how-to-use-azureml/automated-machine-learning/automl_env_mac.yml index 8a7d2030..42c4d6a4 100644 --- a/how-to-use-azureml/automated-machine-learning/automl_env_mac.yml +++ b/how-to-use-azureml/automated-machine-learning/automl_env_mac.yml @@ -22,8 +22,8 @@ dependencies: - pip: # Required packages for AzureML execution, history, and data preparation. 
- - azureml-widgets~=1.27.0 + - azureml-widgets~=1.28.0 - pytorch-transformers==1.0.0 - spacy==2.1.8 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz - - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.27.0/validated_darwin_requirements.txt [--no-deps] + - -r https://automlresources-prod.azureedge.net/validated-requirements/1.28.0/validated_darwin_requirements.txt [--no-deps] diff --git a/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb b/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb index 9f0072ea..e0a91ef5 100644 --- a/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb +++ b/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb @@ -105,7 +105,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.27.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.28.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, @@ -165,6 +165,9 @@ "source": [ "## Create or Attach existing AmlCompute\n", "You will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "#### Creation of AmlCompute takes approximately 5 minutes. \n", "If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n", "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota." diff --git a/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb b/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb index 8a0a8055..d91bea06 100644 --- a/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb +++ b/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb @@ -93,7 +93,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.27.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.28.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, @@ -127,6 +127,9 @@ "source": [ "## Create or Attach existing AmlCompute\n", "A compute target is required to execute the Automated ML run. 
In this tutorial, you create AmlCompute as your training compute resource.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "#### Creation of AmlCompute takes approximately 5 minutes. \n", "If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n", "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota." diff --git a/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.ipynb b/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.ipynb index 325335f6..4188cfc5 100644 --- a/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.ipynb +++ b/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.ipynb @@ -96,7 +96,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.27.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.28.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, @@ -138,6 +138,8 @@ "## Set up a compute cluster\n", "This section uses a user-provided compute cluster (named \"dnntext-cluster\" in this example). If a cluster with this name does not exist in the user's workspace, the below code will create a new cluster. You can choose the parameters of the cluster as mentioned in the comments.\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "Whether you provide/select a CPU or GPU cluster, AutoML will choose the appropriate DNN for that setup - BiLSTM or BERT text featurizer will be included in the candidate featurizers on CPU and GPU respectively. If your goal is to obtain the most accurate model, we recommend you use GPU clusters since BERT featurizers usually outperform BiLSTM featurizers." 
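All of these create-or-attach cells follow the same SDK v1 pattern; a minimal sketch is below, where the cluster name and VM size are illustrative assumptions rather than values taken from the notebooks:

```python
from azureml.core import Workspace
from azureml.core.compute import AmlCompute, ComputeTarget
from azureml.exceptions import ComputeTargetException

ws = Workspace.from_config()  # assumes a config.json written by configuration.ipynb
cluster_name = "dnntext-cluster"  # illustrative; must be unique within the workspace

try:
    # Reuse the cluster if it already exists (e.g. one created by an admin).
    compute_target = ComputeTarget(workspace=ws, name=cluster_name)
    print("Found existing cluster, using it.")
except ComputeTargetException:
    # Otherwise provision a new one; choose a GPU SKU such as STANDARD_NC6
    # if you want the BERT featurizer rather than BiLSTM.
    config = AmlCompute.provisioning_configuration(vm_size="STANDARD_NC6", max_nodes=4)
    compute_target = ComputeTarget.create(ws, cluster_name, config)
    compute_target.wait_for_completion(show_output=True)
```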
] }, @@ -485,7 +487,7 @@ "outputs": [], "source": [ "test_run = run_inference(test_experiment, compute_target, script_folder, best_dnn_run,\n", - " train_dataset, test_dataset, target_column_name, model_name)" + " test_dataset, target_column_name, model_name)" ] }, { diff --git a/how-to-use-azureml/automated-machine-learning/classification-text-dnn/helper.py b/how-to-use-azureml/automated-machine-learning/classification-text-dnn/helper.py index dc254472..66a12549 100644 --- a/how-to-use-azureml/automated-machine-learning/classification-text-dnn/helper.py +++ b/how-to-use-azureml/automated-machine-learning/classification-text-dnn/helper.py @@ -5,7 +5,7 @@ from azureml.core.run import Run def run_inference(test_experiment, compute_target, script_folder, train_run, - train_dataset, test_dataset, target_column_name, model_name): + test_dataset, target_column_name, model_name): inference_env = train_run.get_environment() @@ -16,7 +16,6 @@ def run_inference(test_experiment, compute_target, script_folder, train_run, '--model_name': model_name }, inputs=[ - train_dataset.as_named_input('train_data'), test_dataset.as_named_input('test_data') ], compute_target=compute_target, diff --git a/how-to-use-azureml/automated-machine-learning/classification-text-dnn/infer.py b/how-to-use-azureml/automated-machine-learning/classification-text-dnn/infer.py index 18ed5314..cd3f8257 100644 --- a/how-to-use-azureml/automated-machine-learning/classification-text-dnn/infer.py +++ b/how-to-use-azureml/automated-machine-learning/classification-text-dnn/infer.py @@ -1,5 +1,6 @@ import argparse +import pandas as pd import numpy as np from sklearn.externals import joblib @@ -32,22 +33,21 @@ model = joblib.load(model_path) run = Run.get_context() # get input dataset by name test_dataset = run.input_datasets['test_data'] -train_dataset = run.input_datasets['train_data'] X_test_df = test_dataset.drop_columns(columns=[target_column_name]) \ .to_pandas_dataframe() y_test_df = test_dataset.with_timestamp_columns(None) \ .keep_columns(columns=[target_column_name]) \ .to_pandas_dataframe() -y_train_df = test_dataset.with_timestamp_columns(None) \ - .keep_columns(columns=[target_column_name]) \ - .to_pandas_dataframe() predicted = model.predict_proba(X_test_df) +if isinstance(predicted, pd.DataFrame): + predicted = predicted.values + # Use the AutoML scoring module -class_labels = np.unique(np.concatenate((y_train_df.values, y_test_df.values))) train_labels = model.classes_ +class_labels = np.unique(np.concatenate((y_test_df.values, np.reshape(train_labels, (-1, 1))))) classification_metrics = list(constants.CLASSIFICATION_SCALAR_SET) scores = scoring.score_classification(y_test_df.values, predicted, classification_metrics, diff --git a/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb b/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb index c05de38e..0e8da95a 100644 --- a/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb +++ b/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb @@ -81,7 +81,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.27.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.28.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, @@ -141,6 +141,9 
@@ "#### Create or Attach existing AmlCompute\n", "\n", "You will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "#### Creation of AmlCompute takes approximately 5 minutes. \n", "If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n", "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota." diff --git a/how-to-use-azureml/automated-machine-learning/experimental/regression-model-proxy/auto-ml-regression-model-proxy.ipynb b/how-to-use-azureml/automated-machine-learning/experimental/regression-model-proxy/auto-ml-regression-model-proxy.ipynb index 52e23431..7307cbdb 100644 --- a/how-to-use-azureml/automated-machine-learning/experimental/regression-model-proxy/auto-ml-regression-model-proxy.ipynb +++ b/how-to-use-azureml/automated-machine-learning/experimental/regression-model-proxy/auto-ml-regression-model-proxy.ipynb @@ -91,7 +91,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.27.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.28.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb index 5b674fe3..e8866649 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb @@ -113,7 +113,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.27.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.28.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, @@ -162,7 +162,9 @@ }, "source": [ "### Using AmlCompute\n", - "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you use `AmlCompute` as your training compute resource." + "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you use `AmlCompute` as your training compute resource.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." 
] }, { diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb index 0ad38eab..e62322d3 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb @@ -87,7 +87,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.27.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.28.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, @@ -129,6 +129,9 @@ "source": [ "## Compute\n", "You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "#### Creation of AmlCompute takes approximately 5 minutes. \n", "If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n", "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota." 
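One quota-conscious way to configure such a cluster is to let it scale to zero when idle and use low-priority VMs, which are metered against a separate core quota. A sketch with assumed values, not this notebook's settings:

```python
from azureml.core.compute import AmlCompute

# Pass this configuration to ComputeTarget.create(...) as in the cells above.
config = AmlCompute.provisioning_configuration(
    vm_size="STANDARD_DS3_V2",           # illustrative SKU
    vm_priority="lowpriority",           # preemptible nodes, counted against low-priority quota
    min_nodes=0,                         # release all nodes when there is no work
    max_nodes=4,                         # hard cap on the cores the cluster can consume
    idle_seconds_before_scaledown=1800   # scale down after 30 idle minutes
)
```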
diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb index 0b3899bb..db233f14 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb @@ -97,7 +97,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.27.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.28.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.ipynb index c5f2b0cb..b7160450 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.ipynb @@ -94,7 +94,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.27.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.28.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, @@ -263,7 +263,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource." + "You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." 
] }, { diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb index db8eccfc..2575cec9 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb @@ -82,7 +82,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.27.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.28.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, @@ -124,6 +124,9 @@ "source": [ "## Compute\n", "You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "#### Creation of AmlCompute takes approximately 5 minutes. \n", "If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n", "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota." 
diff --git a/how-to-use-azureml/automated-machine-learning/local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb b/how-to-use-azureml/automated-machine-learning/local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb index d8393f0e..76f036e7 100644 --- a/how-to-use-azureml/automated-machine-learning/local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb +++ b/how-to-use-azureml/automated-machine-learning/local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb @@ -96,7 +96,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.27.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.28.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb b/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb index 2e1cd266..222a647d 100644 --- a/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb +++ b/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb @@ -96,7 +96,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.27.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.28.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, @@ -130,6 +130,8 @@ "### Create or Attach existing AmlCompute\n", "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you create `AmlCompute` as your training compute resource.\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n", "\n", "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota." 
diff --git a/how-to-use-azureml/automated-machine-learning/regression/auto-ml-regression.ipynb b/how-to-use-azureml/automated-machine-learning/regression/auto-ml-regression.ipynb index fedd7547..aab12933 100644 --- a/how-to-use-azureml/automated-machine-learning/regression/auto-ml-regression.ipynb +++ b/how-to-use-azureml/automated-machine-learning/regression/auto-ml-regression.ipynb @@ -92,7 +92,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.27.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.28.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/azure-synapse/spark_job_on_synapse_spark_pool.ipynb b/how-to-use-azureml/azure-synapse/spark_job_on_synapse_spark_pool.ipynb index 946cc61e..fe2f74cb 100644 --- a/how-to-use-azureml/azure-synapse/spark_job_on_synapse_spark_pool.ipynb +++ b/how-to-use-azureml/azure-synapse/spark_job_on_synapse_spark_pool.ipynb @@ -46,7 +46,7 @@ "import azureml.core\n", "from azureml.core import Workspace, Experiment\n", "from azureml.core import LinkedService, SynapseWorkspaceLinkedServiceConfiguration\n", - "from azureml.core.compute import ComputeTarget, SynapseCompute\n", + "from azureml.core.compute import ComputeTarget, AmlCompute, SynapseCompute\n", "from azureml.exceptions import ComputeTargetException\n", "from azureml.data import HDFSOutputDatasetConfig\n", "from azureml.core.datastore import Datastore\n", diff --git a/how-to-use-azureml/deployment/deploy-with-controlled-rollout/deploy-aks-with-controlled-rollout.ipynb b/how-to-use-azureml/deployment/deploy-with-controlled-rollout/deploy-aks-with-controlled-rollout.ipynb index 3c958286..25002c44 100644 --- a/how-to-use-azureml/deployment/deploy-with-controlled-rollout/deploy-aks-with-controlled-rollout.ipynb +++ b/how-to-use-azureml/deployment/deploy-with-controlled-rollout/deploy-aks-with-controlled-rollout.ipynb @@ -157,7 +157,9 @@ "metadata": {}, "source": [ "## Provision the AKS Cluster\n", - "If you already have an AKS cluster attached to this workspace, skip the step below and provide the name of the cluster." + "If you already have an AKS cluster attached to this workspace, skip the step below and provide the name of the cluster.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." ] }, { diff --git a/how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb b/how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb index b2dd05d5..9a06114f 100644 --- a/how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb +++ b/how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb @@ -267,7 +267,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create AKS compute if you haven't done so." + "### Create AKS compute if you haven't done so.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. 
Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." ] }, { diff --git a/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks-ssl.ipynb b/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks-ssl.ipynb index def904c9..99a4ecd9 100644 --- a/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks-ssl.ipynb +++ b/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks-ssl.ipynb @@ -211,6 +211,8 @@ "# Provision the AKS Cluster with SSL\n", "This is a one time setup. You can reuse this cluster for multiple deployments after it has been created. If you delete the cluster or the resource group that contains it, then you would have to recreate it.\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "See code snippet below. Check the documentation [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-secure-web-service) for more details" ] }, diff --git a/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb b/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb index 4629e479..05580b7d 100644 --- a/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb +++ b/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb @@ -325,7 +325,9 @@ "metadata": {}, "source": [ "# Provision the AKS Cluster\n", - "This is a one time setup. You can reuse this cluster for multiple deployments after it has been created. If you delete the cluster or the resource group that contains it, then you would have to recreate it." + "This is a one time setup. You can reuse this cluster for multiple deployments after it has been created. If you delete the cluster or the resource group that contains it, then you would have to recreate it.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." ] }, { diff --git a/how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.ipynb b/how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.ipynb index 8fbf089a..02504b73 100644 --- a/how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.ipynb +++ b/how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.ipynb @@ -203,6 +203,8 @@ "source": [ "### Provision a compute target\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "You can provision an AmlCompute resource by simply defining two parameters thanks to smart defaults. By default it autoscales from 0 nodes and provisions dedicated VMs to run your job in a container. 
This is useful when you want to continuously re-use the same target, debug it between jobs or simply share the resource with other users of your workspace.\n", "\n", "* `vm_size`: VM family of the nodes provisioned by AmlCompute. Simply choose from the supported_vmsizes() above\n", diff --git a/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.ipynb b/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.ipynb index e39afe4a..cb73cc49 100644 --- a/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.ipynb +++ b/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.ipynb @@ -204,6 +204,8 @@ "source": [ "### Provision a compute target\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "You can provision an AmlCompute resource by simply defining two parameters thanks to smart defaults. By default it autoscales from 0 nodes and provisions dedicated VMs to run your job in a container. This is useful when you want to continuously re-use the same target, debug it between jobs or simply share the resource with other users of your workspace.\n", "\n", "* `vm_size`: VM family of the nodes provisioned by AmlCompute. Simply choose from the supported_vmsizes() above\n", diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb index 725ff487..4f3527fd 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb @@ -209,6 +209,8 @@ "#### Retrieve or create an Azure Machine Learning compute\n", "Azure Machine Learning Compute is a service for provisioning and managing clusters of Azure virtual machines for running machine learning workloads. Let's create a new Azure Machine Learning Compute in the current workspace, if it doesn't already exist. We will then run the training script on this compute target.\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "If we could not find the compute with the given name in the previous cell, then we will create a new compute here. We will create an Azure Machine Learning Compute containing **STANDARD_D2_V2 CPU VMs**. This process is broken down into the following steps:\n", "\n", "1. 
Create the configuration\n", diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-pipeline-drafts.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-pipeline-drafts.ipynb index 0864a32b..3b07b1d3 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-pipeline-drafts.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-pipeline-drafts.ipynb @@ -55,7 +55,9 @@ "metadata": {}, "source": [ "### Compute Target\n", - "Retrieve an already attached Azure Machine Learning Compute to use in the Pipeline." + "Retrieve an already attached Azure Machine Learning Compute to use in the Pipeline.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." ] }, { diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-parameter-tuning-with-hyperdrive.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-parameter-tuning-with-hyperdrive.ipynb index 0832d71f..7a617663 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-parameter-tuning-with-hyperdrive.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-parameter-tuning-with-hyperdrive.ipynb @@ -210,6 +210,8 @@ "## Retrieve or create an Azure Machine Learning compute\n", "Azure Machine Learning Compute is a service for provisioning and managing clusters of Azure virtual machines for running machine learning workloads. Let's create a new Azure Machine Learning Compute in the current workspace, if it doesn't already exist. We will then run the training script on this compute target.\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "If we could not find the compute with the given name in the previous cell, then we will create a new compute here. This process is broken down into the following steps:\n", "\n", "1. Create the configuration\n", diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-publish-and-run-using-rest-endpoint.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-publish-and-run-using-rest-endpoint.ipynb index ef552075..4f8d6996 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-publish-and-run-using-rest-endpoint.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-publish-and-run-using-rest-endpoint.ipynb @@ -68,7 +68,9 @@ "metadata": {}, "source": [ "### Compute Targets\n", - "#### Retrieve an already attached Azure Machine Learning Compute" + "#### Retrieve an already attached Azure Machine Learning Compute\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
] }, { diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-schedule-for-a-published-pipeline.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-schedule-for-a-published-pipeline.ipynb index 7b479cb0..2acb6e06 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-schedule-for-a-published-pipeline.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-schedule-for-a-published-pipeline.ipynb @@ -54,7 +54,9 @@ "metadata": {}, "source": [ "### Compute Targets\n", - "#### Retrieve an already attached Azure Machine Learning Compute" + "#### Retrieve an already attached Azure Machine Learning Compute\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." ] }, { diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-versioned-pipeline-endpoints.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-versioned-pipeline-endpoints.ipynb index 6544f352..aefaf8f7 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-versioned-pipeline-endpoints.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-versioned-pipeline-endpoints.ipynb @@ -78,7 +78,9 @@ "source": [ "#### Initialization, Steps to create a Pipeline\n", "\n", - "The best practice is to use separate folders for scripts and its dependent files for each step and specify that folder as the `source_directory` for the step. This helps reduce the size of the snapshot created for the step (only the specific folder is snapshotted). Since changes in any files in the `source_directory` would trigger a re-upload of the snapshot, this helps keep the reuse of the step when there are no changes in the `source_directory` of the step." + "The best practice is to use separate folders for scripts and their dependent files for each step and specify that folder as the `source_directory` for the step. This helps reduce the size of the snapshot created for the step (only the specific folder is snapshotted). Since changes in any files in the `source_directory` would trigger a re-upload of the snapshot, this helps preserve reuse of the step when there are no changes in the `source_directory` of the step.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
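A minimal sketch of that per-step folder layout with `PythonScriptStep`; the folder, script, and cluster names are illustrative assumptions:

```python
from azureml.core import Workspace
from azureml.core.compute import ComputeTarget
from azureml.pipeline.steps import PythonScriptStep

ws = Workspace.from_config()
compute_target = ComputeTarget(workspace=ws, name="cpu-cluster")  # assumed existing cluster

# Only the small, step-specific folder "train_step/" is snapshotted, so the
# snapshot is re-uploaded (and reuse invalidated) only when that folder changes.
train_step = PythonScriptStep(
    name="train",
    script_name="train.py",          # illustrative script inside train_step/
    source_directory="train_step",   # one folder per step keeps snapshots small
    compute_target=compute_target,
    allow_reuse=True                 # step results are reused while the snapshot is unchanged
)
```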
] }, { diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-datapath-and-pipelineparameter.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-datapath-and-pipelineparameter.ipynb index 061ead86..ff01d3a0 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-datapath-and-pipelineparameter.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-datapath-and-pipelineparameter.ipynb @@ -109,7 +109,9 @@ "metadata": {}, "source": [ "## Create or Attach an AmlCompute cluster\n", - "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you get the default `AmlCompute` as your training compute resource." + "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you get the default `AmlCompute` as your training compute resource.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." ] }, { diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.ipynb index 78b3dc4a..98cfe348 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.ipynb @@ -111,7 +111,9 @@ "metadata": {}, "source": [ "## Create or Attach an AmlCompute cluster\n", - "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you get the default `AmlCompute` as your training compute resource." + "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you get the default `AmlCompute` as your training compute resource.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." ] }, { diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-databricks-as-compute-target.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-databricks-as-compute-target.ipynb index affaa213..7455053c 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-databricks-as-compute-target.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-databricks-as-compute-target.ipynb @@ -699,12 +699,162 @@ ] }, { + "source": [ + "### 5. 
Running a demo notebook already added to the Databricks workspace using an existing cluster\n", + "First you need to register a DBFS datastore and make sure path_on_datastore exists in the Databricks file system; you can browse the files by referring to [this](https://docs.azuredatabricks.net/user-guide/dbfs-databricks-file-system.html).\n", + "\n", + "Find existing_cluster_id by opening the Azure Databricks UI to the Clusters page; in the URL you will find a string connected with '-' right after \"clusters/\"." + ], "cell_type": "markdown", + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], + "source": [ + "try:\n", + " dbfs_ds = Datastore.get(workspace=ws, datastore_name='dbfs_datastore')\n", + " print('DBFS Datastore already exists')\n", + "except Exception as ex:\n", + " dbfs_ds = Datastore.register_dbfs(ws, datastore_name='dbfs_datastore')\n", + "\n", + "step_1_input = DataReference(datastore=dbfs_ds, path_on_datastore=\"FileStore\", data_reference_name=\"input\")\n", + "step_1_output = PipelineData(\"output\", datastore=dbfs_ds)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dbNbWithExistingClusterStep = DatabricksStep(\n", + " name=\"DBFSReferenceWithExisting\",\n", + " inputs=[step_1_input],\n", + " outputs=[step_1_output],\n", + " notebook_path=notebook_path,\n", + " notebook_params={'myparam': 'testparam', \n", + " 'myparam2': pipeline_param},\n", + " run_name='DBFS_Reference_With_Existing',\n", + " compute_target=databricks_compute,\n", + " existing_cluster_id=\"your existing cluster id\",\n", + " allow_reuse=True\n", + ")" + ] + }, + { + "source": [ + "#### Build and submit the Experiment" + ], + "cell_type": "markdown", + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "steps = [dbNbWithExistingClusterStep]\n", + "pipeline = Pipeline(workspace=ws, steps=steps)\n", + "pipeline_run = Experiment(ws, 'DBFS_Reference_With_Existing').submit(pipeline)\n", + "pipeline_run.wait_for_completion()" + ] + }, + { + "source": [ + "#### View Run Details" + ], + "cell_type": "markdown", + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.widgets import RunDetails\n", + "RunDetails(pipeline_run).show()" + ] + }, + { + "source": [ + "### 6. Running a Python script from the local computer in Databricks using an existing cluster\n", + "When you access Azure Blob or Data Lake storage from an existing (interactive) cluster, you need to ensure the Spark configuration is set up correctly to access this storage, and this setup may require the cluster to be restarted.\n", + "\n", + "If you set permit_cluster_restart to True, AML will check whether the Spark configuration needs to be updated and restart the cluster for you if required. This ensures that the storage can be accessed correctly from the Databricks cluster."
+ ], + "cell_type": "markdown", + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "step_1_input = DataReference(datastore=def_blob_store, path_on_datastore=\"dbtest\",\n", + " data_reference_name=\"input\")\n", + "\n", + "dbPythonInLocalWithExistingStep = DatabricksStep(\n", + " name=\"DBPythonInLocalMachineWithExisting\",\n", + " inputs=[step_1_input],\n", + " python_script_name=python_script_name,\n", + " source_directory=source_directory,\n", + " run_name='DB_Python_Local_existing_demo',\n", + " compute_target=databricks_compute,\n", + " existing_cluster_id=\"your existing cluster id\",\n", + " allow_reuse=False,\n", + " permit_cluster_restart=True\n", + ")" + ] + }, + { + "source": [ + "#### Build and submit the Experiment" + ], + "cell_type": "markdown", + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "steps = [dbPythonInLocalWithExistingStep]\n", + "pipeline = Pipeline(workspace=ws, steps=steps)\n", + "pipeline_run = Experiment(ws, 'DB_Python_Local_existing_demo').submit(pipeline)\n", + "pipeline_run.wait_for_completion()" + ] + }, + { + "source": [ + "#### View Run Details" + ], + "cell_type": "markdown", + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.widgets import RunDetails\n", + "RunDetails(pipeline_run).show()" + ] + }, + { "source": [ "# Next: ADLA as a Compute Target\n", "To use ADLA as a compute target from Azure Machine Learning Pipeline, an AdlaStep is used. This [notebook](https://aka.ms/pl-adla) demonstrates the use of AdlaStep in Azure Machine Learning Pipeline." - ] + ], + "cell_type": "markdown", + "metadata": {} } ], "metadata": { diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-automated-machine-learning-step.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-automated-machine-learning-step.ipynb index b86720cb..6e496355 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-automated-machine-learning-step.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-automated-machine-learning-step.ipynb @@ -125,7 +125,9 @@ "metadata": {}, "source": [ "### Create or Attach an AmlCompute cluster\n", - "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you get the default `AmlCompute` as your training compute resource." + "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you get the default `AmlCompute` as your training compute resource.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist."
] }, { diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-commandstep-r.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-commandstep-r.ipynb index 7ff66e59..4d7a3af5 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-commandstep-r.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-commandstep-r.ipynb @@ -79,7 +79,9 @@ "metadata": {}, "source": [ "## Create or Attach existing AmlCompute\n", - "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource." + "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." ] }, { diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-commandstep.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-commandstep.ipynb index ce69d4a2..fd719bb6 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-commandstep.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-commandstep.ipynb @@ -77,7 +77,9 @@ "metadata": {}, "source": [ "## Create or Attach existing AmlCompute\n", - "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource." + "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." ] }, { diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-data-dependency-steps.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-data-dependency-steps.ipynb index 10c0eed9..419303a4 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-data-dependency-steps.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-data-dependency-steps.ipynb @@ -134,7 +134,9 @@ "metadata": {}, "source": [ "#### Retrieve or create an Aml compute\n", - "Azure Machine Learning Compute is a service for provisioning and managing clusters of Azure virtual machines for running machine learning workloads. Let's get the default Aml Compute in the current workspace. 
We will then run the training script on this compute target." + "Azure Machine Learning Compute is a service for provisioning and managing clusters of Azure virtual machines for running machine learning workloads. Let's get the default Aml Compute in the current workspace. We will then run the training script on this compute target.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." ] }, { diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-notebook-runner-step.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-notebook-runner-step.ipynb index 764cbee6..4e178747 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-notebook-runner-step.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-notebook-runner-step.ipynb @@ -147,7 +147,9 @@ "metadata": {}, "source": [ "### Create or Attach an AmlCompute cluster\n", - "You will need to create a [compute target](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.computetarget?view=azure-ml-py) for your remote run. In this tutorial, you get the default `AmlCompute` as your training compute resource." + "You will need to create a [compute target](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.computetarget?view=azure-ml-py) for your remote run. In this tutorial, you get the default `AmlCompute` as your training compute resource.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." ] }, { diff --git a/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb b/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb index e5a36649..ef34e24f 100644 --- a/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb @@ -225,7 +225,9 @@ "metadata": {}, "source": [ "### Setup Compute\n", - "#### Create new or use an existing compute" + "#### Create new or use an existing compute\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." 
] }, { diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.ipynb b/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.ipynb index 36038f80..50736a54 100644 --- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.ipynb @@ -24,9 +24,9 @@ "In this notebook, we will demonstrate how to make predictions on large quantities of data asynchronously using the ML pipelines with Azure Machine Learning. Batch inference (or batch scoring) provides cost-effective inference, with unparalleled throughput for asynchronous applications. Batch prediction pipelines can scale to perform inference on terabytes of production data. Batch prediction is optimized for high throughput, fire-and-forget predictions for a large collection of data.\n", "\n", "> **Tip**\n", - "If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction.\n", + "If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction.\n", "\n", - "In this example will be take a digit identification model already-trained on MNIST dataset using the [AzureML training with deep learning example notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb), and run that trained model on some of the MNIST test images in batch. \n", + "In this example we will take a digit identification model already trained on the MNIST dataset using the [AzureML training with deep learning example notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb), and run that trained model on some of the MNIST test images in batch. \n", "\n", "The input dataset used for this notebook differs from a standard MNIST dataset in that it has been converted to PNG images to demonstrate use of files as inputs to Batch Inference. A sample of PNG-converted images of the MNIST dataset was taken from [this repository](https://github.com/myleott/mnist_png). \n", "\n", @@ -86,6 +86,8 @@ "### Create or Attach existing compute resource\n", "By using Azure Machine Learning Compute, a managed service, data scientists can train machine learning models on clusters of Azure virtual machines. Examples include VMs with GPU support. In this tutorial, you create Azure Machine Learning Compute as your training environment. The code below creates the compute clusters for you if they don't already exist in your workspace.\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "**Creation of compute takes approximately 5 minutes. 
If the AmlCompute with that name is already in your workspace the code will skip the creation process.**" ] }, @@ -180,8 +182,7 @@ "metadata": {}, "source": [ "### Create a FileDataset\n", - "A [FileDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.filedataset?view=azure-ml-py) references single or multiple files in your datastores or public urls. The files can be of any format. FileDataset provides you with the ability to download or mount the files to your compute. By creating a dataset, you create a reference to the data source location. If you applied any subsetting transformations to the dataset, they will be stored in the dataset as well. The data remains in its existing location, so no extra storage cost is incurred.", - "\n", + "A [FileDataset](https://docs.microsoft.com/python/api/azureml-core/azureml.data.filedataset?view=azure-ml-py) references single or multiple files in your datastores or public URLs. The files can be of any format. A FileDataset provides you with the ability to download or mount the files to your compute. By creating a dataset, you create a reference to the data source location. If you applied any subsetting transformations to the dataset, they will be stored in the dataset as well. The data remains in its existing location, so no extra storage cost is incurred.\n", "You can use dataset objects as inputs. Register the datasets to the workspace if you want to reuse them later." ] }, @@ -224,7 +225,7 @@ "metadata": {}, "source": [ "### Intermediate/Output Data\n", - "Intermediate data (or output of a Step) is represented by [PipelineData](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelinedata?view=azure-ml-py) object. PipelineData can be produced by one step and consumed in another step by providing the PipelineData object as an output of one step and the input of one or more steps." + "Intermediate data (or output of a Step) is represented by a [PipelineData](https://docs.microsoft.com/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelinedata?view=azure-ml-py) object. PipelineData can be produced by one step and consumed in another step by providing the PipelineData object as an output of one step and the input of one or more steps." ] }, { @@ -276,7 +277,7 @@ "### Register the model with Workspace\n", "A registered model is a logical container for one or more files that make up your model. For example, if you have a model that's stored in multiple files, you can register them as a single model in the workspace. After you register the files, you can then download or deploy the registered model and receive all the files that you registered.\n", "\n", - "Using tags, you can track useful information such as the name and version of the machine learning library used to train the model. Note that tags must be alphanumeric. 
Learn more about registering models [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-deploy-and-where#registermodel) " ] }, { @@ -362,7 +363,6 @@ " \"azureml-core\", \"azureml-dataset-runtime[fuse]\"])\n", "batch_env = Environment(name=\"batch_environment\")\n", "batch_env.python.conda_dependencies = batch_conda_deps\n", - "batch_env.docker.enabled = True\n", "batch_env.docker.base_image = DEFAULT_CPU_IMAGE" ] }, @@ -379,7 +379,6 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.pipeline.core import PipelineParameter\n", "from azureml.pipeline.steps import ParallelRunStep, ParallelRunConfig\n", "\n", "parallel_run_config = ParallelRunConfig(\n", diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.ipynb b/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.ipynb index b93e32dc..205d2fbd 100644 --- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.ipynb @@ -24,7 +24,7 @@ "In this notebook, we will demonstrate how to make predictions on large quantities of data asynchronously using the ML pipelines with Azure Machine Learning. Batch inference (or batch scoring) provides cost-effective inference, with unparalleled throughput for asynchronous applications. Batch prediction pipelines can scale to perform inference on terabytes of production data. Batch prediction is optimized for high throughput, fire-and-forget predictions for a large collection of data.\n", "\n", "> **Tip**\n", - "If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction.\n", + "If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction.\n", "\n", "In this example we will use a machine learning model already trained to predict different types of iris flowers and run that trained model on some of the data in a CSV file which has characteristics of different iris flowers. However, the same example can be extended to any embarrassingly parallel data processing implemented in a Python script.\n", "\n", @@ -84,6 +84,8 @@ "### Create or Attach existing compute resource\n", "By using Azure Machine Learning Compute, a managed service, data scientists can train machine learning models on clusters of Azure virtual machines. Examples include VMs with GPU support. In this tutorial, you create Azure Machine Learning Compute as your training environment. The code below creates the compute clusters for you if they don't already exist in your workspace.\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "**Creation of compute takes approximately 5 minutes. 
If the AmlCompute with that name is already in your workspace the code will skip the creation process.**" ] }, @@ -160,7 +162,7 @@ "metadata": {}, "source": [ "### Create a TabularDataset\n", - "A [TabularDataSet](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) references single or multiple files which contain data in a tabular structure (ie like CSV files) in your datastores or public urls. TabularDatasets provides you with the ability to download or mount the files to your compute. By creating a dataset, you create a reference to the data source location. If you applied any subsetting transformations to the dataset, they will be stored in the dataset as well. The data remains in its existing location, so no extra storage cost is incurred.\n", + "A [TabularDataset](https://docs.microsoft.com/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) references single or multiple files that contain data in a tabular structure (e.g. CSV files) in your datastores or public URLs. A TabularDataset provides you with the ability to download or mount the files to your compute. By creating a dataset, you create a reference to the data source location. If you applied any subsetting transformations to the dataset, they will be stored in the dataset as well. The data remains in its existing location, so no extra storage cost is incurred.\n", "You can use dataset objects as inputs. Register the datasets to the workspace if you want to reuse them later." ] }, @@ -184,7 +186,7 @@ "metadata": {}, "source": [ "### Intermediate/Output Data\n", - "Intermediate data (or output of a Step) is represented by [PipelineData](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelinedata?view=azure-ml-py) object. PipelineData can be produced by one step and consumed in another step by providing the PipelineData object as an output of one step and the input of one or more steps." + "Intermediate data (or output of a Step) is represented by a [PipelineData](https://docs.microsoft.com/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelinedata?view=azure-ml-py) object. PipelineData can be produced by one step and consumed in another step by providing the PipelineData object as an output of one step and the input of one or more steps." ] }, { @@ -311,7 +313,6 @@ "\n", "predict_env = Environment(name=\"predict_environment\")\n", "predict_env.python.conda_dependencies = predict_conda_deps\n", - "predict_env.docker.enabled = True\n", "predict_env.spark.precache_packages = False" ] }, diff --git a/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer-parallel-run.ipynb b/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer-parallel-run.ipynb index 18686362..32000b07 100644 --- a/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer-parallel-run.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer-parallel-run.ipynb @@ -178,7 +178,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Create or use existing compute" + "# Create or use existing compute\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." 
] }, { diff --git a/how-to-use-azureml/ml-frameworks/chainer/distributed-chainer/distributed-chainer.ipynb b/how-to-use-azureml/ml-frameworks/chainer/distributed-chainer/distributed-chainer.ipynb index 25f8eaa2..eb21490d 100644 --- a/how-to-use-azureml/ml-frameworks/chainer/distributed-chainer/distributed-chainer.ipynb +++ b/how-to-use-azureml/ml-frameworks/chainer/distributed-chainer/distributed-chainer.ipynb @@ -98,6 +98,8 @@ "## Create or attach existing AmlCompute\n", "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource. Specifically, the code below creates a `STANDARD_NC6` GPU cluster that autoscales from `0` to `4` nodes.\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace, this code will skip the creation process.\n", "\n", "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota." diff --git a/how-to-use-azureml/ml-frameworks/chainer/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.ipynb b/how-to-use-azureml/ml-frameworks/chainer/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.ipynb index 905c2c75..9ad67f4b 100644 --- a/how-to-use-azureml/ml-frameworks/chainer/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.ipynb +++ b/how-to-use-azureml/ml-frameworks/chainer/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.ipynb @@ -98,6 +98,8 @@ "## Create or Attach existing AmlCompute\n", "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource.\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace, this code will skip the creation process.\n", "\n", "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
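The role note added across these notebooks describes who may create compute, while the notebooks themselves contain the actual creation code. For readers skimming this changeset, here is a minimal sketch of the create-or-attach pattern the note refers to, assuming an existing `Workspace` object `ws`; the cluster name and VM size are placeholders, not values taken from these notebooks:

```python
# Minimal sketch of the create-or-attach AmlCompute pattern used throughout
# these notebooks. Assumes `ws` is a Workspace, e.g. from Workspace.from_config().
from azureml.core.compute import AmlCompute, ComputeTarget
from azureml.core.compute_target import ComputeTargetException

cluster_name = "gpu-cluster"  # placeholder; must be unique within the workspace

try:
    # Attach the cluster if it already exists, e.g. one created by an admin.
    compute_target = ComputeTarget(workspace=ws, name=cluster_name)
    print("Found existing compute target.")
except ComputeTargetException:
    # Creating a new cluster requires compute-creation permissions,
    # which the AzureML Data Scientist role does not have.
    config = AmlCompute.provisioning_configuration(vm_size="STANDARD_NC6",
                                                   min_nodes=0,
                                                   max_nodes=4)
    compute_target = ComputeTarget.create(ws, cluster_name, config)
    compute_target.wait_for_completion(show_output=True)
```

Because the `try` branch only reads an existing target, this cell still runs for users who lack creation rights, which is exactly the situation the new note addresses.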
diff --git a/how-to-use-azureml/ml-frameworks/fastai/fastai-with-custom-docker/fastai-with-custom-docker.ipynb b/how-to-use-azureml/ml-frameworks/fastai/fastai-with-custom-docker/fastai-with-custom-docker.ipynb index 1cce8b8e..f30e8039 100644 --- a/how-to-use-azureml/ml-frameworks/fastai/fastai-with-custom-docker/fastai-with-custom-docker.ipynb +++ b/how-to-use-azureml/ml-frameworks/fastai/fastai-with-custom-docker/fastai-with-custom-docker.ipynb @@ -222,6 +222,8 @@ "### Create or attach existing AmlCompute\n", "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource.\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n", "\n", "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota." diff --git a/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb b/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb index f176de93..8c4544ad 100644 --- a/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb +++ b/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb @@ -272,7 +272,9 @@ "metadata": {}, "source": [ "## Create or Attach existing AmlCompute\n", - "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource." + "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." 
] }, { diff --git a/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-distributeddataparallel/distributed-pytorch-with-distributeddataparallel.ipynb b/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-distributeddataparallel/distributed-pytorch-with-distributeddataparallel.ipynb index df2b5ce9..088233e3 100644 --- a/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-distributeddataparallel/distributed-pytorch-with-distributeddataparallel.ipynb +++ b/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-distributeddataparallel/distributed-pytorch-with-distributeddataparallel.ipynb @@ -99,6 +99,8 @@ "## Create or attach existing AmlCompute\n", "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource. Specifically, the code below creates a `STANDARD_NC6` GPU cluster that autoscales from `0` to `4` nodes.\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace, this code will skip the creation process.\n", "\n", "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota." diff --git a/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb b/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb index 2562d0ce..00fc8ad2 100644 --- a/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb +++ b/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb @@ -99,6 +99,8 @@ "## Create or attach existing AmlCompute\n", "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource. Specifically, the code below creates a `STANDARD_NC6` GPU cluster that autoscales from `0` to `4` nodes.\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace, this code will skip the creation process.\n", "\n", "As with other Azure services, there are limits on certain resources (e.g. 
AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota." diff --git a/how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb b/how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb index 7920db7a..e0956d34 100644 --- a/how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb +++ b/how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb @@ -100,6 +100,8 @@ "## Create or Attach existing AmlCompute\n", "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource.\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace, this code will skip the creation process.\n", "\n", "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota." diff --git a/how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.ipynb b/how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.ipynb index 6885b99a..51f6a6d4 100644 --- a/how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.ipynb +++ b/how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.ipynb @@ -117,6 +117,8 @@ "source": [ "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource.\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. 
Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota." ] }, diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.ipynb b/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.ipynb index bd204bdf..10139f41 100644 --- a/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.ipynb +++ b/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.ipynb @@ -101,6 +101,8 @@ "## Create or Attach existing AmlCompute\n", "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource.\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n", "\n", "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota." diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-parameter-server/distributed-tensorflow-with-parameter-server.ipynb b/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-parameter-server/distributed-tensorflow-with-parameter-server.ipynb index b274a127..db1df133 100644 --- a/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-parameter-server/distributed-tensorflow-with-parameter-server.ipynb +++ b/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-parameter-server/distributed-tensorflow-with-parameter-server.ipynb @@ -101,6 +101,8 @@ "## Create or Attach existing AmlCompute\n", "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource.\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n", "\n", "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota." 
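The quota guidance repeated in the hunks above pairs naturally with a quick programmatic check of which VM sizes the workspace's region offers before settling on `vm_size`. A hedged sketch follows, assuming `ws` is an existing `Workspace`; the dictionary keys shown are assumptions based on the SDK's documented output and should be verified against the installed version:

```python
# Sketch: inspect the VM sizes supported in the workspace's region before
# choosing a `vm_size` for AmlCompute. Assumes `ws` is a Workspace object.
from azureml.core.compute import AmlCompute

for size in AmlCompute.supported_vmsizes(workspace=ws):
    # Each entry is a dict; 'name', 'vCPUs', and 'gpus' are the assumed keys.
    if size.get("gpus", 0) > 0:
        print(f"{size['name']}: {size['vCPUs']} vCPUs, {size['gpus']} GPUs")
```

Checking availability up front avoids provisioning failures in regions where a size such as `STANDARD_NC6` is not offered, which is also why several notebooks advise changing the VM size to one available in your region.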
diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb b/how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb index 448c4d76..9a9fb977 100644 --- a/how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb +++ b/how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb @@ -270,7 +270,9 @@ "metadata": {}, "source": [ "## Create or Attach existing AmlCompute\n", - "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource." + "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." ] }, { diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb b/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb index aabd5ed8..e3d34c9c 100644 --- a/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb +++ b/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb @@ -286,7 +286,9 @@ "metadata": {}, "source": [ "## Create or Attach existing AmlCompute\n", - "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource." + "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." 
] }, { diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb b/how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb index f9fa4065..c6d097ba 100644 --- a/how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb +++ b/how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb @@ -101,6 +101,8 @@ "## Create or Attach existing AmlCompute\n", "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource.\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n", "\n", "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota." diff --git a/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb b/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb index 5e6c9463..b5d49156 100644 --- a/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb +++ b/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb @@ -141,13 +141,20 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create Virtual Network\n", + "### Create Virtual Network and Network Security Group\n", "\n", - "If you are using separate compute targets for the Ray head and worker, a virtual network must be created in the resource group. If you have alraeady created a virtual network in the resource group, you can skip this step.\n", + "**If you are using separate compute targets for the Ray head and worker, as we do in this notebook**, a virtual network must be created in the resource group. If you have already created a virtual network in the resource group, you can skip this step.\n", "\n", - "To do this, you first must install the Azure Networking API.\n", + "> Note that your user role must have permissions to create and manage virtual networks to run the cells below. 
Talk to your IT admin if you do not have these permissions.\n", "\n", - "`pip install --upgrade azure-mgmt-network==12.0.0`" + "#### Create Virtual Network\n", + "To create the virtual network, you must first install the [Azure Networking Python API](https://docs.microsoft.com/python/api/overview/azure/network?view=azure-python).\n", + "\n", + "`pip install --upgrade azure-mgmt-network`\n", + "\n", + "Note: In this section we use the [DefaultAzureCredential](https://docs.microsoft.com/python/api/azure-identity/azure.identity.defaultazurecredential?view=azure-python)\n", "class for authentication, which, by default, examines several options in turn and stops on the first option that provides\n", "a token. If none of the other options are available, you will need to log in using the Azure CLI (please find more details [here](https://docs.microsoft.com/python/api/azure-identity/azure.identity.defaultazurecredential?view=azure-python))." ] }, { @@ -157,7 +164,7 @@ "outputs": [], "source": [ "# If you need to install the Azure Networking SDK, uncomment the following line.\n", - "#!pip install --upgrade azure-mgmt-network==12.0.0" + "#!pip install --upgrade azure-mgmt-network" ] }, { @@ -167,6 +174,7 @@ "outputs": [], "source": [ "from azure.mgmt.network import NetworkManagementClient\n", + "from azure.identity import DefaultAzureCredential\n", "\n", "# Virtual network name\n", "vnet_name =\"rl_pong_vnet\"\n", @@ -183,9 +191,9 @@ "# Azure region of the resource group\n", "location=ws.location\n", "\n", - "network_client = NetworkManagementClient(ws._auth_object, subscription_id)\n", + "network_client = NetworkManagementClient(credential=DefaultAzureCredential(), subscription_id=subscription_id)\n", "\n", - "async_vnet_creation = network_client.virtual_networks.create_or_update(\n", + "async_vnet_creation = network_client.virtual_networks.begin_create_or_update(\n", " resource_group,\n", " vnet_name,\n", " {\n", @@ -204,9 +212,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Set up Network Security Group on Virtual Network\n", + "#### Set up Network Security Group on Virtual Network\n", "\n", - "Depending on your Azure setup, you may need to open certain ports to make it possible for Azure to manage the compute targets that you create. The ports that need to be opened are described [here](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-enable-virtual-network).\n", + "Depending on your Azure setup, you may need to open certain ports to make it possible for Azure to manage the compute targets that you create. The ports that need to be opened are described [here](https://docs.microsoft.com/azure/machine-learning/how-to-enable-virtual-network).\n", "\n", "A common situation is that ports `29876-29877` are closed. The following code will add a security rule to open these ports. 
Or you can do this manually in the [Azure portal](https://portal.azure.com).\n", "\n", @@ -243,7 +251,7 @@ " ],\n", ")\n", "\n", - "async_nsg_creation = network_client.network_security_groups.create_or_update(\n", + "async_nsg_creation = network_client.network_security_groups.begin_create_or_update(\n", " resource_group,\n", " security_group_name,\n", " nsg_params,\n", @@ -265,7 +273,7 @@ " )\n", " \n", "# Create subnet on virtual network\n", - "async_subnet_creation = network_client.subnets.create_or_update(\n", + "async_subnet_creation = network_client.subnets.begin_create_or_update(\n", " resource_group_name=resource_group,\n", " virtual_network_name=vnet_name,\n", " subnet_name=subnet_name,\n", @@ -280,7 +288,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Review the virtual network security rules\n", + "#### Review the virtual network security rules\n", "Ensure that the virtual network is configured correctly with the required ports open. It is possible that you have configured rules with a broader range of ports that allows ports 29876-29877 to be opened. Please review your network security group rules. " ] }, @@ -291,17 +299,24 @@ "outputs": [], "source": [ "from files.networkutils import *\n", + "from azure.identity import DefaultAzureCredential\n", "\n", - "check_vnet_security_rules(ws._auth_object, ws.subscription_id, ws.resource_group, vnet_name, True)" + "check_vnet_security_rules(DefaultAzureCredential(), ws.subscription_id, ws.resource_group, vnet_name, True)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Create head compute target\n", + "### Create compute targets\n", "\n", - "In this example, we show how to set up separate compute targets for the Ray head and Ray worker nodes. First we define the head cluster with GPU for the Ray head node. One CPU of the head node will be used for the Ray head process and the rest of the CPUs will be used by the Ray worker processes." + "In this example, we show how to set up separate compute targets for the Ray head and Ray worker nodes.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", + "#### Create head compute target\n", + "\n", + "First we define the head cluster with GPU for the Ray head node. One CPU of the head node will be used for the Ray head process and the rest of the CPUs will be used by the Ray worker processes." ] }, { @@ -353,7 +368,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create worker compute target\n", + "#### Create worker compute target\n", "\n", "Now we create a compute target with CPUs for the additional Ray worker nodes. CPUs in these worker nodes are used by Ray worker processes. Each Ray worker node, depending on the CPUs on the node, may have multiple Ray worker processes. There can be multiple worker tasks on each worker process (core)."
] diff --git a/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.yml b/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.yml index d9d808d9..a79b3da0 100644 --- a/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.yml +++ b/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.yml @@ -5,4 +5,5 @@ dependencies: - azureml-contrib-reinforcementlearning - azureml-widgets - matplotlib - - azure-mgmt-network==12.0.0 + - azure-mgmt-network + - azure-cli diff --git a/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.ipynb b/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.ipynb index 19766dee..8a6f89cd 100644 --- a/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.ipynb +++ b/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.ipynb @@ -118,6 +118,8 @@ "\n", "A compute target is a designated compute resource where you run your training and simulation scripts. This location may be your local machine or a cloud-based compute resource. The code below shows how to create a cloud-based compute target. For more information see [What are compute targets in Azure Machine Learning?](https://docs.microsoft.com/en-us/azure/machine-learning/concept-compute-target)\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "**Note: Creation of a compute resource can take several minutes**. Please make sure to change `STANDARD_D2_V2` to a [size available in your region](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines)." ] }, diff --git a/how-to-use-azureml/reinforcement-learning/multiagent-particle-envs/particle.ipynb b/how-to-use-azureml/reinforcement-learning/multiagent-particle-envs/particle.ipynb index f3aca061..15876a06 100644 --- a/how-to-use-azureml/reinforcement-learning/multiagent-particle-envs/particle.ipynb +++ b/how-to-use-azureml/reinforcement-learning/multiagent-particle-envs/particle.ipynb @@ -138,6 +138,8 @@ "\n", "A compute target is a designated compute resource where you run your training script. For more information, see [What are compute targets in Azure Machine Learning service?](https://docs.microsoft.com/en-us/azure/machine-learning/concept-compute-target).\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. 
Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "#### CPU target for Ray head\n", "\n", "In the experiment setup for this tutorial, the Ray head node will\n", diff --git a/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb b/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb index e2fddfdc..adb4184d 100644 --- a/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb +++ b/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb @@ -100,7 +100,7 @@ "\n", "# Check core SDK version number\n", "\n", - "print(\"This notebook was created using SDK version 1.27.0, you are currently running version\", azureml.core.VERSION)" + "print(\"This notebook was created using SDK version 1.28.0, you are currently running version\", azureml.core.VERSION)" ] }, { diff --git a/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard/tensorboard.ipynb b/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard/tensorboard.ipynb index bef46ca5..ba03e005 100644 --- a/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard/tensorboard.ipynb +++ b/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard/tensorboard.ipynb @@ -390,7 +390,9 @@ "source": [ "## Once more, with an AmlCompute cluster\n", "\n", - "Just to prove we can, let's create an AmlCompute CPU cluster, and run our demo there, as well." + "Just to prove we can, let's create an AmlCompute CPU cluster, and run our demo there, as well.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." ] }, { diff --git a/how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-remote/train-remote.ipynb b/how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-remote/train-remote.ipynb index 87432669..3f981c3a 100644 --- a/how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-remote/train-remote.ipynb +++ b/how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-remote/train-remote.ipynb @@ -67,7 +67,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Let's also create a Machine Learning Compute cluster for submitting the remote run. " + "Let's also create a Machine Learning Compute cluster for submitting the remote run. \n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." ] }, { diff --git a/how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb b/how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb index 6f5869fa..70d2d571 100644 --- a/how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb +++ b/how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb @@ -195,6 +195,8 @@ "source": [ "### Provision as a persistent compute target (Basic)\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. 
Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "You can provision a persistent AmlCompute resource by simply defining two parameters thanks to smart defaults. By default it autoscales from 0 nodes and provisions dedicated VMs to run your job in a container. This is useful when you want to continuously re-use the same target, debug it between jobs or simply share the resource with other users of your workspace.\n", "\n", "* `vm_size`: VM family of the nodes provisioned by AmlCompute. Simply choose from the supported_vmsizes() above\n", @@ -287,6 +289,8 @@ "source": [ "### Provision as a persistent compute target (Advanced)\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "You can also specify additional properties or change defaults while provisioning AmlCompute using a more advanced configuration. This is useful when you want a dedicated cluster of 4 nodes (for example you can set the min_nodes and max_nodes to 4), or want the compute to be within an existing VNet in your subscription.\n", "\n", "In addition to `vm_size` and `max_nodes`, you can specify:\n", diff --git a/how-to-use-azureml/work-with-data/datadrift-tutorial/datadrift-tutorial.ipynb b/how-to-use-azureml/work-with-data/datadrift-tutorial/datadrift-tutorial.ipynb index edb2eeff..b848d08d 100644 --- a/how-to-use-azureml/work-with-data/datadrift-tutorial/datadrift-tutorial.ipynb +++ b/how-to-use-azureml/work-with-data/datadrift-tutorial/datadrift-tutorial.ipynb @@ -162,6 +162,8 @@ "source": [ "## Create compute target\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "Create an Azure Machine Learning compute cluster to run the data drift monitor and associated runs. The cell below will create a compute cluster named `'cpu-cluster'`. " ] }, @@ -431,7 +433,7 @@ "Azure ML" ], "friendly_name": "Data drift quickdemo", - "index_order": 1.0, + "index_order": 1, "kernelspec": { "display_name": "Python 3.6", "language": "python", diff --git a/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb b/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb index da25aa4f..886057b6 100644 --- a/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb +++ b/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb @@ -125,6 +125,8 @@ "### Create or Attach existing compute resource\n", "By using Azure Machine Learning Compute, a managed service, data scientists can train machine learning models on clusters of Azure virtual machines. Examples include VMs with GPU support. In this tutorial, you create Azure Machine Learning Compute as your training environment. The code below creates the compute clusters for you if they don't already exist in your workspace.\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. 
Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "**Creation of compute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace the code will skip the creation process." ] }, diff --git a/how-to-use-azureml/work-with-data/datasets-tutorial/scriptrun-with-data-input-output/how-to-use-scriptrun.ipynb b/how-to-use-azureml/work-with-data/datasets-tutorial/scriptrun-with-data-input-output/how-to-use-scriptrun.ipynb index 9b361f1b..c32bc978 100644 --- a/how-to-use-azureml/work-with-data/datasets-tutorial/scriptrun-with-data-input-output/how-to-use-scriptrun.ipynb +++ b/how-to-use-azureml/work-with-data/datasets-tutorial/scriptrun-with-data-input-output/how-to-use-scriptrun.ipynb @@ -59,7 +59,9 @@ "metadata": {}, "source": [ "## Create or Attach existing AmlCompute\n", - "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource." + "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." ] }, { diff --git a/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.ipynb b/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.ipynb index bdcf5ec3..cd5c5b0c 100644 --- a/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.ipynb +++ b/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.ipynb @@ -101,6 +101,8 @@ "## Create or Attach existing compute resource\n", "By using Azure Machine Learning Compute, a managed service, data scientists can train machine learning models on clusters of Azure virtual machines. Examples include VMs with GPU support. In this tutorial, you create Azure Machine Learning Compute as your training environment. The code below creates the compute clusters for you if they don't already exist in your workspace.\n", "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "**Creation of compute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace the code will skip the creation process." 
] }, diff --git a/index.md b/index.md index aaa00028..5bd4ddf3 100644 --- a/index.md +++ b/index.md @@ -132,17 +132,12 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an | [rai-loan-decision](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/responsible-ai/visualize-upload-loan-decision/rai-loan-decision.ipynb) | | | | | | | | [Logging APIs](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb) | Logging APIs and analyzing results | None | None | None | None | None | | [configuration](https://github.com/Azure/MachineLearningNotebooks/blob/master//setup-environment/configuration.ipynb) | | | | | | | +| [quickstart-azureml-automl](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/compute-instance-quickstarts/quickstart-azureml-automl/quickstart-azureml-automl.ipynb) | | | | | | | +| [quickstart-azureml-in-10mins](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/compute-instance-quickstarts/quickstart-azureml-in-10mins/quickstart-azureml-in-10mins.ipynb) | | | | | | | +| [quickstart-azureml-python-sdk](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/compute-instance-quickstarts/quickstart-azureml-python-sdk/quickstart-azureml-python-sdk.ipynb) | | | | | | | | [tutorial-1st-experiment-sdk-train](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/create-first-ml-experiment/tutorial-1st-experiment-sdk-train.ipynb) | | | | | | | -| [day1-part1-setup](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/get-started-day1/day1-part1-setup.ipynb) | | | | | | | -| [day1-part2-hello-world](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/get-started-day1/day1-part2-hello-world.ipynb) | | | | | | | -| [day1-part3-train-model](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/get-started-day1/day1-part3-train-model.ipynb) | | | | | | | -| [day1-part4-data](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/get-started-day1/day1-part4-data.ipynb) | | | | | | | | [img-classification-part1-training](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/image-classification-mnist-data/img-classification-part1-training.ipynb) | | | | | | | | [img-classification-part2-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/image-classification-mnist-data/img-classification-part2-deploy.ipynb) | | | | | | | | [img-classification-part3-deploy-encrypted](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/image-classification-mnist-data/img-classification-part3-deploy-encrypted.ipynb) | | | | | | | | [tutorial-pipeline-batch-scoring-classification](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb) | | | | | | | -| [azureml-quickstart](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/quickstart/azureml-quickstart.ipynb) | | | | | | | -| [AzureMLIn10mins](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/quickstart-ci/AzureMLIn10mins.ipynb) | | | | | | | -| [ClassificationWithAutomatedML](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/quickstart-ci/ClassificationWithAutomatedML.ipynb) | | | | | | | -| 
[GettingStartedWithPythonSDK](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/quickstart-ci/GettingStartedWithPythonSDK.ipynb) | | | | | | | | [regression-automated-ml](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/regression-automl-nyc-taxi-data/regression-automated-ml.ipynb) | | | | | | | diff --git a/setup-environment/configuration.ipynb b/setup-environment/configuration.ipynb index 305ec9ed..d6de070f 100644 --- a/setup-environment/configuration.ipynb +++ b/setup-environment/configuration.ipynb @@ -102,7 +102,7 @@ "source": [ "import azureml.core\n", "\n", - "print(\"This notebook was created using version 1.27.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.28.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/tutorials/README.md b/tutorials/README.md index 6136eae5..4ca28c71 100644 --- a/tutorials/README.md +++ b/tutorials/README.md @@ -16,16 +16,14 @@ The following tutorials are intended to provide an introductory overview of Azur | Tutorial | Description | Notebook | Task | Framework | | --- | --- | --- | --- | --- | -| Azure Machine Learning in 10 minutes | Learn how to create and attach compute instances to notebooks, run an image classification model, track model metrics, and deploy a model| [quickstart](quickstart/azureml-quickstart.ipynb) | Learn Azure Machine Learning Concepts | PyTorch -| [Get Started (day1)](https://docs.microsoft.com/azure/machine-learning/tutorial-1st-experiment-sdk-setup-local) | Learn the fundamental concepts of Azure Machine Learning to help onboard your existing code to Azure Machine Learning. This tutorial focuses heavily on submitting machine learning jobs to scalable cloud-based compute clusters. | [get-started-day1](get-started-day1/day1-part1-setup.ipynb) | Learn Azure Machine Learning Concepts | PyTorch | [Train your first ML Model](https://docs.microsoft.com/azure/machine-learning/tutorial-1st-experiment-sdk-train) | Learn the foundational design patterns in Azure Machine Learning and train a scikit-learn model based on a diabetes data set. | [tutorial-quickstart-train-model.ipynb](create-first-ml-experiment/tutorial-1st-experiment-sdk-train.ipynb) | Regression | Scikit-Learn | [Train an image classification model](https://docs.microsoft.com/azure/machine-learning/tutorial-train-models-with-aml) | Train a scikit-learn image classification model. | [img-classification-part1-training.ipynb](image-classification-mnist-data/img-classification-part1-training.ipynb) | Image Classification | Scikit-Learn | [Deploy an image classification model](https://docs.microsoft.com/azure/machine-learning/tutorial-deploy-models-with-aml) | Deploy a scikit-learn image classification model to Azure Container Instances. 
| [img-classification-part2-deploy.ipynb](image-classification-mnist-data/img-classification-part2-deploy.ipynb) | Image Classification | Scikit-Learn | [Deploy an encrypted inferencing service](https://docs.microsoft.com/azure/machine-learning/tutorial-deploy-models-with-aml) |Deploy an image classification model for encrypted inferencing in Azure Container Instances | [img-classification-part3-deploy-encrypted.ipynb](image-classification-mnist-data/img-classification-part3-deploy-encrypted.ipynb) | Image Classification | Scikit-Learn | [Use automated machine learning to predict taxi fares](https://docs.microsoft.com/azure/machine-learning/tutorial-auto-train-models) | Train a regression model to predict taxi fares using Automated Machine Learning. | [regression-part2-automated-ml.ipynb](regression-automl-nyc-taxi-data/regression-automated-ml.ipynb) | Regression | Automated ML -| Azure ML in 10 minutes, to be run on a Compute Instance |Learn how to run an image classification model, track model metrics, and deploy a model in 10 minutes. | [AzureMLIn10mins.ipynb](quickstart-ci/AzureMLIn10mins.ipynb) | Image Classification | Scikit-Learn | -| Get started with Azure ML Job Submission, to be run on a Compute Instance |Learn how to use the Azure Machine Learning Python SDK to submit batch jobs. | [GettingStartedWithPythonSDK.ipynb](quickstart-ci/GettingStartedWithPythonSDK.ipynb) | Image Classification | Scikit-Learn | -| Get started with Automated ML, to be run on a Compute Instance | Learn how to use Automated ML for Fraud classification. | [ClassificationWithAutomatedML.ipynb](quickstart-ci/ClassificationWithAutomatedML.ipynb) | Classification | Automated ML | +| Azure ML in 10 minutes (Compute instance required) |Learn how to run an image classification model, track model metrics, and deploy a model in 10 minutes. | [quickstart-azureml-in-10mins.ipynb](compute-instance-quickstarts/quickstart-azureml-in-10mins/quickstart-azureml-in-10mins.ipynb) | Image Classification | Scikit-Learn | +| Get started with Azure ML Job Submission (Compute instance required) |Learn how to use the Azure Machine Learning Python SDK to submit batch jobs. | [quickstart-azureml-python-sdk.ipynb](compute-instance-quickstarts/quickstart-azureml-python-sdk/quickstart-azureml-python-sdk.ipynb) | Image Classification | Scikit-Learn | +| Get started with Automated ML (Compute instance required) | Learn how to use Automated ML for Fraud classification. | [quickstart-azureml-automl.ipynb](compute-instance-quickstarts/quickstart-azureml-automl/quickstart-azureml-automl.ipynb) | Classification | Automated ML | ## Advanced Samples diff --git a/tutorials/quickstart-ci/ClassificationWithAutomatedML.ipynb b/tutorials/compute-instance-quickstarts/quickstart-azureml-automl/quickstart-azureml-automl.ipynb similarity index 99% rename from tutorials/quickstart-ci/ClassificationWithAutomatedML.ipynb rename to tutorials/compute-instance-quickstarts/quickstart-azureml-automl/quickstart-azureml-automl.ipynb index 1ca83f28..6069c73a 100644 --- a/tutorials/quickstart-ci/ClassificationWithAutomatedML.ipynb +++ b/tutorials/compute-instance-quickstarts/quickstart-azureml-automl/quickstart-azureml-automl.ipynb @@ -488,18 +488,11 @@ "pygments_lexer": "ipython3", "version": "3.6.9" }, - "microsoft": { - "host": { - "AzureML": { - "notebookHasBeenCompleted": true - } - } - }, "notice": "Copyright (c) Microsoft Corporation. All rights reserved. 
Licensed under the MIT License.", "nteract": { "version": "nteract-front-end@1.0.0" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } \ No newline at end of file diff --git a/tutorials/get-started-day1/day1-part1-setup.yml b/tutorials/compute-instance-quickstarts/quickstart-azureml-automl/quickstart-azureml-automl.yml similarity index 53% rename from tutorials/get-started-day1/day1-part1-setup.yml rename to tutorials/compute-instance-quickstarts/quickstart-azureml-automl/quickstart-azureml-automl.yml index 067b78ec..47519735 100644 --- a/tutorials/get-started-day1/day1-part1-setup.yml +++ b/tutorials/compute-instance-quickstarts/quickstart-azureml-automl/quickstart-azureml-automl.yml @@ -1,4 +1,4 @@ -name: day1-part1-setup +name: quickstart-azureml-automl dependencies: - pip: - azureml-sdk diff --git a/tutorials/quickstart-ci/AzureMLIn10mins.ipynb b/tutorials/compute-instance-quickstarts/quickstart-azureml-in-10mins/quickstart-azureml-in-10mins.ipynb similarity index 98% rename from tutorials/quickstart-ci/AzureMLIn10mins.ipynb rename to tutorials/compute-instance-quickstarts/quickstart-azureml-in-10mins/quickstart-azureml-in-10mins.ipynb index c6901872..d3ddb25b 100644 --- a/tutorials/quickstart-ci/AzureMLIn10mins.ipynb +++ b/tutorials/compute-instance-quickstarts/quickstart-azureml-in-10mins/quickstart-azureml-in-10mins.ipynb @@ -625,7 +625,7 @@ "\n", "Now that you have working code in a development environment, learn how to submit a **_job_** - ideally on a schedule or trigger (for example, arrival of new data).\n", "\n", - " [**Learn how to get started with Azure ML Job Submission**](GettingStartedWithPythonSDK.ipynb) " + " [**Learn how to get started with Azure ML Job Submission**](../quickstart-azureml-python-sdk/quickstart-azureml-python-sdk.ipynb) " ] } ], @@ -637,7 +637,7 @@ ], "kernelspec": { "display_name": "Python 3.6", - "language": "python36", + "language": "python", "name": "python36" }, "language_info": { @@ -650,14 +650,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.5" - }, - "microsoft": { - "host": { - "AzureML": { - "notebookHasBeenCompleted": true - } - } + "version": "3.6.9" }, "notice": "Copyright (c) Microsoft Corporation. All rights reserved. 
Licensed under the MIT License.", "nteract": { @@ -665,5 +658,5 @@ } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } \ No newline at end of file diff --git a/tutorials/quickstart-ci/GettingStartedWithPythonSDK.yml b/tutorials/compute-instance-quickstarts/quickstart-azureml-in-10mins/quickstart-azureml-in-10mins.yml similarity index 79% rename from tutorials/quickstart-ci/GettingStartedWithPythonSDK.yml rename to tutorials/compute-instance-quickstarts/quickstart-azureml-in-10mins/quickstart-azureml-in-10mins.yml index a0aa8c05..0db3895d 100644 --- a/tutorials/quickstart-ci/GettingStartedWithPythonSDK.yml +++ b/tutorials/compute-instance-quickstarts/quickstart-azureml-in-10mins/quickstart-azureml-in-10mins.yml @@ -1,4 +1,4 @@ -name: GettingStartedWithPythonSDK +name: quickstart-azureml-in-10mins dependencies: - pip: - azureml-sdk diff --git a/tutorials/quickstart-ci/score.py b/tutorials/compute-instance-quickstarts/quickstart-azureml-in-10mins/score.py similarity index 100% rename from tutorials/quickstart-ci/score.py rename to tutorials/compute-instance-quickstarts/quickstart-azureml-in-10mins/score.py diff --git a/tutorials/quickstart-ci/sklearn-mnist-batch/utils.py b/tutorials/compute-instance-quickstarts/quickstart-azureml-in-10mins/utils.py similarity index 100% rename from tutorials/quickstart-ci/sklearn-mnist-batch/utils.py rename to tutorials/compute-instance-quickstarts/quickstart-azureml-in-10mins/utils.py diff --git a/tutorials/quickstart-ci/GettingStartedWithPythonSDK.ipynb b/tutorials/compute-instance-quickstarts/quickstart-azureml-python-sdk/quickstart-azureml-python-sdk.ipynb similarity index 90% rename from tutorials/quickstart-ci/GettingStartedWithPythonSDK.ipynb rename to tutorials/compute-instance-quickstarts/quickstart-azureml-python-sdk/quickstart-azureml-python-sdk.ipynb index 2fcd6e9d..35c8e4ed 100644 --- a/tutorials/quickstart-ci/GettingStartedWithPythonSDK.ipynb +++ b/tutorials/compute-instance-quickstarts/quickstart-azureml-python-sdk/quickstart-azureml-python-sdk.ipynb @@ -67,17 +67,16 @@ }, "outputs": [], "source": [ - "import numpy as np\n", - "import matplotlib.pyplot as plt\n", - "\n", - "import azureml.core\n", - "from azureml.core import Workspace\n", - "from azureml.core import Experiment\n", - "\n", - "# connect to your workspace\n", - "ws = Workspace.from_config()\n", - "\n", - "experiment_name = \"get-started-with-jobsubmission-tutorial\"\n", + "import numpy as np\r\n", + "import matplotlib.pyplot as plt\r\n", + "\r\n", + "from azureml.core import Workspace\r\n", + "from azureml.core import Experiment\r\n", + "\r\n", + "# connect to your workspace\r\n", + "ws = Workspace.from_config()\r\n", + "\r\n", + "experiment_name = \"get-started-with-jobsubmission-tutorial\"\r\n", "exp = Experiment(workspace=ws, name=experiment_name)" ] }, @@ -175,55 +174,55 @@ }, "outputs": [], "source": [ - "# make sure utils.py is in the same directory as this code\n", - "from utils import load_data\n", - "import glob\n", - "\n", - "\n", - "# note we also shrink the intensity values (X) from 0-255 to 0-1. 
This helps the model converge faster.\n", - "X_train = (\n", - " load_data(\n", - " glob.glob(\n", - " os.path.join(data_folder, \"**/train-images-idx3-ubyte.gz\"), recursive=True\n", - " )[0],\n", - " False,\n", - " )\n", - " / 255.0\n", - ")\n", - "X_test = (\n", - " load_data(\n", - " glob.glob(\n", - " os.path.join(data_folder, \"**/t10k-images-idx3-ubyte.gz\"), recursive=True\n", - " )[0],\n", - " False,\n", - " )\n", - " / 255.0\n", - ")\n", - "y_train = load_data(\n", - " glob.glob(\n", - " os.path.join(data_folder, \"**/train-labels-idx1-ubyte.gz\"), recursive=True\n", - " )[0],\n", - " True,\n", - ").reshape(-1)\n", - "y_test = load_data(\n", - " glob.glob(\n", - " os.path.join(data_folder, \"**/t10k-labels-idx1-ubyte.gz\"), recursive=True\n", - " )[0],\n", - " True,\n", - ").reshape(-1)\n", - "\n", - "\n", - "# now let's show some randomly chosen images from the training set.\n", - "count = 0\n", - "sample_size = 30\n", - "plt.figure(figsize=(16, 6))\n", - "for i in np.random.permutation(X_train.shape[0])[:sample_size]:\n", - " count = count + 1\n", - " plt.subplot(1, sample_size, count)\n", - " plt.axhline(\"\")\n", - " plt.axvline(\"\")\n", - " plt.text(x=10, y=-10, s=y_train[i], fontsize=18)\n", - " plt.imshow(X_train[i].reshape(28, 28), cmap=plt.cm.Greys)\n", + "# make sure utils.py is in the same directory as this code\r\n", + "from src.utils import load_data\r\n", + "import glob\r\n", + "\r\n", + "\r\n", + "# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the model converge faster.\r\n", + "X_train = (\r\n", + " load_data(\r\n", + " glob.glob(\r\n", + " os.path.join(data_folder, \"**/train-images-idx3-ubyte.gz\"), recursive=True\r\n", + " )[0],\r\n", + " False,\r\n", + " )\r\n", + " / 255.0\r\n", + ")\r\n", + "X_test = (\r\n", + " load_data(\r\n", + " glob.glob(\r\n", + " os.path.join(data_folder, \"**/t10k-images-idx3-ubyte.gz\"), recursive=True\r\n", + " )[0],\r\n", + " False,\r\n", + " )\r\n", + " / 255.0\r\n", + ")\r\n", + "y_train = load_data(\r\n", + " glob.glob(\r\n", + " os.path.join(data_folder, \"**/train-labels-idx1-ubyte.gz\"), recursive=True\r\n", + " )[0],\r\n", + " True,\r\n", + ").reshape(-1)\r\n", + "y_test = load_data(\r\n", + " glob.glob(\r\n", + " os.path.join(data_folder, \"**/t10k-labels-idx1-ubyte.gz\"), recursive=True\r\n", + " )[0],\r\n", + " True,\r\n", + ").reshape(-1)\r\n", + "\r\n", + "\r\n", + "# now let's show some randomly chosen images from the training set.\r\n", + "count = 0\r\n", + "sample_size = 30\r\n", + "plt.figure(figsize=(16, 6))\r\n", + "for i in np.random.permutation(X_train.shape[0])[:sample_size]:\r\n", + " count = count + 1\r\n", + " plt.subplot(1, sample_size, count)\r\n", + " plt.axhline(\"\")\r\n", + " plt.axvline(\"\")\r\n", + " plt.text(x=10, y=-10, s=y_train[i], fontsize=18)\r\n", + " plt.imshow(X_train[i].reshape(28, 28), cmap=plt.cm.Greys)\r\n", "plt.show()" ] }, @@ -274,7 +273,7 @@ }, "outputs": [], "source": [ - "with open(\"sklearn-mnist-batch/train.py\", \"r\") as f:\n", + "with open(\"./src/train.py\", \"r\") as f:\n", " print(f.read())" ] }, @@ -375,8 +374,8 @@ } }, "source": [ - "Create a [ScriptRunConfig](https://docs.microsoft.com/python/api/azureml-core/azureml.core.scriptrunconfig?preserve-view=true&view=azure-ml-py) object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on. 
A script run configuration is used to configure the information necessary for submitting a training run as part of an experiment. \n", - "\n", + "Create a [ScriptRunConfig](https://docs.microsoft.com/python/api/azureml-core/azureml.core.scriptrunconfig?preserve-view=true&view=azure-ml-py) object to specify the configuration details of your training job, including your training script, environment to use, and the compute target to run on. A script run configuration is used to configure the information necessary for submitting a training run as part of an experiment. In this case we will run this on a 'local' compute target, which is the compute instance you are running this notebook on.\r\n", + "\r\n", "Read more about configuring and submitting training runs [here](https://docs.microsoft.com/azure/machine-learning/how-to-set-up-training-targets). " ] }, @@ -403,9 +402,8 @@ "\n", "args = [\"--data-folder\", mnist_file_dataset.as_mount(), \"--regularization\", 0.5]\n", "\n", - "script_folder = \"sklearn-mnist-batch\"\n", "src = ScriptRunConfig(\n", - " source_directory=script_folder,\n", + " source_directory=\"src\",\n", " script=\"train.py\",\n", " arguments=args,\n", " compute_target=\"local\",\n", @@ -673,7 +671,7 @@ "\n", "In this quickstart, you have seen how to run jobs-based machine learning code in Azure Machine Learning. \n", "\n", - "It is also possible to use automated machine learning in Azure Machine Learning service to find the best model in an automated fashion. To see how this works, we recommend that you follow the next quickstart in this series, [**Fraud Classification using Automated ML**](ClassificationWithAutomatedML.ipynb). This quickstart is focused on AutoML using the Python SDK." + "It is also possible to use automated machine learning in Azure Machine Learning service to find the best model in an automated fashion. To see how this works, we recommend that you follow the next quickstart in this series, [**Fraud Classification using Automated ML**](../quickstart-azureml-automl/quickstart-azureml-automl.ipynb). This quickstart is focused on AutoML using the Python SDK." 
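
The hunk above only changes how the `ScriptRunConfig` is constructed (pointing `source_directory` at `src` and keeping the `local` target, i.e. the compute instance itself); the submission step of the notebook is untouched. For orientation, that step looks roughly like this (a sketch reusing the `exp` and `src` objects defined earlier in the notebook):

```python
# `exp` is the Experiment and `src` the ScriptRunConfig from the cells above
run = exp.submit(src)

# stream the run's logs into the notebook until it completes
run.wait_for_completion(show_output=True)

# any metrics logged by train.py are then queryable from the run object
print(run.get_metrics())
```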
] } ], @@ -706,5 +704,5 @@ } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } \ No newline at end of file diff --git a/tutorials/quickstart-ci/AzureMLIn10mins.yml b/tutorials/compute-instance-quickstarts/quickstart-azureml-python-sdk/quickstart-azureml-python-sdk.yml similarity index 70% rename from tutorials/quickstart-ci/AzureMLIn10mins.yml rename to tutorials/compute-instance-quickstarts/quickstart-azureml-python-sdk/quickstart-azureml-python-sdk.yml index 144bb6e7..22268be7 100644 --- a/tutorials/quickstart-ci/AzureMLIn10mins.yml +++ b/tutorials/compute-instance-quickstarts/quickstart-azureml-python-sdk/quickstart-azureml-python-sdk.yml @@ -1,4 +1,4 @@ -name: AzureMLIn10mins +name: quickstart-azureml-python-sdk dependencies: - pip: - azureml-sdk @@ -9,3 +9,4 @@ dependencies: - uuid - requests - azureml-opendatasets + - azureml-widgets diff --git a/tutorials/quickstart-ci/sklearn-mnist-batch/train.py b/tutorials/compute-instance-quickstarts/quickstart-azureml-python-sdk/src/train.py similarity index 100% rename from tutorials/quickstart-ci/sklearn-mnist-batch/train.py rename to tutorials/compute-instance-quickstarts/quickstart-azureml-python-sdk/src/train.py diff --git a/tutorials/quickstart-ci/utils.py b/tutorials/compute-instance-quickstarts/quickstart-azureml-python-sdk/src/utils.py similarity index 100% rename from tutorials/quickstart-ci/utils.py rename to tutorials/compute-instance-quickstarts/quickstart-azureml-python-sdk/src/utils.py diff --git a/tutorials/get-started-day1/IDE-users/01-create-workspace.py b/tutorials/get-started-day1/IDE-users/01-create-workspace.py deleted file mode 100644 index 053909d0..00000000 --- a/tutorials/get-started-day1/IDE-users/01-create-workspace.py +++ /dev/null @@ -1,12 +0,0 @@ -# 01-create-workspace.py -from azureml.core import Workspace - -# Example locations: 'westeurope' or 'eastus2' or 'westus2' or 'southeastasia'. 
-ws = Workspace.create(name='', - subscription_id='', - resource_group='', - create_resource_group=True, - location='') - -# write out the workspace details to a configuration file: .azureml/config.json -ws.write_config(path='.azureml') diff --git a/tutorials/get-started-day1/IDE-users/02-create-compute.py b/tutorials/get-started-day1/IDE-users/02-create-compute.py deleted file mode 100644 index ab2ec4eb..00000000 --- a/tutorials/get-started-day1/IDE-users/02-create-compute.py +++ /dev/null @@ -1,23 +0,0 @@ -# 02-create-compute.py -from azureml.core import Workspace -from azureml.core.compute import ComputeTarget, AmlCompute -from azureml.core.compute_target import ComputeTargetException - -ws = Workspace.from_config() - -# Choose a name for your CPU cluster -cpu_cluster_name = "cpu-cluster" - -# Verify that cluster does not exist already -try: - cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name) - print('Found existing cluster, use it.') -except ComputeTargetException: - cfg = AmlCompute.provisioning_configuration( - vm_size='STANDARD_D2_V2', - max_nodes=4, - idle_seconds_before_scaledown=2400 - ) - cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, cfg) - -cpu_cluster.wait_for_completion(show_output=True) diff --git a/tutorials/get-started-day1/IDE-users/03-run-hello.py b/tutorials/get-started-day1/IDE-users/03-run-hello.py deleted file mode 100644 index 9eaca6b0..00000000 --- a/tutorials/get-started-day1/IDE-users/03-run-hello.py +++ /dev/null @@ -1,13 +0,0 @@ -# 03-run-hello.py -from azureml.core import Workspace, Experiment, ScriptRunConfig - -ws = Workspace.from_config() -experiment = Experiment(workspace=ws, name='day1-experiment-hello') - -config = ScriptRunConfig(source_directory='./src', - script='hello.py', - compute_target='cpu-cluster') - -run = experiment.submit(config) -aml_url = run.get_portal_url() -print(aml_url) diff --git a/tutorials/get-started-day1/IDE-users/04-run-pytorch.py b/tutorials/get-started-day1/IDE-users/04-run-pytorch.py deleted file mode 100644 index 93ed8b06..00000000 --- a/tutorials/get-started-day1/IDE-users/04-run-pytorch.py +++ /dev/null @@ -1,24 +0,0 @@ -# 04-run-pytorch.py -from azureml.core import Workspace -from azureml.core import Experiment -from azureml.core import Environment -from azureml.core import ScriptRunConfig - -if __name__ == "__main__": - ws = Workspace.from_config() - experiment = Experiment(workspace=ws, name='day1-experiment-train') - config = ScriptRunConfig(source_directory='./src', - script='train.py', - compute_target='cpu-cluster') - - # set up pytorch environment - env = Environment.from_conda_specification( - name='pytorch-env', - file_path='./environments/pytorch-env.yml' - ) - config.run_config.environment = env - - run = experiment.submit(config) - - aml_url = run.get_portal_url() - print(aml_url) diff --git a/tutorials/get-started-day1/IDE-users/05-upload-data.py b/tutorials/get-started-day1/IDE-users/05-upload-data.py deleted file mode 100644 index eafd680b..00000000 --- a/tutorials/get-started-day1/IDE-users/05-upload-data.py +++ /dev/null @@ -1,7 +0,0 @@ -# 05-upload-data.py -from azureml.core import Workspace -ws = Workspace.from_config() -datastore = ws.get_default_datastore() -datastore.upload(src_dir='./data', - target_path='datasets/cifar10', - overwrite=True) diff --git a/tutorials/get-started-day1/IDE-users/06-run-pytorch-data.py b/tutorials/get-started-day1/IDE-users/06-run-pytorch-data.py deleted file mode 100644 index b8cb71a9..00000000 --- 
a/tutorials/get-started-day1/IDE-users/06-run-pytorch-data.py +++ /dev/null @@ -1,35 +0,0 @@ -# 06-run-pytorch-data.py -from azureml.core import Workspace -from azureml.core import Experiment -from azureml.core import Environment -from azureml.core import ScriptRunConfig -from azureml.core import Dataset - -if __name__ == "__main__": - ws = Workspace.from_config() - datastore = ws.get_default_datastore() - dataset = Dataset.File.from_files(path=(datastore, 'datasets/cifar10')) - - experiment = Experiment(workspace=ws, name='day1-experiment-data') - - config = ScriptRunConfig( - source_directory='./src', - script='train.py', - compute_target='cpu-cluster', - arguments=[ - '--data_path', dataset.as_named_input('input').as_mount(), - '--learning_rate', 0.003, - '--momentum', 0.92], - ) - # set up pytorch environment - env = Environment.from_conda_specification( - name='pytorch-env', - file_path='./environments/pytorch-env.yml' - ) - config.run_config.environment = env - - run = experiment.submit(config) - aml_url = run.get_portal_url() - print("Submitted to compute cluster. Click link below") - print("") - print(aml_url) diff --git a/tutorials/get-started-day1/IDE-users/README.md b/tutorials/get-started-day1/IDE-users/README.md deleted file mode 100644 index 6d429351..00000000 --- a/tutorials/get-started-day1/IDE-users/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# Get Started (day 1) with Azure Machine Learning: IDE Users - -This folder has been set up for IDE users (for example, VS Code or PyCharm) following the [Get started (day 1) with Azure Machine Learning tutorial series](https://aka.ms/day1aml). - -The directory is structured as follows: - -```Text -IDE-users -└──environments -| └──pytorch-env.yml -└──src -| └──hello.py -| └──model.py -| └──train.py -└──01-create-workspace.py -└──02-create-compute.py -└──03-run-hello.py -└──04-run-pytorch.py -└──05-upload-data.py -└──06-run-pytorch-data.py -``` - -Please refer to [the documentation](https://aka.ms/day1aml) for more details on these files. 
- -![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/tutorials/get-started-day1/IDE/README.png) \ No newline at end of file diff --git a/tutorials/get-started-day1/IDE-users/environments/pytorch-env.yml b/tutorials/get-started-day1/IDE-users/environments/pytorch-env.yml deleted file mode 100644 index 703e3265..00000000 --- a/tutorials/get-started-day1/IDE-users/environments/pytorch-env.yml +++ /dev/null @@ -1,9 +0,0 @@ - -name: pytorch-env -channels: - - defaults - - pytorch -dependencies: - - python=3.6.2 - - pytorch - - torchvision diff --git a/tutorials/get-started-day1/IDE-users/src/hello.py b/tutorials/get-started-day1/IDE-users/src/hello.py deleted file mode 100644 index 81bc4f35..00000000 --- a/tutorials/get-started-day1/IDE-users/src/hello.py +++ /dev/null @@ -1,2 +0,0 @@ - -print("hello world!") diff --git a/tutorials/get-started-day1/IDE-users/src/model.py b/tutorials/get-started-day1/IDE-users/src/model.py deleted file mode 100644 index a676db74..00000000 --- a/tutorials/get-started-day1/IDE-users/src/model.py +++ /dev/null @@ -1,22 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F - - -class Net(nn.Module): - def __init__(self): - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 5 * 5, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - def forward(self, x): - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 5 * 5) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - x = self.fc3(x) - return x diff --git a/tutorials/get-started-day1/IDE-users/src/train.py b/tutorials/get-started-day1/IDE-users/src/train.py deleted file mode 100644 index a0ef9496..00000000 --- a/tutorials/get-started-day1/IDE-users/src/train.py +++ /dev/null @@ -1,52 +0,0 @@ -import torch -import torch.optim as optim -import torchvision -import torchvision.transforms as transforms - -from model import Net - -# download CIFAR 10 data -trainset = torchvision.datasets.CIFAR10( - root="./data", - train=True, - download=True, - transform=torchvision.transforms.ToTensor(), -) -trainloader = torch.utils.data.DataLoader( - trainset, batch_size=4, shuffle=True, num_workers=2 -) - -if __name__ == "__main__": - - # define convolutional network - net = Net() - - # set up pytorch loss / optimizer - criterion = torch.nn.CrossEntropyLoss() - optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) - - # train the network - for epoch in range(2): - - running_loss = 0.0 - for i, data in enumerate(trainloader, 0): - # unpack the data - inputs, labels = data - - # zero the parameter gradients - optimizer.zero_grad() - - # forward + backward + optimize - outputs = net(inputs) - loss = criterion(outputs, labels) - loss.backward() - optimizer.step() - - # print statistics - running_loss += loss.item() - if i % 2000 == 1999: - loss = running_loss / 2000 - print(f"epoch={epoch + 1}, batch={i + 1:5}: loss {loss:.2f}") - running_loss = 0.0 - - print("Finished Training") diff --git a/tutorials/get-started-day1/code/hello/hello.py b/tutorials/get-started-day1/code/hello/hello.py deleted file mode 100644 index 81bc4f35..00000000 --- a/tutorials/get-started-day1/code/hello/hello.py +++ /dev/null @@ -1,2 +0,0 @@ - -print("hello world!") diff --git a/tutorials/get-started-day1/code/pytorch-cifar10-train-with-logging/model.py 
b/tutorials/get-started-day1/code/pytorch-cifar10-train-with-logging/model.py deleted file mode 100644 index a676db74..00000000 --- a/tutorials/get-started-day1/code/pytorch-cifar10-train-with-logging/model.py +++ /dev/null @@ -1,22 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F - - -class Net(nn.Module): - def __init__(self): - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 5 * 5, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - def forward(self, x): - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 5 * 5) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - x = self.fc3(x) - return x diff --git a/tutorials/get-started-day1/code/pytorch-cifar10-train-with-logging/train.py b/tutorials/get-started-day1/code/pytorch-cifar10-train-with-logging/train.py deleted file mode 100644 index b7d47371..00000000 --- a/tutorials/get-started-day1/code/pytorch-cifar10-train-with-logging/train.py +++ /dev/null @@ -1,62 +0,0 @@ -import torch -import torch.optim as optim -import torchvision -import torchvision.transforms as transforms - -from model import Net -from azureml.core import Run - - -# ADDITIONAL CODE: get AML run from the current context -run = Run.get_context() - -# download CIFAR 10 data -trainset = torchvision.datasets.CIFAR10( - root='./data', - train=True, - download=True, - transform=torchvision.transforms.ToTensor() -) -trainloader = torch.utils.data.DataLoader( - trainset, - batch_size=4, - shuffle=True, - num_workers=2 -) - -if __name__ == "__main__": - - # define convolutional network - net = Net() - - # set up pytorch loss / optimizer - criterion = torch.nn.CrossEntropyLoss() - optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) - - # train the network - for epoch in range(2): - - running_loss = 0.0 - for i, data in enumerate(trainloader, 0): - # unpack the data - inputs, labels = data - - # zero the parameter gradients - optimizer.zero_grad() - - # forward + backward + optimize - outputs = net(inputs) - loss = criterion(outputs, labels) - loss.backward() - optimizer.step() - - # print statistics - running_loss += loss.item() - if i % 2000 == 1999: - loss = running_loss / 2000 - # ADDITIONAL CODE: log loss metric to AML - run.log('loss', loss) - print(f'epoch={epoch + 1}, batch={i + 1:5}: loss {loss:.2f}') - running_loss = 0.0 - - print('Finished Training') diff --git a/tutorials/get-started-day1/code/pytorch-cifar10-train/model.py b/tutorials/get-started-day1/code/pytorch-cifar10-train/model.py deleted file mode 100644 index a676db74..00000000 --- a/tutorials/get-started-day1/code/pytorch-cifar10-train/model.py +++ /dev/null @@ -1,22 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F - - -class Net(nn.Module): - def __init__(self): - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 5 * 5, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - def forward(self, x): - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 5 * 5) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - x = self.fc3(x) - return x diff --git a/tutorials/get-started-day1/code/pytorch-cifar10-train/train.py b/tutorials/get-started-day1/code/pytorch-cifar10-train/train.py deleted file mode 100644 index a0ef9496..00000000 --- 
a/tutorials/get-started-day1/code/pytorch-cifar10-train/train.py +++ /dev/null @@ -1,52 +0,0 @@ -import torch -import torch.optim as optim -import torchvision -import torchvision.transforms as transforms - -from model import Net - -# download CIFAR 10 data -trainset = torchvision.datasets.CIFAR10( - root="./data", - train=True, - download=True, - transform=torchvision.transforms.ToTensor(), -) -trainloader = torch.utils.data.DataLoader( - trainset, batch_size=4, shuffle=True, num_workers=2 -) - -if __name__ == "__main__": - - # define convolutional network - net = Net() - - # set up pytorch loss / optimizer - criterion = torch.nn.CrossEntropyLoss() - optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) - - # train the network - for epoch in range(2): - - running_loss = 0.0 - for i, data in enumerate(trainloader, 0): - # unpack the data - inputs, labels = data - - # zero the parameter gradients - optimizer.zero_grad() - - # forward + backward + optimize - outputs = net(inputs) - loss = criterion(outputs, labels) - loss.backward() - optimizer.step() - - # print statistics - running_loss += loss.item() - if i % 2000 == 1999: - loss = running_loss / 2000 - print(f"epoch={epoch + 1}, batch={i + 1:5}: loss {loss:.2f}") - running_loss = 0.0 - - print("Finished Training") diff --git a/tutorials/get-started-day1/code/pytorch-cifar10-your-data/model.py b/tutorials/get-started-day1/code/pytorch-cifar10-your-data/model.py deleted file mode 100644 index a676db74..00000000 --- a/tutorials/get-started-day1/code/pytorch-cifar10-your-data/model.py +++ /dev/null @@ -1,22 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F - - -class Net(nn.Module): - def __init__(self): - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 5 * 5, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - def forward(self, x): - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 5 * 5) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - x = self.fc3(x) - return x diff --git a/tutorials/get-started-day1/code/pytorch-cifar10-your-data/train.py b/tutorials/get-started-day1/code/pytorch-cifar10-your-data/train.py deleted file mode 100644 index a9337ab4..00000000 --- a/tutorials/get-started-day1/code/pytorch-cifar10-your-data/train.py +++ /dev/null @@ -1,96 +0,0 @@ - -import os -import argparse -import torch -import torch.optim as optim -import torchvision -import torchvision.transforms as transforms - -from model import Net -from azureml.core import Run - -run = Run.get_context() - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - '--data_path', - type=str, - help='Path to the training data' - ) - parser.add_argument( - '--learning_rate', - type=float, - default=0.001, - help='Learning rate for SGD' - ) - parser.add_argument( - '--momentum', - type=float, - default=0.9, - help='Momentum for SGD' - ) - - args = parser.parse_args() - - print("===== DATA =====") - print("DATA PATH: " + args.data_path) - print("LIST FILES IN DATA PATH...") - print(os.listdir(args.data_path)) - print("================") - - # prepare DataLoader for CIFAR10 data - transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) - ]) - trainset = torchvision.datasets.CIFAR10( - root=args.data_path, - train=True, - download=False, - transform=transform, - ) - trainloader = 
torch.utils.data.DataLoader( - trainset, - batch_size=4, - shuffle=True, - num_workers=2 - ) - - # define convolutional network - net = Net() - - # set up pytorch loss / optimizer - criterion = torch.nn.CrossEntropyLoss() - optimizer = optim.SGD( - net.parameters(), - lr=args.learning_rate, - momentum=args.momentum, - ) - - # train the network - for epoch in range(2): - - running_loss = 0.0 - for i, data in enumerate(trainloader, 0): - # unpack the data - inputs, labels = data - - # zero the parameter gradients - optimizer.zero_grad() - - # forward + backward + optimize - outputs = net(inputs) - loss = criterion(outputs, labels) - loss.backward() - optimizer.step() - - # print statistics - running_loss += loss.item() - if i % 2000 == 1999: - loss = running_loss / 2000 - run.log('loss', loss) # log loss metric to AML - print(f'epoch={epoch + 1}, batch={i + 1:5}: loss {loss:.2f}') - running_loss = 0.0 - - print('Finished Training') diff --git a/tutorials/get-started-day1/configuration/pytorch-aml-env.yml b/tutorials/get-started-day1/configuration/pytorch-aml-env.yml deleted file mode 100644 index e5c58540..00000000 --- a/tutorials/get-started-day1/configuration/pytorch-aml-env.yml +++ /dev/null @@ -1,11 +0,0 @@ -name: pytorch-aml-env -channels: - - defaults - - pytorch -dependencies: - - python=3.6.2 - - pytorch - - torchvision - - pip - - pip: - - azureml-sdk diff --git a/tutorials/get-started-day1/configuration/pytorch-env.yml b/tutorials/get-started-day1/configuration/pytorch-env.yml deleted file mode 100644 index 703e3265..00000000 --- a/tutorials/get-started-day1/configuration/pytorch-env.yml +++ /dev/null @@ -1,9 +0,0 @@ - -name: pytorch-env -channels: - - defaults - - pytorch -dependencies: - - python=3.6.2 - - pytorch - - torchvision diff --git a/tutorials/get-started-day1/day1-part1-setup.ipynb b/tutorials/get-started-day1/day1-part1-setup.ipynb deleted file mode 100644 index 713f21ed..00000000 --- a/tutorials/get-started-day1/day1-part1-setup.ipynb +++ /dev/null @@ -1,166 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Copyright (c) Microsoft Corporation. All rights reserved.\n", - "\n", - "Licensed under the MIT License." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/tutorials/day1-part1-setup.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Tutorial: Get started (day 1) with Azure Machine Learning (Part 1 of 4)\n", - "\n", - "---\n", - "## Introduction \n", - "\n", - "In this **four-part tutorial series**, you will learn the fundamentals of Azure Machine Learning and complete jobs-based Python machine learning tasks in the Azure cloud, including:\n", - "\n", - "1. Set up a compute cluster\n", - "2. Run code in the cloud using Azure Machine Learning's Python SDK.\n", - "3. Manage the Python environment you use for model training.\n", - "4. Upload data to Azure and consume that data in training.\n", - "\n", - "In this first part of the tutorial series you learn how to create an Azure Machine Learning Compute Cluster that will be used in subsequent parts of the series to submit jobs to. This notebook follows the steps provided on the [Python (day 1) - set up local computer documentation page](https://aka.ms/day1aml).\n", - "\n", - "## Pre-requisites \n", - "\n", - "- An Azure Subscription. If you don't have an Azure subscription, create a free account before you begin. 
Try [Azure Machine Learning](https://aka.ms/AMLFree) today.\n", - "- Familiarity with Python and Machine Learning concepts. For example, environments, training, scoring, and so on.\n", - "- If you are using a compute instance in Azure Machine Learning to run this notebook series, you are all set. Otherwise, please follow [Configure a development environment for Azure Machine Learning](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment).\n", - "\n", - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Ensure you have the latest Azure Machine Learning Python SDK\n", - "\n", - "This tutorial series depends on having the Azure Machine Learning SDK version 1.14.0 or later installed. You can check your version using the code cell below." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from azureml.core import VERSION\n", - "\n", - "print ('Version: ' + VERSION)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If your version is below 1.14.0, then upgrade the SDK using `pip` (**Note: You may need to restart your kernel for the changes to take effect. Re-run the cell above to ensure you have the right version**).\n", - "\n", - "```bash\n", - "!pip install -U azureml-sdk\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Create an Azure Machine Learning compute cluster \n", - "\n", - "As this tutorial focuses on jobs-based machine learning tasks, you will be submitting python code to run on an Azure Machine Learning **Compute cluster**, which is well suited for large jobs and production. Therefore, you create an Azure Machine Learning compute cluster that will auto-scale between zero and four nodes:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "create mlc", - "batchai" - ] - }, - "outputs": [], - "source": [ - "from azureml.core import Workspace\n", - "from azureml.core.compute import ComputeTarget, AmlCompute\n", - "from azureml.core.compute_target import ComputeTargetException\n", - "\n", - "ws = Workspace.from_config() # this automatically looks for a directory .azureml\n", - "\n", - "# Choose a name for your CPU cluster\n", - "cpu_cluster_name = \"cpu-cluster\"\n", - "\n", - "# Verify that cluster does not exist already\n", - "try:\n", - " cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n", - " print('Found existing cluster, use it.')\n", - "except ComputeTargetException:\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n", - " max_nodes=4, \n", - " idle_seconds_before_scaledown=2400)\n", - " cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n", - "\n", - "cpu_cluster.wait_for_completion(show_output=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "> ! INFORMATION \n", - "> When the cluster has been created, it will have 0 nodes provisioned. Therefore, the cluster does not incur costs until you submit a job. This cluster will scale down when it has been idle for 2400 seconds (40 minutes)."
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Next Steps\n", - "\n", - "In the next tutorial, you walk through submitting a script to the Azure Machine Learning compute cluster.\n", - "\n", - "[Tutorial: Run \"Hello World\" Python Script on Azure](day1-part2-hello-world.ipynb)\n" - ] - } - ], - "metadata": { - "authors": [ - { - "name": "samkemp" - } - ], - "kernelspec": { - "display_name": "Python 3.6", - "language": "python", - "name": "python36" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.5" - }, - "notice": "Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License." - }, - "nbformat": 4, - "nbformat_minor": 4 -} \ No newline at end of file diff --git a/tutorials/get-started-day1/day1-part2-hello-world.ipynb b/tutorials/get-started-day1/day1-part2-hello-world.ipynb deleted file mode 100644 index 9c42e0d5..00000000 --- a/tutorials/get-started-day1/day1-part2-hello-world.ipynb +++ /dev/null @@ -1,204 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Copyright (c) Microsoft Corporation. All rights reserved.\n", - "\n", - "Licensed under the MIT License." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/tutorials/get-started-day1/day1-part2-hello-world.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Tutorial: \"Hello World\" (Part 2 of 4)\n", - "\n", - "---\n", - "## Introduction\n", - "In **part 2 of this get started series**, you will submit a trivial \"hello world\" python script to the cloud by:\n", - "\n", - "- Running Python code in the cloud with Azure Machine Learning SDK\n", - "- Switching between debugging locally on a compute instance.\n", - "- Submitting remote runs in the cloud\n", - "- Monitoring and recording runs in the Azure Machine Learning studio\n", - "\n", - "This notebook follows the steps provided on the [Python (day 1) - \"hello world\" documentation page](https://aka.ms/day1aml). This tutorial is part of a **four-part tutorial series** in which you learn the fundamentals of Azure Machine Learning and complete simple jobs-based machine learning tasks in the Azure cloud. It builds off the work you completed in [Tutorial part 1: set up an Azure Machine Learning compute cluster](day1-part1-setup.ipynb).\n", - "\n", - "## Pre-requisites\n", - "\n", - "- Complete [Tutorial part 1: set up an Azure Machine Learning compute cluster](day1-part1-setup.ipynb) if you don't already have an Azure Machine Learning compute cluster.\n", - "- Familiarity with Python and Machine Learning concepts.\n", - "- If you are using a compute instance in Azure Machine Learning to run this notebook series, you are all set. 
Otherwise, please follow [Configure a development environment for Azure Machine Learning](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment).\n", - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Your code\n", - "\n", - "In the `code/hello` subdirectory you will find a trivial python script [hello.py](code/hello/hello.py) that has the following code:\n", - "\n", - "```Python\n", - "# code/hello/hello.py\n", - "print(\"hello world!\")\n", - "```\n", - "\n", - "In this tutorial you are going to submit this trivial python script to an Azure Machine Learning Compute Cluster." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Test in your development environment\n", - "\n", - "You can test that your code works on a compute instance or locally (for example, a laptop), which has the benefit of interactive debugging of code:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "!python code/hello/hello.py" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Submit your code to Azure Machine Learning\n", - "\n", - "Below you create a __*control script*__. This is where you specify _how_ your code is submitted to Azure Machine Learning. The code you submit to Azure Machine Learning (in this case `hello.py`) does not need anything specific to Azure Machine Learning - it can be any valid Python code. It is only the control script that is Azure Machine Learning specific.\n", - "\n", - "The code below will show a Jupyter widget that tracks the progress of your run and displays logs.\n", - "\n", - "> ! NOTE
\n", - "> The very first run will take 5-10minutes to complete. This is because in the background a docker image is built in the cloud, the compute cluster is resized from 0 to 1 node, and the docker image is downloaded to the compute. Subsequent runs are much quicker (~15 seconds) as the docker image is cached on the compute - you can test this by resubmitting the code below after the first run has completed.
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "remote run", - "batchai", - "configure run", - "use notebook widget" - ] - }, - "outputs": [], - "source": [ - "from azureml.core import Workspace, Experiment, ScriptRunConfig\n", - "from azureml.widgets import RunDetails\n", - "\n", - "ws = Workspace.from_config()\n", - "experiment = Experiment(workspace=ws, name='day1-experiment-hello')\n", - "\n", - "config = ScriptRunConfig(source_directory='./code/hello', script='hello.py', compute_target='cpu-cluster')\n", - "\n", - "run = experiment.submit(config)\n", - "RunDetails(run).show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Understanding the control code\n", - "\n", - "| Code |Description | \n", - "|---|---|\n", - "| `ws = Workspace.from_config()` | [Workspace](https://docs.microsoft.com/python/api/azureml-core/azureml.core.workspace.workspace?view=azure-ml-py&preserve-view=true) connects to your Azure Machine Learning workspace, so that you can communicate with your Azure Machine Learning resources. |\n", - "| `experiment = Experiment( ... )` | [Experiment](https://docs.microsoft.com/python/api/azureml-core/azureml.core.experiment.experiment?view=azure-ml-py&preserve-view=true) provides a simple way to organize multiple runs under a single name.
Later you can see how experiments make it easy to compare metrics between dozens of runs. |\n", - "| `config = ScriptRunConfig( ... )` | [ScriptRunConfig](https://docs.microsoft.com/python/api/azureml-core/azureml.core.scriptrunconfig?view=azure-ml-py&preserve-view=true) wraps your `hello.py` code and passes it to your workspace.
As the name suggests, you can use this class to _configure_ how you want your _script_ to _run_ in Azure Machine Learning.
Also specifies what compute target the script will run on.
In this code, the target is the compute cluster you created in the [setup tutorial](tutorial-1st-experiment-sdk-setup-local.md). |\n", - "| `run = experiment.submit(config)` | Submits your script. This submission is called a [Run](https://docs.microsoft.com/python/api/azureml-core/azureml.core.run(class)?view=azure-ml-py&preserve-view=true).
A run encapsulates a single execution of your code. Use a run to monitor the script progress, capture the output,
analyze the results, visualize metrics and more. |\n", - "| `aml_url = run.get_portal_url()` | The `run` object provides a handle on the execution of your code. Monitor its progress from
the Azure Machine Learning Studio with the URL that is printed from the python script. |\n", - "|`RunDetails(run).show()` | There is an Azure Machine Learning widget that shows the progress of your job along with streaming the log files.\n", - "\n", - "## View the logs\n", - "\n", - "The widget has a dropdown box titled **Output logs**; select `70_driver_log.txt`, which shows the following standard output: \n", - "\n", - "```\n", - " 1: [2020-08-04T22:15:44.407305] Entering context manager injector.\n", - " 2: [context_manager_injector.py] Command line Options: Namespace(inject=['ProjectPythonPath:context_managers.ProjectPythonPath', 'RunHistory:context_managers.RunHistory', 'TrackUserError:context_managers.TrackUserError', 'UserExceptions:context_managers.UserExceptions'], invocation=['hello.py'])\n", - " 3: Starting the daemon thread to refresh tokens in background for process with pid = 31263\n", - " 4: Entering Run History Context Manager.\n", - " 5: Preparing to call script [ hello.py ] with arguments: []\n", - " 6: After variable expansion, calling script [ hello.py ] with arguments: []\n", - " 7:\n", - " 8: Hello world!\n", - " 9: Starting the daemon thread to refresh tokens in background for process with pid = 31263\n", - "10:\n", - "11:\n", - "12: The experiment completed successfully. Finalizing run...\n", - "13: Logging experiment finalizing status in history service.\n", - "14: [2020-08-04T22:15:46.541334] TimeoutHandler __init__\n", - "15: [2020-08-04T22:15:46.541396] TimeoutHandler __enter__\n", - "16: Cleaning up all outstanding Run operations, waiting 300.0 seconds\n", - "17: 1 items cleaning up...\n", - "18: Cleanup took 0.1812913417816162 seconds\n", - "19: [2020-08-04T22:15:47.040203] TimeoutHandler __exit__\n", - "```\n", - "\n", - "On line 8 above, you see the \"Hello world!\" output. The 70_driver_log.txt file contains the standard output from the run and can be useful when debugging remote runs in the cloud. You can also view the run by clicking on the **Click here to see the run in Azure Machine Learning studio** link in the widget.\n", - "\n", - "## Next steps\n", - "\n", - "In this tutorial, you took a simple \"hello world\" script and ran it on Azure. You saw how to connect to your Azure Machine Learning workspace, create an Experiment, and submit your `hello.py` code to the cloud.\n", - "\n", - "In the [next tutorial](day1-part3-train-model.ipynb), you build on these learnings by running something more interesting than `print(\"Hello world!\")`.\n" - ] - } - ], - "metadata": { - "authors": [ - { - "name": "samkemp" - } - ], - "celltoolbar": "Edit Metadata", - "kernel_info": { - "name": "python3-azureml" - }, - "kernelspec": { - "display_name": "Python 3.6", - "language": "python", - "name": "python36" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.5" - }, - "notice": "Copyright (c) Microsoft Corporation. All rights reserved. 
Licensed under the MIT License.", - "nteract": { - "version": "nteract-front-end@1.0.0" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} \ No newline at end of file diff --git a/tutorials/get-started-day1/day1-part2-hello-world.yml b/tutorials/get-started-day1/day1-part2-hello-world.yml deleted file mode 100644 index c7d0f6f0..00000000 --- a/tutorials/get-started-day1/day1-part2-hello-world.yml +++ /dev/null @@ -1,5 +0,0 @@ -name: day1-part2-hello-world -dependencies: -- pip: - - azureml-sdk - - azureml-widgets diff --git a/tutorials/get-started-day1/day1-part3-train-model.ipynb b/tutorials/get-started-day1/day1-part3-train-model.ipynb deleted file mode 100644 index 9e99a4ce..00000000 --- a/tutorials/get-started-day1/day1-part3-train-model.ipynb +++ /dev/null @@ -1,289 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Copyright (c) Microsoft Corporation. All rights reserved.\n", - "\n", - "Licensed under the MIT License." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/tutorials/get-started-day1/day1-part3-train-model.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Tutorial: Train your first ML model (Part 3 of 4)\n", - "\n", - "---\n", - "## Introduction\n", - "In the [previous tutorial](day1-part2-hello-world.ipynb), you ran a trivial \"Hello world!\" script in the cloud using Azure Machine Learning's Python SDK. This time you take it a step further by submitting a script that will train a machine learning model. This example will help you understand how Azure Machine Learning enables consistent behavior between debugging on a compute instance or laptop development environment and remote runs.\n", - "\n", - "Learning these concepts means that by the end of this session, you can:\n", - "\n", - "* Use Conda to define an Azure Machine Learning environment.\n", - "* Train a model in the cloud.\n", - "* Log metrics to Azure Machine Learning.\n", - "\n", - "This notebook follows the steps provided on the [Python (day 1) - train a model documentation page](https://aka.ms/day1aml).\n", - "\n", - "## Prerequisites\n", - "\n", - "- You have completed the following:\n", - " - [Setup on your compute cluster](day1-part1-setup.ipynb)\n", - " - [Tutorial: Hello World example](day1-part2-hello-world.ipynb)\n", - "- Familiarity with Python and Machine Learning concepts\n", - "- If you are using a compute instance in Azure Machine Learning to run this notebook series, you are all set. Otherwise, please follow [Configure a development environment for Azure Machine Learning](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment).\n", - "---\n", - "\n", - "## Your machine learning code\n", - "\n", - "This tutorial shows you how to train a PyTorch model on the CIFAR 10 dataset using an Azure Machine Learning Cluster. In this case you will be using a CPU cluster, but this could equally be a GPU cluster. Whilst this tutorial uses PyTorch, the steps we show you apply to *any* machine learning code. \n", - "\n", - "In the `code/pytorch-cifar10-train` subdirectory you will see 2 files:\n", - "\n", - "1. [model.py](code/pytorch-cifar10-train/model.py) - This defines the neural network architecture.\n", - "1. [train.py](code/pytorch-cifar10-train/train.py) - This is the training script. 
This script downloads the CIFAR10 dataset using PyTorch `torchvision.datasets` APIs, sets up the network defined in\n", - "`model.py`, and trains it for two epochs using standard SGD and cross-entropy loss.\n", - "\n", - "Note the code is based on [this introductory example from PyTorch](https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html). " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Define the Python environment for your machine learning code\n", - "\n", - "For demonstration purposes, we're going to use a Conda environment, but the steps for a pip virtual environment are almost identical. This environment has all the dependencies that your model and training script require. \n", - "\n", - "In the `configuration` directory there is a *conda dependencies* file called [pytorch-env.yml](configuration/pytorch-env.yml) that specifies the dependencies to run the python code. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Test in your development environment\n", - "\n", - "Test that your script runs on either your compute instance or laptop using this environment." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!python code/pytorch-cifar10-train/train.py" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**You should notice that the script has downloaded the data into a directory called `data`.**\n", - "\n", - "## Submit your machine learning code to Azure Machine Learning\n", - "\n", - "The difference between the control script below and the one used to submit \"hello world\" is that you adjust the environment to be set from the conda dependencies file you created earlier.\n", - "\n", - "> ! NOTE
\n", - "> The first time you run this script, Azure Machine Learning will build a new docker image from your PyTorch environment. The whole run could take 5-10 minutes to complete. You can see the docker build logs in the widget by selecting the `20_image_build_log.txt` in the log files dropdown. This image will be reused in future runs making them run much quicker.
\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "remote run", - "batchai", - "configure run", - "use notebook widget" - ] - }, - "outputs": [], - "source": [ - "from azureml.core import Workspace, Experiment, Environment, ScriptRunConfig\n", - "from azureml.widgets import RunDetails\n", - "\n", - "ws = Workspace.from_config()\n", - "experiment = Experiment(workspace=ws, name='day1-experiment-train')\n", - "config = ScriptRunConfig(source_directory='code/pytorch-cifar10-train/', script='train.py', compute_target='cpu-cluster')\n", - "\n", - "env = Environment.from_conda_specification(name='pytorch-env', file_path='configuration/pytorch-env.yml')\n", - "config.run_config.environment = env\n", - "\n", - "run = experiment.submit(config)\n", - "\n", - "RunDetails(run).show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Understand the control code\n", - "\n", - "Compared to the control script that submitted the \"hello world\" example, this control script introduces the following:\n", - "\n", - "| Code | Description\n", - "| --- | --- |\n", - "| `env = Environment.from_conda_specification( ...)` | Azure Machine Learning provides the concept of an `Environment` to represent a reproducible,
versioned Python environment for running experiments. Here you have created it from a YAML conda dependencies file.| - "| `config.run_config.environment = env` | Adds the environment to the ScriptRunConfig. |\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "**There are many ways to create AML environments, including [from a pip requirements.txt](https://docs.microsoft.com/python/api/azureml-core/azureml.core.environment.environment?view=azure-ml-py&preserve-view=true#from-pip-requirements-name--file-path-), or even [from an existing local Conda environment](https://docs.microsoft.com/python/api/azureml-core/azureml.core.environment.environment?view=azure-ml-py&preserve-view=true#from-existing-conda-environment-name--conda-environment-name-).**\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Once your image is built, select `70_driver_log.txt` to see the output of your training script, which should look like:\n", - "\n", - "```txt\n", - "Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ./data/cifar-10-python.tar.gz\n", - "...\n", - "Files already downloaded and verified\n", - "epoch=1, batch= 2000: loss 2.19\n", - "...\n", - "epoch=2, batch=12000: loss 1.27\n", - "Finished Training\n", - "```\n", - "\n", - "Environments can be registered to a workspace with `env.register(ws)`, allowing them to be easily shared, reused, and versioned. Environments make it easy to reproduce previous results and to collaborate with your team.\n", - "\n", - "Azure Machine Learning also maintains a collection of curated environments. These environments cover common ML scenarios and are backed by cached Docker images. Cached Docker images make the first remote run faster.\n", - "\n", - "In short, using registered environments can save you time! More details can be found in the [environments documentation](./how-to-use-environments.md).\n", - "\n", - "## Log training metrics\n", - "\n", - "Now that you have a model training in Azure Machine Learning, start tracking some performance metrics.\n", - "The current training script prints metrics to the terminal. Azure Machine Learning provides a\n", - "mechanism for logging metrics with more functionality. By adding a few lines of code, you gain the ability to visualize metrics in the studio and to compare metrics between multiple runs.\n", - "\n", - "### Machine learning code updates\n", - "\n", - "In the `code/pytorch-cifar10-train-with-logging` directory you will notice the [train.py](code/pytorch-cifar10-train-with-logging/train.py) script has been modified with two additional lines that will log the loss to the Azure Machine Learning Studio:\n", - "\n", - "```python\n", - "# in train.py\n", - "run = Run.get_context()\n", - "...\n", - "run.log('loss', loss)\n", - "```\n", - "\n", - "Metrics in Azure Machine Learning are:\n", - "\n", - "- Organized by experiment and run, so it's easy to keep track of and\n", - "compare metrics.\n", - "- Equipped with a UI so we can visualize training performance in the studio or in the notebook widget.\n", - "- **Designed to scale.** You can submit concurrent experiments and the Azure Machine Learning cluster will scale out (up to the maximum node count of the cluster) to run the experiments in parallel." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Update the Environment for your machine learning code\n", - "\n", - "The `train.py` script just took a new dependency on `azureml.core`. 
Therefore, the conda dependencies file [pytorch-aml-env](configuration/pytorch-aml-env.yml) reflects this change." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Submit your machine learning code to Azure Machine Learning\n", - "Submit your code once more. This time the widget includes the metrics, and you can now see live updates of the model training loss!" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "tags": [ "remote run", "batchai", "configure run", "use notebook widget", "get metrics" ] }, "outputs": [], "source": [ "from azureml.core import Workspace, Experiment, Environment, ScriptRunConfig\n", - "from azureml.widgets import RunDetails\n", - "\n", - "ws = Workspace.from_config()\n", - "experiment = Experiment(workspace=ws, name='day1-experiment-train')\n", - "config = ScriptRunConfig(source_directory='code/pytorch-cifar10-train-with-logging', script='train.py', compute_target='cpu-cluster')\n", - "\n", - "env = Environment.from_conda_specification(name='pytorch-aml-env', file_path='configuration/pytorch-aml-env.yml')\n", - "config.run_config.environment = env\n", - "\n", - "run = experiment.submit(config)\n", - "RunDetails(run).show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Next steps\n", - "\n", - "In this session, you upgraded from a basic \"Hello world!\" script to a more realistic\n", - "training script that required a specific Python environment to run. You saw how\n", - "to take a local Conda environment to the cloud with Azure Machine Learning Environments. Finally, you\n", - "saw how in a few lines of code you can log metrics to Azure Machine Learning.\n", - "\n", - "In the next session, you'll see how to work with data in Azure Machine Learning by uploading the CIFAR10\n", - "dataset to Azure.\n", - "\n", - "[Tutorial: Bring your own data](day1-part4-data.ipynb)\n" ] } ], "metadata": { "authors": [ { "name": "samkemp" } ], "celltoolbar": "Edit Metadata", "kernelspec": { "display_name": "Python 3.6", "language": "python", "name": "python36" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.5" }, "notice": "Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License." }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file diff --git a/tutorials/get-started-day1/day1-part3-train-model.yml b/tutorials/get-started-day1/day1-part3-train-model.yml deleted file mode 100644 index 46598dd2..00000000 --- a/tutorials/get-started-day1/day1-part3-train-model.yml +++ /dev/null @@ -1,7 +0,0 @@ -name: day1-part3-train-model -dependencies: -- pip: - - azureml-sdk - - azureml-widgets - - pytorch - - torchvision diff --git a/tutorials/get-started-day1/day1-part4-data.ipynb b/tutorials/get-started-day1/day1-part4-data.ipynb deleted file mode 100644 index e19067e5..00000000 --- a/tutorials/get-started-day1/day1-part4-data.ipynb +++ /dev/null @@ -1,297 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Copyright (c) Microsoft Corporation. All rights reserved.\n", - "\n", - "Licensed under the MIT License."
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/tutorials/get-started-day1/day1-part4-data.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Tutorial: Bring your own data (Part 4 of 4)\n", - "\n", - "---\n", - "## Introduction\n", - "\n", - "In the previous [Tutorial: Train a model in the cloud](day1-part3-train-model.ipynb) article, the CIFAR10 data was downloaded using the inbuilt `torchvision.datasets.CIFAR10` method in the PyTorch API. However, in many cases you are going to want to use your own data in a remote training run. This article focuses on the workflow for working with your own data in Azure Machine Learning. \n", - "\n", - "By the end of this tutorial you will have a better understanding of:\n", - "\n", - "- How to upload your data to Azure\n", - "- Best practices for working with cloud data in Azure Machine Learning\n", - "- Working with command-line arguments\n", - "\n", - "This notebook follows the steps provided on the [Python (day 1) - bring your own data documentation page](https://aka.ms/day1aml).\n", - "\n", - "## Prerequisites\n", - "\n", - "- You have completed:\n", - " - Setup on your [Azure Machine Learning Compute Cluster](day1-part1-setup.ipynb)\n", - " - [Tutorial: Hello World](day1-part2-hello-world.ipynb)\n", - " - [Tutorial: Train a model in the cloud](day1-part3-train-model.ipynb)\n", - "- Familiarity with Python and Machine Learning concepts\n", - "- If you are using a compute instance in Azure Machine Learning to run this notebook series, you are all set. Otherwise, please follow the [Configure a development environment for Azure Machine Learning](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment) guide.\n", - "\n", - "---\n", - "\n", - "## Your machine learning code\n", - "\n", - "By now you have your training script running in Azure Machine Learning, and can monitor the model performance. Let's _parametrize_ the training script by introducing\n", - "arguments. Using arguments will allow you to easily compare different hyperparameters.\n", - "\n", - "Presently our training script is set to download the CIFAR10 dataset on each run. The Python code in [code/pytorch-cifar10-your-data/train.py](code/pytorch-cifar10-your-data/train.py) now uses **`argparse` to parametrize the script.**" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Understanding your machine learning code changes\n", - "\n", - "The code in `train.py` uses the `argparse` library to set up the `data_path`, `learning_rate`, and `momentum` arguments.\n", - "\n", - "```python\n", - "# .... other code\n", - "parser = argparse.ArgumentParser()\n", - "parser.add_argument('--data_path', type=str, help='Path to the training data')\n", - "parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate for SGD')\n", - "parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for SGD')\n", - "args = parser.parse_args()\n", - "# ... 
other code\n", - "```\n", - "\n", - "Also the `train.py` script was adapted to update the optimizer to use the user-defined parameters:\n", - "\n", - "```python\n", - "optimizer = optim.SGD(\n", - " net.parameters(),\n", - " lr=args.learning_rate, # get learning rate from command-line argument\n", - " momentum=args.momentum, # get momentum from command-line argument\n", - ")\n", - "```\n", - "\n", - "## Test your machine learning code locally\n", - "\n", - "To run the modified training script locally, run the python command below.\n", - "\n", - "You avoid having to download the CIFAR10 dataset by passing in a local path to the\n", - "data. Also you can experiment with different values for _learning rate_ and\n", - "_momentum_ hyperparameters without having to hard-code them in the training script.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!python code/pytorch-cifar10-your-data/train.py --data_path ./data --learning_rate 0.003 --momentum 0.92" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Upload your data to Azure\n", - "\n", - "In order to run this script in Azure Machine Learning, you need to make your training data available in Azure. Your Azure Machine Learning workspace comes equipped with a _default_ **Datastore** - an Azure Blob storage account - that you can use to store your training data.\n", - "\n", - "> ! NOTE
\n", - "> Azure Machine Learning allows you to connect other cloud-based datastores that store your data. For more details, see [datastores documentation](./concept-data.md).
\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from azureml.core import Workspace\n", - "ws = Workspace.from_config()\n", - "datastore = ws.get_default_datastore()\n", - "datastore.upload(src_dir='./data', target_path='datasets/cifar10', overwrite=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The `target_path` specifies the path on the datastore where the CIFAR10 data will be uploaded.\n", - "\n", - "## Submit your machine learning code to Azure Machine Learning\n", - "\n", - "As you have done previously, create a new Python control script:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "remote run", - "batchai", - "configure run", - "use notebook widget", - "get metrics", - "use datastore" - ] - }, - "outputs": [], - "source": [ - "from azureml.core import Workspace, Experiment, Environment, ScriptRunConfig, Dataset\n", - "from azureml.widgets import RunDetails\n", - "\n", - "ws = Workspace.from_config()\n", - "\n", - "datastore = ws.get_default_datastore()\n", - "dataset = Dataset.File.from_files(path=(datastore, 'datasets/cifar10'))\n", - "\n", - "experiment = Experiment(workspace=ws, name='day1-experiment-data')\n", - "\n", - "config = ScriptRunConfig(source_directory='./code/pytorch-cifar10-your-data',\n", - " script='train.py',\n", - " compute_target='cpu-cluster',\n", - " arguments=[\n", - " '--data_path', dataset.as_named_input('input').as_mount(),\n", - " '--learning_rate', 0.003,\n", - " '--momentum', 0.92])\n", - "\n", - "# set up pytorch environment\n", - "env = Environment.from_conda_specification(name='pytorch-aml-env',file_path='configuration/pytorch-aml-env.yml')\n", - "config.run_config.environment = env\n", - "\n", - "run = experiment.submit(config)\n", - "RunDetails(run).show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Understand the control code\n", - "\n", - "The above control code has the following additional code compared to the control code written in [previous tutorial](03-train-model.ipynb)\n", - "\n", - "**`dataset = Dataset.File.from_files(path=(datastore, 'datasets/cifar10'))`**: A Dataset is used to reference the data you uploaded to the Azure Blob Store. Datasets are an abstraction layer on top of your data that are designed to improve reliability and trustworthiness.\n", - "\n", - "\n", - "**`config = ScriptRunConfig(...)`**: We modified the `ScriptRunConfig` to include a list of arguments that will be passed into `train.py`. 
- "\n", - "## Inspect the 70_driver_log log file\n", - "\n", - "In the widget, navigate to the `70_driver_log.txt` file - you should see the following output:\n", - "\n", - "```\n", - "Processing 'input'.\n", - "Processing dataset FileDataset\n", - "{\n", - " \"source\": [\n", - " \"('workspaceblobstore', 'datasets/cifar10')\"\n", - " ],\n", - " \"definition\": [\n", - " \"GetDatastoreFiles\"\n", - " ],\n", - " \"registration\": {\n", - " \"id\": \"XXXXX\",\n", - " \"name\": null,\n", - " \"version\": null,\n", - " \"workspace\": \"Workspace.create(name='XXXX', subscription_id='XXXX', resource_group='X')\"\n", - " }\n", - "}\n", - "Mounting input to /tmp/tmp9kituvp3.\n", - "Mounted input to /tmp/tmp9kituvp3 as folder.\n", - "Exit __enter__ of DatasetContextManager\n", - "Entering Run History Context Manager.\n", - "Current directory: /mnt/batch/tasks/shared/LS_root/jobs/dsvm-aml/azureml/tutorial-session-3_1600171983_763c5381/mounts/workspaceblobstore/azureml/tutorial-session-3_1600171983_763c5381\n", - "Preparing to call script [ train.py ] with arguments: ['--data_path', '$input', '--learning_rate', '0.003', '--momentum', '0.92']\n", - "After variable expansion, calling script [ train.py ] with arguments: ['--data_path', '/tmp/tmp9kituvp3', '--learning_rate', '0.003', '--momentum', '0.92']\n", - "\n", - "Script type = None\n", - "===== DATA =====\n", - "DATA PATH: /tmp/tmp9kituvp3\n", - "LIST FILES IN DATA PATH...\n", - "['cifar-10-batches-py', 'cifar-10-python.tar.gz']\n", - "```\n", - "\n", - "Notice:\n", - "\n", - "1. Azure Machine Learning has mounted the blob store to the compute cluster automatically for you.\n", - "2. The ``dataset.as_named_input('input').as_mount()`` used in the control script resolves to the mount point.\n", - "3. In the machine learning code, we include a line to list the directories under the data directory - you can see the list above.\n",
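For reference, the listing at the end of that log comes from a few lines in `train.py` shaped roughly like this (a sketch reconstructed from the output above, not a verbatim copy of the script):

```python
import os

# Print the mounted data directory and its contents,
# as seen in 70_driver_log.txt.
print('===== DATA =====')
print('DATA PATH: ' + args.data_path)
print('LIST FILES IN DATA PATH...')
print(os.listdir(args.data_path))
```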
- "\n", - "## Clean up resources\n", - "\n", - "The compute cluster will scale down to zero after 40 minutes of idle time. When the compute is idle you will not be charged. If you want to delete the cluster, use:\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from azureml.core import Workspace\n", - "\n", - "ws = Workspace.from_config()\n", - "ct = ws.compute_targets['cpu-cluster']\n", - "# ct.delete()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "If you're not going to use what you've created here, delete the resources you just created in this tutorial so you don't incur any charges for storage. In the Azure portal, select and delete your resource group.\n", - "\n", - "## Next Steps\n", - "\n", - "To learn more about the capabilities of Azure Machine Learning please refer to the following documentation:\n", - "\n", - "* [Azure Machine Learning Pipelines](https://docs.microsoft.com/en-us/azure/machine-learning/concept-ml-pipelines#building-pipelines-with-the-python-sdk)\n", - "* [Deploy models for real-time scoring](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-deploy-models-with-aml)\n", - "* [Hyperparameter tuning with Azure Machine Learning](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters)\n", - "* [Prep your code for production](https://docs.microsoft.com/azure/machine-learning/tutorial-convert-ml-experiment-to-production)" ] } ], "metadata": { "authors": [ { "name": "samkemp" } ], "celltoolbar": "Edit Metadata", "kernelspec": { "display_name": "Python 3.6", "language": "python", "name": "python36" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.5" }, "notice": "Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License." }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file diff --git a/tutorials/get-started-day1/day1-part4-data.yml b/tutorials/get-started-day1/day1-part4-data.yml deleted file mode 100644 index 2ab5f38b..00000000 --- a/tutorials/get-started-day1/day1-part4-data.yml +++ /dev/null @@ -1,7 +0,0 @@ -name: day1-part4-data -dependencies: -- pip: - - azureml-sdk - - azureml-widgets - - pytorch - - torchvision diff --git a/tutorials/image-classification-mnist-data/img-classification-part1-training.ipynb b/tutorials/image-classification-mnist-data/img-classification-part1-training.ipynb index 690b8feb..b0b11eb4 100644 --- a/tutorials/image-classification-mnist-data/img-classification-part1-training.ipynb +++ b/tutorials/image-classification-mnist-data/img-classification-part1-training.ipynb @@ -128,6 +128,9 @@ "metadata": {}, "source": [ "### Create or Attach existing compute resource\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", + "\n", "By using Azure Machine Learning Compute, a managed service, data scientists can train machine learning models on clusters of Azure virtual machines. Examples include VMs with GPU support. In this tutorial, you create Azure Machine Learning Compute as your training environment. You will submit Python code to run on this VM later in the tutorial. 
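For context, the creation cell referenced in the next sentence typically follows this pattern -- `AmlCompute` and `ComputeTarget` are the real `azureml-core` classes, while the cluster name and VM size below are illustrative assumptions:

```python
from azureml.core import Workspace
from azureml.core.compute import AmlCompute, ComputeTarget
from azureml.core.compute_target import ComputeTargetException

ws = Workspace.from_config()
cluster_name = 'cpu-cluster'  # illustrative name

try:
    # Reuse the cluster if it already exists in the workspace.
    compute_target = ComputeTarget(workspace=ws, name=cluster_name)
    print('Found existing compute target.')
except ComputeTargetException:
    # Otherwise provision a small autoscaling CPU cluster.
    config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2', max_nodes=4)
    compute_target = ComputeTarget.create(ws, cluster_name, config)
    compute_target.wait_for_completion(show_output=True)
```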
\n", "The code below creates the compute clusters for you if they don't already exist in your workspace.\n", "\n", diff --git a/tutorials/image-classification-mnist-data/img-classification-part1-training.yml b/tutorials/image-classification-mnist-data/img-classification-part1-training.yml index 0a0da042..594f2e47 100644 --- a/tutorials/image-classification-mnist-data/img-classification-part1-training.yml +++ b/tutorials/image-classification-mnist-data/img-classification-part1-training.yml @@ -7,3 +7,4 @@ dependencies: - sklearn - pandas - azureml-opendatasets + - azureml-widgets diff --git a/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb b/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb index e593fabc..ca6739f2 100644 --- a/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb +++ b/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb @@ -225,7 +225,9 @@ "source": [ "## Create and attach remote compute target\n", "\n", - "Azure Machine Learning service pipelines cannot be run locally, and only run on cloud resources. Remote compute targets are reusable virtual compute environments where you run experiments and work-flows. Run the following code to create a GPU-enabled [`AmlCompute`](https://docs.microsoft.com/python/api/azureml-core/azureml.core.compute.amlcompute.amlcompute?view=azure-ml-py) target, and attach it to your workspace. See the [conceptual article](https://docs.microsoft.com/azure/machine-learning/service/concept-compute-target) for more information on compute targets." + "Azure Machine Learning service pipelines cannot be run locally, and only run on cloud resources. Remote compute targets are reusable virtual compute environments where you run experiments and work-flows. Run the following code to create a GPU-enabled [`AmlCompute`](https://docs.microsoft.com/python/api/azureml-core/azureml.core.compute.amlcompute.amlcompute?view=azure-ml-py) target, and attach it to your workspace. See the [conceptual article](https://docs.microsoft.com/azure/machine-learning/service/concept-compute-target) for more information on compute targets.\n", + "\n", + "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." ] }, { diff --git a/tutorials/quickstart-ci/ClassificationWithAutomatedML.yml b/tutorials/quickstart-ci/ClassificationWithAutomatedML.yml deleted file mode 100644 index c4f87452..00000000 --- a/tutorials/quickstart-ci/ClassificationWithAutomatedML.yml +++ /dev/null @@ -1,4 +0,0 @@ -name: ClassificationWithAutomatedML -dependencies: -- pip: - - azureml-sdk diff --git a/tutorials/quickstart/azureml-quickstart.ipynb b/tutorials/quickstart/azureml-quickstart.ipynb deleted file mode 100644 index dcd946de..00000000 --- a/tutorials/quickstart/azureml-quickstart.ipynb +++ /dev/null @@ -1,482 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Copyright (c) Microsoft Corporation. All rights reserved. \n", - "\n", - "Licensed under the MIT License." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/tutorials/quickstart/azureml-quickstart.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Tutorial: Azure Machine Learning Quickstart\n", - "\n", - "In this tutorial, you learn how to quickly get started with Azure Machine Learning. Using a *compute instance* - a fully managed cloud-based VM that is pre-configured with the latest data science tools - you will train an image classification model using the CIFAR10 dataset.\n", - "\n", - "In this tutorial you will learn how to:\n", - "\n", - "* Create a compute instance and attach to a notebook\n", - "* Train an image classification model and log metrics\n", - "* Deploy the model\n", - "\n", - "## Prerequisites\n", - "\n", - "1. An Azure Machine Learning workspace\n", - "1. Familiarity with the Python language and machine learning workflows.\n", - "\n", - "\n", - "## Create compute & attach to notebook\n", - "\n", - "To run this notebook you will need to create an Azure Machine Learning _compute instance_. The benefits of a compute instance over a local machine (e.g. laptop) or cloud VM are as follows:\n", - "\n", - "* It is pre-configured with all the latest data science libraries (e.g. pandas, scikit-learn, TensorFlow, PyTorch) and tools (Jupyter, RStudio). In this tutorial we make extensive use of PyTorch, the AzureML SDK, and matplotlib, and we do not need to install these components on a compute instance.\n", - "* Notebooks are separate from the compute instance - this means that you can develop your notebook on a small VM size, and then seamlessly scale up the machine (and/or switch to a GPU-enabled one) when needed to train a model.\n", - "* You can easily turn the instance on/off to control costs. \n", - "\n", - "To create compute, click on the + button at the top of the notebook viewer in Azure Machine Learning Studio:\n", - "\n", - "\n", - "\n", - "This will pop up the __New compute instance__ blade. Provide a valid __Compute name__ (valid characters are upper and lower case letters, digits, and the - character), then click on __Create__. \n", - "\n", - "It will take approximately 3 minutes for the compute to be ready. When the compute is ready you will see a green light next to the compute name at the top of the notebook viewer:\n", - "\n", - "\n", - "\n", - "You will also notice that the notebook is attached to the __Python 3.6 - AzureML__ Jupyter kernel. Other kernels can be selected, such as R. In addition, if you have other instances you can switch to them by simply using the dropdown menu next to the Compute label.\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Import Data\n", - "\n", - "For this tutorial, you will use the CIFAR10 dataset. It has the classes: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck. The images in CIFAR-10 are three-channel color images, 32x32 pixels in size.\n", - "\n", - "The code cell below uses the PyTorch API to download the data to your compute instance, which should be quick (around 15 seconds). The data is divided into training and test sets.\n", - "\n", - "* **NOTE: The data is downloaded to the compute instance (in the `/tmp` directory) and not a durable cloud-based store like Azure Blob Storage or Azure Data Lake. This means if you delete the compute instance the data will be lost. The [getting started with Azure Machine Learning tutorial series](https://docs.microsoft.com/azure/machine-learning/tutorial-1st-experiment-sdk-setup-local) shows how to create an Azure Machine Learning *dataset*, which aids durability, versioning, and collaboration.**" - ] - },
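If you do want the data to outlive the compute instance, one hedged option is to copy it to the workspace's default datastore, as the day-1 tutorial series does -- `get_default_datastore` and `upload` are the real `azureml-core` calls, while the target path below is an arbitrary choice:

```python
from azureml.core import Workspace

ws = Workspace.from_config()
# Copy the downloaded files to durable workspace storage.
ws.get_default_datastore().upload(src_dir='/tmp/data', target_path='datasets/cifar10')
```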
- { - "cell_type": "code", - "execution_count": null, - "metadata": { - "gather": { - "logged": 1600881820920 - } - }, - "outputs": [], - "source": [ - "import torch\n", - "import torch.optim as optim\n", - "import torchvision\n", - "import torchvision.transforms as transforms\n", - "\n", - "transform = transforms.Compose(\n", - " [transforms.ToTensor(),\n", - " transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n", - "\n", - "trainset = torchvision.datasets.CIFAR10(root='/tmp/data', train=True,\n", - " download=True, transform=transform)\n", - "trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,\n", - " shuffle=True, num_workers=2)\n", - "\n", - "testset = torchvision.datasets.CIFAR10(root='/tmp/data', train=False,\n", - " download=True, transform=transform)\n", - "testloader = torch.utils.data.DataLoader(testset, batch_size=4,\n", - " shuffle=False, num_workers=2)\n", - "\n", - "classes = ('plane', 'car', 'bird', 'cat',\n", - " 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Take a look at the data\n", - "In the following cell, you have some Python code that displays the first batch of 4 CIFAR10 images:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "gather": { "logged": 1600882160868 } }, "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "\n", - "def imshow(img):\n", - " img = img / 2 + 0.5 # unnormalize\n", - " npimg = img.numpy()\n", - " plt.imshow(np.transpose(npimg, (1, 2, 0)))\n", - " plt.show()\n", - "\n", - "\n", - "# get some random training images\n", - "dataiter = iter(trainloader)\n", - "images, labels = next(dataiter)\n", - "\n", - "# show images\n", - "imshow(torchvision.utils.make_grid(images))\n", - "# print labels\n", - "print(' '.join('%5s' % classes[labels[j]] for j in range(4)))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Train model and log metrics\n", - "\n", - "In the directory `model` you will see a file called [model.py](./model/model.py) that defines the neural network architecture. The model is trained using the code below.\n", - "\n", - "* **Note: The model training takes around 4 minutes to complete. The benefit of a compute instance is that the notebooks are separate from the compute - therefore you can easily switch to a different size/type of instance. For example, you could switch to run this training on a GPU-based compute instance if you had one provisioned. 
In the code below you can see that we have included `torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")`, which detects whether you are using a CPU or GPU machine.**" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "gather": { "logged": 1600882387754 }, "tags": [ "local run" ] }, "outputs": [], "source": [ "from model.model import Net\n", - "from azureml.core import Experiment\n", - "from azureml.core import Workspace\n", - "\n", - "ws = Workspace.from_config()\n", - "\n", - "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", - "device\n", - "\n", - "exp = Experiment(workspace=ws, name=\"cifar10-experiment\")\n", - "run = exp.start_logging(snapshot_directory=None)\n", - "\n", - "# define convolutional network\n", - "net = Net()\n", - "net.to(device)\n", - "\n", - "# set up pytorch loss / optimizer\n", - "criterion = torch.nn.CrossEntropyLoss()\n", - "optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)\n", - "\n", - "run.log(\"learning rate\", 0.001)\n", - "run.log(\"momentum\", 0.9)\n", - "\n", - "# train the network\n", - "for epoch in range(1):\n", - " running_loss = 0.0\n", - " for i, data in enumerate(trainloader, 0):\n", - " # unpack the data\n", - " inputs, labels = data[0].to(device), data[1].to(device)\n", - "\n", - " # zero the parameter gradients\n", - " optimizer.zero_grad()\n", - "\n", - " # forward + backward + optimize\n", - " outputs = net(inputs)\n", - " loss = criterion(outputs, labels)\n", - " loss.backward()\n", - " optimizer.step()\n", - "\n", - " # print statistics\n", - " running_loss += loss.item()\n", - " if i % 2000 == 1999:\n", - " loss = running_loss / 2000\n", - " run.log(\"loss\", loss)\n", - " print(f'epoch={epoch + 1}, batch={i + 1:5}: loss {loss:.2f}')\n", - " running_loss = 0.0\n", - "\n", - "print('Finished Training')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Once you have executed the cell above, you can view the metrics updating in real time in the Azure Machine Learning studio:\n", - "\n", - "1. Select **Experiments** (left-hand menu)\n", - "1. Select **cifar10-experiment**\n", - "1. Select **Run 1**\n", - "1. Select the **Metrics** tab\n", - "\n", - "The Metrics tab will display a graph of the logged loss values." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Understand the code\n", - "\n", - "The code is based on the [PyTorch 60-Minute Blitz](https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#sphx-glr-beginner-blitz-cifar10-tutorial-py), where we have added a few additional lines of code to track the loss metric as the neural network trains.\n", - "\n", - "| Code | Description | \n", - "| ------------- | ---------- |\n", - "| `exp = Experiment( ... )` | [Experiment](https://docs.microsoft.com/python/api/azureml-core/azureml.core.experiment.experiment?view=azure-ml-py&preserve-view=true) provides a simple way to organize multiple runs under a single name. Later you can see how experiments make it easy to compare metrics between dozens of runs. |\n", - "| `run.log()` | This will log the metrics to Azure Machine Learning. |" - ] - },
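Logged values can also be read back programmatically once the run finishes -- `get_metrics` is part of the `azureml-core` `Run` API, and the dictionary key simply matches the name passed to `run.log` above:

```python
# Retrieve everything logged against this run,
# e.g. the list of logged loss values.
metrics = run.get_metrics()
print(metrics.get('loss'))
```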
- { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Version control models with the Model Registry\n", - "\n", - "You can use model registration to store and version your models in your workspace. Registered models are identified by name and version. Each time you register a model with the same name as an existing one, the registry increments the version. Azure Machine Learning supports any model that can be loaded through Python 3.\n", - "\n", - "The code below does the following:\n", - "\n", - "1. Saves the model on the compute instance\n", - "1. Uploads the model file to the run (if you look in the experiment on Azure Machine Learning studio you should see on the **Outputs + logs** tab the model has been saved in the run)\n", - "1. Registers the uploaded model file\n", - "1. Transitions the run to a completed state" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "gather": { "logged": 1600888071066 }, "tags": [ "register model from file" ] }, "outputs": [], "source": [ "from azureml.core import Model\n", - "\n", - "PATH = 'cifar_net.pth'\n", - "torch.save(net.state_dict(), PATH)\n", - "\n", - "run.upload_file(name=PATH, path_or_stream=PATH)\n", - "model = run.register_model(model_name='cifar10-model', \n", - " model_path=PATH,\n", - " model_framework=Model.Framework.PYTORCH,\n", - " description='cifar10 model')\n", - " \n", - "run.complete()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### View model in the model registry\n", - "\n", - "You can see the stored model by navigating to **Models** in the left-hand menu bar of Azure Machine Learning Studio. Click on **cifar10-model** and you can see the details of the model, such as the experiment run ID that created it." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Deploy the model\n", - "\n", - "The next cell deploys the model to an Azure Container Instance so that you can score data in real-time (Azure Machine Learning also provides mechanisms to do batch scoring). A real-time endpoint allows application developers to integrate machine learning into their apps.\n", - "\n", - "* **Note: The deployment takes around 3 minutes to complete.**" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "tags": [ "deploy service", "aci" ] }, "outputs": [], "source": [ "from azureml.core import Environment, Model\n", - "from azureml.core.model import InferenceConfig\n", - "from azureml.core.webservice import AciWebservice\n", - "\n", - "environment = Environment.get(ws, \"AzureML-PyTorch-1.6-CPU\")\n", - "model = Model(ws, \"cifar10-model\")\n", - "\n", - "service_name = 'cifar-service'\n", - "inference_config = InferenceConfig(entry_script='score.py', environment=environment)\n", - "aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)\n", - "\n", - "service = Model.deploy(workspace=ws,\n", - " name=service_name,\n", - " models=[model],\n", - " inference_config=inference_config,\n", - " deployment_config=aci_config,\n", - " overwrite=True)\n", - "service.wait_for_deployment(show_output=True)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Understand the code\n", - "\n", - "| Code | Description | \n", - "| ------------- | ---------- |\n", - "| `environment = Environment.get()` | [Environment](https://docs.microsoft.com/python/api/overview/azure/ml/?view=azure-ml-py#environment) specifies the Python packages, environment variables, and software settings around your training and scoring scripts. In this case, you are using a *curated environment* that has all the packages to run PyTorch. 
|\n", - "| `inference_config = InferenceConfig()` | This specifies the inference (scoring) configuration for the deployment such as the script to use when scoring (see below) and on what environment. |\n", - "| `service = Model.deploy()` | Deploy the model. |\n", - "\n", - "The [*scoring script*](score.py) file has two functions:\n", - "\n", - "1. an `init` function that executes once when the service starts - in this function you normally get the model from the registry and set global variables\n", - "1. a `run(data)` function that executes each time a call is made to the service. In this function, you normally deserialize the JSON, run a prediction, and output the predicted result.\n", - "\n", - "\n", - "## Test the model service\n", - "\n", - "In the next cell, you get some unseen data from the test loader:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "dataiter = iter(testloader)\n", - "images, labels = next(dataiter)\n", - "\n", - "# print images\n", - "imshow(torchvision.utils.make_grid(images))\n", - "print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Finally, the next cell scores the above images using the deployed model service." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import json\n", - "\n", - "input_payload = json.dumps({\n", - " 'data': images.tolist()\n", - "})\n", - "\n", - "output = service.run(input_payload)\n", - "print(output)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Clean up resources\n", - "\n", - "To clean up the resources after this quickstart, first delete the model service using:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "service.delete()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Next, stop the compute instance by following these steps:\n", - "\n", - "1. Go to **Compute** in the left-hand menu of the Azure Machine Learning studio\n", - "1. Select your compute instance\n", - "1. Select **Stop**\n", - "\n", - "\n", - "**Important: The resources you created can be used as prerequisites to other Azure Machine Learning tutorials and how-to articles.** If you don't plan to use the resources you created, delete them, so you don't incur any charges:\n", - "\n", - "1. In the Azure portal, select **Resource groups** on the far left.\n", - "1. From the list, select the resource group you created.\n", - "1. Select **Delete resource group**.\n", - "1. Enter the resource group name. Then select **Delete**.\n", - "\n", - "You can also keep the resource group but delete a single workspace. Display the workspace properties and select **Delete**." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Next Steps\n", - "\n", - "In this tutorial, you have seen how to run your machine learning code on a fully managed, pre-configured cloud-based VM called a *compute instance*. Having a compute instance for your development environment removes the burden of installing data science tooling and libraries (for example, Jupyter, PyTorch, TensorFlow, scikit-learn) and allows you to easily scale the compute power (RAM, cores) up or down, since the notebooks are separated from the VM. 
\n", - "\n", - "It is often the case that once you have your machine learning code working in a development environment that you want to productionize this by running as a **_job_** - ideally on a schedule or trigger (for example, arrival of new data). To this end, we recommend that you follow [**the day 1 getting started with Azure Machine Learning tutorial**](https://docs.microsoft.com/azure/machine-learning/tutorial-1st-experiment-sdk-setup-local). This day 1 tutorial is focussed on running jobs-based machine learning code in the cloud." - ] - } - ], - "metadata": { - "authors": [ - { - "name": "samkemp" - } - ], - "kernelspec": { - "display_name": "Python 3.6", - "language": "python", - "name": "python36" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.5" - }, - "nteract": { - "version": "nteract-front-end@1.0.0" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} \ No newline at end of file diff --git a/tutorials/quickstart/azureml-quickstart.yml b/tutorials/quickstart/azureml-quickstart.yml deleted file mode 100644 index 4095778e..00000000 --- a/tutorials/quickstart/azureml-quickstart.yml +++ /dev/null @@ -1,7 +0,0 @@ -name: azureml-quickstart -dependencies: -- pip: - - azureml-sdk - - pytorch - - torchvision - - matplotlib diff --git a/tutorials/quickstart/model/model.py b/tutorials/quickstart/model/model.py deleted file mode 100644 index a676db74..00000000 --- a/tutorials/quickstart/model/model.py +++ /dev/null @@ -1,22 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F - - -class Net(nn.Module): - def __init__(self): - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 5 * 5, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - def forward(self, x): - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 5 * 5) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - x = self.fc3(x) - return x diff --git a/tutorials/quickstart/score.py b/tutorials/quickstart/score.py deleted file mode 100644 index a3e7f706..00000000 --- a/tutorials/quickstart/score.py +++ /dev/null @@ -1,51 +0,0 @@ -import os -import torch -import json -import torch.nn as nn -import torch.nn.functional as F - - -class Net(nn.Module): - def __init__(self): - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 5 * 5, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - def forward(self, x): - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 5 * 5) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - x = self.fc3(x) - return x - - -def init(): - global net - global classes - - model_filename = 'cifar_net.pth' - model_path = os.path.join(os.environ['AZUREML_MODEL_DIR'], model_filename) - net = Net() - net.load_state_dict(torch.load(model_path)) - classes = ('plane', 'car', 'bird', 'cat', - 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') - - -def run(data): - data = json.loads(data) - images = torch.FloatTensor(data['data']) - outputs = net(images) - - _, predicted = torch.max(outputs, 1) - - result = [classes[predicted[j]] for j in range(4)] - result_json = 
json.dumps({"predictions": result}) - - # You can return any JSON-serializable object. - return result_json
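One last observation on the deleted `score.py`: the `range(4)` above only works because the tutorial's test loader uses a batch size of 4. A batch-size-agnostic variant of that one line (a sketch; the rest of `run()` is unchanged) would be:

```python
# Label every prediction in the batch, whatever its size.
result = [classes[predicted[j]] for j in range(len(predicted))]
```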