Mirror of https://github.com/Azure/MachineLearningNotebooks.git (synced 2025-12-19 17:17:04 -05:00)

Compare commits: azureml-sd... → release_up... (1 commit)

Commit 8cf61fb207
@@ -103,7 +103,7 @@
 "source": [
 "import azureml.core\n",
 "\n",
-"print(\"This notebook was created using version 1.22.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
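Most of the hunks in this compare are the routine SDK pin bump from 1.22.0 to 1.23.0. A minimal sketch of the version check these cells perform, assuming the azureml-sdk package is installed locally (the EXPECTED constant is illustrative, not part of the notebooks):

```python
# Sketch: compare the installed Azure ML SDK version with the version the
# notebooks were authored against (1.23.0 after this change).
import azureml.core

EXPECTED = "1.23.0"  # hypothetical constant, for illustration only
print("This notebook was created using version", EXPECTED, "of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
```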
@@ -21,9 +21,9 @@ dependencies:
 
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.22.0
+  - azureml-widgets~=1.23.0
   - pytorch-transformers==1.0.0
   - spacy==2.1.8
   - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.22.0/validated_win32_requirements.txt [--no-deps]
+  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.23.0/validated_win32_requirements.txt [--no-deps]
   - PyJWT < 2.0.0
@@ -21,10 +21,10 @@ dependencies:
 
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.22.0
+  - azureml-widgets~=1.23.0
   - pytorch-transformers==1.0.0
   - spacy==2.1.8
   - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.22.0/validated_linux_requirements.txt [--no-deps]
+  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.23.0/validated_linux_requirements.txt [--no-deps]
   - PyJWT < 2.0.0
 
@@ -22,9 +22,9 @@ dependencies:
 
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.22.0
+  - azureml-widgets~=1.23.0
   - pytorch-transformers==1.0.0
   - spacy==2.1.8
   - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.22.0/validated_darwin_requirements.txt [--no-deps]
+  - -r https://automlcesdkdataresources.blob.core.windows.net/validated-requirements/1.23.0/validated_darwin_requirements.txt [--no-deps]
   - PyJWT < 2.0.0
@@ -105,7 +105,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.22.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -93,7 +93,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.22.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -96,7 +96,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.22.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -81,7 +81,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.22.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -5,17 +5,13 @@ dependencies:
 - pip<=19.3.1
 - python>=3.5.2,<3.8
 - nb_conda
 - matplotlib==2.1.0
 - numpy~=1.18.0
 - cython
 - urllib3<1.24
 - scikit-learn==0.22.1
 - pandas==0.25.1
 
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
   - azureml-defaults
   - azureml-sdk
   - azureml-widgets
   - azureml-explain-model
   - pandas
   - PyJWT < 2.0.0
@@ -6,17 +6,13 @@ dependencies:
 - nomkl
 - python>=3.5.2,<3.8
 - nb_conda
 - matplotlib==2.1.0
 - numpy~=1.18.0
 - cython
 - urllib3<1.24
 - scikit-learn==0.22.1
 - pandas==0.25.1
 
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
   - azureml-defaults
   - azureml-sdk
   - azureml-widgets
   - azureml-explain-model
   - pandas
   - PyJWT < 2.0.0
@@ -67,11 +67,8 @@
 "source": [
 "import logging\n",
 "\n",
 "from matplotlib import pyplot as plt\n",
 "import json\n",
 "import numpy as np\n",
 "import pandas as pd\n",
 " \n",
 "\n",
 "\n",
 "import azureml.core\n",
 "from azureml.core.experiment import Experiment\n",
@@ -93,7 +90,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.22.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -116,9 +113,7 @@
 "output['Resource Group'] = ws.resource_group\n",
 "output['Location'] = ws.location\n",
 "output['Run History Name'] = experiment_name\n",
-"pd.set_option('display.max_colwidth', -1)\n",
-"outputDf = pd.DataFrame(data = output, index = [''])\n",
-"outputDf.T"
+"output"
 ]
 },
 {
@@ -276,34 +271,13 @@
 "## Results"
 ]
 },
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"#### Widget for Monitoring Runs\n",
-"\n",
-"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
-"\n",
-"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details."
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"from azureml.widgets import RunDetails\n",
-"RunDetails(remote_run).show() "
-]
-},
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
-"remote_run.wait_for_completion()"
+"remote_run.wait_for_completion(show_output=True)"
 ]
 },
 {
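This hunk drops the RunDetails widget cells and keeps only a blocking wait that streams output. A minimal sketch of the remaining pattern, assuming `remote_run` is an AutoML run returned by `experiment.submit(...)`:

```python
# Sketch: block until the submitted AutoML run finishes, streaming status
# to the notebook output as the updated cell does.
remote_run.wait_for_completion(show_output=True)

# Afterwards the run can be inspected with the standard Run APIs.
print(remote_run.get_status())
print(remote_run.get_metrics())
```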
@@ -368,18 +342,12 @@
 "metadata": {},
 "outputs": [],
 "source": [
 "# preview the first 3 rows of the dataset\n",
 "\n",
-"test_data = test_data.to_pandas_dataframe()\n",
-"y_test = test_data['ERP'].fillna(0)\n",
-"test_data = test_data.drop('ERP', 1)\n",
-"test_data = test_data.fillna(0)\n",
+"y_test = test_data.keep_columns('ERP')\n",
+"test_data = test_data.drop_columns('ERP')\n",
 "\n",
 "\n",
-"train_data = train_data.to_pandas_dataframe()\n",
-"y_train = train_data['ERP'].fillna(0)\n",
-"train_data = train_data.drop('ERP', 1)\n",
-"train_data = train_data.fillna(0)\n"
+"y_train = train_data.keep_columns('ERP')\n",
+"train_data = train_data.drop_columns('ERP')\n"
 ]
 },
 {
@@ -397,7 +365,16 @@
 "outputs": [],
 "source": [
 "from azureml.train.automl.model_proxy import ModelProxy\n",
-"best_model_proxy = ModelProxy(best_run)"
+"best_model_proxy = ModelProxy(best_run)\n",
+"y_pred_train = best_model_proxy.predict(train_data)\n",
+"y_pred_test = best_model_proxy.predict(test_data)"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"#### Exploring results"
+]
+},
 {
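The hunk above switches the sample to ModelProxy-based inference, where predictions are computed on remote compute rather than by downloading the fitted model. A minimal sketch under the same assumptions (a completed AutoML `best_run` and TabularDataset inputs named `train_data` and `test_data`):

```python
# Sketch: score train/test datasets through a ModelProxy for the best run.
from azureml.train.automl.model_proxy import ModelProxy

best_model_proxy = ModelProxy(best_run)
y_pred_train = best_model_proxy.predict(train_data)  # returns a dataset-like result
y_pred_test = best_model_proxy.predict(test_data)

# Later cells in the diff flatten these results for metrics and printing:
y_pred_test_values = y_pred_test.to_pandas_dataframe().values.flatten()
```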
@@ -406,60 +383,15 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"y_pred_train = best_model_proxy.predict(train_data).to_pandas_dataframe().values.flatten()\n",
+"y_pred_train = y_pred_train.to_pandas_dataframe().values.flatten()\n",
+"y_train = y_train.to_pandas_dataframe().values.flatten()\n",
 "y_residual_train = y_train - y_pred_train\n",
 "\n",
-"y_pred_test = best_model_proxy.predict(test_data).to_pandas_dataframe().values.flatten()\n",
-"y_residual_test = y_test - y_pred_test"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"%matplotlib inline\n",
-"from sklearn.metrics import mean_squared_error, r2_score\n",
-"\n",
-"# Set up a multi-plot chart.\n",
-"f, (a0, a1) = plt.subplots(1, 2, gridspec_kw = {'width_ratios':[1, 1], 'wspace':0, 'hspace': 0})\n",
-"f.suptitle('Regression Residual Values', fontsize = 18)\n",
-"f.set_figheight(6)\n",
-"f.set_figwidth(16)\n",
-"\n",
-"# Plot residual values of training set.\n",
-"a0.axis([0, 360, -100, 100])\n",
-"a0.plot(y_residual_train, 'bo', alpha = 0.5)\n",
-"a0.plot([-10,360],[0,0], 'r-', lw = 3)\n",
-"a0.text(16,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_train, y_pred_train))), fontsize = 12)\n",
-"a0.text(16,140,'R2 score = {0:.2f}'.format(r2_score(y_train, y_pred_train)),fontsize = 12)\n",
-"a0.set_xlabel('Training samples', fontsize = 12)\n",
-"a0.set_ylabel('Residual Values', fontsize = 12)\n",
-"\n",
-"# Plot residual values of test set.\n",
-"a1.axis([0, 90, -100, 100])\n",
-"a1.plot(y_residual_test, 'bo', alpha = 0.5)\n",
-"a1.plot([-10,360],[0,0], 'r-', lw = 3)\n",
-"a1.text(5,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_test, y_pred_test))), fontsize = 12)\n",
-"a1.text(5,140,'R2 score = {0:.2f}'.format(r2_score(y_test, y_pred_test)),fontsize = 12)\n",
-"a1.set_xlabel('Test samples', fontsize = 12)\n",
-"a1.set_yticklabels([])\n",
-"\n",
-"plt.show()"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"%matplotlib inline\n",
-"test_pred = plt.scatter(y_test, y_pred_test, color='')\n",
-"test_test = plt.scatter(y_test, y_test, color='g')\n",
-"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
-"plt.show()"
+"y_pred_test = y_pred_test.to_pandas_dataframe().values.flatten()\n",
+"y_test = y_test.to_pandas_dataframe().values.flatten()\n",
+"y_residual_test = y_test - y_pred_test\n",
+"print(y_residual_train)\n",
+"print(y_residual_test)"
 ]
 },
 {
@@ -113,7 +113,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.22.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -87,7 +87,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.22.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -97,7 +97,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.22.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -94,7 +94,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.22.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -82,7 +82,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.22.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -96,7 +96,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.22.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -96,7 +96,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.22.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -92,7 +92,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(\"This notebook was created using version 1.22.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },
@@ -375,18 +375,12 @@
 "metadata": {},
 "outputs": [],
 "source": [
 "# preview the first 3 rows of the dataset\n",
 "\n",
-"test_data = test_data.to_pandas_dataframe()\n",
-"y_test = test_data['ERP'].fillna(0)\n",
-"test_data = test_data.drop('ERP', 1)\n",
-"test_data = test_data.fillna(0)\n",
+"y_test = test_data.keep_columns('ERP').to_pandas_dataframe()\n",
+"test_data = test_data.drop_columns('ERP').to_pandas_dataframe()\n",
 "\n",
 "\n",
-"train_data = train_data.to_pandas_dataframe()\n",
-"y_train = train_data['ERP'].fillna(0)\n",
-"train_data = train_data.drop('ERP', 1)\n",
-"train_data = train_data.fillna(0)\n"
+"y_train = train_data.keep_columns('ERP').to_pandas_dataframe()\n",
+"train_data = train_data.drop_columns('ERP').to_pandas_dataframe()\n"
 ]
 },
 {
@@ -396,10 +390,10 @@
 "outputs": [],
 "source": [
 "y_pred_train = fitted_model.predict(train_data)\n",
-"y_residual_train = y_train - y_pred_train\n",
+"y_residual_train = y_train.values - y_pred_train\n",
 "\n",
 "y_pred_test = fitted_model.predict(test_data)\n",
-"y_residual_test = y_test - y_pred_test"
+"y_residual_test = y_test.values - y_pred_test"
 ]
 },
 {
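The two hunks above replace pandas indexing with TabularDataset column operations and then take `.values` when computing residuals. A minimal sketch of the combined flow, assuming `train_data` and `test_data` start as TabularDatasets with an 'ERP' label column and `fitted_model` is the fitted AutoML model; the X_train/X_test names are renamed here for clarity and are not in the notebook:

```python
# Sketch: split the label from the features with TabularDataset operations,
# then compute residuals against the fitted model's predictions.
y_train = train_data.keep_columns('ERP').to_pandas_dataframe()
X_train = train_data.drop_columns('ERP').to_pandas_dataframe()
y_test = test_data.keep_columns('ERP').to_pandas_dataframe()
X_test = test_data.drop_columns('ERP').to_pandas_dataframe()

y_pred_train = fitted_model.predict(X_train)
# flatten to 1-D so the subtraction is element-wise rather than broadcast
y_residual_train = y_train.values.flatten() - y_pred_train

y_pred_test = fitted_model.predict(X_test)
y_residual_test = y_test.values.flatten() - y_pred_test
```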
@@ -259,7 +259,7 @@
 "run_config.environment.docker.enabled = True\n",
 "\n",
 "azureml_pip_packages = [\n",
-" 'azureml-defaults', 'azureml-contrib-interpret', 'azureml-telemetry', 'azureml-interpret'\n",
+" 'azureml-defaults', 'azureml-telemetry', 'azureml-interpret'\n",
 "]\n",
 "\n",
 "# Note: this is to pin the scikit-learn and pandas versions to be same as notebook.\n",
@@ -57,7 +57,7 @@
 "Problem: IBM employee attrition classification with scikit-learn (run model explainer locally and upload explanation to the Azure Machine Learning Run History)\n",
 "\n",
 "1. Train a SVM classification model using Scikit-learn\n",
-"2. Run 'explain_model' with AML Run History, which leverages run history service to store and manage the explanation data\n",
+"2. Run 'explain-model-sample' with AML Run History, which leverages run history service to store and manage the explanation data\n",
 "---\n",
 "\n",
 "Setup: If you are using Jupyter notebooks, the extensions should be installed automatically with the package.\n",
@@ -475,7 +475,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"experiment_name = 'explain_model'\n",
+"experiment_name = 'explain-model-sample'\n",
 "experiment = Experiment(ws, experiment_name)\n",
 "run = experiment.start_logging()\n",
 "client = ExplanationClient.from_run(run)"
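The renamed experiment in this hunk is used to log explanations to run history. A minimal sketch of that flow, assuming `ws` is a Workspace, the ExplanationClient import path shown is the one available with azureml-interpret installed, and `global_explanation` was produced separately by an explainer such as interpret-community's TabularExplainer:

```python
# Sketch: create a run under the renamed experiment and attach an
# ExplanationClient so an explanation can be stored in run history.
from azureml.core import Experiment
from azureml.interpret import ExplanationClient  # assumed import path

experiment_name = 'explain-model-sample'
experiment = Experiment(ws, experiment_name)
run = experiment.start_logging()
client = ExplanationClient.from_run(run)

# `global_explanation` is assumed to come from an explainer run elsewhere.
client.upload_model_explanation(global_explanation, comment='global explanation: all features')
run.complete()
```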
@@ -323,7 +323,7 @@
 "\n",
 "# azureml-defaults is required to host the model as a web service.\n",
 "azureml_pip_packages = [\n",
-" 'azureml-defaults', 'azureml-contrib-interpret', 'azureml-core', 'azureml-telemetry',\n",
+" 'azureml-defaults', 'azureml-core', 'azureml-telemetry',\n",
 " 'azureml-interpret'\n",
 "]\n",
 " \n",
@@ -267,7 +267,7 @@
 "run_config.environment.python.user_managed_dependencies = False\n",
 "\n",
 "azureml_pip_packages = [\n",
-" 'azureml-defaults', 'azureml-contrib-interpret', 'azureml-telemetry', 'azureml-interpret'\n",
+" 'azureml-defaults', 'azureml-telemetry', 'azureml-interpret'\n",
 "]\n",
 " \n",
 "\n",
@@ -431,7 +431,7 @@
 "\n",
 "# WARNING: to install this, g++ needs to be available on the Docker image and is not by default (look at the next cell)\n",
 "azureml_pip_packages = [\n",
-" 'azureml-defaults', 'azureml-contrib-interpret', 'azureml-core', 'azureml-telemetry',\n",
+" 'azureml-defaults', 'azureml-core', 'azureml-telemetry',\n",
 " 'azureml-interpret'\n",
 "]\n",
 " \n",
@@ -341,7 +341,7 @@
 "outputs": [],
 "source": [
 "pipeline = Pipeline(workspace=ws, steps=[step])\n",
-"pipeline_run = Experiment(ws, 'azurebatch_experiment').submit(pipeline)"
+"pipeline_run = Experiment(ws, 'azurebatch_sample').submit(pipeline)"
 ]
 },
 {
@@ -130,7 +130,7 @@
 "\n",
 "pipeline_draft = PipelineDraft.create(ws, name=\"TestPipelineDraft\",\n",
 " description=\"draft description\",\n",
-" experiment_name=\"helloworld\",\n",
+" experiment_name=\"pipeline_draft_sample\",\n",
 " pipeline=pipeline,\n",
 " continue_on_step_failure=True,\n",
 " tags={'dev': 'true'},\n",
@@ -325,7 +325,7 @@
 "outputs": [],
 "source": [
 "# submit a pipeline run\n",
-"pipeline_run1 = Experiment(ws, 'Pipeline_experiment').submit(pipeline1)\n",
+"pipeline_run1 = Experiment(ws, 'Pipeline_experiment_sample').submit(pipeline1)\n",
 "# publish a pipeline from the submitted pipeline run\n",
 "published_pipeline2 = pipeline_run1.publish_pipeline(name=\"My_New_Pipeline2\", description=\"My Published Pipeline Description\", version=\"0.1\", continue_on_step_failure=True)\n",
 "published_pipeline2"
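This hunk only renames the experiment, but the cell shows the submit-then-publish pattern. A minimal sketch, assuming `ws` is a Workspace and `pipeline1` is an already constructed azureml Pipeline:

```python
# Sketch: submit a pipeline run under the renamed experiment, then publish
# a reusable pipeline from that run.
from azureml.core import Experiment

pipeline_run1 = Experiment(ws, 'Pipeline_experiment_sample').submit(pipeline1)
published_pipeline2 = pipeline_run1.publish_pipeline(
    name="My_New_Pipeline2",
    description="My Published Pipeline Description",
    version="0.1",
    continue_on_step_failure=True)
print(published_pipeline2.id)  # id of the published pipeline, usable for scheduling
```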
@@ -259,7 +259,7 @@
 "\n",
 "schedule = Schedule.create(workspace=ws, name=\"My_Schedule\",\n",
 " pipeline_id=pub_pipeline_id, \n",
-" experiment_name='Schedule_Run',\n",
+" experiment_name='Schedule-run-sample',\n",
 " recurrence=recurrence,\n",
 " wait_for_provisioning=True,\n",
 " description=\"Schedule Run\")\n",
@@ -445,7 +445,7 @@
 "\n",
 "schedule = Schedule.create(workspace=ws, name=\"My_Schedule\",\n",
 " pipeline_id=pub_pipeline_id, \n",
-" experiment_name='Schedule_Run',\n",
+" experiment_name='Schedule-run-sample',\n",
 " datastore=datastore,\n",
 " wait_for_provisioning=True,\n",
 " description=\"Schedule Run\")\n",
@@ -516,7 +516,7 @@
 "\n",
 "schedule = Schedule.create_for_pipeline_endpoint(workspace=ws, name=\"My_Endpoint_Schedule\",\n",
 " pipeline_endpoint_id=published_pipeline_endpoint_id,\n",
-" experiment_name='Schedule_Run',\n",
+" experiment_name='Schedule-run-sample',\n",
 " recurrence=recurrence, description=\"Schedule_Run\",\n",
 " wait_for_provisioning=True)\n",
 "\n",
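The three schedule hunks change only the experiment_name argument. For context, a minimal sketch of the recurrence-based variant, assuming `ws` is a Workspace and `pub_pipeline_id` is the id of a published pipeline:

```python
# Sketch: schedule a published pipeline on a daily recurrence under the
# renamed experiment name used in these hunks.
from azureml.pipeline.core.schedule import Schedule, ScheduleRecurrence

recurrence = ScheduleRecurrence(frequency="Day", interval=1)
schedule = Schedule.create(workspace=ws, name="My_Schedule",
                           pipeline_id=pub_pipeline_id,
                           experiment_name='Schedule-run-sample',
                           recurrence=recurrence,
                           wait_for_provisioning=True,
                           description="Schedule Run")
```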
@@ -553,7 +553,7 @@
 "outputs": [],
 "source": [
 "from azureml.core import Experiment\n",
-"pipeline_run = Experiment(ws, name=\"submit_from_endpoint\").submit(pipeline_endpoint_by_name, tags={'endpoint_tag': \"1\"}, pipeline_version=\"0\")"
+"pipeline_run = Experiment(ws, name=\"submit_endpoint_sample\").submit(pipeline_endpoint_by_name, tags={'endpoint_tag': \"1\"}, pipeline_version=\"0\")"
 ]
 }
 ],
@@ -101,7 +101,7 @@
 "metadata": {},
 "source": [
 "## Create an Azure ML experiment\n",
-"Let's create an experiment named \"automlstep-classification\" and a folder to hold the training scripts. The script runs will be recorded under the experiment in Azure.\n",
+"Let's create an experiment named \"automlstep-sample\" and a folder to hold the training scripts. The script runs will be recorded under the experiment in Azure.\n",
 "\n",
 "The best practice is to use separate folders for scripts and its dependent files for each step and specify that folder as the `source_directory` for the step. This helps reduce the size of the snapshot created for the step (only the specific folder is snapshotted). Since changes in any files in the `source_directory` would trigger a re-upload of the snapshot, this helps keep the reuse of the step when there are no changes in the `source_directory` of the step."
 ]
@@ -113,7 +113,7 @@
 "outputs": [],
 "source": [
 "# Choose a name for the run history container in the workspace.\n",
-"experiment_name = 'automlstep-classification'\n",
+"experiment_name = 'automlstep-sample'\n",
 "project_folder = './project'\n",
 "\n",
 "experiment = Experiment(ws, experiment_name)\n",
@@ -428,7 +428,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"pipeline_run1 = Experiment(ws, 'Data_dependency').submit(pipeline1)\n",
+"pipeline_run1 = Experiment(ws, 'Data_dependency_sample').submit(pipeline1)\n",
 "print(\"Pipeline is submitted for execution\")"
 ]
 },
@@ -147,7 +147,7 @@
 "\n",
 "To do this, you first must install the Azure Networking API.\n",
 "\n",
-"`pip install --upgrade azure-mgmt-network`"
+"`pip install --upgrade azure-mgmt-network==12.0.0`"
 ]
 },
 {
@@ -157,7 +157,7 @@
 "outputs": [],
 "source": [
 "# If you need to install the Azure Networking SDK, uncomment the following line.\n",
-"#!pip install --upgrade azure-mgmt-network"
+"#!pip install --upgrade azure-mgmt-network==12.0.0"
 ]
 },
 {
@@ -167,7 +167,7 @@
 "\n",
 "To do this, you first must install the Azure Networking API.\n",
 "\n",
-"`pip install --upgrade azure-mgmt-network`"
+"`pip install --upgrade azure-mgmt-network==12.0.0`"
 ]
 },
 {
@@ -177,7 +177,7 @@
 "outputs": [],
 "source": [
 "# If you need to install the Azure Networking SDK, uncomment the following line.\n",
-"#!pip install --upgrade azure-mgmt-network"
+"#!pip install --upgrade azure-mgmt-network==12.0.0"
 ]
 },
 {
@@ -100,7 +100,7 @@
 "\n",
 "# Check core SDK version number\n",
 "\n",
-"print(\"This notebook was created using SDK version 1.22.0, you are currently running version\", azureml.core.VERSION)"
+"print(\"This notebook was created using SDK version 1.23.0, you are currently running version\", azureml.core.VERSION)"
 ]
 },
 {
@@ -98,7 +98,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"experiment_name = \"experiment-with-mlflow\"\n",
+"experiment_name = \"LocalTrain-with-mlflow-sample\"\n",
 "mlflow.set_experiment(experiment_name)"
 ]
 },
@@ -123,7 +123,7 @@
 "source": [
 "from azureml.core import Experiment\n",
 "\n",
-"experiment_name = \"experiment-with-mlflow\"\n",
+"experiment_name = \"RemoteTrain-with-mlflow-sample\"\n",
 "exp = Experiment(workspace=ws, name=experiment_name)"
 ]
 },
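These two hunks rename the MLflow experiments. A minimal sketch of the local-tracking variant, assuming `ws` is a Workspace and the mlflow and azureml-mlflow packages are installed:

```python
# Sketch: point MLflow at the workspace's tracking URI, select the renamed
# experiment, and log a metric inside a run.
import mlflow

mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())
mlflow.set_experiment("LocalTrain-with-mlflow-sample")

with mlflow.start_run():
    mlflow.log_metric("alpha", 0.03)  # illustrative metric, not from the notebook
```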
@@ -102,7 +102,7 @@
 "source": [
 "import azureml.core\n",
 "\n",
-"print(\"This notebook was created using version 1.22.0 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.23.0 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },