Update samples from Release-118 as part of the SDK release

amlrelsa-ms
2022-01-24 19:14:42 +00:00
parent 2a2d2efa17
commit ca76074645
33 changed files with 115 additions and 118 deletions

View File

@@ -103,7 +103,7 @@
"source": [
"import azureml.core\n",
"\n",
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -6,4 +6,4 @@ dependencies:
- fairlearn>=0.6.2
- joblib
- liac-arff
- raiwidgets~=0.15.0
- raiwidgets~=0.16.0

View File

@@ -6,4 +6,4 @@ dependencies:
- fairlearn>=0.6.2
- joblib
- liac-arff
- raiwidgets~=0.15.0
- raiwidgets~=0.16.0

View File

@@ -21,9 +21,9 @@ dependencies:
- pip:
# Required packages for AzureML execution, history, and data preparation.
- azureml-widgets~=1.37.0
- azureml-widgets~=1.38.0
- pytorch-transformers==1.0.0
- spacy==2.1.8
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.37.0/validated_win32_requirements.txt [--no-deps]
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.38.0/validated_win32_requirements.txt [--no-deps]
- arch==4.14

View File

@@ -4,7 +4,6 @@ dependencies:
# Currently Azure ML only supports 3.5.2 and later.
- pip==21.1.2
- python>=3.5.2,<3.8
- nb_conda
- boto3==1.15.18
- matplotlib==2.1.0
- numpy==1.18.5
@@ -22,9 +21,9 @@ dependencies:
- pip:
# Required packages for AzureML execution, history, and data preparation.
- azureml-widgets~=1.37.0
- azureml-widgets~=1.38.0
- pytorch-transformers==1.0.0
- spacy==2.1.8
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.37.0/validated_linux_requirements.txt [--no-deps]
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.38.0/validated_linux_requirements.txt [--no-deps]
- arch==4.14

View File

@@ -5,7 +5,6 @@ dependencies:
- pip==21.1.2
- nomkl
- python>=3.5.2,<3.8
- nb_conda
- boto3==1.15.18
- matplotlib==2.1.0
- numpy==1.18.5
@@ -23,9 +22,9 @@ dependencies:
- pip:
# Required packages for AzureML execution, history, and data preparation.
- azureml-widgets~=1.37.0
- azureml-widgets~=1.38.0
- pytorch-transformers==1.0.0
- spacy==2.1.8
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.37.0/validated_darwin_requirements.txt [--no-deps]
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.38.0/validated_darwin_requirements.txt [--no-deps]
- arch==4.14

View File

@@ -105,7 +105,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -430,7 +430,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Download the featuurization summary JSON file locally\n",
"# Download the featurization summary JSON file locally\n",
"best_run.download_file(\"outputs/featurization_summary.json\", \"featurization_summary.json\")\n",
"\n",
"# Render the JSON as a pandas DataFrame\n",

View File

@@ -93,7 +93,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -97,7 +97,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -369,7 +369,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Download the featuurization summary JSON file locally\n",
"# Download the featurization summary JSON file locally\n",
"best_run.download_file(\"outputs/featurization_summary.json\", \"featurization_summary.json\")\n",
"\n",
"# Render the JSON as a pandas DataFrame\n",

View File

@@ -81,7 +81,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -4,7 +4,6 @@ dependencies:
# Currently Azure ML only supports 3.5.2 and later.
- pip<=19.3.1
- python>=3.5.2,<3.8
- nb_conda
- cython
- urllib3<1.24
- PyJWT < 2.0.0

View File

@@ -5,7 +5,6 @@ dependencies:
- pip<=19.3.1
- nomkl
- python>=3.5.2,<3.8
- nb_conda
- cython
- urllib3<1.24
- PyJWT < 2.0.0

View File

@@ -92,7 +92,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -91,7 +91,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -113,7 +113,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -64,15 +64,16 @@
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"import pandas as pd\n",
"import numpy as np\n",
"import json\n",
"import logging\n",
"\n",
"from azureml.core import Workspace, Experiment, Dataset\n",
"from azureml.train.automl import AutoMLConfig\n",
"from datetime import datetime\n",
"from azureml.automl.core.featurization import FeaturizationConfig"
"\n",
"import azureml.core\n",
"import numpy as np\n",
"import pandas as pd\n",
"from azureml.automl.core.featurization import FeaturizationConfig\n",
"from azureml.core import Dataset, Experiment, Workspace\n",
"from azureml.train.automl import AutoMLConfig\n"
]
},
{
@@ -88,7 +89,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -398,8 +399,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrieve the Best Model\n",
"Below we select the best model from all the training iterations using get_output method."
"### Retrieve the Best Run details\n",
"Below we retrieve the best Run object from among all the runs in the experiment."
]
},
{
@@ -408,8 +409,8 @@
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = remote_run.get_output()\n",
"fitted_model.steps"
"best_run = remote_run.get_best_child()\n",
"best_run"
]
},
{
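Since these samples now retrieve only the best run (rather than the fitted pipeline returned by get_output), here is a minimal sketch of how the fitted model can still be loaded from the best run's artifacts. The artifact path outputs/model.pkl and the use of joblib are assumptions based on the default AutoML output layout, not part of this commit.

import joblib  # assumed to be available in the notebook environment

best_run = remote_run.get_best_child()
# Assumed artifact path for the serialized AutoML pipeline; adjust if the run stores it elsewhere.
best_run.download_file("outputs/model.pkl", "model.pkl")
fitted_model = joblib.load("model.pkl")
print(fitted_model.steps)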
@@ -418,7 +419,7 @@
"source": [
"## Featurization\n",
"\n",
"You can access the engineered feature names generated in time-series featurization. Note that a number of named holiday periods are represented. We recommend that you have at least one year of data when using this feature to ensure that all yearly holidays are captured in the training featurization."
"We can look at the engineered feature names generated in time-series featurization via. the JSON file named 'engineered_feature_names.json' under the run outputs. Note that a number of named holiday periods are represented. We recommend that you have at least one year of data when using this feature to ensure that all yearly holidays are captured in the training featurization."
]
},
{
@@ -427,7 +428,12 @@
"metadata": {},
"outputs": [],
"source": [
"fitted_model.named_steps[\"timeseriestransformer\"].get_engineered_feature_names()"
"# Download the JSON file locally\n",
"best_run.download_file(\"outputs/engineered_feature_names.json\", \"engineered_feature_names.json\")\n",
"with open(\"engineered_feature_names.json\", \"r\") as f:\n",
" records = json.load(f)\n",
"\n",
"records"
]
},
{
@@ -451,12 +457,16 @@
"metadata": {},
"outputs": [],
"source": [
"# Get the featurization summary as a list of JSON\n",
"featurization_summary = fitted_model.named_steps[\n",
" \"timeseriestransformer\"\n",
"].get_featurization_summary()\n",
"# View the featurization summary as a pandas dataframe\n",
"pd.DataFrame.from_records(featurization_summary)"
"# Download the featurization summary JSON file locally\n",
"best_run.download_file(\"outputs/featurization_summary.json\", \"featurization_summary.json\")\n",
"\n",
"# Render the JSON as a pandas DataFrame\n",
"with open(\"featurization_summary.json\", \"r\") as f:\n",
" records = json.load(f)\n",
"fs = pd.DataFrame.from_records(records)\n",
"\n",
"# View a summary of the featurization \n",
"fs[[\"RawFeatureName\", \"TypeDetected\", \"Dropped\", \"EngineeredFeatureCount\", \"Transformations\"]]"
]
},
{

View File

@@ -68,6 +68,7 @@
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import logging\n",
"\n",
"from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score\n",
@@ -99,7 +100,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -398,8 +399,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## Retrieve the Best Model\n",
"Below we select the best model from all the training iterations using get_output method."
"### Retrieve the Best Run details\n",
"Below we retrieve the best Run object from among all the runs in the experiment."
]
},
{
@@ -408,8 +409,8 @@
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = remote_run.get_output()\n",
"fitted_model.steps"
"best_run = remote_run.get_best_child()\n",
"best_run"
]
},
{
@@ -417,7 +418,7 @@
"metadata": {},
"source": [
"## Featurization\n",
"You can access the engineered feature names generated in time-series featurization."
"We can look at the engineered feature names generated in time-series featurization via. the JSON file named 'engineered_feature_names.json' under the run outputs. "
]
},
{
@@ -426,7 +427,12 @@
"metadata": {},
"outputs": [],
"source": [
"fitted_model.named_steps[\"timeseriestransformer\"].get_engineered_feature_names()"
"# Download the JSON file locally\n",
"best_run.download_file(\"outputs/engineered_feature_names.json\", \"engineered_feature_names.json\")\n",
"with open(\"engineered_feature_names.json\", \"r\") as f:\n",
" records = json.load(f)\n",
"\n",
"records"
]
},
{
@@ -449,12 +455,16 @@
"metadata": {},
"outputs": [],
"source": [
"# Get the featurization summary as a list of JSON\n",
"featurization_summary = fitted_model.named_steps[\n",
" \"timeseriestransformer\"\n",
"].get_featurization_summary()\n",
"# View the featurization summary as a pandas dataframe\n",
"pd.DataFrame.from_records(featurization_summary)"
"# Download the featurization summary JSON file locally\n",
"best_run.download_file(\"outputs/featurization_summary.json\", \"featurization_summary.json\")\n",
"\n",
"# Render the JSON as a pandas DataFrame\n",
"with open(\"featurization_summary.json\", \"r\") as f:\n",
" records = json.load(f)\n",
"fs = pd.DataFrame.from_records(records)\n",
"\n",
"# View a summary of the featurization \n",
"fs[[\"RawFeatureName\", \"TypeDetected\", \"Dropped\", \"EngineeredFeatureCount\", \"Transformations\"]]"
]
},
{
@@ -641,7 +651,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrieve the Best Model"
"### Retrieve the Best Run details"
]
},
{
@@ -650,7 +660,8 @@
"metadata": {},
"outputs": [],
"source": [
"best_run_lags, fitted_model_lags = advanced_remote_run.get_output()"
"best_run_lags = remote_run.get_best_child()\n",
"best_run_lags"
]
},
{

View File

@@ -94,7 +94,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -58,14 +58,15 @@
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"import pandas as pd\n",
"import json\n",
"import logging\n",
"\n",
"from azureml.core.workspace import Workspace\n",
"import azureml.core\n",
"import pandas as pd\n",
"from azureml.automl.core.featurization import FeaturizationConfig\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.train.automl import AutoMLConfig\n",
"from azureml.automl.core.featurization import FeaturizationConfig"
"from azureml.core.workspace import Workspace\n",
"from azureml.train.automl import AutoMLConfig\n"
]
},
{
@@ -81,7 +82,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -366,7 +367,7 @@
"|-|-|\n",
"|**time_column_name**|The name of your time column.|\n",
"|**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).|\n",
"|**time_series_id_column_names**|The column names used to uniquely identify the time series in data that has multiple rows with the same timestamp. If the time series identifiers are not defined, the data set is assumed to be one time series.|\n",
"|**time_series_id_column_names**|This optional parameter represents the column names used to uniquely identify the time series in data that has multiple rows with the same timestamp. If the time series identifiers are not defined or incorrectly defined, time series identifiers will be created automatically if they exist.|\n",
"|**freq**|Forecast frequency. This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information."
]
},
@@ -378,7 +379,7 @@
"\n",
"The [AutoMLConfig](https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.automlconfig.automlconfig?view=azure-ml-py) object defines the settings and data for an AutoML training job. Here, we set necessary inputs like the task type, the number of AutoML iterations to try, the training data, and cross-validation parameters.\n",
"\n",
"For forecasting tasks, there are some additional parameters that can be set in the `ForecastingParameters` class: the name of the column holding the date/time, the timeseries id column names, and the maximum forecast horizon. A time column is required for forecasting, while the time_series_id is optional. If time_series_id columns are not given, AutoML assumes that the whole dataset is a single time-series. We also pass a list of columns to drop prior to modeling. The _logQuantity_ column is completely correlated with the target quantity, so it must be removed to prevent a target leak.\n",
"For forecasting tasks, there are some additional parameters that can be set in the `ForecastingParameters` class: the name of the column holding the date/time, the timeseries id column names, and the maximum forecast horizon. A time column is required for forecasting, while the time_series_id is optional. If time_series_id columns are not given or incorrectly given, AutoML automatically creates time_series_id columns if they exist. We also pass a list of columns to drop prior to modeling. The _logQuantity_ column is completely correlated with the target quantity, so it must be removed to prevent a target leak.\n",
"\n",
"The forecast horizon is given in units of the time-series frequency; for instance, the OJ series frequency is weekly, so a horizon of 20 means that a trained model will estimate sales up to 20 weeks beyond the latest date in the training data for each series. In this example, we set the forecast horizon to the number of samples per series in the test set (n_test_periods). Generally, the value of this parameter will be dictated by business needs. For example, a demand planning application that estimates the next month of sales should set the horizon according to suitable planning time-scales. Please see the [energy_demand notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand) for more discussion of forecast horizon.\n",
"\n",
@@ -421,7 +422,6 @@
"forecasting_parameters = ForecastingParameters(\n",
" time_column_name=time_column_name,\n",
" forecast_horizon=n_test_periods,\n",
" time_series_id_column_names=time_series_id_column_names,\n",
" freq=\"W-THU\", # Set the forecast frequency to be weekly (start on each Thursday)\n",
")\n",
"\n",
@@ -472,8 +472,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrieve the Best Model\n",
"Each run within an Experiment stores serialized (i.e. pickled) pipelines from the AutoML iterations. We can now retrieve the pipeline with the best performance on the validation dataset:"
"### Retrieve the Best Run details\n",
"Below we retrieve the best Run object from among all the runs in the experiment."
]
},
{
@@ -482,9 +482,9 @@
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = remote_run.get_output()\n",
"print(fitted_model.steps)\n",
"model_name = best_run.properties[\"model_name\"]"
"best_run = remote_run.get_best_child()\n",
"model_name = best_run.properties[\"model_name\"]\n",
"best_run"
]
},
{
@@ -502,16 +502,16 @@
"metadata": {},
"outputs": [],
"source": [
"custom_featurizer = fitted_model.named_steps[\"timeseriestransformer\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"custom_featurizer.get_featurization_summary()"
"# Download the featurization summary JSON file locally\n",
"best_run.download_file(\"outputs/featurization_summary.json\", \"featurization_summary.json\")\n",
"\n",
"# Render the JSON as a pandas DataFrame\n",
"with open(\"featurization_summary.json\", \"r\") as f:\n",
" records = json.load(f)\n",
"fs = pd.DataFrame.from_records(records)\n",
"\n",
"# View a summary of the featurization \n",
"fs[[\"RawFeatureName\", \"TypeDetected\", \"Dropped\", \"EngineeredFeatureCount\", \"Transformations\"]]"
]
},
{
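Because the best run's model_name property is kept above, one common next step is registering that model for deployment. A minimal sketch follows; the artifact path outputs/model.pkl is an assumption based on the default AutoML output layout and is not part of this commit.

# Register the best run's model under the name stored in its properties.
# The model_path below is an assumed default; adjust if the run stores the model elsewhere.
registered_model = best_run.register_model(
    model_name=model_name,
    model_path="outputs/model.pkl",
)
print(registered_model.name, registered_model.version)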

View File

@@ -96,7 +96,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -96,7 +96,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -359,7 +359,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Download the featuurization summary JSON file locally\n",
"# Download the featurization summary JSON file locally\n",
"best_run.download_file(\"outputs/featurization_summary.json\", \"featurization_summary.json\")\n",
"\n",
"# Render the JSON as a pandas DataFrame\n",

View File

@@ -92,7 +92,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -106,7 +106,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -11,4 +11,4 @@ dependencies:
- matplotlib
- azureml-dataset-runtime
- ipywidgets
- raiwidgets~=0.15.0
- raiwidgets~=0.16.0

View File

@@ -10,4 +10,4 @@ dependencies:
- ipython
- matplotlib
- ipywidgets
- raiwidgets~=0.15.0
- raiwidgets~=0.16.0

View File

@@ -10,4 +10,4 @@ dependencies:
- ipython
- matplotlib
- ipywidgets
- raiwidgets~=0.15.0
- raiwidgets~=0.16.0

View File

@@ -12,4 +12,4 @@ dependencies:
- azureml-dataset-runtime
- azureml-core
- ipywidgets
- raiwidgets~=0.15.0
- raiwidgets~=0.16.0

View File

@@ -95,7 +95,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -8,5 +8,5 @@ dependencies:
- matplotlib
- azureml-dataset-runtime
- ipywidgets
- raiwidgets~=0.15.0
- raiwidgets~=0.16.0
- liac-arff

View File

@@ -100,7 +100,7 @@
"\n",
"# Check core SDK version number\n",
"\n",
"print(\"This notebook was created using SDK version 1.37.0, you are currently running version\", azureml.core.VERSION)"
"print(\"This notebook was created using SDK version 1.38.0, you are currently running version\", azureml.core.VERSION)"
]
},
{

View File

@@ -184,24 +184,6 @@
"myenv.python.conda_dependencies=conda_dep"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Specify environment variables\n",
"\n",
"You can add environment variables to your environment. These then become available using ```os.environ.get``` in your training script."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"myenv.environment_variables = {\"MESSAGE\":\"Hello from Azure Machine Learning\"}"
]
},
{
"cell_type": "markdown",
"metadata": {},

View File

@@ -102,7 +102,7 @@
"source": [
"import azureml.core\n",
"\n",
"print(\"This notebook was created using version 1.37.0 of the Azure ML SDK\")\n",
"print(\"This notebook was created using version 1.38.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},

View File

@@ -2,7 +2,7 @@ import argparse
import os
import numpy as np
import glob
import joblib
# import joblib
import mlflow
from sklearn.linear_model import LogisticRegression
@@ -30,8 +30,7 @@ X_train = (
os.path.join(data_folder, "**/train-images-idx3-ubyte.gz"), recursive=True
)[0],
False,
) /
255.0
) / 255.0
)
X_test = (
load_data(
@@ -39,8 +38,7 @@ X_test = (
os.path.join(data_folder, "**/t10k-images-idx3-ubyte.gz"), recursive=True
)[0],
False,
) /
255.0
) / 255.0
)
y_train = load_data(
glob.glob(