update samples from Release-136 as a part of SDK release

Author: amlrelsa-ms
Date:   2022-04-25 17:08:42 +00:00
parent a4dfcc4693
commit 1903f78285
22 changed files with 120 additions and 108 deletions

View File

@@ -103,7 +103,7 @@
"source": [ "source": [
"import azureml.core\n", "import azureml.core\n",
"\n", "\n",
"print(\"This notebook was created using version 1.40.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.41.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -13,7 +13,7 @@ dependencies:
 - pytorch::pytorch=1.4.0
 - conda-forge::fbprophet==0.7.1
 - cudatoolkit=10.1.243
-- tqdm==4.63.1
+- scipy==1.5.2
 - notebook
 - pywin32==225
 - PySocks==1.7.1
@@ -21,10 +21,10 @@ dependencies:
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.40.0
+  - azureml-widgets~=1.41.0
   - pytorch-transformers==1.0.0
   - spacy==2.2.4
   - pystan==2.19.1.1
   - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.40.0/validated_win32_requirements.txt [--no-deps]
+  - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.41.0/validated_win32_requirements.txt [--no-deps]
   - arch==4.14

View File

@@ -24,10 +24,10 @@ dependencies:
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.40.0
+  - azureml-widgets~=1.41.0
   - pytorch-transformers==1.0.0
   - spacy==2.2.4
   - pystan==2.19.1.1
   - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.40.0/validated_linux_requirements.txt [--no-deps]
+  - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.41.0/validated_linux_requirements.txt [--no-deps]
   - arch==4.14

View File

@@ -25,10 +25,10 @@ dependencies:
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
-  - azureml-widgets~=1.40.0
+  - azureml-widgets~=1.41.0
   - pytorch-transformers==1.0.0
   - spacy==2.2.4
   - pystan==2.19.1.1
   - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
-  - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.40.0/validated_darwin_requirements.txt [--no-deps]
+  - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.41.0/validated_darwin_requirements.txt [--no-deps]
   - arch==4.14
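The three environment specs above (win32/linux/darwin) are consumed the same way once created. As a minimal sketch of how such a conda spec is typically turned into an AzureML Environment (the environment name and file path below are illustrative, not part of this commit):

```python
from azureml.core import Environment

# Illustrative name and path; point file_path at the automl env YAML for your OS.
automl_env = Environment.from_conda_specification(
    name="automl-sample-env",
    file_path="automl_env.yml",
)
```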

View File

@@ -134,6 +134,7 @@
"output[\"Resource Group\"] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output[\"Experiment Name\"] = experiment.name\n", "output[\"Experiment Name\"] = experiment.name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n", "pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"

View File

@@ -8,9 +8,12 @@ dependencies:
 - urllib3==1.26.7
 - PyJWT < 2.0.0
 - numpy==1.18.5
+- pywin32==227
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
+  - azure-core==1.21.1
+  - azure-identity==1.7.0
   - azureml-defaults
   - azureml-sdk
   - azureml-widgets

View File

@@ -14,6 +14,8 @@ dependencies:
 - pip:
   # Required packages for AzureML execution, history, and data preparation.
+  - azure-core==1.21.1
+  - azure-identity==1.7.0
   - azureml-defaults
   - azureml-sdk
   - azureml-widgets
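Both environment files above gain azure-core and azure-identity pins. As a hedged sketch of what the identity package provides (not code from this commit), a DefaultAzureCredential can be used to acquire AAD tokens; the scope shown is the ARM default and purely illustrative:

```python
from azure.identity import DefaultAzureCredential

# DefaultAzureCredential walks environment variables, managed identity,
# the Azure CLI, etc. to find a usable credential.
credential = DefaultAzureCredential()
token = credential.get_token("https://management.azure.com/.default")
print(token.expires_on)
```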

View File

@@ -92,7 +92,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.40.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.41.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -75,7 +75,6 @@
"from azureml.core.experiment import Experiment\n", "from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n", "from azureml.core.workspace import Workspace\n",
"from azureml.core.dataset import Dataset\n", "from azureml.core.dataset import Dataset\n",
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
"from azureml.train.automl import AutoMLConfig" "from azureml.train.automl import AutoMLConfig"
] ]
}, },
@@ -92,7 +91,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.40.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.41.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },
@@ -197,10 +196,10 @@
"source": [ "source": [
"ds = ws.get_default_datastore()\n", "ds = ws.get_default_datastore()\n",
"\n", "\n",
"train_data = TabularDatasetFactory.register_pandas_dataframe(\n", "train_data = Dataset.Tabular.register_pandas_dataframe(\n",
" train_data.to_pandas_dataframe(), target=(ds, \"machineTrainData\"), name=\"train_data\")\n", " train_data.to_pandas_dataframe(), target=(ds, \"machineTrainData\"), name=\"train_data\")\n",
"\n", "\n",
"test_data = TabularDatasetFactory.register_pandas_dataframe(\n", "test_data = Dataset.Tabular.register_pandas_dataframe(\n",
" test_data.to_pandas_dataframe(), target=(ds, \"machineTestData\"), name=\"test_data\")" " test_data.to_pandas_dataframe(), target=(ds, \"machineTestData\"), name=\"test_data\")"
] ]
}, },
@@ -328,7 +327,8 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"#### Show hyperparameters\n", "#### Show hyperparameters\n",
"Show the model pipeline used for the best run with its hyperparameters." "Show the model pipeline used for the best run with its hyperparameters.\n",
"For ensemble pipelines it shows the iterations and algorithms that are ensembled."
] ]
}, },
{ {
@@ -337,8 +337,19 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"run_properties = json.loads(best_run.get_details()['properties']['pipeline_script'])\n", "run_properties = best_run.get_details()['properties']\n",
"print(json.dumps(run_properties, indent = 1)) " "pipeline_script = json.loads(run_properties['pipeline_script'])\n",
"print(json.dumps(pipeline_script, indent = 1)) \n",
"\n",
"if 'ensembled_iterations' in run_properties:\n",
" print(\"\")\n",
" print(\"Ensembled Iterations\")\n",
" print(run_properties['ensembled_iterations'])\n",
" \n",
"if 'ensembled_algorithms' in run_properties:\n",
" print(\"\")\n",
" print(\"Ensembled Algorithms\")\n",
" print(run_properties['ensembled_algorithms'])"
] ]
}, },
{ {
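Both this notebook and the DNN forecasting notebook further below move from `TabularDatasetFactory` / `from_delimited_files` to `Dataset.Tabular.register_pandas_dataframe`. A minimal sketch of that pattern, with an illustrative DataFrame and dataset name, assuming a workspace config is available locally:

```python
import pandas as pd
from azureml.core import Dataset, Workspace

ws = Workspace.from_config()
datastore = ws.get_default_datastore()

# Illustrative data; any pandas DataFrame works.
df = pd.DataFrame(
    {"timestamp": pd.date_range("2022-01-01", periods=5, freq="D"), "value": range(5)}
)

# Registers the DataFrame as a TabularDataset backed by files written to the datastore.
train_data = Dataset.Tabular.register_pandas_dataframe(
    df, target=(datastore, "machineTrainData"), name="train_data"
)
print(train_data.name, train_data.version)
```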

View File

@@ -121,7 +121,7 @@ def calculate_scores_and_build_plots(
     input_dir: str, output_dir: str, automl_settings: Dict[str, Any]
 ):
     os.makedirs(output_dir, exist_ok=True)
-    grains = automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
+    grains = automl_settings.get(constants.TimeSeries.TIME_SERIES_ID_COLUMN_NAMES)
     time_column_name = automl_settings.get(constants.TimeSeries.TIME_COLUMN_NAME)
     if grains is None:
         grains = []

View File

@@ -322,10 +322,10 @@
"| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n", "| **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |\n",
"| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n", "| **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |\n",
"| **label_column_name** | The name of the label column. |\n", "| **label_column_name** | The name of the label column. |\n",
"| **max_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n", "| **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |\n",
"| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n", "| **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |\n",
"| **time_column_name** | The name of your time column. |\n", "| **time_column_name** | The name of your time column. |\n",
"| **grain_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n", "| **time_series_id_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n", "| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
"| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |" "| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
] ]
@@ -354,8 +354,8 @@
" \"label_column_name\": TARGET_COLNAME,\n", " \"label_column_name\": TARGET_COLNAME,\n",
" \"n_cross_validations\": 3,\n", " \"n_cross_validations\": 3,\n",
" \"time_column_name\": TIME_COLNAME,\n", " \"time_column_name\": TIME_COLNAME,\n",
" \"max_horizon\": 6,\n", " \"forecast_horizon\": 6,\n",
" \"grain_column_names\": partition_column_names,\n", " \"time_series_id_column_names\": partition_column_names,\n",
" \"track_child_runs\": False,\n", " \"track_child_runs\": False,\n",
"}\n", "}\n",
"\n", "\n",

View File

@@ -57,7 +57,7 @@
"Notebook synopsis:\n", "Notebook synopsis:\n",
"\n", "\n",
"1. Creating an Experiment in an existing Workspace\n", "1. Creating an Experiment in an existing Workspace\n",
"2. Configuration and remote run of AutoML for a time-series model exploring Regression learners, Arima, Prophet and DNNs\n", "2. Configuration and remote run of AutoML for a time-series model exploring DNNs\n",
"4. Evaluating the fitted model using a rolling test " "4. Evaluating the fitted model using a rolling test "
] ]
}, },
@@ -92,8 +92,7 @@
"# Squash warning messages for cleaner output in the notebook\n", "# Squash warning messages for cleaner output in the notebook\n",
"warnings.showwarning = lambda *args, **kwargs: None\n", "warnings.showwarning = lambda *args, **kwargs: None\n",
"\n", "\n",
"from azureml.core.workspace import Workspace\n", "from azureml.core import Workspace, Experiment, Dataset\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.train.automl import AutoMLConfig\n", "from azureml.train.automl import AutoMLConfig\n",
"from matplotlib import pyplot as plt\n", "from matplotlib import pyplot as plt\n",
"from sklearn.metrics import mean_absolute_error, mean_squared_error\n", "from sklearn.metrics import mean_absolute_error, mean_squared_error\n",
@@ -298,40 +297,21 @@
"from helper import split_full_for_forecasting\n", "from helper import split_full_for_forecasting\n",
"\n", "\n",
"train, valid = split_full_for_forecasting(df, time_column_name)\n", "train, valid = split_full_for_forecasting(df, time_column_name)\n",
"train.to_csv(\"train.csv\")\n", "\n",
"valid.to_csv(\"valid.csv\")\n", "# Reset index to create a Tabualr Dataset.\n",
"test_df.to_csv(\"test.csv\")\n", "train.reset_index(inplace=True)\n",
"valid.reset_index(inplace=True)\n",
"test_df.reset_index(inplace=True)\n",
"\n", "\n",
"datastore = ws.get_default_datastore()\n", "datastore = ws.get_default_datastore()\n",
"datastore.upload_files(\n", "train_dataset = Dataset.Tabular.register_pandas_dataframe(\n",
" files=[\"./train.csv\"],\n", " train, target=(datastore, \"dataset/\"), name=\"Github_DAU_train\"\n",
" target_path=\"github-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n", ")\n",
"datastore.upload_files(\n", "valid_dataset = Dataset.Tabular.register_pandas_dataframe(\n",
" files=[\"./valid.csv\"],\n", " valid, target=(datastore, \"dataset/\"), name=\"Github_DAU_valid\"\n",
" target_path=\"github-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n", ")\n",
"datastore.upload_files(\n", "test_dataset = Dataset.Tabular.register_pandas_dataframe(\n",
" files=[\"./test.csv\"],\n", " test_df, target=(datastore, \"dataset/\"), name=\"Github_DAU_test\"\n",
" target_path=\"github-dataset/tabular/\",\n",
" overwrite=True,\n",
" show_progress=True,\n",
")\n",
"\n",
"from azureml.core import Dataset\n",
"\n",
"train_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"github-dataset/tabular/train.csv\")]\n",
")\n",
"valid_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"github-dataset/tabular/valid.csv\")]\n",
")\n",
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, \"github-dataset/tabular/test.csv\")]\n",
")" ")"
] ]
}, },
@@ -397,7 +377,7 @@
" freq=\"D\", # Set the forecast frequency to be daily\n", " freq=\"D\", # Set the forecast frequency to be daily\n",
")\n", ")\n",
"\n", "\n",
"# We will disable the enable_early_stopping flag to ensure the DNN model is recommended for demonstration purpose.\n", "# To only allow the TCNForecaster we set the allowed_models parameter to reflect this.\n",
"automl_config = AutoMLConfig(\n", "automl_config = AutoMLConfig(\n",
" task=\"forecasting\",\n", " task=\"forecasting\",\n",
" primary_metric=\"normalized_root_mean_squared_error\",\n", " primary_metric=\"normalized_root_mean_squared_error\",\n",
@@ -410,7 +390,7 @@
" max_concurrent_iterations=4,\n", " max_concurrent_iterations=4,\n",
" max_cores_per_iteration=-1,\n", " max_cores_per_iteration=-1,\n",
" enable_dnn=True,\n", " enable_dnn=True,\n",
" enable_early_stopping=False,\n", " allowed_models=[\"TCNForecaster\"],\n",
" forecasting_parameters=forecasting_parameters,\n", " forecasting_parameters=forecasting_parameters,\n",
")" ")"
] ]
@@ -503,7 +483,9 @@
"if not forecast_model in summary_df[\"run_id\"]:\n", "if not forecast_model in summary_df[\"run_id\"]:\n",
" forecast_model = \"ForecastTCN\"\n", " forecast_model = \"ForecastTCN\"\n",
"\n", "\n",
"best_dnn_run_id = summary_df[\"run_id\"][forecast_model]\n", "best_dnn_run_id = summary_df[summary_df[\"Score\"] == summary_df[\"Score\"].min()][\n",
" \"run_id\"\n",
"][forecast_model]\n",
"best_dnn_run = Run(experiment, best_dnn_run_id)" "best_dnn_run = Run(experiment, best_dnn_run_id)"
] ]
}, },
@@ -564,11 +546,6 @@
 },
 "outputs": [],
 "source": [
-"from azureml.core import Dataset\n",
-"\n",
-"test_dataset = Dataset.Tabular.from_delimited_files(\n",
-" path=[(datastore, \"github-dataset/tabular/test.csv\")]\n",
-")\n",
 "# preview the first 3 rows of the dataset\n",
 "test_dataset.take(5).to_pandas_dataframe()"
 ]
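A hedged, self-contained sketch of the TCN-only configuration this notebook now uses; the dataset, compute target and column names are placeholders rather than values from this commit:

```python
from azureml.automl.core.forecasting_parameters import ForecastingParameters
from azureml.train.automl import AutoMLConfig

forecasting_parameters = ForecastingParameters(
    time_column_name="date",        # placeholder column name
    forecast_horizon=14,
    freq="D",
)

automl_config = AutoMLConfig(
    task="forecasting",
    primary_metric="normalized_root_mean_squared_error",
    training_data=train_dataset,       # a registered TabularDataset, as above
    label_column_name="count",         # placeholder label column
    compute_target=compute_target,     # an existing AmlCompute cluster
    enable_dnn=True,
    allowed_models=["TCNForecaster"],  # restrict the sweep to the DNN forecaster
    forecasting_parameters=forecasting_parameters,
)
```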

View File

@@ -79,9 +79,7 @@ def get_result_df(remote_run):
if "goal" in run.properties: if "goal" in run.properties:
goal_minimize = run.properties["goal"].split("_")[-1] == "min" goal_minimize = run.properties["goal"].split("_")[-1] == "min"
summary_df = summary_df.T.sort_values( summary_df = summary_df.T.sort_values("Score", ascending=goal_minimize)
"Score", ascending=goal_minimize
).drop_duplicates(["run_algorithm"])
summary_df = summary_df.set_index("run_algorithm") summary_df = summary_df.set_index("run_algorithm")
return summary_df return summary_df

View File

@@ -324,7 +324,7 @@
"| **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. |\n", "| **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. |\n",
"| **time_column_name** | The name of your time column. |\n", "| **time_column_name** | The name of your time column. |\n",
"| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n", "| **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. |\n",
"| **time_series_id_column_name** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n", "| **time_series_id_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |\n",
"| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n", "| **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). |\n",
"| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n", "| **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. |\n",
"| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |" "| **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |"
@@ -355,8 +355,8 @@
" \"n_cross_validations\": 3,\n", " \"n_cross_validations\": 3,\n",
" \"time_column_name\": \"WeekStarting\",\n", " \"time_column_name\": \"WeekStarting\",\n",
" \"drop_column_names\": \"Revenue\",\n", " \"drop_column_names\": \"Revenue\",\n",
" \"max_horizon\": 6,\n", " \"forecast_horizon\": 6,\n",
" \"grain_column_names\": partition_column_names,\n", " \"time_series_id_column_names\": partition_column_names,\n",
" \"track_child_runs\": False,\n", " \"track_child_runs\": False,\n",
"}\n", "}\n",
"\n", "\n",

View File

@@ -106,7 +106,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.40.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.41.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -57,6 +57,10 @@ RUN pip install --no-cache-dir \
 lz4 \
 psutil \
 setproctitle
+# This is required for ray 0.8.7
+RUN pip install -U aiohttp==3.7.4
 # This is needed for mpi to locate libpython
 ENV LD_LIBRARY_PATH $AZUREML_CONDA_ENVIRONMENT_PATH/lib:$LD_LIBRARY_PATH

View File

@@ -28,7 +28,7 @@ RUN conda install -y conda=4.7.12 python=3.7 && conda clean -ay && \
 psutil \
 setproctitle \
 pygame \
-gym[atari]==0.17.3 && \
+gym[classic_control]==0.19.0 && \
 conda install -y -c conda-forge x264='1!152.20180717' ffmpeg=4.0.2 && \
 conda install -c anaconda opencv
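A small hedged smoke test for the new gym pin (classic_control extras instead of atari); the environment id is illustrative, and the snippet uses the pre-0.26 reset/step API that gym 0.19 exposes:

```python
import gym

env = gym.make("CartPole-v1")   # a classic_control environment
obs = env.reset()               # gym 0.19: reset() returns only the observation
for _ in range(10):
    obs, reward, done, info = env.step(env.action_space.sample())
    if done:
        obs = env.reset()
env.close()
```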

View File

@@ -95,7 +95,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"print(\"This notebook was created using version 1.40.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.41.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },

View File

@@ -30,7 +30,7 @@ _categorical_columns = [
 def fetch_census_dataset():
-    """Fetch the Adult Census Dataset.
+    """Fetch the Adult Census Dataset
     This uses a particular URL for the Adult Census dataset. The code
     is a simplified version of fetch_openml() in sklearn.
@@ -39,45 +39,25 @@ def fetch_census_dataset():
     https://openml.org/data/v1/download/1595261.gz
     (as of 2021-03-31)
     """
-    try:
-        from urllib import urlretrieve
-    except ImportError:
-        from urllib.request import urlretrieve
-    filename = "1595261.gz"
-    data_url = "https://rainotebookscdn.blob.core.windows.net/datasets/"
-    remaining_attempts = 5
-    sleep_duration = 10
-    while remaining_attempts > 0:
-        try:
-            urlretrieve(data_url + filename, filename)
-            http_stream = gzip.GzipFile(filename=filename, mode='rb')
-            with closing(http_stream):
-                def _stream_generator(response):
-                    for line in response:
-                        yield line.decode('utf-8')
-                stream = _stream_generator(http_stream)
-                data = arff.load(stream)
-        except Exception as exc: # noqa: B902
-            remaining_attempts -= 1
-            print("Error downloading dataset from {} ({} attempt(s) remaining)"
-                  .format(data_url, remaining_attempts))
-            print(exc)
-            sleep(sleep_duration)
-            sleep_duration *= 2
-            continue
-        else:
-            # dataset successfully downloaded
-            break
-    else:
-        raise Exception("Could not retrieve dataset from {}.".format(data_url))
+    dataset_path = "1595261.gz"
+    try:
+        file_stream = gzip.GzipFile(filename=dataset_path, mode='rb')
+        with closing(file_stream):
+            def _stream_generator(response):
+                for line in response:
+                    yield line.decode('utf-8')
+            stream = _stream_generator(file_stream)
+            data = arff.load(stream)
+    except Exception as exc:
+        raise Exception("Could not load dataset from {} with exception {}".format(dataset_path, exc))
     attributes = OrderedDict(data['attributes'])
     arff_columns = list(attributes)
     raw_df = pd.DataFrame(data=data['data'], columns=arff_columns)
     target_column_name = 'class'

View File

@@ -100,7 +100,7 @@
"\n", "\n",
"# Check core SDK version number\n", "# Check core SDK version number\n",
"\n", "\n",
"print(\"This notebook was created using SDK version 1.40.0, you are currently running version\", azureml.core.VERSION)" "print(\"This notebook was created using SDK version 1.41.0, you are currently running version\", azureml.core.VERSION)"
] ]
}, },
{ {
@@ -363,6 +363,43 @@
"run.log_image(name='Hyperbolic Tangent', plot=plt)" "run.log_image(name='Hyperbolic Tangent', plot=plt)"
] ]
}, },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Logging for when more Metric Names are required\n",
"\n",
"Limits on logging are internally enforced to ensure a smooth experience, however these can sometimes be limiting, particularly in terms of the limit on metric names.\n",
"\n",
"The \"Logging Vectors\" or \"Logging Tables\" examples previously can be expanded upon to use up to 15 columns to increase this limit, with the information still being presented in Run Details as a chart, and being directly comparable in experiment reports.\n",
"\n",
"**Note:** see [Azure Machine Learning Limits Documentation](https://aka.ms/azure-machine-learning-limits) for more information on service limits.\n",
"**Note:** tables logged into the run are expected to be relatively small. Logging very large tables into Azure ML can result in reduced performance. If you need to store large amounts of data associated with the run, you can write the data to file that will be uploaded."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import random\n",
"metricNames = [ \"Accuracy\", \"Precision\", \"Recall\" ]\n",
"columnNames = [ \"expected\", \"actual\", \"calculated\", \"inferred\", \"determined\", \"predicted\", \"forecast\", \"speculated\", \"assumed\", \"required\", \"intended\", \"deduced\", \"theorized\", \"hoped\", \"hypothesized\" ]\n",
"\n",
"for step in range(1000):\n",
" for metricName in metricNames:\n",
"\n",
" metricKeyValueDictionary={}\n",
" for column in columnNames:\n",
" metricKeyValueDictionary[column] = random.randrange(0, step + 1)\n",
"\n",
" run.log_row(\n",
" metricName,\n",
" \"Example row for metric \" + metricName,\n",
" **metricKeyValueDictionary)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -498,7 +535,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"import os\n",
"os.makedirs('files', exist_ok=True)\n", "os.makedirs('files', exist_ok=True)\n",
"\n", "\n",
"for f in run.get_file_names():\n", "for f in run.get_file_names():\n",

View File

@@ -102,7 +102,7 @@
"source": [ "source": [
"import azureml.core\n", "import azureml.core\n",
"\n", "\n",
"print(\"This notebook was created using version 1.40.0 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.41.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },