Compare commits

...

4 Commits

Author SHA1 Message Date
amlrelsa-ms
b0aa91acce update samples from Release-140 as a part of SDK release 2022-05-04 23:01:56 +00:00
Harneet Virk
5928ba83bb Merge pull request #1748 from Azure/release_update/Release-138
update samples from Release-138 as a part of SDK release
2022-04-29 10:40:01 -07:00
amlrelsa-ms
ffa3a43979 update samples from Release-138 as a part of SDK release 2022-04-29 17:09:13 +00:00
Harneet Virk
7ce79a43f1 Merge pull request #1746 from Azure/release_update/Release-137
update samples from Release-137 as a part of SDK release
2022-04-27 11:50:44 -07:00
26 changed files with 52 additions and 30 deletions

View File

@@ -90,6 +90,7 @@
"output[\"Resource Group\"] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output[\"Experiment Name\"] = experiment.name\n", "output[\"Experiment Name\"] = experiment.name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n", "pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"

View File

@@ -101,6 +101,7 @@
"output[\"Resource Group\"] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output[\"Experiment Name\"] = experiment.name\n", "output[\"Experiment Name\"] = experiment.name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n", "pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"

View File

@@ -102,6 +102,7 @@
"output[\"Resource Group\"] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n", "output[\"Run History Name\"] = experiment_name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n", "pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"

View File

@@ -5,6 +5,7 @@ import json
import os import os
import re import re
import numpy as np
import pandas as pd import pandas as pd
from matplotlib import pyplot as plt from matplotlib import pyplot as plt
@@ -146,6 +147,9 @@ def calculate_scores_and_build_plots(
_draw_one_plot(one_forecast, time_column_name, grains, pdf) _draw_one_plot(one_forecast, time_column_name, grains, pdf)
pdf.close() pdf.close()
forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False) forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False)
# Remove np.NaN and np.inf from the prediction and actuals data.
forecast_df.replace([np.inf, -np.inf], np.nan, inplace=True)
forecast_df.dropna(subset=[ACTUALS, PREDICTIONS], inplace=True)
metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER]) metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER])
metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False) metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False)

View File

@@ -86,6 +86,7 @@
"output[\"Resource Group\"] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output[\"Default datastore name\"] = dstore.name\n", "output[\"Default datastore name\"] = dstore.name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n", "pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"

View File

@@ -5,6 +5,7 @@ import json
import os import os
import re import re
import numpy as np
import pandas as pd import pandas as pd
from matplotlib import pyplot as plt from matplotlib import pyplot as plt
@@ -146,6 +147,9 @@ def calculate_scores_and_build_plots(
_draw_one_plot(one_forecast, time_column_name, grains, pdf) _draw_one_plot(one_forecast, time_column_name, grains, pdf)
pdf.close() pdf.close()
forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False) forecast_df.to_csv(os.path.join(output_dir, FORECASTS_FILE), index=False)
# Remove np.NaN and np.inf from the prediction and actuals data.
forecast_df.replace([np.inf, -np.inf], np.nan, inplace=True)
forecast_df.dropna(subset=[ACTUALS, PREDICTIONS], inplace=True)
metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER]) metrics = compute_all_metrics(forecast_df, grains + [BACKTEST_ITER])
metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False) metrics.to_csv(os.path.join(output_dir, SCORES_FILE), index=False)

View File

@@ -100,6 +100,7 @@
"output[\"SKU\"] = ws.sku\n", "output[\"SKU\"] = ws.sku\n",
"output[\"Resource Group\"] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n", "pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"

View File

@@ -119,6 +119,7 @@
"output[\"Resource Group\"] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n", "output[\"Run History Name\"] = experiment_name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n", "pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"

View File

@@ -132,6 +132,7 @@
"output[\"Resource Group\"] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n", "output[\"Run History Name\"] = experiment_name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n", "pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"

View File

@@ -121,6 +121,7 @@
"output[\"Resource Group\"] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n", "output[\"Run History Name\"] = experiment_name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n", "pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"

View File

@@ -147,6 +147,7 @@
"output[\"Resource Group\"] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n", "output[\"Run History Name\"] = experiment_name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n", "pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"

View File

@@ -78,6 +78,7 @@
"output[\"Resource Group\"] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output[\"Default datastore name\"] = dstore.name\n", "output[\"Default datastore name\"] = dstore.name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n", "pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"

View File

@@ -78,6 +78,7 @@
"output[\"Resource Group\"] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output[\"Default datastore name\"] = dstore.name\n", "output[\"Default datastore name\"] = dstore.name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n", "pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"

View File

@@ -112,6 +112,7 @@
"output[\"Resource Group\"] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n", "output[\"Run History Name\"] = experiment_name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n", "pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"

View File

@@ -93,6 +93,7 @@
"output[\"Resource Group\"] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output[\"Experiment Name\"] = experiment.name\n", "output[\"Experiment Name\"] = experiment.name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n", "pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"

View File

@@ -93,6 +93,7 @@
"output[\"Resource Group\"] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output[\"Experiment Name\"] = experiment.name\n", "output[\"Experiment Name\"] = experiment.name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n", "pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"

View File

@@ -89,6 +89,7 @@
"output[\"Resource Group\"] = ws.resource_group\n", "output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n", "output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n", "output[\"Run History Name\"] = experiment_name\n",
"output[\"SDK Version\"] = azureml.core.VERSION\n",
"pd.set_option(\"display.max_colwidth\", None)\n", "pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T" "outputDf.T"

View File

@@ -82,7 +82,7 @@
"source": [ "source": [
"## Create trained model\n", "## Create trained model\n",
"\n", "\n",
"For this example, we will train a small model on scikit-learn's [diabetes dataset](https://scikit-learn.org/stable/datasets/index.html#diabetes-dataset). " "For this example, we will train a small model on scikit-learn's [diabetes dataset](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_diabetes.html). "
] ]
}, },
{ {
@@ -279,7 +279,9 @@
"\n", "\n",
"\n", "\n",
"environment = Environment('my-sklearn-environment')\n", "environment = Environment('my-sklearn-environment')\n",
"environment.python.conda_dependencies = CondaDependencies.create(pip_packages=[\n", "environment.python.conda_dependencies = CondaDependencies.create(conda_packages=[\n",
" 'pip==20.2.4'],\n",
" pip_packages=[\n",
" 'azureml-defaults',\n", " 'azureml-defaults',\n",
" 'inference-schema[numpy-support]',\n", " 'inference-schema[numpy-support]',\n",
" 'joblib',\n", " 'joblib',\n",
@@ -478,7 +480,9 @@
"\n", "\n",
"\n", "\n",
"environment = Environment('my-sklearn-environment')\n", "environment = Environment('my-sklearn-environment')\n",
"environment.python.conda_dependencies = CondaDependencies.create(pip_packages=[\n", "environment.python.conda_dependencies = CondaDependencies.create(conda_packages=[\n",
" 'pip==20.2.4'],\n",
" pip_packages=[\n",
" 'azureml-defaults',\n", " 'azureml-defaults',\n",
" 'inference-schema[numpy-support]',\n", " 'inference-schema[numpy-support]',\n",
" 'joblib',\n", " 'joblib',\n",

View File

@@ -105,7 +105,9 @@
"from azureml.core.conda_dependencies import CondaDependencies\n", "from azureml.core.conda_dependencies import CondaDependencies\n",
"\n", "\n",
"environment=Environment('my-sklearn-environment')\n", "environment=Environment('my-sklearn-environment')\n",
"environment.python.conda_dependencies = CondaDependencies.create(pip_packages=[\n", "environment.python.conda_dependencies = CondaDependencies.create(conda_packages=[\n",
" 'pip==20.2.4'],\n",
" pip_packages=[\n",
" 'azureml-defaults',\n", " 'azureml-defaults',\n",
" 'inference-schema[numpy-support]',\n", " 'inference-schema[numpy-support]',\n",
" 'numpy',\n", " 'numpy',\n",

View File

@@ -358,6 +358,7 @@
"# cause errors. Please take extra care when specifying your dependencies in a production environment.\n", "# cause errors. Please take extra care when specifying your dependencies in a production environment.\n",
"myenv = CondaDependencies.create(\n", "myenv = CondaDependencies.create(\n",
" python_version=python_version,\n", " python_version=python_version,\n",
" conda_packages=['pip==20.2.4'],\n",
" pip_packages=['pyyaml', sklearn_dep, pandas_dep] + azureml_pip_packages)\n", " pip_packages=['pyyaml', sklearn_dep, pandas_dep] + azureml_pip_packages)\n",
"\n", "\n",
"with open(\"myenv.yml\",\"w\") as f:\n", "with open(\"myenv.yml\",\"w\") as f:\n",

View File

@@ -1,5 +1,11 @@
FROM mcr.microsoft.com/azureml/openmpi4.1.0-cuda11.0.3-cudnn8-ubuntu18.04:20211111.v1 FROM mcr.microsoft.com/azureml/openmpi4.1.0-cuda11.0.3-cudnn8-ubuntu18.04:20211111.v1
# CUDA repository key rotation: https://forums.developer.nvidia.com/t/notice-cuda-linux-repository-key-rotation/212771
RUN apt-key del 7fa2af80
ENV distro ubuntu1804
ENV arch x86_64
RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/$distro/$arch/3bf863cc.pub
RUN apt-get update && apt-get install -y --no-install-recommends \ RUN apt-get update && apt-get install -y --no-install-recommends \
python-opengl \ python-opengl \
rsync \ rsync \

View File

@@ -97,7 +97,7 @@
"import azureml.core\n", "import azureml.core\n",
"\n", "\n",
"# Check core SDK version number\n", "# Check core SDK version number\n",
"print(\"Azure Machine Learning SDK Version: \", azureml.core.VERSION)" "print(\"Azure Machine Learning SDK version: \", azureml.core.VERSION)"
] ]
}, },
{ {
@@ -242,11 +242,7 @@
" register(workspace=ws)\n", " register(workspace=ws)\n",
"ray_cpu_build_details = ray_cpu_env.build(workspace=ws)\n", "ray_cpu_build_details = ray_cpu_env.build(workspace=ws)\n",
"\n", "\n",
"import time\n", "ray_cpu_build_details.wait_for_completion(show_output=True)"
"while ray_cpu_build_details.status not in ['Succeeded', 'Failed']:\n",
" print(f'Awaiting completion of ray CPU environment build. Current status is: {ray_cpu_build_details.status}')\n",
" time.sleep(30)\n",
"print(f'status={ray_cpu_build_details.status}')"
] ]
}, },
{ {
@@ -279,11 +275,7 @@
" register(workspace=ws)\n", " register(workspace=ws)\n",
"ray_gpu_build_details = ray_gpu_env.build(workspace=ws)\n", "ray_gpu_build_details = ray_gpu_env.build(workspace=ws)\n",
"\n", "\n",
"import time\n", "ray_gpu_build_details.wait_for_completion(show_output=True)"
"while ray_gpu_build_details.status not in ['Succeeded', 'Failed']:\n",
" print(f'Awaiting completion of ray GPU environment build. Current status is: {ray_gpu_build_details.status}')\n",
" time.sleep(30)\n",
"print(f'status={ray_gpu_build_details.status}')"
] ]
}, },
{ {

View File

@@ -255,11 +255,7 @@
" register(workspace=ws)\n", " register(workspace=ws)\n",
"ray_env_build_details = ray_environment.build(workspace=ws)\n", "ray_env_build_details = ray_environment.build(workspace=ws)\n",
"\n", "\n",
"# import time\n", "ray_env_build_details.wait_for_completion(show_output=True)"
"while ray_env_build_details.status not in ['Succeeded', 'Failed']:\n",
" print(f'Awaiting completion of environment build. Current status is: {ray_env_build_details.status}')\n",
" time.sleep(30)\n",
"print(f'status={ray_env_build_details.status}')"
] ]
}, },
{ {

View File

@@ -223,11 +223,7 @@
" register(workspace=ws)\n", " register(workspace=ws)\n",
"ray_env_build_details = ray_environment.build(workspace=ws)\n", "ray_env_build_details = ray_environment.build(workspace=ws)\n",
"\n", "\n",
"import time\n", "ray_env_build_details.wait_for_completion(show_output=True)"
"while ray_env_build_details.status not in ['Succeeded', 'Failed']:\n",
" print(f'Awaiting completion of environment build. Current status is: {ray_env_build_details.status}')\n",
" time.sleep(30)\n",
"print(f'status={ray_env_build_details.status}')"
] ]
}, },
{ {

View File

@@ -8,10 +8,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
rm -rf /var/lib/apt/lists/* && \ rm -rf /var/lib/apt/lists/* && \
rm -rf /usr/share/man/* rm -rf /usr/share/man/*
RUN conda install -y conda=4.7.12 python=3.7 && conda clean -ay && \ RUN conda install -y conda=4.12.0 python=3.7 && conda clean -ay
pip install ray-on-aml==0.1.6 & \ RUN pip install ray-on-aml==0.1.6 & \
pip install --upgrade ray==0.8.3 \
ray[rllib,dashboard,tune]==0.8.3 & \
pip install --no-cache-dir \ pip install --no-cache-dir \
azureml-defaults \ azureml-defaults \
azureml-dataset-runtime[fuse,pandas] \ azureml-dataset-runtime[fuse,pandas] \
@@ -32,3 +30,5 @@ RUN conda install -y conda=4.7.12 python=3.7 && conda clean -ay && \
conda install -y -c conda-forge x264='1!152.20180717' ffmpeg=4.0.2 && \ conda install -y -c conda-forge x264='1!152.20180717' ffmpeg=4.0.2 && \
conda install -c anaconda opencv conda install -c anaconda opencv
RUN pip install --upgrade ray==0.8.3 \
ray[rllib,dashboard,tune]==0.8.3

View File

@@ -246,7 +246,9 @@
"ray_environment = Environment. \\\n", "ray_environment = Environment. \\\n",
" from_dockerfile(name=ray_environment_name, dockerfile=ray_environment_dockerfile_path). \\\n", " from_dockerfile(name=ray_environment_name, dockerfile=ray_environment_dockerfile_path). \\\n",
" register(workspace=ws)\n", " register(workspace=ws)\n",
"ray_gpu_build_details = ray_environment.build(workspace=ws)" "ray_cpu_build_details = ray_environment.build(workspace=ws)\n",
"\n",
"ray_cpu_build_details.wait_for_completion(show_output=True)"
] ]
}, },
{ {