update samples from Release-149 as a part of SDK release

amlrelsa-ms
2022-07-07 00:18:42 +00:00
parent 3c4770cfe5
commit 15a3ca649d
5 changed files with 24 additions and 42 deletions

View File

@@ -149,12 +149,7 @@ def get_backtest_pipeline(
         inputs=[forecasts.as_mount()],
         outputs=[data_results],
         source_directory=PROJECT_FOLDER,
-        arguments=[
-            "--forecasts",
-            forecasts,
-            "--output-dir",
-            data_results,
-        ],
+        arguments=["--forecasts", forecasts, "--output-dir", data_results],
         runconfig=run_config,
         compute_target=compute_target,
         allow_reuse=False,
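
For context, here is a minimal sketch of how the consolidated arguments list sits inside the surrounding step definition. The hunk shows only keyword arguments, so the step type (PythonScriptStep), the step name, and the script name below are assumptions rather than part of the commit:

    from azureml.pipeline.steps import PythonScriptStep

    # Hypothetical step definition; inputs, outputs, arguments, runconfig and
    # compute_target mirror the hunk above, while name and script_name are
    # illustrative placeholders.
    backtest_step = PythonScriptStep(
        name="collect-backtest-results",
        script_name="collect_results.py",
        source_directory=PROJECT_FOLDER,
        inputs=[forecasts.as_mount()],
        outputs=[data_results],
        arguments=["--forecasts", forecasts, "--output-dir", data_results],
        runconfig=run_config,
        compute_target=compute_target,
        allow_reuse=False,
    )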

View File

@@ -23,11 +23,7 @@ except ImportError:
 def infer_forecasting_dataset_tcn(
-    X_test,
-    y_test,
-    model,
-    output_path,
-    output_dataset_name="results",
+    X_test, y_test, model, output_path, output_dataset_name="results"
 ):
     y_pred, df_all = model.forecast(X_test, y_test)
@@ -71,10 +67,7 @@ def get_model(model_path, model_file_name):
 def get_args():
     parser = argparse.ArgumentParser()
     parser.add_argument(
-        "--model_name",
-        type=str,
-        dest="model_name",
-        help="Model to be loaded",
+        "--model_name", type=str, dest="model_name", help="Model to be loaded"
     )
     parser.add_argument(
@@ -108,12 +101,7 @@ def get_args():
     return args


-def get_data(
-    run,
-    fitted_model,
-    target_column_name,
-    test_dataset_name,
-):
+def get_data(run, fitted_model, target_column_name, test_dataset_name):
     # get input dataset by name
     test_dataset = Dataset.get_by_name(run.experiment.workspace, test_dataset_name)
@@ -159,10 +147,7 @@ if __name__ == "__main__":
     fitted_model = get_model(model_path, model_file_name)
     X_test_df, y_test = get_data(
-        run,
-        fitted_model,
-        target_column_name,
-        test_dataset_name,
+        run, fitted_model, target_column_name, test_dataset_name
     )
     infer_forecasting_dataset_tcn(
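
All of the hunks in this file are formatting-only: signatures and calls are collapsed onto single lines with no behavioral change. As a hedged sketch, the reformatted helpers fit together roughly as follows at runtime; the placeholder values below stand in for the argparse results and dataset names that the hunks do not show:

    from azureml.core import Run

    # Placeholder values; the real script obtains these via get_args() / argparse.
    model_path = "outputs"
    model_file_name = "model.pt"
    target_column_name = "y"
    test_dataset_name = "test_dataset"
    output_path = "forecast_output"

    run = Run.get_context()
    fitted_model = get_model(model_path, model_file_name)
    X_test_df, y_test = get_data(run, fitted_model, target_column_name, test_dataset_name)
    infer_forecasting_dataset_tcn(X_test_df, y_test, fitted_model, output_path)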

View File

@@ -69,17 +69,19 @@
"# ONNX Model Zoo and save it in the same folder as this tutorial\n",
"\n",
"import urllib.request\n",
"import os\n",
"\n",
"onnx_model_url = \"https://github.com/onnx/models/blob/main/vision/body_analysis/emotion_ferplus/model/emotion-ferplus-7.tar.gz?raw=true\"\n",
"\n",
"urllib.request.urlretrieve(onnx_model_url, filename=\"emotion-ferplus-7.tar.gz\")\n",
"os.mkdir(\"emotion_ferplus\")\n",
"\n",
"# the ! magic command tells our jupyter notebook kernel to run the following line of \n",
"# code from the command line instead of the notebook kernel\n",
"\n",
"# We use tar and xvcf to unzip the files we just retrieved from the ONNX model zoo\n",
"\n",
"!tar xvzf emotion-ferplus-7.tar.gz"
"!tar xvzf emotion-ferplus-7.tar.gz -C emotion_ferplus"
]
},
{
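
The cell above now creates a dedicated emotion_ferplus directory and extracts into it with the ! shell magic. For readers running the same steps outside Jupyter, a rough standard-library equivalent (same URL and paths as the cell) would be:

    import os
    import tarfile
    import urllib.request

    onnx_model_url = "https://github.com/onnx/models/blob/main/vision/body_analysis/emotion_ferplus/model/emotion-ferplus-7.tar.gz?raw=true"
    urllib.request.urlretrieve(onnx_model_url, filename="emotion-ferplus-7.tar.gz")

    os.makedirs("emotion_ferplus", exist_ok=True)  # tolerant of re-runs, unlike os.mkdir
    with tarfile.open("emotion-ferplus-7.tar.gz", "r:gz") as archive:
        archive.extractall(path="emotion_ferplus")  # mirrors `tar xvzf ... -C emotion_ferplus`
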
@@ -130,7 +132,7 @@
"metadata": {},
"outputs": [],
"source": [
"model_dir = \"emotion_ferplus\" # replace this with the location of your model files\n",
"model_dir = \"emotion_ferplus/model\" # replace this with the location of your model files\n",
"\n",
"# leave as is if it's in the same folder as this notebook"
]
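
model_dir now points at the model subfolder created by the archive, which is also where the test_data_set_* folders used later in the notebook live. A quick sanity check on that path could look like the sketch below; the .onnx file name is not shown in the diff, so it is globbed rather than hard-coded:

    import glob
    import os
    import onnx

    model_dir = "emotion_ferplus/model"
    onnx_path = glob.glob(os.path.join(model_dir, "*.onnx"))[0]  # assumes a single .onnx file
    onnx_model = onnx.load(onnx_path)
    onnx.checker.check_model(onnx_model)  # raises if the model protobuf is invalid
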
@@ -496,13 +498,12 @@
"\n",
"# to use parsers to read in our model/data\n",
"import json\n",
"import os\n",
"\n",
"test_inputs = []\n",
"test_outputs = []\n",
"\n",
"# read in 3 testing images from .pb files\n",
"test_data_size = 3\n",
"# read in 1 testing images from .pb files\n",
"test_data_size = 1\n",
"\n",
"for num in np.arange(test_data_size):\n",
" input_test_data = os.path.join(model_dir, 'test_data_set_{0}'.format(num), 'input_0.pb')\n",
@@ -533,7 +534,7 @@
    },
    "source": [
     "### Show some sample images\n",
-    "We use `matplotlib` to plot 3 test images from the dataset."
+    "We use `matplotlib` to plot 1 test images from the dataset."
    ]
   },
   {
@@ -547,7 +548,7 @@
"outputs": [],
"source": [
"plt.figure(figsize = (20, 20))\n",
"for test_image in np.arange(3):\n",
"for test_image in np.arange(test_data_size):\n",
" test_inputs[test_image].reshape(1, 64, 64)\n",
" plt.subplot(1, 8, test_image+1)\n",
" plt.axhline('')\n",

View File

@@ -69,10 +69,12 @@
"# ONNX Model Zoo and save it in the same folder as this tutorial\n",
"\n",
"import urllib.request\n",
"import os\n",
"\n",
"onnx_model_url = \"https://github.com/onnx/models/blob/main/vision/classification/mnist/model/mnist-7.tar.gz?raw=true\"\n",
"\n",
"urllib.request.urlretrieve(onnx_model_url, filename=\"mnist-7.tar.gz\")"
"urllib.request.urlretrieve(onnx_model_url, filename=\"mnist-7.tar.gz\")\n",
"os.mkdir(\"mnist\")"
]
},
{
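
One small caveat with the new os.mkdir("mnist") line: it raises FileExistsError if the cell is run twice. If re-runs matter, an idempotent variant (not what the commit uses) is:

    import os

    os.makedirs("mnist", exist_ok=True)  # no error if the directory already exists
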
@@ -86,7 +88,7 @@
"\n",
"# We use tar and xvcf to unzip the files we just retrieved from the ONNX model zoo\n",
"\n",
"!tar xvzf mnist-7.tar.gz"
"!tar xvzf mnist-7.tar.gz -C mnist"
]
},
{
@@ -137,7 +139,7 @@
"metadata": {},
"outputs": [],
"source": [
"model_dir = \"mnist\" # replace this with the location of your model files\n",
"model_dir = \"mnist/model\" # replace this with the location of your model files\n",
"\n",
"# leave as is if it's in the same folder as this notebook"
]
@@ -447,13 +449,12 @@
"\n",
"# to use parsers to read in our model/data\n",
"import json\n",
"import os\n",
"\n",
"test_inputs = []\n",
"test_outputs = []\n",
"\n",
"# read in 3 testing images from .pb files\n",
"test_data_size = 3\n",
"# read in 1 testing images from .pb files\n",
"test_data_size = 1\n",
"\n",
"for i in np.arange(test_data_size):\n",
" input_test_data = os.path.join(model_dir, 'test_data_set_{0}'.format(i), 'input_0.pb')\n",
@@ -486,7 +487,7 @@
    },
    "source": [
     "### Show some sample images\n",
-    "We use `matplotlib` to plot 3 test images from the dataset."
+    "We use `matplotlib` to plot 1 test images from the dataset."
    ]
   },
   {
@@ -500,7 +501,7 @@
"outputs": [],
"source": [
"plt.figure(figsize = (16, 6))\n",
"for test_image in np.arange(3):\n",
"for test_image in np.arange(test_data_size):\n",
" plt.subplot(1, 15, test_image+1)\n",
" plt.axhline('')\n",
" plt.axvline('')\n",

View File

@@ -8,8 +8,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
     rm -rf /var/lib/apt/lists/* && \
     rm -rf /usr/share/man/*
-RUN conda install -y conda=4.12.0 python=3.7 && conda clean -ay
-RUN pip install ray-on-aml==0.1.6 & \
+RUN conda install -y conda=4.13.0 python=3.7 && conda clean -ay
+RUN pip install ray-on-aml==0.2.1 & \
     pip install --no-cache-dir \
     azureml-defaults \
     azureml-dataset-runtime[fuse,pandas] \