diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-backtest-single-model/pipeline_helper.py b/how-to-use-azureml/automated-machine-learning/forecasting-backtest-single-model/pipeline_helper.py
index c0079343..753bba48 100644
--- a/how-to-use-azureml/automated-machine-learning/forecasting-backtest-single-model/pipeline_helper.py
+++ b/how-to-use-azureml/automated-machine-learning/forecasting-backtest-single-model/pipeline_helper.py
@@ -149,12 +149,7 @@ def get_backtest_pipeline(
         inputs=[forecasts.as_mount()],
         outputs=[data_results],
         source_directory=PROJECT_FOLDER,
-        arguments=[
-            "--forecasts",
-            forecasts,
-            "--output-dir",
-            data_results,
-        ],
+        arguments=["--forecasts", forecasts, "--output-dir", data_results],
         runconfig=run_config,
         compute_target=compute_target,
         allow_reuse=False,
diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-pipelines/scripts/infer.py b/how-to-use-azureml/automated-machine-learning/forecasting-pipelines/scripts/infer.py
index 16b22f66..c213146a 100644
--- a/how-to-use-azureml/automated-machine-learning/forecasting-pipelines/scripts/infer.py
+++ b/how-to-use-azureml/automated-machine-learning/forecasting-pipelines/scripts/infer.py
@@ -23,11 +23,7 @@ except ImportError:


 def infer_forecasting_dataset_tcn(
-    X_test,
-    y_test,
-    model,
-    output_path,
-    output_dataset_name="results",
+    X_test, y_test, model, output_path, output_dataset_name="results"
 ):
     y_pred, df_all = model.forecast(X_test, y_test)

@@ -71,10 +67,7 @@ def get_model(model_path, model_file_name):
 def get_args():
     parser = argparse.ArgumentParser()
     parser.add_argument(
-        "--model_name",
-        type=str,
-        dest="model_name",
-        help="Model to be loaded",
+        "--model_name", type=str, dest="model_name", help="Model to be loaded"
     )

     parser.add_argument(
@@ -108,12 +101,7 @@ def get_args():
     return args


-def get_data(
-    run,
-    fitted_model,
-    target_column_name,
-    test_dataset_name,
-):
+def get_data(run, fitted_model, target_column_name, test_dataset_name):
     # get input dataset by name
     test_dataset = Dataset.get_by_name(run.experiment.workspace, test_dataset_name)

@@ -159,10 +147,7 @@ if __name__ == "__main__":
     fitted_model = get_model(model_path, model_file_name)

     X_test_df, y_test = get_data(
-        run,
-        fitted_model,
-        target_column_name,
-        test_dataset_name,
+        run, fitted_model, target_column_name, test_dataset_name
     )

     infer_forecasting_dataset_tcn(
diff --git a/how-to-use-azureml/deployment/onnx/onnx-inference-facial-expression-recognition-deploy.ipynb b/how-to-use-azureml/deployment/onnx/onnx-inference-facial-expression-recognition-deploy.ipynb
index 3b37c55e..9d9e2590 100644
--- a/how-to-use-azureml/deployment/onnx/onnx-inference-facial-expression-recognition-deploy.ipynb
+++ b/how-to-use-azureml/deployment/onnx/onnx-inference-facial-expression-recognition-deploy.ipynb
@@ -69,17 +69,19 @@
     "# ONNX Model Zoo and save it in the same folder as this tutorial\n",
     "\n",
     "import urllib.request\n",
+    "import os\n",
     "\n",
     "onnx_model_url = \"https://github.com/onnx/models/blob/main/vision/body_analysis/emotion_ferplus/model/emotion-ferplus-7.tar.gz?raw=true\"\n",
     "\n",
     "urllib.request.urlretrieve(onnx_model_url, filename=\"emotion-ferplus-7.tar.gz\")\n",
+    "os.mkdir(\"emotion_ferplus\")\n",
     "\n",
     "# the ! magic command tells our jupyter notebook kernel to run the following line of \n",
     "# code from the command line instead of the notebook kernel\n",
     "\n",
     "# We use tar and xvzf to unzip the files we just retrieved from the ONNX model zoo\n",
     "\n",
-    "!tar xvzf emotion-ferplus-7.tar.gz"
+    "!tar xvzf emotion-ferplus-7.tar.gz -C emotion_ferplus"
    ]
   },
   {
@@ -130,7 +132,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "model_dir = \"emotion_ferplus\" # replace this with the location of your model files\n",
+    "model_dir = \"emotion_ferplus/model\" # replace this with the location of your model files\n",
     "\n",
     "# leave as is if it's in the same folder as this notebook"
    ]
   },
   {
@@ -496,13 +498,12 @@
     "\n",
     "# to use parsers to read in our model/data\n",
     "import json\n",
-    "import os\n",
     "\n",
     "test_inputs = []\n",
     "test_outputs = []\n",
     "\n",
-    "# read in 3 testing images from .pb files\n",
-    "test_data_size = 3\n",
+    "# read in 1 testing image from .pb files\n",
+    "test_data_size = 1\n",
     "\n",
     "for num in np.arange(test_data_size):\n",
     "    input_test_data = os.path.join(model_dir, 'test_data_set_{0}'.format(num), 'input_0.pb')\n",
@@ -533,7 +534,7 @@
    },
    "source": [
     "### Show some sample images\n",
-    "We use `matplotlib` to plot 3 test images from the dataset."
+    "We use `matplotlib` to plot 1 test image from the dataset."
    ]
   },
   {
@@ -547,7 +548,7 @@
    "outputs": [],
    "source": [
     "plt.figure(figsize = (20, 20))\n",
-    "for test_image in np.arange(3):\n",
+    "for test_image in np.arange(test_data_size):\n",
     "    test_inputs[test_image].reshape(1, 64, 64)\n",
     "    plt.subplot(1, 8, test_image+1)\n",
     "    plt.axhline('')\n",
diff --git a/how-to-use-azureml/deployment/onnx/onnx-inference-mnist-deploy.ipynb b/how-to-use-azureml/deployment/onnx/onnx-inference-mnist-deploy.ipynb
index 7d481129..67466c82 100644
--- a/how-to-use-azureml/deployment/onnx/onnx-inference-mnist-deploy.ipynb
+++ b/how-to-use-azureml/deployment/onnx/onnx-inference-mnist-deploy.ipynb
@@ -69,10 +69,12 @@
     "# ONNX Model Zoo and save it in the same folder as this tutorial\n",
     "\n",
     "import urllib.request\n",
+    "import os\n",
     "\n",
     "onnx_model_url = \"https://github.com/onnx/models/blob/main/vision/classification/mnist/model/mnist-7.tar.gz?raw=true\"\n",
     "\n",
-    "urllib.request.urlretrieve(onnx_model_url, filename=\"mnist-7.tar.gz\")"
+    "urllib.request.urlretrieve(onnx_model_url, filename=\"mnist-7.tar.gz\")\n",
+    "os.mkdir(\"mnist\")"
    ]
   },
   {
@@ -86,7 +88,7 @@
     "\n",
     "# We use tar and xvzf to unzip the files we just retrieved from the ONNX model zoo\n",
     "\n",
-    "!tar xvzf mnist-7.tar.gz"
+    "!tar xvzf mnist-7.tar.gz -C mnist"
    ]
   },
   {
@@ -137,7 +139,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "model_dir = \"mnist\" # replace this with the location of your model files\n",
+    "model_dir = \"mnist/model\" # replace this with the location of your model files\n",
     "\n",
     "# leave as is if it's in the same folder as this notebook"
    ]
   },
   {
@@ -447,13 +449,12 @@
     "\n",
     "# to use parsers to read in our model/data\n",
     "import json\n",
-    "import os\n",
     "\n",
     "test_inputs = []\n",
     "test_outputs = []\n",
     "\n",
-    "# read in 3 testing images from .pb files\n",
-    "test_data_size = 3\n",
+    "# read in 1 testing image from .pb files\n",
+    "test_data_size = 1\n",
     "\n",
     "for i in np.arange(test_data_size):\n",
     "    input_test_data = os.path.join(model_dir, 'test_data_set_{0}'.format(i), 'input_0.pb')\n",
@@ -486,7 +487,7 @@
    },
    "source": [
     "### Show some sample images\n",
-    "We use `matplotlib` to plot 3 test images from the dataset."
+ "We use `matplotlib` to plot 1 test images from the dataset." ] }, { @@ -500,7 +501,7 @@ "outputs": [], "source": [ "plt.figure(figsize = (16, 6))\n", - "for test_image in np.arange(3):\n", + "for test_image in np.arange(test_data_size):\n", " plt.subplot(1, 15, test_image+1)\n", " plt.axhline('')\n", " plt.axvline('')\n", diff --git a/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/files/docker/Dockerfile b/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/files/docker/Dockerfile index 2a3c79fb..816df2bf 100644 --- a/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/files/docker/Dockerfile +++ b/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/files/docker/Dockerfile @@ -8,8 +8,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ rm -rf /var/lib/apt/lists/* && \ rm -rf /usr/share/man/* -RUN conda install -y conda=4.12.0 python=3.7 && conda clean -ay -RUN pip install ray-on-aml==0.1.6 & \ +RUN conda install -y conda=4.13.0 python=3.7 && conda clean -ay +RUN pip install ray-on-aml==0.2.1 & \ pip install --no-cache-dir \ azureml-defaults \ azureml-dataset-runtime[fuse,pandas] \