Mirror of https://github.com/Azure/MachineLearningNotebooks.git (synced 2025-12-19 17:17:04 -05:00)
update samples from Release-60 as a part of SDK release
@@ -413,7 +413,7 @@
 "\n",
 "In this tutorial, the target is AmlCompute. All files in the script folder are uploaded into the cluster nodes for execution. The data_folder is set to use the dataset.\n",
 "\n",
-"First, create the environment that contains: the scikit-learn library, azureml-dataprep required for accessing the dataset, and azureml-defaults which contains the dependencies for logging metrics. The azureml-defaults also contains the dependencies required for deploying the model as a web service later in the part 2 of the tutorial.\n",
+"First, create the environment that contains: the scikit-learn library, azureml-dataset-runtime required for accessing the dataset, and azureml-defaults which contains the dependencies for logging metrics. The azureml-defaults also contains the dependencies required for deploying the model as a web service later in the part 2 of the tutorial.\n",
 "\n",
 "Once the environment is defined, register it with the Workspace to re-use it in part 2 of the tutorial."
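The markdown cell above ends by telling the reader to register the environment for re-use in part 2. A minimal sketch of that registration step, assuming a workspace config.json is available locally (the `ws` name is illustrative):

    from azureml.core import Environment, Workspace

    ws = Workspace.from_config()       # loads the workspace from config.json
    env = Environment('tutorial-env')  # the environment name used in this tutorial
    env.register(workspace=ws)         # registered environments are versioned per workspace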
@@ -429,7 +429,7 @@
 "\n",
 "# to install required packages\n",
 "env = Environment('tutorial-env')\n",
-"cd = CondaDependencies.create(pip_packages=['azureml-dataprep[pandas,fuse]>=1.1.14', 'azureml-defaults'], conda_packages = ['scikit-learn==0.22.1'])\n",
+"cd = CondaDependencies.create(pip_packages=['azureml-dataset-runtime[pandas,fuse]', 'azureml-defaults'], conda_packages = ['scikit-learn==0.22.1'])\n",
 "\n",
 "env.python.conda_dependencies = cd\n",
 "\n",
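For context on how this environment is consumed, here is a sketch of attaching it to a training run with ScriptRunConfig; the experiment name, script name, and compute_target are assumptions from the surrounding tutorial, not part of this commit:

    from azureml.core import Experiment, ScriptRunConfig

    src = ScriptRunConfig(source_directory='.',
                          script='train.py',             # hypothetical training script
                          compute_target=compute_target,  # assumed AmlCompute cluster
                          environment=env)
    run = Experiment(ws, 'tutorial-experiment').submit(src)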
@@ -65,7 +65,7 @@
 "\n",
 "# to install required packages\n",
 "env = Environment('tutorial-env')\n",
-"cd = CondaDependencies.create(pip_packages=['azureml-dataprep[pandas,fuse]>=1.1.14', 'azureml-defaults'], conda_packages = ['scikit-learn==0.22.1'])\n",
+"cd = CondaDependencies.create(pip_packages=['azureml-dataset-runtime[pandas,fuse]', 'azureml-defaults'], conda_packages = ['scikit-learn==0.22.1'])\n",
 "\n",
 "env.python.conda_dependencies = cd\n",
 "\n",
@@ -81,8 +81,8 @@
 "from azureml.core.conda_dependencies import CondaDependencies\n",
 "\n",
 "# to install required packages\n",
-"env = Environment('tutorial-env')\n",
-"cd = CondaDependencies.create(pip_packages=['azureml-dataprep[pandas,fuse]>=1.1.14', 'azureml-defaults', 'azure-storage-blob', 'encrypted-inference==0.9'], conda_packages = ['scikit-learn==0.22.1'])\n",
+"env = Environment('tutorial-encryption-env')\n",
+"cd = CondaDependencies.create(pip_packages=['azureml-dataset-runtime[pandas,fuse]', 'azureml-defaults', 'azure-storage-blob', 'encrypted-inference==0.9'], conda_packages = ['scikit-learn==0.22.1'])\n",
 "\n",
 "env.python.conda_dependencies = cd\n",
 "\n",
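When swapping azureml-dataprep for azureml-dataset-runtime as this hunk does, it can help to inspect the conda specification before registering anything. A one-line check using the `cd` object from the cell above:

    # YAML view of the environment that will be built on the compute target
    print(cd.serialize_to_string())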
@@ -282,7 +282,7 @@
 "ws = Workspace.from_config()\n",
 "model = Model(ws, 'sklearn_mnist')\n",
 "\n",
-"myenv = Environment.get(workspace=ws, name=\"tutorial-env\")\n",
+"myenv = Environment.get(workspace=ws, name=\"tutorial-encryption-env\")\n",
 "inference_config = InferenceConfig(entry_script=\"score.py\", environment=myenv)\n",
 "\n",
 "service_name = 'sklearn-mnist-svc-' + str(uuid.uuid4())[:4]\n",
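The renamed environment feeds the InferenceConfig shown in the hunk. A sketch of the deployment call that typically follows in this tutorial; the ACI sizing is an assumption:

    from azureml.core.model import Model
    from azureml.core.webservice import AciWebservice

    aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)  # assumed sizing
    service = Model.deploy(workspace=ws,
                           name=service_name,
                           models=[model],
                           inference_config=inference_config,
                           deployment_config=aci_config)
    service.wait_for_deployment(show_output=True)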
@@ -124,9 +124,7 @@
 "\n",
 "input_images = Dataset.File.from_files((batchscore_blob, \"batchscoring/images/\"))\n",
 "label_ds = Dataset.File.from_files((batchscore_blob, \"batchscoring/labels/\"))\n",
-"output_dir = PipelineData(name=\"scores\", \n",
-"                          datastore=def_data_store, \n",
-"                          output_path_on_compute=\"batchscoring/results\")"
+"output_dir = PipelineData(name=\"scores\", datastore=def_data_store)"
 ]
},
{
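The simplified PipelineData no longer pins output_path_on_compute, so scores land in an auto-generated path on the default datastore. For orientation, a sketch of how output_dir is wired into the scoring step later in this notebook; the step name and arguments are illustrative:

    from azureml.pipeline.steps import ParallelRunStep

    batch_score_step = ParallelRunStep(
        name="batch-score",
        parallel_run_config=parallel_run_config,
        inputs=[input_images.as_named_input("input_images")],
        output=output_dir,                        # the PipelineData declared above
        arguments=["--model_name", "inception"],  # hypothetical script argument
        allow_reuse=True,
    )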
@@ -142,15 +140,13 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"input_images = input_images.register(workspace = ws, name = \"input_images\")\n",
-"label_ds = label_ds.register(workspace = ws, name = \"label_ds\", create_new_version=True)"
+"input_images = input_images.register(workspace=ws, name=\"input_images\")\n",
+"label_ds = label_ds.register(workspace=ws, name=\"label_ds\", create_new_version=True)"
 ]
},
{
-"cell_type": "code",
-"execution_count": null,
+"cell_type": "markdown",
 "metadata": {},
-"outputs": [],
 "source": [
 "## Download and register the model"
 ]
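Once registered as above, both datasets can be fetched by name in later sessions rather than re-created from the datastore; a small sketch:

    from azureml.core import Dataset

    input_images = Dataset.get_by_name(workspace=ws, name="input_images")
    label_ds = Dataset.get_by_name(workspace=ws, name="label_ds")  # latest version by default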
@@ -277,7 +273,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"Before running the pipeline, you create an object that defines the python environment and dependencies needed by your script `batch_scoring.py`. The main dependency required is Tensorflow, but you also install `azureml-defaults` for background processes from the SDK. Create a `RunConfiguration` object using the dependencies, and also specify Docker and Docker-GPU support."
+"Before running the pipeline, you create an object that defines the python environment and dependencies needed by your script `batch_scoring.py`. The main dependency required is Tensorflow, but you also install `azureml-core` and `azureml-dataset-runtime[fuse]` for background processes from the SDK. Create a `RunConfiguration` object using the dependencies, and also specify Docker and Docker-GPU support."
 ]
},
{
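A sketch of the RunConfiguration the markdown cell describes, built from a CondaDependencies object with Docker GPU support; note the next hunk shows the notebook attaching the dependencies to an Environment instead, so treat this as illustrative:

    from azureml.core.runconfig import RunConfiguration, DEFAULT_GPU_IMAGE

    run_config = RunConfiguration(conda_dependencies=cd)
    run_config.environment.docker.enabled = True
    run_config.environment.docker.base_image = DEFAULT_GPU_IMAGE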
@@ -291,7 +287,7 @@
 "from azureml.core.runconfig import DEFAULT_GPU_IMAGE\n",
 "\n",
 "cd = CondaDependencies.create(pip_packages=[\"tensorflow-gpu==1.15.2\",\n",
-"                                            \"azureml-core\", \"azureml-dataprep[fuse]\"])\n",
+"                                            \"azureml-core\", \"azureml-dataset-runtime[fuse]\"])\n",
 "\n",
 "env = Environment(name=\"parallelenv\")\n",
 "env.python.conda_dependencies=cd\n",
@@ -319,6 +315,7 @@
 "    entry_script=\"batch_scoring.py\",\n",
 "    source_directory=\"scripts\",\n",
 "    output_action=\"append_row\",\n",
+"    append_row_file_name=\"parallel_run_step.txt\",\n",
 "    mini_batch_size=\"20\",\n",
 "    error_threshold=1,\n",
 "    compute_target=compute_target,\n",
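Pulling the hunk's parameters together, a sketch of the full ParallelRunConfig with the newly added append_row_file_name; the environment binding, node_count, and process_count_per_node are assumptions based on the surrounding tutorial:

    from azureml.pipeline.steps import ParallelRunConfig

    parallel_run_config = ParallelRunConfig(
        environment=env,                  # the 'parallelenv' Environment defined earlier
        entry_script="batch_scoring.py",
        source_directory="scripts",
        output_action="append_row",       # concatenate per-batch results into one file
        append_row_file_name="parallel_run_step.txt",
        mini_batch_size="20",
        error_threshold=1,
        compute_target=compute_target,
        process_count_per_node=2,         # assumed parallelism settings
        node_count=1,
    )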
@@ -424,15 +421,15 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"batch_run = next(pipeline_run.get_children())\n",
-"batch_output = batch_run.get_output_data(\"scores\")\n",
-"batch_output.download(local_path=\"inception_results\")\n",
-"\n",
-"import pandas as pd\n",
-"for root, dirs, files in os.walk(\"inception_results\"):\n",
-"    for file in files:\n",
-"        if file.endswith(\"parallel_run_step.txt\"):\n",
-"            result_file = os.path.join(root,file)\n",
+"import tempfile\n",
+"\n",
+"batch_run = pipeline_run.find_step_run(batch_score_step.name)[0]\n",
+"batch_output = batch_run.get_output_data(output_dir.name)\n",
+"\n",
+"target_dir = tempfile.mkdtemp()\n",
+"batch_output.download(local_path=target_dir)\n",
+"result_file = os.path.join(target_dir, batch_output.path_on_datastore, parallel_run_config.append_row_file_name)\n",
+"\n",
 "df = pd.read_csv(result_file, delimiter=\":\", header=None)\n",
 "df.columns = [\"Filename\", \"Prediction\"]\n",
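The rewritten retrieval joins the datastore path with parallel_run_config.append_row_file_name instead of walking the download directory, so it keeps working if the output layout changes. A quick, purely illustrative sanity check of the parsed results:

    print(df.shape)     # one row per scored image
    print(df.head(10))  # filename / predicted-class pairs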