Mirror of https://github.com/Azure/MachineLearningNotebooks.git
Synced 2025-12-20 09:37:04 -05:00

Compare commits: master...azureml-sd (1 commit)
Commit: 0772c157a8
@@ -103,7 +103,7 @@
 "source": [
 "import azureml.core\n",
 "\n",
-"print(\"This notebook was created using version 1.0.79 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.0.81 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
 },

@@ -30,7 +30,7 @@ dependencies:
 - pytorch-transformers==1.0.0
 - spacy==2.1.8
 - joblib
-- onnxruntime==0.4.0
+- onnxruntime==1.0.0
 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz

 channels:

@@ -31,7 +31,7 @@ dependencies:
 - pytorch-transformers==1.0.0
 - spacy==2.1.8
 - joblib
-- onnxruntime==0.4.0
+- onnxruntime==1.0.0
 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz

 channels:

@@ -288,7 +288,7 @@
 "|**blacklist_models** | *List* of *strings* indicating machine learning algorithms for AutoML to avoid in this run. <br><br> Allowed values for **Classification**<br><i>LogisticRegression</i><br><i>SGD</i><br><i>MultinomialNaiveBayes</i><br><i>BernoulliNaiveBayes</i><br><i>SVM</i><br><i>LinearSVM</i><br><i>KNN</i><br><i>DecisionTree</i><br><i>RandomForest</i><br><i>ExtremeRandomTrees</i><br><i>LightGBM</i><br><i>GradientBoosting</i><br><i>TensorFlowDNN</i><br><i>TensorFlowLinearClassifier</i><br><br>Allowed values for **Regression**<br><i>ElasticNet</i><br><i>GradientBoosting</i><br><i>DecisionTree</i><br><i>KNN</i><br><i>LassoLars</i><br><i>SGD</i><br><i>RandomForest</i><br><i>ExtremeRandomTrees</i><br><i>LightGBM</i><br><i>TensorFlowLinearRegressor</i><br><i>TensorFlowDNN</i><br><br>Allowed values for **Forecasting**<br><i>ElasticNet</i><br><i>GradientBoosting</i><br><i>DecisionTree</i><br><i>KNN</i><br><i>LassoLars</i><br><i>SGD</i><br><i>RandomForest</i><br><i>ExtremeRandomTrees</i><br><i>LightGBM</i><br><i>TensorFlowLinearRegressor</i><br><i>TensorFlowDNN</i><br><i>Arima</i><br><i>Prophet</i>|\n",
 "| **whitelist_models** | *List* of *strings* indicating machine learning algorithms for AutoML to use in this run. Same values listed above for **blacklist_models** allowed for **whitelist_models**.|\n",
 "|**experiment_exit_score**| Value indicating the target for *primary_metric*. <br>Once the target is surpassed the run terminates.|\n",
-"|**experiment_timeout_minutes**| Maximum amount of time in minutes that all iterations combined can take before the experiment terminates.|\n",
+"|**experiment_timeout_hours**| Maximum amount of time in hours that all iterations combined can take before the experiment terminates.|\n",
 "|**enable_early_stopping**| Flag to enable early termination if the score is not improving in the short term.|\n",
 "|**featurization**| 'auto' / 'off' Indicator for whether featurization step should be done automatically or not. Note: If the input data is sparse, featurization cannot be turned on.|\n",
 "|**n_cross_validations**|Number of cross validation splits.|\n",

@@ -306,7 +306,7 @@
 "outputs": [],
 "source": [
 "automl_settings = {\n",
-"    \"experiment_timeout_minutes\" : 20,\n",
+"    \"experiment_timeout_hours\" : 0.3,\n",
 "    \"enable_early_stopping\" : True,\n",
 "    \"iteration_timeout_minutes\": 5,\n",
 "    \"max_concurrent_iterations\": 4,\n",
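
Note: this hunk reflects the SDK's rename of the AutoML setting `experiment_timeout_minutes` to `experiment_timeout_hours` (20 minutes becomes 0.3 hours, roughly). A minimal sketch of the updated settings dict, using only values taken from this diff:

```python
# Renamed AutoML experiment-level timeout; the per-iteration timeout is still in minutes.
automl_settings = {
    "experiment_timeout_hours": 0.3,   # was: "experiment_timeout_minutes": 20
    "enable_early_stopping": True,
    "iteration_timeout_minutes": 5,
    "max_concurrent_iterations": 4,
}
```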

@@ -694,10 +694,10 @@
 "from azureml.core.webservice import AciWebservice\n",
 "from azureml.core.webservice import Webservice\n",
 "from azureml.core.model import Model\n",
+"from azureml.core.environment import Environment\n",
 "\n",
-"inference_config = InferenceConfig(runtime = \"python\", \n",
-"                                   entry_script = script_file_name,\n",
-"                                   conda_file = conda_env_file_name)\n",
+"myenv = Environment.from_conda_specification(name=\"myenv\", file_path=conda_env_file_name)\n",
+"inference_config = InferenceConfig(entry_script=script_file_name, environment=myenv)\n",
 "\n",
 "aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1, \n",
 "                                               memory_gb = 1, \n",
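
Note: this hunk moves the deployment cell from the deprecated `runtime`/`conda_file` arguments of `InferenceConfig` to an `Environment` built from the conda specification. A minimal sketch of the resulting flow, assuming `ws`, `model`, `script_file_name`, and `conda_env_file_name` are defined as in the notebook:

```python
from azureml.core.environment import Environment
from azureml.core.model import InferenceConfig, Model
from azureml.core.webservice import AciWebservice

# The Environment consumes the same conda file the old conda_file= argument took.
myenv = Environment.from_conda_specification(name="myenv", file_path=conda_env_file_name)
inference_config = InferenceConfig(entry_script=script_file_name, environment=myenv)

aciconfig = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)
# service = Model.deploy(ws, "aci-service", [model], inference_config, aciconfig)
```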

@@ -8,6 +8,6 @@ dependencies:
 - azureml-widgets
 - matplotlib
 - pandas_ml
-- onnxruntime==0.4.0
+- onnxruntime==1.0.0
 - azureml-explain-model
 - azureml-contrib-interpret

@@ -213,7 +213,7 @@
 "    \"preprocess\": True,\n",
 "    \"enable_early_stopping\": True,\n",
 "    \"max_concurrent_iterations\": 2, # This is a limit for testing purposes, please increase it as per cluster size\n",
-"    \"experiment_timeout_minutes\": 10, # This is a time limit for testing purposes; remove it for real use cases, as it will drastically limit the ability to find the best model possible\n",
+"    \"experiment_timeout_hours\": 0.2, # This is a time limit for testing purposes; remove it for real use cases, as it will drastically limit the ability to find the best model possible\n",
 "    \"verbosity\": logging.INFO,\n",
 "}\n",
 "\n",

@@ -305,7 +305,7 @@
 "source": [
 "#### Explain model\n",
 "\n",
-"Automated ML models can be explained and visualized using the SDK Explainability library. [Learn how to use the explainer](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/model-explanation-remote-amlcompute/auto-ml-model-explanations-remote-compute.ipynb)."
+"Automated ML models can be explained and visualized using the SDK Explainability library. "
 ]
 },
 {

@@ -334,17 +334,7 @@
 "metadata": {},
 "source": [
 "#### Print the properties of the model\n",
-"The fitted_model is a python object and you can read the different properties of the object.\n",
-"See *Print the properties of the model* section in [this sample notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/classification/auto-ml-classification.ipynb)."
-]
-},
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"### Deploy\n",
-"\n",
-"To deploy the model into a web service endpoint, see _Deploy_ section in [this sample notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/classification-with-deployment/auto-ml-classification-with-deployment.ipynb)"
+"The fitted_model is a python object and you can read the different properties of the object.\n"
 ]
 },
 {

@@ -210,7 +210,24 @@
 "metadata": {},
 "source": [
 "## Data Ingestion Pipeline \n",
-"For this demo, we will use NOAA weather data from [Azure Open Datasets](https://azure.microsoft.com/services/open-datasets/). You can replace this with your own dataset, or you can skip this pipeline if you already have a time-series based `TabularDataset`.\n",
+"For this demo, we will use NOAA weather data from [Azure Open Datasets](https://azure.microsoft.com/services/open-datasets/). You can replace this with your own dataset, or you can skip this pipeline if you already have a time-series based `TabularDataset`.\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"# The name and target column of the Dataset to create \n",
+"dataset = \"NOAA-Weather-DS4\"\n",
+"target_column_name = \"temperature\""
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
 "\n",
 "### Upload Data Step\n",
 "The data ingestion pipeline has a single step with a script to query the latest weather data and upload it to the blob store. During the first run, the script will create and register a time-series based `TabularDataset` with the past one week of weather data. For each subsequent run, the script will create a partition in the blob store by querying NOAA for new weather data since the last modified time of the dataset (`dataset.data_changed_time`) and creating a data.csv file."

@@ -225,8 +242,6 @@
 "from azureml.pipeline.core import Pipeline, PipelineParameter\n",
 "from azureml.pipeline.steps import PythonScriptStep\n",
 "\n",
-"# The name of the Dataset to create \n",
-"dataset = \"NOAA-Weather-DS4\"\n",
 "ds_name = PipelineParameter(name=\"ds_name\", default_value=dataset)\n",
 "upload_data_step = PythonScriptStep(script_name=\"upload_weather_data.py\", \n",
 "                                    allow_reuse=False,\n",

@@ -272,7 +287,7 @@
 "## Training Pipeline\n",
 "### Prepare Training Data Step\n",
 "\n",
-"Script to bring data into common X,y format. We need to set allow_reuse flag to False to allow the pipeline to run even when inputs don't change. We also need the name of the model to check the time the model was last trained."
+"Script to check if new data is available since the model was last trained. If no new data is available, we cancel the remaining pipeline steps. We need to set the allow_reuse flag to False so the pipeline runs even when the inputs don't change. We also need the name of the model to check the time the model was last trained."
 ]
 },
 {

@@ -283,11 +298,8 @@
 "source": [
 "from azureml.pipeline.core import PipelineData\n",
 "\n",
-"target_column = PipelineParameter(\"target_column\", default_value=\"y\")\n",
 "# The model name with which to register the trained model in the workspace.\n",
-"model_name = PipelineParameter(\"model_name\", default_value=\"y\")\n",
-"output_x = PipelineData(\"output_x\", datastore=dstor)\n",
-"output_y = PipelineData(\"output_y\", datastore=dstor)"
+"model_name = PipelineParameter(\"model_name\", default_value=\"noaaweatherds\")"
 ]
 },
 {

@@ -299,16 +311,23 @@
 "data_prep_step = PythonScriptStep(script_name=\"check_data.py\", \n",
 "                                  allow_reuse=False,\n",
 "                                  name=\"check_data\",\n",
-"                                  arguments=[\"--target_column\", target_column,\n",
-"                                             \"--output_x\", output_x,\n",
-"                                             \"--output_y\", output_y,\n",
-"                                             \"--ds_name\", ds_name,\n",
+"                                  arguments=[\"--ds_name\", ds_name,\n",
 "                                             \"--model_name\", model_name],\n",
-"                                  outputs=[output_x, output_y], \n",
 "                                  compute_target=compute_target, \n",
 "                                  runconfig=conda_run_config)"
 ]
 },
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"from azureml.core import Dataset\n",
+"train_ds = Dataset.get_by_name(ws, dataset)\n",
+"train_ds = train_ds.drop_columns([\"partition_date\"])"
+]
+},
 {
 "cell_type": "markdown",
 "metadata": {},

@@ -324,11 +343,11 @@
 "outputs": [],
 "source": [
 "from azureml.train.automl import AutoMLConfig\n",
-"from azureml.train.automl.runtime import AutoMLStep\n",
+"from azureml.train.automl import AutoMLStep\n",
 "\n",
 "automl_settings = {\n",
-"    \"iteration_timeout_minutes\": 20,\n",
+"    \"iteration_timeout_minutes\": 10,\n",
-"    \"experiment_timeout_minutes\": 30,\n",
+"    \"experiment_timeout_minutes\": 10,\n",
 "    \"n_cross_validations\": 3,\n",
 "    \"primary_metric\": 'r2_score',\n",
 "    \"preprocess\": True,\n",

@@ -342,8 +361,8 @@
 "                             debug_log = 'automl_errors.log',\n",
 "                             path = \".\",\n",
 "                             compute_target=compute_target,\n",
-"                             run_configuration=conda_run_config,\n",
-"                             data_script = \"get_data.py\",\n",
+"                             training_data = train_ds,\n",
+"                             label_column_name = target_column_name,\n",
 "                             **automl_settings\n",
 "                             )"
 ]
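
Note: with `get_data.py` deleted, the AutoML step now takes a `TabularDataset` plus a label column instead of a data script. A sketch of the resulting `AutoMLConfig`, assuming `train_ds`, `target_column_name`, `compute_target`, and `automl_settings` from the surrounding cells (the `task` argument is not shown in this hunk and is assumed from the `r2_score` primary metric):

```python
from azureml.train.automl import AutoMLConfig

automl_config = AutoMLConfig(task='regression',                     # assumed, not shown in this hunk
                             debug_log='automl_errors.log',
                             path=".",
                             compute_target=compute_target,
                             training_data=train_ds,                # replaces data_script="get_data.py"
                             label_column_name=target_column_name,  # replaces the manual X/y split
                             **automl_settings)
```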

@@ -378,7 +397,6 @@
 "automl_step = AutoMLStep(\n",
 "    name='automl_module',\n",
 "    automl_config=automl_config,\n",
-"    inputs=[output_x, output_y],\n",
 "    outputs=[metirics_data, model_data],\n",
 "    allow_reuse=False)"
 ]

@@ -432,7 +450,7 @@
 "outputs": [],
 "source": [
 "training_pipeline_run = experiment.submit(training_pipeline, pipeline_parameters={\n",
-"    \"target_column\": \"temperature\", \"ds_name\": dataset, \"model_name\": \"noaaweatherds\"})"
+"    \"ds_name\": dataset, \"model_name\": \"noaaweatherds\"})"
 ]
 },
 {

@@ -475,7 +493,7 @@
 "source": [
 "from azureml.pipeline.core import Schedule\n",
 "schedule = Schedule.create(workspace=ws, name=\"RetrainingSchedule\",\n",
-"                           pipeline_parameters={\"target_column\": \"temperature\",\"ds_name\": dataset, \"model_name\": \"noaaweatherds\"},\n",
+"                           pipeline_parameters={\"ds_name\": dataset, \"model_name\": \"noaaweatherds\"},\n",
 "                           pipeline_id=published_pipeline.id, \n",
 "                           experiment_name=experiment_name, \n",
 "                           datastore=dstor,\n",

@@ -15,32 +15,16 @@ if type(run) == _OfflineRun:
 else:
     ws = run.experiment.workspace

+print("Check for new data.")
-def write_output(df, path):
-    os.makedirs(path, exist_ok=True)
-    print("%s created" % path)
-    df.to_csv(path + "/part-00000", index=False)
-
-
-print("Check for new data and prepare the data")
-
 parser = argparse.ArgumentParser("split")
-parser.add_argument("--target_column", type=str, help="input split features")
 parser.add_argument("--ds_name", help="input dataset name")
 parser.add_argument("--model_name", help="name of the deployed model")
-parser.add_argument("--output_x", type=str,
-                    help="output features")
-parser.add_argument("--output_y", type=str,
-                    help="output labels")

 args = parser.parse_args()

 print("Argument 1(ds_name): %s" % args.ds_name)
-print("Argument 2(target_column): %s" % args.target_column)
+print("Argument 2(model_name): %s" % args.model_name)
-print("Argument 3(model_name): %s" % args.model_name)
-print("Argument 4(output_x): %s" % args.output_x)
-print("Argument 5(output_y): %s" % args.output_y)

 # Get the latest registered model
 try:

@@ -54,22 +38,9 @@ except Exception as e:
 train_ds = Dataset.get_by_name(ws, args.ds_name)
 dataset_changed_time = train_ds.data_changed_time

-if dataset_changed_time > last_train_time:
+if not dataset_changed_time > last_train_time:
-    # New data is available since the model was last trained
-    print("Dataset was last updated on {0}. Retraining...".format(dataset_changed_time))
-    train_ds = train_ds.drop_columns(["partition_date"])
-    X_train = train_ds.drop_columns(
-        columns=[args.target_column]).to_pandas_dataframe()
-    y_train = train_ds.keep_columns(
-        columns=[args.target_column]).to_pandas_dataframe()
-
-    non_null = y_train[args.target_column].notnull()
-    y = y_train[non_null]
-    X = X_train[non_null]
-
-    if not (args.output_x is None and args.output_y is None):
-        write_output(X, args.output_x)
-        write_output(y, args.output_y)
-else:
     print("Cancelling run since there is no new data.")
     run.parent.cancel()
+else:
+    # New data is available since the model was last trained
+    print("Dataset was last updated on {0}. Retraining...".format(dataset_changed_time))
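
Note: after this change, `check_data.py` no longer prepares X/y outputs; it only gates the pipeline. Inverting the condition lets the no-new-data branch come first and cancel the remaining steps early. Condensed from the resulting script:

```python
# Gate the retraining pipeline on dataset freshness.
if not dataset_changed_time > last_train_time:
    print("Cancelling run since there is no new data.")
    run.parent.cancel()   # the step's parent is the pipeline run, so this stops the downstream steps
else:
    # New data is available since the model was last trained
    print("Dataset was last updated on {0}. Retraining...".format(dataset_changed_time))
```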

@@ -1,15 +0,0 @@
-import os
-import pandas as pd
-
-
-def get_data():
-    print("In get_data")
-    print(os.environ['AZUREML_DATAREFERENCE_output_x'])
-    X_train = pd.read_csv(
-        os.environ['AZUREML_DATAREFERENCE_output_x'] + "/part-00000")
-    y_train = pd.read_csv(
-        os.environ['AZUREML_DATAREFERENCE_output_y'] + "/part-00000")
-
-    print(X_train.head(3))
-
-    return {"X": X_train.values, "y": y_train.values.flatten()}

@@ -58,7 +58,7 @@ except Exception as e:
     print(traceback.format_exc())
     print("Dataset with name {0} not found, registering new dataset.".format(args.ds_name))
     register_dataset = True
-    end_time_last_slice = datetime.today() - relativedelta(weeks=1)
+    end_time_last_slice = datetime.today() - relativedelta(weeks=2)

 end_time = datetime.utcnow()
 train_df = get_noaa_data(end_time_last_slice, end_time)

@@ -80,10 +80,10 @@ if train_df.size > 0:
                        target_path=folder_name,
                        overwrite=True,
                        show_progress=True)
-
-    if register_dataset:
-        ds = Dataset.Tabular.from_delimited_files(dstor.path("{}/**/*.csv".format(
-            args.ds_name)), partition_format='/{partition_date:yyyy/MM/dd/hh/mm/ss}/data.csv')
-        ds.register(ws, name=args.ds_name)
 else:
     print("No new data since {0}.".format(end_time_last_slice))

+if register_dataset:
+    ds = Dataset.Tabular.from_delimited_files(dstor.path("{}/**/*.csv".format(
+        args.ds_name)), partition_format='/{partition_date:yyyy/MM/dd/HH/mm/ss}/data.csv')
+    ds.register(ws, name=args.ds_name)
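
Note: besides hoisting registration out of the `if train_df.size > 0:` block, the partition format changes `hh` to `HH`, presumably because the 24-hour token is what the datetime-based folder names actually contain. A sketch of the registration call, assuming `dstor`, `ws`, and `args` as in the script:

```python
from azureml.core import Dataset

# partition_format extracts a datetime from the blob path, e.g. /2020/01/05/13/00/00/data.csv
ds = Dataset.Tabular.from_delimited_files(
    dstor.path("{}/**/*.csv".format(args.ds_name)),
    partition_format='/{partition_date:yyyy/MM/dd/HH/mm/ss}/data.csv')
ds.register(ws, name=args.ds_name)
```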

@@ -30,11 +30,11 @@ def _get_configs(automlconfig: AutoMLConfig,
     groups = _get_groups(data, group_column_names)
     configs = {}
     for i, group in groups.iterrows():
-        single = data
+        single = data._dataflow
         group_name = "#####".join(str(x) for x in group.values)
         group_name = valid_chars.sub('', group_name)
         for key in group.index:
-            single = single._dataflow.filter(data._dataflow[key] == group[key])
+            single = single.filter(data._dataflow[key] == group[key])
         t_dataset = TabularDataset._create(single)
         group_conf = copy.deepcopy(automlconfig)
         group_conf.user_settings['training_data'] = t_dataset

@@ -558,7 +558,6 @@
 "\n",
 "# specify CondaDependencies obj\n",
 "conda_run_config.environment.python.conda_dependencies = CondaDependencies.create(\n",
-"    conda_packages=['scikit-learn', 'numpy','py-xgboost<=0.80'],\n",
 "    pip_packages=azureml_pip_packages)"
 ]
 },

@@ -726,8 +725,7 @@
 "    \n",
 "\n",
 "# specify CondaDependencies obj\n",
-"myenv = CondaDependencies.create(conda_packages=['scikit-learn', 'pandas', 'numpy', 'py-xgboost<=0.80'],\n",
-"                                 pip_packages=azureml_pip_packages,\n",
+"myenv = CondaDependencies.create(pip_packages=azureml_pip_packages,\n",
 "                                 pin_sdk_version=True)\n",
 "\n",
 "with open(\"myenv.yml\",\"w\") as f:\n",

@@ -7,7 +7,7 @@ from azureml.core.experiment import Experiment
 from sklearn.externals import joblib
 from azureml.core.dataset import Dataset
 from azureml.train.automl.runtime.automl_explain_utilities import AutoMLExplainerSetupClass, \
-    automl_setup_model_explanations
+    automl_setup_model_explanations, automl_check_model_if_explainable
 from azureml.explain.model.mimic.models.lightgbm_model import LGBMExplainableModel
 from azureml.explain.model.mimic_wrapper import MimicWrapper
 from automl.client.core.common.constants import MODEL_PATH

@@ -25,6 +25,11 @@ ws = run.experiment.workspace
 experiment = Experiment(ws, '<<experiment_name>>')
 automl_run = Run(experiment=experiment, run_id='<<run_id>>')

+# Check if this AutoML model is explainable
+if not automl_check_model_if_explainable(automl_run):
+    raise Exception("Model explanations is currently not supported for " + automl_run.get_properties().get(
+        'run_algorithm'))
+
 # Download the best model from the artifact store
 automl_run.download_file(name=MODEL_PATH, output_file_path='model.pkl')

@@ -180,7 +180,7 @@
 "# just get the published pipeline object that you have the ID for.\n",
 "\n",
 "# Get all published pipeline objects in the workspace\n",
-"all_pub_pipelines = PublishedPipeline.get_all(ws)\n",
+"all_pub_pipelines = PublishedPipeline.list(ws)\n",
 "\n",
 "# We will iterate through the list of published pipelines and \n",
 "# use the last ID in the list for Schedule operations: \n",

@@ -244,7 +244,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"schedules = Schedule.get_all(ws, pipeline_id=pub_pipeline_id)\n",
+"schedules = Schedule.list(ws, pipeline_id=pub_pipeline_id)\n",
 "\n",
 "# We will iterate through the list of schedules and \n",
 "# use the last recurrence schedule in the list for further operations: \n",

@@ -272,7 +272,7 @@
 "outputs": [],
 "source": [
 "# Use active_only=False to get all schedules including disabled schedules\n",
-"schedules = Schedule.get_all(ws, active_only=True) \n",
+"schedules = Schedule.list(ws, active_only=True) \n",
 "print(\"Your workspace has the following schedules set up:\")\n",
 "for schedule in schedules:\n",
 "    print(\"{} (Published pipeline: {}\".format(schedule.id, schedule.pipeline_id))"
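
Note: these hunks track the SDK-wide rename of `get_all*` helpers to `list*`. A sketch, assuming a workspace `ws`:

```python
from azureml.pipeline.core import PublishedPipeline, Schedule

all_pub_pipelines = PublishedPipeline.list(ws)     # formerly PublishedPipeline.get_all(ws)
schedules = Schedule.list(ws, active_only=True)    # formerly Schedule.get_all(...)
for schedule in schedules:
    print("{} (Published pipeline: {}".format(schedule.id, schedule.pipeline_id))
```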

@@ -230,7 +230,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"endpoint_list = PipelineEndpoint.get_all(workspace=ws, active_only=True)\n",
+"endpoint_list = PipelineEndpoint.list(workspace=ws, active_only=True)\n",
 "endpoint_list"
 ]
 },

@@ -360,7 +360,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"versions = pipeline_endpoint_by_name.get_all_versions()\n",
+"versions = pipeline_endpoint_by_name.list_versions()\n",
 "\n",
 "for ve in versions:\n",
 "    print(ve.version)\n",

@@ -381,7 +381,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"pipelines = pipeline_endpoint_by_name.get_all_pipelines(active_only=True)\n",
+"pipelines = pipeline_endpoint_by_name.list_pipelines(active_only=True)\n",
 "pipelines"
 ]
 },
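
Note: `PipelineEndpoint` follows the same rename (`list`, `list_versions`, `list_pipelines`). A sketch, assuming `ws` and a hypothetical endpoint name:

```python
from azureml.pipeline.core import PipelineEndpoint

endpoint_list = PipelineEndpoint.list(workspace=ws, active_only=True)
endpoint = PipelineEndpoint.get(workspace=ws, name="my-endpoint")   # hypothetical name
versions = endpoint.list_versions()
pipelines = endpoint.list_pipelines(active_only=True)
```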

@@ -0,0 +1,436 @@
+{
+"cells": [
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"Copyright (c) Microsoft Corporation. All rights reserved. \n",
+"Licensed under the MIT License."
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+""
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"# Azure Machine Learning Pipeline with NotebookRunnerStep\n",
+"This notebook demonstrates the use of `NotebookRunnerStep`. It allows you to run a local notebook as a step in an Azure Machine Learning Pipeline."
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Introduction\n",
+"In this example we showcase how you can run another notebook, `notebook_runner/training_notebook.ipynb`, as a step in an Azure Machine Learning Pipeline.\n",
+"\n",
+"If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you have executed the [configuration](https://aka.ms/pl-config) before running this notebook.\n",
+"\n",
+"In this notebook you will learn how to:\n",
+"1. Create an `Experiment` in an existing `Workspace`.\n",
+"2. Create or Attach existing AmlCompute to a workspace.\n",
+"3. Configure a notebook run using `NotebookRunConfig`.\n",
+"4. Use NotebookRunnerStep.\n",
+"5. Run the notebook on `AmlCompute` as a pipeline step consuming the output of a Python script step.\n",
+"\n",
+"Advantages of running your notebook as a step in a pipeline:\n",
+"1. Run your notebook like a Python script without converting it into .py files, leveraging the complete end-to-end experience of Azure Machine Learning Pipelines.\n",
+"2. Pass pipeline intermediate data to and from the notebook along with other steps in the pipeline.\n",
+"3. Parameterize your notebook with [Pipeline Parameters](./aml-pipelines-publish-and-run-using-rest-endpoint.ipynb).\n",
+"\n",
+"Try some more [quick start notebooks](https://github.com/microsoft/recommenders/tree/master/notebooks/00_quick_start) with `NotebookRunnerStep`."
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Azure Machine Learning and Pipeline SDK-specific imports"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"import os\n",
+"\n",
+"import azureml.core\n",
+"\n",
+"from azureml.core.compute import AmlCompute, ComputeTarget\n",
+"from azureml.core.runconfig import RunConfiguration\n",
+"from azureml.data.data_reference import DataReference\n",
+"from azureml.pipeline.core import PipelineData\n",
+"from azureml.core.datastore import Datastore\n",
+"\n",
+"from azureml.widgets import RunDetails\n",
+"\n",
+"from azureml.core import Workspace, Experiment\n",
+"from azureml.contrib.notebook import NotebookRunConfig, AzureMLNotebookHandler\n",
+"\n",
+"from azureml.pipeline.core import Pipeline\n",
+"from azureml.pipeline.steps import PythonScriptStep\n",
+"from azureml.contrib.notebook import NotebookRunnerStep\n",
+"\n",
+"# Check core SDK version number\n",
+"print(\"SDK version:\", azureml.core.VERSION)"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Initialize Workspace\n",
+"\n",
+"Initialize a [workspace](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.workspace(class%29) object from persisted configuration."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"ws = Workspace.from_config()\n",
+"print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')\n",
+"ws.set_default_datastore(\"workspaceblobstore\")"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Upload data to datastore"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"Datastore.get(ws, \"workspaceblobstore\").upload_files([\"./20news.pkl\"], target_path=\"20newsgroups\", overwrite=True)\n",
+"print(\"Upload call completed\")"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Create an Azure ML experiment\n",
+"Let's create an experiment named \"notebook-step-run-example\" and a folder to hold the notebook and other scripts. The script runs will be recorded under the experiment in Azure.\n",
+"\n",
+"The best practice is to use separate folders for scripts and their dependent files for each step and specify that folder as the `source_directory` for the step. This helps reduce the size of the snapshot created for the step (only the specific folder is snapshotted). Since changes in any files in the `source_directory` would trigger a re-upload of the snapshot, this helps preserve the reuse of the step when there are no changes in the `source_directory` of the step."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"# Choose a name for the run history container in the workspace.\n",
+"experiment_name = 'notebook-step-run-example'\n",
+"source_directory = 'notebook_runner'\n",
+"\n",
+"experiment = Experiment(ws, experiment_name)\n",
+"experiment"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Create or Attach an AmlCompute cluster\n",
+"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your run. In this tutorial, you get the default `AmlCompute` as your training compute resource."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"# Choose a name for your cluster.\n",
+"amlcompute_cluster_name = \"cpu-cluster\"\n",
+"\n",
+"found = False\n",
+"# Check if this compute target already exists in the workspace.\n",
+"cts = ws.compute_targets\n",
+"if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'AmlCompute':\n",
+"    found = True\n",
+"    print('Found existing compute target.')\n",
+"    compute_target = cts[amlcompute_cluster_name]\n",
+"    \n",
+"if not found:\n",
+"    print('Creating a new compute target...')\n",
+"    provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\", # for GPU, use \"STANDARD_NC6\"\n",
+"                                                                #vm_priority = 'lowpriority', # optional\n",
+"                                                                max_nodes = 4)\n",
+"\n",
+"    # Create the cluster.\n",
+"    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config)\n",
+"    \n",
+"    # Can poll for a minimum number of nodes and for a specific timeout.\n",
+"    # If no min_node_count is provided, it will use the scale settings for the cluster.\n",
+"    compute_target.wait_for_completion(show_output = True, min_node_count = 1, timeout_in_minutes = 10)\n",
+"    \n",
+"    # For a more detailed view of current AmlCompute status, use get_status()."
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Create a new RunConfig object"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"from azureml.core.conda_dependencies import CondaDependencies\n",
+"\n",
+"conda_run_config = RunConfiguration(framework=\"python\")\n",
+"\n",
+"conda_run_config.environment.docker.enabled = True\n",
+"conda_run_config.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE\n",
+"\n",
+"cd = CondaDependencies.create(pip_packages=['azureml-sdk'], pin_sdk_version=False)\n",
+"conda_run_config.environment.python.conda_dependencies = cd\n",
+"\n",
+"print('run config is ready')"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Define input and outputs"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"input_data = DataReference(\n",
+"    datastore=Datastore.get(ws, \"workspaceblobstore\"),\n",
+"    data_reference_name=\"blob_test_data\",\n",
+"    path_on_datastore=\"20newsgroups/20news.pkl\")\n",
+"\n",
+"output_data = PipelineData(name=\"processed_data\",\n",
+"                           datastore=Datastore.get(ws, \"workspaceblobstore\"))"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Create notebook run configuration and set parameter values"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"handler = AzureMLNotebookHandler(timeout=600, progress_bar=False, log_output=True)\n",
+"\n",
+"cfg = NotebookRunConfig(source_directory=source_directory, notebook=\"training_notebook.ipynb\",\n",
+"                        handler = handler,\n",
+"                        parameters={\"arg1\": \"Machine Learning\"},\n",
+"                        run_config=conda_run_config)\n",
+"\n",
+"print(\"Notebook Run Config is created.\")"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Define PythonScriptStep"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"print('Source directory for the step is {}.'.format(os.path.realpath('./train')))\n",
+"python_script_step = PythonScriptStep(\n",
+"    script_name=\"train.py\",\n",
+"    arguments=[\"--input_data\", input_data],\n",
+"    inputs=[input_data],\n",
+"    outputs=[output_data],\n",
+"    compute_target=compute_target, \n",
+"    source_directory=\"./train\",\n",
+"    allow_reuse=True)\n",
+"print(\"python_script_step created\")"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Define NotebookRunnerStep\n",
+"\n",
+"This step will consume the intermediate output produced by `python_script_step` as an input.\n",
+"\n",
+"Optionally, an output named by `output_notebook_pipeline_data_name` can be added to the `NotebookRunnerStep` to redirect the `output_notebook` of the notebook run to the step's output, produced as `PipelineData`, which can be passed further along the pipeline."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"from azureml.pipeline.core import PipelineParameter, TrainingOutput\n",
+"\n",
+"output_from_notebook = PipelineData(name=\"notebook_processed_data\",\n",
+"                                    datastore=Datastore.get(ws, \"workspaceblobstore\"))\n",
+"\n",
+"my_pipeline_param = PipelineParameter(name=\"pipeline_param\", default_value=\"my_param\")\n",
+"\n",
+"print('Source directory for the step is {}.'.format(os.path.realpath(source_directory)))\n",
+"notebook_runner_step = NotebookRunnerStep(name=\"training_notebook_step\",\n",
+"                                          notebook_run_config=cfg,\n",
+"                                          params={\"my_pipeline_param\": my_pipeline_param},\n",
+"                                          inputs=[output_data],\n",
+"                                          outputs=[output_from_notebook],\n",
+"                                          allow_reuse=True,\n",
+"                                          compute_target=compute_target,\n",
+"                                          output_notebook_pipeline_data_name=\"notebook_result\")\n",
+"\n",
+"print(\"Notebook Runner Step is Created.\")"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Build Pipeline\n",
+"\n",
+"Once we have the steps (or steps collection), we can build the [pipeline](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipeline.pipeline?view=azure-ml-py). By default, all these steps will run in **parallel** once we submit the pipeline for a run.\n",
+"\n",
+"A pipeline is created with a list of steps and a workspace. Submit a pipeline using [submit](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.experiment(class)?view=azure-ml-py#submit-config--tags-none----kwargs-). When submit is called, a [PipelineRun](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelinerun?view=azure-ml-py) is created which in turn creates [StepRun](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.steprun?view=azure-ml-py) objects for each step in the workflow."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"pipeline1 = Pipeline(workspace=ws, steps=[notebook_runner_step])\n",
+"\n",
+"pipeline1.validate()\n",
+"print(\"Pipeline validation complete\")"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"pipeline_run1 = experiment.submit(pipeline1)"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"RunDetails(pipeline_run1).show()"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Download output notebook\n",
+"\n",
+"`output_notebook` can be retrieved via the pipeline step output if `output_notebook_pipeline_data_name` is provided to the `NotebookRunnerStep`."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"pipeline_run1.wait_for_completion()\n",
+"# Retrieve the step runs by name `training_notebook_step`\n",
+"train_step = pipeline_run1.find_step_run('training_notebook_step')\n",
+"\n",
+"if train_step:\n",
+"    train_step_obj = train_step[0] # since we have only one step by name `training_notebook_step`\n",
+"    train_step_obj.get_output_data('notebook_result').download(source_directory) # download the output to source_directory"
+]
+}
+],
+"metadata": {
+"authors": [
+{
+"name": "sanpil"
+}
+],
+"category": "tutorial",
+"compute": [
+"AML Compute"
+],
+"datasets": [
+"Custom"
+],
+"deployment": [
+"None"
+],
+"exclude_from_index": false,
+"framework": [
+"Azure ML"
+],
+"friendly_name": "How to run a notebook as a step in AML Pipelines",
+"kernelspec": {
+"display_name": "Python 3.6",
+"language": "python",
+"name": "python36"
+},
+"language_info": {
+"codemirror_mode": {
+"name": "ipython",
+"version": 3
+},
+"file_extension": ".py",
+"mimetype": "text/x-python",
+"name": "python",
+"nbconvert_exporter": "python",
+"pygments_lexer": "ipython3",
+"version": "3.7.3"
+},
+"order_index": 12,
+"star_tag": [
+"None"
+],
+"tags": [
+"None"
+],
+"task": "Demonstrates the use of NotebookRunnerStep"
+},
+"nbformat": 4,
+"nbformat_minor": 2
+}

@@ -0,0 +1,6 @@
+name: aml-pipelines-with-notebook-runner-step
+dependencies:
+- pip:
+  - azureml-sdk
+  - azureml-widgets
+  - azureml-contrib-notebook

@@ -0,0 +1,106 @@
+{
+"cells": [
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"Copyright (c) Microsoft Corporation. All rights reserved. \n",
+"Licensed under the MIT License."
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+""
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"import os"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"print(\"In training_notebook.ipynb\")"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"tags": [
+"parameters"
+]
+},
+"outputs": [],
+"source": [
+"# declaring parameters to override\n",
+"\n",
+"arg1 = \"Azure\"\n",
+"processed_data = None\n",
+"notebook_processed_data = None\n",
+"my_pipeline_param = None"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"# Final parameter values\n",
+"\n",
+"print(\"arg1: %s\" % arg1)\n",
+"print(\"input from previous step: %s\" % processed_data)\n",
+"print(\"output from notebook: %s\" % notebook_processed_data)\n",
+"print(\"pipeline_parameter: %s\" % my_pipeline_param)"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"if not (notebook_processed_data is None):\n",
+"    os.makedirs(notebook_processed_data, exist_ok=True)\n",
+"    print(\"%s created\" % notebook_processed_data)"
+]
+}
+],
+"metadata": {
+"authors": [
+{
+"name": "sanpil"
+}
+],
+"kernelspec": {
+"display_name": "Python 3.6",
+"language": "python",
+"name": "python36"
+},
+"language_info": {
+"codemirror_mode": {
+"name": "ipython",
+"version": 3
+},
+"file_extension": ".py",
+"mimetype": "text/x-python",
+"name": "python",
+"nbconvert_exporter": "python",
+"pygments_lexer": "ipython3",
+"version": "3.6.7"
+}
+},
+"nbformat": 4,
+"nbformat_minor": 2
+}

@@ -561,10 +561,11 @@
 "from azureml.core.model import InferenceConfig\n",
 "from azureml.core.webservice import Webservice\n",
 "from azureml.core.model import Model\n",
+"from azureml.core.environment import Environment\n",
 "\n",
-"inference_config = InferenceConfig(runtime= \"python\", \n",
-"                                   entry_script=\"pytorch_score.py\",\n",
-"                                   conda_file=\"myenv.yml\")\n",
+"\n",
+"myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\n",
+"inference_config = InferenceConfig(entry_script=\"pytorch_score.py\", environment=myenv)\n",
 "\n",
 "aciconfig = AciWebservice.deploy_configuration(cpu_cores=1, \n",
 "                                               memory_gb=1, \n",

@@ -908,13 +908,16 @@
 "def init():\n",
 "    global X, output, sess\n",
 "    tf.reset_default_graph()\n",
-"    model_root = Model.get_model_path('tf-dnn-mnist')\n",
+"    model_root = os.getenv('AZUREML_MODEL_DIR')\n",
-"    saver = tf.train.import_meta_graph(os.path.join(model_root, 'mnist-tf.model.meta'))\n",
+"    # the name of the folder in which to look for tensorflow model files\n",
+"    tf_model_folder = 'model'\n",
+"    saver = tf.train.import_meta_graph(\n",
+"        os.path.join(model_root, tf_model_folder, 'mnist-tf.model.meta'))\n",
 "    X = tf.get_default_graph().get_tensor_by_name(\"network/X:0\")\n",
 "    output = tf.get_default_graph().get_tensor_by_name(\"network/output/MatMul:0\")\n",
-"    \n",
+"\n",
 "    sess = tf.Session()\n",
-"    saver.restore(sess, os.path.join(model_root, 'mnist-tf.model'))\n",
+"    saver.restore(sess, os.path.join(model_root, tf_model_folder, 'mnist-tf.model'))\n",
 "\n",
 "def run(raw_data):\n",
 "    data = np.array(json.loads(raw_data)['data'])\n",
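
Note: the scoring script now reads the model location from the `AZUREML_MODEL_DIR` environment variable (set by the service at runtime) instead of `Model.get_model_path`, and looks for the TensorFlow files under a `model/` subfolder. Condensed from the resulting `init()`:

```python
import os
import tensorflow as tf

def init():
    global X, output, sess
    tf.reset_default_graph()
    model_root = os.getenv('AZUREML_MODEL_DIR')   # populated by the deployment, not hard-coded
    tf_model_folder = 'model'                     # folder holding the tensorflow model files
    saver = tf.train.import_meta_graph(
        os.path.join(model_root, tf_model_folder, 'mnist-tf.model.meta'))
    X = tf.get_default_graph().get_tensor_by_name("network/X:0")
    output = tf.get_default_graph().get_tensor_by_name("network/output/MatMul:0")
    sess = tf.Session()
    saver.restore(sess, os.path.join(model_root, tf_model_folder, 'mnist-tf.model'))
```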

@@ -943,6 +946,7 @@
 "cd = CondaDependencies.create()\n",
 "cd.add_conda_package('numpy')\n",
 "cd.add_tensorflow_conda_package()\n",
+"cd.add_pip_package(\"azureml-defaults\")\n",
 "cd.save_to_file(base_directory='./', conda_file_path='myenv.yml')\n",
 "\n",
 "print(cd.serialize_to_string())"

@@ -966,10 +970,11 @@
 "from azureml.core.model import InferenceConfig\n",
 "from azureml.core.webservice import Webservice\n",
 "from azureml.core.model import Model\n",
+"from azureml.core.environment import Environment\n",
 "\n",
-"inference_config = InferenceConfig(runtime= \"python\", \n",
-"                                   entry_script=\"score.py\",\n",
-"                                   conda_file=\"myenv.yml\")\n",
+"\n",
+"myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\n",
+"inference_config = InferenceConfig(entry_script=\"score.py\", environment=myenv)\n",
 "\n",
 "aciconfig = AciWebservice.deploy_configuration(cpu_cores=1, \n",
 "                                               memory_gb=1, \n",
|
|||||||
346 how-to-use-azureml/monitor-models/data-drift/dataset/testing.csv Normal file
@@ -0,0 +1,346 @@
+latitude,longitude,temperature,windAngle,windSpeed,elevation
+26.536,-81.755,17.8,10.0,2.1,9.0
+26.536,-81.755,16.7,360.0,1.5,9.0
+26.536,-81.755,16.1,350.0,1.5,9.0
+26.536,-81.755,15.0,0.0,0.0,9.0
+26.536,-81.755,14.4,350.0,1.5,9.0
+26.536,-81.755,0.0,0.0,0.0,9.0
+26.536,-81.755,13.9,360.0,2.1,9.0
+26.536,-81.755,13.3,350.0,1.5,9.0
+26.536,-81.755,13.3,10.0,2.1,9.0
+26.536,-81.755,13.3,360.0,1.5,9.0
+26.536,-81.755,13.3,0.0,0.0,9.0
+26.536,-81.755,12.2,0.0,0.0,9.0
+26.536,-81.755,11.7,0.0,0.0,9.0
+26.536,-81.755,14.4,0.0,0.0,9.0
+26.536,-81.755,17.2,10.0,2.6,9.0
+26.536,-81.755,20.0,20.0,2.6,9.0
+26.536,-81.755,22.2,10.0,3.6,9.0
+26.536,-81.755,23.3,30.0,4.6,9.0
+26.536,-81.755,23.3,330.0,2.6,9.0
+26.536,-81.755,24.4,0.0,0.0,9.0
+26.536,-81.755,25.0,360.0,3.1,9.0
+26.536,-81.755,24.4,20.0,4.1,9.0
+26.536,-81.755,23.3,10.0,2.6,9.0
+26.536,-81.755,21.1,30.0,2.1,9.0
+26.536,-81.755,18.3,0.0,0.0,9.0
+26.536,-81.755,17.2,30.0,2.1,9.0
+26.536,-81.755,15.6,60.0,2.6,9.0
+26.536,-81.755,15.6,0.0,0.0,9.0
+26.536,-81.755,13.9,60.0,2.6,9.0
+26.536,-81.755,12.8,70.0,2.6,9.0
+26.536,-81.755,0.0,0.0,0.0,9.0
+26.536,-81.755,11.7,70.0,2.1,9.0
+26.536,-81.755,12.2,20.0,2.1,9.0
+26.536,-81.755,11.7,30.0,1.5,9.0
+26.536,-81.755,11.1,40.0,2.1,9.0
+26.536,-81.755,12.2,40.0,2.6,9.0
+26.536,-81.755,12.2,30.0,2.6,9.0
+26.536,-81.755,12.2,0.0,0.0,9.0
+26.536,-81.755,15.0,30.0,6.2,9.0
+26.536,-81.755,17.2,50.0,3.6,9.0
+26.536,-81.755,20.6,60.0,5.1,9.0
+26.536,-81.755,22.8,50.0,4.6,9.0
+26.536,-81.755,24.4,80.0,6.2,9.0
+26.536,-81.755,25.0,100.0,5.7,9.0
+26.536,-81.755,25.6,60.0,3.1,9.0
+26.536,-81.755,25.6,80.0,4.6,9.0
+26.536,-81.755,25.0,90.0,5.1,9.0
+26.536,-81.755,24.4,80.0,5.1,9.0
+26.536,-81.755,21.1,60.0,2.6,9.0
+26.536,-81.755,19.4,70.0,3.6,9.0
+26.536,-81.755,18.3,70.0,2.6,9.0
+26.536,-81.755,18.3,80.0,2.6,9.0
+26.536,-81.755,17.2,60.0,1.5,9.0
+26.536,-81.755,16.1,70.0,2.6,9.0
+26.536,-81.755,15.6,70.0,2.6,9.0
+26.536,-81.755,0.0,0.0,0.0,9.0
+26.536,-81.755,16.1,50.0,2.6,9.0
+26.536,-81.755,15.6,50.0,2.1,9.0
+26.536,-81.755,15.0,50.0,1.5,9.0
+26.536,-81.755,15.0,0.0,0.0,9.0
+26.536,-81.755,15.0,0.0,0.0,9.0
+26.536,-81.755,14.4,0.0,0.0,9.0
+26.536,-81.755,14.4,30.0,4.1,9.0
+26.536,-81.755,16.1,40.0,1.5,9.0
+26.536,-81.755,19.4,0.0,1.5,9.0
+26.536,-81.755,22.8,90.0,2.6,9.0
+26.536,-81.755,24.4,130.0,3.6,9.0
+26.536,-81.755,25.6,100.0,4.6,9.0
+26.536,-81.755,26.1,120.0,3.1,9.0
+26.536,-81.755,26.7,0.0,2.6,9.0
+26.536,-81.755,27.2,0.0,0.0,9.0
+26.536,-81.755,27.2,40.0,3.1,9.0
+26.536,-81.755,26.1,30.0,1.5,9.0
+26.536,-81.755,22.8,310.0,2.1,9.0
+26.536,-81.755,23.3,330.0,2.1,9.0
+-34.067,-56.238,17.5,30.0,3.1,68.0
+-34.067,-56.238,21.2,30.0,5.7,68.0
+-34.067,-56.238,24.5,30.0,3.1,68.0
+-34.067,-56.238,27.5,330.0,3.6,68.0
+-34.067,-56.238,29.2,30.0,4.1,68.0
+-34.067,-56.238,31.0,20.0,4.6,68.0
+-34.067,-56.238,33.0,360.0,2.6,68.0
+-34.067,-56.238,33.6,60.0,3.1,68.0
+-34.067,-56.238,33.6,30.0,3.6,68.0
+-34.067,-56.238,18.6,40.0,3.1,68.0
+-34.067,-56.238,22.0,120.0,1.5,68.0
+-34.067,-56.238,25.0,120.0,2.6,68.0
+-34.067,-56.238,28.6,50.0,3.1,68.0
+-34.067,-56.238,30.6,50.0,4.1,68.0
+-34.067,-56.238,31.5,30.0,6.7,68.0
+-34.067,-56.238,32.0,40.0,7.2,68.0
+-34.067,-56.238,33.0,30.0,5.7,68.0
+-34.067,-56.238,33.2,360.0,3.6,68.0
+-34.067,-56.238,20.6,30.0,3.1,68.0
+-34.067,-56.238,21.2,0.0,0.0,68.0
+-34.067,-56.238,22.0,210.0,3.1,68.0
+-34.067,-56.238,23.0,210.0,3.6,68.0
+-34.067,-56.238,24.0,180.0,6.7,68.0
+-34.067,-56.238,24.5,210.0,7.2,68.0
+-34.067,-56.238,21.0,180.0,8.2,68.0
+-34.067,-56.238,20.0,180.0,6.7,68.0
+-34.083,-56.233,20.2,180.0,7.2,68.0
+-29.917,-71.2,16.6,290.0,4.1,146.0
+-29.916,-71.2,17.0,290.0,4.1,147.0
+-29.916,-71.2,16.0,310.0,3.1,147.0
+-29.916,-71.2,16.0,300.0,2.1,147.0
+-29.917,-71.2,15.1,0.0,0.0,146.0
+-29.916,-71.2,15.0,0.0,1.0,147.0
+-29.916,-71.2,15.0,160.0,1.0,147.0
+-29.916,-71.2,15.0,120.0,1.0,147.0
+-29.917,-71.2,14.3,190.0,1.0,146.0
+-29.916,-71.2,14.0,190.0,1.0,147.0
+-29.916,-71.2,14.0,0.0,0.0,147.0
+-29.916,-71.2,14.0,100.0,3.1,147.0
+-29.917,-71.2,12.9,0.0,0.0,146.0
+-29.916,-71.2,13.0,0.0,1.0,147.0
+-29.916,-71.2,14.0,0.0,0.5,147.0
+-29.916,-71.2,15.0,0.0,0.5,147.0
+-29.917,-71.2,15.9,0.0,0.0,146.0
+-29.916,-71.2,16.0,0.0,0.0,147.0
+-29.916,-71.2,17.0,270.0,4.6,147.0
+-29.916,-71.2,19.0,260.0,4.1,147.0
+-29.917,-71.2,18.1,270.0,6.2,146.0
+-29.916,-71.2,18.0,270.0,6.2,147.0
+-29.916,-71.2,19.0,270.0,6.2,147.0
+-29.916,-71.2,20.0,260.0,5.1,147.0
+-29.917,-71.2,19.6,280.0,6.2,146.0
+-29.916,-71.2,20.0,280.0,6.2,147.0
+-29.916,-71.2,20.0,270.0,6.2,147.0
+-29.916,-71.2,19.0,280.0,6.7,147.0
+-29.917,-71.2,18.3,270.0,5.7,146.0
+-29.916,-71.2,18.0,270.0,5.7,147.0
+-29.916,-71.2,18.0,0.0,0.0,147.0
+-29.916,-71.2,17.0,280.0,4.6,147.0
+-29.917,-71.2,15.9,280.0,4.1,146.0
+-29.916,-71.2,16.0,280.0,4.1,147.0
+-29.916,-71.2,15.0,280.0,3.6,147.0
+-29.916,-71.2,15.0,280.0,3.6,147.0
+-29.917,-71.2,15.4,280.0,4.1,146.0
+-29.916,-71.2,15.0,280.0,4.1,147.0
+-29.916,-71.2,16.0,240.0,2.1,147.0
+-29.916,-71.2,15.0,0.0,0.5,147.0
+-29.917,-71.2,15.8,80.0,3.6,146.0
+-29.916,-71.2,16.0,80.0,3.6,147.0
+-29.916,-71.2,16.0,10.0,1.5,147.0
+-29.916,-71.2,16.0,100.0,1.5,147.0
+-29.917,-71.2,15.3,130.0,1.5,146.0
+-29.916,-71.2,15.0,130.0,1.5,147.0
+-29.916,-71.2,15.0,110.0,1.0,147.0
+-29.916,-71.2,16.0,280.0,6.2,147.0
+-29.917,-71.2,15.9,240.0,3.6,146.0
+-29.916,-71.2,16.0,240.0,3.6,147.0
+-29.916,-71.2,16.0,240.0,3.1,147.0
+-29.916,-71.2,16.0,220.0,3.1,147.0
+-29.917,-71.2,16.4,260.0,3.1,146.0
+-29.916,-71.2,16.0,260.0,3.1,147.0
+-29.916,-71.2,17.0,230.0,2.6,147.0
+-29.916,-71.2,18.0,0.0,1.5,147.0
+-29.917,-71.2,20.3,340.0,2.6,146.0
+-29.916,-71.2,20.0,340.0,2.6,147.0
+-29.916,-71.2,21.0,270.0,5.1,147.0
+-29.916,-71.2,20.0,270.0,6.7,147.0
+-29.917,-71.2,19.2,280.0,6.7,146.0
+-29.916,-71.2,19.0,280.0,6.7,147.0
+-29.916,-71.2,19.0,310.0,2.6,147.0
+-29.916,-71.2,18.0,270.0,5.1,147.0
+-29.917,-71.2,17.0,300.0,4.6,146.0
+-29.916,-71.2,17.0,300.0,4.6,147.0
+-29.916,-71.2,17.0,300.0,3.6,147.0
+-29.916,-71.2,17.0,290.0,3.1,147.0
+-29.917,-71.2,16.3,290.0,2.1,146.0
+-29.916,-71.2,16.0,290.0,2.1,147.0
+-29.916,-71.2,17.0,270.0,1.0,147.0
+-29.916,-71.2,17.0,0.0,0.5,147.0
+-29.917,-71.2,16.5,160.0,2.1,146.0
+-29.916,-71.2,17.0,160.0,2.1,147.0
+-29.916,-71.2,15.0,120.0,3.1,147.0
+-29.916,-71.2,16.0,180.0,1.5,147.0
+-29.917,-71.2,14.7,0.0,0.0,146.0
+-29.916,-71.2,15.0,0.0,1.0,147.0
+-29.916,-71.2,15.0,300.0,1.0,147.0
+-29.916,-71.2,16.0,0.0,0.0,147.0
+-29.917,-71.2,18.5,110.0,1.0,146.0
+-29.916,-71.2,19.0,110.0,1.0,147.0
+-29.916,-71.2,20.0,270.0,3.6,147.0
+-29.916,-71.2,20.0,270.0,5.7,147.0
+-29.917,-71.2,20.0,280.0,6.2,146.0
+-29.916,-71.2,20.0,280.0,6.2,147.0
+-29.916,-71.2,21.0,290.0,6.7,147.0
+-29.916,-71.2,20.0,270.0,6.2,147.0
+-29.917,-71.2,21.0,260.0,6.7,146.0
+-29.916,-71.2,21.0,260.0,6.7,147.0
+-29.916,-71.2,20.0,270.0,6.2,147.0
+-29.916,-71.2,19.0,260.0,5.1,147.0
+-29.916,-71.2,18.0,280.0,4.6,147.0
+-29.917,-71.2,17.5,280.0,3.1,146.0
+-29.916,-71.2,18.0,280.0,3.1,147.0
+30.349,-85.788,11.1,0.0,0.0,21.0
+30.349,-85.788,11.1,0.0,0.0,21.0
+30.349,-85.788,9.4,0.0,0.0,21.0
+30.349,-85.788,9.4,0.0,0.0,21.0
+30.349,-85.788,8.3,300.0,2.1,21.0
+30.349,-85.788,11.1,280.0,1.5,21.0
+30.349,-85.788,0.0,0.0,0.0,21.0
+30.349,-85.788,10.6,320.0,3.1,21.0
+30.349,-85.788,9.4,310.0,3.1,21.0
+30.349,-85.788,7.8,320.0,2.6,21.0
+30.349,-85.788,6.1,340.0,2.1,21.0
+30.349,-85.788,6.7,330.0,2.6,21.0
+30.349,-85.788,6.1,310.0,1.5,21.0
+30.349,-85.788,7.2,310.0,2.1,21.0
+30.349,-85.788,12.8,360.0,3.1,21.0
+30.349,-85.788,15.0,0.0,3.1,21.0
+30.349,-85.788,16.7,20.0,4.6,21.0
+30.349,-85.788,18.9,30.0,5.1,21.0
+30.349,-85.788,19.4,10.0,4.1,21.0
+30.349,-85.788,21.1,330.0,2.6,21.0
+30.349,-85.788,21.1,10.0,4.6,21.0
+30.349,-85.788,21.7,360.0,4.1,21.0
+30.349,-85.788,21.7,30.0,2.1,21.0
+30.349,-85.788,21.7,330.0,2.6,21.0
+30.349,-85.788,16.1,350.0,2.1,21.0
+30.349,-85.788,11.7,0.0,0.0,21.0
+30.349,-85.788,8.9,0.0,0.0,21.0
+30.349,-85.788,9.4,0.0,0.0,21.0
+30.349,-85.788,7.8,0.0,0.0,21.0
+30.349,-85.788,11.1,30.0,3.1,21.0
+30.349,-85.788,7.2,0.0,0.0,21.0
+30.349,-85.788,7.2,0.0,0.0,21.0
+30.349,-85.788,0.0,0.0,0.0,21.0
+30.349,-85.788,7.8,30.0,2.1,21.0
+30.349,-85.788,8.3,40.0,2.6,21.0
+30.349,-85.788,7.2,50.0,1.5,21.0
+30.349,-85.788,8.3,60.0,1.5,21.0
+30.349,-85.788,5.6,40.0,2.1,21.0
+30.349,-85.788,6.7,40.0,2.1,21.0
+30.349,-85.788,7.8,50.0,3.1,21.0
+30.349,-85.788,11.7,70.0,2.6,21.0
+30.349,-85.788,15.6,70.0,3.1,21.0
+30.349,-85.788,18.9,100.0,3.6,21.0
+30.349,-85.788,20.0,130.0,3.6,21.0
+30.349,-85.788,21.1,140.0,4.1,21.0
+30.349,-85.788,21.7,150.0,4.1,21.0
+30.349,-85.788,21.7,170.0,3.1,21.0
+30.349,-85.788,22.2,170.0,3.1,21.0
+30.349,-85.788,20.6,0.0,0.0,21.0
+30.349,-85.788,17.2,0.0,0.0,21.0
+30.349,-85.788,14.4,0.0,0.0,21.0
+30.349,-85.788,12.8,100.0,1.5,21.0
+30.349,-85.788,13.3,100.0,1.5,21.0
+30.349,-85.788,10.6,0.0,0.0,21.0
+30.349,-85.788,9.4,0.0,0.0,21.0
+30.349,-85.788,7.8,0.0,0.0,21.0
+30.358,-85.799,8.3,0.0,0.0,21.0
+30.349,-85.788,0.0,0.0,0.0,21.0
+30.358,-85.799,6.7,0.0,0.0,21.0
+30.358,-85.799,7.2,0.0,0.0,21.0
+30.358,-85.799,7.2,0.0,0.0,21.0
+30.358,-85.799,8.3,50.0,1.5,21.0
+30.358,-85.799,9.4,0.0,0.0,21.0
+30.358,-85.799,8.9,0.0,0.0,21.0
+30.358,-85.799,10.0,340.0,1.5,21.0
+30.358,-85.799,12.8,40.0,1.5,21.0
+30.358,-85.799,16.7,100.0,2.1,21.0
+30.358,-85.799,21.1,100.0,1.5,21.0
+30.358,-85.799,23.3,0.0,0.0,21.0
+30.358,-85.799,25.0,180.0,4.6,21.0
+30.358,-85.799,24.4,230.0,3.6,21.0
+30.358,-85.799,25.0,210.0,4.1,21.0
+30.358,-85.799,23.9,170.0,4.1,21.0
+30.358,-85.799,22.8,0.0,0.0,21.0
+30.358,-85.799,19.4,0.0,0.0,21.0
+30.358,-85.799,17.8,140.0,2.1,21.0
+60.383,5.333,-0.7,0.0,0.0,36.0
+60.383,5.333,0.6,270.0,2.0,36.0
+60.383,5.333,-0.9,120.0,1.0,36.0
+60.383,5.333,-1.6,130.0,2.0,36.0
+60.383,5.333,-1.4,150.0,1.0,36.0
+60.383,5.333,-1.7,0.0,0.0,36.0
+60.383,5.333,-1.7,140.0,1.0,36.0
+60.383,5.333,-1.4,0.0,0.0,36.0
+60.383,5.333,-1.0,0.0,0.0,36.0
+60.383,5.333,-1.0,150.0,1.0,36.0
+60.383,5.333,-0.7,140.0,1.0,36.0
+60.383,5.333,0.5,150.0,1.0,36.0
+60.383,5.333,1.9,0.0,0.0,36.0
+60.383,5.333,1.7,0.0,0.0,36.0
+60.383,5.333,2.1,310.0,2.0,36.0
+60.383,5.333,1.5,90.0,1.0,36.0
+60.383,5.333,1.9,290.0,1.0,36.0
+60.383,5.333,2.0,320.0,1.0,36.0
+60.383,5.333,1.9,330.0,1.0,36.0
+60.383,5.333,1.3,350.0,1.0,36.0
+60.383,5.333,1.5,120.0,1.0,36.0
+60.383,5.333,1.3,150.0,2.0,36.0
+60.383,5.333,0.8,140.0,1.0,36.0
+60.383,5.333,0.3,300.0,1.0,36.0
+60.383,5.333,0.2,140.0,1.0,36.0
+60.383,5.333,0.4,140.0,1.0,36.0
+60.383,5.333,0.5,320.0,1.0,36.0
+60.383,5.333,1.5,330.0,1.0,36.0
+60.383,5.333,1.8,40.0,1.0,36.0
+60.383,5.333,2.3,170.0,1.0,36.0
+60.383,5.333,2.7,140.0,1.0,36.0
+60.383,5.333,3.1,330.0,1.0,36.0
+60.383,5.333,3.8,350.0,1.0,36.0
+60.383,5.333,3.8,140.0,1.0,36.0
+60.383,5.333,4.1,150.0,1.0,36.0
+60.383,5.333,4.4,180.0,1.0,36.0
+60.383,5.333,4.9,300.0,1.0,36.0
+60.383,5.333,5.2,320.0,1.0,36.0
+60.383,5.333,6.7,340.0,1.0,36.0
+60.383,5.333,6.9,250.0,1.0,36.0
+60.383,5.333,7.9,300.0,2.0,36.0
+60.383,5.333,5.5,140.0,1.0,36.0
+60.383,5.333,7.1,140.0,2.0,36.0
+60.383,5.333,7.0,280.0,2.0,36.0
+60.383,5.333,4.6,170.0,1.0,36.0
+60.383,5.333,4.8,330.0,1.0,36.0
+60.383,5.333,6.4,260.0,2.0,36.0
+60.383,5.333,6.2,340.0,1.0,36.0
+60.383,5.333,5.7,320.0,2.0,36.0
+60.383,5.333,5.2,100.0,1.0,36.0
+60.383,5.333,5.1,310.0,1.0,36.0
+60.383,5.333,4.9,290.0,2.0,36.0
+60.383,5.333,4.9,310.0,2.0,36.0
+60.383,5.333,6.1,320.0,2.0,36.0
+60.383,5.333,7.0,250.0,1.0,36.0
+60.383,5.333,5.3,140.0,1.0,36.0
+60.383,5.333,6.9,350.0,1.0,36.0
+60.383,5.333,9.7,110.0,3.0,36.0
+60.383,5.333,10.3,300.0,3.0,36.0
+60.383,5.333,8.7,310.0,1.0,36.0
+60.383,5.333,9.0,270.0,3.0,36.0
+60.383,5.333,11.6,80.0,3.0,36.0
+60.383,5.333,11.4,80.0,4.0,36.0
+60.383,5.333,9.7,70.0,5.0,36.0
+60.383,5.333,9.5,80.0,6.0,36.0
+60.383,5.333,8.7,80.0,5.0,36.0
+60.383,5.333,7.7,80.0,5.0,36.0
+60.383,5.333,8.2,80.0,4.0,36.0
+60.383,5.333,7.7,30.0,1.0,36.0
+60.383,5.333,7.2,310.0,1.0,36.0
+60.383,5.333,6.8,300.0,2.0,36.0
+60.383,5.333,6.7,140.0,1.0,36.0
@@ -92,7 +92,7 @@
 "dstore = ws.get_default_datastore()\n",
 "\n",
 "# upload weather data\n",
-"dstore.upload('training-dataset', 'drift-on-aks-data', overwrite=True, show_progress=False)"
+"dstore.upload('dataset', 'drift-on-aks-data', overwrite=True, show_progress=False)"
 ]
 },
 {
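The upload source changes from 'training-dataset' to 'dataset', presumably because the sample's data now lives under dataset/ (this same commit adds dataset/testing.csv). A sketch of the corrected cell in context, assuming a workspace config file on disk:

from azureml.core import Workspace

ws = Workspace.from_config()
dstore = ws.get_default_datastore()

# upload the local 'dataset' folder to 'drift-on-aks-data/' on the datastore
dstore.upload('dataset', 'drift-on-aks-data', overwrite=True, show_progress=False)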
@@ -229,7 +229,7 @@
 "source": [
 "## Run recent weather data through the webservice \n",
 "\n",
-"The below cells take the past 2 days of weather data, filter and transform using the same processes as the training dataset, and runs the data through the service."
+"The below cells take the weather data of Florida from 2019-11-20 to 2019-11-12, filter and transform using the same processes as the training dataset, and runs the data through the service."
 ]
 },
 {
@@ -238,16 +238,10 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from datetime import datetime, timedelta\n",
-"from azureml.opendatasets import NoaaIsdWeather\n",
+"# create dataset \n",
+"tset = Dataset.Tabular.from_delimited_files(dstore.path('drift-on-aks-data/testing.csv'))\n",
 "\n",
-"start = datetime.today() - timedelta(days=2)\n",
-"end = datetime.today()\n",
-"\n",
-"isd = NoaaIsdWeather(start, end)\n",
-"\n",
-"df = isd.to_pandas_dataframe().fillna(0)\n",
-"df = df[df['stationName'].str.contains('FLORIDA', regex=True, na=False)]\n",
+"df = tset.to_pandas_dataframe().fillna(0)\n",
 "\n",
 "X_features = ['latitude', 'longitude', 'temperature', 'windAngle', 'windSpeed']\n",
 "y_features = ['elevation']\n",
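Instead of pulling the last two days of NOAA data from azureml.opendatasets at run time, the notebook now reads the checked-in testing.csv back from the datastore, which makes the cell deterministic. A sketch of the replacement data flow, assuming the upload cell above has already run:

from azureml.core import Workspace, Dataset

ws = Workspace.from_config()
dstore = ws.get_default_datastore()

# build a TabularDataset over the uploaded CSV and materialize it
tset = Dataset.Tabular.from_delimited_files(dstore.path('drift-on-aks-data/testing.csv'))
df = tset.to_pandas_dataframe().fillna(0)

X = df[['latitude', 'longitude', 'temperature', 'windAngle', 'windSpeed']]
y = df[['elevation']]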
@@ -264,9 +258,9 @@
 "source": [
 "import json\n",
 "\n",
-"today_data = json.dumps({'data': X.values.tolist()})\n",
+"data = json.dumps({'data': X.values.tolist()})\n",
 "\n",
-"data_encoded = bytes(today_data, encoding='utf8')\n",
+"data_encoded = bytes(data, encoding='utf8')\n",
 "prediction = service.run(input_data=data_encoded)\n",
 "print(prediction)"
 ]
@@ -342,6 +336,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
+"from datetime import datetime, timedelta\n",
 "from azureml.datadrift import DataDriftDetector, AlertConfiguration\n",
 "\n",
 "services = [service_name]\n",
@@ -100,7 +100,7 @@
 "\n",
 "# Check core SDK version number\n",
 "\n",
-"print(\"This notebook was created using SDK version 1.0.79, you are currently running version\", azureml.core.VERSION)"
+"print(\"This notebook was created using SDK version 1.0.81, you are currently running version\", azureml.core.VERSION)"
 ]
 },
 {
@@ -925,6 +925,7 @@
 "cd = CondaDependencies.create()\n",
 "cd.add_tensorflow_conda_package()\n",
 "cd.add_conda_package('keras==2.2.5')\n",
+"cd.add_pip_package(\"azureml-defaults\")\n",
 "cd.save_to_file(base_directory='./', conda_file_path='myenv.yml')\n",
 "\n",
 "print(cd.serialize_to_string())"
@@ -947,10 +948,11 @@
 "from azureml.core.webservice import AciWebservice\n",
 "from azureml.core.model import InferenceConfig\n",
 "from azureml.core.model import Model\n",
+"from azureml.core.environment import Environment\n",
 "\n",
-"inference_config = InferenceConfig(runtime= \"python\", \n",
-"                                   entry_script=\"score.py\",\n",
-"                                   conda_file=\"myenv.yml\")\n",
+"\n",
+"myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\n",
+"inference_config = InferenceConfig(entry_script=\"score.py\", environment=myenv)\n",
 "\n",
 "aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,\n",
 "                                               auth_enabled=True, # this flag generates API keys to secure access\n",
@@ -290,7 +290,9 @@
 "outputs": [],
 "source": [
 "# backfill for one month\n",
-"backfill = monitor.backfill(datetime(2019, 9, 1), datetime(2019, 10, 1))\n",
+"backfill_start_date = datetime(2019, 9, 1)\n",
+"backfill_end_date = datetime(2019, 10, 1)\n",
+"backfill = monitor.backfill(backfill_start_date, backfill_end_date)\n",
 "backfill"
 ]
 },
@@ -353,7 +355,7 @@
 "outputs": [],
 "source": [
 "# plot the results from Python SDK \n",
-"monitor.show()"
+"monitor.show(backfill_start_date, backfill_end_date)"
 ]
 },
 {
@@ -371,7 +373,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"link = 'https://ml.azure.com/data/monitor/{}?wsid=/subscriptions/{}/resourcegroups/{}/workspaces/{}'.format(monitor.name, ws.subscription_id, ws.resource_group, ws.name)\n",
+"link = 'https://ml.azure.com/data/monitor/{}?wsid=/subscriptions/{}/resourcegroups/{}/workspaces/{}&startDate={}&endDate={}'.format(monitor.name, ws.subscription_id, ws.resource_group, ws.name, backfill_start_date.strftime('%Y-%m-%d'), backfill_end_date.strftime('%Y-%m-%d'))\n",
 "print(link)"
 ]
 },
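The three monitor hunks above share one idea: name the backfill window once and reuse it for backfill(), show(), and the studio deep link. A combined sketch, assuming an existing DataDriftDetector registered under the hypothetical name 'weather-drift' and a workspace config file on disk:

from datetime import datetime
from azureml.core import Workspace
from azureml.datadrift import DataDriftDetector

ws = Workspace.from_config()
monitor = DataDriftDetector.get_by_name(ws, 'weather-drift')  # hypothetical name

backfill_start_date = datetime(2019, 9, 1)
backfill_end_date = datetime(2019, 10, 1)

# compute drift metrics over the window, then plot the same window
backfill = monitor.backfill(backfill_start_date, backfill_end_date)
monitor.show(backfill_start_date, backfill_end_date)

# studio link scoped to the same date range
link = ('https://ml.azure.com/data/monitor/{}?wsid=/subscriptions/{}/'
        'resourcegroups/{}/workspaces/{}&startDate={}&endDate={}').format(
    monitor.name, ws.subscription_id, ws.resource_group, ws.name,
    backfill_start_date.strftime('%Y-%m-%d'),
    backfill_end_date.strftime('%Y-%m-%d'))
print(link)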
106 index.md
@@ -10,6 +10,7 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an
 |Title| Task | Dataset | Training Compute | Deployment Target | ML Framework | Tags |
 |:----|:-----|:-------:|:----------------:|:-----------------:|:------------:|:------------:|
 | [Using Azure ML environments](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training/using-environments/using-environments.ipynb) | Creating and registering environments | None | Local | None | None | None |
+
 | [Estimators in AML with hyperparameter tuning](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training-with-deep-learning/how-to-use-estimator/how-to-use-estimator.ipynb) | Use the Estimator pattern in Azure Machine Learning SDK | None | AML Compute | None | None | None |


@@ -18,63 +19,116 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an
 |Title| Task | Dataset | Training Compute | Deployment Target | ML Framework | Tags |
 |:----|:-----|:-------:|:----------------:|:-----------------:|:------------:|:------------:|
 | [Forecasting BikeShare Demand](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb) | Forecasting | BikeShare | Remote | None | Azure ML AutoML | Forecasting |

 | [Forecasting orange juice sales with deployment](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb) | Forecasting | Orange Juice Sales | Remote | Azure Container Instance | Azure ML AutoML | None |

 | [Forecasting with automated ML SQL integration](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/sql-server/energy-demand/auto-ml-sql-energy-demand.ipynb) | Forecasting | NYC Energy | Local | None | Azure ML AutoML | |

 | [Setup automated ML SQL integration](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/sql-server/setup/auto-ml-sql-setup.ipynb) | None | None | None | None | Azure ML AutoML | |

 | [Register a model and deploy locally](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/deploy-to-local/register-model-deploy-local.ipynb) | Deployment | None | Local | Local | None | None |

 | :star:[Data drift on aks](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/monitor-models/data-drift/drift-on-aks.ipynb) | Filtering | NOAA | Remote | AKS | Azure ML | Dataset, Timeseries, Drift |

 | [Train and deploy a model using Python SDK](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training/train-within-notebook/train-within-notebook.ipynb) | Training and deploying a model from a notebook | Diabetes | Local | Azure Container Instance | None | None |

 | :star:[Data drift quickdemo](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/work-with-data/datadrift-tutorial/datadrift-tutorial.ipynb) | Filtering | NOAA | Remote | None | Azure ML | Dataset, Timeseries, Drift |

 | :star:[Filtering data using Tabular Timeseiries Dataset related API](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/work-with-data/datasets-tutorial/tabular-timeseries-dataset-filtering.ipynb) | Filtering | NOAA | Local | None | Azure ML | Dataset, Tabular Timeseries |

 | :star:[Introduction to labeled datasets](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/work-with-data/datasets-tutorial/labeled-datasets/labeled-datasets.ipynb) | Train | | Remote | None | Azure ML | Dataset, label, Estimator |

 | :star:[Datasets with ML Pipeline](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb) | Train | Fashion MNIST | Remote | None | Azure ML | Dataset, Pipeline, Estimator, ScriptRun |

 | :star:[Train with Datasets (Tabular and File)](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.ipynb) | Train | Iris, Diabetes | Remote | None | Azure ML | Dataset, Estimator, ScriptRun |

 | [Forecasting away from training data](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/automl-forecasting-function.ipynb) | Forecasting | None | Remote | None | Azure ML AutoML | Forecasting, Confidence Intervals |

 | [Automated ML run with basic edition features.](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb) | Classification | Bankmarketing | AML | ACI | None | featurization, explainability, remote_run, AutomatedML |

 | [Classification of credit card fraudulent transactions using Automated ML](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb) | Classification | Creditcard | AML Compute | None | None | remote_run, AutomatedML |

 | [Automated ML run with featurization and model explainability.](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/regression-hardware-performance-explanation-and-featurization/auto-ml-regression-hardware-performance-explanation-and-featurization.ipynb) | Regression | MachineData | AML | ACI | None | featurization, explainability, remote_run, AutomatedML |

 | [Use MLflow with Azure Machine Learning for training and deployment](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-deploy-pytorch/train-and-deploy-pytorch.ipynb) | Use MLflow with Azure Machine Learning to train and deploy PyTorch image classifier model | MNIST | AML Compute | Azure Container Instance | PyTorch | None |

 | :star:[Azure Machine Learning Pipeline with DataTranferStep](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-data-transfer.ipynb) | Demonstrates the use of DataTranferStep | Custom | ADF | None | Azure ML | None |

 | [Getting Started with Azure Machine Learning Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb) | Getting Started notebook for ANML Pipelines | Custom | AML Compute | None | Azure ML | None |

 | [Azure Machine Learning Pipeline with AzureBatchStep](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-azurebatch-to-run-a-windows-executable.ipynb) | Demonstrates the use of AzureBatchStep | Custom | Azure Batch | None | Azure ML | None |

 | [Azure Machine Learning Pipeline with EstimatorStep](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-estimatorstep.ipynb) | Demonstrates the use of EstimatorStep | Custom | AML Compute | None | Azure ML | None |

 | :star:[How to use ModuleStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-modulestep.ipynb) | Demonstrates the use of ModuleStep | Custom | AML Compute | None | Azure ML | None |

 | :star:[How to use Pipeline Drafts to create a Published Pipeline](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-pipeline-drafts.ipynb) | Demonstrates the use of Pipeline Drafts | Custom | AML Compute | None | Azure ML | None |

 | :star:[Azure Machine Learning Pipeline with HyperDriveStep](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-parameter-tuning-with-hyperdrive.ipynb) | Demonstrates the use of HyperDriveStep | Custom | AML Compute | None | Azure ML | None |

 | :star:[How to Publish a Pipeline and Invoke the REST endpoint](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-publish-and-run-using-rest-endpoint.ipynb) | Demonstrates the use of Published Pipelines | Custom | AML Compute | None | Azure ML | None |

 | :star:[How to Setup a Schedule for a Published Pipeline](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-schedule-for-a-published-pipeline.ipynb) | Demonstrates the use of Schedules for Published Pipelines | Custom | AML Compute | None | Azure ML | None |

 | [How to setup a versioned Pipeline Endpoint](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-versioned-pipeline-endpoints.ipynb) | Demonstrates the use of PipelineEndpoint to run a specific version of the Published Pipeline | Custom | AML Compute | None | Azure ML | None |

 | :star:[How to use DataPath as a PipelineParameter](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-datapath-and-pipelineparameter.ipynb) | Demonstrates the use of DataPath as a PipelineParameter | Custom | AML Compute | None | Azure ML | None |

 | [How to use AdlaStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-adla-as-compute-target.ipynb) | Demonstrates the use of AdlaStep | Custom | Azure Data Lake Analytics | None | Azure ML | None |

 | :star:[How to use DatabricksStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-databricks-as-compute-target.ipynb) | Demonstrates the use of DatabricksStep | Custom | Azure Databricks | None | Azure ML, Azure Databricks | None |

 | :star:[How to use AutoMLStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-automated-machine-learning-step.ipynb) | Demonstrates the use of AutoMLStep | Custom | AML Compute | None | Automated Machine Learning | None |

 | :star:[Azure Machine Learning Pipelines with Data Dependency](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-data-dependency-steps.ipynb) | Demonstrates how to construct a Pipeline with data dependency between steps | Custom | AML Compute | None | Azure ML | None |

+| [How to use run a notebook as a step in AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-notebook-runner-step.ipynb) | Demonstrates the use of NotebookRunnerStep | Custom | AML Compute | None | Azure ML | None |


 ## Training

 |Title| Task | Dataset | Training Compute | Deployment Target | ML Framework | Tags |
 |:----|:-----|:-------:|:----------------:|:-----------------:|:------------:|:------------:|
 | [Train a model with hyperparameter tuning](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/chainer/deployment/train-hyperparameter-tune-deploy-with-chainer/train-hyperparameter-tune-deploy-with-chainer.ipynb) | Train a Convolutional Neural Network (CNN) | MNIST | AML Compute | Azure Container Instance | Chainer | None |

 | [Distributed Training with Chainer](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/chainer/training/distributed-chainer/distributed-chainer.ipynb) | Use the Chainer estimator to perform distributed training | MNIST | AML Compute | None | Chainer | None |

 | [Training with hyperparameter tuning using PyTorch](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/pytorch/deployment/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb) | Train an image classification model using transfer learning with the PyTorch estimator | ImageNet | AML Compute | Azure Container Instance | PyTorch | None |

 | [Distributed PyTorch](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/pytorch/training/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb) | Train a model using the distributed training via Horovod | MNIST | AML Compute | None | PyTorch | None |

 | [Distributed training with PyTorch](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/pytorch/training/distributed-pytorch-with-nccl-gloo/distributed-pytorch-with-nccl-gloo.ipynb) | Train a model using distributed training via Nccl/Gloo | MNIST | AML Compute | None | PyTorch | None |

 | [Training and hyperparameter tuning with Scikit-learn](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/scikit-learn/training/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.ipynb) | Train a support vector machine (SVM) to perform classification | Iris | AML Compute | None | Scikit-learn | None |

 | [Training and hyperparameter tuning using the TensorFlow estimator](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/tensorflow/deployment/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb) | Train a deep neural network | MNIST | AML Compute | Azure Container Instance | TensorFlow | None |

 | [Distributed training using TensorFlow with Horovod](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/tensorflow/training/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.ipynb) | Use the TensorFlow estimator to train a word2vec model | None | AML Compute | None | TensorFlow | None |

 | [Distributed TensorFlow with parameter server](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/tensorflow/training/distributed-tensorflow-with-parameter-server/distributed-tensorflow-with-parameter-server.ipynb) | Use the TensorFlow estimator to train a model using distributed training | MNIST | AML Compute | None | TensorFlow | None |

 | [Hyperparameter tuning and warm start using the TensorFlow estimator](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/tensorflow/training/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb) | Train a deep neural network | MNIST | AML Compute | Azure Container Instance | TensorFlow | None |

 | [Resuming a model](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/tensorflow/training/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb) | Resume a model in TensorFlow from a previously submitted run | MNIST | AML Compute | None | TensorFlow | None |

 | [Training in Spark](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training/train-in-spark/train-in-spark.ipynb) | Submiting a run on a spark cluster | None | HDI cluster | None | PySpark | None |

 | [Train on Azure Machine Learning Compute](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb) | Submit a run on Azure Machine Learning Compute. | Diabetes | AML Compute | None | None | None |

 | [Train on local compute](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training/train-on-local/train-on-local.ipynb) | Train a model locally | Diabetes | Local | None | None | None |

 | [Train in a remote Linux virtual machine](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training/train-on-remote-vm/train-on-remote-vm.ipynb) | Configure and execute a run | Diabetes | Data Science Virtual Machine | None | None | None |

 | [Using Tensorboard](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training-with-deep-learning/export-run-history-to-tensorboard/export-run-history-to-tensorboard.ipynb) | Export the run history as Tensorboard logs | None | None | None | TensorFlow | None |

 | [Train a DNN using hyperparameter tuning and deploying with Keras](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb) | Create a multi-class classifier | MNIST | AML Compute | Azure Container Instance | TensorFlow | None |

 | [Managing your training runs](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/manage-runs/manage-runs.ipynb) | Monitor and complete runs | None | Local | None | None | None |

 | [Tensorboard integration with run history](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard.ipynb) | Run a TensorFlow job and view its Tensorboard output live | None | Local, DSVM, AML Compute | None | TensorFlow | None |
|
||||||
|
|
||||||
| [Use MLflow with AML for a local training run](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-local/train-local.ipynb) | Use MLflow tracking APIs together with Azure Machine Learning for storing your metrics and artifacts | Diabetes | Local | None | None | None |
|
| [Use MLflow with AML for a local training run](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-local/train-local.ipynb) | Use MLflow tracking APIs together with Azure Machine Learning for storing your metrics and artifacts | Diabetes | Local | None | None | None |
|
||||||
|
|
||||||
| [Use MLflow with AML for a remote training run](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-remote/train-remote.ipynb) | Use MLflow tracking APIs together with AML for storing your metrics and artifacts | Diabetes | AML Compute | None | None | None |
|
| [Use MLflow with AML for a remote training run](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-remote/train-remote.ipynb) | Use MLflow tracking APIs together with AML for storing your metrics and artifacts | Diabetes | AML Compute | None | None | None |
|
||||||
|
|
||||||
|
|
||||||
@@ -85,12 +139,19 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an
|Title| Task | Dataset | Training Compute | Deployment Target | ML Framework | Tags |
|:----|:-----|:-------:|:----------------:|:-----------------:|:------------:|:------------:|
| [Deploy MNIST digit recognition with ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/onnx/onnx-inference-mnist-deploy.ipynb) | Image Classification | MNIST | Local | Azure Container Instance | ONNX | ONNX Model Zoo |
| [Deploy Facial Expression Recognition (FER+) with ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/onnx/onnx-inference-facial-expression-recognition-deploy.ipynb) | Facial Expression Recognition | Emotion FER | Local | Azure Container Instance | ONNX | ONNX Model Zoo |
| :star:[Register model and deploy as webservice](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/deploy-to-cloud/model-register-and-deploy.ipynb) | Deploy a model with Azure Machine Learning | Diabetes | None | Azure Container Instance | Scikit-learn | None |
| :star:[Deploy models to AKS using controlled roll out](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/deploy-with-controlled-rollout/deploy-aks-with-controlled-rollout.ipynb) | Deploy a model with Azure Machine Learning | Diabetes | None | Azure Kubernetes Service | Scikit-learn | None |
| [Train MNIST in PyTorch, convert, and deploy with ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/onnx/onnx-train-pytorch-aml-deploy-mnist.ipynb) | Image Classification | MNIST | AML Compute | Azure Container Instance | ONNX | ONNX Converter |
| [Deploy ResNet50 with ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/onnx/onnx-modelzoo-aml-deploy-resnet50.ipynb) | Image Classification | ImageNet | Local | Azure Container Instance | ONNX | ONNX Model Zoo |
| [Deploy a model as a web service using MLflow](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/using-mlflow/deploy-model/deploy-model.ipynb) | Use MLflow with AML | Diabetes | None | Azure Container Instance | Scikit-learn | None |
| :star:[Convert and deploy TinyYolo with ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/onnx/onnx-convert-aml-deploy-tinyyolo.ipynb) | Object Detection | PASCAL VOC | Local | Azure Container Instance | ONNX | ONNX Converter |

@@ -99,47 +160,92 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an
|Title| Task | Dataset | Training Compute | Deployment Target | ML Framework | Tags |
|:----|:-----|:-------:|:----------------:|:-----------------:|:------------:|:------------:|
| [DNN Text Featurization](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.ipynb) | Text featurization using DNNs for classification | None | AML Compute | None | None | None |
| [Automated ML Grouping with Pipeline](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-grouping/auto-ml-forecasting-grouping.ipynb) | Use AzureML Pipeline to trigger multiple Automated ML runs | Orange Juice Sales | AML Compute | Azure Container Instance | Scikit-learn, Pytorch | AutomatedML |
| [configuration](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) | | | | | | |
| [lightgbm-example](https://github.com/Azure/MachineLearningNotebooks/blob/master//contrib/gbdt/lightgbm/lightgbm-example.ipynb) | | | | | | |
| [azure-ml-with-nvidia-rapids](https://github.com/Azure/MachineLearningNotebooks/blob/master//contrib/RAPIDS/azure-ml-with-nvidia-rapids.ipynb) | | | | | | |
| [auto-ml-continuous-retraining](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb) | | | | | | |
| [auto-ml-forecasting-beer-remote](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb) | | | | | | |
| [auto-ml-forecasting-energy-demand](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb) | | | | | | |
| [auto-ml-regression](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/regression/auto-ml-regression.ipynb) | | | | | | |
| [build-model-run-history-03](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-databricks/amlsdk/build-model-run-history-03.ipynb) | | | | | | |
| [deploy-to-aci-04](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-databricks/amlsdk/deploy-to-aci-04.ipynb) | | | | | | |
| [deploy-to-aks-05](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-databricks/amlsdk/deploy-to-aks-05.ipynb) | | | | | | |
| [ingest-data-02](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-databricks/amlsdk/ingest-data-02.ipynb) | | | | | | |
| [installation-and-configuration-01](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-databricks/amlsdk/installation-and-configuration-01.ipynb) | | | | | | |
| [automl-databricks-local-01](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-databricks/automl/automl-databricks-local-01.ipynb) | | | | | | |
| [automl-databricks-local-with-deployment](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-databricks/automl/automl-databricks-local-with-deployment.ipynb) | | | | | | |
| [aml-pipelines-use-databricks-as-compute-target](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-databricks/databricks-as-remote-compute-target/aml-pipelines-use-databricks-as-compute-target.ipynb) | | | | | | |
| [accelerated-models-object-detection](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/accelerated-models/accelerated-models-object-detection.ipynb) | | | | | | |
| [accelerated-models-quickstart](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/accelerated-models/accelerated-models-quickstart.ipynb) | | | | | | |
| [accelerated-models-training](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/accelerated-models/accelerated-models-training.ipynb) | | | | | | |
| [multi-model-register-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/deploy-multi-model/multi-model-register-and-deploy.ipynb) | | | | | | |
| [register-model-deploy-local-advanced](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/deploy-to-local/register-model-deploy-local-advanced.ipynb) | | | | | | |
| [enable-app-insights-in-production-service](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb) | | | | | | |
| [onnx-model-register-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/onnx/onnx-model-register-and-deploy.ipynb) | | | | | | |
| [production-deploy-to-aks](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb) | | | | | | |
| [register-model-create-image-deploy-service](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/register-model-create-image-deploy-service/register-model-create-image-deploy-service.ipynb) | | | | | | |
| [tensorflow-model-register-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/tensorflow/tensorflow-model-register-and-deploy.ipynb) | | | | | | |
| [explain-model-on-amlcompute](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.ipynb) | | | | | | |
| [save-retrieve-explanations-run-history](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/explain-model/azure-integration/run-history/save-retrieve-explanations-run-history.ipynb) | | | | | | |
| [train-explain-model-locally-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-locally-and-deploy.ipynb) | | | | | | |
| [train-explain-model-on-amlcompute-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.ipynb) | | | | | | |
| [training_notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/notebook_runner/training_notebook.ipynb) | | | | | | |
| [nyc-taxi-data-regression-model-building](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb) | | | | | | |
| [pipeline-batch-scoring](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/pipeline-batch-scoring/pipeline-batch-scoring.ipynb) | | | | | | |
| [pipeline-style-transfer](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer.ipynb) | | | | | | |
| [authentication-in-azureml](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/manage-azureml-service/authentication-in-azureml/authentication-in-azureml.ipynb) | | | | | | |
| [Logging APIs](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb) | Logging APIs and analyzing results | None | None | None | None | None |
| [distributed-cntk-with-custom-docker](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training-with-deep-learning/distributed-cntk-with-custom-docker/distributed-cntk-with-custom-docker.ipynb) | | | | | | |
| [notebook_example](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training-with-deep-learning/how-to-use-estimator/notebook_example.ipynb) | | | | | | |
| [configuration](https://github.com/Azure/MachineLearningNotebooks/blob/master//setup-environment/configuration.ipynb) | | | | | | |
| [img-classification-part1-training](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/img-classification-part1-training.ipynb) | | | | | | |
| [img-classification-part2-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/img-classification-part2-deploy.ipynb) | | | | | | |
| [regression-automated-ml](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/regression-automated-ml.ipynb) | | | | | | |
| [tutorial-1st-experiment-sdk-train](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/tutorial-1st-experiment-sdk-train.ipynb) | | | | | | |
| [tutorial-pipeline-batch-scoring-classification](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/tutorial-pipeline-batch-scoring-classification.ipynb) | | | | | | |

@@ -102,7 +102,7 @@
 "source": [
 "import azureml.core\n",
 "\n",
- "print(\"This notebook was created using version 1.0.79 of the Azure ML SDK\")\n",
+ "print(\"This notebook was created using version 1.0.81 of the Azure ML SDK\")\n",
 "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
 ]
},

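The hunk above only bumps the SDK version string pinned in the notebook's first cell. For readers applying the same bump locally, here is a minimal sketch of the check that cell performs, with the pinned version factored into a variable; the `EXPECTED` name and the upgrade hint are illustrative, not taken from the notebook:

```python
import azureml.core

# The notebook records the SDK version it was authored against; compare it
# with the version actually installed in the current environment.
EXPECTED = "1.0.81"  # version pinned by this change
print(f"This notebook was created using version {EXPECTED} of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
if azureml.core.VERSION != EXPECTED:
    print("Versions differ; upgrade with: pip install --upgrade azureml-sdk")
```
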
@@ -328,6 +328,7 @@
 "\n",
 "myenv = CondaDependencies()\n",
 "myenv.add_conda_package(\"scikit-learn\")\n",
+ "myenv.add_pip_package(\"azureml-defaults\")\n",
 "\n",
 "with open(\"myenv.yml\",\"w\") as f:\n",
 "    f.write(myenv.serialize_to_string())"

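Unescaped from the notebook JSON, the cell this hunk edits now reads as below; a minimal sketch, assuming only that `azureml-defaults` is the pip package carrying the runtime dependencies the deployed scoring container needs:

```python
from azureml.core.conda_dependencies import CondaDependencies

# Declare the model's runtime dependencies.
myenv = CondaDependencies()
myenv.add_conda_package("scikit-learn")
# Added by this change: azureml-defaults pulls in the packages the
# scoring web service needs at runtime.
myenv.add_pip_package("azureml-defaults")

# Serialize to the conda environment file referenced by the deployment steps.
with open("myenv.yml", "w") as f:
    f.write(myenv.serialize_to_string())
```
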
@@ -387,13 +388,11 @@
 "\n",
 "Configure the image and deploy. The following code goes through these steps:\n",
 "\n",
- "1. Build an image using:\n",
- "   * The scoring file (`score.py`)\n",
- "   * The environment file (`myenv.yml`)\n",
- "   * The model file\n",
- "1. Register that image under the workspace. \n",
- "1. Send the image to the ACI container.\n",
- "1. Start up a container in ACI using the image.\n",
+ "1. Create environment object containing dependencies needed by the model using the environment file (`myenv.yml`)\n",
+ "1. Create inference configuration necessary to deploy the model as a web service using:\n",
+ "   * The scoring file (`score.py`)\n",
+ "   * environment object created in previous step\n",
+ "1. Deploy the model to the ACI container.\n",
 "1. Get the web service HTTP endpoint."
 ]
},

@@ -413,10 +412,11 @@
 "%%time\n",
 "from azureml.core.webservice import Webservice\n",
 "from azureml.core.model import InferenceConfig\n",
+ "from azureml.core.environment import Environment\n",
 "\n",
- "inference_config = InferenceConfig(runtime= \"python\", \n",
- "                                   entry_script=\"score.py\",\n",
- "                                   conda_file=\"myenv.yml\")\n",
+ "\n",
+ "myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\n",
+ "inference_config = InferenceConfig(entry_script=\"score.py\", environment=myenv)\n",
 "\n",
 "service = Model.deploy(workspace=ws, \n",
 "                       name='sklearn-mnist-svc', \n",

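The hunk cuts off midway through the `Model.deploy` call. A minimal sketch of the full environment-based deployment pattern this change moves to; the ACI sizing, the registered-model name, and the `models` list are illustrative assumptions, not taken from the notebook:

```python
from azureml.core import Workspace
from azureml.core.environment import Environment
from azureml.core.model import InferenceConfig, Model
from azureml.core.webservice import AciWebservice

ws = Workspace.from_config()  # assumes a config.json in the working directory

# Build the environment from the conda file written earlier, then wire it
# into the inference configuration together with the scoring script.
myenv = Environment.from_conda_specification(name="myenv", file_path="myenv.yml")
inference_config = InferenceConfig(entry_script="score.py", environment=myenv)

# Hypothetical ACI sizing; the notebook's actual deployment config is outside this hunk.
aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)

model = Model(ws, name="sklearn_mnist")  # assumed registered-model name
service = Model.deploy(workspace=ws,
                       name="sklearn-mnist-svc",
                       models=[model],
                       inference_config=inference_config,
                       deployment_config=aci_config)
service.wait_for_deployment(show_output=True)
print(service.scoring_uri)
```
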