{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Copyright (c) Microsoft Corporation. All rights reserved.\n",
    "\n",
    "Licensed under the MIT License."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 05. Train in Spark\n",
    "* Initialize Workspace\n",
    "* Create Experiment\n",
    "* View the training script\n",
    "* Configure and Run"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Prerequisites\n",
    "Make sure you go through the [00. Installation and Configuration](00.configuration.ipynb) Notebook first if you haven't."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Check core SDK version number\n",
    "import azureml.core\n",
    "\n",
    "print(\"SDK version:\", azureml.core.VERSION)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Initialize Workspace\n",
    "\n",
    "Initialize a workspace object from persisted configuration."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from azureml.core import Workspace\n",
    "\n",
    "ws = Workspace.from_config()\n",
    "print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
   ]
  },
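  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "If no `config.json` is present in the current directory, you can fetch the workspace directly instead. This is a minimal sketch assuming the workspace already exists; the names below are placeholders, so the cell is left commented out."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Alternative: fetch an existing workspace without a local config.json.\n",
    "# The subscription, resource group and workspace names are placeholders.\n",
    "# ws = Workspace.get(name='<workspace-name>',\n",
    "#                    subscription_id='<subscription-id>',\n",
    "#                    resource_group='<resource-group>')\n",
    "# ws.write_config()  # persist config.json so Workspace.from_config() works next time"
   ]
  },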
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Create Experiment\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "experiment_name = 'train-on-spark'\n",
    "\n",
    "from azureml.core import Experiment\n",
    "\n",
    "exp = Experiment(workspace = ws, name = experiment_name)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## View `train-spark.py`\n",
    "\n",
    "For convenience, we created a training script for you. It is printed below as text, but you can also run `%pycat ./train-spark.py` in a cell to show the file."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "with open('train-spark.py', 'r') as training_script:\n",
    "    print(training_script.read())"
   ]
  },
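  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The authoritative content of `train-spark.py` is whatever the cell above prints. As a rough, hypothetical sketch only (not the real script), a PySpark training script that reports metrics back to Azure ML usually follows this shape:\n",
    "\n",
    "```python\n",
    "# Hypothetical sketch - the real train-spark.py is printed above and may differ.\n",
    "from pyspark.sql import SparkSession\n",
    "from azureml.core.run import Run\n",
    "\n",
    "# handle to the run submitted from the notebook\n",
    "run = Run.get_context()\n",
    "\n",
    "spark = SparkSession.builder.appName('train-spark').getOrCreate()\n",
    "\n",
    "# load data and train a pyspark.ml model here, then log metrics, e.g.:\n",
    "# df = spark.read.csv('<path-to-data>', header=True, inferSchema=True)\n",
    "run.log('example_metric', 0.5)  # placeholder value\n",
    "\n",
    "spark.stop()\n",
    "```"
   ]
  },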
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Configure & Run"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Attach an HDI cluster\n",
    "To use an HDI compute target:\n",
    " 1. Create a Spark HDInsight cluster in Azure. Here are [quick instructions](https://docs.microsoft.com/en-us/azure/machine-learning/desktop-workbench/how-to-create-dsvm-hdi). Make sure you use the Ubuntu flavor, NOT CentOS.\n",
    " 2. Enter the cluster's SSH address, username and password below."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from azureml.core.compute import HDInsightCompute\n",
    "from azureml.exceptions import UserErrorException\n",
    "\n",
    "try:\n",
    "    # to connect using an SSH key instead of username/password, provide the parameters private_key_file and private_key_passphrase\n",
    "    hdi_compute_new = HDInsightCompute.attach(ws,\n",
    "                                              name=\"hdi-attach\",\n",
    "                                              address=\"hdi-ignite-demo-ssh.azurehdinsight.net\",\n",
    "                                              ssh_port=22,\n",
    "                                              username='<username>',\n",
    "                                              password='<password>')\n",
    "\n",
    "except UserErrorException as e:\n",
    "    print(\"Caught = {}\".format(e.message))\n",
    "    print(\"Compute config already attached.\")\n",
    "    # reuse the compute target already attached under the same name\n",
    "    hdi_compute_new = ws.compute_targets['hdi-attach']\n",
    "\n",
    "hdi_compute_new.wait_for_completion(show_output=True)"
   ]
  },
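  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As a quick check (not part of the original walkthrough), you can list the compute targets currently attached to the workspace to confirm the cluster shows up:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# list the compute targets attached to this workspace\n",
    "for name, target in ws.compute_targets.items():\n",
    "    print(name, ':', target.type)"
   ]
  },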
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Configure HDI run"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from azureml.core.runconfig import RunConfiguration\n",
    "from azureml.core.conda_dependencies import CondaDependencies\n",
    "\n",
    "\n",
    "# Create a new run configuration; use the PySpark framework so the script runs as a Spark job\n",
    "run_config = RunConfiguration(framework = \"pyspark\")\n",
    "\n",
    "# Set the compute target to the attached HDI cluster\n",
    "run_config.target = hdi_compute_new.name\n",
    "\n",
    "# Use Docker on the remote compute\n",
    "# run_config.environment.docker.enabled = True\n",
    "\n",
    "# Use CPU base image from DockerHub\n",
    "# run_config.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE\n",
    "# print('Base Docker image is:', run_config.environment.docker.base_image)\n",
    "\n",
    "# Ask the system to provision a new environment based on the specified conda dependencies\n",
    "run_config.environment.python.user_managed_dependencies = False\n",
    "\n",
    "# Prepare the Docker and conda environment automatically when executing for the first time.\n",
    "# run_config.prepare_environment = True\n",
    "\n",
    "# Specify a CondaDependencies object if the script needs extra packages, e.g.:\n",
    "# run_config.environment.python.conda_dependencies = CondaDependencies.create(conda_packages=['scikit-learn'])\n",
    "# Alternatively, load the runconfig object from the \"myhdi.runconfig\" file generated by the attach operation above."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Submit the script to HDI"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from azureml.core import ScriptRunConfig\n",
    "\n",
    "script_run_config = ScriptRunConfig(source_directory = '.',\n",
    "                                    script = 'train-spark.py',\n",
    "                                    run_config = run_config)\n",
    "run = exp.submit(script_run_config)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# get the URL of the run history web page\n",
    "run"
   ]
  },
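  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Optionally, you can also monitor the run with the Jupyter widget. This assumes the `azureml-widgets` package is installed; it is not required for the rest of the notebook."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional: live monitoring widget (requires the azureml-widgets package)\n",
    "from azureml.widgets import RunDetails\n",
    "\n",
    "RunDetails(run).show()"
   ]
  },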
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "run.wait_for_completion(show_output = True)"
   ]
  },
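  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Once the run has completed, you can also list the files (logs and outputs) it produced. This cell is a small addition to the original flow."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# list the log and output files associated with the run\n",
    "print(run.get_file_names())"
   ]
  },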
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# get all metrics logged in the run\n",
    "metrics = run.get_metrics()\n",
    "print(metrics)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.6",
   "language": "python",
   "name": "python36"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}