Update notebooks

This commit is contained in:
Roope Astala
2018-09-14 15:14:43 -04:00
parent 01a12c0b74
commit 8178484586
40 changed files with 14985 additions and 67 deletions

View File

@@ -110,7 +110,7 @@
"experiment_name = 'sklearn-mnist'\n",
"\n",
"from azureml.core import Experiment\n",
"exp = Experiment(workspace = ws, name = experiment_name)"
"exp = Experiment(workspace=ws, name=experiment_name)"
]
},
{
@@ -143,25 +143,25 @@
"\n",
"try:\n",
" # look for the existing cluster by name\n",
" compute_target = ComputeTarget(workspace = ws, name = batchai_cluster_name)\n",
" compute_target = ComputeTarget(workspace=ws, name=batchai_cluster_name)\n",
" if compute_target is BatchAiCompute:\n",
" print('found compute target {}, just use it.'.format(batchai_cluster_name))\n",
" else:\n",
" print('{} exists but it is not a Batch AI cluster. Please choose a different name.'.format(batchai_cluster_name))\n",
"except ComputeTargetException:\n",
" print('creating a new compute target...')\n",
" compute_config = BatchAiCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\", # small CPU-based VM\n",
" #vm_priority = 'lowpriority', # optional\n",
" autoscale_enabled = True,\n",
" cluster_min_nodes = 0, \n",
" cluster_max_nodes = 4)\n",
" compute_config = BatchAiCompute.provisioning_configuration(vm_size=\"STANDARD_D2_V2\", # small CPU-based VM\n",
" #vm_priority='lowpriority', # optional\n",
" autoscale_enabled=True,\n",
" cluster_min_nodes=0, \n",
" cluster_max_nodes=4)\n",
"\n",
" # create the cluster\n",
" compute_target = ComputeTarget.create(ws, batchai_cluster_name, compute_config)\n",
" \n",
" # can poll for a minimum number of nodes and for a specific timeout. \n",
" # if no min node count is provided it uses the scale settings for the cluster\n",
" compute_target.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20)\n",
" compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
" \n",
" # Use the 'status' property to get a detailed status for the current cluster. \n",
" print(compute_target.status.serialize())"
@@ -197,10 +197,10 @@
"\n",
"os.makedirs('./data', exist_ok = True)\n",
"\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', filename = './data/train-images.gz')\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', filename = './data/train-labels.gz')\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename = './data/test-images.gz')\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename = './data/test-labels.gz')"
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', filename='./data/train-images.gz')\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', filename='./data/train-labels.gz')\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename='./data/test-images.gz')\n",
"urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename='./data/test-labels.gz')"
]
},
{
@@ -237,8 +237,8 @@
" plt.subplot(1, sample_size, count)\n",
" plt.axhline('')\n",
" plt.axvline('')\n",
" plt.text(x = 10, y = -10, s = y_train[i], fontsize = 18)\n",
" plt.imshow(X_train[i].reshape(28, 28), cmap = plt.cm.Greys)\n",
" plt.text(x=10, y=-10, s=y_train[i], fontsize=18)\n",
" plt.imshow(X_train[i].reshape(28, 28), cmap=plt.cm.Greys)\n",
"plt.show()"
]
},
@@ -264,7 +264,7 @@
"ds = ws.get_default_datastore()\n",
"print(ds.datastore_type, ds.account_name, ds.container_name)\n",
"\n",
"ds.upload(src_dir = './data', target_path = 'mnist', overwrite = True, show_progress = True)"
"ds.upload(src_dir='./data', target_path='mnist', overwrite=True, show_progress=True)"
]
},
{
@@ -339,7 +339,7 @@
"source": [
"import os\n",
"script_folder = './sklearn-mnist'\n",
"os.makedirs(script_folder, exist_ok = True)"
"os.makedirs(script_folder, exist_ok=True)"
]
},
{
@@ -371,8 +371,8 @@
"\n",
"# let user feed in 2 parameters, the location of the data files (from datastore), and the regularization rate of the logistic regression model\n",
"parser = argparse.ArgumentParser()\n",
-"parser.add_argument('--data-folder', type = str, dest = 'data_folder', help = 'data folder mounting point')\n",
-"parser.add_argument('--regularization', type = float, dest = 'reg', default = 0.01, help = 'regularization rate')\n",
+"parser.add_argument('--data-folder', type=str, dest='data_folder', help='data folder mounting point')\n",
+"parser.add_argument('--regularization', type=float, dest='reg', default=0.01, help='regularization rate')\n",
"args = parser.parse_args()\n",
"\n",
"data_folder = os.path.join(args.data_folder, 'mnist')\n",
@@ -389,25 +389,23 @@
"# get hold of the current run\n",
"run = Run.get_submitted_run()\n",
"\n",
"# train a logistic regression model with specified regularization rate\n",
"print('Train a logistic regression model with regularizaion rate of', args.reg)\n",
"clf = LogisticRegression(C = 1.0/args.reg, random_state = 42)\n",
"clf = LogisticRegression(C=1.0/args.reg, random_state=42)\n",
"clf.fit(X_train, y_train)\n",
"\n",
"print('Predict the test set')\n",
"# predict on the test set\n",
"y_hat = clf.predict(X_test)\n",
"\n",
"# calculate accuracy on the prediction\n",
"acc = np.average(y_hat == y_test)\n",
"print('Accuracy is', acc)\n",
"\n",
"# log regularization rate and accuracy \n",
"run.log('regularization rate', np.float(args.reg))\n",
"run.log('accuracy', np.float(acc))\n",
"\n",
-"os.makedirs('outputs', exist_ok = True)\n",
-"joblib.dump(value = clf, filename = 'outputs/sklearn_mnist_model.pkl')"
+"os.makedirs('outputs', exist_ok=True)\n",
+"# note file saved in the outputs folder is automatically uploaded into experiment record\n",
+"joblib.dump(value=clf, filename='outputs/sklearn_mnist_model.pkl')"
]
},
{
@@ -417,7 +415,7 @@
"Notice how the script gets data and saves models:\n",
"\n",
"+ The training script reads an argument to find the directory containing the data. When you submit the job later, you point to the datastore for this argument:\n",
-"`parser.add_argument('--data-folder', type = str, dest = 'data_folder', help = 'data directory mounting point')`"
+"`parser.add_argument('--data-folder', type=str, dest='data_folder', help='data directory mounting point')`"
]
},
{
@@ -426,7 +424,7 @@
"source": [
"\n",
"+ The training script saves your model into a directory named outputs. <br/>\n",
"`joblib.dump(value = clf, filename = 'outputs/sklearn_mnist_model.pkl')`<br/>\n",
"`joblib.dump(value=clf, filename='outputs/sklearn_mnist_model.pkl')`<br/>\n",
"Anything written in this directory is automatically uploaded into your workspace. You'll access your model from this directory later in the tutorial."
]
},
@@ -477,11 +475,11 @@
" '--regularization': 0.8\n",
"}\n",
"\n",
"est = Estimator(source_directory = script_folder,\n",
" script_params = script_params,\n",
" compute_target = compute_target,\n",
" entry_script = 'train.py',\n",
" conda_packages = ['scikit-learn'])"
"est = Estimator(source_directory=script_folder,\n",
" script_params=script_params,\n",
" compute_target=compute_target,\n",
" entry_script='train.py',\n",
" conda_packages=['scikit-learn'])"
]
},
{
@@ -562,7 +560,7 @@
"metadata": {},
"outputs": [],
"source": [
"run.wait_for_completion(show_output = True) # specify True for a verbose log"
"run.wait_for_completion(show_output=True) # specify True for a verbose log"
]
},
{
@@ -623,7 +621,7 @@
"outputs": [],
"source": [
"# register model \n",
"model = run.register_model(model_name = 'sklearn_mnist', model_path = 'outputs/sklearn_mnist_model.pkl')\n",
"model = run.register_model(model_name='sklearn_mnist', model_path='outputs/sklearn_mnist_model.pkl')\n",
"print(model.name, model.id, model.version, sep = '\\t')"
]
},