Mirror of https://github.com/Azure/MachineLearningNotebooks.git
Synced 2025-12-20 09:37:04 -05:00

Compare commits: azureml-sd...dockerfile (3 commits)
| Author | SHA1 | Date |
|---|---|---|
| | f8d4fec978 | |
| | 0fdab91b97 | |
| | b54be912d8 | |
Dockerfiles/1.0.10/Dockerfile (new file, 29 lines)
@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11

# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git

# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6

# install additional packages used by sample notebooks; this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.10"]

# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.10" --single-branch https://github.com/Azure/MachineLearningNotebooks.git

# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# open up port 8887 on the container
EXPOSE 8887

# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"
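To try one of these images locally, build it from its directory and publish the notebook port. A minimal sketch, run from the repository root; the tag name is an illustrative choice, not something the repository defines, and the same two commands apply to the 1.0.2, 1.0.6, and 1.0.8 variants with the version swapped in:

# build the 1.0.10 image
docker build -t azureml-notebooks:1.0.10 Dockerfiles/1.0.10

# run it, then browse to http://localhost:8887 (no token, as configured above)
docker run -it -p 8887:8887 azureml-notebooks:1.0.10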
Dockerfiles/1.0.2/Dockerfile (new file, 29 lines)
@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11

# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git

# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6

# install additional packages used by sample notebooks; this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.2"]

# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.2" --single-branch https://github.com/Azure/MachineLearningNotebooks.git

# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# open up port 8887 on the container
EXPOSE 8887

# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"
Dockerfiles/1.0.6/Dockerfile (new file, 29 lines)
@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11

# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git

# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6

# install additional packages used by sample notebooks; this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.6"]

# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.6" --single-branch https://github.com/Azure/MachineLearningNotebooks.git

# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# open up port 8887 on the container
EXPOSE 8887

# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"
Dockerfiles/1.0.8/Dockerfile (new file, 29 lines)
@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11

# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git

# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6

# install additional packages used by sample notebooks; this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.8"]

# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.8" --single-branch https://github.com/Azure/MachineLearningNotebooks.git

# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# open up port 8887 on the container
EXPOSE 8887

# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"
@@ -4,12 +4,13 @@ These examples show you:

This change inserts the Keras example as item 3 and renumbers the rest of the list:

1. [How to use the Estimator pattern in Azure ML](how-to-use-estimator)
2. [Train using TensorFlow Estimator and tune hyperparameters using Hyperdrive](train-hyperparameter-tune-deploy-with-tensorflow)
3. [Train using Keras and tune hyperparameters using Hyperdrive](train-hyperparameter-tune-deploy-with-keras)
4. [Train using Pytorch Estimator and tune hyperparameters using Hyperdrive](train-hyperparameter-tune-deploy-with-pytorch)
5. [Distributed training using TensorFlow and Parameter Server](distributed-tensorflow-with-parameter-server)
6. [Distributed training using TensorFlow and Horovod](distributed-tensorflow-with-horovod)
7. [Distributed training using Pytorch and Horovod](distributed-pytorch-with-horovod)
8. [Distributed training using CNTK and custom Docker image](distributed-cntk-with-custom-docker)
9. [Export run history records to Tensorboard](export-run-history-to-tensorboard)
10. [Use TensorBoard to monitor training execution](tensorboard)

Learn more about how to use the `Estimator` class to [train deep neural networks with Azure Machine Learning](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-ml-models).
@@ -0,0 +1,121 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

import numpy as np
import argparse
import os

import matplotlib.pyplot as plt

import keras
from keras.models import Sequential, model_from_json
from keras.layers import Dense
from keras.optimizers import RMSprop
from keras.callbacks import Callback

import tensorflow as tf

from azureml.core import Run
from utils import load_data, one_hot_encode

print("Keras version:", keras.__version__)
print("Tensorflow version:", tf.__version__)

parser = argparse.ArgumentParser()
parser.add_argument('--data-folder', type=str, dest='data_folder', help='data folder mounting point')
parser.add_argument('--batch-size', type=int, dest='batch_size', default=50, help='mini batch size for training')
parser.add_argument('--first-layer-neurons', type=int, dest='n_hidden_1', default=100,
                    help='# of neurons in the first layer')
parser.add_argument('--second-layer-neurons', type=int, dest='n_hidden_2', default=100,
                    help='# of neurons in the second layer')
args = parser.parse_args()

data_folder = args.data_folder

print('training dataset is stored here:', data_folder)

# load the MNIST images and scale pixel values to [0, 1]
X_train = load_data(os.path.join(data_folder, 'train-images.gz'), False) / 255.0
X_test = load_data(os.path.join(data_folder, 'test-images.gz'), False) / 255.0

y_train = load_data(os.path.join(data_folder, 'train-labels.gz'), True).reshape(-1)
y_test = load_data(os.path.join(data_folder, 'test-labels.gz'), True).reshape(-1)

training_set_size = X_train.shape[0]

n_inputs = 28 * 28
n_h1 = args.n_hidden_1
n_h2 = args.n_hidden_2
n_outputs = 10

n_epochs = 20
batch_size = args.batch_size

y_train = one_hot_encode(y_train, n_outputs)
y_test = one_hot_encode(y_test, n_outputs)
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape, sep='\n')

# Build a simple MLP model
model = Sequential()
# input layer
model.add(Dense(n_h1, activation='relu', input_shape=(n_inputs,)))
# hidden layer
model.add(Dense(n_h2, activation='relu'))
# output layer
model.add(Dense(n_outputs, activation='softmax'))

model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])

# start an Azure ML run
run = Run.get_context()


class LogRunMetrics(Callback):
    # callback at the end of every epoch
    def on_epoch_end(self, epoch, log):
        # logging the same metric name repeatedly builds a list in run history
        run.log('Loss', log['loss'])
        run.log('Accuracy', log['acc'])


history = model.fit(X_train, y_train,
                    batch_size=batch_size,
                    epochs=n_epochs,
                    verbose=2,
                    validation_data=(X_test, y_test),
                    callbacks=[LogRunMetrics()])

score = model.evaluate(X_test, y_test, verbose=0)

# log a single value
run.log("Final test loss", score[0])
print('Test loss:', score[0])

run.log('Final test accuracy', score[1])
print('Test accuracy:', score[1])

plt.figure(figsize=(6, 3))
plt.title('MNIST with Keras MLP ({} epochs)'.format(n_epochs), fontsize=14)
plt.plot(history.history['acc'], 'b-', label='Accuracy', lw=4, alpha=0.5)
plt.plot(history.history['loss'], 'r--', label='Loss', lw=4, alpha=0.5)
plt.legend(fontsize=12)
plt.grid(True)

# log an image
run.log_image('Accuracy vs Loss', plot=plt)

# create a ./outputs/model folder in the compute target
# files saved in the "./outputs" folder are automatically uploaded into run history
os.makedirs('./outputs/model', exist_ok=True)

# serialize NN architecture to JSON
model_json = model.to_json()
# save model JSON
with open('./outputs/model/model.json', 'w') as f:
    f.write(model_json)
# save model weights
model.save_weights('./outputs/model/model.h5')
print("model saved in ./outputs/model folder")
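Outside a submitted run, `Run.get_context()` returns an offline run, so the script above can also be smoke-tested locally. A minimal sketch, assuming the file is saved as `keras_mnist.py` (a hypothetical name; the page does not show the file path) and the four MNIST `.gz` files sit in `./data`:

# invoke the script with the arguments its argparse block defines
python keras_mnist.py --data-folder ./data --batch-size 50 \
    --first-layer-neurons 100 --second-layer-neurons 100

The otherwise unused `model_from_json` import hints at the intended round trip: the saved `./outputs/model/model.json` can later be rebuilt with `model_from_json(...)` and the weights restored via `load_weights('./outputs/model/model.h5')`.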
Binary file not shown (image, 119 KiB).
File diff suppressed because it is too large.
@@ -0,0 +1,27 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

import gzip
import numpy as np
import struct


# load compressed MNIST gz files and return numpy arrays
def load_data(filename, label=False):
    with gzip.open(filename) as gz:
        struct.unpack('I', gz.read(4))  # skip the magic number (value discarded)
        n_items = struct.unpack('>I', gz.read(4))
        if not label:
            n_rows = struct.unpack('>I', gz.read(4))[0]
            n_cols = struct.unpack('>I', gz.read(4))[0]
            res = np.frombuffer(gz.read(n_items[0] * n_rows * n_cols), dtype=np.uint8)
            res = res.reshape(n_items[0], n_rows * n_cols)
        else:
            res = np.frombuffer(gz.read(n_items[0]), dtype=np.uint8)
            res = res.reshape(n_items[0], 1)
        return res


# one-hot encode a 1-D array
def one_hot_encode(array, num_of_classes):
    return np.eye(num_of_classes)[array.reshape(-1)]
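This is evidently the `utils` module imported by the training script above. The `np.eye` trick in `one_hot_encode` selects rows of an identity matrix by label value, which is why the array is flattened first. A standalone illustration using only numpy:

import numpy as np

labels = np.array([1, 0, 3])
# each label picks the matching row of the 4x4 identity matrix
print(np.eye(4)[labels.reshape(-1)])
# [[0. 1. 0. 0.]
#  [1. 0. 0. 0.]
#  [0. 0. 0. 1.]]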