Compare commits
azureml-sd ... azureml-sd
266 Commits
29
Dockerfiles/1.0.10/Dockerfile
Normal file
@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11

# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git

# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6

# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.10"]

# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.10" --single-branch https://github.com/Azure/MachineLearningNotebooks.git

# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# open up port 8887 on the container
EXPOSE 8887

# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"
29
Dockerfiles/1.0.15/Dockerfile
Normal file
@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11

# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git

# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6

# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.15"]

# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.15" --single-branch https://github.com/Azure/MachineLearningNotebooks.git

# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# open up port 8887 on the container
EXPOSE 8887

# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"
29
Dockerfiles/1.0.17/Dockerfile
Normal file
@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11

# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git

# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6

# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.17"]

# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.17" --single-branch https://github.com/Azure/MachineLearningNotebooks.git

# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# open up port 8887 on the container
EXPOSE 8887

# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"
29
Dockerfiles/1.0.18/Dockerfile
Normal file
@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11

# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git

# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6

# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.18"]

# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.18" --single-branch https://github.com/Azure/MachineLearningNotebooks.git

# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# open up port 8887 on the container
EXPOSE 8887

# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"
29
Dockerfiles/1.0.2/Dockerfile
Normal file
@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11

# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git

# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6

# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.2"]

# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.2" --single-branch https://github.com/Azure/MachineLearningNotebooks.git

# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# open up port 8887 on the container
EXPOSE 8887

# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"
29
Dockerfiles/1.0.21/Dockerfile
Normal file
@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11

# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git

# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6

# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.21"]

# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.21" --single-branch https://github.com/Azure/MachineLearningNotebooks.git

# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# open up port 8887 on the container
EXPOSE 8887

# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"
29
Dockerfiles/1.0.23/Dockerfile
Normal file
@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11

# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git

# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6

# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.23"]

# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.23" --single-branch https://github.com/Azure/MachineLearningNotebooks.git

# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# open up port 8887 on the container
EXPOSE 8887

# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"
29
Dockerfiles/1.0.30/Dockerfile
Normal file
@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11

# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git

# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6

# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.30"]

# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.30" --single-branch https://github.com/Azure/MachineLearningNotebooks.git

# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# open up port 8887 on the container
EXPOSE 8887

# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"
29
Dockerfiles/1.0.33/Dockerfile
Normal file
@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11

# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git

# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6

# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.33"]

# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.33" --single-branch https://github.com/Azure/MachineLearningNotebooks.git

# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# open up port 8887 on the container
EXPOSE 8887

# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"
29
Dockerfiles/1.0.41/Dockerfile
Normal file
@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11

# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git

# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6

# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.41"]

# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.41" --single-branch https://github.com/Azure/MachineLearningNotebooks.git

# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# open up port 8887 on the container
EXPOSE 8887

# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"
29
Dockerfiles/1.0.6/Dockerfile
Normal file
@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11

# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git

# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6

# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.6"]

# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.6" --single-branch https://github.com/Azure/MachineLearningNotebooks.git

# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# open up port 8887 on the container
EXPOSE 8887

# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"
29
Dockerfiles/1.0.8/Dockerfile
Normal file
@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11

# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git

# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6

# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]

# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.8"]

# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.8" --single-branch https://github.com/Azure/MachineLearningNotebooks.git

# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]

# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py

# open up port 8887 on the container
EXPOSE 8887

# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"
@@ -1,3 +1,4 @@
This software is made available to you on the condition that you agree to
[your agreement][1] governing your use of Azure.
If you do not have an existing agreement governing your use of Azure, you agree that
101
NBSETUP.md
@@ -1,34 +1,95 @@
-# Notebook setup
+# Set up your notebook environment for Azure Machine Learning

---

-To run the notebooks in this repository use one of following options.
+To run the notebooks in this repository use one of these methods:

-## Use Azure Notebooks - Jupyter based notebooks in the Azure cloud
+## **Option 1: Use Azure Notebooks**
+Azure Notebooks is a hosted Jupyter-based notebook service in the Azure cloud. The Azure Machine Learning Python SDK is already pre-installed in the Azure Notebooks `Python 3.6` kernel.

1. [Import sample notebooks](https://aka.ms/aml-clone-azure-notebooks) into Azure Notebooks
1. Follow the instructions in the [Configuration](configuration.ipynb) notebook to create and connect to a workspace
1. Open one of the sample notebooks

-**Make sure the Azure Notebook kernel is set to `Python 3.6`** when you open a notebook
+**Make sure the Azure Notebook kernel is set to `Python 3.6`** when you open a notebook by choosing Kernel > Change Kernel > Python 3.6 from the menus.

-Video walkthrough:
-
-[Video walkthrough](https://youtu.be/VIsXeTuW3FU)
-
-## **Use your own notebook server**
-
-1. Setup a Jupyter Notebook server and [install the Azure Machine Learning SDK](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-create-workspace-with-python)
-1. Clone [this repository](https://aka.ms/aml-notebooks)
-1. You may need to install other packages for specific notebooks
-    - For example, to run the Azure Machine Learning Data Prep notebooks, install the extra dataprep SDK:
-    ```bash
-    pip install azureml-dataprep
-    ```
-1. Start your notebook server
-1. Follow the instructions in the [Configuration](configuration.ipynb) notebook to create and connect to a workspace
-1. Open one of the sample notebooks
+## **Option 2: Use your own notebook server**
+
+### Quick installation
+We recommend you create a Python virtual environment ([Miniconda](https://conda.io/miniconda.html) preferred but [virtualenv](https://virtualenv.pypa.io/en/latest/) works too) and install the SDK in it.
+```sh
+# install just the base SDK
+pip install azureml-sdk
+
+# clone the sample repository
+git clone https://github.com/Azure/MachineLearningNotebooks.git
+
+# below steps are optional
+# install the base SDK, Jupyter notebook server and tensorboard
+pip install azureml-sdk[notebooks,tensorboard]
+
+# install model explainability component
+pip install azureml-sdk[explain]
+
+# install automated ml components
+pip install azureml-sdk[automl]
+
+# install experimental features (not ready for production use)
+pip install azureml-sdk[contrib]
+```
+
+Note the _extras_ (the keywords inside the square brackets) can be combined. For example:
+```sh
+# install base SDK, Jupyter notebook and automated ml components
+pip install azureml-sdk[notebooks,automl]
+```
+
+### Full instructions
+[Install the Azure Machine Learning SDK](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-create-workspace-with-python)
+
+Please make sure you start with the [Configuration](configuration.ipynb) notebook to create and connect to a workspace.
+
+### Video walkthrough:
+
+[!VIDEO https://youtu.be/VIsXeTuW3FU]
+
+## **Option 3: Use Docker**
+
+You need to have Docker engine installed locally and running. Open a command line window and type the following commands.
+
+__Note:__ We use version `1.0.10` below as an example, but you can replace that with any available version number you like.
+
+```sh
+# clone the sample repository
+git clone https://github.com/Azure/MachineLearningNotebooks.git
+
+# change current directory to the folder
+# where the Dockerfile of the specific SDK version is located.
+cd MachineLearningNotebooks/Dockerfiles/1.0.10
+
+# build a Docker image with a name (azuremlsdk for example)
+# and a version number tag (1.0.10 for example).
+# this can take several minutes depending on your computer speed and network bandwidth.
+docker build . -t azuremlsdk:1.0.10
+
+# launch the built Docker container, which also automatically starts
+# a Jupyter server instance listening on port 8887 of the host machine
+docker run -it -p 8887:8887 azuremlsdk:1.0.10
+```
+
+Now you can point your browser to http://localhost:8887. We recommend that you start from the `configuration.ipynb` notebook at the root directory.
+
+If you need additional Azure ML SDK components, you can either modify the Dockerfiles before you build the Docker images to add additional steps, or install them through the command line in the live container after you build the Docker image. For example:
+
+```sh
+# install the core SDK and automated ml components
+pip install azureml-sdk[automl]
+
+# install the core SDK and model explainability component
+pip install azureml-sdk[explain]
+
+# install the core SDK and experimental components
+pip install azureml-sdk[contrib]
+```
39
README.md
@@ -1,9 +1,6 @@
# Azure Machine Learning service example notebooks

-This repository contains example notebooks demonstrating the [Azure Machine Learning](https://azure.microsoft.com/en-us/services/machine-learning-service/) Python SDK
-which allows you to build, train, deploy and manage machine learning solutions using Azure. The AML SDK
-allows you the choice of using local or cloud compute resources, while managing
-and maintaining the complete data science workflow from the cloud.
+This repository contains example notebooks demonstrating the [Azure Machine Learning](https://azure.microsoft.com/en-us/services/machine-learning-service/) Python SDK which allows you to build, train, deploy and manage machine learning solutions using Azure. The AML SDK allows you the choice of using local or cloud compute resources, while managing and maintaining the complete data science workflow from the cloud.

@@ -11,30 +8,32 @@ and maintaining the complete data science workflow from the cloud.
```sh
pip install azureml-sdk
```
-Read more detailed instructions on [how to set up your environment](./NBSETUP.md).
+Read more detailed instructions on [how to set up your environment](./NBSETUP.md) using Azure Notebook service, your own Jupyter notebook server, or Docker.

## How to navigate and use the example notebooks?
-You should always run the [Configuration](./configuration.ipynb) notebook first when setting up a notebook library on a new machine or in a new environment. It configures your notebook library to connect to an Azure Machine Learning workspace, and sets up your workspace and compute to be used by many of the other examples.
+If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, you should always run the [Configuration](./configuration.ipynb) notebook first when setting up a notebook library on a new machine or in a new environment. It configures your notebook library to connect to an Azure Machine Learning workspace, and sets up your workspace and compute to be used by many of the other examples.

If you want to...

-* ...try out and explore Azure ML, start with image classification tutorials [part 1 training](./tutorials/img-classification-part1-training.ipynb) and [part 2 deployment](./tutorials/img-classification-part2-deploy.ipynb).
+* ...try out and explore Azure ML, start with image classification tutorials: [Part 1 (Training)](./tutorials/img-classification-part1-training.ipynb) and [Part 2 (Deployment)](./tutorials/img-classification-part2-deploy.ipynb).
+* ...prepare your data and do automated machine learning, start with regression tutorials: [Part 1 (Data Prep)](./tutorials/regression-part1-data-prep.ipynb) and [Part 2 (Automated ML)](./tutorials/regression-part2-automated-ml.ipynb).
* ...learn about experimentation and tracking run history, first [train within Notebook](./how-to-use-azureml/training/train-within-notebook/train-within-notebook.ipynb), then try [training on remote VM](./how-to-use-azureml/training/train-on-remote-vm/train-on-remote-vm.ipynb) and [using logging APIs](./how-to-use-azureml/training/logging-api/logging-api.ipynb).
* ...train deep learning models at scale, first learn about [Machine Learning Compute](./how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb), and then try [distributed hyperparameter tuning](./how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb) and [distributed training](./how-to-use-azureml/training-with-deep-learning/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb).
-* ...deploy model as realtime scoring service, first learn the basics by [training within Notebook and deploying to Azure Container Instance](./how-to-use-azureml/training/train-within-notebook/train-within-notebook.ipynb), then learn how to [register and manage models, and create Docker images](./how-to-use-azureml/deployment/register-model-create-image-deploy-service/register-model-create-image-deploy-service.ipynb), and [production deploy models on Azure Kubernetes Cluster](./how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb).
-* ...deploy models as batch scoring service, first [train a model within Notebook](./how-to-use-azureml/training/train-within-notebook/train-within-notebook.ipynb), learn how to [register and manage models](./how-to-use-azureml/deployment/register-model-create-image-deploy-service/register-model-create-image-deploy-service.ipynb), then [create Machine Learning Compute for scoring compute](./how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb), and [use Machine Learning Pipelines to deploy your model](./how-to-use-azureml/machine-learning-pipelines/pipeline-mpi-batch-prediction.ipynb).
+* ...deploy models as a realtime scoring service, first learn the basics by [training within Notebook and deploying to Azure Container Instance](./how-to-use-azureml/training/train-within-notebook/train-within-notebook.ipynb), then learn how to [register and manage models, and create Docker images](./how-to-use-azureml/deployment/register-model-create-image-deploy-service/register-model-create-image-deploy-service.ipynb), and [production deploy models on Azure Kubernetes Cluster](./how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb).
+* ...deploy models as a batch scoring service, first [train a model within Notebook](./how-to-use-azureml/training/train-within-notebook/train-within-notebook.ipynb), learn how to [register and manage models](./how-to-use-azureml/deployment/register-model-create-image-deploy-service/register-model-create-image-deploy-service.ipynb), then [create Machine Learning Compute for scoring compute](./how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb), and [use Machine Learning Pipelines to deploy your model](https://aka.ms/pl-batch-scoring).
* ...monitor your deployed models, learn about using [App Insights](./how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb) and [model data collection](./how-to-use-azureml/deployment/enable-data-collection-for-models-in-aks/enable-data-collection-for-models-in-aks.ipynb).

## Tutorials

-The [Tutorials](./tutorials) folder contains notebooks for the tutorials described in the [Azure Machine Learning documentation](https://aka.ms/aml-docs)
+The [Tutorials](./tutorials) folder contains notebooks for the tutorials described in the [Azure Machine Learning documentation](https://aka.ms/aml-docs).

## How to use Azure ML

The [How to use Azure ML](./how-to-use-azureml) folder contains specific examples demonstrating the features of the Azure Machine Learning SDK

-- [Training](./how-to-use-azureml/training) - Examples of how to build models using Azure ML's logging and execution capabilities on local and remote compute targets.
+- [Training](./how-to-use-azureml/training) - Examples of how to build models using Azure ML's logging and execution capabilities on local and remote compute targets
+- [Training with Deep Learning](./how-to-use-azureml/training-with-deep-learning) - Examples demonstrating how to build deep learning models using estimators and parameter sweeps
- [Manage Azure ML Service](./how-to-use-azureml/manage-azureml-service) - Examples of how to perform tasks, such as authenticating against Azure ML service in different ways.
- [Automated Machine Learning](./how-to-use-azureml/automated-machine-learning) - Examples using Automated Machine Learning to automatically generate optimal machine learning pipelines and models
- [Machine Learning Pipelines](./how-to-use-azureml/machine-learning-pipelines) - Examples showing how to create and use reusable pipelines for training and batch scoring
- [Deployment](./how-to-use-azureml/deployment) - Examples showing how to deploy and manage machine learning models and solutions
@@ -44,9 +43,8 @@ The [How to use Azure ML](./how-to-use-azureml) folder contains specific example
## Documentation

* Quickstarts, end-to-end tutorials, and how-tos on the [official documentation site for Azure Machine Learning service](https://docs.microsoft.com/en-us/azure/machine-learning/service/).
-* [Python SDK reference]( https://docs.microsoft.com/en-us/python/api/overview/azure/ml/intro?view=azure-ml-py)
+* [Python SDK reference](https://docs.microsoft.com/en-us/python/api/overview/azure/ml/intro?view=azure-ml-py)
+* Azure ML Data Prep SDK [overview](https://aka.ms/data-prep-sdk), [Python SDK reference](https://aka.ms/aml-data-prep-apiref), and [tutorials and how-tos](https://aka.ms/aml-data-prep-notebooks).

---

@@ -54,5 +52,18 @@ The [How to use Azure ML](./how-to-use-azureml) folder contains specific example

Visit the following repos to see projects contributed by Azure ML users:

- [AMLSamples](https://github.com/Azure/AMLSamples) A number of end-to-end examples, including face recognition, predictive maintenance, customer churn and sentiment analysis.
+- [Fine tune natural language processing models using Azure Machine Learning service](https://github.com/Microsoft/AzureML-BERT)
+- [Fashion MNIST with Azure ML SDK](https://github.com/amynic/azureml-sdk-fashion)
+
+## Data/Telemetry
+This repository collects usage data and sends it to Microsoft to help improve our products and services. Read Microsoft's [privacy statement to learn more](https://privacy.microsoft.com/en-US/privacystatement).
+
+To opt out of tracking, please go to the raw markdown or .ipynb files and remove the following line of code:
+
+```sh
+""
+```
+This URL will be slightly different depending on the file.
@@ -9,6 +9,13 @@
"Licensed under the MIT License."
]
},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+""
+]
+},
{
"cell_type": "markdown",
"metadata": {},
@@ -32,7 +39,6 @@
" 1. Workspace parameters\n",
" 1. Access your workspace\n",
" 1. Create a new workspace\n",
-" 1. Create compute resources\n",
"1. [Next steps](#Next%20steps)\n",
"\n",
"---\n",
@@ -96,7 +102,7 @@
"source": [
"import azureml.core\n",
"\n",
-"print(\"This notebook was created using version 1.0.10 of the Azure ML SDK\")\n",
+"print(\"This notebook was created using version 1.0.43 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -235,97 +241,6 @@
"ws.write_config()"
]
},
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"### Create compute resources for your training experiments\n",
-"\n",
-"Many of the sample notebooks use Azure ML managed compute (AmlCompute) to train models using a dynamically scalable pool of compute. In this section you will create default compute clusters for use by the other notebooks and any other operations you choose.\n",
-"\n",
-"To create a cluster, you need to specify a compute configuration that specifies the type of machine to be used and the scalability behaviors. Then you choose a name for the cluster that is unique within the workspace that can be used to address the cluster later.\n",
-"\n",
-"The cluster parameters are:\n",
-"* vm_size - this describes the virtual machine type and size used in the cluster. All machines in the cluster are the same type. You can get the list of vm sizes available in your region by using the CLI command\n",
-"\n",
-"```shell\n",
-"az vm list-skus -o tsv\n",
-"```\n",
-"* min_nodes - this sets the minimum size of the cluster. If you set the minimum to 0 the cluster will shut down all nodes while not in use. Setting this number to a value higher than 0 will allow for faster start-up times, but you will also be billed when the cluster is not in use.\n",
-"* max_nodes - this sets the maximum size of the cluster. Setting this to a larger number allows for more concurrency and a greater distributed processing of scale-out jobs.\n",
-"\n",
-"\n",
-"To create a **CPU** cluster now, run the cell below. The autoscale settings mean that the cluster will scale down to 0 nodes when inactive and up to 4 nodes when busy."
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"from azureml.core.compute import ComputeTarget, AmlCompute\n",
-"from azureml.core.compute_target import ComputeTargetException\n",
-"\n",
-"# Choose a name for your CPU cluster\n",
-"cpu_cluster_name = \"cpucluster\"\n",
-"\n",
-"# Verify that cluster does not exist already\n",
-"try:\n",
-"    cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
-"    print(\"Found existing cpucluster\")\n",
-"except ComputeTargetException:\n",
-"    print(\"Creating new cpucluster\")\n",
-"    \n",
-"    # Specify the configuration for the new cluster\n",
-"    compute_config = AmlCompute.provisioning_configuration(vm_size=\"STANDARD_D2_V2\",\n",
-"                                                           min_nodes=0,\n",
-"                                                           max_nodes=4)\n",
-"\n",
-"    # Create the cluster with the specified name and configuration\n",
-"    cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
-"    \n",
-"    # Wait for the cluster to complete, show the output log\n",
-"    cpu_cluster.wait_for_completion(show_output=True)"
-]
-},
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"To create a **GPU** cluster, run the cell below. Note that your subscription must have sufficient quota for GPU VMs or the command will fail. To increase quota, see [these instructions](https://docs.microsoft.com/en-us/azure/azure-supportability/resource-manager-core-quotas-request). "
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"from azureml.core.compute import ComputeTarget, AmlCompute\n",
-"from azureml.core.compute_target import ComputeTargetException\n",
-"\n",
-"# Choose a name for your GPU cluster\n",
-"gpu_cluster_name = \"gpucluster\"\n",
-"\n",
-"# Verify that cluster does not exist already\n",
-"try:\n",
-"    gpu_cluster = ComputeTarget(workspace=ws, name=gpu_cluster_name)\n",
-"    print(\"Found existing gpu cluster\")\n",
-"except ComputeTargetException:\n",
-"    print(\"Creating new gpucluster\")\n",
-"    \n",
-"    # Specify the configuration for the new cluster\n",
-"    compute_config = AmlCompute.provisioning_configuration(vm_size=\"STANDARD_NC6\",\n",
-"                                                           min_nodes=0,\n",
-"                                                           max_nodes=4)\n",
-"    # Create the cluster with the specified name and configuration\n",
-"    gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, compute_config)\n",
-"\n",
-"    # Wait for the cluster to complete, show the output log\n",
-"    gpu_cluster.wait_for_completion(show_output=True)"
-]
-},
{
"cell_type": "markdown",
"metadata": {},
@@ -336,7 +251,7 @@
"\n",
"In this notebook you configured this notebook library to connect easily to an Azure ML workspace. You can copy this notebook to your own libraries to connect them to your workspace, or use it to bootstrap new workspaces completely.\n",
"\n",
-"If you came here from another notebook, you can return there and complete that exercise, or you can try out the [Tutorials](./tutorials) or jump into \"how-to\" notebooks and start creating and deploying models. A good place to start is the [train in notebook](./how-to-use-azureml/training/train-in-notebook) example that walks through a simplified but complete end to end machine learning process."
+"If you came here from another notebook, you can return there and complete that exercise, or you can try out the [Tutorials](./tutorials) or jump into \"how-to\" notebooks and start creating and deploying models. A good place to start is the [train within notebook](./how-to-use-azureml/training/train-within-notebook) example that walks through a simplified but complete end to end machine learning process."
]
},
{
307
contrib/RAPIDS/README.md
Normal file
@@ -0,0 +1,307 @@
## How to use the RAPIDS on AzureML materials

### Setting up requirements
The material requires the Azure ML SDK and the Jupyter Notebook server for interactive execution. Please refer to the instructions to [set up the environment](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-environment#local "Local Computer Set Up"). Follow the instructions under **Local Computer**, and make sure to run the last step, `pip install <new package>`, with `<new package>` = `progressbar2` (`pip install progressbar2`).

After following the directions, the user should end up with a conda environment (`myenv`) that can be activated in an Anaconda prompt.

The user also requires an Azure subscription with a Machine Learning Services quota in the desired region of 24 nodes or more (to be able to select a vmSize with 4 GPUs, as used in the notebook) for the desired VM family ([NC\_v3](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ncv3-series), [NC\_v2](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ncv2-series), [ND](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#nd-series) or [ND_v2](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ndv2-series-preview)); the specific vmSize to be used within the chosen family also needs to be whitelisted for Machine Learning Services usage.

### Getting and running the material
Clone the AzureML Notebooks repository from GitHub by running the following command in a local directory:

* C:\local_directory>git clone https://github.com/Azure/MachineLearningNotebooks.git

In a conda prompt, navigate to the local directory, activate the conda environment (`myenv`) where the Azure ML SDK was installed, and launch Jupyter Notebook:

* (myenv) C:\local_directory>jupyter notebook

From the resulting browser at http://localhost:8888/tree, navigate to the master notebook:

* http://localhost:8888/tree/MachineLearningNotebooks/contrib/RAPIDS/azure-ml-with-nvidia-rapids.ipynb

The master notebook will open in the browser.

### Master Jupyter Notebook
The notebook can be executed interactively, step by step, by pressing the Run button.

The first couple of functional steps import the necessary AzureML libraries. If you experience any errors, please refer back to the [set up the environment](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-environment#local "Local Computer Set Up") instructions.

#### Setting up a Workspace
The following step gathers the information necessary to set up a workspace to execute the RAPIDS script. This needs to be done only once, or not at all if you already have a usable workspace set up in the Azure Portal.

It is important to set the correct values for subscription_id, resource_group, workspace_name, and region before executing the step. An example is:

    subscription_id = os.environ.get("SUBSCRIPTION_ID", "1358e503-xxxx-4043-xxxx-65b83xxxx32d")
    resource_group = os.environ.get("RESOURCE_GROUP", "AML-Rapids-Testing")
    workspace_name = os.environ.get("WORKSPACE_NAME", "AML_Rapids_Tester")
    workspace_region = os.environ.get("WORKSPACE_REGION", "West US 2")

The resource_group and workspace_name can take any value; the region should match a region for which the subscription has the required Machine Learning Services node quota.

The first time the code is executed, it will redirect to the Azure Portal to validate the subscription credentials. After the workspace is created, its related information is stored in a local file so that this step can subsequently be skipped; the next step will just load the saved workspace.

Once a workspace has been created, the user can skip its creation and jump straight to that loading step. The configuration file resides in:

* C:\local_directory\MachineLearningNotebooks\contrib\RAPIDS\aml_config\config.json
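For reference, a minimal sketch of what this create-or-load sequence can look like with the Azure ML SDK v1 `Workspace` API; the identifier values are placeholders, not real ones:

```python
import os
from azureml.core import Workspace

subscription_id = os.environ.get("SUBSCRIPTION_ID", "<subscription-id>")  # placeholder
resource_group = os.environ.get("RESOURCE_GROUP", "AML-Rapids-Testing")
workspace_name = os.environ.get("WORKSPACE_NAME", "AML_Rapids_Tester")
workspace_region = os.environ.get("WORKSPACE_REGION", "West US 2")

# First run: create the workspace (this triggers the Azure Portal sign-in)
# and cache its details locally under aml_config/config.json.
ws = Workspace.create(name=workspace_name,
                      subscription_id=subscription_id,
                      resource_group=resource_group,
                      location=workspace_region,
                      create_resource_group=True,
                      exist_ok=True)
ws.write_config()

# Subsequent runs: just reload the cached configuration.
ws = Workspace.from_config()
```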
#### Creating an AML Compute Target
The following step creates an AML Compute Target.

The vm_size parameter of the AmlCompute.provisioning_configuration() call has to be a member of one of the VM families ([NC\_v3](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ncv3-series), [NC\_v2](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ncv2-series), [ND](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#nd-series) or [ND_v2](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ndv2-series-preview)), which are the ones provided with P40 or V100 GPUs, the GPUs supported by RAPIDS. In this particular case a Standard_NC24s_V2 was used.
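A hedged sketch of this step, following the same `AmlCompute` pattern the repository's configuration.ipynb uses; the cluster name and node counts here are illustrative assumptions:

```python
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException

gpu_cluster_name = "gpucluster"  # assumed name

try:
    # Reuse the compute target if it already exists in the workspace
    gpu_cluster = ComputeTarget(workspace=ws, name=gpu_cluster_name)
    print("Found existing gpu cluster")
except ComputeTargetException:
    # vm_size must belong to one of the GPU families listed above
    compute_config = AmlCompute.provisioning_configuration(vm_size="Standard_NC24s_v2",
                                                           min_nodes=0,
                                                           max_nodes=1)
    gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, compute_config)
    gpu_cluster.wait_for_completion(show_output=True)
```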
If running the step fails with a quota error, it is an indication that even though the subscription has a node quota for VMs of that family, it does not have a node quota for that family for Machine Learning Services. You will need to request a node quota increase for that family in that region for **Machine Learning Services**.

Another possible error indicates that the specified vmSize has not been whitelisted for usage on Machine Learning Services; in that case a whitelisting request should be filed.

A successful creation of the compute target reports success in the step's output.

#### RAPIDS script uploading and viewing
The next step copies the RAPIDS script process_data.py, which is a slightly modified implementation of the [RAPIDS E2E example](https://github.com/rapidsai/notebooks/blob/master/mortgage/E2E.ipynb), into a script processing folder and presents its contents to the user. (The script is discussed in detail in the next section.)
If the user wants to use a different RAPIDS script, the references to the process_data.py script have to be changed.

#### Data Uploading
The RAPIDS script loads and extracts features from Fannie Mae's Mortgage Dataset to train an XGBoost prediction model. The script uses two years of data.

The next few steps download and decompress the data and make it available to the script as an [Azure Machine Learning Datastore](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-access-data).

The following functions are used to download and decompress the input data.
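The original helper cells survive only as screenshots, so the sketch below is one plausible shape for them, using only the Python standard library; the function names are assumptions, and the progressbar2 prerequisite could be wired into a download reporthook:

```python
import os
import tarfile
import urllib.request

DATA_URL = ("http://rapidsai-data.s3-website.us-east-2.amazonaws.com/"
            "notebook-mortgage-data/mortgage_2000-2001.tgz")

def download_file(url, dest):
    """Download url to dest, skipping the download if dest already exists."""
    if not os.path.exists(dest):
        urllib.request.urlretrieve(url, dest)
    return dest

def decompress_file(archive, path):
    """Extract a .tgz archive into the given folder."""
    os.makedirs(path, exist_ok=True)
    with tarfile.open(archive, "r:gz") as tar:
        tar.extractall(path=path)

archive = download_file(DATA_URL, "mortgage_2000-2001.tgz")
decompress_file(archive, "./mortgage_2000-2001")
```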
The next step uses those functions to download the file
http://rapidsai-data.s3-website.us-east-2.amazonaws.com/notebook-mortgage-data/mortgage_2000-2001.tgz
and to decompress it into the local folder path = .\mortgage_2000-2001.
The step takes several minutes; the intermediate outputs provide progress indicators.

The decompressed data should have the following structure:
* .\mortgage_2000-2001\acq\Acquisition_<year>Q<num>.txt
* .\mortgage_2000-2001\perf\Performance_<year>Q<num>.txt
* .\mortgage_2000-2001\names.csv

The data is divided into partitions that roughly correspond to yearly quarters. RAPIDS includes support for multi-node, multi-GPU deployments, enabling scaling up and out on much larger dataset sizes. The user will be able to verify that the number of partitions that the script is able to process increases with the number of GPUs used. The RAPIDS script is implemented for single-machine scenarios; an example supporting multiple nodes will be published later.

The next step uploads the data into the [Azure Machine Learning Datastore](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-access-data) under the reference `fileroot = mortgage_2000-2001`.

The step takes several minutes to load the data; the output provides a progress indicator.

Once the data has been loaded into the Azure Machine Learning Datastore, in subsequent runs the user can comment out the `ds.upload` line and just use the `mortgage_2000-2001` datastore reference.
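A minimal sketch of the upload step, assuming the workspace's default datastore is used (the notebook may target a named datastore instead):

```python
fileroot = "mortgage_2000-2001"

ds = ws.get_default_datastore()
# On later runs this upload can be commented out, as noted above.
ds.upload(src_dir="./mortgage_2000-2001",
          target_path=fileroot,
          overwrite=True,
          show_progress=True)
```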
#### Setting up required libraries and environment to run RAPIDS code
There are two options to set up the environment to run RAPIDS code. The following steps show how to use a prebuilt conda environment. A recommended alternative is to specify a base Docker image and package dependencies; you can find sample code for that in the notebook.
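For the Docker-based alternative, a sketch with SDK v1 run configurations might look like the following; the base image name is an assumption for illustration, not taken from the notebook:

```python
from azureml.core.runconfig import RunConfiguration

run_config = RunConfiguration()
run_config.target = gpu_cluster_name                 # run on the AmlCompute cluster
run_config.environment.docker.enabled = True
run_config.environment.docker.base_image = "rapidsai/rapidsai:latest"  # assumed image
# rely on the packages baked into the image instead of a conda specification
run_config.environment.python.user_managed_dependencies = True
```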
#### Wrapper function to submit the RAPIDS script as an Azure Machine Learning experiment

The next step defines a wrapper function to be used when the user wants to run the RAPIDS script with different arguments. It takes as arguments: <span style="font-family: Times New Roman;">*cpu\_training*</span>, a flag that indicates whether the run should be processed with CPU only; <span style="font-family: Times New Roman;">*gpu\_count*</span>, the number of GPUs to use if GPUs are meant to be used; and <span style="font-family: Times New Roman;">*part\_count*</span>, the number of data partitions to use.
The core of the function is the configuration of the run through the instantiation of a ScriptRunConfig object, which defines the source_directory for the script to be executed, the name of the script and the arguments to be passed to the script.

In addition to the wrapper function arguments, two other arguments are passed: <span style="font-family: Times New Roman;">*data\_dir*</span>, the directory where the data is stored, and <span style="font-family: Times New Roman;">*end\_year*</span>, the last year from which partitions are used.

As mentioned earlier, the size of the data that can be processed increases with the number of GPUs. In the function, the dictionary <span style="font-family: Times New Roman;">*max\_gpu\_count\_data\_partition_mapping*</span> maps each GPU count to the maximum number of partitions that we empirically found the system can handle. The function prints a warning when the number of partitions for a given number of GPUs exceeds that maximum but still executes the script; in that case the user should expect the run to fail with an out-of-memory error.
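A sketch of the configuration step inside the wrapper; the argument names follow the argparse flags defined in process_data.py, while scripts_folder (the script processing folder populated earlier) and the exact argument formatting are assumptions:

```
src = ScriptRunConfig(source_directory=scripts_folder,
                      script='process_data.py',
                      run_config=run_config,
                      arguments=['--data_dir', str(data_ref),
                                 '--num_gpu', str(gpu_count),
                                 '--part_count', str(part_count),
                                 '--end_year', str(end_year),
                                 '--cpu_predictor', str(cpu_training)])
```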
If the user wants to use a different RAPIDS script, the reference to the process_data.py script has to be changed.
#### Submitting Experiments

We are ready to submit experiments: launching the RAPIDS script with different sets of parameters.

The following couple of steps submit experiments under different conditions.
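For example, the GPU-training submission cell from the notebook is:

```
cpu_predictor = False
# the value for num_gpu should be less than or equal to the number of GPUs available in the VM
num_gpu = 1
data_part_count = 1
run = run_rapids_experiment(cpu_predictor, num_gpu, data_part_count)
```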
The user can set the variable num\_gpu to any value between one and the number of GPUs supported by the chosen vmSize. The variable part\_count can take any value between 1 and 11, but if it exceeds the maximum for num\_gpu, the run will result in an error.

If the experiment is successfully submitted, it is placed on a queue for processing, its status appears as Queued, and an output like the following is shown:
When the experiment starts running, its status appears as Running and the output changes to something like this:
#### Reproducing the performance gains plot results on the Blog Post

When the run has finished successfully, its status appears as Completed and the output changes to something like this:
This is the output for an experiment run with three partitions and one GPU; notice that the reported processing time is 49.16 seconds, just as depicted on the performance gains plot on the blog post.
This output corresponds to a run with three partitions and two GPUs; notice that the reported processing time is 37.50 seconds, just as depicted on the performance gains plot on the blog post.
This output corresponds to an experiment run with three partitions and three GPUs; notice that the reported processing time is 24.40 seconds, just as depicted on the performance gains plot on the blog post.
This output corresponds to an experiment run with three partitions and four GPUs; notice that the reported processing time is 23.33 seconds, just as depicted on the performance gains plot on the blog post.
This output corresponds to an experiment run with three partitions using only the CPU; notice that the reported processing time is 9 minutes and 1.21 seconds, or 541.21 seconds, just as depicted on the performance gains plot on the blog post.
This output corresponds to an experiment run with nine partitions and four GPUs; notice that the notebook throws a warning signaling that the number of partitions exceeds the maximum the system can handle with that many GPUs, and the run ends up failing, hence its status of Failed.
##### Freeing Resources

In the last step the notebook deletes the compute target. (This step is optional, especially if min_nodes in the cluster is set to 0, in which case the cluster scales down to 0 nodes when there is no usage.)
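A one-line sketch, assuming gpu_cluster is the ComputeTarget object created earlier in the notebook:

```
# delete the AmlCompute target to stop incurring charges
gpu_cluster.delete()
```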
### RAPIDS Script

The Master Notebook runs experiments by launching a RAPIDS script with different sets of parameters. In this section, the RAPIDS script, process_data.py in the material, is analyzed.

The script first imports all the necessary libraries and parses the arguments passed by the Master Notebook. Then all the internal functions used by the script are defined.
#### Wrapper Auxiliary Functions:

The following functions are wrappers for the configuration module of librmm, the RAPIDS Memory Manager Python interface:
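A sketch reconstructed from the RAPIDS E2E example this script is based on (the script's diff below confirms initialize_rmm_pool and the rmm_cfg import; the pooled/non-pooled pair and the rmm_initialize call are inferred from the client.run(cudf._gdf.rmm_finalize) usage later in the script):

```
def initialize_rmm_pool():
    from librmm_cffi import librmm_config as rmm_cfg

    rmm_cfg.use_pool_allocator = True   # allocate GPU memory from a pool
    import cudf
    return cudf._gdf.rmm_initialize()

def initialize_rmm_no_pool():
    from librmm_cffi import librmm_config as rmm_cfg

    rmm_cfg.use_pool_allocator = False  # plain per-allocation GPU memory
    import cudf
    return cudf._gdf.rmm_initialize()
```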
A couple of other functions are wrappers for the submission of jobs to the DASK client:
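As they appear in the script:

```
def run_dask_task(func, **kwargs):
    task = func(**kwargs)
    return task

def process_quarter_gpu(client, col_names_path, acq_data_path, year=2000, quarter=1, perf_file=""):
    dask_client = client
    # wrap the full GPU workflow for one quarter as a delayed Dask task
    ml_arrays = run_dask_task(delayed(run_gpu_workflow),
                              col_path=col_names_path,
                              acq_path=acq_data_path,
                              quarter=quarter,
                              year=year,
                              perf_file=perf_file)
    return dask_client.compute(ml_arrays,
                               optimize_graph=False,
                               fifo_timeout="0ms")
```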
#### Data Loading Functions:

The data is loaded through three functions, all of which use cudf.read_csv(), the cuDF version of the well-known Pandas counterpart:
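One of them, gpu_load_names, condensed from the script diff below (the 'seller_name' column name is inferred from the merge on seller_name elsewhere in the script):

```
from collections import OrderedDict
import cudf

def gpu_load_names(col_path):
    """Loads names used for renaming the banks."""
    cols = ['seller_name', 'new']
    dtypes = OrderedDict([
        ("seller_name", "category"),
        ("new", "category"),
    ])
    return cudf.read_csv(col_path, names=cols, delimiter='|',
                         dtype=list(dtypes.values()), skiprows=1)
```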
#### Data Transformation and Feature Extraction Functions:

The raw data is transformed and processed to extract features by joining, slicing, grouping, aggregating and factoring the original dataframes, just as is done with Pandas. The following functions in the script are used for that purpose:
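One representative example is create_ever_features, which reduces the performance records to per-loan delinquency flags. This is a sketch reconstructed from the RAPIDS E2E example; only the first two lines are confirmed by the script diff below, and the groupby API shown is the cudf 0.x style in use at the time:

```
def create_ever_features(gdf, **kwargs):
    everdf = gdf[['loan_id', 'current_loan_delinquency_status']]
    # per-loan maximum delinquency status over all records
    everdf = everdf.groupby('loan_id', method='hash').max()
    del(gdf)
    everdf['ever_30'] = (everdf['max_current_loan_delinquency_status'] >= 1).astype('int8')
    everdf['ever_90'] = (everdf['max_current_loan_delinquency_status'] >= 3).astype('int8')
    everdf['ever_180'] = (everdf['max_current_loan_delinquency_status'] >= 6).astype('int8')
    everdf.drop_column('max_current_loan_delinquency_status')
    return everdf
```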
#### Main() Function

The previous functions are used in the main function to accomplish several steps: setting up the Dask client, performing all ETL operations, and setting up and training an XGBoost model. The function also assigns which data is to be processed by each Dask worker.
##### Setting Up DASK client:

The following lines initialize and set up a DASK client with a number of workers corresponding to the number of GPUs to be used on the run:
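From the script's main() (IPADDR is obtained earlier by running hostname --all-ip-addresses):

```
cluster = LocalCUDACluster(ip=IPADDR, n_workers=num_gpu)
client = Client(cluster)
print(client.ncores())
```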
A successful execution of the setup results in output like the following:
##### All ETL functions are used in single calls to process\_quarter_gpu, one per data partition
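Condensed from main(): the loop walks the performance files quarter by quarter and submits one workflow per partition. The quarter/year rollover at the end is an assumption inferred from the partial hunk in the script diff:

```
while year <= end_year:
    for file in glob(os.path.join(perf_data_path + "/Performance_" + str(year) + "Q" + str(quarter) + "*")):
        if count < part_count:
            gpu_dfs.append(process_quarter_gpu(client, col_names_path, acq_data_path,
                                               year=year, quarter=quarter, perf_file=file))
            count += 1
    quarter += 1
    if quarter > 4:
        year += 1
        quarter = 1
```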
##### Concatenating the data assigned to each DASK worker

The partitions assigned to each worker are concatenated and set up for training.
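From the script: each delayed result is mapped to the worker that holds it, and each worker's list of partitions is then concatenated into a single cuDF DataFrame:

```
tmp_map = [(gpu_df, list(client.who_has(gpu_df).values())[0]) for gpu_df in gpu_dfs]
new_map = {}
for key, value in tmp_map:
    if value not in new_map:
        new_map[value] = [key]
    else:
        new_map[value].append(key)

del(tmp_map)
gpu_dfs = []
for list_delayed in new_map.values():
    gpu_dfs.append(delayed(cudf.concat)(list_delayed))
```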
##### Setting Training Parameters

The parameters used for the training of a gradient boosted decision tree model are set up in the following code block:
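Abridged from the script; the elided dictionary entries are the remaining tree and learning parameters listed in process_data.py:

```
dxgb_gpu_params = {
    'nround': 100,
    'max_depth': 8,
    'max_leaves': 2**8,
    # ... further parameters as listed in the script ...
    'criterion': 'friedman_mse',
    'grow_policy': 'lossguide',
    'verbose': True
}

if cpu_predictor:
    print('Training using CPUs')
    dxgb_gpu_params['predictor'] = 'cpu_predictor'
    dxgb_gpu_params['tree_method'] = 'hist'
    dxgb_gpu_params['objective'] = 'reg:linear'
else:
    print('Training using GPUs')
```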
Notice how the parameters are modified when using the CPU-only mode.
##### Launching the training of a gradient boosted decision tree model using XGBoost
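From the script, where dxgb_gpu is the dask_xgboost module imported at the top:

```
t1 = datetime.datetime.now()
bst = dxgb_gpu.train(client, dxgb_gpu_params, gpu_dfs, labels,
                     num_boost_round=dxgb_gpu_params['nround'])
t2 = datetime.datetime.now()
print("Training time ...")
print(t2 - t1)
```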
The outputs of the script can be observed in the Master Notebook as the script is executed.
@@ -20,7 +20,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"The [RAPIDS](https://www.developer.nvidia.com/rapids) suite of software libraries from NVIDIA enables the execution of end-to-end data science and analytics pipelines entirely on GPUs. In many machine learning projects, a significant portion of the model training time is spent in setting up the data; this stage of the process is known as Extraction, Transformation and Loading, or ETL. By using the DataFrame API for ETL and GPU-capable ML algorithms in RAPIDS, data preparation and training models can be done in GPU-accelerated end-to-end pipelines without incurring serialization costs between the pipeline stages. This notebook demonstrates how to use NVIDIA RAPIDS to prepare data and train a model in Azure.\n",
" \n",
"In this notebook, we will do the following:\n",
" \n",
@@ -62,6 +62,7 @@
"source": [
"import os\n",
"from azureml.core import Workspace, Experiment\n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
"from azureml.data.data_reference import DataReference\n",
"from azureml.core.runconfig import RunConfiguration\n",
@@ -210,21 +211,107 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"This sample uses [Fannie Mae\u00e2\u20ac\u2122s Single-Family Loan Performance Data](http://www.fanniemae.com/portal/funding-the-market/data/loan-performance-data.html). Refer to the 'Available mortgage datasets' section in [instructions](https://rapidsai.github.io/demos/datasets/mortgage-data) to get sample data.\n",
"\n",
"Once you obtain access to the data, you will need to make this data available in an [Azure Machine Learning Datastore](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-access-data), for use in this sample."
"This sample uses [Fannie Mae's Single-Family Loan Performance Data](http://www.fanniemae.com/portal/funding-the-market/data/loan-performance-data.html). Once you obtain access to the data, you will need to make this data available in an [Azure Machine Learning Datastore](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-access-data), for use in this sample. The following code shows how to do that."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<font color='red'>Important</font>: The following step assumes the data is uploaded to the Workspace's default data store under a folder named 'mortgagedata2000_01'. Note that uploading data to the Workspace's default data store is not necessary and the data can be referenced from any datastore, e.g., from Azure Blob or File service, once it is added as a datastore to the workspace. The path_on_datastore parameter needs to be updated, depending on where the data is available. The directory where the data is available should have the following folder structure, as the process_data.py script expects this directory structure:\n",
"* _<data directory>_/acq\n",
"* _<data directory>_/perf\n",
"* _names.csv_\n",
"The 'acq' and 'perf' refer to directories containing data files. The _<data directory>_ is the path specified in _path_on_datastore_ parameter in the step below."
"### Downloading Data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<font color='red'>Important</font>: Python package progressbar2 is necessary to run the following cell. If it is not available in your environment where this notebook is running, please install it."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import tarfile\n",
"import hashlib\n",
"from urllib.request import urlretrieve\n",
"from progressbar import ProgressBar\n",
"\n",
"def validate_downloaded_data(path):\n",
" if(os.path.isdir(path) and os.path.exists(path + '//names.csv')) :\n",
" if(os.path.isdir(path + '//acq' ) and len(os.listdir(path + '//acq')) == 8):\n",
" if(os.path.isdir(path + '//perf' ) and len(os.listdir(path + '//perf')) == 11):\n",
" print(\"Data has been downloaded and decompressed at: {0}\".format(path))\n",
" return True\n",
" print(\"Data has not been downloaded and decompressed\")\n",
" return False\n",
"\n",
"def show_progress(count, block_size, total_size):\n",
" global pbar\n",
" global processed\n",
" \n",
" if count == 0:\n",
" pbar = ProgressBar(maxval=total_size)\n",
" processed = 0\n",
" \n",
" processed += block_size\n",
" processed = min(processed,total_size)\n",
" pbar.update(processed)\n",
"\n",
" \n",
"def download_file(fileroot):\n",
" filename = fileroot + '.tgz'\n",
" if(not os.path.exists(filename) or hashlib.md5(open(filename, 'rb').read()).hexdigest() != '82dd47135053303e9526c2d5c43befd5' ):\n",
" url_format = 'http://rapidsai-data.s3-website.us-east-2.amazonaws.com/notebook-mortgage-data/{0}.tgz'\n",
" url = url_format.format(fileroot)\n",
" print(\"...Downloading file :{0}\".format(filename))\n",
" urlretrieve(url, filename,show_progress)\n",
" pbar.finish()\n",
" print(\"...File :{0} finished downloading\".format(filename))\n",
" else:\n",
" print(\"...File :{0} has been downloaded already\".format(filename))\n",
" return filename\n",
"\n",
"def decompress_file(filename,path):\n",
" tar = tarfile.open(filename)\n",
" print(\"...Getting information from {0} about files to decompress\".format(filename))\n",
" members = tar.getmembers()\n",
" numFiles = len(members)\n",
" so_far = 0\n",
" for member_info in members:\n",
" tar.extract(member_info,path=path)\n",
" show_progress(so_far, 1, numFiles)\n",
" so_far += 1\n",
" pbar.finish()\n",
" print(\"...All {0} files have been decompressed\".format(numFiles))\n",
" tar.close()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fileroot = 'mortgage_2000-2001'\n",
"path = '.\\\\{0}'.format(fileroot)\n",
"pbar = None\n",
"processed = 0\n",
"\n",
"if(not validate_downloaded_data(path)):\n",
" print(\"Downloading and Decompressing Input Data\")\n",
" filename = download_file(fileroot)\n",
" decompress_file(filename,path)\n",
" print(\"Input Data has been Downloaded and Decompressed\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Uploading Data to Workspace"
]
},
{
@@ -237,10 +324,10 @@
"\n",
"# download and uncompress data in a local directory before uploading to data store\n",
"# directory specified in src_dir parameter below should have the acq, perf directories with data and names.csv file\n",
"# ds.upload(src_dir='<local directory that has data>', target_path='mortgagedata2000_01', overwrite=True, show_progress=True)\n",
"ds.upload(src_dir=path, target_path=fileroot, overwrite=True, show_progress=True)\n",
"\n",
"# data already uploaded to the datastore\n",
"data_ref = DataReference(data_reference_name='data', datastore=ds, path_on_datastore='mortgagedata2000_01')"
"data_ref = DataReference(data_reference_name='data', datastore=ds, path_on_datastore=fileroot)"
]
},
{
@@ -254,7 +341,26 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"AML allows the option of using existing Docker images with prebuilt conda environments. The following step uses an existing image from [Docker Hub](https://hub.docker.com/r/rapidsai/rapidsai/)."
"RunConfiguration is used to submit jobs to Azure Machine Learning service. When creating RunConfiguration for a job, users can either \n",
"1. specify a Docker image with prebuilt conda environment and use it without any modifications to run the job, or \n",
"2. specify a Docker image as the base image and conda or pip packages as dependencies to let AML build a new Docker image with a conda environment containing specified dependencies to use in the job\n",
"\n",
"The second option is the recommended option in AML. \n",
"The following steps have code for both options. You can pick the one that is more appropriate for your requirements. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Specify prebuilt conda environment"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The following code shows how to use an existing image from [Docker Hub](https://hub.docker.com/r/rapidsai/rapidsai/) that has a prebuilt conda environment named 'rapids' when creating a RunConfiguration. Note that this conda environment does not include azureml-defaults package that is required for using AML functionality like metrics tracking, model management etc. This package is automatically installed when you use 'Specify package dependencies' option and that is why it is the recommended option to create RunConfiguration in AML."
]
},
{
@@ -266,18 +372,52 @@
"run_config = RunConfiguration()\n",
"run_config.framework = 'python'\n",
"run_config.environment.python.user_managed_dependencies = True\n",
"# use conda environment named 'rapids' available in the Docker image\n",
"# this conda environment does not include azureml-defaults package that is required for using AML functionality like metrics tracking, model management etc.\n",
"run_config.environment.python.interpreter_path = '/conda/envs/rapids/bin/python'\n",
"run_config.target = gpu_cluster_name\n",
"run_config.environment.docker.enabled = True\n",
"run_config.environment.docker.gpu_support = True\n",
"# if registry is not mentioned the image is pulled from Docker Hub\n",
"run_config.environment.docker.base_image = \"rapidsai/rapidsai:cuda9.2_ubuntu16.04_root\"\n",
"run_config.environment.docker.base_image = \"rapidsai/rapidsai:cuda9.2-runtime-ubuntu18.04\"\n",
"# run_config.environment.docker.base_image_registry.address = '<registry_url>' # not required if the base_image is in Docker hub\n",
"# run_config.environment.docker.base_image_registry.username = '<user_name>' # needed only for private images\n",
"# run_config.environment.docker.base_image_registry.password = '<password>' # needed only for private images\n",
"run_config.environment.spark.precache_packages = False\n",
"run_config.data_references={'data':data_ref.to_config()}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Specify package dependencies"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The following code shows how to list package dependencies in a conda environment definition file (rapids.yml) when creating a RunConfiguration"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# cd = CondaDependencies(conda_dependencies_file_path='rapids.yml')\n",
"# run_config = RunConfiguration(conda_dependencies=cd)\n",
"# run_config.framework = 'python'\n",
"# run_config.target = gpu_cluster_name\n",
"# run_config.environment.docker.enabled = True\n",
"# run_config.environment.docker.gpu_support = True\n",
"# run_config.environment.docker.base_image = \"<image>\"\n",
"# run_config.environment.docker.base_image_registry.address = '<registry_url>' # not required if the base_image is in Docker hub\n",
"# run_config.environment.docker.base_image_registry.username = '<user_name>' # needed only for private images\n",
"# run_config.environment.docker.base_image_registry.password = '<password>' # needed only for private images\n",
"# run_config.environment.spark.precache_packages = False\n",
"# run_config.data_references={'data':data_ref.to_config()}"
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -293,17 +433,24 @@
"source": [
"# parameter cpu_predictor indicates if training should be done on CPU. If set to true, GPUs are used *only* for ETL and *not* for training\n",
"# parameter num_gpu indicates number of GPUs to use among the GPUs available in the VM for ETL and if cpu_predictor is false, for training as well \n",
"def run_rapids_experiment(cpu_training, gpu_count):\n",
"def run_rapids_experiment(cpu_training, gpu_count, part_count):\n",
" # any value between 1-4 is allowed here depending on the type of VMs available in gpu_cluster\n",
" if gpu_count not in [1, 2, 3, 4]:\n",
" raise Exception('Value specified for the number of GPUs to use {0} is invalid'.format(gpu_count))\n",
"\n",
" # following data partition mapping is empirical (specific to GPUs used and current data partitioning scheme) and may need to be tweaked\n",
" gpu_count_data_partition_mapping = {1: 2, 2: 4, 3: 5, 4: 7}\n",
" part_count = gpu_count_data_partition_mapping[gpu_count]\n",
"\n",
" max_gpu_count_data_partition_mapping = {1: 3, 2: 4, 3: 6, 4: 8}\n",
" \n",
" if part_count > max_gpu_count_data_partition_mapping[gpu_count]:\n",
" print(\"Too many partitions for the number of GPUs, exceeding memory threshold\")\n",
" \n",
" if part_count > 11:\n",
" print(\"Warning: Maximum number of partitions available is 11\")\n",
" part_count = 11\n",
" \n",
" end_year = 2000\n",
" if gpu_count > 2:\n",
" \n",
" if part_count > 4:\n",
" end_year = 2001 # use more data with more GPUs\n",
"\n",
" src = ScriptRunConfig(source_directory=scripts_folder, 
@@ -317,7 +464,8 @@
"\n",
" exp = Experiment(ws, 'rapidstest')\n",
" run = exp.submit(config=src)\n",
" RunDetails(run).show()"
" RunDetails(run).show()\n",
" return run"
]
},
{
@@ -335,9 +483,10 @@
"source": [
"cpu_predictor = False\n",
"# the value for num_gpu should be less than or equal to the number of GPUs available in the VM\n",
"num_gpu = 1 \n",
"num_gpu = 1\n",
"data_part_count = 1\n",
"# use GPU for both ETL and training\n",
"run_rapids_experiment(cpu_predictor, num_gpu)"
"run = run_rapids_experiment(cpu_predictor, num_gpu, data_part_count)"
]
},
{
@@ -358,8 +507,9 @@
"cpu_predictor = True\n",
"# the value for num_gpu should be less than or equal to the number of GPUs available in the VM\n",
"num_gpu = 1\n",
"data_part_count = 1\n",
"# train using CPU, use GPU for ETL\n",
"run_rapids_experiment(cpu_predictor, num_gpu)"
"run = run_rapids_experiment(cpu_predictor, num_gpu, data_part_count)"
]
},
{
BIN contrib/RAPIDS/imgs/2GPUs.png (new file, 180 KiB)
BIN contrib/RAPIDS/imgs/3GPUs.png (new file, 183 KiB)
BIN contrib/RAPIDS/imgs/4gpus.png (new file, 183 KiB)
BIN contrib/RAPIDS/imgs/CPUBase.png (new file, 177 KiB)
BIN contrib/RAPIDS/imgs/DLF1.png (new file, 5.0 KiB)
BIN contrib/RAPIDS/imgs/DLF2.png (new file, 4.8 KiB)
BIN contrib/RAPIDS/imgs/DLF3.png (new file, 3.2 KiB)
BIN contrib/RAPIDS/imgs/Dask2.png (new file, 70 KiB)
BIN contrib/RAPIDS/imgs/ETL.png (new file, 64 KiB)
BIN contrib/RAPIDS/imgs/NotebookHome.png (new file, 554 KiB)
BIN contrib/RAPIDS/imgs/OOM.png (new file, 213 KiB)
BIN contrib/RAPIDS/imgs/PArameters.png (new file, 58 KiB)
BIN contrib/RAPIDS/imgs/WorkSpaceSetUp.png (new file, 34 KiB)
BIN contrib/RAPIDS/imgs/clusterdelete.png (new file, 4.5 KiB)
BIN contrib/RAPIDS/imgs/completed.png (new file, 187 KiB)
BIN contrib/RAPIDS/imgs/daskini.png (new file, 22 KiB)
BIN contrib/RAPIDS/imgs/daskoutput.png (new file, 9.7 KiB)
BIN contrib/RAPIDS/imgs/datastore.png (new file, 163 KiB)
BIN contrib/RAPIDS/imgs/dcf1.png (new file, 3.5 KiB)
BIN contrib/RAPIDS/imgs/dcf2.png (new file, 2.9 KiB)
BIN contrib/RAPIDS/imgs/dcf3.png (new file, 2.5 KiB)
BIN contrib/RAPIDS/imgs/dcf4.png (new file, 3.0 KiB)
BIN contrib/RAPIDS/imgs/downamddecom.png (new file, 60 KiB)
BIN contrib/RAPIDS/imgs/fef1.png (new file, 3.5 KiB)
BIN contrib/RAPIDS/imgs/fef2.png (new file, 3.9 KiB)
BIN contrib/RAPIDS/imgs/fef3.png (new file, 5.0 KiB)
BIN contrib/RAPIDS/imgs/fef4.png (new file, 4.0 KiB)
BIN contrib/RAPIDS/imgs/fef5.png (new file, 4.1 KiB)
BIN contrib/RAPIDS/imgs/fef6.png (new file, 4.5 KiB)
BIN contrib/RAPIDS/imgs/fef7.png (new file, 5.1 KiB)
BIN contrib/RAPIDS/imgs/fef8.png (new file, 3.9 KiB)
BIN contrib/RAPIDS/imgs/fef9.png (new file, 3.6 KiB)
BIN contrib/RAPIDS/imgs/install2.png (new file, 120 KiB)
BIN contrib/RAPIDS/imgs/installation.png (new file, 55 KiB)
BIN contrib/RAPIDS/imgs/queue.png (new file, 52 KiB)
BIN contrib/RAPIDS/imgs/running.png (new file, 181 KiB)
BIN contrib/RAPIDS/imgs/saved_workspace.png (new file, 36 KiB)
BIN contrib/RAPIDS/imgs/scriptuploading.png (new file, 21 KiB)
BIN contrib/RAPIDS/imgs/submission1.png (new file, 19 KiB)
BIN contrib/RAPIDS/imgs/target_creation.png (new file, 45 KiB)
BIN contrib/RAPIDS/imgs/targeterror1.png (new file, 31 KiB)
BIN contrib/RAPIDS/imgs/targeterror2.png (new file, 29 KiB)
BIN contrib/RAPIDS/imgs/targetsuccess.png (new file, 10 KiB)
BIN contrib/RAPIDS/imgs/training.png (new file, 18 KiB)
BIN contrib/RAPIDS/imgs/wap1.png (new file, 2.4 KiB)
BIN contrib/RAPIDS/imgs/wap2.png (new file, 2.5 KiB)
BIN contrib/RAPIDS/imgs/wap3.png (new file, 3.4 KiB)
BIN contrib/RAPIDS/imgs/wap4.png (new file, 4.8 KiB)
BIN contrib/RAPIDS/imgs/wrapper.png (new file, 99 KiB)
@@ -1,9 +1,9 @@
# License Info: https://github.com/rapidsai/notebooks/blob/master/LICENSE
import numpy as np
import datetime
import dask_xgboost as dxgb_gpu
import dask
import dask_cudf
from dask_cuda import LocalCUDACluster
from dask.delayed import delayed
from dask.distributed import Client, wait
import xgboost as xgb
@@ -15,53 +15,6 @@ from glob import glob
import os
import argparse

parser = argparse.ArgumentParser("rapidssample")
parser.add_argument("--data_dir", type=str, help="location of data")
parser.add_argument("--num_gpu", type=int, help="Number of GPUs to use", default=1)
parser.add_argument("--part_count", type=int, help="Number of data files to train against", default=2)
parser.add_argument("--end_year", type=int, help="Year to end the data load", default=2000)
parser.add_argument("--cpu_predictor", type=str, help="Flag to use CPU for prediction", default='False')
parser.add_argument('-f', type=str, default='') # added for notebook execution scenarios
args = parser.parse_args()
data_dir = args.data_dir
num_gpu = args.num_gpu
part_count = args.part_count
end_year = args.end_year
cpu_predictor = args.cpu_predictor.lower() in ('yes', 'true', 't', 'y', '1')

print('data_dir = {0}'.format(data_dir))
print('num_gpu = {0}'.format(num_gpu))
print('part_count = {0}'.format(part_count))
part_count = part_count + 1 # adding one because the usage below is not inclusive
print('end_year = {0}'.format(end_year))
print('cpu_predictor = {0}'.format(cpu_predictor))

import subprocess

cmd = "hostname --all-ip-addresses"
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
IPADDR = str(output.decode()).split()[0]
print('IPADDR is {0}'.format(IPADDR))

cmd = "/rapids/notebooks/utils/dask-setup.sh 0"
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()

cmd = "/rapids/notebooks/utils/dask-setup.sh rapids " + str(num_gpu) + " 8786 8787 8790 " + str(IPADDR) + " MASTER"
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()

print(output.decode())

import dask
from dask.delayed import delayed
from dask.distributed import Client, wait

_client = IPADDR + str(":8786")

client = dask.distributed.Client(_client)

def initialize_rmm_pool():
    from librmm_cffi import librmm_config as rmm_cfg
@@ -81,15 +34,17 @@ def run_dask_task(func, **kwargs):
    task = func(**kwargs)
    return task

def process_quarter_gpu(year=2000, quarter=1, perf_file=""):
def process_quarter_gpu(client, col_names_path, acq_data_path, year=2000, quarter=1, perf_file=""):
    dask_client = client
    ml_arrays = run_dask_task(delayed(run_gpu_workflow),
                              col_path=col_names_path,
                              acq_path=acq_data_path,
                              quarter=quarter,
                              year=year,
                              perf_file=perf_file)
    return client.compute(ml_arrays,
    return dask_client.compute(ml_arrays,
                               optimize_graph=False,
                               fifo_timeout="0ms"
                               )
                               fifo_timeout="0ms")

def null_workaround(df, **kwargs):
    for column, data_type in df.dtypes.items():
@@ -99,9 +54,9 @@ def null_workaround(df, **kwargs):
            df[column] = df[column].fillna(-1)
    return df

def run_gpu_workflow(quarter=1, year=2000, perf_file="", **kwargs):
    names = gpu_load_names()
    acq_gdf = gpu_load_acquisition_csv(acquisition_path= acq_data_path + "/Acquisition_"
def run_gpu_workflow(col_path, acq_path, quarter=1, year=2000, perf_file="", **kwargs):
    names = gpu_load_names(col_path=col_path)
    acq_gdf = gpu_load_acquisition_csv(acquisition_path= acq_path + "/Acquisition_"
                                       + str(year) + "Q" + str(quarter) + ".txt")
    acq_gdf = acq_gdf.merge(names, how='left', on=['seller_name'])
    acq_gdf.drop_column('seller_name')
@@ -231,7 +186,7 @@ def gpu_load_acquisition_csv(acquisition_path, **kwargs):

    return cudf.read_csv(acquisition_path, names=cols, delimiter='|', dtype=list(dtypes.values()), skiprows=1)

def gpu_load_names(**kwargs):
def gpu_load_names(col_path):
    """ Loads names used for renaming the banks

    Returns
@@ -248,7 +203,7 @@ def gpu_load_names(**kwargs):
        ("new", "category"),
    ])

    return cudf.read_csv(col_names_path, names=cols, delimiter='|', dtype=list(dtypes.values()), skiprows=1)
    return cudf.read_csv(col_path, names=cols, delimiter='|', dtype=list(dtypes.values()), skiprows=1)

def create_ever_features(gdf, **kwargs):
    everdf = gdf[['loan_id', 'current_loan_delinquency_status']]
@@ -384,32 +339,71 @@ def last_mile_cleaning(df, **kwargs):
    df['delinquency_12'] = df['delinquency_12'].fillna(False).astype('int32')
    for column in df.columns:
        df[column] = df[column].fillna(-1)
    return df.to_arrow(index=False)
    return df.to_arrow(preserve_index=False)

def main():
    #print('XGBOOST_BUILD_DOC is ' + os.environ['XGBOOST_BUILD_DOC'])
    parser = argparse.ArgumentParser("rapidssample")
    parser.add_argument("--data_dir", type=str, help="location of data")
    parser.add_argument("--num_gpu", type=int, help="Number of GPUs to use", default=1)
    parser.add_argument("--part_count", type=int, help="Number of data files to train against", default=2)
    parser.add_argument("--end_year", type=int, help="Year to end the data load", default=2000)
    parser.add_argument("--cpu_predictor", type=str, help="Flag to use CPU for prediction", default='False')
    parser.add_argument('-f', type=str, default='') # added for notebook execution scenarios
    args = parser.parse_args()
    data_dir = args.data_dir
    num_gpu = args.num_gpu
    part_count = args.part_count
    end_year = args.end_year
    cpu_predictor = args.cpu_predictor.lower() in ('yes', 'true', 't', 'y', '1')

    if cpu_predictor:
        print('Training with CPUs require num gpu = 1')
        num_gpu = 1

    print('data_dir = {0}'.format(data_dir))
    print('num_gpu = {0}'.format(num_gpu))
    print('part_count = {0}'.format(part_count))
    #part_count = part_count + 1 # adding one because the usage below is not inclusive
    print('end_year = {0}'.format(end_year))
    print('cpu_predictor = {0}'.format(cpu_predictor))

    import subprocess

    cmd = "hostname --all-ip-addresses"
    process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    IPADDR = str(output.decode()).split()[0]

    cluster = LocalCUDACluster(ip=IPADDR,n_workers=num_gpu)
    client = Client(cluster)
    client
    print(client.ncores())

    # to download data for this notebook, visit https://rapidsai.github.io/demos/datasets/mortgage-data and update the following paths accordingly
acq_data_path = "{0}/acq".format(data_dir) #"/rapids/data/mortgage/acq"
perf_data_path = "{0}/perf".format(data_dir) #"/rapids/data/mortgage/perf"
col_names_path = "{0}/names.csv".format(data_dir) # "/rapids/data/mortgage/names.csv"
start_year = 2000
    acq_data_path = "{0}/acq".format(data_dir) #"/rapids/data/mortgage/acq"
    perf_data_path = "{0}/perf".format(data_dir) #"/rapids/data/mortgage/perf"
    col_names_path = "{0}/names.csv".format(data_dir) # "/rapids/data/mortgage/names.csv"
    start_year = 2000
    #end_year = 2000 # end_year is inclusive -- converted to parameter
    #part_count = 2 # the number of data files to train against -- converted to parameter

client.run(initialize_rmm_pool)
    client.run(initialize_rmm_pool)
    client
    print(client.ncores())
    # NOTE: The ETL calculates additional features which are then dropped before creating the XGBoost DMatrix.
    # This can be optimized to avoid calculating the dropped features.
print("Reading ...")
t1 = datetime.datetime.now()
gpu_dfs = []
gpu_time = 0
quarter = 1
year = start_year
count = 0
while year <= end_year:
    print("Reading ...")
    t1 = datetime.datetime.now()
    gpu_dfs = []
    gpu_time = 0
    quarter = 1
    year = start_year
    count = 0
    while year <= end_year:
        for file in glob(os.path.join(perf_data_path + "/Performance_" + str(year) + "Q" + str(quarter) + "*")):
            if count < part_count:
                gpu_dfs.append(process_quarter_gpu(year=year, quarter=quarter, perf_file=file))
                gpu_dfs.append(process_quarter_gpu(client, col_names_path, acq_data_path, year=year, quarter=quarter, perf_file=file))
                count += 1
                print('file: {0}'.format(file))
                print('count: {0}'.format(count))
@@ -418,16 +412,17 @@ while year <= end_year:
            year += 1
            quarter = 1

wait(gpu_dfs)
t2 = datetime.datetime.now()
print("Reading time ...")
print(t2-t1)
print('len(gpu_dfs) is {0}'.format(len(gpu_dfs)))
    wait(gpu_dfs)
    t2 = datetime.datetime.now()
    print("Reading time ...")
    print(t2-t1)
    print('len(gpu_dfs) is {0}'.format(len(gpu_dfs)))

client.run(cudf._gdf.rmm_finalize)
client.run(initialize_rmm_no_pool)

dxgb_gpu_params = {
    client.run(cudf._gdf.rmm_finalize)
    client.run(initialize_rmm_no_pool)
    client
    print(client.ncores())
    dxgb_gpu_params = {
        'nround': 100,
        'max_depth': 8,
        'max_leaves': 2**8,
@@ -448,53 +443,53 @@ dxgb_gpu_params = {
        'criterion': 'friedman_mse',
        'grow_policy': 'lossguide',
        'verbose': True
}
    }

if cpu_predictor:
    if cpu_predictor:
        print('Training using CPUs')
        dxgb_gpu_params['predictor'] = 'cpu_predictor'
        dxgb_gpu_params['tree_method'] = 'hist'
        dxgb_gpu_params['objective'] = 'reg:linear'

else:
    else:
        print('Training using GPUs')

print('Training parameters are {0}'.format(dxgb_gpu_params))
    print('Training parameters are {0}'.format(dxgb_gpu_params))

gpu_dfs = [delayed(DataFrame.from_arrow)(gpu_df) for gpu_df in gpu_dfs[:part_count]]
    gpu_dfs = [delayed(DataFrame.from_arrow)(gpu_df) for gpu_df in gpu_dfs[:part_count]]
gpu_dfs = [gpu_df for gpu_df in gpu_dfs]
wait(gpu_dfs)

    gpu_dfs = [gpu_df for gpu_df in gpu_dfs]

    wait(gpu_dfs)
tmp_map = [(gpu_df, list(client.who_has(gpu_df).values())[0]) for gpu_df in gpu_dfs]
new_map = {}
for key, value in tmp_map:
    tmp_map = [(gpu_df, list(client.who_has(gpu_df).values())[0]) for gpu_df in gpu_dfs]
    new_map = {}
    for key, value in tmp_map:
        if value not in new_map:
            new_map[value] = [key]
        else:
            new_map[value].append(key)

del(tmp_map)
gpu_dfs = []
for list_delayed in new_map.values():
    del(tmp_map)
    gpu_dfs = []
    for list_delayed in new_map.values():
        gpu_dfs.append(delayed(cudf.concat)(list_delayed))

del(new_map)
gpu_dfs = [(gpu_df[['delinquency_12']], gpu_df[delayed(list)(gpu_df.columns.difference(['delinquency_12']))]) for gpu_df in gpu_dfs]
gpu_dfs = [(gpu_df[0].persist(), gpu_df[1].persist()) for gpu_df in gpu_dfs]
gpu_dfs = [dask.delayed(xgb.DMatrix)(gpu_df[1], gpu_df[0]) for gpu_df in gpu_dfs]
gpu_dfs = [gpu_df.persist() for gpu_df in gpu_dfs]
    del(new_map)
    gpu_dfs = [(gpu_df[['delinquency_12']], gpu_df[delayed(list)(gpu_df.columns.difference(['delinquency_12']))]) for gpu_df in gpu_dfs]
    gpu_dfs = [(gpu_df[0].persist(), gpu_df[1].persist()) for gpu_df in gpu_dfs]

gc.collect()
labels = None
    gpu_dfs = [dask.delayed(xgb.DMatrix)(gpu_df[1], gpu_df[0]) for gpu_df in gpu_dfs]
    gpu_dfs = [gpu_df.persist() for gpu_df in gpu_dfs]
    gc.collect()
    wait(gpu_dfs)

print('str(gpu_dfs) is {0}'.format(str(gpu_dfs)))
    labels = None
t1 = datetime.datetime.now()
bst = dxgb_gpu.train(client, dxgb_gpu_params, gpu_dfs, labels, num_boost_round=dxgb_gpu_params['nround'])
t2 = datetime.datetime.now()
print("Training time ...")
print(t2-t1)
print('str(bst) is {0}'.format(str(bst)))
print('Exiting script')

    wait(gpu_dfs)
    t1 = datetime.datetime.now()
    bst = dxgb_gpu.train(client, dxgb_gpu_params, gpu_dfs, labels, num_boost_round=dxgb_gpu_params['nround'])
    t2 = datetime.datetime.now()
    print("Training time ...")
    print(t2-t1)
    print('str(bst) is {0}'.format(str(bst)))
    print('Exiting script')

if __name__ == '__main__':
    main()
contrib/RAPIDS/rapids.yml (new file, 35 lines)
@@ -0,0 +1,35 @@
name: rapids
channels:
  - nvidia
  - numba
  - conda-forge
  - rapidsai
  - defaults
  - pytorch

dependencies:
  - arrow-cpp=0.12.0
  - bokeh
  - cffi=1.11.5
  - cmake=3.12
  - cuda92
  - cython==0.29
  - dask=1.1.1
  - distributed=1.25.3
  - faiss-gpu=1.5.0
  - numba=0.42
  - numpy=1.15.4
  - nvstrings
  - pandas=0.23.4
  - pyarrow=0.12.0
  - scikit-learn
  - scipy
  - cudf
  - cuml
  - python=3.6.2
  - jupyterlab
  - pip:
    - file:/rapids/xgboost/python-package/dist/xgboost-0.81-py3-none-any.whl
    - git+https://github.com/rapidsai/dask-xgboost@dask-cudf
    - git+https://github.com/rapidsai/dask-cudf@master
    - git+https://github.com/rapidsai/dask-cuda@master
@@ -4,8 +4,9 @@ Learn how to use Azure Machine Learning services for experimentation and model m

As a pre-requisite, run the [configuration Notebook](../configuration.ipynb) notebook first to set up your Azure ML Workspace. Then, run the notebooks in following recommended order.

* [train-within-notebook](./training/train-within-notebook): Train a model while tracking run history, and learn how to deploy the model as web service to Azure Container Instance.
* [train-on-local](./training/train-on-local): Learn how to submit a run and use Azure ML managed run configuration.
* [train-within-notebook](./training/train-within-notebook): Train a model while tracking run history, and learn how to deploy the model as web service to Azure Container Instance.
* [train-on-local](./training/train-on-local): Learn how to submit a run to local computer and use Azure ML managed run configuration.
* [train-on-amlcompute](./training/train-on-amlcompute): Use a 1-n node Azure ML managed compute cluster for remote runs on Azure CPU or GPU infrastructure.
* [train-on-remote-vm](./training/train-on-remote-vm): Use Data Science Virtual Machine as a target for remote runs.
* [logging-api](./training/logging-api): Learn about the details of logging metrics to run history.
* [register-model-create-image-deploy-service](./deployment/register-model-create-image-deploy-service): Learn about the details of model management.
@@ -1,8 +1,8 @@
# Table of Contents
1. [Automated ML Introduction](#introduction)
1. [Running samples in Azure Notebooks](#jupyter)
1. [Running samples in Azure Databricks](#databricks)
1. [Running samples in a Local Conda environment](#localconda)
1. [Setup using Azure Notebooks](#jupyter)
1. [Setup using Azure Databricks](#databricks)
1. [Setup using a Local Conda environment](#localconda)
1. [Automated ML SDK Sample Notebooks](#samples)
1. [Documentation](#documentation)
1. [Running using python command](#pythoncommand)
@@ -13,23 +13,23 @@
Automated machine learning (automated ML) builds high quality machine learning models for you by automating model and hyperparameter selection. Bring a labelled dataset that you want to build a model for, automated ML will give you a high quality machine learning model that you can use for predictions.

If you are new to Data Science, AutoML will help you get jumpstarted by simplifying machine learning model building. It abstracts you from needing to perform model selection, hyperparameter selection and in one step creates a high quality trained model for you to use.
If you are new to Data Science, automated ML will help you get jumpstarted by simplifying machine learning model building. It abstracts you from needing to perform model selection, hyperparameter selection and in one step creates a high quality trained model for you to use.

If you are an experienced data scientist, AutoML will help increase your productivity by intelligently performing the model and hyperparameter selection for your training and generates high quality models much quicker than manually specifying several combinations of the parameters and running training jobs. AutoML provides visibility and access to all the training jobs and the performance characteristics of the models to help you further tune the pipeline if you desire.
If you are an experienced data scientist, automated ML will help increase your productivity by intelligently performing the model and hyperparameter selection for your training and generates high quality models much quicker than manually specifying several combinations of the parameters and running training jobs. Automated ML provides visibility and access to all the training jobs and the performance characteristics of the models to help you further tune the pipeline if you desire.

Below are the three execution environments supported by AutoML.
Below are the three execution environments supported by automated ML.

<a name="jupyter"></a>
## Running samples in Azure Notebooks - Jupyter based notebooks in the Azure cloud
## Setup using Azure Notebooks - Jupyter based notebooks in the Azure cloud

1. [](https://aka.ms/aml-clone-azure-notebooks)
[Import sample notebooks ](https://aka.ms/aml-clone-azure-notebooks) into Azure Notebooks.
1. Follow the instructions in the [configuration](configuration.ipynb) notebook to create and connect to a workspace.
1. Follow the instructions in the [configuration](../../configuration.ipynb) notebook to create and connect to a workspace.
1. Open one of the sample notebooks.

<a name="databricks"></a>
## Running samples in Azure Databricks
## Setup using Azure Databricks

**NOTE**: Please create your Azure Databricks cluster as v4.x (high concurrency preferred) with **Python 3** (dropdown).
**NOTE**: You should at least have contributor access to your Azure subscription to run the notebook.
@@ -39,35 +39,25 @@ Below are the three execution environments supported by AutoML.
- Attach the notebook to the cluster.

<a name="localconda"></a>
## Running samples in a Local Conda environment
## Setup using a Local Conda environment

To run these notebooks on your own notebook server, use these installation instructions.

The instructions below will install everything you need and then start a Jupyter notebook. To start your Jupyter notebook manually, use:

```
conda activate azure_automl
jupyter notebook
```

or on Mac:

```
source activate azure_automl
jupyter notebook
```

The instructions below will install everything you need and then start a Jupyter notebook.

### 1. Install mini-conda from [here](https://conda.io/miniconda.html), choose 64-bit Python 3.7 or higher.
- **Note**: if you already have conda installed, you can keep using it but it should be version 4.4.10 or later (as shown by: conda -V). If you have a previous version installed, you can update it using the command: conda update conda.
There's no need to install mini-conda specifically.

### 2. Downloading the sample notebooks
- Download the sample notebooks from [GitHub](https://github.com/Azure/MachineLearningNotebooks) as zip and extract the contents to a local directory. The AutoML sample notebooks are in the "automl" folder.
- Download the sample notebooks from [GitHub](https://github.com/Azure/MachineLearningNotebooks) as zip and extract the contents to a local directory. The automated ML sample notebooks are in the "automated-machine-learning" folder.

### 3. Setup a new conda environment
The **automl/automl_setup** script creates a new conda environment, installs the necessary packages, configures the widget and starts a jupyter notebook.
It takes the conda environment name as an optional parameter. The default conda environment name is azure_automl. The exact command depends on the operating system. See the specific sections below for Windows, Mac and Linux. It can take about 10 minutes to execute.
The **automl_setup** script creates a new conda environment, installs the necessary packages, configures the widget and starts a jupyter notebook. It takes the conda environment name as an optional parameter. The default conda environment name is azure_automl. The exact command depends on the operating system. See the specific sections below for Windows, Mac and Linux. It can take about 10 minutes to execute.

Packages installed by the **automl_setup** script:
<ul><li>python</li><li>nb_conda</li><li>matplotlib</li><li>numpy</li><li>cython</li><li>urllib3</li><li>scipy</li><li>scikit-learn</li><li>pandas</li><li>tensorflow</li><li>py-xgboost</li><li>azureml-sdk</li><li>azureml-widgets</li><li>pandas-ml</li></ul>

For more details refer to the [automl_env.yml](./automl_env.yml)
## Windows
Start an **Anaconda Prompt** window, cd to the **how-to-use-azureml/automated-machine-learning** folder where the sample notebooks were extracted and then run:
```
@@ -90,46 +80,58 @@ bash automl_setup_linux.sh
```

### 4. Running configuration.ipynb
- Before running any samples you next need to run the configuration notebook. Click on configuration.ipynb notebook
- Before running any samples you next need to run the configuration notebook. Click on [configuration](../../configuration.ipynb) notebook
- Execute the cells in the notebook to Register Machine Learning Services Resource Provider and create a workspace. (*instructions in notebook*)

### 5. Running Samples
- Please make sure you use the Python [conda env:azure_automl] kernel when trying the sample Notebooks.
- Follow the instructions in the individual notebooks to explore various features in AutoML
- Follow the instructions in the individual notebooks to explore various features in automated ML.

### 6. Starting jupyter notebook manually
To start your Jupyter notebook manually, use:

```
conda activate azure_automl
jupyter notebook
```

or on Mac or Linux:

```
source activate azure_automl
jupyter notebook
```

<a name="samples"></a>
# Automated ML SDK Sample Notebooks
- [configuration.ipynb](configuration.ipynb)
  - Create new Azure ML Workspace
  - Save Workspace configuration file

- [auto-ml-classification.ipynb](classification/auto-ml-classification.ipynb)
  - Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits)
  - Simple example of using Auto ML for classification
  - Simple example of using automated ML for classification
  - Uses local compute for training

- [auto-ml-regression.ipynb](regression/auto-ml-regression.ipynb)
  - Dataset: scikit learn's [diabetes dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_diabetes.html)
  - Simple example of using Auto ML for regression
  - Simple example of using automated ML for regression
  - Uses local compute for training

- [auto-ml-remote-execution.ipynb](remote-execution/auto-ml-remote-execution.ipynb)
  - Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits)
  - Example of using Auto ML for classification using a remote linux DSVM for training
  - Example of using automated ML for classification using a remote linux DSVM for training
  - Parallel execution of iterations
  - Async tracking of progress
  - Cancelling individual iterations or entire run
  - Retrieving models for any iteration or logged metric
  - Specify automl settings as kwargs
  - Specify automated ML settings as kwargs

- [auto-ml-remote-batchai.ipynb](remote-batchai/auto-ml-remote-batchai.ipynb)
- [auto-ml-remote-amlcompute.ipynb](remote-batchai/auto-ml-remote-amlcompute.ipynb)
  - Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits)
  - Example of using automated ML for classification using remote AmlCompute for training
  - Parallel execution of iterations
  - Async tracking of progress
  - Cancelling individual iterations or entire run
  - Retrieving models for any iteration or logged metric
  - Specify automl settings as kwargs
  - Specify automated ML settings as kwargs

- [auto-ml-remote-attach.ipynb](remote-attach/auto-ml-remote-attach.ipynb)
  - Dataset: Scikit learn's [20newsgroup](http://scikit-learn.org/stable/datasets/twenty_newsgroups.html)
@@ -150,8 +152,8 @@ bash automl_setup_linux.sh

- [auto-ml-exploring-previous-runs.ipynb](exploring-previous-runs/auto-ml-exploring-previous-runs.ipynb)
  - List all projects for the workspace
  - List all AutoML Runs for a given project
  - Get details for an AutoML Run. (AutoML settings, run widget & all metrics)
  - List all automated ML Runs for a given project
  - Get details for an automated ML Run. (automated ML settings, run widget & all metrics)
  - Download fitted pipeline for any iteration

- [auto-ml-remote-execution-with-datastore.ipynb](remote-execution-with-datastore/auto-ml-remote-execution-with-datastore.ipynb)
@@ -160,7 +162,7 @@ bash automl_setup_linux.sh

- [auto-ml-classification-with-deployment.ipynb](classification-with-deployment/auto-ml-classification-with-deployment.ipynb)
  - Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits)
  - Simple example of using Auto ML for classification
  - Simple example of using automated ML for classification
  - Registering the model
  - Creating Image and creating aci service
  - Testing the aci service
@@ -169,6 +171,9 @@ bash automl_setup_linux.sh
  - How to specify sample_weight
  - The difference that it makes to test results

- [auto-ml-subsampling-local.ipynb](subsampling/auto-ml-subsampling-local.ipynb)
  - How to enable subsampling

- [auto-ml-dataprep.ipynb](dataprep/auto-ml-dataprep.ipynb)
  - Using DataPrep for reading data
@@ -177,16 +182,21 @@ bash automl_setup_linux.sh
|
||||
|
||||
- [auto-ml-classification-with-whitelisting.ipynb](classification-with-whitelisting/auto-ml-classification-with-whitelisting.ipynb)
|
||||
- Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits)
|
||||
- Simple example of using Auto ML for classification with whitelisting tensorflow models.
|
||||
- Simple example of using automated ML for classification with whitelisting tensorflow models.
|
||||
- Uses local compute for training
|
||||
|
||||
- [auto-ml-forecasting-energy-demand.ipynb](forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb)
|
||||
- Dataset: [NYC energy demand data](forecasting-a/nyc_energy.csv)
|
||||
- Example of using AutoML for training a forecasting model
|
||||
- Example of using automated ML for training a forecasting model
|
||||
|
||||
- [auto-ml-forecasting-orange-juice-sales.ipynb](forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb)
|
||||
- Dataset: [Dominick's grocery sales of orange juice](forecasting-b/dominicks_OJ.csv)
|
||||
- Example of training an AutoML forecasting model on multiple time-series
|
||||
- Example of training an automated ML forecasting model on multiple time-series
|
||||
|
||||
- [auto-ml-classification-with-onnx.ipynb](classification-with-onnx/auto-ml-classification-with-onnx.ipynb)
|
||||
- Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits)
|
||||
- Simple example of using automated ML for classification with ONNX models
|
||||
- Uses local compute for training
|
||||
|
||||
<a name="documentation"></a>
|
||||
See [Configure automated machine learning experiments](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-auto-train) to learn how more about the the settings and features available for automated machine learning experiments.
|
||||
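
For orientation, here is a minimal sketch of the `AutoMLConfig` object these notebooks revolve around; the values are illustrative, and `X_train`, `y_train`, and `project_folder` are assumed to be defined in earlier cells, as in the sample notebooks.

```python
# Minimal AutoMLConfig sketch (illustrative values; X_train, y_train and
# project_folder are assumed to come from earlier notebook cells).
import logging
from azureml.train.automl import AutoMLConfig

automl_config = AutoMLConfig(task = 'classification',
                             primary_metric = 'AUC_weighted',
                             iteration_timeout_minutes = 20,
                             iterations = 10,
                             n_cross_validations = 2,
                             verbosity = logging.INFO,
                             X = X_train,
                             y = y_train,
                             path = project_folder)
```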
@@ -205,10 +215,18 @@ The main code of the file must be indented so that it is under this condition.

<a name="troubleshooting"></a>
# Troubleshooting
## automl_setup fails
- 1. On windows, make sure that you are running automl_setup from an Anaconda Prompt window rather than a regular cmd window. You can launch the "Anaconda Prompt" window by hitting the Start button and typing "Anaconda Prompt". If you don't see the application "Anaconda Prompt", you might not have conda or Miniconda installed. In that case, you can install it [here](https://conda.io/miniconda.html).
+ 1. On Windows, make sure that you are running automl_setup from an Anaconda Prompt window rather than a regular cmd window. You can launch the "Anaconda Prompt" window by hitting the Start button and typing "Anaconda Prompt". If you don't see the application "Anaconda Prompt", you might not have conda or Miniconda installed. In that case, you can install it [here](https://conda.io/miniconda.html).
2. Check that you have 64-bit conda installed rather than 32-bit. You can check this with the command `conda info`. The `platform` should be `win-64` for Windows or `osx-64` for Mac.
3. Check that you have conda 4.4.10 or later. You can check the version with the command `conda -V`. If you have a previous version installed, you can update it using the command: `conda update conda`.
- 4. Pass a new name as the first parameter to automl_setup so that it creates a new conda environment. You can view existing conda environments using `conda env list` and remove them with `conda env remove -n <environmentname>`.
+ 4. On Linux, if the error is `gcc: error trying to exec 'cc1plus': execvp: No such file or directory`, install build essentials using the command `sudo apt-get install build-essential`.
+ 5. Pass a new name as the first parameter to automl_setup so that it creates a new conda environment. You can view existing conda environments using `conda env list` and remove them with `conda env remove -n <environmentname>`.

## automl_setup_linux.sh fails
If automl_setup_linux.sh fails on Ubuntu Linux with the error `unable to execute 'gcc': No such file or directory`:
1. Make sure that outbound ports 53 and 80 are enabled. On an Azure VM, you can do this from the Azure Portal by selecting the VM and clicking on Networking.
2. Run the command: `sudo apt-get update`
3. Run the command: `sudo apt-get install build-essential --fix-missing`
4. Run `automl_setup_linux.sh` again.

## configuration.ipynb fails
1) For local conda, make sure that you have successfully run automl_setup first.

@@ -229,13 +247,23 @@ If a sample notebook fails with an error that property, method or library does n

1) Check that you have selected the correct kernel in the Jupyter notebook. The kernel is displayed in the top right of the notebook page. It can be changed using the `Kernel | Change Kernel` menu option. For Azure Notebooks, it should be `Python 3.6`. For local conda environments, it should be the conda environment name that you specified in automl_setup. The default is azure_automl. Note that the kernel is saved as part of the notebook, so if you switch to a new conda environment, you will have to select the new kernel in the notebook.
2) Check that the notebook is for the SDK version that you are using. You can check the SDK version by executing `azureml.core.VERSION` in a Jupyter notebook cell. You can download previous versions of the sample notebooks from GitHub by clicking the `Branch` button, selecting the `Tags` tab and then selecting the version.

## Numpy import fails on Windows
Some Windows environments see an error loading numpy with the latest Python version, 3.6.8. If you see this issue, try Python version 3.6.7.

## Numpy import fails
Check the tensorflow version in the automated ML conda environment. Supported versions are < 1.13. Uninstall tensorflow from the environment if its version is >= 1.13.
You can check the version of tensorflow and uninstall it as follows:
1) Start a command shell and activate the conda environment where the automated ML packages are installed.
2) Enter `pip freeze` and look for `tensorflow`; if found, the version listed should be < 1.13.
3) If the listed version is not a supported version, run `pip uninstall tensorflow` in the command shell and enter `y` for confirmation.
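
If you prefer to check from Python, here is a small sketch (not part of the original steps) that inspects the installed tensorflow version with `pkg_resources`:

```python
# Sketch: report whether the installed tensorflow version is supported (< 1.13).
import pkg_resources

try:
    tf_version = pkg_resources.get_distribution("tensorflow").version
    major, minor = (int(p) for p in tf_version.split(".")[:2])
    if (major, minor) >= (1, 13):
        print("tensorflow %s is unsupported; run 'pip uninstall tensorflow'." % tf_version)
    else:
        print("tensorflow %s is a supported version." % tf_version)
except pkg_resources.DistributionNotFound:
    print("tensorflow is not installed in this environment.")
```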

## Remote run: DsvmCompute.create fails
There are several reasons why DsvmCompute.create can fail. The reason is usually in the error message, but you have to look at the end of the error message for the detailed reason. Some common reasons are:
1) `Compute name is invalid, it should start with a letter, be between 2 and 16 character, and only include letters (a-zA-Z), numbers (0-9) and \'-\'.` Note that underscore is not allowed in the name.
2) `The requested VM size xxxxx is not available in the current region.` You can select a different region or vm_size.
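
As a quick aid, here is a hypothetical helper (not from the notebooks) that checks a proposed compute name against the rule quoted in item 1:

```python
# Sketch: validate a compute name against the rule quoted above
# (starts with a letter, 2-16 chars total, only letters, digits and '-').
import re

def is_valid_compute_name(name):
    return re.fullmatch(r"[A-Za-z][A-Za-z0-9-]{1,15}", name) is not None

print(is_valid_compute_name("mydsvmc"))  # True
print(is_valid_compute_name("my_dsvm"))  # False: underscore is not allowed
```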

## Remote run: Unable to establish SSH connection
- AutoML uses the SSH protocol to communicate with remote DSVMs. This defaults to port 22. Possible causes for this error are:
+ Automated ML uses the SSH protocol to communicate with remote DSVMs. This defaults to port 22. Possible causes for this error are:
1) The DSVM is not ready for SSH connections. When DSVM creation completes, the DSVM might still not be ready to accept SSH connections. The sample notebooks have a one minute delay to allow for this.
2) Your Azure subscription may restrict the IP address ranges that can access the DSVM on port 22. You can check this in the Azure Portal by selecting the Virtual Machine and then clicking Networking. The Virtual Machine name is the name that you provided in the notebook plus 10 alphanumeric characters to make the name unique. The Inbound Port Rules define what can access the VM on specific ports. Note that there is a priority order, so a Deny entry with a low priority number will override an Allow entry with a higher priority number.

@@ -246,13 +274,13 @@ This is often an issue with the `get_data` method.

3) You can get to the error log for the setup iteration by clicking the `Click here to see the run in Azure portal` link, clicking `Back to Experiment`, clicking on the highest run number and then clicking on Logs.

## Remote run: disk full
- AutoML creates files under /tmp/azureml_runs for each iteration that it runs. It creates a folder with the iteration id. For example: AutoML_9a038a18-77cc-48f1-80fb-65abdbc33abe_93. Under this, there is an azureml-logs folder, which contains logs. If you run too many iterations on the same DSVM, these files can fill the disk.
+ Automated ML creates files under /tmp/azureml_runs for each iteration that it runs. It creates a folder with the iteration id. For example: AutoML_9a038a18-77cc-48f1-80fb-65abdbc33abe_93. Under this, there is an azureml-logs folder, which contains logs. If you run too many iterations on the same DSVM, these files can fill the disk.
You can delete the files under /tmp/azureml_runs or just delete the VM and create a new one.
If your get_data downloads files, make sure to delete them afterwards, or they can use disk space as well.
When using a DataStore, it is good to specify an absolute path for the files so that they are downloaded just once. If you specify a relative path, it will download a file for each iteration.
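
The following is an illustrative cleanup sketch only; the `/tmp/azureml_runs` path and `AutoML_` folder prefix come from the description above, and you should confirm no runs are active before deleting anything:

```python
# Sketch: remove per-iteration folders left under /tmp/azureml_runs on the DSVM.
import shutil
from pathlib import Path

runs_root = Path("/tmp/azureml_runs")
for run_dir in runs_root.glob("AutoML_*"):
    shutil.rmtree(run_dir, ignore_errors=True)  # delete the whole iteration folder
    print("Removed", run_dir)
```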

## Remote run: Iterations fail and the log contains "MemoryError"
- This can be caused by insufficient memory on the DSVM. AutoML loads all training data into memory, so the available memory should be more than the training data size.
+ This can be caused by insufficient memory on the DSVM. Automated ML loads all training data into memory, so the available memory should be more than the training data size.
If you are using a remote DSVM, memory is needed for each concurrent iteration. The max_concurrent_iterations setting specifies the maximum number of concurrent iterations. For example, if the training data size is 8 GB and max_concurrent_iterations is set to 10, the minimum memory required is at least 80 GB.
To resolve this issue, allocate a DSVM with more memory or reduce the value specified for max_concurrent_iterations.
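
A back-of-the-envelope sketch of the sizing rule above (the numbers are the example values from the text):

```python
# Sketch: minimum DSVM memory = training data size x concurrent iterations.
training_data_gb = 8            # example value from the text above
max_concurrent_iterations = 10  # value that would be passed to AutoMLConfig

min_memory_gb = training_data_gb * max_concurrent_iterations
print("Allocate at least %d GB of RAM, or reduce max_concurrent_iterations."
      % min_memory_gb)
```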

@@ -2,31 +2,20 @@ name: azure_automl
dependencies:
  # The python interpreter version.
  # Currently Azure ML only supports 3.5.2 and later.
-   - python=3.6
+   - python>=3.5.2,<3.6.8
  - nb_conda
  - matplotlib==2.1.0
-   - numpy>=1.11.0,<1.15.0
+   - numpy>=1.11.0,<=1.16.2
  - cython
  - urllib3<1.24
  - scipy>=1.0.0,<=1.1.0
-   - scikit-learn>=0.18.0,<=0.19.1
-   - pandas>=0.22.0,<0.23.0
-   - tensorflow>=1.12.0
-
-   # Required for azuremlftk
-   - dill
-   - pyodbc
-   - statsmodels
-   - numexpr
-   - keras
-   - distributed>=1.21.5,<1.24
+   - scikit-learn>=0.19.0,<=0.20.3
+   - pandas>=0.22.0,<=0.23.4
+   - py-xgboost<=0.80

  - pip:
-     # Required for azuremlftk
-     - https://azuremlpackages.blob.core.windows.net/forecasting/azuremlftk-0.1.18323.5a1-py3-none-any.whl

    # Required packages for AzureML execution, history, and data preparation.
-     - azureml-sdk[automl,notebooks,explain]
+     - azureml-sdk[automl,explain]
    - azureml-widgets
    - pandas_ml
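
Not part of the repository: a small Python sketch for spot-checking that an activated environment satisfies the pins above (the requirement strings are copied from the yaml):

```python
# Sketch: verify a few of the pinned requirements in the active environment.
import pkg_resources

for req in ["numpy>=1.11.0,<=1.16.2",
            "scikit-learn>=0.19.0,<=0.20.3",
            "pandas>=0.22.0,<=0.23.4"]:
    try:
        pkg_resources.require(req)
        print("OK:  ", req)
    except Exception as exc:  # DistributionNotFound or VersionConflict
        print("FAIL:", req, "->", exc)
```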

@@ -2,32 +2,21 @@ name: azure_automl
dependencies:
  # The python interpreter version.
  # Currently Azure ML only supports 3.5.2 and later.
-   - python=3.6
  - nomkl
+   - python>=3.5.2,<3.6.8
  - nb_conda
  - matplotlib==2.1.0
-   - numpy>=1.15.3
+   - numpy>=1.11.0,<=1.16.2
  - cython
  - urllib3<1.24
  - scipy>=1.0.0,<=1.1.0
-   - scikit-learn>=0.18.0,<=0.19.1
+   - scikit-learn>=0.19.0,<=0.20.3
-   - pandas>=0.22.0,<0.23.0
-   - tensorflow>=1.12.0

-   # Required for azuremlftk
-   - dill
-   - pyodbc
-   - statsmodels
-   - numexpr
-   - keras
-   - distributed>=1.21.5,<1.24
+   - py-xgboost<=0.80

  - pip:
-     # Required for azuremlftk
-     - https://azuremlpackages.blob.core.windows.net/forecasting/azuremlftk-0.1.18323.5a1-py3-none-any.whl

    # Required packages for AzureML execution, history, and data preparation.
-     - azureml-sdk[automl,notebooks,explain]
+     - azureml-sdk[automl,explain]
    - azureml-widgets
    - pandas_ml
@@ -31,7 +31,6 @@ else
    conda install lightgbm -c conda-forge -y &&
    python -m ipykernel install --user --name $CONDA_ENV_NAME --display-name "Python ($CONDA_ENV_NAME)" &&
    jupyter nbextension uninstall --user --py azureml.widgets &&
-    pip install numpy==1.15.3 &&
    echo "" &&
    echo "" &&
    echo "***************************************" &&
@@ -9,6 +9,13 @@
 "Licensed under the MIT License."
 ]
 },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -84,9 +91,9 @@
 "ws = Workspace.from_config()\n",
 "\n",
 "# choose a name for experiment\n",
- "experiment_name = 'automl-local-classification'\n",
+ "experiment_name = 'automl-classification-deployment'\n",
 "# project folder\n",
- "project_folder = './sample_projects/automl-local-classification'\n",
+ "project_folder = './sample_projects/automl-classification-deployment'\n",
 "\n",
 "experiment=Experiment(ws, experiment_name)\n",
 "\n",
@@ -103,23 +110,6 @@
 "outputDf.T"
 ]
 },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Opt-in diagnostics for better experience, quality, and security of future releases."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from azureml.telemetry import set_diagnostics_collection\n",
- "set_diagnostics_collection(send_diagnostics = True)"
- ]
- },
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -136,7 +126,7 @@
 "|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
 "|**n_cross_validations**|Number of cross validation splits.|\n",
 "|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
- "|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.|\n",
+ "|**y**|(sparse) array-like, shape = [n_samples, ], Multi-class targets.|\n",
 "|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|"
 ]
 },
@@ -156,7 +146,6 @@
 "                             primary_metric = 'AUC_weighted',\n",
 "                             iteration_timeout_minutes = 20,\n",
 "                             iterations = 10,\n",
 "                             n_cross_validations = 2,\n",
 "                             verbosity = logging.INFO,\n",
 "                             X = X_train, \n",
 "                             y = y_train,\n",
@@ -280,7 +269,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
- "To ensure the fit results are consistent with the training results, the SDK dependency versions need to be the same as the environment that trains the model. Details about retrieving the versions can be found in notebook [12.auto-ml-retrieve-the-training-sdk-versions](12.auto-ml-retrieve-the-training-sdk-versions.ipynb)."
+ "To ensure the fit results are consistent with the training results, the SDK dependency versions need to be the same as the environment that trains the model. The following cells create a file, myenv.yml, which specifies the dependencies from the run."
 ]
 },
 {
@@ -289,8 +278,6 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "experiment_name = 'automl-local-classification'\n",
- "\n",
 "experiment = Experiment(ws, experiment_name)\n",
 "ml_run = AutoMLRun(experiment = experiment, run_id = local_run.id)"
 ]
@@ -322,7 +309,8 @@
 "source": [
 "from azureml.core.conda_dependencies import CondaDependencies\n",
 "\n",
- "myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'], pip_packages=['azureml-sdk[automl]'])\n",
+ "myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn','py-xgboost<=0.80'],\n",
+ "                                 pip_packages=['azureml-sdk[automl]'])\n",
 "\n",
 "conda_env_file_name = 'myenv.yml'\n",
 "myenv.save_to_file('.', conda_env_file_name)"
@@ -0,0 +1,358 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated Machine Learning\n",
"_**Classification with Local Compute**_\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Data](#Data)\n",
"1. [Train](#Train)\n",
"1. [Results](#Results)\n",
"1. [Test](#Test)\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"\n",
"In this example we use scikit-learn's [digit dataset](http://scikit-learn.org/stable/datasets/index.html#optical-recognition-of-handwritten-digits-dataset) to showcase how you can use AutoML for a simple classification problem.\n",
"\n",
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
"\n",
"Please find the ONNX related documentation [here](https://github.com/onnx/onnx).\n",
"\n",
"In this notebook you will learn how to:\n",
"1. Create an `Experiment` in an existing `Workspace`.\n",
"2. Configure AutoML using `AutoMLConfig`.\n",
"3. Train the model using local compute with the ONNX compatible config turned on.\n",
"4. Explore the results and save the ONNX model."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"from matplotlib import pyplot as plt\n",
"import numpy as np\n",
"import pandas as pd\n",
"from sklearn import datasets\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.train.automl import AutoMLConfig, constants"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# Choose a name for the experiment and specify the project folder.\n",
"experiment_name = 'automl-classification-onnx'\n",
"project_folder = './sample_projects/automl-classification-onnx'\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['SDK version'] = azureml.core.VERSION\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace Name'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Data\n",
"\n",
"This uses scikit-learn's [load_iris](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_iris.html) method."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"iris = datasets.load_iris()\n",
"X_train, X_test, y_train, y_test = train_test_split(iris.data, \n",
"                                                    iris.target, \n",
"                                                    test_size=0.2, \n",
"                                                    random_state=0)\n",
"\n",
"# Convert X_train and X_test to pandas DataFrames and set column names.\n",
"# This is needed for initializing the input variable names of the ONNX model,\n",
"# and for prediction with the ONNX model using the inference helper.\n",
"X_train = pd.DataFrame(X_train, columns=['c1', 'c2', 'c3', 'c4'])\n",
"X_test = pd.DataFrame(X_test, columns=['c1', 'c2', 'c3', 'c4'])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train with the ONNX compatible models config enabled\n",
"\n",
"Instantiate an `AutoMLConfig` object to specify the settings and data used to run the experiment.\n",
"\n",
"Set the parameter enable_onnx_compatible_models=True if you also want to generate the ONNX compatible models. Please note that the forecasting task and TensorFlow models are not ONNX compatible yet.\n",
"\n",
"|Property|Description|\n",
"|-|-|\n",
"|**task**|classification or regression|\n",
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
"|**y**|(sparse) array-like, shape = [n_samples, ], Multi-class targets.|\n",
"|**enable_onnx_compatible_models**|Enable the ONNX compatible models in the experiment.|\n",
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_config = AutoMLConfig(task = 'classification',\n",
"                             debug_log = 'automl_errors.log',\n",
"                             primary_metric = 'AUC_weighted',\n",
"                             iteration_timeout_minutes = 60,\n",
"                             iterations = 10,\n",
"                             verbosity = logging.INFO,\n",
"                             X = X_train, \n",
"                             y = y_train,\n",
"                             preprocess=True,\n",
"                             enable_onnx_compatible_models=True,\n",
"                             path = project_folder)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while.\n",
"In this example, we specify `show_output = True` to print currently running iterations to the console."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"local_run = experiment.submit(automl_config, show_output = True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"local_run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Results"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Widget for Monitoring Runs\n",
"\n",
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
"\n",
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"RunDetails(local_run).show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrieve the Best ONNX Model\n",
"\n",
"Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. The model includes the pipeline and any pre-processing. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*.\n",
"\n",
"Set the parameter return_onnx_model=True to retrieve the best ONNX model, instead of the Python model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run, onnx_mdl = local_run.get_output(return_onnx_model=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Save the best ONNX model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.automl.core.onnx_convert import OnnxConverter\n",
"onnx_fl_path = \"./best_model.onnx\"\n",
"OnnxConverter.save_onnx_model(onnx_mdl, onnx_fl_path)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Predict with the ONNX model, using the onnxruntime package"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"import json\n",
"from azureml.automl.core.onnx_convert import OnnxConvertConstants\n",
"\n",
"if sys.version_info < OnnxConvertConstants.OnnxIncompatiblePythonVersion:\n",
"    python_version_compatible = True\n",
"else:\n",
"    python_version_compatible = False\n",
"\n",
"try:\n",
"    import onnxruntime\n",
"    from azureml.automl.core.onnx_convert import OnnxInferenceHelper\n",
"    onnxrt_present = True\n",
"except ImportError:\n",
"    onnxrt_present = False\n",
"\n",
"def get_onnx_res(run):\n",
"    res_path = '_debug_y_trans_converter.json'\n",
"    run.download_file(name=constants.MODEL_RESOURCE_PATH_ONNX, output_file_path=res_path)\n",
"    with open(res_path) as f:\n",
"        onnx_res = json.load(f)\n",
"    return onnx_res\n",
"\n",
"if onnxrt_present and python_version_compatible:\n",
"    mdl_bytes = onnx_mdl.SerializeToString()\n",
"    onnx_res = get_onnx_res(best_run)\n",
"\n",
"    onnxrt_helper = OnnxInferenceHelper(mdl_bytes, onnx_res)\n",
"    pred_onnx, pred_prob_onnx = onnxrt_helper.predict(X_test)\n",
"\n",
"    print(pred_onnx)\n",
"    print(pred_prob_onnx)\n",
"else:\n",
"    if not python_version_compatible:\n",
"        print('Please use Python version 3.6 to run the inference helper.')\n",
"    if not onnxrt_present:\n",
"        print('Please install the onnxruntime package to do the prediction with ONNX model.')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"authors": [
{
"name": "savitam"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
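
As a companion to the inference-helper cell above, here is a hedged sketch that scores the saved model with onnxruntime directly. It assumes the exported model exposes a single float tensor input; AutoML models may instead expose per-column inputs, in which case the `OnnxInferenceHelper` route shown in the notebook is the safer one.

```python
# Sketch: run the saved ONNX model directly with onnxruntime.
# Assumes a single float tensor input, named by the model itself;
# X_test is the pandas DataFrame built in the notebook above.
import numpy as np
import onnxruntime as rt

sess = rt.InferenceSession("./best_model.onnx")
input_name = sess.get_inputs()[0].name
pred = sess.run(None, {input_name: X_test.values.astype(np.float32)})[0]
print(pred[:5])
```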
@@ -9,6 +9,13 @@
 "Licensed under the MIT License."
 ]
 },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -60,6 +67,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
+ "# Note: This notebook will install tensorflow if it is not already installed in the environment.\n",
 "import logging\n",
 "\n",
 "from matplotlib import pyplot as plt\n",
@@ -70,6 +78,17 @@
 "import azureml.core\n",
 "from azureml.core.experiment import Experiment\n",
 "from azureml.core.workspace import Workspace\n",
+ "import sys\n",
+ "whitelist_models=[\"LightGBM\"]\n",
+ "if \"3.7\" != sys.version[0:3]:\n",
+ "    try:\n",
+ "        import tensorflow as tf1\n",
+ "    except ImportError:\n",
+ "        from pip._internal import main\n",
+ "        main(['install', 'tensorflow>=1.10.0,<=1.12.0'])\n",
+ "        logging.getLogger().setLevel(logging.ERROR)\n",
+ "    whitelist_models=[\"TensorFlowLinearClassifier\", \"TensorFlowDNN\"]\n",
+ "\n",
 "from azureml.train.automl import AutoMLConfig"
 ]
 },
@@ -100,23 +119,6 @@
 "outputDf.T"
 ]
 },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Opt-in diagnostics for better experience, quality, and security of future releases."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from azureml.telemetry import set_diagnostics_collection\n",
- "set_diagnostics_collection(send_diagnostics = True)"
- ]
- },
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -155,7 +157,7 @@
 "|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
 "|**n_cross_validations**|Number of cross validation splits.|\n",
 "|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
- "|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.|\n",
+ "|**y**|(sparse) array-like, shape = [n_samples, ], Multi-class targets.|\n",
 "|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|\n",
 "|**whitelist_models**|List of models that AutoML should use. The possible values are listed [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#configure-your-experiment-settings).|"
 ]
 },
@@ -171,12 +173,11 @@
 "                             primary_metric = 'AUC_weighted',\n",
 "                             iteration_timeout_minutes = 60,\n",
 "                             iterations = 10,\n",
 "                             n_cross_validations = 3,\n",
 "                             verbosity = logging.INFO,\n",
 "                             X = X_train, \n",
 "                             y = y_train,\n",
 "                             enable_tf=True,\n",
- "                             whitelist_models=[\"TensorFlowLinearClassifier\", \"TensorFlowDNN\"],\n",
+ "                             whitelist_models=whitelist_models,\n",
 "                             path = project_folder)"
 ]
 },

@@ -9,6 +9,13 @@
 "Licensed under the MIT License."
 ]
 },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -72,6 +79,32 @@
 "from azureml.train.automl import AutoMLConfig"
 ]
 },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Accessing the Azure ML workspace requires authentication with Azure.\n",
+ "\n",
+ "The default authentication is interactive authentication using the default tenant. Executing the `ws = Workspace.from_config()` line in the cell below will prompt for authentication the first time that it is run.\n",
+ "\n",
+ "If you have multiple Azure tenants, you can specify the tenant by replacing the `ws = Workspace.from_config()` line in the cell below with the following:\n",
+ "\n",
+ "```\n",
+ "from azureml.core.authentication import InteractiveLoginAuthentication\n",
+ "auth = InteractiveLoginAuthentication(tenant_id = 'mytenantid')\n",
+ "ws = Workspace.from_config(auth = auth)\n",
+ "```\n",
+ "\n",
+ "If you need to run in an environment where interactive login is not possible, you can use Service Principal authentication by replacing the `ws = Workspace.from_config()` line in the cell below with the following:\n",
+ "\n",
+ "```\n",
+ "from azureml.core.authentication import ServicePrincipalAuthentication\n",
+ "auth = ServicePrincipalAuthentication('mytenantid', 'myappid', 'mypassword')\n",
+ "ws = Workspace.from_config(auth = auth)\n",
+ "```\n",
+ "For more details, see [aka.ms/aml-notebook-auth](http://aka.ms/aml-notebook-auth)"
+ ]
+ },
 {
 "cell_type": "code",
 "execution_count": null,
@@ -81,8 +114,8 @@
 "ws = Workspace.from_config()\n",
 "\n",
 "# Choose a name for the experiment and specify the project folder.\n",
- "experiment_name = 'automl-local-classification'\n",
- "project_folder = './sample_projects/automl-local-classification'\n",
+ "experiment_name = 'automl-classification'\n",
+ "project_folder = './sample_projects/automl-classification'\n",
 "\n",
 "experiment = Experiment(ws, experiment_name)\n",
 "\n",
@@ -99,23 +132,6 @@
 "outputDf.T"
 ]
 },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Opt-in diagnostics for better experience, quality, and security of future releases."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from azureml.telemetry import set_diagnostics_collection\n",
- "set_diagnostics_collection(send_diagnostics = True)"
- ]
- },
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -150,12 +166,17 @@
 "|-|-|\n",
 "|**task**|classification or regression|\n",
 "|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
 "|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
 "|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
- "|**n_cross_validations**|Number of cross validation splits.|\n",
 "|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
- "|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.|\n",
- "|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|"
+ "|**y**|(sparse) array-like, shape = [n_samples, ], Multi-class targets.|\n",
+ "|**n_cross_validations**|Number of cross validation splits.|\n",
+ "|\n",
+ "\n",
+ "Automated machine learning trains multiple machine learning pipelines. Each pipeline's training is known as an iteration.\n",
+ "* You can specify a maximum number of iterations using the `iterations` parameter.\n",
+ "* You can specify a maximum time for the run using the `experiment_timeout_minutes` parameter.\n",
+ "* If you specify neither `iterations` nor `experiment_timeout_minutes`, automated ML keeps running iterations while it continues to see improvements in the scores.\n",
+ "\n",
+ "The following example doesn't specify `iterations` or `experiment_timeout_minutes` and so runs until the scores stop improving.\n"
 ]
 },
 {
@@ -165,15 +186,10 @@
 "outputs": [],
 "source": [
 "automl_config = AutoMLConfig(task = 'classification',\n",
- "                             debug_log = 'automl_errors.log',\n",
 "                             primary_metric = 'AUC_weighted',\n",
- "                             iteration_timeout_minutes = 60,\n",
- "                             iterations = 25,\n",
- "                             n_cross_validations = 3,\n",
- "                             verbosity = logging.INFO,\n",
 "                             X = X_train, \n",
 "                             y = y_train,\n",
- "                             path = project_folder)"
+ "                             n_cross_validations = 3)"
 ]
 },
 {
@@ -291,8 +307,45 @@
 "outputs": [],
 "source": [
 "best_run, fitted_model = local_run.get_output()\n",
- "print(best_run)\n",
- "print(fitted_model)"
+ "print(best_run)"
 ]
 },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Print the properties of the model\n",
+ "The fitted_model is a Python object and you can read the different properties of the object.\n",
+ "The following shows printing hyperparameters for each step in the pipeline."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from pprint import pprint\n",
+ "\n",
+ "def print_model(model, prefix=\"\"):\n",
+ "    for step in model.steps:\n",
+ "        print(prefix + step[0])\n",
+ "        if hasattr(step[1], 'estimators') and hasattr(step[1], 'weights'):\n",
+ "            pprint({'estimators': list(e[0] for e in step[1].estimators), 'weights': step[1].weights})\n",
+ "            print()\n",
+ "            for estimator in step[1].estimators:\n",
+ "                print_model(estimator[1], estimator[0] + ' - ')\n",
+ "        elif hasattr(step[1], '_base_learners') and hasattr(step[1], '_meta_learner'):\n",
+ "            print(\"\\nMeta Learner\")\n",
+ "            pprint(step[1]._meta_learner)\n",
+ "            print()\n",
+ "            for estimator in step[1]._base_learners:\n",
+ "                print_model(estimator[1], estimator[0] + ' - ')\n",
+ "        else:\n",
+ "            pprint(step[1].get_params())\n",
+ "            print()\n",
+ "\n",
+ "print_model(fitted_model)"
+ ]
+ },
 {
@@ -311,8 +364,16 @@
 "source": [
 "lookup_metric = \"log_loss\"\n",
 "best_run, fitted_model = local_run.get_output(metric = lookup_metric)\n",
- "print(best_run)\n",
- "print(fitted_model)"
+ "print(best_run)"
 ]
 },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print_model(fitted_model)"
+ ]
+ },
 {
@@ -331,8 +392,16 @@
 "source": [
 "iteration = 3\n",
 "third_run, third_model = local_run.get_output(iteration = iteration)\n",
- "print(third_run)\n",
- "print(third_model)"
+ "print(third_run)"
 ]
 },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print_model(third_model)"
+ ]
+ },
 {
@@ -9,6 +9,13 @@
 "Licensed under the MIT License."
 ]
 },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -49,23 +56,6 @@
 "Currently, Data Prep only supports __Ubuntu 16__ and __Red Hat Enterprise Linux 7__. We are working on supporting more Linux distros."
 ]
 },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Opt-in diagnostics for better experience, quality, and security of future releases."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from azureml.telemetry import set_diagnostics_collection\n",
- "set_diagnostics_collection(send_diagnostics = True)"
- ]
- },
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -134,21 +124,12 @@
 "outputs": [],
 "source": [
 "# You can use `auto_read_file` which intelligently figures out delimiters and datatypes of a file.\n",
- "# The data referenced here was pulled from `sklearn.datasets.load_digits()`.\n",
- "simple_example_data_root = 'https://dprepdata.blob.core.windows.net/automl-notebook-data/'\n",
- "X = dprep.auto_read_file(simple_example_data_root + 'X.csv').skip(1) # Remove the header row.\n",
- "\n",
- "# The data referenced here was a 1MB simple random sample of the Chicago Crime data into a local temporary directory.\n",
- "# You can also use `read_csv` and `to_*` transformations to read (with overridable delimiter)\n",
- "# and convert column types manually.\n",
- "# Here we read a comma delimited file and convert all columns to integers.\n",
- "y = dprep.read_csv(simple_example_data_root + 'y.csv').to_long(dprep.ColumnSelector(term='.*', use_regex = True))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "You can peek the result of a Dataflow at any range using `skip(i)` and `head(j)`. Doing so evaluates only `j` records for all the steps in the Dataflow, which makes it fast even against large datasets."
+ "example_data = 'https://dprepdata.blob.core.windows.net/demo/crime0-random.csv'\n",
+ "dflow = dprep.auto_read_file(example_data).skip(1) # Remove the header row.\n",
+ "dflow.get_profile()"
 ]
 },
 {
@@ -157,7 +138,30 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "X.skip(1).head(5)"
+ "# As `Primary Type` is our y data, we need to drop the rows where this column is null.\n",
+ "dflow = dflow.drop_nulls('Primary Type')\n",
+ "dflow.head(5)"
 ]
 },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Review the Data Preparation Result\n",
+ "\n",
+ "You can peek the result of a Dataflow at any range using `skip(i)` and `head(j)`. Doing so evaluates only `j` records for all the steps in the Dataflow, which makes it fast even against large datasets.\n",
+ "\n",
+ "`Dataflow` objects are immutable and are composed of a list of data preparation steps. A `Dataflow` object can be branched at any point for further usage."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "X = dflow.drop_columns(columns=['Primary Type', 'FBI Code'])\n",
+ "y = dflow.keep_columns(columns=['Primary Type'], validate_column_exists=True)"
+ ]
+ },
 {
@@ -179,9 +183,8 @@
 "    \"iteration_timeout_minutes\" : 10,\n",
 "    \"iterations\" : 2,\n",
 "    \"primary_metric\" : 'AUC_weighted',\n",
- "    \"preprocess\" : False,\n",
- "    \"verbosity\" : logging.INFO,\n",
- "    \"n_cross_validations\": 3\n",
+ "    \"preprocess\" : True,\n",
+ "    \"verbosity\" : logging.INFO\n",
 "}"
 ]
 },
@@ -189,7 +192,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
- "### Create or Attach a Remote Linux DSVM"
+ "### Create or Attach an AmlCompute cluster"
 ]
 },
 {
@@ -198,21 +201,36 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "dsvm_name = 'mydsvmc'\n",
- "\n",
- "try:\n",
- "    while ws.compute_targets[dsvm_name].provisioning_state == 'Creating':\n",
- "        time.sleep(1)\n",
- "\n",
- "    dsvm_compute = DsvmCompute(ws, dsvm_name)\n",
- "    print('Found existing DSVM.')\n",
- "except:\n",
- "    print('Creating a new DSVM.')\n",
- "    dsvm_config = DsvmCompute.provisioning_configuration(vm_size = \"Standard_D2_v2\")\n",
- "    dsvm_compute = DsvmCompute.create(ws, name = dsvm_name, provisioning_configuration = dsvm_config)\n",
- "    dsvm_compute.wait_for_completion(show_output = True)\n",
- "    print(\"Waiting one minute for ssh to be accessible\")\n",
- "    time.sleep(60) # Wait for ssh to be accessible"
+ "from azureml.core.compute import AmlCompute\n",
+ "from azureml.core.compute import ComputeTarget\n",
+ "\n",
+ "# Choose a name for your cluster.\n",
+ "amlcompute_cluster_name = \"cpucluster\"\n",
+ "\n",
+ "found = False\n",
+ "\n",
+ "# Check if this compute target already exists in the workspace.\n",
+ "cts = ws.compute_targets\n",
+ "if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'AmlCompute':\n",
+ "    found = True\n",
+ "    print('Found existing compute target.')\n",
+ "    compute_target = cts[amlcompute_cluster_name]\n",
+ "\n",
+ "if not found:\n",
+ "    print('Creating a new compute target...')\n",
+ "    provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\", # for GPU, use \"STANDARD_NC6\"\n",
+ "                                                                #vm_priority = 'lowpriority', # optional\n",
+ "                                                                max_nodes = 6)\n",
+ "\n",
+ "    # Create the cluster.\n",
+ "    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config)\n",
+ "\n",
+ "    # Can poll for a minimum number of nodes and for a specific timeout.\n",
+ "    # If no min_node_count is provided, it will use the scale settings for the cluster.\n",
+ "    compute_target.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20)\n",
+ "\n",
+ "    # For a more detailed view of current AmlCompute status, use get_status()."
 ]
 },
 {
@@ -224,11 +242,15 @@
 "from azureml.core.runconfig import RunConfiguration\n",
 "from azureml.core.conda_dependencies import CondaDependencies\n",
 "\n",
 "# Create a new RunConfig object.\n",
 "conda_run_config = RunConfiguration(framework=\"python\")\n",
 "\n",
- "conda_run_config.target = dsvm_compute\n",
+ "# Set the compute target to AmlCompute.\n",
+ "conda_run_config.target = compute_target\n",
+ "conda_run_config.environment.docker.enabled = True\n",
+ "conda_run_config.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE\n",
 "\n",
- "cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'], conda_packages=['numpy'])\n",
+ "cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'], conda_packages=['numpy','py-xgboost<=0.80'])\n",
 "conda_run_config.environment.python.conda_dependencies = cd"
 ]
 },
@@ -274,6 +296,44 @@
 "remote_run"
 ]
 },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Pre-process cache cleanup\n",
+ "The preprocessed data is cached at the user's default file store. When the run is complete, the cache can be cleaned by running the cell below."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "remote_run.clean_preprocessor_cache()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Cancelling Runs\n",
+ "You can cancel ongoing remote runs using the `cancel` and `cancel_iteration` functions."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Cancel the ongoing experiment and stop scheduling new iterations.\n",
+ "# remote_run.cancel()\n",
+ "\n",
+ "# Cancel iteration 1 and move onto iteration 2.\n",
+ "# remote_run.cancel_iteration(1)"
+ ]
+ },
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -393,7 +453,8 @@
 "source": [
 "## Test\n",
 "\n",
- "#### Load Test Data"
+ "#### Load Test Data\n",
+ "The test data must go through the same preparation steps as the training data; otherwise the run may fail at the preprocessing step."
 ]
 },
 {
@@ -402,12 +463,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from sklearn import datasets\n",
- "\n",
- "digits = datasets.load_digits()\n",
- "X_test = digits.data[:10, :]\n",
- "y_test = digits.target[:10]\n",
- "images = digits.images[:10]"
+ "dflow_test = dprep.auto_read_file(path='https://dprepdata.blob.core.windows.net/demo/crime0-test.csv').skip(1)\n",
+ "dflow_test = dflow_test.drop_nulls('Primary Type')"
 ]
 },
 {
@@ -415,7 +472,7 @@
 "metadata": {},
 "source": [
 "#### Testing Our Best Fitted Model\n",
- "We will try to predict 2 digits and see how our model works."
+ "We will use a confusion matrix to see how our model works."
 ]
 },
 {
@@ -424,65 +481,19 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "#Randomly select digits and test\n",
- "from matplotlib import pyplot as plt\n",
- "import numpy as np\n",
- "\n",
- "for index in np.random.choice(len(y_test), 2, replace = False):\n",
- "    print(index)\n",
- "    predicted = fitted_model.predict(X_test[index:index + 1])[0]\n",
- "    label = y_test[index]\n",
- "    title = \"Label value = %d Predicted value = %d \" % (label, predicted)\n",
- "    fig = plt.figure(1, figsize=(3,3))\n",
- "    ax1 = fig.add_axes((0,0,.8,.8))\n",
- "    ax1.set_title(title)\n",
- "    plt.imshow(images[index], cmap = plt.cm.gray_r, interpolation = 'nearest')\n",
- "    plt.show()"
+ "from pandas_ml import ConfusionMatrix\n",
+ "\n",
+ "y_test = dflow_test.keep_columns(columns=['Primary Type']).to_pandas_dataframe()\n",
+ "X_test = dflow_test.drop_columns(columns=['Primary Type', 'FBI Code']).to_pandas_dataframe()\n",
+ "\n",
+ "ypred = fitted_model.predict(X_test)\n",
+ "\n",
+ "cm = ConfusionMatrix(y_test['Primary Type'], ypred)\n",
+ "\n",
+ "print(cm)\n",
+ "\n",
+ "cm.plot()"
 ]
 },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Appendix"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Capture the `Dataflow` Objects for Later Use in AutoML\n",
- "\n",
- "`Dataflow` objects are immutable and are composed of a list of data preparation steps. A `Dataflow` object can be branched at any point for further usage."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# sklearn.digits.data + target\n",
- "digits_complete = dprep.auto_read_file('https://dprepdata.blob.core.windows.net/automl-notebook-data/digits-complete.csv')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "`digits_complete` (sourced from `sklearn.datasets.load_digits()`) is forked into `dflow_X` to capture all the feature columns and `dflow_y` to capture the label column."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print(digits_complete.to_pandas_dataframe().shape)\n",
- "labels_column = 'Column64'\n",
- "dflow_X = digits_complete.drop_columns(columns = [labels_column])\n",
- "dflow_y = digits_complete.keep_columns(columns = [labels_column])"
- ]
- }
 ],
@@ -9,6 +9,13 @@
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -49,23 +56,6 @@
"Currently, Data Prep only supports __Ubuntu 16__ and __Red Hat Enterprise Linux 7__. We are working on supporting more Linux distros."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Opt in to diagnostics for a better experience, quality, and security of future releases."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.telemetry import set_diagnostics_collection\n",
"set_diagnostics_collection(send_diagnostics = True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -132,23 +122,12 @@
"outputs": [],
"source": [
"# You can use `auto_read_file` which intelligently figures out delimiters and datatypes of a file.\n",
"# The data referenced here was pulled from `sklearn.datasets.load_digits()`.\n",
"simple_example_data_root = 'https://dprepdata.blob.core.windows.net/automl-notebook-data/'\n",
"X = dprep.auto_read_file(simple_example_data_root + 'X.csv').skip(1) # Remove the header row.\n",
"\n",
"# The data referenced here is a 1MB simple random sample of the Chicago Crime data.\n",
"# You can also use `read_csv` and `to_*` transformations to read (with overridable delimiter)\n",
"# and convert column types manually.\n",
"# Here we read a comma delimited file and convert all columns to integers.\n",
"y = dprep.read_csv(simple_example_data_root + 'y.csv').to_long(dprep.ColumnSelector(term='.*', use_regex = True))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Review the Data Preparation Result\n",
"\n",
"You can peek the result of a Dataflow at any range using `skip(i)` and `head(j)`. Doing so evaluates only `j` records for all the steps in the Dataflow, which makes it fast even against large datasets."
"example_data = 'https://dprepdata.blob.core.windows.net/demo/crime0-random.csv'\n",
"dflow = dprep.auto_read_file(example_data).skip(1) # Remove the header row.\n",
"dflow.get_profile()"
]
},
{
@@ -157,7 +136,30 @@
"metadata": {},
"outputs": [],
"source": [
"X.skip(1).head(5)"
"# As `Primary Type` is our y data, we need to drop the rows where this column is null.\n",
"dflow = dflow.drop_nulls('Primary Type')\n",
"dflow.head(5)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Review the Data Preparation Result\n",
"\n",
"You can peek the result of a Dataflow at any range using `skip(i)` and `head(j)`. Doing so evaluates only `j` records for all the steps in the Dataflow, which makes it fast even against large datasets.\n",
"\n",
"`Dataflow` objects are immutable and are composed of a list of data preparation steps. A `Dataflow` object can be branched at any point for further usage."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X = dflow.drop_columns(columns=['Primary Type', 'FBI Code'])\n",
"y = dflow.keep_columns(columns=['Primary Type'], validate_column_exists=True)"
]
},
{
@@ -179,9 +181,8 @@
" \"iteration_timeout_minutes\" : 10,\n",
" \"iterations\" : 2,\n",
" \"primary_metric\" : 'AUC_weighted',\n",
" \"preprocess\" : False,\n",
" \"verbosity\" : logging.INFO,\n",
" \"n_cross_validations\": 3\n",
" \"preprocess\" : True,\n",
" \"verbosity\" : logging.INFO\n",
"}"
]
},
@@ -344,7 +345,8 @@
"source": [
"## Test\n",
"\n",
"#### Load Test Data"
"#### Load Test Data\n",
"The test data must go through the same preparation steps as the train data; otherwise the run may fail at the preprocessing step."
]
},
{
@@ -353,12 +355,8 @@
"metadata": {},
"outputs": [],
"source": [
"from sklearn import datasets\n",
"\n",
"digits = datasets.load_digits()\n",
"X_test = digits.data[:10, :]\n",
"y_test = digits.target[:10]\n",
"images = digits.images[:10]"
"dflow_test = dprep.auto_read_file(path='https://dprepdata.blob.core.windows.net/demo/crime0-test.csv').skip(1)\n",
"dflow_test = dflow_test.drop_nulls('Primary Type')"
]
},
{
@@ -366,7 +364,7 @@
"metadata": {},
"source": [
"#### Testing Our Best Fitted Model\n",
"We will try to predict 2 digits and see how our model works."
"We will use a confusion matrix to see how our model performs."
]
},
{
@@ -375,65 +373,18 @@
"metadata": {},
"outputs": [],
"source": [
"#Randomly select digits and test\n",
"from matplotlib import pyplot as plt\n",
"import numpy as np\n",
"from pandas_ml import ConfusionMatrix\n",
"\n",
"for index in np.random.choice(len(y_test), 2, replace = False):\n",
" print(index)\n",
" predicted = fitted_model.predict(X_test[index:index + 1])[0]\n",
" label = y_test[index]\n",
" title = \"Label value = %d Predicted value = %d \" % (label, predicted)\n",
" fig = plt.figure(1, figsize=(3,3))\n",
" ax1 = fig.add_axes((0,0,.8,.8))\n",
" ax1.set_title(title)\n",
" plt.imshow(images[index], cmap = plt.cm.gray_r, interpolation = 'nearest')\n",
" plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Appendix"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Capture the `Dataflow` Objects for Later Use in AutoML\n",
"y_test = dflow_test.keep_columns(columns=['Primary Type']).to_pandas_dataframe()\n",
"X_test = dflow_test.drop_columns(columns=['Primary Type', 'FBI Code']).to_pandas_dataframe()\n",
"\n",
"`Dataflow` objects are immutable and are composed of a list of data preparation steps. A `Dataflow` object can be branched at any point for further usage."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sklearn.digits.data + target\n",
"digits_complete = dprep.auto_read_file('https://dprepdata.blob.core.windows.net/automl-notebook-data/digits-complete.csv')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"`digits_complete` (sourced from `sklearn.datasets.load_digits()`) is forked into `dflow_X` to capture all the feature columns and `dflow_y` to capture the label column."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(digits_complete.to_pandas_dataframe().shape)\n",
"labels_column = 'Column64'\n",
"dflow_X = digits_complete.drop_columns(columns = [labels_column])\n",
"dflow_y = digits_complete.keep_columns(columns = [labels_column])"
"ypred = fitted_model.predict(X_test)\n",
"\n",
"cm = ConfusionMatrix(y_test['Primary Type'], ypred)\n",
"\n",
"print(cm)\n",
"\n",
"cm.plot()"
]
}
],

@@ -9,6 +9,13 @@
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -70,23 +77,6 @@
"ws = Workspace.from_config()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Opt in to diagnostics for a better experience, quality, and security of future releases."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.telemetry import set_diagnostics_collection\n",
"set_diagnostics_collection(send_diagnostics = True)"
]
},
{
"cell_type": "markdown",
"metadata": {},

@@ -0,0 +1,500 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated Machine Learning\n",
"**BikeShare Demand Forecasting**\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Data](#Data)\n",
"1. [Train](#Train)\n",
"1. [Evaluate](#Evaluate)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"In this example, we show how AutoML can be used for bike share forecasting.\n",
"\n",
"The purpose is to demonstrate how to take advantage of the built-in holiday featurization, access the feature names, and further demonstrate how to work with the `forecast` function. Please also look at the additional forecasting notebooks, which document lagging, rolling windows, forecast quantiles, other ways to use the forecast function, and forecaster deployment.\n",
"\n",
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
"\n",
"In this notebook you will see\n",
"1. Creating an Experiment in an existing Workspace\n",
"2. Instantiating AutoMLConfig with the new task type \"forecasting\" for training on timeseries data, along with other timeseries-related settings; for this dataset we use the basic one, \"time_column_name\"\n",
"3. Training the Model using local compute\n",
"4. Exploring the results\n",
"5. Viewing the engineered names for featurized data and featurization summary for all raw features\n",
"6. Testing the fitted model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import azureml.core\n",
"import pandas as pd\n",
"import numpy as np\n",
"import logging\n",
"import warnings\n",
"# Squash warning messages for cleaner output in the notebook\n",
"warnings.showwarning = lambda *args, **kwargs: None\n",
"\n",
"\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.train.automl import AutoMLConfig\n",
"from matplotlib import pyplot as plt\n",
"from sklearn.metrics import mean_absolute_error, mean_squared_error"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As part of the setup you have already created a <b>Workspace</b>. For AutoML you will need to create an <b>Experiment</b>. An <b>Experiment</b> is a named object in a <b>Workspace</b> that is used to submit and track runs."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for the run history container in the workspace\n",
"experiment_name = 'automl-bikeshareforecasting'\n",
"# project folder\n",
"project_folder = './sample_projects/automl-local-bikeshareforecasting'\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['SDK version'] = azureml.core.VERSION\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Project Directory'] = project_folder\n",
"output['Run History Name'] = experiment_name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Data\n",
"Read bike share demand data from file, and preview data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data = pd.read_csv('bike-no.csv', parse_dates=['date'])"
]
},
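{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal preview sketch (added for clarity; it assumes only the `data` DataFrame loaded in the previous cell):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Peek at the column dtypes and the first few rows of the bike share data\n",
"print(data.dtypes)\n",
"data.head()"
]
},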
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's set up what we know about the dataset.\n",
"\n",
"**Target column** is what we want to forecast.\n",
"\n",
"**Time column** is the time axis along which to predict.\n",
"\n",
"**Grain** is another word for an individual time series in your dataset. Grains are identified by the values of the columns listed in `grain_column_names`, for example \"store\" and \"item\" if your data has multiple time series of sales, one series for each combination of store and item sold.\n",
"\n",
"This dataset has only one time series. Please see the [orange juice notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales) for an example of a multi-time series dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"target_column_name = 'cnt'\n",
"time_column_name = 'date'\n",
"grain_column_names = []"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Split the data\n",
"\n",
"The first split we make is into train and test sets. Note we are splitting on time."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train = data[data[time_column_name] < '2012-09-01']\n",
"test = data[data[time_column_name] >= '2012-09-01']\n",
"\n",
"X_train = train.copy()\n",
"y_train = X_train.pop(target_column_name).values\n",
"\n",
"X_test = test.copy()\n",
"y_test = X_test.pop(target_column_name).values\n",
"\n",
"print(X_train.shape)\n",
"print(y_train.shape)\n",
"print(X_test.shape)\n",
"print(y_test.shape)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Setting forecaster maximum horizon \n",
"\n",
"Assuming your test data forms a full and regular time series (regular time intervals and no holes),\n",
"the maximum horizon you will need to forecast is the length of the longest grain in your test set."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"if len(grain_column_names) == 0:\n",
" max_horizon = len(X_test)\n",
"else:\n",
" max_horizon = X_test.groupby(grain_column_names)[time_column_name].count().max()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train\n",
"\n",
"Instantiate an AutoMLConfig object. This defines the settings and data used to run the experiment.\n",
"\n",
"|Property|Description|\n",
"|-|-|\n",
"|**task**|forecasting|\n",
"|**primary_metric**|This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>\n",
"|**iterations**|Number of iterations. In each iteration, Auto ML trains a specific pipeline on the given data|\n",
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
"|**y**|(sparse) array-like, shape = [n_samples, ], targets values.|\n",
"|**n_cross_validations**|Number of cross validation splits.|\n",
"|**country_or_region**|The country/region used to generate holiday features. These should be ISO 3166 two-letter country/region codes (i.e. 'US', 'GB').|\n",
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"time_column_name = 'date'\n",
"automl_settings = {\n",
" \"time_column_name\": time_column_name,\n",
" # these columns are a breakdown of the total and therefore a leak\n",
" \"drop_column_names\": ['casual', 'registered'],\n",
" # knowing the country/region allows Automated ML to bring in holidays\n",
" \"country_or_region\" : 'US',\n",
" \"max_horizon\" : max_horizon,\n",
" \"target_lags\": 1 \n",
"}\n",
"\n",
"automl_config = AutoMLConfig(task = 'forecasting', \n",
" primary_metric='normalized_root_mean_squared_error',\n",
" iterations = 10,\n",
" iteration_timeout_minutes = 5,\n",
" X = X_train,\n",
" y = y_train,\n",
" n_cross_validations = 3, \n",
" path=project_folder,\n",
" verbosity = logging.INFO,\n",
" **automl_settings)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We will now run the experiment, starting with 10 iterations of model search. The experiment can be continued for more iterations if the results are not yet satisfactory. You will see the currently running iterations print to the console."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"local_run = experiment.submit(automl_config, show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Displaying the run objects gives you links to the visual tools in the Azure Portal. Go try them!"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"local_run"
]
},
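{
"cell_type": "markdown",
"metadata": {},
"source": [
"A sketch of an alternative to the portal links (it assumes the optional `azureml-widgets` package is installed): the run can also be monitored inline with the Jupyter run-details widget."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Inline run monitoring; requires the optional azureml-widgets package\n",
"from azureml.widgets import RunDetails\n",
"RunDetails(local_run).show()"
]
},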
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrieve the Best Model\n",
"Below we select the best pipeline from our iterations. The get_output method on local_run returns the best run and the fitted model for the last fit invocation. There are overloads on get_output that allow you to retrieve the best run and fitted model for any logged metric or a particular iteration."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = local_run.get_output()\n",
"fitted_model.steps"
]
},
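{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a sketch of the overloads mentioned above (assuming the metric name below was logged by the run), `get_output` can also target a specific metric or iteration:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Best run according to a specific logged metric (name assumed from the config above)\n",
"metric_run, metric_model = local_run.get_output(metric = 'normalized_root_mean_squared_error')\n",
"\n",
"# Run and fitted model from a particular iteration\n",
"iter_run, iter_model = local_run.get_output(iteration = 0)"
]
},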
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### View the engineered names for featurized data\n",
"\n",
"You can access the engineered feature names generated in time-series featurization. Note that a number of named holiday periods are represented. We recommend that you have at least one year of data when using this feature to ensure that all yearly holidays are captured in the training featurization."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fitted_model.named_steps['timeseriestransformer'].get_engineered_feature_names()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### View the featurization summary\n",
"\n",
"You can also see what featurization steps were performed on different raw features in the user data. For each raw feature in the user data, the following information is displayed:\n",
"\n",
"- Raw feature name\n",
"- Number of engineered features formed out of this raw feature\n",
"- Type detected\n",
"- If feature was dropped\n",
"- List of feature transformations for the raw feature"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fitted_model.named_steps['timeseriestransformer'].get_featurization_summary()"
]
},
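{
"cell_type": "markdown",
"metadata": {},
"source": [
"The summary is easier to scan as a table; a minimal sketch, assuming the call above returns a list of per-feature records:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Render the featurization summary as a DataFrame for readability\n",
"summary = fitted_model.named_steps['timeseriestransformer'].get_featurization_summary()\n",
"pd.DataFrame.from_records(summary)"
]
},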
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Test the Best Fitted Model\n",
"\n",
"Predict on training and test set, and calculate residual values.\n",
"\n",
"We always score on the original dataset whose schema matches the schema of the training dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_test.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# NaN entries in y_query mark the rows for which forecast() should produce predictions\n",
"y_query = y_test.copy().astype(np.float)\n",
"y_query.fill(np.NaN)\n",
"y_fcst, X_trans = fitted_model.forecast(X_test, y_query)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"It is a good practice to always align the output explicitly to the input, as the count and order of the rows may have changed during transformations that span multiple rows."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def align_outputs(y_predicted, X_trans, X_test, y_test, predicted_column_name = 'predicted'):\n",
" \"\"\"\n",
" Demonstrates how to get the output aligned to the inputs\n",
" using pandas indexes. Helps understand what happened if\n",
" the output's shape differs from the input shape, or if\n",
" the data got re-sorted by time and grain during forecasting.\n",
" \n",
" Typical causes of misalignment are:\n",
" * we predicted some periods that were missing in actuals -> drop from eval\n",
" * model was asked to predict past max_horizon -> increase max horizon\n",
" * data at start of X_test was needed for lags -> provide previous periods\n",
" \"\"\"\n",
" df_fcst = pd.DataFrame({predicted_column_name : y_predicted})\n",
" # y and X outputs are aligned by forecast() function contract\n",
" df_fcst.index = X_trans.index\n",
" \n",
" # align original X_test to y_test \n",
" X_test_full = X_test.copy()\n",
" X_test_full[target_column_name] = y_test\n",
"\n",
" # X_test_full's index does not include origin, so reset for merge\n",
" df_fcst.reset_index(inplace=True)\n",
" X_test_full = X_test_full.reset_index().drop(columns='index')\n",
" together = df_fcst.merge(X_test_full, how='right')\n",
" \n",
" # drop rows where prediction or actuals are nan \n",
" # happens because of missing actuals \n",
" # or at edges of time due to lags/rolling windows\n",
" clean = together[together[[target_column_name, predicted_column_name]].notnull().all(axis=1)]\n",
" return(clean)\n",
"\n",
"df_all = align_outputs(y_fcst, X_trans, X_test, y_test)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def MAPE(actual, pred):\n",
" \"\"\"\n",
" Calculate mean absolute percentage error.\n",
" Remove NA and values where actual is close to zero\n",
" \"\"\"\n",
" not_na = ~(np.isnan(actual) | np.isnan(pred))\n",
" not_zero = ~np.isclose(actual, 0.0)\n",
" actual_safe = actual[not_na & not_zero]\n",
" pred_safe = pred[not_na & not_zero]\n",
" APE = 100*np.abs((actual_safe - pred_safe)/actual_safe)\n",
" return np.mean(APE)"
]
},
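{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check of the helper above (added for illustration): NaN and near-zero actuals are excluded, so only the first pair contributes."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 100 -> 110 is a 10% absolute error; the 0.0 and NaN actuals are filtered out\n",
"print(MAPE(np.array([100.0, 0.0, np.nan]), np.array([110.0, 5.0, 1.0])))  # expected: 10.0"
]
},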
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"Simple forecasting model\")\n",
"rmse = np.sqrt(mean_squared_error(df_all[target_column_name], df_all['predicted']))\n",
"print(\"[Test Data] \\nRoot Mean squared error: %.2f\" % rmse)\n",
"mae = mean_absolute_error(df_all[target_column_name], df_all['predicted'])\n",
"print('mean_absolute_error score: %.2f' % mae)\n",
"print('MAPE: %.2f' % MAPE(df_all[target_column_name], df_all['predicted']))\n",
"\n",
"# Plot outputs\n",
"%matplotlib notebook\n",
"test_pred = plt.scatter(df_all[target_column_name], df_all['predicted'], color='b')\n",
"test_test = plt.scatter(y_test, y_test, color='g')\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
"plt.show()"
]
}
],
"metadata": {
"authors": [
{
"name": "xiaga@microsoft.com, tosingli@microsoft.com"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

@@ -0,0 +1,732 @@
instant,date,season,yr,mnth,weekday,weathersit,temp,atemp,hum,windspeed,casual,registered,cnt
1,1/1/2011,1,0,1,6,2,0.344167,0.363625,0.805833,0.160446,331,654,985
2,1/2/2011,1,0,1,0,2,0.363478,0.353739,0.696087,0.248539,131,670,801
3,1/3/2011,1,0,1,1,1,0.196364,0.189405,0.437273,0.248309,120,1229,1349
4,1/4/2011,1,0,1,2,1,0.2,0.212122,0.590435,0.160296,108,1454,1562
5,1/5/2011,1,0,1,3,1,0.226957,0.22927,0.436957,0.1869,82,1518,1600
6,1/6/2011,1,0,1,4,1,0.204348,0.233209,0.518261,0.0895652,88,1518,1606
7,1/7/2011,1,0,1,5,2,0.196522,0.208839,0.498696,0.168726,148,1362,1510
8,1/8/2011,1,0,1,6,2,0.165,0.162254,0.535833,0.266804,68,891,959
9,1/9/2011,1,0,1,0,1,0.138333,0.116175,0.434167,0.36195,54,768,822
10,1/10/2011,1,0,1,1,1,0.150833,0.150888,0.482917,0.223267,41,1280,1321
11,1/11/2011,1,0,1,2,2,0.169091,0.191464,0.686364,0.122132,43,1220,1263
12,1/12/2011,1,0,1,3,1,0.172727,0.160473,0.599545,0.304627,25,1137,1162
13,1/13/2011,1,0,1,4,1,0.165,0.150883,0.470417,0.301,38,1368,1406
14,1/14/2011,1,0,1,5,1,0.16087,0.188413,0.537826,0.126548,54,1367,1421
15,1/15/2011,1,0,1,6,2,0.233333,0.248112,0.49875,0.157963,222,1026,1248
16,1/16/2011,1,0,1,0,1,0.231667,0.234217,0.48375,0.188433,251,953,1204
17,1/17/2011,1,0,1,1,2,0.175833,0.176771,0.5375,0.194017,117,883,1000
18,1/18/2011,1,0,1,2,2,0.216667,0.232333,0.861667,0.146775,9,674,683
19,1/19/2011,1,0,1,3,2,0.292174,0.298422,0.741739,0.208317,78,1572,1650
20,1/20/2011,1,0,1,4,2,0.261667,0.25505,0.538333,0.195904,83,1844,1927
21,1/21/2011,1,0,1,5,1,0.1775,0.157833,0.457083,0.353242,75,1468,1543
22,1/22/2011,1,0,1,6,1,0.0591304,0.0790696,0.4,0.17197,93,888,981
23,1/23/2011,1,0,1,0,1,0.0965217,0.0988391,0.436522,0.2466,150,836,986
24,1/24/2011,1,0,1,1,1,0.0973913,0.11793,0.491739,0.15833,86,1330,1416
25,1/25/2011,1,0,1,2,2,0.223478,0.234526,0.616957,0.129796,186,1799,1985
26,1/26/2011,1,0,1,3,3,0.2175,0.2036,0.8625,0.29385,34,472,506
27,1/27/2011,1,0,1,4,1,0.195,0.2197,0.6875,0.113837,15,416,431
28,1/28/2011,1,0,1,5,2,0.203478,0.223317,0.793043,0.1233,38,1129,1167
29,1/29/2011,1,0,1,6,1,0.196522,0.212126,0.651739,0.145365,123,975,1098
30,1/30/2011,1,0,1,0,1,0.216522,0.250322,0.722174,0.0739826,140,956,1096
31,1/31/2011,1,0,1,1,2,0.180833,0.18625,0.60375,0.187192,42,1459,1501
32,2/1/2011,1,0,2,2,2,0.192174,0.23453,0.829565,0.053213,47,1313,1360
33,2/2/2011,1,0,2,3,2,0.26,0.254417,0.775417,0.264308,72,1454,1526
34,2/3/2011,1,0,2,4,1,0.186957,0.177878,0.437826,0.277752,61,1489,1550
35,2/4/2011,1,0,2,5,2,0.211304,0.228587,0.585217,0.127839,88,1620,1708
36,2/5/2011,1,0,2,6,2,0.233333,0.243058,0.929167,0.161079,100,905,1005
37,2/6/2011,1,0,2,0,1,0.285833,0.291671,0.568333,0.1418,354,1269,1623
38,2/7/2011,1,0,2,1,1,0.271667,0.303658,0.738333,0.0454083,120,1592,1712
39,2/8/2011,1,0,2,2,1,0.220833,0.198246,0.537917,0.36195,64,1466,1530
40,2/9/2011,1,0,2,3,2,0.134783,0.144283,0.494783,0.188839,53,1552,1605
41,2/10/2011,1,0,2,4,1,0.144348,0.149548,0.437391,0.221935,47,1491,1538
42,2/11/2011,1,0,2,5,1,0.189091,0.213509,0.506364,0.10855,149,1597,1746
43,2/12/2011,1,0,2,6,1,0.2225,0.232954,0.544167,0.203367,288,1184,1472
44,2/13/2011,1,0,2,0,1,0.316522,0.324113,0.457391,0.260883,397,1192,1589
45,2/14/2011,1,0,2,1,1,0.415,0.39835,0.375833,0.417908,208,1705,1913
46,2/15/2011,1,0,2,2,1,0.266087,0.254274,0.314348,0.291374,140,1675,1815
47,2/16/2011,1,0,2,3,1,0.318261,0.3162,0.423478,0.251791,218,1897,2115
48,2/17/2011,1,0,2,4,1,0.435833,0.428658,0.505,0.230104,259,2216,2475
49,2/18/2011,1,0,2,5,1,0.521667,0.511983,0.516667,0.264925,579,2348,2927
50,2/19/2011,1,0,2,6,1,0.399167,0.391404,0.187917,0.507463,532,1103,1635
51,2/20/2011,1,0,2,0,1,0.285217,0.27733,0.407826,0.223235,639,1173,1812
52,2/21/2011,1,0,2,1,2,0.303333,0.284075,0.605,0.307846,195,912,1107
53,2/22/2011,1,0,2,2,1,0.182222,0.186033,0.577778,0.195683,74,1376,1450
54,2/23/2011,1,0,2,3,1,0.221739,0.245717,0.423043,0.094113,139,1778,1917
55,2/24/2011,1,0,2,4,2,0.295652,0.289191,0.697391,0.250496,100,1707,1807
56,2/25/2011,1,0,2,5,2,0.364348,0.350461,0.712174,0.346539,120,1341,1461
57,2/26/2011,1,0,2,6,1,0.2825,0.282192,0.537917,0.186571,424,1545,1969
58,2/27/2011,1,0,2,0,1,0.343478,0.351109,0.68,0.125248,694,1708,2402
59,2/28/2011,1,0,2,1,2,0.407273,0.400118,0.876364,0.289686,81,1365,1446
60,3/1/2011,1,0,3,2,1,0.266667,0.263879,0.535,0.216425,137,1714,1851
61,3/2/2011,1,0,3,3,1,0.335,0.320071,0.449583,0.307833,231,1903,2134
62,3/3/2011,1,0,3,4,1,0.198333,0.200133,0.318333,0.225754,123,1562,1685
63,3/4/2011,1,0,3,5,2,0.261667,0.255679,0.610417,0.203346,214,1730,1944
64,3/5/2011,1,0,3,6,2,0.384167,0.378779,0.789167,0.251871,640,1437,2077
65,3/6/2011,1,0,3,0,2,0.376522,0.366252,0.948261,0.343287,114,491,605
66,3/7/2011,1,0,3,1,1,0.261739,0.238461,0.551304,0.341352,244,1628,1872
67,3/8/2011,1,0,3,2,1,0.2925,0.3024,0.420833,0.12065,316,1817,2133
68,3/9/2011,1,0,3,3,2,0.295833,0.286608,0.775417,0.22015,191,1700,1891
69,3/10/2011,1,0,3,4,3,0.389091,0.385668,0,0.261877,46,577,623
70,3/11/2011,1,0,3,5,2,0.316522,0.305,0.649565,0.23297,247,1730,1977
71,3/12/2011,1,0,3,6,1,0.329167,0.32575,0.594583,0.220775,724,1408,2132
72,3/13/2011,1,0,3,0,1,0.384348,0.380091,0.527391,0.270604,982,1435,2417
73,3/14/2011,1,0,3,1,1,0.325217,0.332,0.496957,0.136926,359,1687,2046
74,3/15/2011,1,0,3,2,2,0.317391,0.318178,0.655652,0.184309,289,1767,2056
75,3/16/2011,1,0,3,3,2,0.365217,0.36693,0.776522,0.203117,321,1871,2192
76,3/17/2011,1,0,3,4,1,0.415,0.410333,0.602917,0.209579,424,2320,2744
77,3/18/2011,1,0,3,5,1,0.54,0.527009,0.525217,0.231017,884,2355,3239
78,3/19/2011,1,0,3,6,1,0.4725,0.466525,0.379167,0.368167,1424,1693,3117
79,3/20/2011,1,0,3,0,1,0.3325,0.32575,0.47375,0.207721,1047,1424,2471
80,3/21/2011,2,0,3,1,2,0.430435,0.409735,0.737391,0.288783,401,1676,2077
81,3/22/2011,2,0,3,2,1,0.441667,0.440642,0.624583,0.22575,460,2243,2703
82,3/23/2011,2,0,3,3,2,0.346957,0.337939,0.839565,0.234261,203,1918,2121
83,3/24/2011,2,0,3,4,2,0.285,0.270833,0.805833,0.243787,166,1699,1865
84,3/25/2011,2,0,3,5,1,0.264167,0.256312,0.495,0.230725,300,1910,2210
85,3/26/2011,2,0,3,6,1,0.265833,0.257571,0.394167,0.209571,981,1515,2496
86,3/27/2011,2,0,3,0,2,0.253043,0.250339,0.493913,0.1843,472,1221,1693
87,3/28/2011,2,0,3,1,1,0.264348,0.257574,0.302174,0.212204,222,1806,2028
88,3/29/2011,2,0,3,2,1,0.3025,0.292908,0.314167,0.226996,317,2108,2425
89,3/30/2011,2,0,3,3,2,0.3,0.29735,0.646667,0.172888,168,1368,1536
90,3/31/2011,2,0,3,4,3,0.268333,0.257575,0.918333,0.217646,179,1506,1685
91,4/1/2011,2,0,4,5,2,0.3,0.283454,0.68625,0.258708,307,1920,2227
92,4/2/2011,2,0,4,6,2,0.315,0.315637,0.65375,0.197146,898,1354,2252
93,4/3/2011,2,0,4,0,1,0.378333,0.378767,0.48,0.182213,1651,1598,3249
94,4/4/2011,2,0,4,1,1,0.573333,0.542929,0.42625,0.385571,734,2381,3115
95,4/5/2011,2,0,4,2,2,0.414167,0.39835,0.642083,0.388067,167,1628,1795
96,4/6/2011,2,0,4,3,1,0.390833,0.387608,0.470833,0.263063,413,2395,2808
97,4/7/2011,2,0,4,4,1,0.4375,0.433696,0.602917,0.162312,571,2570,3141
98,4/8/2011,2,0,4,5,2,0.335833,0.324479,0.83625,0.226992,172,1299,1471
99,4/9/2011,2,0,4,6,2,0.3425,0.341529,0.8775,0.133083,879,1576,2455
100,4/10/2011,2,0,4,0,2,0.426667,0.426737,0.8575,0.146767,1188,1707,2895
101,4/11/2011,2,0,4,1,2,0.595652,0.565217,0.716956,0.324474,855,2493,3348
102,4/12/2011,2,0,4,2,2,0.5025,0.493054,0.739167,0.274879,257,1777,2034
103,4/13/2011,2,0,4,3,2,0.4125,0.417283,0.819167,0.250617,209,1953,2162
104,4/14/2011,2,0,4,4,1,0.4675,0.462742,0.540417,0.1107,529,2738,3267
105,4/15/2011,2,0,4,5,1,0.446667,0.441913,0.67125,0.226375,642,2484,3126
106,4/16/2011,2,0,4,6,3,0.430833,0.425492,0.888333,0.340808,121,674,795
107,4/17/2011,2,0,4,0,1,0.456667,0.445696,0.479583,0.303496,1558,2186,3744
108,4/18/2011,2,0,4,1,1,0.5125,0.503146,0.5425,0.163567,669,2760,3429
109,4/19/2011,2,0,4,2,2,0.505833,0.489258,0.665833,0.157971,409,2795,3204
110,4/20/2011,2,0,4,3,1,0.595,0.564392,0.614167,0.241925,613,3331,3944
111,4/21/2011,2,0,4,4,1,0.459167,0.453892,0.407083,0.325258,745,3444,4189
112,4/22/2011,2,0,4,5,2,0.336667,0.321954,0.729583,0.219521,177,1506,1683
113,4/23/2011,2,0,4,6,2,0.46,0.450121,0.887917,0.230725,1462,2574,4036
114,4/24/2011,2,0,4,0,2,0.581667,0.551763,0.810833,0.192175,1710,2481,4191
115,4/25/2011,2,0,4,1,1,0.606667,0.5745,0.776667,0.185333,773,3300,4073
116,4/26/2011,2,0,4,2,1,0.631667,0.594083,0.729167,0.3265,678,3722,4400
117,4/27/2011,2,0,4,3,2,0.62,0.575142,0.835417,0.3122,547,3325,3872
118,4/28/2011,2,0,4,4,2,0.6175,0.578929,0.700833,0.320908,569,3489,4058
119,4/29/2011,2,0,4,5,1,0.51,0.497463,0.457083,0.240063,878,3717,4595
120,4/30/2011,2,0,4,6,1,0.4725,0.464021,0.503333,0.235075,1965,3347,5312
121,5/1/2011,2,0,5,0,2,0.451667,0.448204,0.762083,0.106354,1138,2213,3351
122,5/2/2011,2,0,5,1,2,0.549167,0.532833,0.73,0.183454,847,3554,4401
123,5/3/2011,2,0,5,2,2,0.616667,0.582079,0.697083,0.342667,603,3848,4451
124,5/4/2011,2,0,5,3,2,0.414167,0.40465,0.737083,0.328996,255,2378,2633
125,5/5/2011,2,0,5,4,1,0.459167,0.441917,0.444167,0.295392,614,3819,4433
126,5/6/2011,2,0,5,5,1,0.479167,0.474117,0.59,0.228246,894,3714,4608
127,5/7/2011,2,0,5,6,1,0.52,0.512621,0.54125,0.16045,1612,3102,4714
128,5/8/2011,2,0,5,0,1,0.528333,0.518933,0.631667,0.0746375,1401,2932,4333
129,5/9/2011,2,0,5,1,1,0.5325,0.525246,0.58875,0.176,664,3698,4362
130,5/10/2011,2,0,5,2,1,0.5325,0.522721,0.489167,0.115671,694,4109,4803
131,5/11/2011,2,0,5,3,1,0.5425,0.5284,0.632917,0.120642,550,3632,4182
132,5/12/2011,2,0,5,4,1,0.535,0.523363,0.7475,0.189667,695,4169,4864
133,5/13/2011,2,0,5,5,2,0.5125,0.4943,0.863333,0.179725,692,3413,4105
134,5/14/2011,2,0,5,6,2,0.520833,0.500629,0.9225,0.13495,902,2507,3409
135,5/15/2011,2,0,5,0,2,0.5625,0.536,0.867083,0.152979,1582,2971,4553
136,5/16/2011,2,0,5,1,1,0.5775,0.550512,0.787917,0.126871,773,3185,3958
137,5/17/2011,2,0,5,2,2,0.561667,0.538529,0.837917,0.277354,678,3445,4123
138,5/18/2011,2,0,5,3,2,0.55,0.527158,0.87,0.201492,536,3319,3855
139,5/19/2011,2,0,5,4,2,0.530833,0.510742,0.829583,0.108213,735,3840,4575
140,5/20/2011,2,0,5,5,1,0.536667,0.529042,0.719583,0.125013,909,4008,4917
141,5/21/2011,2,0,5,6,1,0.6025,0.571975,0.626667,0.12065,2258,3547,5805
142,5/22/2011,2,0,5,0,1,0.604167,0.5745,0.749583,0.148008,1576,3084,4660
143,5/23/2011,2,0,5,1,2,0.631667,0.590296,0.81,0.233842,836,3438,4274
144,5/24/2011,2,0,5,2,2,0.66,0.604813,0.740833,0.207092,659,3833,4492
145,5/25/2011,2,0,5,3,1,0.660833,0.615542,0.69625,0.154233,740,4238,4978
146,5/26/2011,2,0,5,4,1,0.708333,0.654688,0.6775,0.199642,758,3919,4677
147,5/27/2011,2,0,5,5,1,0.681667,0.637008,0.65375,0.240679,871,3808,4679
148,5/28/2011,2,0,5,6,1,0.655833,0.612379,0.729583,0.230092,2001,2757,4758
149,5/29/2011,2,0,5,0,1,0.6675,0.61555,0.81875,0.213938,2355,2433,4788
150,5/30/2011,2,0,5,1,1,0.733333,0.671092,0.685,0.131225,1549,2549,4098
151,5/31/2011,2,0,5,2,1,0.775,0.725383,0.636667,0.111329,673,3309,3982
152,6/1/2011,2,0,6,3,2,0.764167,0.720967,0.677083,0.207092,513,3461,3974
153,6/2/2011,2,0,6,4,1,0.715,0.643942,0.305,0.292287,736,4232,4968
154,6/3/2011,2,0,6,5,1,0.62,0.587133,0.354167,0.253121,898,4414,5312
155,6/4/2011,2,0,6,6,1,0.635,0.594696,0.45625,0.123142,1869,3473,5342
156,6/5/2011,2,0,6,0,2,0.648333,0.616804,0.6525,0.138692,1685,3221,4906
157,6/6/2011,2,0,6,1,1,0.678333,0.621858,0.6,0.121896,673,3875,4548
158,6/7/2011,2,0,6,2,1,0.7075,0.65595,0.597917,0.187808,763,4070,4833
159,6/8/2011,2,0,6,3,1,0.775833,0.727279,0.622083,0.136817,676,3725,4401
160,6/9/2011,2,0,6,4,2,0.808333,0.757579,0.568333,0.149883,563,3352,3915
161,6/10/2011,2,0,6,5,1,0.755,0.703292,0.605,0.140554,815,3771,4586
162,6/11/2011,2,0,6,6,1,0.725,0.678038,0.654583,0.15485,1729,3237,4966
163,6/12/2011,2,0,6,0,1,0.6925,0.643325,0.747917,0.163567,1467,2993,4460
164,6/13/2011,2,0,6,1,1,0.635,0.601654,0.494583,0.30535,863,4157,5020
165,6/14/2011,2,0,6,2,1,0.604167,0.591546,0.507083,0.269283,727,4164,4891
166,6/15/2011,2,0,6,3,1,0.626667,0.587754,0.471667,0.167912,769,4411,5180
167,6/16/2011,2,0,6,4,2,0.628333,0.595346,0.688333,0.206471,545,3222,3767
168,6/17/2011,2,0,6,5,1,0.649167,0.600383,0.735833,0.143029,863,3981,4844
169,6/18/2011,2,0,6,6,1,0.696667,0.643954,0.670417,0.119408,1807,3312,5119
170,6/19/2011,2,0,6,0,2,0.699167,0.645846,0.666667,0.102,1639,3105,4744
171,6/20/2011,2,0,6,1,2,0.635,0.595346,0.74625,0.155475,699,3311,4010
172,6/21/2011,3,0,6,2,2,0.680833,0.637646,0.770417,0.171025,774,4061,4835
173,6/22/2011,3,0,6,3,1,0.733333,0.693829,0.7075,0.172262,661,3846,4507
174,6/23/2011,3,0,6,4,2,0.728333,0.693833,0.703333,0.238804,746,4044,4790
175,6/24/2011,3,0,6,5,1,0.724167,0.656583,0.573333,0.222025,969,4022,4991
176,6/25/2011,3,0,6,6,1,0.695,0.643313,0.483333,0.209571,1782,3420,5202
177,6/26/2011,3,0,6,0,1,0.68,0.637629,0.513333,0.0945333,1920,3385,5305
178,6/27/2011,3,0,6,1,2,0.6825,0.637004,0.658333,0.107588,854,3854,4708
179,6/28/2011,3,0,6,2,1,0.744167,0.692558,0.634167,0.144283,732,3916,4648
180,6/29/2011,3,0,6,3,1,0.728333,0.654688,0.497917,0.261821,848,4377,5225
181,6/30/2011,3,0,6,4,1,0.696667,0.637008,0.434167,0.185312,1027,4488,5515
182,7/1/2011,3,0,7,5,1,0.7225,0.652162,0.39625,0.102608,1246,4116,5362
183,7/2/2011,3,0,7,6,1,0.738333,0.667308,0.444583,0.115062,2204,2915,5119
184,7/3/2011,3,0,7,0,2,0.716667,0.668575,0.6825,0.228858,2282,2367,4649
185,7/4/2011,3,0,7,1,2,0.726667,0.665417,0.637917,0.0814792,3065,2978,6043
186,7/5/2011,3,0,7,2,1,0.746667,0.696338,0.590417,0.126258,1031,3634,4665
187,7/6/2011,3,0,7,3,1,0.72,0.685633,0.743333,0.149883,784,3845,4629
188,7/7/2011,3,0,7,4,1,0.75,0.686871,0.65125,0.1592,754,3838,4592
189,7/8/2011,3,0,7,5,2,0.709167,0.670483,0.757917,0.225129,692,3348,4040
190,7/9/2011,3,0,7,6,1,0.733333,0.664158,0.609167,0.167912,1988,3348,5336
191,7/10/2011,3,0,7,0,1,0.7475,0.690025,0.578333,0.183471,1743,3138,4881
192,7/11/2011,3,0,7,1,1,0.7625,0.729804,0.635833,0.282337,723,3363,4086
193,7/12/2011,3,0,7,2,1,0.794167,0.739275,0.559167,0.200254,662,3596,4258
194,7/13/2011,3,0,7,3,1,0.746667,0.689404,0.631667,0.146133,748,3594,4342
195,7/14/2011,3,0,7,4,1,0.680833,0.635104,0.47625,0.240667,888,4196,5084
196,7/15/2011,3,0,7,5,1,0.663333,0.624371,0.59125,0.182833,1318,4220,5538
197,7/16/2011,3,0,7,6,1,0.686667,0.638263,0.585,0.208342,2418,3505,5923
198,7/17/2011,3,0,7,0,1,0.719167,0.669833,0.604167,0.245033,2006,3296,5302
199,7/18/2011,3,0,7,1,1,0.746667,0.703925,0.65125,0.215804,841,3617,4458
200,7/19/2011,3,0,7,2,1,0.776667,0.747479,0.650417,0.1306,752,3789,4541
201,7/20/2011,3,0,7,3,1,0.768333,0.74685,0.707083,0.113817,644,3688,4332
202,7/21/2011,3,0,7,4,2,0.815,0.826371,0.69125,0.222021,632,3152,3784
203,7/22/2011,3,0,7,5,1,0.848333,0.840896,0.580417,0.1331,562,2825,3387
204,7/23/2011,3,0,7,6,1,0.849167,0.804287,0.5,0.131221,987,2298,3285
205,7/24/2011,3,0,7,0,1,0.83,0.794829,0.550833,0.169171,1050,2556,3606
206,7/25/2011,3,0,7,1,1,0.743333,0.720958,0.757083,0.0908083,568,3272,3840
207,7/26/2011,3,0,7,2,1,0.771667,0.696979,0.540833,0.200258,750,3840,4590
208,7/27/2011,3,0,7,3,1,0.775,0.690667,0.402917,0.183463,755,3901,4656
209,7/28/2011,3,0,7,4,1,0.779167,0.7399,0.583333,0.178479,606,3784,4390
210,7/29/2011,3,0,7,5,1,0.838333,0.785967,0.5425,0.174138,670,3176,3846
211,7/30/2011,3,0,7,6,1,0.804167,0.728537,0.465833,0.168537,1559,2916,4475
212,7/31/2011,3,0,7,0,1,0.805833,0.729796,0.480833,0.164813,1524,2778,4302
213,8/1/2011,3,0,8,1,1,0.771667,0.703292,0.550833,0.156717,729,3537,4266
214,8/2/2011,3,0,8,2,1,0.783333,0.707071,0.49125,0.20585,801,4044,4845
215,8/3/2011,3,0,8,3,2,0.731667,0.679937,0.6575,0.135583,467,3107,3574
216,8/4/2011,3,0,8,4,2,0.71,0.664788,0.7575,0.19715,799,3777,4576
217,8/5/2011,3,0,8,5,1,0.710833,0.656567,0.630833,0.184696,1023,3843,4866
218,8/6/2011,3,0,8,6,2,0.716667,0.676154,0.755,0.22825,1521,2773,4294
219,8/7/2011,3,0,8,0,1,0.7425,0.715292,0.752917,0.201487,1298,2487,3785
220,8/8/2011,3,0,8,1,1,0.765,0.703283,0.592083,0.192175,846,3480,4326
221,8/9/2011,3,0,8,2,1,0.775,0.724121,0.570417,0.151121,907,3695,4602
222,8/10/2011,3,0,8,3,1,0.766667,0.684983,0.424167,0.200258,884,3896,4780
223,8/11/2011,3,0,8,4,1,0.7175,0.651521,0.42375,0.164796,812,3980,4792
224,8/12/2011,3,0,8,5,1,0.708333,0.654042,0.415,0.125621,1051,3854,4905
225,8/13/2011,3,0,8,6,2,0.685833,0.645858,0.729583,0.211454,1504,2646,4150
226,8/14/2011,3,0,8,0,2,0.676667,0.624388,0.8175,0.222633,1338,2482,3820
227,8/15/2011,3,0,8,1,1,0.665833,0.616167,0.712083,0.208954,775,3563,4338
228,8/16/2011,3,0,8,2,1,0.700833,0.645837,0.578333,0.236329,721,4004,4725
229,8/17/2011,3,0,8,3,1,0.723333,0.666671,0.575417,0.143667,668,4026,4694
230,8/18/2011,3,0,8,4,1,0.711667,0.662258,0.654583,0.233208,639,3166,3805
231,8/19/2011,3,0,8,5,2,0.685,0.633221,0.722917,0.139308,797,3356,4153
232,8/20/2011,3,0,8,6,1,0.6975,0.648996,0.674167,0.104467,1914,3277,5191
233,8/21/2011,3,0,8,0,1,0.710833,0.675525,0.77,0.248754,1249,2624,3873
234,8/22/2011,3,0,8,1,1,0.691667,0.638254,0.47,0.27675,833,3925,4758
235,8/23/2011,3,0,8,2,1,0.640833,0.606067,0.455417,0.146763,1281,4614,5895
236,8/24/2011,3,0,8,3,1,0.673333,0.630692,0.605,0.253108,949,4181,5130
237,8/25/2011,3,0,8,4,2,0.684167,0.645854,0.771667,0.210833,435,3107,3542
238,8/26/2011,3,0,8,5,1,0.7,0.659733,0.76125,0.0839625,768,3893,4661
239,8/27/2011,3,0,8,6,2,0.68,0.635556,0.85,0.375617,226,889,1115
240,8/28/2011,3,0,8,0,1,0.707059,0.647959,0.561765,0.304659,1415,2919,4334
241,8/29/2011,3,0,8,1,1,0.636667,0.607958,0.554583,0.159825,729,3905,4634
242,8/30/2011,3,0,8,2,1,0.639167,0.594704,0.548333,0.125008,775,4429,5204
243,8/31/2011,3,0,8,3,1,0.656667,0.611121,0.597917,0.0833333,688,4370,5058
244,9/1/2011,3,0,9,4,1,0.655,0.614921,0.639167,0.141796,783,4332,5115
245,9/2/2011,3,0,9,5,2,0.643333,0.604808,0.727083,0.139929,875,3852,4727
246,9/3/2011,3,0,9,6,1,0.669167,0.633213,0.716667,0.185325,1935,2549,4484
247,9/4/2011,3,0,9,0,1,0.709167,0.665429,0.742083,0.206467,2521,2419,4940
248,9/5/2011,3,0,9,1,2,0.673333,0.625646,0.790417,0.212696,1236,2115,3351
249,9/6/2011,3,0,9,2,3,0.54,0.5152,0.886957,0.343943,204,2506,2710
250,9/7/2011,3,0,9,3,3,0.599167,0.544229,0.917083,0.0970208,118,1878,1996
251,9/8/2011,3,0,9,4,3,0.633913,0.555361,0.939565,0.192748,153,1689,1842
252,9/9/2011,3,0,9,5,2,0.65,0.578946,0.897917,0.124379,417,3127,3544
253,9/10/2011,3,0,9,6,1,0.66,0.607962,0.75375,0.153608,1750,3595,5345
254,9/11/2011,3,0,9,0,1,0.653333,0.609229,0.71375,0.115054,1633,3413,5046
255,9/12/2011,3,0,9,1,1,0.644348,0.60213,0.692174,0.088913,690,4023,4713
256,9/13/2011,3,0,9,2,1,0.650833,0.603554,0.7125,0.141804,701,4062,4763
257,9/14/2011,3,0,9,3,1,0.673333,0.6269,0.697083,0.1673,647,4138,4785
258,9/15/2011,3,0,9,4,2,0.5775,0.553671,0.709167,0.271146,428,3231,3659
259,9/16/2011,3,0,9,5,2,0.469167,0.461475,0.590417,0.164183,742,4018,4760
260,9/17/2011,3,0,9,6,2,0.491667,0.478512,0.718333,0.189675,1434,3077,4511
261,9/18/2011,3,0,9,0,1,0.5075,0.490537,0.695,0.178483,1353,2921,4274
262,9/19/2011,3,0,9,1,2,0.549167,0.529675,0.69,0.151742,691,3848,4539
263,9/20/2011,3,0,9,2,2,0.561667,0.532217,0.88125,0.134954,438,3203,3641
264,9/21/2011,3,0,9,3,2,0.595,0.550533,0.9,0.0964042,539,3813,4352
265,9/22/2011,3,0,9,4,2,0.628333,0.554963,0.902083,0.128125,555,4240,4795
266,9/23/2011,4,0,9,5,2,0.609167,0.522125,0.9725,0.0783667,258,2137,2395
267,9/24/2011,4,0,9,6,2,0.606667,0.564412,0.8625,0.0783833,1776,3647,5423
268,9/25/2011,4,0,9,0,2,0.634167,0.572637,0.845,0.0503792,1544,3466,5010
269,9/26/2011,4,0,9,1,2,0.649167,0.589042,0.848333,0.1107,684,3946,4630
270,9/27/2011,4,0,9,2,2,0.636667,0.574525,0.885417,0.118171,477,3643,4120
271,9/28/2011,4,0,9,3,2,0.635,0.575158,0.84875,0.148629,480,3427,3907
272,9/29/2011,4,0,9,4,1,0.616667,0.574512,0.699167,0.172883,653,4186,4839
273,9/30/2011,4,0,9,5,1,0.564167,0.544829,0.6475,0.206475,830,4372,5202
274,10/1/2011,4,0,10,6,2,0.41,0.412863,0.75375,0.292296,480,1949,2429
275,10/2/2011,4,0,10,0,2,0.356667,0.345317,0.791667,0.222013,616,2302,2918
276,10/3/2011,4,0,10,1,2,0.384167,0.392046,0.760833,0.0833458,330,3240,3570
277,10/4/2011,4,0,10,2,1,0.484167,0.472858,0.71,0.205854,486,3970,4456
278,10/5/2011,4,0,10,3,1,0.538333,0.527138,0.647917,0.17725,559,4267,4826
279,10/6/2011,4,0,10,4,1,0.494167,0.480425,0.620833,0.134954,639,4126,4765
280,10/7/2011,4,0,10,5,1,0.510833,0.504404,0.684167,0.0223917,949,4036,4985
281,10/8/2011,4,0,10,6,1,0.521667,0.513242,0.70125,0.0454042,2235,3174,5409
282,10/9/2011,4,0,10,0,1,0.540833,0.523983,0.7275,0.06345,2397,3114,5511
283,10/10/2011,4,0,10,1,1,0.570833,0.542925,0.73375,0.0423042,1514,3603,5117
284,10/11/2011,4,0,10,2,2,0.566667,0.546096,0.80875,0.143042,667,3896,4563
285,10/12/2011,4,0,10,3,3,0.543333,0.517717,0.90625,0.24815,217,2199,2416
286,10/13/2011,4,0,10,4,2,0.589167,0.551804,0.896667,0.141787,290,2623,2913
287,10/14/2011,4,0,10,5,2,0.550833,0.529675,0.71625,0.223883,529,3115,3644
288,10/15/2011,4,0,10,6,1,0.506667,0.498725,0.483333,0.258083,1899,3318,5217
289,10/16/2011,4,0,10,0,1,0.511667,0.503154,0.486667,0.281717,1748,3293,5041
290,10/17/2011,4,0,10,1,1,0.534167,0.510725,0.579583,0.175379,713,3857,4570
291,10/18/2011,4,0,10,2,2,0.5325,0.522721,0.701667,0.110087,637,4111,4748
292,10/19/2011,4,0,10,3,3,0.541739,0.513848,0.895217,0.243339,254,2170,2424
293,10/20/2011,4,0,10,4,1,0.475833,0.466525,0.63625,0.422275,471,3724,4195
294,10/21/2011,4,0,10,5,1,0.4275,0.423596,0.574167,0.221396,676,3628,4304
295,10/22/2011,4,0,10,6,1,0.4225,0.425492,0.629167,0.0926667,1499,2809,4308
296,10/23/2011,4,0,10,0,1,0.421667,0.422333,0.74125,0.0995125,1619,2762,4381
297,10/24/2011,4,0,10,1,1,0.463333,0.457067,0.772083,0.118792,699,3488,4187
298,10/25/2011,4,0,10,2,1,0.471667,0.463375,0.622917,0.166658,695,3992,4687
299,10/26/2011,4,0,10,3,2,0.484167,0.472846,0.720417,0.148642,404,3490,3894
300,10/27/2011,4,0,10,4,2,0.47,0.457046,0.812917,0.197763,240,2419,2659
301,10/28/2011,4,0,10,5,2,0.330833,0.318812,0.585833,0.229479,456,3291,3747
302,10/29/2011,4,0,10,6,3,0.254167,0.227913,0.8825,0.351371,57,570,627
303,10/30/2011,4,0,10,0,1,0.319167,0.321329,0.62375,0.176617,885,2446,3331
304,10/31/2011,4,0,10,1,1,0.34,0.356063,0.703333,0.10635,362,3307,3669
305,11/1/2011,4,0,11,2,1,0.400833,0.397088,0.68375,0.135571,410,3658,4068
306,11/2/2011,4,0,11,3,1,0.3775,0.390133,0.71875,0.0820917,370,3816,4186
307,11/3/2011,4,0,11,4,1,0.408333,0.405921,0.702083,0.136817,318,3656,3974
308,11/4/2011,4,0,11,5,2,0.403333,0.403392,0.6225,0.271779,470,3576,4046
309,11/5/2011,4,0,11,6,1,0.326667,0.323854,0.519167,0.189062,1156,2770,3926
310,11/6/2011,4,0,11,0,1,0.348333,0.362358,0.734583,0.0920542,952,2697,3649
311,11/7/2011,4,0,11,1,1,0.395,0.400871,0.75875,0.057225,373,3662,4035
312,11/8/2011,4,0,11,2,1,0.408333,0.412246,0.721667,0.0690375,376,3829,4205
313,11/9/2011,4,0,11,3,1,0.4,0.409079,0.758333,0.0621958,305,3804,4109
314,11/10/2011,4,0,11,4,2,0.38,0.373721,0.813333,0.189067,190,2743,2933
315,11/11/2011,4,0,11,5,1,0.324167,0.306817,0.44625,0.314675,440,2928,3368
316,11/12/2011,4,0,11,6,1,0.356667,0.357942,0.552917,0.212062,1275,2792,4067
317,11/13/2011,4,0,11,0,1,0.440833,0.43055,0.458333,0.281721,1004,2713,3717
318,11/14/2011,4,0,11,1,1,0.53,0.524612,0.587083,0.306596,595,3891,4486
319,11/15/2011,4,0,11,2,2,0.53,0.507579,0.68875,0.199633,449,3746,4195
320,11/16/2011,4,0,11,3,3,0.456667,0.451988,0.93,0.136829,145,1672,1817
321,11/17/2011,4,0,11,4,2,0.341667,0.323221,0.575833,0.305362,139,2914,3053
322,11/18/2011,4,0,11,5,1,0.274167,0.272721,0.41,0.168533,245,3147,3392
323,11/19/2011,4,0,11,6,1,0.329167,0.324483,0.502083,0.224496,943,2720,3663
324,11/20/2011,4,0,11,0,2,0.463333,0.457058,0.684583,0.18595,787,2733,3520
325,11/21/2011,4,0,11,1,3,0.4475,0.445062,0.91,0.138054,220,2545,2765
326,11/22/2011,4,0,11,2,3,0.416667,0.421696,0.9625,0.118792,69,1538,1607
327,11/23/2011,4,0,11,3,2,0.440833,0.430537,0.757917,0.335825,112,2454,2566
328,11/24/2011,4,0,11,4,1,0.373333,0.372471,0.549167,0.167304,560,935,1495
329,11/25/2011,4,0,11,5,1,0.375,0.380671,0.64375,0.0988958,1095,1697,2792
330,11/26/2011,4,0,11,6,1,0.375833,0.385087,0.681667,0.0684208,1249,1819,3068
331,11/27/2011,4,0,11,0,1,0.459167,0.4558,0.698333,0.208954,810,2261,3071
332,11/28/2011,4,0,11,1,1,0.503478,0.490122,0.743043,0.142122,253,3614,3867
333,11/29/2011,4,0,11,2,2,0.458333,0.451375,0.830833,0.258092,96,2818,2914
334,11/30/2011,4,0,11,3,1,0.325,0.311221,0.613333,0.271158,188,3425,3613
335,12/1/2011,4,0,12,4,1,0.3125,0.305554,0.524583,0.220158,182,3545,3727
336,12/2/2011,4,0,12,5,1,0.314167,0.331433,0.625833,0.100754,268,3672,3940
337,12/3/2011,4,0,12,6,1,0.299167,0.310604,0.612917,0.0957833,706,2908,3614
338,12/4/2011,4,0,12,0,1,0.330833,0.3491,0.775833,0.0839583,634,2851,3485
339,12/5/2011,4,0,12,1,2,0.385833,0.393925,0.827083,0.0622083,233,3578,3811
340,12/6/2011,4,0,12,2,3,0.4625,0.4564,0.949583,0.232583,126,2468,2594
341,12/7/2011,4,0,12,3,3,0.41,0.400246,0.970417,0.266175,50,655,705
342,12/8/2011,4,0,12,4,1,0.265833,0.256938,0.58,0.240058,150,3172,3322
343,12/9/2011,4,0,12,5,1,0.290833,0.317542,0.695833,0.0827167,261,3359,3620
344,12/10/2011,4,0,12,6,1,0.275,0.266412,0.5075,0.233221,502,2688,3190
345,12/11/2011,4,0,12,0,1,0.220833,0.253154,0.49,0.0665417,377,2366,2743
346,12/12/2011,4,0,12,1,1,0.238333,0.270196,0.670833,0.06345,143,3167,3310
347,12/13/2011,4,0,12,2,1,0.2825,0.301138,0.59,0.14055,155,3368,3523
348,12/14/2011,4,0,12,3,2,0.3175,0.338362,0.66375,0.0609583,178,3562,3740
349,12/15/2011,4,0,12,4,2,0.4225,0.412237,0.634167,0.268042,181,3528,3709
350,12/16/2011,4,0,12,5,2,0.375,0.359825,0.500417,0.260575,178,3399,3577
351,12/17/2011,4,0,12,6,2,0.258333,0.249371,0.560833,0.243167,275,2464,2739
352,12/18/2011,4,0,12,0,1,0.238333,0.245579,0.58625,0.169779,220,2211,2431
353,12/19/2011,4,0,12,1,1,0.276667,0.280933,0.6375,0.172896,260,3143,3403
354,12/20/2011,4,0,12,2,2,0.385833,0.396454,0.595417,0.0615708,216,3534,3750
355,12/21/2011,1,0,12,3,2,0.428333,0.428017,0.858333,0.2214,107,2553,2660
356,12/22/2011,1,0,12,4,2,0.423333,0.426121,0.7575,0.047275,227,2841,3068
357,12/23/2011,1,0,12,5,1,0.373333,0.377513,0.68625,0.274246,163,2046,2209
358,12/24/2011,1,0,12,6,1,0.3025,0.299242,0.5425,0.190304,155,856,1011
359,12/25/2011,1,0,12,0,1,0.274783,0.279961,0.681304,0.155091,303,451,754
360,12/26/2011,1,0,12,1,1,0.321739,0.315535,0.506957,0.239465,430,887,1317
361,12/27/2011,1,0,12,2,2,0.325,0.327633,0.7625,0.18845,103,1059,1162
362,12/28/2011,1,0,12,3,1,0.29913,0.279974,0.503913,0.293961,255,2047,2302
363,12/29/2011,1,0,12,4,1,0.248333,0.263892,0.574167,0.119412,254,2169,2423
364,12/30/2011,1,0,12,5,1,0.311667,0.318812,0.636667,0.134337,491,2508,2999
365,12/31/2011,1,0,12,6,1,0.41,0.414121,0.615833,0.220154,665,1820,2485
366,1/1/2012,1,1,1,0,1,0.37,0.375621,0.6925,0.192167,686,1608,2294
367,1/2/2012,1,1,1,1,1,0.273043,0.252304,0.381304,0.329665,244,1707,1951
368,1/3/2012,1,1,1,2,1,0.15,0.126275,0.44125,0.365671,89,2147,2236
369,1/4/2012,1,1,1,3,2,0.1075,0.119337,0.414583,0.1847,95,2273,2368
370,1/5/2012,1,1,1,4,1,0.265833,0.278412,0.524167,0.129987,140,3132,3272
371,1/6/2012,1,1,1,5,1,0.334167,0.340267,0.542083,0.167908,307,3791,4098
372,1/7/2012,1,1,1,6,1,0.393333,0.390779,0.531667,0.174758,1070,3451,4521
373,1/8/2012,1,1,1,0,1,0.3375,0.340258,0.465,0.191542,599,2826,3425
374,1/9/2012,1,1,1,1,2,0.224167,0.247479,0.701667,0.0989,106,2270,2376
375,1/10/2012,1,1,1,2,1,0.308696,0.318826,0.646522,0.187552,173,3425,3598
376,1/11/2012,1,1,1,3,2,0.274167,0.282821,0.8475,0.131221,92,2085,2177
377,1/12/2012,1,1,1,4,2,0.3825,0.381938,0.802917,0.180967,269,3828,4097
378,1/13/2012,1,1,1,5,1,0.274167,0.249362,0.5075,0.378108,174,3040,3214
379,1/14/2012,1,1,1,6,1,0.18,0.183087,0.4575,0.187183,333,2160,2493
380,1/15/2012,1,1,1,0,1,0.166667,0.161625,0.419167,0.251258,284,2027,2311
381,1/16/2012,1,1,1,1,1,0.19,0.190663,0.5225,0.231358,217,2081,2298
382,1/17/2012,1,1,1,2,2,0.373043,0.364278,0.716087,0.34913,127,2808,2935
383,1/18/2012,1,1,1,3,1,0.303333,0.275254,0.443333,0.415429,109,3267,3376
384,1/19/2012,1,1,1,4,1,0.19,0.190038,0.4975,0.220158,130,3162,3292
385,1/20/2012,1,1,1,5,2,0.2175,0.220958,0.45,0.20275,115,3048,3163
386,1/21/2012,1,1,1,6,2,0.173333,0.174875,0.83125,0.222642,67,1234,1301
387,1/22/2012,1,1,1,0,2,0.1625,0.16225,0.79625,0.199638,196,1781,1977
388,1/23/2012,1,1,1,1,2,0.218333,0.243058,0.91125,0.110708,145,2287,2432
389,1/24/2012,1,1,1,2,1,0.3425,0.349108,0.835833,0.123767,439,3900,4339
390,1/25/2012,1,1,1,3,1,0.294167,0.294821,0.64375,0.161071,467,3803,4270
391,1/26/2012,1,1,1,4,2,0.341667,0.35605,0.769583,0.0733958,244,3831,4075
392,1/27/2012,1,1,1,5,2,0.425,0.415383,0.74125,0.342667,269,3187,3456
393,1/28/2012,1,1,1,6,1,0.315833,0.326379,0.543333,0.210829,775,3248,4023
394,1/29/2012,1,1,1,0,1,0.2825,0.272721,0.31125,0.24005,558,2685,3243
395,1/30/2012,1,1,1,1,1,0.269167,0.262625,0.400833,0.215792,126,3498,3624
396,1/31/2012,1,1,1,2,1,0.39,0.381317,0.416667,0.261817,324,4185,4509
397,2/1/2012,1,1,2,3,1,0.469167,0.466538,0.507917,0.189067,304,4275,4579
398,2/2/2012,1,1,2,4,2,0.399167,0.398971,0.672917,0.187187,190,3571,3761
399,2/3/2012,1,1,2,5,1,0.313333,0.309346,0.526667,0.178496,310,3841,4151
400,2/4/2012,1,1,2,6,2,0.264167,0.272725,0.779583,0.121896,384,2448,2832
401,2/5/2012,1,1,2,0,2,0.265833,0.264521,0.687917,0.175996,318,2629,2947
402,2/6/2012,1,1,2,1,1,0.282609,0.296426,0.622174,0.1538,206,3578,3784
403,2/7/2012,1,1,2,2,1,0.354167,0.361104,0.49625,0.147379,199,4176,4375
404,2/8/2012,1,1,2,3,2,0.256667,0.266421,0.722917,0.133721,109,2693,2802
405,2/9/2012,1,1,2,4,1,0.265,0.261988,0.562083,0.194037,163,3667,3830
406,2/10/2012,1,1,2,5,2,0.280833,0.293558,0.54,0.116929,227,3604,3831
407,2/11/2012,1,1,2,6,3,0.224167,0.210867,0.73125,0.289796,192,1977,2169
408,2/12/2012,1,1,2,0,1,0.1275,0.101658,0.464583,0.409212,73,1456,1529
409,2/13/2012,1,1,2,1,1,0.2225,0.227913,0.41125,0.167283,94,3328,3422
410,2/14/2012,1,1,2,2,2,0.319167,0.333946,0.50875,0.141179,135,3787,3922
411,2/15/2012,1,1,2,3,1,0.348333,0.351629,0.53125,0.1816,141,4028,4169
412,2/16/2012,1,1,2,4,2,0.316667,0.330162,0.752917,0.091425,74,2931,3005
413,2/17/2012,1,1,2,5,1,0.343333,0.351629,0.634583,0.205846,349,3805,4154
414,2/18/2012,1,1,2,6,1,0.346667,0.355425,0.534583,0.190929,1435,2883,4318
415,2/19/2012,1,1,2,0,2,0.28,0.265788,0.515833,0.253112,618,2071,2689
416,2/20/2012,1,1,2,1,1,0.28,0.273391,0.507826,0.229083,502,2627,3129
417,2/21/2012,1,1,2,2,1,0.287826,0.295113,0.594348,0.205717,163,3614,3777
418,2/22/2012,1,1,2,3,1,0.395833,0.392667,0.567917,0.234471,394,4379,4773
419,2/23/2012,1,1,2,4,1,0.454167,0.444446,0.554583,0.190913,516,4546,5062
420,2/24/2012,1,1,2,5,2,0.4075,0.410971,0.7375,0.237567,246,3241,3487
421,2/25/2012,1,1,2,6,1,0.290833,0.255675,0.395833,0.421642,317,2415,2732
422,2/26/2012,1,1,2,0,1,0.279167,0.268308,0.41,0.205229,515,2874,3389
423,2/27/2012,1,1,2,1,1,0.366667,0.357954,0.490833,0.268033,253,4069,4322
424,2/28/2012,1,1,2,2,1,0.359167,0.353525,0.395833,0.193417,229,4134,4363
425,2/29/2012,1,1,2,3,2,0.344348,0.34847,0.804783,0.179117,65,1769,1834
426,3/1/2012,1,1,3,4,1,0.485833,0.475371,0.615417,0.226987,325,4665,4990
427,3/2/2012,1,1,3,5,2,0.353333,0.359842,0.657083,0.144904,246,2948,3194
428,3/3/2012,1,1,3,6,2,0.414167,0.413492,0.62125,0.161079,956,3110,4066
429,3/4/2012,1,1,3,0,1,0.325833,0.303021,0.403333,0.334571,710,2713,3423
430,3/5/2012,1,1,3,1,1,0.243333,0.241171,0.50625,0.228858,203,3130,3333
431,3/6/2012,1,1,3,2,1,0.258333,0.255042,0.456667,0.200875,221,3735,3956
432,3/7/2012,1,1,3,3,1,0.404167,0.3851,0.513333,0.345779,432,4484,4916
433,3/8/2012,1,1,3,4,1,0.5275,0.524604,0.5675,0.441563,486,4896,5382
434,3/9/2012,1,1,3,5,2,0.410833,0.397083,0.407083,0.4148,447,4122,4569
435,3/10/2012,1,1,3,6,1,0.2875,0.277767,0.350417,0.22575,968,3150,4118
436,3/11/2012,1,1,3,0,1,0.361739,0.35967,0.476957,0.222587,1658,3253,4911
437,3/12/2012,1,1,3,1,1,0.466667,0.459592,0.489167,0.207713,838,4460,5298
438,3/13/2012,1,1,3,2,1,0.565,0.542929,0.6175,0.23695,762,5085,5847
439,3/14/2012,1,1,3,3,1,0.5725,0.548617,0.507083,0.115062,997,5315,6312
440,3/15/2012,1,1,3,4,1,0.5575,0.532825,0.579583,0.149883,1005,5187,6192
441,3/16/2012,1,1,3,5,2,0.435833,0.436229,0.842083,0.113192,548,3830,4378
442,3/17/2012,1,1,3,6,2,0.514167,0.505046,0.755833,0.110704,3155,4681,7836
443,3/18/2012,1,1,3,0,2,0.4725,0.464,0.81,0.126883,2207,3685,5892
444,3/19/2012,1,1,3,1,1,0.545,0.532821,0.72875,0.162317,982,5171,6153
445,3/20/2012,1,1,3,2,1,0.560833,0.538533,0.807917,0.121271,1051,5042,6093
446,3/21/2012,2,1,3,3,2,0.531667,0.513258,0.82125,0.0895583,1122,5108,6230
447,3/22/2012,2,1,3,4,1,0.554167,0.531567,0.83125,0.117562,1334,5537,6871
448,3/23/2012,2,1,3,5,2,0.601667,0.570067,0.694167,0.1163,2469,5893,8362
449,3/24/2012,2,1,3,6,2,0.5025,0.486733,0.885417,0.192783,1033,2339,3372
|
||||
450,3/25/2012,2,1,3,0,2,0.4375,0.437488,0.880833,0.220775,1532,3464,4996
|
||||
451,3/26/2012,2,1,3,1,1,0.445833,0.43875,0.477917,0.386821,795,4763,5558
|
||||
452,3/27/2012,2,1,3,2,1,0.323333,0.315654,0.29,0.187192,531,4571,5102
|
||||
453,3/28/2012,2,1,3,3,1,0.484167,0.47095,0.48125,0.291671,674,5024,5698
|
||||
454,3/29/2012,2,1,3,4,1,0.494167,0.482304,0.439167,0.31965,834,5299,6133
|
||||
455,3/30/2012,2,1,3,5,2,0.37,0.375621,0.580833,0.138067,796,4663,5459
|
||||
456,3/31/2012,2,1,3,6,2,0.424167,0.421708,0.738333,0.250617,2301,3934,6235
|
||||
457,4/1/2012,2,1,4,0,2,0.425833,0.417287,0.67625,0.172267,2347,3694,6041
|
||||
458,4/2/2012,2,1,4,1,1,0.433913,0.427513,0.504348,0.312139,1208,4728,5936
|
||||
459,4/3/2012,2,1,4,2,1,0.466667,0.461483,0.396667,0.100133,1348,5424,6772
|
||||
460,4/4/2012,2,1,4,3,1,0.541667,0.53345,0.469583,0.180975,1058,5378,6436
|
||||
461,4/5/2012,2,1,4,4,1,0.435,0.431163,0.374167,0.219529,1192,5265,6457
|
||||
462,4/6/2012,2,1,4,5,1,0.403333,0.390767,0.377083,0.300388,1807,4653,6460
|
||||
463,4/7/2012,2,1,4,6,1,0.4375,0.426129,0.254167,0.274871,3252,3605,6857
|
||||
464,4/8/2012,2,1,4,0,1,0.5,0.492425,0.275833,0.232596,2230,2939,5169
|
||||
465,4/9/2012,2,1,4,1,1,0.489167,0.476638,0.3175,0.358196,905,4680,5585
|
||||
466,4/10/2012,2,1,4,2,1,0.446667,0.436233,0.435,0.249375,819,5099,5918
|
||||
467,4/11/2012,2,1,4,3,1,0.348696,0.337274,0.469565,0.295274,482,4380,4862
|
||||
468,4/12/2012,2,1,4,4,1,0.3975,0.387604,0.46625,0.290429,663,4746,5409
|
||||
469,4/13/2012,2,1,4,5,1,0.4425,0.431808,0.408333,0.155471,1252,5146,6398
|
||||
470,4/14/2012,2,1,4,6,1,0.495,0.487996,0.502917,0.190917,2795,4665,7460
|
||||
471,4/15/2012,2,1,4,0,1,0.606667,0.573875,0.507917,0.225129,2846,4286,7132
|
||||
472,4/16/2012,2,1,4,1,1,0.664167,0.614925,0.561667,0.284829,1198,5172,6370
|
||||
473,4/17/2012,2,1,4,2,1,0.608333,0.598487,0.390417,0.273629,989,5702,6691
|
||||
474,4/18/2012,2,1,4,3,2,0.463333,0.457038,0.569167,0.167912,347,4020,4367
|
||||
475,4/19/2012,2,1,4,4,1,0.498333,0.493046,0.6125,0.0659292,846,5719,6565
|
||||
476,4/20/2012,2,1,4,5,1,0.526667,0.515775,0.694583,0.149871,1340,5950,7290
|
||||
477,4/21/2012,2,1,4,6,1,0.57,0.542921,0.682917,0.283587,2541,4083,6624
|
||||
478,4/22/2012,2,1,4,0,3,0.396667,0.389504,0.835417,0.344546,120,907,1027
|
||||
479,4/23/2012,2,1,4,1,2,0.321667,0.301125,0.766667,0.303496,195,3019,3214
|
||||
480,4/24/2012,2,1,4,2,1,0.413333,0.405283,0.454167,0.249383,518,5115,5633
|
||||
481,4/25/2012,2,1,4,3,1,0.476667,0.470317,0.427917,0.118792,655,5541,6196
|
||||
482,4/26/2012,2,1,4,4,2,0.498333,0.483583,0.756667,0.176625,475,4551,5026
|
||||
483,4/27/2012,2,1,4,5,1,0.4575,0.452637,0.400833,0.347633,1014,5219,6233
|
||||
484,4/28/2012,2,1,4,6,2,0.376667,0.377504,0.489583,0.129975,1120,3100,4220
|
||||
485,4/29/2012,2,1,4,0,1,0.458333,0.450121,0.587083,0.116908,2229,4075,6304
|
||||
486,4/30/2012,2,1,4,1,2,0.464167,0.457696,0.57,0.171638,665,4907,5572
|
||||
487,5/1/2012,2,1,5,2,2,0.613333,0.577021,0.659583,0.156096,653,5087,5740
|
||||
488,5/2/2012,2,1,5,3,1,0.564167,0.537896,0.797083,0.138058,667,5502,6169
|
||||
489,5/3/2012,2,1,5,4,2,0.56,0.537242,0.768333,0.133696,764,5657,6421
|
||||
490,5/4/2012,2,1,5,5,1,0.6275,0.590917,0.735417,0.162938,1069,5227,6296
|
||||
491,5/5/2012,2,1,5,6,2,0.621667,0.584608,0.756667,0.152992,2496,4387,6883
|
||||
492,5/6/2012,2,1,5,0,2,0.5625,0.546737,0.74,0.149879,2135,4224,6359
|
||||
493,5/7/2012,2,1,5,1,2,0.5375,0.527142,0.664167,0.230721,1008,5265,6273
|
||||
494,5/8/2012,2,1,5,2,2,0.581667,0.557471,0.685833,0.296029,738,4990,5728
|
||||
495,5/9/2012,2,1,5,3,2,0.575,0.553025,0.744167,0.216412,620,4097,4717
|
||||
496,5/10/2012,2,1,5,4,1,0.505833,0.491783,0.552083,0.314063,1026,5546,6572
|
||||
497,5/11/2012,2,1,5,5,1,0.533333,0.520833,0.360417,0.236937,1319,5711,7030
|
||||
498,5/12/2012,2,1,5,6,1,0.564167,0.544817,0.480417,0.123133,2622,4807,7429
|
||||
499,5/13/2012,2,1,5,0,1,0.6125,0.585238,0.57625,0.225117,2172,3946,6118
|
||||
500,5/14/2012,2,1,5,1,2,0.573333,0.5499,0.789583,0.212692,342,2501,2843
|
||||
501,5/15/2012,2,1,5,2,2,0.611667,0.576404,0.794583,0.147392,625,4490,5115
|
||||
502,5/16/2012,2,1,5,3,1,0.636667,0.595975,0.697917,0.122512,991,6433,7424
|
||||
503,5/17/2012,2,1,5,4,1,0.593333,0.572613,0.52,0.229475,1242,6142,7384
|
||||
504,5/18/2012,2,1,5,5,1,0.564167,0.551121,0.523333,0.136817,1521,6118,7639
|
||||
505,5/19/2012,2,1,5,6,1,0.6,0.566908,0.45625,0.083975,3410,4884,8294
|
||||
506,5/20/2012,2,1,5,0,1,0.620833,0.583967,0.530417,0.254367,2704,4425,7129
|
||||
507,5/21/2012,2,1,5,1,2,0.598333,0.565667,0.81125,0.233204,630,3729,4359
|
||||
508,5/22/2012,2,1,5,2,2,0.615,0.580825,0.765833,0.118167,819,5254,6073
|
||||
509,5/23/2012,2,1,5,3,2,0.621667,0.584612,0.774583,0.102,766,4494,5260
|
||||
510,5/24/2012,2,1,5,4,1,0.655,0.6067,0.716667,0.172896,1059,5711,6770
|
||||
511,5/25/2012,2,1,5,5,1,0.68,0.627529,0.747083,0.14055,1417,5317,6734
|
||||
512,5/26/2012,2,1,5,6,1,0.6925,0.642696,0.7325,0.198992,2855,3681,6536
|
||||
513,5/27/2012,2,1,5,0,1,0.69,0.641425,0.697083,0.215171,3283,3308,6591
|
||||
514,5/28/2012,2,1,5,1,1,0.7125,0.6793,0.67625,0.196521,2557,3486,6043
|
||||
515,5/29/2012,2,1,5,2,1,0.7225,0.672992,0.684583,0.2954,880,4863,5743
|
||||
516,5/30/2012,2,1,5,3,2,0.656667,0.611129,0.67,0.134329,745,6110,6855
|
||||
517,5/31/2012,2,1,5,4,1,0.68,0.631329,0.492917,0.195279,1100,6238,7338
|
||||
518,6/1/2012,2,1,6,5,2,0.654167,0.607962,0.755417,0.237563,533,3594,4127
|
||||
519,6/2/2012,2,1,6,6,1,0.583333,0.566288,0.549167,0.186562,2795,5325,8120
|
||||
520,6/3/2012,2,1,6,0,1,0.6025,0.575133,0.493333,0.184087,2494,5147,7641
|
||||
521,6/4/2012,2,1,6,1,1,0.5975,0.578283,0.487083,0.284833,1071,5927,6998
|
||||
522,6/5/2012,2,1,6,2,2,0.540833,0.525892,0.613333,0.209575,968,6033,7001
|
||||
523,6/6/2012,2,1,6,3,1,0.554167,0.542292,0.61125,0.077125,1027,6028,7055
|
||||
524,6/7/2012,2,1,6,4,1,0.6025,0.569442,0.567083,0.15735,1038,6456,7494
|
||||
525,6/8/2012,2,1,6,5,1,0.649167,0.597862,0.467917,0.175383,1488,6248,7736
|
||||
526,6/9/2012,2,1,6,6,1,0.710833,0.648367,0.437083,0.144287,2708,4790,7498
|
||||
527,6/10/2012,2,1,6,0,1,0.726667,0.663517,0.538333,0.133721,2224,4374,6598
|
||||
528,6/11/2012,2,1,6,1,2,0.720833,0.659721,0.587917,0.207713,1017,5647,6664
|
||||
529,6/12/2012,2,1,6,2,2,0.653333,0.597875,0.833333,0.214546,477,4495,4972
|
||||
530,6/13/2012,2,1,6,3,1,0.655833,0.611117,0.582083,0.343279,1173,6248,7421
|
||||
531,6/14/2012,2,1,6,4,1,0.648333,0.624383,0.569583,0.253733,1180,6183,7363
|
||||
532,6/15/2012,2,1,6,5,1,0.639167,0.599754,0.589583,0.176617,1563,6102,7665
|
||||
533,6/16/2012,2,1,6,6,1,0.631667,0.594708,0.504167,0.166667,2963,4739,7702
|
||||
534,6/17/2012,2,1,6,0,1,0.5925,0.571975,0.59875,0.144904,2634,4344,6978
|
||||
535,6/18/2012,2,1,6,1,2,0.568333,0.544842,0.777917,0.174746,653,4446,5099
|
||||
536,6/19/2012,2,1,6,2,1,0.688333,0.654692,0.69,0.148017,968,5857,6825
|
||||
537,6/20/2012,2,1,6,3,1,0.7825,0.720975,0.592083,0.113812,872,5339,6211
|
||||
538,6/21/2012,3,1,6,4,1,0.805833,0.752542,0.567917,0.118787,778,5127,5905
|
||||
539,6/22/2012,3,1,6,5,1,0.7775,0.724121,0.57375,0.182842,964,4859,5823
|
||||
540,6/23/2012,3,1,6,6,1,0.731667,0.652792,0.534583,0.179721,2657,4801,7458
|
||||
541,6/24/2012,3,1,6,0,1,0.743333,0.674254,0.479167,0.145525,2551,4340,6891
|
||||
542,6/25/2012,3,1,6,1,1,0.715833,0.654042,0.504167,0.300383,1139,5640,6779
|
||||
543,6/26/2012,3,1,6,2,1,0.630833,0.594704,0.373333,0.347642,1077,6365,7442
|
||||
544,6/27/2012,3,1,6,3,1,0.6975,0.640792,0.36,0.271775,1077,6258,7335
|
||||
545,6/28/2012,3,1,6,4,1,0.749167,0.675512,0.4225,0.17165,921,5958,6879
|
||||
546,6/29/2012,3,1,6,5,1,0.834167,0.786613,0.48875,0.165417,829,4634,5463
|
||||
547,6/30/2012,3,1,6,6,1,0.765,0.687508,0.60125,0.161071,1455,4232,5687
|
||||
548,7/1/2012,3,1,7,0,1,0.815833,0.750629,0.51875,0.168529,1421,4110,5531
|
||||
549,7/2/2012,3,1,7,1,1,0.781667,0.702038,0.447083,0.195267,904,5323,6227
|
||||
550,7/3/2012,3,1,7,2,1,0.780833,0.70265,0.492083,0.126237,1052,5608,6660
|
||||
551,7/4/2012,3,1,7,3,1,0.789167,0.732337,0.53875,0.13495,2562,4841,7403
|
||||
552,7/5/2012,3,1,7,4,1,0.8275,0.761367,0.457917,0.194029,1405,4836,6241
|
||||
553,7/6/2012,3,1,7,5,1,0.828333,0.752533,0.450833,0.146142,1366,4841,6207
|
||||
554,7/7/2012,3,1,7,6,1,0.861667,0.804913,0.492083,0.163554,1448,3392,4840
|
||||
555,7/8/2012,3,1,7,0,1,0.8225,0.790396,0.57375,0.125629,1203,3469,4672
|
||||
556,7/9/2012,3,1,7,1,2,0.710833,0.654054,0.683333,0.180975,998,5571,6569
|
||||
557,7/10/2012,3,1,7,2,2,0.720833,0.664796,0.6675,0.151737,954,5336,6290
|
||||
558,7/11/2012,3,1,7,3,1,0.716667,0.650271,0.633333,0.151733,975,6289,7264
|
||||
559,7/12/2012,3,1,7,4,1,0.715833,0.654683,0.529583,0.146775,1032,6414,7446
|
||||
560,7/13/2012,3,1,7,5,2,0.731667,0.667933,0.485833,0.08085,1511,5988,7499
|
||||
561,7/14/2012,3,1,7,6,2,0.703333,0.666042,0.699167,0.143679,2355,4614,6969
|
||||
562,7/15/2012,3,1,7,0,1,0.745833,0.705196,0.717917,0.166667,1920,4111,6031
|
||||
563,7/16/2012,3,1,7,1,1,0.763333,0.724125,0.645,0.164187,1088,5742,6830
|
||||
564,7/17/2012,3,1,7,2,1,0.818333,0.755683,0.505833,0.114429,921,5865,6786
|
||||
565,7/18/2012,3,1,7,3,1,0.793333,0.745583,0.577083,0.137442,799,4914,5713
|
||||
566,7/19/2012,3,1,7,4,1,0.77,0.714642,0.600417,0.165429,888,5703,6591
|
||||
567,7/20/2012,3,1,7,5,2,0.665833,0.613025,0.844167,0.208967,747,5123,5870
|
||||
568,7/21/2012,3,1,7,6,3,0.595833,0.549912,0.865417,0.2133,1264,3195,4459
|
||||
569,7/22/2012,3,1,7,0,2,0.6675,0.623125,0.7625,0.0939208,2544,4866,7410
|
||||
570,7/23/2012,3,1,7,1,1,0.741667,0.690017,0.694167,0.138683,1135,5831,6966
|
||||
571,7/24/2012,3,1,7,2,1,0.750833,0.70645,0.655,0.211454,1140,6452,7592
|
||||
572,7/25/2012,3,1,7,3,1,0.724167,0.654054,0.45,0.1648,1383,6790,8173
|
||||
573,7/26/2012,3,1,7,4,1,0.776667,0.739263,0.596667,0.284813,1036,5825,6861
|
||||
574,7/27/2012,3,1,7,5,1,0.781667,0.734217,0.594583,0.152992,1259,5645,6904
|
||||
575,7/28/2012,3,1,7,6,1,0.755833,0.697604,0.613333,0.15735,2234,4451,6685
|
||||
576,7/29/2012,3,1,7,0,1,0.721667,0.667933,0.62375,0.170396,2153,4444,6597
|
||||
577,7/30/2012,3,1,7,1,1,0.730833,0.684987,0.66875,0.153617,1040,6065,7105
|
||||
578,7/31/2012,3,1,7,2,1,0.713333,0.662896,0.704167,0.165425,968,6248,7216
|
||||
579,8/1/2012,3,1,8,3,1,0.7175,0.667308,0.6775,0.141179,1074,6506,7580
|
||||
580,8/2/2012,3,1,8,4,1,0.7525,0.707088,0.659583,0.129354,983,6278,7261
|
||||
581,8/3/2012,3,1,8,5,2,0.765833,0.722867,0.6425,0.215792,1328,5847,7175
|
||||
582,8/4/2012,3,1,8,6,1,0.793333,0.751267,0.613333,0.257458,2345,4479,6824
|
||||
583,8/5/2012,3,1,8,0,1,0.769167,0.731079,0.6525,0.290421,1707,3757,5464
|
||||
584,8/6/2012,3,1,8,1,2,0.7525,0.710246,0.654167,0.129354,1233,5780,7013
|
||||
585,8/7/2012,3,1,8,2,2,0.735833,0.697621,0.70375,0.116908,1278,5995,7273
|
||||
586,8/8/2012,3,1,8,3,2,0.75,0.707717,0.672917,0.1107,1263,6271,7534
|
||||
587,8/9/2012,3,1,8,4,1,0.755833,0.699508,0.620417,0.1561,1196,6090,7286
|
||||
588,8/10/2012,3,1,8,5,2,0.715833,0.667942,0.715833,0.238813,1065,4721,5786
|
||||
589,8/11/2012,3,1,8,6,2,0.6925,0.638267,0.732917,0.206479,2247,4052,6299
|
||||
590,8/12/2012,3,1,8,0,1,0.700833,0.644579,0.530417,0.122512,2182,4362,6544
|
||||
591,8/13/2012,3,1,8,1,1,0.720833,0.662254,0.545417,0.136212,1207,5676,6883
|
||||
592,8/14/2012,3,1,8,2,1,0.726667,0.676779,0.686667,0.169158,1128,5656,6784
|
||||
593,8/15/2012,3,1,8,3,1,0.706667,0.654037,0.619583,0.169771,1198,6149,7347
|
||||
594,8/16/2012,3,1,8,4,1,0.719167,0.654688,0.519167,0.141796,1338,6267,7605
|
||||
595,8/17/2012,3,1,8,5,1,0.723333,0.2424,0.570833,0.231354,1483,5665,7148
|
||||
596,8/18/2012,3,1,8,6,1,0.678333,0.618071,0.603333,0.177867,2827,5038,7865
|
||||
597,8/19/2012,3,1,8,0,2,0.635833,0.603554,0.711667,0.08645,1208,3341,4549
|
||||
598,8/20/2012,3,1,8,1,2,0.635833,0.595967,0.734167,0.129979,1026,5504,6530
|
||||
599,8/21/2012,3,1,8,2,1,0.649167,0.601025,0.67375,0.0727708,1081,5925,7006
|
||||
600,8/22/2012,3,1,8,3,1,0.6675,0.621854,0.677083,0.0702833,1094,6281,7375
|
||||
601,8/23/2012,3,1,8,4,1,0.695833,0.637008,0.635833,0.0845958,1363,6402,7765
|
||||
602,8/24/2012,3,1,8,5,2,0.7025,0.6471,0.615,0.0721458,1325,6257,7582
|
||||
603,8/25/2012,3,1,8,6,2,0.661667,0.618696,0.712917,0.244408,1829,4224,6053
|
||||
604,8/26/2012,3,1,8,0,2,0.653333,0.595996,0.845833,0.228858,1483,3772,5255
|
||||
605,8/27/2012,3,1,8,1,1,0.703333,0.654688,0.730417,0.128733,989,5928,6917
|
||||
606,8/28/2012,3,1,8,2,1,0.728333,0.66605,0.62,0.190925,935,6105,7040
|
||||
607,8/29/2012,3,1,8,3,1,0.685,0.635733,0.552083,0.112562,1177,6520,7697
|
||||
608,8/30/2012,3,1,8,4,1,0.706667,0.652779,0.590417,0.0771167,1172,6541,7713
|
||||
609,8/31/2012,3,1,8,5,1,0.764167,0.6894,0.5875,0.168533,1433,5917,7350
|
||||
610,9/1/2012,3,1,9,6,2,0.753333,0.702654,0.638333,0.113187,2352,3788,6140
|
||||
611,9/2/2012,3,1,9,0,2,0.696667,0.649,0.815,0.0640708,2613,3197,5810
|
||||
612,9/3/2012,3,1,9,1,1,0.7075,0.661629,0.790833,0.151121,1965,4069,6034
|
||||
613,9/4/2012,3,1,9,2,1,0.725833,0.686888,0.755,0.236321,867,5997,6864
|
||||
614,9/5/2012,3,1,9,3,1,0.736667,0.708983,0.74125,0.187808,832,6280,7112
|
||||
615,9/6/2012,3,1,9,4,2,0.696667,0.655329,0.810417,0.142421,611,5592,6203
|
||||
616,9/7/2012,3,1,9,5,1,0.703333,0.657204,0.73625,0.171646,1045,6459,7504
|
||||
617,9/8/2012,3,1,9,6,2,0.659167,0.611121,0.799167,0.281104,1557,4419,5976
|
||||
618,9/9/2012,3,1,9,0,1,0.61,0.578925,0.5475,0.224496,2570,5657,8227
|
||||
619,9/10/2012,3,1,9,1,1,0.583333,0.565654,0.50375,0.258713,1118,6407,7525
|
||||
620,9/11/2012,3,1,9,2,1,0.5775,0.554292,0.52,0.0920542,1070,6697,7767
|
||||
621,9/12/2012,3,1,9,3,1,0.599167,0.570075,0.577083,0.131846,1050,6820,7870
|
||||
622,9/13/2012,3,1,9,4,1,0.6125,0.579558,0.637083,0.0827208,1054,6750,7804
|
||||
623,9/14/2012,3,1,9,5,1,0.633333,0.594083,0.6725,0.103863,1379,6630,8009
|
||||
624,9/15/2012,3,1,9,6,1,0.608333,0.585867,0.501667,0.247521,3160,5554,8714
|
||||
625,9/16/2012,3,1,9,0,1,0.58,0.563125,0.57,0.0901833,2166,5167,7333
|
||||
626,9/17/2012,3,1,9,1,2,0.580833,0.55305,0.734583,0.151742,1022,5847,6869
|
||||
627,9/18/2012,3,1,9,2,2,0.623333,0.565067,0.8725,0.357587,371,3702,4073
|
||||
628,9/19/2012,3,1,9,3,1,0.5525,0.540404,0.536667,0.215175,788,6803,7591
|
||||
629,9/20/2012,3,1,9,4,1,0.546667,0.532192,0.618333,0.118167,939,6781,7720
|
||||
630,9/21/2012,3,1,9,5,1,0.599167,0.571971,0.66875,0.154229,1250,6917,8167
|
||||
631,9/22/2012,3,1,9,6,1,0.65,0.610488,0.646667,0.283583,2512,5883,8395
|
||||
632,9/23/2012,4,1,9,0,1,0.529167,0.518933,0.467083,0.223258,2454,5453,7907
|
||||
633,9/24/2012,4,1,9,1,1,0.514167,0.502513,0.492917,0.142404,1001,6435,7436
|
||||
634,9/25/2012,4,1,9,2,1,0.55,0.544179,0.57,0.236321,845,6693,7538
|
||||
635,9/26/2012,4,1,9,3,1,0.635,0.596613,0.630833,0.2444,787,6946,7733
|
||||
636,9/27/2012,4,1,9,4,2,0.65,0.607975,0.690833,0.134342,751,6642,7393
|
||||
637,9/28/2012,4,1,9,5,2,0.619167,0.585863,0.69,0.164179,1045,6370,7415
|
||||
638,9/29/2012,4,1,9,6,1,0.5425,0.530296,0.542917,0.227604,2589,5966,8555
|
||||
639,9/30/2012,4,1,9,0,1,0.526667,0.517663,0.583333,0.134958,2015,4874,6889
|
||||
640,10/1/2012,4,1,10,1,2,0.520833,0.512,0.649167,0.0908042,763,6015,6778
|
||||
641,10/2/2012,4,1,10,2,3,0.590833,0.542333,0.871667,0.104475,315,4324,4639
|
||||
642,10/3/2012,4,1,10,3,2,0.6575,0.599133,0.79375,0.0665458,728,6844,7572
|
||||
643,10/4/2012,4,1,10,4,2,0.6575,0.607975,0.722917,0.117546,891,6437,7328
|
||||
644,10/5/2012,4,1,10,5,1,0.615,0.580187,0.6275,0.10635,1516,6640,8156
|
||||
645,10/6/2012,4,1,10,6,1,0.554167,0.538521,0.664167,0.268025,3031,4934,7965
|
||||
646,10/7/2012,4,1,10,0,2,0.415833,0.419813,0.708333,0.141162,781,2729,3510
|
||||
647,10/8/2012,4,1,10,1,2,0.383333,0.387608,0.709583,0.189679,874,4604,5478
|
||||
648,10/9/2012,4,1,10,2,2,0.446667,0.438112,0.761667,0.1903,601,5791,6392
|
||||
649,10/10/2012,4,1,10,3,1,0.514167,0.503142,0.630833,0.187821,780,6911,7691
|
||||
650,10/11/2012,4,1,10,4,1,0.435,0.431167,0.463333,0.181596,834,6736,7570
|
||||
651,10/12/2012,4,1,10,5,1,0.4375,0.433071,0.539167,0.235092,1060,6222,7282
|
||||
652,10/13/2012,4,1,10,6,1,0.393333,0.391396,0.494583,0.146142,2252,4857,7109
|
||||
653,10/14/2012,4,1,10,0,1,0.521667,0.508204,0.640417,0.278612,2080,4559,6639
|
||||
654,10/15/2012,4,1,10,1,2,0.561667,0.53915,0.7075,0.296037,760,5115,5875
|
||||
655,10/16/2012,4,1,10,2,1,0.468333,0.460846,0.558333,0.182221,922,6612,7534
|
||||
656,10/17/2012,4,1,10,3,1,0.455833,0.450108,0.692917,0.101371,979,6482,7461
|
||||
657,10/18/2012,4,1,10,4,2,0.5225,0.512625,0.728333,0.236937,1008,6501,7509
|
||||
658,10/19/2012,4,1,10,5,2,0.563333,0.537896,0.815,0.134954,753,4671,5424
|
||||
659,10/20/2012,4,1,10,6,1,0.484167,0.472842,0.572917,0.117537,2806,5284,8090
|
||||
660,10/21/2012,4,1,10,0,1,0.464167,0.456429,0.51,0.166054,2132,4692,6824
|
||||
661,10/22/2012,4,1,10,1,1,0.4875,0.482942,0.568333,0.0814833,830,6228,7058
|
||||
662,10/23/2012,4,1,10,2,1,0.544167,0.530304,0.641667,0.0945458,841,6625,7466
|
||||
663,10/24/2012,4,1,10,3,1,0.5875,0.558721,0.63625,0.0727792,795,6898,7693
|
||||
664,10/25/2012,4,1,10,4,2,0.55,0.529688,0.800417,0.124375,875,6484,7359
|
||||
665,10/26/2012,4,1,10,5,2,0.545833,0.52275,0.807083,0.132467,1182,6262,7444
|
||||
666,10/27/2012,4,1,10,6,2,0.53,0.515133,0.72,0.235692,2643,5209,7852
|
||||
667,10/28/2012,4,1,10,0,2,0.4775,0.467771,0.694583,0.398008,998,3461,4459
|
||||
668,10/29/2012,4,1,10,1,3,0.44,0.4394,0.88,0.3582,2,20,22
|
||||
669,10/30/2012,4,1,10,2,2,0.318182,0.309909,0.825455,0.213009,87,1009,1096
|
||||
670,10/31/2012,4,1,10,3,2,0.3575,0.3611,0.666667,0.166667,419,5147,5566
|
||||
671,11/1/2012,4,1,11,4,2,0.365833,0.369942,0.581667,0.157346,466,5520,5986
|
||||
672,11/2/2012,4,1,11,5,1,0.355,0.356042,0.522083,0.266175,618,5229,5847
|
||||
673,11/3/2012,4,1,11,6,2,0.343333,0.323846,0.49125,0.270529,1029,4109,5138
|
||||
674,11/4/2012,4,1,11,0,1,0.325833,0.329538,0.532917,0.179108,1201,3906,5107
|
||||
675,11/5/2012,4,1,11,1,1,0.319167,0.308075,0.494167,0.236325,378,4881,5259
|
||||
676,11/6/2012,4,1,11,2,1,0.280833,0.281567,0.567083,0.173513,466,5220,5686
|
||||
677,11/7/2012,4,1,11,3,2,0.295833,0.274621,0.5475,0.304108,326,4709,5035
|
||||
678,11/8/2012,4,1,11,4,1,0.352174,0.341891,0.333478,0.347835,340,4975,5315
|
||||
679,11/9/2012,4,1,11,5,1,0.361667,0.355413,0.540833,0.214558,709,5283,5992
|
||||
680,11/10/2012,4,1,11,6,1,0.389167,0.393937,0.645417,0.0578458,2090,4446,6536
|
||||
681,11/11/2012,4,1,11,0,1,0.420833,0.421713,0.659167,0.1275,2290,4562,6852
|
||||
682,11/12/2012,4,1,11,1,1,0.485,0.475383,0.741667,0.173517,1097,5172,6269
|
||||
683,11/13/2012,4,1,11,2,2,0.343333,0.323225,0.662917,0.342046,327,3767,4094
|
||||
684,11/14/2012,4,1,11,3,1,0.289167,0.281563,0.552083,0.199625,373,5122,5495
|
||||
685,11/15/2012,4,1,11,4,2,0.321667,0.324492,0.620417,0.152987,320,5125,5445
|
||||
686,11/16/2012,4,1,11,5,1,0.345,0.347204,0.524583,0.171025,484,5214,5698
|
||||
687,11/17/2012,4,1,11,6,1,0.325,0.326383,0.545417,0.179729,1313,4316,5629
|
||||
688,11/18/2012,4,1,11,0,1,0.3425,0.337746,0.692917,0.227612,922,3747,4669
|
||||
689,11/19/2012,4,1,11,1,2,0.380833,0.375621,0.623333,0.235067,449,5050,5499
|
||||
690,11/20/2012,4,1,11,2,2,0.374167,0.380667,0.685,0.082725,534,5100,5634
|
||||
691,11/21/2012,4,1,11,3,1,0.353333,0.364892,0.61375,0.103246,615,4531,5146
|
||||
692,11/22/2012,4,1,11,4,1,0.34,0.350371,0.580417,0.0528708,955,1470,2425
|
||||
693,11/23/2012,4,1,11,5,1,0.368333,0.378779,0.56875,0.148021,1603,2307,3910
|
||||
694,11/24/2012,4,1,11,6,1,0.278333,0.248742,0.404583,0.376871,532,1745,2277
|
||||
695,11/25/2012,4,1,11,0,1,0.245833,0.257583,0.468333,0.1505,309,2115,2424
|
||||
696,11/26/2012,4,1,11,1,1,0.313333,0.339004,0.535417,0.04665,337,4750,5087
|
||||
697,11/27/2012,4,1,11,2,2,0.291667,0.281558,0.786667,0.237562,123,3836,3959
|
||||
698,11/28/2012,4,1,11,3,1,0.296667,0.289762,0.50625,0.210821,198,5062,5260
|
||||
699,11/29/2012,4,1,11,4,1,0.28087,0.298422,0.555652,0.115522,243,5080,5323
|
||||
700,11/30/2012,4,1,11,5,1,0.298333,0.323867,0.649583,0.0584708,362,5306,5668
|
||||
701,12/1/2012,4,1,12,6,2,0.298333,0.316904,0.806667,0.0597042,951,4240,5191
|
||||
702,12/2/2012,4,1,12,0,2,0.3475,0.359208,0.823333,0.124379,892,3757,4649
|
||||
703,12/3/2012,4,1,12,1,1,0.4525,0.455796,0.7675,0.0827208,555,5679,6234
|
||||
704,12/4/2012,4,1,12,2,1,0.475833,0.469054,0.73375,0.174129,551,6055,6606
|
||||
705,12/5/2012,4,1,12,3,1,0.438333,0.428012,0.485,0.324021,331,5398,5729
|
||||
706,12/6/2012,4,1,12,4,1,0.255833,0.258204,0.50875,0.174754,340,5035,5375
|
||||
707,12/7/2012,4,1,12,5,2,0.320833,0.321958,0.764167,0.1306,349,4659,5008
|
||||
708,12/8/2012,4,1,12,6,2,0.381667,0.389508,0.91125,0.101379,1153,4429,5582
|
||||
709,12/9/2012,4,1,12,0,2,0.384167,0.390146,0.905417,0.157975,441,2787,3228
|
||||
710,12/10/2012,4,1,12,1,2,0.435833,0.435575,0.925,0.190308,329,4841,5170
|
||||
711,12/11/2012,4,1,12,2,2,0.353333,0.338363,0.596667,0.296037,282,5219,5501
|
||||
712,12/12/2012,4,1,12,3,2,0.2975,0.297338,0.538333,0.162937,310,5009,5319
|
||||
713,12/13/2012,4,1,12,4,1,0.295833,0.294188,0.485833,0.174129,425,5107,5532
|
||||
714,12/14/2012,4,1,12,5,1,0.281667,0.294192,0.642917,0.131229,429,5182,5611
|
||||
715,12/15/2012,4,1,12,6,1,0.324167,0.338383,0.650417,0.10635,767,4280,5047
|
||||
716,12/16/2012,4,1,12,0,2,0.3625,0.369938,0.83875,0.100742,538,3248,3786
|
||||
717,12/17/2012,4,1,12,1,2,0.393333,0.4015,0.907083,0.0982583,212,4373,4585
|
||||
718,12/18/2012,4,1,12,2,1,0.410833,0.409708,0.66625,0.221404,433,5124,5557
|
||||
719,12/19/2012,4,1,12,3,1,0.3325,0.342162,0.625417,0.184092,333,4934,5267
|
||||
720,12/20/2012,4,1,12,4,2,0.33,0.335217,0.667917,0.132463,314,3814,4128
|
||||
721,12/21/2012,1,1,12,5,2,0.326667,0.301767,0.556667,0.374383,221,3402,3623
|
||||
722,12/22/2012,1,1,12,6,1,0.265833,0.236113,0.44125,0.407346,205,1544,1749
|
||||
723,12/23/2012,1,1,12,0,1,0.245833,0.259471,0.515417,0.133083,408,1379,1787
|
||||
724,12/24/2012,1,1,12,1,2,0.231304,0.2589,0.791304,0.0772304,174,746,920
|
||||
725,12/25/2012,1,1,12,2,2,0.291304,0.294465,0.734783,0.168726,440,573,1013
|
||||
726,12/26/2012,1,1,12,3,3,0.243333,0.220333,0.823333,0.316546,9,432,441
|
||||
727,12/27/2012,1,1,12,4,2,0.254167,0.226642,0.652917,0.350133,247,1867,2114
|
||||
728,12/28/2012,1,1,12,5,2,0.253333,0.255046,0.59,0.155471,644,2451,3095
|
||||
729,12/29/2012,1,1,12,6,2,0.253333,0.2424,0.752917,0.124383,159,1182,1341
|
||||
730,12/30/2012,1,1,12,0,1,0.255833,0.2317,0.483333,0.350754,364,1432,1796
|
||||
731,12/31/2012,1,1,12,1,2,0.215833,0.223487,0.5775,0.154846,439,2290,2729
|
||||
|
@@ -9,6 +9,13 @@
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -37,7 +44,8 @@
"2. Instantiating AutoMLConfig with the new task type \"forecasting\" for time-series training, along with other time-series settings; for this dataset we use only the basic one, \"time_column_name\" \n",
"3. Training the Model using local compute\n",
"4. Exploring the results\n",
"5. Testing the fitted model"
"5. Viewing the engineered names for featurized data and featurization summary for all raw features\n",
"6. Testing the fitted model"
]
},
{
@@ -47,30 +55,6 @@
"## Setup\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To use the *forecasting* task in AutoML, you need to have the **azuremlftk** package installed in your environment. The following cell tests whether this package is installed locally and, if not, gives you instructions for installing it. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"try:\n",
" import ftk\n",
" print('Using FTK version ' + ftk.__version__)\n",
"except ImportError:\n",
" print(\"Unable to import forecasting package. This notebook does not work without this package.\\n\"\n",
" + \"Please open a command prompt and run `pip install azuremlftk` to install the package. \\n\"\n",
" + \"Make sure you install the package into AutoML's Python environment.\\n\\n\"\n",
" + \"For instance, if AutoML is installed in a conda environment called `python36`, run:\\n\"\n",
" + \"> activate python36\\n> pip install azuremlftk\")"
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -146,12 +130,22 @@
"data.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# let's take note of which columns mean what in the data\n",
"time_column_name = 'timeStamp'\n",
"target_column_name = 'demand'"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Split the data to train and test\n",
"\n"
"### Split the data into train and test sets\n"
]
},
{
@@ -160,50 +154,10 @@
"metadata": {},
"outputs": [],
"source": [
"train = data[data['timeStamp'] < '2017-02-01']\n",
"test = data[data['timeStamp'] >= '2017-02-01']\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prepare the test data; we will feed X_test to the fitted model to get predictions"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"y_test = test.pop('demand').values\n",
"X_test = test"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Split the train data into train and validation sets\n",
"\n",
"Use one month's data as the validation data\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_train = train[train['timeStamp'] < '2017-01-01']\n",
"X_valid = train[train['timeStamp'] >= '2017-01-01']\n",
"y_train = X_train.pop('demand').values\n",
"y_valid = X_valid.pop('demand').values\n",
"print(X_train.shape)\n",
"print(y_train.shape)\n",
"print(X_valid.shape)\n",
"print(y_valid.shape)"
"X_train = data[data[time_column_name] < '2017-02-01']\n",
"X_test = data[data[time_column_name] >= '2017-02-01']\n",
"y_train = X_train.pop(target_column_name).values\n",
"y_test = X_test.pop(target_column_name).values"
]
},
{
@@ -221,9 +175,8 @@
"|**iterations**|Number of iterations. In each iteration, Auto ML trains a specific pipeline on the given data|\n",
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
"|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers. |\n",
"|**X_valid**|Data used to evaluate a model in an iteration. (sparse) array-like, shape = [n_samples, n_features]|\n",
"|**y_valid**|Data used to evaluate a model in an iteration. (sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers. |\n",
"|**y**|(sparse) array-like, shape = [n_samples, ], target values.|\n",
"|**n_cross_validations**|Number of cross validation splits.|\n",
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder. "
]
},
@@ -233,9 +186,8 @@
"metadata": {},
"outputs": [],
"source": [
"time_column_name = 'timeStamp'\n",
"automl_settings = {\n",
" \"time_column_name\": time_column_name,\n",
" \"time_column_name\": time_column_name \n",
"}\n",
"\n",
"\n",
@@ -246,8 +198,7 @@
" iteration_timeout_minutes = 5,\n",
" X = X_train,\n",
" y = y_train,\n",
" X_valid = X_valid,\n",
" y_valid = y_valid,\n",
" n_cross_validations = 3,\n",
" path=project_folder,\n",
" verbosity = logging.INFO,\n",
" **automl_settings)"
@@ -257,7 +208,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"You can call the submit method on the experiment object and pass the run configuration. For local runs, the execution is synchronous. Depending on the data and number of iterations, this can run for a while.\n",
"Submitting the configuration will start a new run in this experiment. For local runs, the execution is synchronous. Depending on the data and number of iterations, this can run for a while. Parameters controlling concurrency may speed up the process, depending on your hardware.\n",
"\n",
"You will see the currently running iterations printing to the console."
]
},
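For reference, a minimal sketch of the submission call described above (assuming the `experiment` and `automl_config` objects defined earlier; `get_output()` is used the same way later in this notebook):

# A minimal sketch, assuming `experiment` and `automl_config` are defined as above.
local_run = experiment.submit(automl_config, show_output=True)
# Once the synchronous local run completes, retrieve the best run and its fitted pipeline.
best_run, fitted_model = local_run.get_output()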
|
||||
@@ -297,13 +249,34 @@
"fitted_model.steps"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### View the engineered names for featurized data\n",
"Below we display the engineered feature names generated for the featurized data using the time-series featurization."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fitted_model.named_steps['timeseriestransformer'].get_engineered_feature_names()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Test the Best Fitted Model\n",
"\n",
"Predict on training and test set, and calculate residual values."
"For forecasting, we will use the `forecast` function instead of the `predict` function. There are two reasons for this.\n",
"\n",
"We need to pass the recent values of the target variable `y`, whereas the scikit-compatible `predict` function only takes the non-target variables `X`. In our case, the test data immediately follows the training data, and we fill the `y` variable with `NaN`. The `NaN` serves as a question mark that the forecaster fills in with a forecast. Using the forecast function will produce forecasts using the shortest possible forecast horizon. The last time at which a definite (non-NaN) value is seen is the _forecast origin_ - the last time when the value of the target is known. \n",
"\n",
"Using the `predict` method would result in getting predictions for EVERY horizon the forecaster can predict at. This is useful when training and evaluating the performance of the forecaster at various horizons, but the level of detail is excessive for normal use."
]
},
{
@@ -312,15 +285,64 @@
"metadata": {},
"outputs": [],
"source": [
"y_pred = fitted_model.predict(X_test)\n",
"y_pred"
"# Replace ALL values in y_pred by NaN. \n",
"# The forecast origin will be at the beginning of the first forecast period\n",
"# (which is the same time as the end of the last training period).\n",
"y_query = y_test.copy().astype(np.float)\n",
"y_query.fill(np.nan)\n",
"# The featurized data, aligned to y, will also be returned.\n",
"# This contains the assumptions that were made in the forecast\n",
"# and helps align the forecast to the original data\n",
"y_fcst, X_trans = fitted_model.forecast(X_test, y_query)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# limit the evaluation to data where y_test has actuals\n",
"def align_outputs(y_predicted, X_trans, X_test, y_test, predicted_column_name = 'predicted'):\n",
" \"\"\"\n",
" Demonstrates how to get the output aligned to the inputs\n",
" using pandas indexes. Helps understand what happened if\n",
" the output's shape differs from the input shape, or if\n",
" the data got re-sorted by time and grain during forecasting.\n",
" \n",
" Typical causes of misalignment are:\n",
" * we predicted some periods that were missing in actuals -> drop from eval\n",
" * model was asked to predict past max_horizon -> increase max horizon\n",
" * data at start of X_test was needed for lags -> provide previous periods\n",
" \"\"\"\n",
" df_fcst = pd.DataFrame({predicted_column_name : y_predicted})\n",
" # y and X outputs are aligned by forecast() function contract\n",
" df_fcst.index = X_trans.index\n",
" \n",
" # align original X_test to y_test \n",
" X_test_full = X_test.copy()\n",
" X_test_full[target_column_name] = y_test\n",
"\n",
" # X_test_full's index does not include the origin, so reset it for the merge\n",
" df_fcst.reset_index(inplace=True)\n",
" X_test_full = X_test_full.reset_index().drop(columns='index')\n",
" together = df_fcst.merge(X_test_full, how='right')\n",
" \n",
" # drop rows where prediction or actuals are nan \n",
" # happens because of missing actuals \n",
" # or at edges of time due to lags/rolling windows\n",
" clean = together[together[[target_column_name, predicted_column_name]].notnull().all(axis=1)]\n",
" return(clean)\n",
"\n",
"df_all = align_outputs(y_fcst, X_trans, X_test, y_test)\n",
"df_all.head()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Use the check-data function to remove the NaN values from y_test to avoid errors when calculating metrics "
"Looking at `X_trans` is also useful to see what featurization happened to the data."
]
},
{
@@ -329,29 +351,14 @@
"metadata": {},
"outputs": [],
"source": [
"if len(y_test) != len(y_pred):\n",
" raise ValueError(\n",
" 'the true values and prediction values do not have equal length.')\n",
"elif len(y_test) == 0:\n",
" raise ValueError(\n",
" 'y_true and y_pred are empty.')\n",
"\n",
"# if there is any non-numeric element in the y_true or y_pred,\n",
"# the ValueError exception will be thrown.\n",
"y_test_f = np.array(y_test).astype(float)\n",
"y_pred_f = np.array(y_pred).astype(float)\n",
"\n",
"# remove entries both in y_true and y_pred where at least\n",
"# one element in y_true or y_pred is missing\n",
"y_test = y_test_f[~(np.isnan(y_test_f) | np.isnan(y_pred_f))]\n",
"y_pred = y_pred_f[~(np.isnan(y_test_f) | np.isnan(y_pred_f))]"
"X_trans"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Calculate metrics for the prediction\n"
"### Calculate accuracy metrics\n"
]
},
{
@@ -360,26 +367,180 @@
"metadata": {},
"outputs": [],
"source": [
"print(\"[Test Data] \\nRoot Mean squared error: %.2f\" % np.sqrt(mean_squared_error(y_test, y_pred)))\n",
"# Explained variance score: 1 is perfect prediction\n",
"print('mean_absolute_error score: %.2f' % mean_absolute_error(y_test, y_pred))\n",
"print('R2 score: %.2f' % r2_score(y_test, y_pred))\n",
"\n",
"\n",
"def MAPE(actual, pred):\n",
" \"\"\"\n",
" Calculate mean absolute percentage error.\n",
" Remove NA and values where actual is close to zero\n",
" \"\"\"\n",
" not_na = ~(np.isnan(actual) | np.isnan(pred))\n",
" not_zero = ~np.isclose(actual, 0.0)\n",
" actual_safe = actual[not_na & not_zero]\n",
" pred_safe = pred[not_na & not_zero]\n",
" APE = 100*np.abs((actual_safe - pred_safe)/actual_safe)\n",
" return np.mean(APE)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"Simple forecasting model\")\n",
"rmse = np.sqrt(mean_squared_error(df_all[target_column_name], df_all['predicted']))\n",
"print(\"[Test Data] \\nRoot Mean squared error: %.2f\" % rmse)\n",
"mae = mean_absolute_error(df_all[target_column_name], df_all['predicted'])\n",
"print('mean_absolute_error score: %.2f' % mae)\n",
"print('MAPE: %.2f' % MAPE(df_all[target_column_name], df_all['predicted']))\n",
"\n",
"# Plot outputs\n",
"%matplotlib notebook\n",
"test_pred = plt.scatter(y_test, y_pred, color='b')\n",
"test_pred = plt.scatter(df_all[target_column_name], df_all['predicted'], color='b')\n",
"test_test = plt.scatter(y_test, y_test, color='g')\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The distribution looks a little heavy tailed: we underestimate the excursions of the extremes. A normal-quantile transform of the target might help (a sketch follows below), but let's first try using some past data with the lags and rolling window transforms.\n"
]
},
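The normal-quantile transform mentioned above is not implemented in this notebook; as a purely hypothetical sketch, scikit-learn's `QuantileTransformer` could Gaussianize the target before training and invert the predictions afterwards:

import numpy as np
from sklearn.preprocessing import QuantileTransformer

# Hypothetical sketch: map the heavy-tailed target to a normal distribution.
qt = QuantileTransformer(n_quantiles=min(1000, len(y_train)),
                         output_distribution='normal')
y_train_gauss = qt.fit_transform(y_train.reshape(-1, 1)).ravel()
# ...train on y_train_gauss, then map forecasts back to the original scale:
# y_fcst = qt.inverse_transform(y_fcst_gauss.reshape(-1, 1)).ravel()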
|
||||
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Using lags and rolling window features to improve the forecast"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We did not use lags in the previous model specification. In effect, the prediction was the result of a simple regression on date, grain and any additional features. This is often a very good prediction as common time series patterns like seasonality and trends can be captured in this manner. Such simple regression is horizon-less: it doesn't matter how far into the future we are predicting, because we are not using past data.\n",
"\n",
"Now that we have configured target lags, that is, the previous values of the target variable, the prediction is no longer horizon-less. We therefore must specify the `max_horizon` that the model will learn to forecast. The `target_lags` keyword specifies how far back we will construct the lags of the target variable, and the `target_rolling_window_size` specifies the size of the rolling window over which we will generate the `max`, `min` and `sum` features."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_settings_lags = {\n",
" 'time_column_name': time_column_name,\n",
" 'target_lags': 1,\n",
" 'target_rolling_window_size': 5,\n",
" # you MUST set the max_horizon when using lags and rolling windows\n",
" # it is optional when look-back features are not used \n",
" 'max_horizon': len(y_test), # only one grain\n",
"}\n",
"\n",
"\n",
"automl_config_lags = AutoMLConfig(task = 'forecasting',\n",
" debug_log = 'automl_nyc_energy_errors.log',\n",
" primary_metric='normalized_root_mean_squared_error',\n",
" iterations = 10,\n",
" iteration_timeout_minutes = 5,\n",
" X = X_train,\n",
" y = y_train,\n",
" n_cross_validations = 3,\n",
" path=project_folder,\n",
" verbosity = logging.INFO,\n",
" **automl_settings_lags)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"local_run_lags = experiment.submit(automl_config_lags, show_output=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run_lags, fitted_model_lags = local_run_lags.get_output()\n",
"y_fcst_lags, X_trans_lags = fitted_model_lags.forecast(X_test, y_query)\n",
"df_lags = align_outputs(y_fcst_lags, X_trans_lags, X_test, y_test)\n",
"df_lags.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_trans_lags"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"Forecasting model with lags\")\n",
"rmse = np.sqrt(mean_squared_error(df_lags[target_column_name], df_lags['predicted']))\n",
"print(\"[Test Data] \\nRoot Mean squared error: %.2f\" % rmse)\n",
"mae = mean_absolute_error(df_lags[target_column_name], df_lags['predicted'])\n",
"print('mean_absolute_error score: %.2f' % mae)\n",
"print('MAPE: %.2f' % MAPE(df_lags[target_column_name], df_lags['predicted']))\n",
"\n",
"# Plot outputs\n",
"%matplotlib notebook\n",
"test_pred = plt.scatter(df_lags[target_column_name], df_lags['predicted'], color='b')\n",
"test_test = plt.scatter(y_test, y_test, color='g')\n",
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### What features matter for the forecast?"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.automl.automlexplainer import explain_model\n",
"\n",
"# feature names are everything in the transformed data except the target\n",
"features = X_trans.columns[:-1]\n",
"expl = explain_model(fitted_model, X_train, X_test, features = features, best_run=best_run_lags, y_train = y_train)\n",
"# unpack the tuple\n",
"shap_values, expected_values, feat_overall_imp, feat_names, per_class_summary, per_class_imp = expl\n",
"best_run_lags"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Please go to the Azure Portal's best run to see the top features chart.\n",
"\n",
"The informative features make all sorts of intuitive sense. Temperature is a strong driver of heating and cooling demand in NYC. Apart from that, the daily life cycle, expressed by `hour`, and the weekly cycle, expressed by `wday`, drive people's energy use habits."
]
}
],
"metadata": {
"authors": [
{
"name": "xiaga"
"name": "xiaga, tosingli"
}
],
"kernelspec": {
@@ -397,7 +558,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
"version": "3.6.7"
}
},
"nbformat": 4,
@@ -9,6 +9,13 @@
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -20,7 +27,9 @@
|
||||
"1. [Introduction](#Introduction)\n",
|
||||
"1. [Setup](#Setup)\n",
|
||||
"1. [Data](#Data)\n",
|
||||
"1. [Train](#Train)"
|
||||
"1. [Train](#Train)\n",
|
||||
"1. [Predict](#Predict)\n",
|
||||
"1. [Operationalize](#Operationalize)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -38,7 +47,7 @@
|
||||
"3. Find and train a forecasting model using local compute\n",
|
||||
"4. Evaluate the performance of the model\n",
|
||||
"\n",
|
||||
"The examples in the follow code samples use the [University of Chicago's Dominick's Finer Foods dataset](https://research.chicagobooth.edu/kilts/marketing-databases/dominicks) to forecast orange juice sales. Dominick's was a grocery chain in the Chicago metropolitan area."
|
||||
"The examples in the follow code samples use the University of Chicago's Dominick's Finer Foods dataset to forecast orange juice sales. Dominick's was a grocery chain in the Chicago metropolitan area."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -48,30 +57,6 @@
|
||||
"## Setup"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To use the *forecasting* task in AutoML, you need to have the **azuremlftk** package installed in your environment. The following cell tests whether this package is installed locally and, if not, gives you instructions for installing it."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"try:\n",
|
||||
" import ftk\n",
|
||||
" print('Using FTK version ' + ftk.__version__)\n",
|
||||
"except ImportError:\n",
|
||||
" print(\"Unable to import forecasting package. This notebook does not work without this package.\\n\"\n",
|
||||
" + \"Please open a command prompt and run `pip install azuremlftk` to install the package. \\n\"\n",
|
||||
" + \"Make sure you install the package into AutoML's Python environment.\\n\\n\"\n",
|
||||
" + \"For instance, if AutoML is installed in a conda environment called `python36`, run:\\n\"\n",
|
||||
" + \"> activate python36\\n> pip install azuremlftk\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -109,9 +94,9 @@
|
||||
"ws = Workspace.from_config()\n",
|
||||
"\n",
|
||||
"# choose a name for the run history container in the workspace\n",
|
||||
"experiment_name = 'automl-ojsalesforecasting'\n",
|
||||
"experiment_name = 'automl-ojforecasting'\n",
|
||||
"# project folder\n",
|
||||
"project_folder = './sample_projects/automl-local-ojsalesforecasting'\n",
|
||||
"project_folder = './sample_projects/automl-local-ojforecasting'\n",
|
||||
"\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
@@ -171,8 +156,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Data Splitting\n",
|
||||
"For the purposes of demonstration and later forecast evaluation, we now split the data into a training and a testing set. The test set will contain the final 20 weeks of observed sales for each time-series."
|
||||
"For demonstration purposes, we extract sales time-series for just a few of the stores:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -181,19 +165,37 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ntest_periods = 20\n",
|
||||
"use_stores = [2, 5, 8]\n",
|
||||
"data_subset = data[data.Store.isin(use_stores)]\n",
|
||||
"nseries = data_subset.groupby(grain_column_names).ngroups\n",
|
||||
"print('Data subset contains {0} individual time-series.'.format(nseries))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Data Splitting\n",
|
||||
"We now split the data into a training and a testing set for later forecast evaluation. The test set will contain the final 20 weeks of observed sales for each time-series. The splits should be stratified by series, so we use a group-by statement on the grain columns."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"n_test_periods = 20\n",
|
||||
"\n",
|
||||
"def split_last_n_by_grain(df, n):\n",
|
||||
" \"\"\"\n",
|
||||
" Group df by grain and split on last n rows for each group\n",
|
||||
" \"\"\"\n",
|
||||
" \"\"\"Group df by grain and split on last n rows for each group.\"\"\"\n",
|
||||
" df_grouped = (df.sort_values(time_column_name) # Sort by ascending time\n",
|
||||
" .groupby(grain_column_names, group_keys=False))\n",
|
||||
" df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-n])\n",
|
||||
" df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-n:])\n",
|
||||
" return df_head, df_tail\n",
|
||||
"\n",
|
||||
"X_train, X_test = split_last_n_by_grain(data, ntest_periods)"
|
||||
"X_train, X_test = split_last_n_by_grain(data_subset, n_test_periods)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -211,24 +213,7 @@
|
||||
"\n",
|
||||
"AutoML will currently train a single, regression-type model across **all** time-series in a given training set. This allows the model to generalize across related series.\n",
|
||||
"\n",
|
||||
"You are almost ready to start an AutoML training job. We will first need to create a validation set from the existing training set (i.e. for hyper-parameter tuning): "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"nvalidation_periods = 20\n",
|
||||
"X_train, X_validate = split_last_n_by_grain(X_train, nvalidation_periods)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We also need to separate the target column from the rest of the DataFrame: "
|
||||
"You are almost ready to start an AutoML training job. First, we need to separate the target column from the rest of the DataFrame: "
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -238,8 +223,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"target_column_name = 'Quantity'\n",
|
||||
"y_train = X_train.pop(target_column_name).values\n",
|
||||
"y_validate = X_validate.pop(target_column_name).values "
|
||||
"y_train = X_train.pop(target_column_name).values"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -248,22 +232,31 @@
|
||||
"source": [
|
||||
"## Train\n",
|
||||
"\n",
|
||||
"The AutoMLConfig object defines the settings and data for an AutoML training job. Here, we set necessary inputs like the task type, the number of AutoML iterations to try, and the training and validation data. \n",
|
||||
"The AutoMLConfig object defines the settings and data for an AutoML training job. Here, we set necessary inputs like the task type, the number of AutoML iterations to try, the training data, and cross-validation parameters. \n",
|
||||
"\n",
|
||||
"For forecasting tasks, there are some additional parameters that can be set: the name of the column holding the date/time and the grain column names. A time column is required for forecasting, while the grain is optional. If a grain is not given, the forecaster assumes that the whole dataset is a single time-series. We also pass a list of columns to drop prior to modeling. The _logQuantity_ column is completely correlated with the target quantity, so it must be removed to prevent a target leak. \n",
|
||||
"For forecasting tasks, there are some additional parameters that can be set: the name of the column holding the date/time, the grain column names, and the maximum forecast horizon. A time column is required for forecasting, while the grain is optional. If a grain is not given, AutoML assumes that the whole dataset is a single time-series. We also pass a list of columns to drop prior to modeling. The _logQuantity_ column is completely correlated with the target quantity, so it must be removed to prevent a target leak.\n",
|
||||
"\n",
|
||||
"The forecast horizon is given in units of the time-series frequency; for instance, the OJ series frequency is weekly, so a horizon of 20 means that a trained model will estimate sales up-to 20 weeks beyond the latest date in the training data for each series. In this example, we set the maximum horizon to the number of samples per series in the test set (n_test_periods). Generally, the value of this parameter will be dictated by business needs. For example, a demand planning organizaion that needs to estimate the next month of sales would set the horizon accordingly. \n",
|
||||
"\n",
|
||||
"Finally, a note about the cross-validation (CV) procedure for time-series data. AutoML uses out-of-sample error estimates to select a best pipeline/model, so it is important that the CV fold splitting is done correctly. Time-series can violate the basic statistical assumptions of the canonical K-Fold CV strategy, so AutoML implements a [rolling origin validation](https://robjhyndman.com/hyndsight/tscv/) procedure to create CV folds for time-series data. To use this procedure, you just need to specify the desired number of CV folds in the AutoMLConfig object. It is also possible to bypass CV and use your own validation set by setting the *X_valid* and *y_valid* parameters of AutoMLConfig.\n",
|
||||
"\n",
|
||||
"Here is a summary of AutoMLConfig parameters used for training the OJ model:\n",
|
||||
"\n",
|
||||
"|Property|Description|\n",
|
||||
"|-|-|\n",
|
||||
"|**task**|forecasting|\n",
|
||||
"|**primary_metric**|This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>\n",
|
||||
"|**iterations**|Number of iterations. In each iteration, Auto ML trains a specific pipeline on the given data|\n",
|
||||
"|**X**|Training matrix of features, shape = [n_training_samples, n_features]|\n",
|
||||
"|**y**|Target values, shape = [n_training_samples, ]|\n",
|
||||
"|**X_valid**|Validation matrix of features, shape = [n_validation_samples, n_features]|\n",
|
||||
"|**y_valid**|Target values for validation, shape = [n_validation_samples, ]\n",
|
||||
"|**X**|Training matrix of features as a pandas DataFrame, shape = [n_training_samples, n_features]|\n",
|
||||
"|**y**|Target values as a numpy.ndarray, shape = [n_training_samples, ]|\n",
|
||||
"|**n_cross_validations**|Number of cross-validation folds to use for model/pipeline selection|\n",
|
||||
"|**enable_ensembling**|Allow AutoML to create ensembles of the best performing models\n",
|
||||
"|**debug_log**|Log file path for writing debugging information\n",
|
||||
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder. "
|
||||
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|\n",
|
||||
"|**time_column_name**|Name of the datetime column in the input data|\n",
|
||||
"|**grain_column_names**|Name(s) of the columns defining individual series in the input data|\n",
|
||||
"|**drop_column_names**|Name(s) of columns to drop prior to modeling|\n",
|
||||
"|**max_horizon**|Maximum desired forecast horizon in units of time-series frequency|"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -272,24 +265,24 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_settings = {\n",
|
||||
"time_series_settings = {\n",
|
||||
" 'time_column_name': time_column_name,\n",
|
||||
" 'grain_column_names': grain_column_names,\n",
|
||||
" 'drop_column_names': ['logQuantity']\n",
|
||||
" 'drop_column_names': ['logQuantity'],\n",
|
||||
" 'max_horizon': n_test_periods # optional\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"automl_config = AutoMLConfig(task='forecasting',\n",
|
||||
" debug_log='automl_oj_sales_errors.log',\n",
|
||||
" primary_metric='normalized_root_mean_squared_error',\n",
|
||||
" primary_metric='normalized_mean_absolute_error',\n",
|
||||
" iterations=10,\n",
|
||||
" X=X_train,\n",
|
||||
" y=y_train,\n",
|
||||
" X_valid=X_validate,\n",
|
||||
" y_valid=y_validate,\n",
|
||||
" n_cross_validations=5,\n",
|
||||
" enable_ensembling=False,\n",
|
||||
" path=project_folder,\n",
|
||||
" verbosity=logging.INFO,\n",
|
||||
" **automl_settings)"
|
||||
" **time_series_settings)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -309,15 +302,6 @@
|
||||
"local_run = experiment.submit(automl_config, show_output=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"local_run"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -340,7 +324,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Make Predictions from the Best Fitted Model\n",
|
||||
"# Predict\n",
|
||||
"Now that we have retrieved the best pipeline/model, it can be used to make predictions on test data. First, we remove the target values from the test set:"
|
||||
]
|
||||
},
|
||||
@@ -368,7 +352,7 @@
|
||||
"source": [
|
||||
"To produce predictions on the test set, we need to know the feature values at all dates in the test set. This requirement is somewhat reasonable for the OJ sales data since the features mainly consist of price, which is usually set in advance, and customer demographics which are approximately constant for each store over the 20 week forecast horizon in the testing data. \n",
|
||||
"\n",
|
||||
"The target predictions can be retrieved by calling the `predict` method on the best model:"
|
||||
"We will first create a query `y_query`, which is aligned index-for-index to `X_test`. This is a vector of target values where each `NaN` serves the function of the question mark to be replaced by forecast. Passing definite values in the `y` argument allows the `forecast` function to make predictions on data that does not immediately follow the train data which contains `y`. In each grain, the last time point where the model sees a definite value of `y` is that grain's _forecast origin_."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -377,15 +361,76 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y_pred = fitted_pipeline.predict(X_test)"
|
||||
"# Replace ALL values in y_pred by NaN.\n",
|
||||
"# The forecast origin will be at the beginning of the first forecast period.\n",
|
||||
"# (Which is the same time as the end of the last training period.)\n",
|
||||
"y_query = y_test.copy().astype(np.float)\n",
|
||||
"y_query.fill(np.nan)\n",
|
||||
"# The featurized data, aligned to y, will also be returned.\n",
|
||||
"# This contains the assumptions that were made in the forecast\n",
|
||||
"# and helps align the forecast to the original data\n",
|
||||
"y_pred, X_trans = fitted_pipeline.forecast(X_test, y_query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Calculate evaluation metrics for the prediction\n",
|
||||
"To evaluate the accuracy of the forecast, we'll compare against the actual sales quantities for some select metrics, included the mean absolute percentage error (MAPE)."
|
||||
"If you are used to scikit pipelines, perhaps you expected `predict(X_test)`. However, forecasting requires a more general interface that also supplies the past target `y` values. Please use `forecast(X,y)` as `predict(X)` is reserved for internal purposes on forecasting models.\n",
|
||||
"\n",
|
||||
"The [energy demand forecasting notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand) demonstrates the use of the forecast function in more detail in the context of using lags and rolling window features. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Evaluate\n",
|
||||
"\n",
|
||||
"To evaluate the accuracy of the forecast, we'll compare against the actual sales quantities for some select metrics, included the mean absolute percentage error (MAPE). \n",
|
||||
"\n",
|
||||
"It is a good practice to always align the output explicitly to the input, as the count and order of the rows may have changed during transformations that span multiple rows."
|
||||
]
|
||||
},
|
||||
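For reference, the MAPE reported below uses the standard definition, computed over rows with non-missing, non-zero actuals (matching the `MAPE` helper defined later in the notebook):

$$\mathrm{MAPE} = \frac{100}{n}\sum_{i=1}^{n}\left|\frac{y_i - \hat{y}_i}{y_i}\right|$$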
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def align_outputs(y_predicted, X_trans, X_test, y_test, predicted_column_name = 'predicted'):\n",
|
||||
" \"\"\"\n",
|
||||
" Demonstrates how to get the output aligned to the inputs\n",
|
||||
" using pandas indexes. Helps understand what happened if\n",
|
||||
" the output's shape differs from the input shape, or if\n",
|
||||
" the data got re-sorted by time and grain during forecasting.\n",
|
||||
" \n",
|
||||
" Typical causes of misalignment are:\n",
|
||||
" * we predicted some periods that were missing in actuals -> drop from eval\n",
|
||||
" * model was asked to predict past max_horizon -> increase max horizon\n",
|
||||
" * data at start of X_test was needed for lags -> provide previous periods in y\n",
|
||||
" \"\"\"\n",
|
||||
" \n",
|
||||
" df_fcst = pd.DataFrame({predicted_column_name : y_predicted})\n",
|
||||
" # y and X outputs are aligned by forecast() function contract\n",
|
||||
" df_fcst.index = X_trans.index\n",
|
||||
" \n",
|
||||
" # align original X_test to y_test \n",
|
||||
" X_test_full = X_test.copy()\n",
|
||||
" X_test_full[target_column_name] = y_test\n",
|
||||
"\n",
|
||||
" # X_test_full's index does not include origin, so reset for merge\n",
|
||||
" df_fcst.reset_index(inplace=True)\n",
|
||||
" X_test_full = X_test_full.reset_index().drop(columns='index')\n",
|
||||
" together = df_fcst.merge(X_test_full, how='right')\n",
|
||||
" \n",
|
||||
" # drop rows where prediction or actuals are nan \n",
|
||||
" # happens because of missing actuals \n",
|
||||
" # or at edges of time due to lags/rolling windows\n",
|
||||
" clean = together[together[[target_column_name, predicted_column_name]].notnull().all(axis=1)]\n",
|
||||
" return(clean)\n",
|
||||
"\n",
|
||||
"df_all = align_outputs(y_pred, X_trans, X_test, y_test)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -404,18 +449,392 @@
|
||||
" actual_safe = actual[not_na & not_zero]\n",
|
||||
" pred_safe = pred[not_na & not_zero]\n",
|
||||
" APE = 100*np.abs((actual_safe - pred_safe)/actual_safe)\n",
|
||||
" return np.mean(APE)\n",
|
||||
" return np.mean(APE)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"Simple forecasting model\")\n",
|
||||
"rmse = np.sqrt(mean_squared_error(df_all[target_column_name], df_all['predicted']))\n",
|
||||
"print(\"[Test Data] \\nRoot Mean squared error: %.2f\" % rmse)\n",
|
||||
"mae = mean_absolute_error(df_all[target_column_name], df_all['predicted'])\n",
|
||||
"print('mean_absolute_error score: %.2f' % mae)\n",
|
||||
"print('MAPE: %.2f' % MAPE(df_all[target_column_name], df_all['predicted']))\n",
|
||||
"\n",
|
||||
"print(\"[Test Data] \\nRoot Mean squared error: %.2f\" % np.sqrt(mean_squared_error(y_test, y_pred)))\n",
|
||||
"print('mean_absolute_error score: %.2f' % mean_absolute_error(y_test, y_pred))\n",
|
||||
"print('MAPE: %.2f' % MAPE(y_test, y_pred))"
|
||||
"# Plot outputs\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"\n",
|
||||
"%matplotlib notebook\n",
|
||||
"test_pred = plt.scatter(df_all[target_column_name], df_all['predicted'], color='b')\n",
|
||||
"test_test = plt.scatter(y_test, y_test, color='g')\n",
|
||||
"plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\n",
|
||||
"plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Operationalize"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"_Operationalization_ means getting the model into the cloud so that other can run it after you close the notebook. We will create a docker running on Azure Container Instances with the model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"description = 'AutoML OJ forecaster'\n",
|
||||
"tags = None\n",
|
||||
"model = local_run.register_model(description = description, tags = tags)\n",
|
||||
"\n",
|
||||
"print(local_run.model_id)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Develop the scoring script\n",
|
||||
"\n",
|
||||
"Serializing and deserializing complex data frames may be tricky. We first develop the `run()` function of the scoring script locally, then write it into a scoring script. It is much easier to debug any quirks of the scoring function without crossing two compute environments. For this exercise, we handle a common quirk of how pandas dataframes serialize time stamp values."
|
||||
]
|
||||
},
|
||||
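The quirk in question, sketched under the assumption of default pandas `to_json` behavior: datetimes are serialized as epoch milliseconds, so they must be converted back explicitly on the receiving side.

```python
import pandas as pd

df = pd.DataFrame({'WeekStarting': pd.to_datetime(['1990-06-14'])})
payload = df.to_json()  # datetimes become epoch ms, e.g. 645321600000

# Round-trip: read without date conversion, then restore the datetime dtype.
df2 = pd.read_json(payload, convert_dates=False)
df2['WeekStarting'] = pd.to_datetime(df2['WeekStarting'], unit='ms')
```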
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# this is where we test the run function of the scoring script interactively\n",
|
||||
"# before putting it in the scoring script\n",
|
||||
"\n",
|
||||
"timestamp_columns = ['WeekStarting']\n",
|
||||
"\n",
|
||||
"def run(rawdata, test_model = None):\n",
|
||||
" \"\"\"\n",
|
||||
" Intended to process 'rawdata' string produced by\n",
|
||||
" \n",
|
||||
" {'X': X_test.to_json(), y' : y_test.to_json()}\n",
|
||||
" \n",
|
||||
" Don't convert the X payload to numpy.array, use it as pandas.DataFrame\n",
|
||||
" \"\"\"\n",
|
||||
" try:\n",
|
||||
" # unpack the data frame with timestamp \n",
|
||||
" rawobj = json.loads(rawdata) # rawobj is now a dict of strings \n",
|
||||
" X_pred = pd.read_json(rawobj['X'], convert_dates=False) # load the pandas DF from a json string\n",
|
||||
" for col in timestamp_columns: # fix timestamps\n",
|
||||
" X_pred[col] = pd.to_datetime(X_pred[col], unit='ms') \n",
|
||||
" \n",
|
||||
" y_pred = np.array(rawobj['y']) # reconstitute numpy array from serialized list\n",
|
||||
" \n",
|
||||
" if test_model is None:\n",
|
||||
" result = model.forecast(X_pred, y_pred) # use the global model from init function\n",
|
||||
" else:\n",
|
||||
" result = test_model.forecast(X_pred, y_pred) # use the model on which we are testing\n",
|
||||
" \n",
|
||||
" except Exception as e:\n",
|
||||
" result = str(e)\n",
|
||||
" return json.dumps({\"error\": result})\n",
|
||||
" \n",
|
||||
" forecast_as_list = result[0].tolist()\n",
|
||||
" index_as_df = result[1].index.to_frame().reset_index(drop=True)\n",
|
||||
" \n",
|
||||
" return json.dumps({\"forecast\": forecast_as_list, # return the minimum over the wire: \n",
|
||||
" \"index\": index_as_df.to_json() # no forecast and its featurized values\n",
|
||||
" })"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# test the run function here before putting in the scoring script\n",
|
||||
"import json\n",
|
||||
"\n",
|
||||
"test_sample = json.dumps({'X': X_test.to_json(), 'y' : y_query.tolist()})\n",
|
||||
"response = run(test_sample, fitted_pipeline)\n",
|
||||
"\n",
|
||||
"# unpack the response, dealing with the timestamp serialization again\n",
|
||||
"res_dict = json.loads(response)\n",
|
||||
"y_fcst_all = pd.read_json(res_dict['index'])\n",
|
||||
"y_fcst_all[time_column_name] = pd.to_datetime(y_fcst_all[time_column_name], unit = 'ms')\n",
|
||||
"y_fcst_all['forecast'] = res_dict['forecast']\n",
|
||||
"y_fcst_all.head()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now that the function works locally in the notebook, let's write it down into the scoring script. The scoring script is authored by the data scientist. Adjust it to taste, adding inputs, outputs and processing as needed."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%writefile score_fcast.py\n",
|
||||
"import pickle\n",
|
||||
"import json\n",
|
||||
"import numpy as np\n",
|
||||
"import pandas as pd\n",
|
||||
"import azureml.train.automl\n",
|
||||
"from sklearn.externals import joblib\n",
|
||||
"from azureml.core.model import Model\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def init():\n",
|
||||
" global model\n",
|
||||
" model_path = Model.get_model_path(model_name = '<<modelid>>') # this name is model.id of model that we want to deploy\n",
|
||||
" # deserialize the model file back into a sklearn model\n",
|
||||
" model = joblib.load(model_path)\n",
|
||||
"\n",
|
||||
"timestamp_columns = ['WeekStarting']\n",
|
||||
"\n",
|
||||
"def run(rawdata, test_model = None):\n",
|
||||
" \"\"\"\n",
|
||||
" Intended to process 'rawdata' string produced by\n",
|
||||
" \n",
|
||||
" {'X': X_test.to_json(), y' : y_test.to_json()}\n",
|
||||
" \n",
|
||||
" Don't convert the X payload to numpy.array, use it as pandas.DataFrame\n",
|
||||
" \"\"\"\n",
|
||||
" try:\n",
|
||||
" # unpack the data frame with timestamp \n",
|
||||
" rawobj = json.loads(rawdata) # rawobj is now a dict of strings \n",
|
||||
" X_pred = pd.read_json(rawobj['X'], convert_dates=False) # load the pandas DF from a json string\n",
|
||||
" for col in timestamp_columns: # fix timestamps\n",
|
||||
" X_pred[col] = pd.to_datetime(X_pred[col], unit='ms') \n",
|
||||
" \n",
|
||||
" y_pred = np.array(rawobj['y']) # reconstitute numpy array from serialized list\n",
|
||||
" \n",
|
||||
" if test_model is None:\n",
|
||||
" result = model.forecast(X_pred, y_pred) # use the global model from init function\n",
|
||||
" else:\n",
|
||||
" result = test_model.forecast(X_pred, y_pred) # use the model on which we are testing\n",
|
||||
" \n",
|
||||
" except Exception as e:\n",
|
||||
" result = str(e)\n",
|
||||
" return json.dumps({\"error\": result})\n",
|
||||
" \n",
|
||||
" # prepare to send over wire as json\n",
|
||||
" forecast_as_list = result[0].tolist()\n",
|
||||
" index_as_df = result[1].index.to_frame().reset_index(drop=True)\n",
|
||||
" \n",
|
||||
" return json.dumps({\"forecast\": forecast_as_list, # return the minimum over the wire: \n",
|
||||
" \"index\": index_as_df.to_json() # no forecast and its featurized values\n",
|
||||
" })"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# get the model\n",
|
||||
"from azureml.train.automl.run import AutoMLRun\n",
|
||||
"\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"ml_run = AutoMLRun(experiment = experiment, run_id = local_run.id)\n",
|
||||
"best_iteration = int(str.split(best_run.id,'_')[-1]) # the iteration number is a postfix of the run ID."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# get the best model's dependencies and write them into this file\n",
|
||||
"from azureml.core.conda_dependencies import CondaDependencies\n",
|
||||
"\n",
|
||||
"conda_env_file_name = 'fcast_env.yml'\n",
|
||||
"\n",
|
||||
"dependencies = ml_run.get_run_sdk_dependencies(iteration = best_iteration)\n",
|
||||
"for p in ['azureml-train-automl', 'azureml-sdk', 'azureml-core']:\n",
|
||||
" print('{}\\t{}'.format(p, dependencies[p]))\n",
|
||||
"\n",
|
||||
"myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'], pip_packages=['azureml-sdk[automl]'])\n",
|
||||
"\n",
|
||||
"myenv.save_to_file('.', conda_env_file_name)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# this is the script file name we wrote a few cells above\n",
|
||||
"script_file_name = 'score_fcast.py'\n",
|
||||
"\n",
|
||||
"# Substitute the actual version number in the environment file.\n",
|
||||
"# This is not strictly needed in this notebook because the model should have been generated using the current SDK version.\n",
|
||||
"# However, we include this in case this code is used on an experiment from a previous SDK version.\n",
|
||||
"\n",
|
||||
"with open(conda_env_file_name, 'r') as cefr:\n",
|
||||
" content = cefr.read()\n",
|
||||
"\n",
|
||||
"with open(conda_env_file_name, 'w') as cefw:\n",
|
||||
" cefw.write(content.replace(azureml.core.VERSION, dependencies['azureml-sdk']))\n",
|
||||
"\n",
|
||||
"# Substitute the actual model id in the script file.\n",
|
||||
"\n",
|
||||
"with open(script_file_name, 'r') as cefr:\n",
|
||||
" content = cefr.read()\n",
|
||||
"\n",
|
||||
"with open(script_file_name, 'w') as cefw:\n",
|
||||
" cefw.write(content.replace('<<modelid>>', local_run.model_id))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create a Container Image"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.image import Image, ContainerImage\n",
|
||||
"\n",
|
||||
"image_config = ContainerImage.image_configuration(runtime= \"python\",\n",
|
||||
" execution_script = script_file_name,\n",
|
||||
" conda_file = conda_env_file_name,\n",
|
||||
" tags = {'type': \"automl-forecasting\"},\n",
|
||||
" description = \"Image for automl forecasting sample\")\n",
|
||||
"\n",
|
||||
"image = Image.create(name = \"automl-fcast-image\",\n",
|
||||
" # this is the model object \n",
|
||||
" models = [model],\n",
|
||||
" image_config = image_config, \n",
|
||||
" workspace = ws)\n",
|
||||
"\n",
|
||||
"image.wait_for_creation(show_output = True)\n",
|
||||
"\n",
|
||||
"if image.creation_state == 'Failed':\n",
|
||||
" print(\"Image build log at: \" + image.image_build_log_uri)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Deploy the Image as a Web Service on Azure Container Instance"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.webservice import AciWebservice\n",
|
||||
"\n",
|
||||
"aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1, \n",
|
||||
" memory_gb = 2, \n",
|
||||
" tags = {'type': \"automl-forecasting\"},\n",
|
||||
" description = \"Automl forecasting sample service\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.webservice import Webservice\n",
|
||||
"\n",
|
||||
"aci_service_name = 'automl-forecast-01'\n",
|
||||
"print(aci_service_name)\n",
|
||||
"\n",
|
||||
"aci_service = Webservice.deploy_from_image(deployment_config = aciconfig,\n",
|
||||
" image = image,\n",
|
||||
" name = aci_service_name,\n",
|
||||
" workspace = ws)\n",
|
||||
"aci_service.wait_for_deployment(True)\n",
|
||||
"print(aci_service.state)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Call the service"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# we send the data to the service serialized into a json string\n",
|
||||
"test_sample = json.dumps({'X':X_test.to_json(), 'y' : y_query.tolist()})\n",
|
||||
"response = aci_service.run(input_data = test_sample)\n",
|
||||
"\n",
|
||||
"# translate from networkese to datascientese\n",
|
||||
"try: \n",
|
||||
" res_dict = json.loads(response)\n",
|
||||
" y_fcst_all = pd.read_json(res_dict['index'])\n",
|
||||
" y_fcst_all[time_column_name] = pd.to_datetime(y_fcst_all[time_column_name], unit = 'ms')\n",
|
||||
" y_fcst_all['forecast'] = res_dict['forecast'] \n",
|
||||
"except:\n",
|
||||
" print(res_dict)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y_fcst_all.head()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Delete the web service if desired"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"serv = Webservice(ws, 'automl-forecast-01')\n",
|
||||
"# serv.delete() # don't do it accidentally"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "erwright"
|
||||
"name": "erwright, tosingli"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
@@ -433,7 +852,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.8"
|
||||
"version": "3.6.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -9,6 +9,13 @@
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -37,8 +44,9 @@
|
||||
"In this notebook you will learn how to:\n",
|
||||
"1. Create an `Experiment` in an existing `Workspace`.\n",
|
||||
"2. Configure AutoML using `AutoMLConfig`.\n",
|
||||
"4. Train the model.\n",
|
||||
"5. Explore the results.\n",
|
||||
"3. Train the model.\n",
|
||||
"4. Explore the results.\n",
|
||||
"5. Viewing the engineered names for featurized data and featurization summary for all raw features.\n",
|
||||
"6. Test the best fitted model.\n",
|
||||
"\n",
|
||||
"In addition this notebook showcases the following features\n",
|
||||
@@ -102,23 +110,6 @@
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Opt-in diagnostics for better experience, quality, and security of future releases."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.telemetry import set_diagnostics_collection\n",
|
||||
"set_diagnostics_collection(send_diagnostics = True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -171,12 +162,11 @@
|
||||
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
|
||||
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
|
||||
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
|
||||
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
||||
"|**preprocess**|Setting this to *True* enables AutoML to perform preprocessing on the input to handle *missing data*, and to perform some common *feature extraction*.|\n",
|
||||
"|**experiment_exit_score**|*double* value indicating the target for *primary_metric*. <br>Once the target is surpassed the run terminates.|\n",
|
||||
"|**blacklist_models**|*List* of *strings* indicating machine learning algorithms for AutoML to avoid in this run.<br><br> Allowed values for **Classification**<br><i>LogisticRegression</i><br><i>SGD</i><br><i>MultinomialNaiveBayes</i><br><i>BernoulliNaiveBayes</i><br><i>SVM</i><br><i>LinearSVM</i><br><i>KNN</i><br><i>DecisionTree</i><br><i>RandomForest</i><br><i>ExtremeRandomTrees</i><br><i>LightGBM</i><br><i>GradientBoosting</i><br><i>TensorFlowDNN</i><br><i>TensorFlowLinearClassifier</i><br><br>Allowed values for **Regression**<br><i>ElasticNet</i><br><i>GradientBoosting</i><br><i>DecisionTree</i><br><i>KNN</i><br><i>LassoLars</i><br><i>SGD</i><br><i>RandomForest</i><br><i>ExtremeRandomTrees</i><br><i>LightGBM</i><br><i>TensorFlowLinearRegressor</i><br><i>TensorFlowDNN</i>|\n",
|
||||
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
||||
"|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.|\n",
|
||||
"|**y**|(sparse) array-like, shape = [n_samples, ], Multi-class targets.|\n",
|
||||
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|"
|
||||
]
|
||||
},
|
||||
@@ -191,7 +181,6 @@
|
||||
" primary_metric = 'AUC_weighted',\n",
|
||||
" iteration_timeout_minutes = 60,\n",
|
||||
" iterations = 20,\n",
|
||||
" n_cross_validations = 5,\n",
|
||||
" preprocess = True,\n",
|
||||
" experiment_exit_score = 0.9984,\n",
|
||||
" blacklist_models = ['KNN','LinearSVM'],\n",
|
||||
@@ -335,6 +324,45 @@
|
||||
"# best_run, fitted_model = local_run.get_output(iteration = iteration)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### View the engineered names for featurized data\n",
|
||||
"Below we display the engineered feature names generated for the featurized data using the preprocessing featurization."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"fitted_model.named_steps['datatransformer'].get_engineered_feature_names()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### View the featurization summary\n",
|
||||
"Below we display the featurization that was performed on different raw features in the user data. For each raw feature in the user data, the following information is displayed:-\n",
|
||||
"- Raw feature name\n",
|
||||
"- Number of engineered features formed out of this raw feature\n",
|
||||
"- Type detected\n",
|
||||
"- If feature was dropped\n",
|
||||
"- List of feature transformations for the raw feature"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"fitted_model.named_steps['datatransformer'].get_featurization_summary()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
|
||||
@@ -9,6 +9,13 @@
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -74,9 +81,9 @@
|
||||
"ws = Workspace.from_config()\n",
|
||||
"\n",
|
||||
"# choose a name for experiment\n",
|
||||
"experiment_name = 'automl-local-classification'\n",
|
||||
"experiment_name = 'automl-model-explanation'\n",
|
||||
"# project folder\n",
|
||||
"project_folder = './sample_projects/automl-local-classification-model-explanation'\n",
|
||||
"project_folder = './sample_projects/automl-model-explanation'\n",
|
||||
"\n",
|
||||
"experiment=Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
@@ -93,23 +100,6 @@
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Opt-in diagnostics for better experience, quality, and security of future releases"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.telemetry import set_diagnostics_collection\n",
|
||||
"set_diagnostics_collection(send_diagnostics=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -157,9 +147,9 @@
|
||||
"|**max_time_sec**|Time limit in minutes for each iterations|\n",
|
||||
"|**iterations**|Number of iterations. In each iteration Auto ML trains the data with a specific pipeline|\n",
|
||||
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
||||
"|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers. |\n",
|
||||
"|**y**|(sparse) array-like, shape = [n_samples, ], Multi-class targets.|\n",
|
||||
"|**X_valid**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
||||
"|**y_valid**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]|\n",
|
||||
"|**y_valid**|(sparse) array-like, shape = [n_samples, ], Multi-class targets.|\n",
|
||||
"|**model_explainability**|Indicate to explain each trained pipeline or not |\n",
|
||||
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder. |"
|
||||
]
|
||||
@@ -271,7 +261,9 @@
|
||||
"3.\toverall_summary: The model level feature importance values sorted in descending order\n",
|
||||
"4.\toverall_imp: The feature names sorted in the same order as in overall_summary\n",
|
||||
"5.\tper_class_summary: The class level feature importance values sorted in descending order. Only available for the classification case\n",
|
||||
"6.\tper_class_imp: The feature names sorted in the same order as in per_class_summary. Only available for the classification case"
|
||||
"6.\tper_class_imp: The feature names sorted in the same order as in per_class_summary. Only available for the classification case\n",
|
||||
"\n",
|
||||
"Note:- The **retrieve_model_explanation()** API only works in case AutoML has been configured with **'model_explainability'** flag set to **True**. "
|
||||
]
|
||||
},
|
||||
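A hedged sketch of retrieving a stored explanation (the `retrieve_model_explanation` name comes from the note above and lives in the same `automlexplainer` module imported later; passing the best child run is an assumption):

```python
from azureml.train.automl.automlexplainer import retrieve_model_explanation

# best_run is the best child run returned by get_output() earlier; this only
# works if the AutoMLConfig was created with model_explainability=True.
shap_values, expected_values, overall_summary, overall_imp, \
    per_class_summary, per_class_imp = retrieve_model_explanation(best_run)
```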
{
|
||||
@@ -322,7 +314,7 @@
|
||||
"from azureml.train.automl.automlexplainer import explain_model\n",
|
||||
"\n",
|
||||
"shap_values, expected_values, overall_summary, overall_imp, per_class_summary, per_class_imp = \\\n",
|
||||
" explain_model(fitted_model, X_train, X_test)"
|
||||
" explain_model(fitted_model, X_train, X_test, features=features)"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -9,6 +9,13 @@
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -96,23 +103,6 @@
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Opt-in diagnostics for better experience, quality, and security of future releases."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.telemetry import set_diagnostics_collection\n",
|
||||
"set_diagnostics_collection(send_diagnostics = True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -154,7 +144,7 @@
|
||||
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
|
||||
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
||||
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
||||
"|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.|\n",
|
||||
"|**y**|(sparse) array-like, shape = [n_samples, ], targets values.|\n",
|
||||
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -9,6 +9,13 @@
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -67,6 +74,7 @@
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"import os\n",
|
||||
"import csv\n",
|
||||
"\n",
|
||||
"from matplotlib import pyplot as plt\n",
|
||||
"import numpy as np\n",
|
||||
@@ -89,7 +97,7 @@
|
||||
"\n",
|
||||
"# Choose a name for the run history container in the workspace.\n",
|
||||
"experiment_name = 'automl-remote-amlcompute'\n",
|
||||
"project_folder = './sample_projects/automl-remote-amlcompute'\n",
|
||||
"project_folder = './project'\n",
|
||||
"\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
@@ -106,31 +114,12 @@
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Opt-in diagnostics for better experience, quality, and security of future releases."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.telemetry import set_diagnostics_collection\n",
|
||||
"set_diagnostics_collection(send_diagnostics = True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create or Attach existing AmlCompute\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you create `AmlCompute` as your training compute resource.\n",
|
||||
"\n",
|
||||
"**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
|
||||
"You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you create an AmlCompute as your training compute resource.\n",
|
||||
"\n",
|
||||
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
|
||||
]
|
||||
@@ -145,32 +134,72 @@
|
||||
"from azureml.core.compute import ComputeTarget\n",
|
||||
"\n",
|
||||
"# Choose a name for your cluster.\n",
|
||||
"amlcompute_cluster_name = \"automlcl\"\n",
|
||||
"amlcompute_cluster_name = \"cpucluster\"\n",
|
||||
"\n",
|
||||
"found = False\n",
|
||||
"\n",
|
||||
"# Check if this compute target already exists in the workspace.\n",
|
||||
"\n",
|
||||
"cts = ws.compute_targets\n",
|
||||
"if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'AmlCompute':\n",
|
||||
" found = True\n",
|
||||
" print('Found existing compute target.')\n",
|
||||
" compute_target = cts[amlcompute_cluster_name]\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"if not found:\n",
|
||||
" print('Creating a new compute target...')\n",
|
||||
" provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\", # for GPU, use \"STANDARD_NC6\"\n",
|
||||
" #vm_priority = 'lowpriority', # optional\n",
|
||||
" max_nodes = 6)\n",
|
||||
"\n",
|
||||
" # Create the cluster.\n",
|
||||
" # Create the cluster.\\n\",\n",
|
||||
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config)\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" # Can poll for a minimum number of nodes and for a specific timeout.\n",
|
||||
" # If no min_node_count is provided, it will use the scale settings for the cluster.\n",
|
||||
" compute_target.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20)\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" # For a more detailed view of current AmlCompute status, use get_status()."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Data\n",
|
||||
"For remote executions, you need to make the data accessible from the remote compute.\n",
|
||||
"This can be done by uploading the data to DataStore.\n",
|
||||
"In this example, we upload scikit-learn's [load_digits](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html) data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data_train = datasets.load_digits()\n",
|
||||
"\n",
|
||||
"if not os.path.isdir('data'):\n",
|
||||
" os.mkdir('data')\n",
|
||||
" \n",
|
||||
"if not os.path.exists(project_folder):\n",
|
||||
" os.makedirs(project_folder)\n",
|
||||
" \n",
|
||||
"pd.DataFrame(data_train.data).to_csv(\"data/X_train.tsv\", index=False, header=False, quoting=csv.QUOTE_ALL, sep=\"\\t\")\n",
|
||||
"pd.DataFrame(data_train.target).to_csv(\"data/y_train.tsv\", index=False, header=False, sep=\"\\t\")\n",
|
||||
"\n",
|
||||
"ds = ws.get_default_datastore()\n",
|
||||
"ds.upload(src_dir='./data', target_path='bai_data', overwrite=True, show_progress=True)\n",
|
||||
"\n",
|
||||
"from azureml.core.runconfig import DataReferenceConfiguration\n",
|
||||
"dr = DataReferenceConfiguration(datastore_name=ds.name, \n",
|
||||
" path_on_datastore='bai_data', \n",
|
||||
" path_on_compute='/tmp/azureml_runs',\n",
|
||||
" mode='download', # download files from datastore to compute target\n",
|
||||
" overwrite=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -188,29 +217,13 @@
|
||||
"conda_run_config.environment.docker.enabled = True\n",
|
||||
"conda_run_config.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE\n",
|
||||
"\n",
|
||||
"cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'], conda_packages=['numpy'])\n",
|
||||
"# set the data reference of the run coonfiguration\n",
|
||||
"conda_run_config.data_references = {ds.name: dr}\n",
|
||||
"\n",
|
||||
"cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'], conda_packages=['numpy','py-xgboost<=0.80'])\n",
|
||||
"conda_run_config.environment.python.conda_dependencies = cd"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Data\n",
|
||||
"For remote executions you should author a `get_data.py` file containing a `get_data()` function. This file should be in the root directory of the project. You can encapsulate code to read data either from a blob storage or local disk in this file.\n",
|
||||
"In this example, the `get_data()` function returns data using scikit-learn's [load_digits](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html) method."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"if not os.path.exists(project_folder):\n",
|
||||
" os.makedirs(project_folder)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -219,17 +232,13 @@
|
||||
"source": [
|
||||
"%%writefile $project_folder/get_data.py\n",
|
||||
"\n",
|
||||
"from sklearn import datasets\n",
|
||||
"from scipy import sparse\n",
|
||||
"import numpy as np\n",
|
||||
"import pandas as pd\n",
|
||||
"\n",
|
||||
"def get_data():\n",
|
||||
" \n",
|
||||
" digits = datasets.load_digits()\n",
|
||||
" X_train = digits.data\n",
|
||||
" y_train = digits.target\n",
|
||||
" X_train = pd.read_csv(\"/tmp/azureml_runs/bai_data/X_train.tsv\", delimiter=\"\\t\", header=None, quotechar='\"')\n",
|
||||
" y_train = pd.read_csv(\"/tmp/azureml_runs/bai_data/y_train.tsv\", delimiter=\"\\t\", header=None, quotechar='\"')\n",
|
||||
"\n",
|
||||
" return { \"X\" : X_train, \"y\" : y_train }"
|
||||
" return { \"X\" : X_train.values, \"y\" : y_train[0].values }\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -258,7 +267,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_settings = {\n",
|
||||
" \"iteration_timeout_minutes\": 2,\n",
|
||||
" \"iteration_timeout_minutes\": 10,\n",
|
||||
" \"iterations\": 20,\n",
|
||||
" \"n_cross_validations\": 5,\n",
|
||||
" \"primary_metric\": 'AUC_weighted',\n",
|
||||
@@ -1,532 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Automated Machine Learning\n",
|
||||
"_**Remote Execution using attach**_\n",
|
||||
"\n",
|
||||
"## Contents\n",
|
||||
"1. [Introduction](#Introduction)\n",
|
||||
"1. [Setup](#Setup)\n",
|
||||
"1. [Data](#Data)\n",
|
||||
"1. [Train](#Train)\n",
|
||||
"1. [Results](#Results)\n",
|
||||
"1. [Test](#Test)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Introduction\n",
|
||||
"In this example we use the scikit-learn's [20newsgroup](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.fetch_20newsgroups.html) to showcase how you can use AutoML to handle text data with remote attach.\n",
|
||||
"\n",
|
||||
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
|
||||
"\n",
|
||||
"In this notebook you will learn how to:\n",
|
||||
"1. Create an `Experiment` in an existing `Workspace`.\n",
|
||||
"2. Attach an existing DSVM to a workspace.\n",
|
||||
"3. Configure AutoML using `AutoMLConfig`.\n",
|
||||
"4. Train the model using the DSVM.\n",
|
||||
"5. Explore the results.\n",
|
||||
"6. Test the best fitted model.\n",
|
||||
"\n",
|
||||
"In addition this notebook showcases the following features\n",
|
||||
"- **Parallel** executions for iterations\n",
|
||||
"- **Asynchronous** tracking of progress\n",
|
||||
"- **Cancellation** of individual iterations or the entire run\n",
|
||||
"- Retrieving models for any iteration or logged metric\n",
|
||||
"- Specifying AutoML settings as `**kwargs`\n",
|
||||
"- Handling **text** data using the `preprocess` flag"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"import numpy as np\n",
|
||||
"import pandas as pd\n",
|
||||
"\n",
|
||||
"import azureml.core\n",
|
||||
"from azureml.core.experiment import Experiment\n",
|
||||
"from azureml.core.workspace import Workspace\n",
|
||||
"from azureml.train.automl import AutoMLConfig"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"\n",
|
||||
"# Choose a name for the run history container in the workspace.\n",
|
||||
"experiment_name = 'automl-remote-attach'\n",
|
||||
"project_folder = './sample_projects/automl-remote-attach'\n",
|
||||
"\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output['SDK version'] = azureml.core.VERSION\n",
|
||||
"output['Subscription ID'] = ws.subscription_id\n",
|
||||
"output['Workspace'] = ws.name\n",
|
||||
"output['Resource Group'] = ws.resource_group\n",
|
||||
"output['Location'] = ws.location\n",
|
||||
"output['Project Directory'] = project_folder\n",
|
||||
"output['Experiment Name'] = experiment.name\n",
|
||||
"pd.set_option('display.max_colwidth', -1)\n",
|
||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Opt-in diagnostics for better experience, quality, and security of future releases."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.telemetry import set_diagnostics_collection\n",
|
||||
"set_diagnostics_collection(send_diagnostics = True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Attach a Remote Linux DSVM\n",
|
||||
"To use a remote Docker compute target:\n",
|
||||
"1. Create a Linux DSVM in Azure, following these [quick instructions](https://docs.microsoft.com/en-us/azure/machine-learning/desktop-workbench/how-to-create-dsvm-hdi). Make sure you use the Ubuntu flavor (not CentOS). Make sure that disk space is available under `/tmp` because AutoML creates files under `/tmp/azureml_run`s. The DSVM should have more cores than the number of parallel runs that you plan to enable. It should also have at least 4GB per core.\n",
|
||||
"2. Enter the IP address, user name and password below.\n",
|
||||
"\n",
|
||||
"**Note:** By default, SSH runs on port 22 and you don't need to change the port number below. If you've configured SSH to use a different port, change `dsvm_ssh_port` accordinglyaddress. [Read more](https://render.githubusercontent.com/documentation/sdk/ssh-issue.md) on changing SSH ports for security reasons."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.compute import ComputeTarget, RemoteCompute\n",
|
||||
"import time\n",
|
||||
"\n",
|
||||
"# Add your VM information below\n",
|
||||
"# If a compute with the specified compute_name already exists, it will be used and the dsvm_ip_addr, dsvm_ssh_port, \n",
|
||||
"# dsvm_username and dsvm_password will be ignored.\n",
|
||||
"compute_name = 'mydsvmb'\n",
|
||||
"dsvm_ip_addr = '<<ip_addr>>'\n",
|
||||
"dsvm_ssh_port = 22\n",
|
||||
"dsvm_username = '<<username>>'\n",
|
||||
"dsvm_password = '<<password>>'\n",
|
||||
"\n",
|
||||
"if compute_name in ws.compute_targets:\n",
|
||||
" print('Using existing compute.')\n",
|
||||
" dsvm_compute = ws.compute_targets[compute_name]\n",
|
||||
"else:\n",
|
||||
" attach_config = RemoteCompute.attach_configuration(address=dsvm_ip_addr, username=dsvm_username, password=dsvm_password, ssh_port=dsvm_ssh_port)\n",
|
||||
" ComputeTarget.attach(workspace=ws, name=compute_name, attach_configuration=attach_config)\n",
|
||||
"\n",
|
||||
" while ws.compute_targets[compute_name].provisioning_state == 'Creating':\n",
|
||||
" time.sleep(1)\n",
|
||||
"\n",
|
||||
" dsvm_compute = ws.compute_targets[compute_name]\n",
|
||||
" \n",
|
||||
" if dsvm_compute.provisioning_state == 'Failed':\n",
|
||||
" print('Attached failed.')\n",
|
||||
" print(dsvm_compute.provisioning_errors)\n",
|
||||
" dsvm_compute.detach()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.runconfig import RunConfiguration\n",
|
||||
"from azureml.core.conda_dependencies import CondaDependencies\n",
|
||||
"\n",
|
||||
"# create a new RunConfig object\n",
|
||||
"conda_run_config = RunConfiguration(framework=\"python\")\n",
|
||||
"\n",
|
||||
"# Set compute target to the Linux DSVM\n",
|
||||
"conda_run_config.target = dsvm_compute\n",
|
||||
"\n",
|
||||
"cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'], conda_packages=['numpy'])\n",
|
||||
"conda_run_config.environment.python.conda_dependencies = cd"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Data\n",
|
||||
"For remote executions you should author a `get_data.py` file containing a `get_data()` function. This file should be in the root directory of the project. You can encapsulate code to read data either from a blob storage or local disk in this file.\n",
|
||||
"In this example, the `get_data()` function returns a [dictionary](README.md#getdata)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"if not os.path.exists(project_folder):\n",
|
||||
" os.makedirs(project_folder)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%writefile $project_folder/get_data.py\n",
|
||||
"\n",
|
||||
"import numpy as np\n",
|
||||
"from sklearn.datasets import fetch_20newsgroups\n",
|
||||
"\n",
|
||||
"def get_data():\n",
|
||||
" remove = ('headers', 'footers', 'quotes')\n",
|
||||
" categories = [\n",
|
||||
" 'alt.atheism',\n",
|
||||
" 'talk.religion.misc',\n",
|
||||
" 'comp.graphics',\n",
|
||||
" 'sci.space',\n",
|
||||
" ]\n",
|
||||
" data_train = fetch_20newsgroups(subset = 'train', categories = categories,\n",
|
||||
" shuffle = True, random_state = 42,\n",
|
||||
" remove = remove)\n",
|
||||
" \n",
|
||||
" X_train = np.array(data_train.data).reshape((len(data_train.data),1))\n",
|
||||
" y_train = np.array(data_train.target)\n",
|
||||
" \n",
|
||||
" return { \"X\" : X_train, \"y\" : y_train }"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Train\n",
|
||||
"\n",
|
||||
"You can specify `automl_settings` as `**kwargs` as well. Also note that you can use a `get_data()` function for local excutions too.\n",
|
||||
"\n",
|
||||
"**Note:** When using Remote DSVM, you can't pass Numpy arrays directly to the fit method.\n",
|
||||
"\n",
|
||||
"|Property|Description|\n",
|
||||
"|-|-|\n",
|
||||
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
|
||||
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
|
||||
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
|
||||
"|**n_cross_validations**|Number of cross validation splits.|\n",
|
||||
"|**max_concurrent_iterations**|Maximum number of iterations that would be executed in parallel. This should be less than the number of cores on the DSVM.|\n",
|
||||
"|**preprocess**|Setting this to *True* enables AutoML to perform preprocessing on the input to handle *missing data*, and to perform some common *feature extraction*.|\n",
|
||||
"|**enable_cache**|Setting this to *True* enables preprocess done once and reuse the same preprocessed data for all the iterations. Default value is True.\n",
|
||||
"|**max_cores_per_iteration**|Indicates how many cores on the compute target would be used to train a single pipeline.<br>Default is *1*; you can set it to *-1* to use all cores.|"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_settings = {\n",
|
||||
" \"iteration_timeout_minutes\": 60,\n",
|
||||
" \"iterations\": 4,\n",
|
||||
" \"n_cross_validations\": 5,\n",
|
||||
" \"primary_metric\": 'AUC_weighted',\n",
|
||||
" \"preprocess\": True,\n",
|
||||
" \"max_cores_per_iteration\": 2\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"automl_config = AutoMLConfig(task = 'classification',\n",
|
||||
" path = project_folder,\n",
|
||||
" run_configuration=conda_run_config,\n",
|
||||
" data_script = project_folder + \"/get_data.py\",\n",
|
||||
" **automl_settings\n",
|
||||
" )\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Call the `submit` method on the experiment object and pass the run configuration. For remote runs the execution is asynchronous, so you will see the iterations get populated as they complete. You can interact with the widgets and models even when the experiment is running to retrieve the best model up to that point. Once you are satisfied with the model, you can cancel a particular iteration or the whole run."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run = experiment.submit(automl_config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Results\n",
|
||||
"#### Widget for Monitoring Runs\n",
|
||||
"\n",
|
||||
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
|
||||
"\n",
|
||||
"You can click on a pipeline to see run properties and output logs. Logs are also available on the DSVM under `/tmp/azureml_run/{iterationid}/azureml-logs`\n",
|
||||
"\n",
|
||||
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.widgets import RunDetails\n",
|
||||
"RunDetails(remote_run).show() "
|
||||
]
|
||||
},
|
||||
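Besides the on-VM log path noted above, iteration logs can also be listed and fetched through the SDK. A minimal sketch, assuming `remote_run` already has at least one child run; the file name passed to `download_file` should be taken from the listing:

```python
# List the files (including azureml-logs/ entries) attached to a child run.
child = next(remote_run.get_children())
for name in child.get_file_names():
    print(name)
# child.download_file('<a name from the listing>', output_file_path='local_copy.txt')
```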
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Wait until the run finishes.\n",
|
||||
"remote_run.wait_for_completion(show_output = True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Pre-process cache cleanup\n",
|
||||
"The preprocess data gets cache at user default file store. When the run is completed the cache can be cleaned by running below cell"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run.clean_preprocessor_cache()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\n",
|
||||
"#### Retrieve All Child Runs\n",
|
||||
"You can also use SDK methods to fetch all the child runs and see individual metrics that we log. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"children = list(remote_run.get_children())\n",
|
||||
"metricslist = {}\n",
|
||||
"for run in children:\n",
|
||||
" properties = run.get_properties()\n",
|
||||
" metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}\n",
|
||||
" metricslist[int(properties['iteration'])] = metrics\n",
|
||||
"\n",
|
||||
"rundata = pd.DataFrame(metricslist).sort_index(1)\n",
|
||||
"rundata"
|
||||
]
|
||||
},
|
||||
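To rank the iterations by the primary metric directly from this table, here is a small sketch, assuming `rundata` from the cell above and that 'AUC_weighted' appears among the logged metric names:

```python
# Metrics are the rows of rundata and iterations the columns, so selecting the
# primary metric's row and sorting ranks the iterations by that metric.
print(rundata.loc['AUC_weighted'].sort_values(ascending=False))
```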
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Cancelling Runs\n",
|
||||
"You can cancel ongoing remote runs using the `cancel` and `cancel_iteration` functions."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Cancel the ongoing experiment and stop scheduling new iterations.\n",
|
||||
"# remote_run.cancel()\n",
|
||||
"\n",
|
||||
"# Cancel iteration 1 and move onto iteration 2.\n",
|
||||
"# remote_run.cancel_iteration(1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Retrieve the Best Model\n",
|
||||
"\n",
|
||||
"Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"best_run, fitted_model = remote_run.get_output()\n",
|
||||
"print(best_run)\n",
|
||||
"print(fitted_model)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Best Model Based on Any Other Metric\n",
|
||||
"Show the run and the model which has the smallest `accuracy` value:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# lookup_metric = \"accuracy\"\n",
|
||||
"# best_run, fitted_model = remote_run.get_output(metric = lookup_metric)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Model from a Specific Iteration"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"iteration = 0\n",
|
||||
"zero_run, zero_model = remote_run.get_output(iteration = iteration)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Test"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Load test data.\n",
|
||||
"from pandas_ml import ConfusionMatrix\n",
|
||||
"from sklearn.datasets import fetch_20newsgroups\n",
|
||||
"\n",
|
||||
"remove = ('headers', 'footers', 'quotes')\n",
|
||||
"categories = [\n",
|
||||
" 'alt.atheism',\n",
|
||||
" 'talk.religion.misc',\n",
|
||||
" 'comp.graphics',\n",
|
||||
" 'sci.space',\n",
|
||||
" ]\n",
|
||||
"\n",
|
||||
"data_test = fetch_20newsgroups(subset = 'test', categories = categories,\n",
|
||||
" shuffle = True, random_state = 42,\n",
|
||||
" remove = remove)\n",
|
||||
"\n",
|
||||
"X_test = np.array(data_test.data).reshape((len(data_test.data),1))\n",
|
||||
"y_test = data_test.target\n",
|
||||
"\n",
|
||||
"# Test our best pipeline.\n",
|
||||
"\n",
|
||||
"y_pred = fitted_model.predict(X_test)\n",
|
||||
"y_pred_strings = [data_test.target_names[i] for i in y_pred]\n",
|
||||
"y_test_strings = [data_test.target_names[i] for i in y_test]\n",
|
||||
"\n",
|
||||
"cm = ConfusionMatrix(y_test_strings, y_pred_strings)\n",
|
||||
"print(cm)\n",
|
||||
"cm.plot()"
|
||||
]
|
||||
}
|
||||
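If `pandas_ml` is not available in your environment, the same predictions can be checked with scikit-learn alone. A minimal sketch, assuming `y_test` and `y_pred` from the cell above:

```python
# Dependency-light alternative to the pandas_ml confusion matrix.
from sklearn.metrics import accuracy_score, confusion_matrix

print('accuracy:', accuracy_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
```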
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "savitam"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1,600 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Automated Machine Learning\n",
|
||||
"_**Remote Execution with DataStore**_\n",
|
||||
"\n",
|
||||
"## Contents\n",
|
||||
"1. [Introduction](#Introduction)\n",
|
||||
"1. [Setup](#Setup)\n",
|
||||
"1. [Data](#Data)\n",
|
||||
"1. [Train](#Train)\n",
|
||||
"1. [Results](#Results)\n",
|
||||
"1. [Test](#Test)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Introduction\n",
|
||||
"This sample accesses a data file on a remote DSVM through DataStore. Advantages of using data store are:\n",
|
||||
"1. DataStore secures the access details.\n",
|
||||
"2. DataStore supports read, write to blob and file store\n",
|
||||
"3. AutoML natively supports copying data from DataStore to DSVM\n",
|
||||
"\n",
|
||||
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
|
||||
"\n",
|
||||
"In this notebook you would see\n",
|
||||
"1. Storing data in DataStore.\n",
|
||||
"2. get_data returning data from DataStore."
|
||||
]
|
||||
},
|
||||
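This notebook uses the workspace's default datastore later on; if you prefer a dedicated one, a blob container can be registered under a name of your choosing. A minimal sketch, not from this notebook, in which every name and the key are hypothetical placeholders:

```python
# Register an Azure Blob container as a named datastore (placeholders throughout).
from azureml.core import Datastore

blob_ds = Datastore.register_azure_blob_container(workspace=ws,
                                                  datastore_name='my_blob_ds',
                                                  container_name='my-container',
                                                  account_name='mystorageacct',
                                                  account_key='<storage-account-key>')
```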
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"As part of the setup you have already created a <b>Workspace</b>. For AutoML you would need to create an <b>Experiment</b>. An <b>Experiment</b> is a named object in a <b>Workspace</b>, which is used to run experiments."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"import os\n",
|
||||
"import time\n",
|
||||
"\n",
|
||||
"import numpy as np\n",
|
||||
"import pandas as pd\n",
|
||||
"\n",
|
||||
"import azureml.core\n",
|
||||
"from azureml.core.compute import DsvmCompute\n",
|
||||
"from azureml.core.experiment import Experiment\n",
|
||||
"from azureml.core.workspace import Workspace\n",
|
||||
"from azureml.train.automl import AutoMLConfig"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"\n",
|
||||
"# choose a name for experiment\n",
|
||||
"experiment_name = 'automl-remote-datastore-file'\n",
|
||||
"# project folder\n",
|
||||
"project_folder = './sample_projects/automl-remote-datastore-file'\n",
|
||||
"\n",
|
||||
"experiment=Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output['SDK version'] = azureml.core.VERSION\n",
|
||||
"output['Subscription ID'] = ws.subscription_id\n",
|
||||
"output['Workspace'] = ws.name\n",
|
||||
"output['Resource Group'] = ws.resource_group\n",
|
||||
"output['Location'] = ws.location\n",
|
||||
"output['Project Directory'] = project_folder\n",
|
||||
"output['Experiment Name'] = experiment.name\n",
|
||||
"pd.set_option('display.max_colwidth', -1)\n",
|
||||
"outputDf = pd.DataFrame(data = output, index = [''])\n",
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Opt-in diagnostics for better experience, quality, and security of future releases"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.telemetry import set_diagnostics_collection\n",
|
||||
"set_diagnostics_collection(send_diagnostics=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create a Remote Linux DSVM\n",
|
||||
"Note: If creation fails with a message about Marketplace purchase eligibilty, go to portal.azure.com, start creating DSVM there, and select \"Want to create programmatically\" to enable programmatic creation. Once you've enabled it, you can exit without actually creating VM.\n",
|
||||
"\n",
|
||||
"**Note**: By default SSH runs on port 22 and you don't need to specify it. But if for security reasons you can switch to a different port (such as 5022), you can append the port number to the address. [Read more](https://render.githubusercontent.com/documentation/sdk/ssh-issue.md) on this."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"compute_target_name = 'mydsvmc'\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" while ws.compute_targets[compute_target_name].provisioning_state == 'Creating':\n",
|
||||
" time.sleep(1)\n",
|
||||
" \n",
|
||||
" dsvm_compute = DsvmCompute(workspace=ws, name=compute_target_name)\n",
|
||||
" print('found existing:', dsvm_compute.name)\n",
|
||||
"except:\n",
|
||||
" dsvm_config = DsvmCompute.provisioning_configuration(vm_size=\"Standard_D2_v2\")\n",
|
||||
" dsvm_compute = DsvmCompute.create(ws, name=compute_target_name, provisioning_configuration=dsvm_config)\n",
|
||||
" dsvm_compute.wait_for_completion(show_output=True)\n",
|
||||
" print(\"Waiting one minute for ssh to be accessible\")\n",
|
||||
" time.sleep(60) # Wait for ssh to be accessible"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Data\n",
|
||||
"\n",
|
||||
"### Copy data file to local\n",
|
||||
"\n",
|
||||
"Download the data file.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"if not os.path.isdir('data'):\n",
|
||||
" os.mkdir('data') "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.datasets import fetch_20newsgroups\n",
|
||||
"import csv\n",
|
||||
"\n",
|
||||
"remove = ('headers', 'footers', 'quotes')\n",
|
||||
"categories = [\n",
|
||||
" 'alt.atheism',\n",
|
||||
" 'talk.religion.misc',\n",
|
||||
" 'comp.graphics',\n",
|
||||
" 'sci.space',\n",
|
||||
" ]\n",
|
||||
"data_train = fetch_20newsgroups(subset = 'train', categories = categories,\n",
|
||||
" shuffle = True, random_state = 42,\n",
|
||||
" remove = remove)\n",
|
||||
" \n",
|
||||
"pd.DataFrame(data_train.data).to_csv(\"data/X_train.tsv\", index=False, header=False, quoting=csv.QUOTE_ALL, sep=\"\\t\")\n",
|
||||
"pd.DataFrame(data_train.target).to_csv(\"data/y_train.tsv\", index=False, header=False, sep=\"\\t\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Upload data to the cloud"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now make the data accessible remotely by uploading that data from your local machine into Azure so it can be accessed for remote training. The datastore is a convenient construct associated with your workspace for you to upload/download data, and interact with it from your remote compute targets. It is backed by Azure blob storage account.\n",
|
||||
"\n",
|
||||
"The data.tsv files are uploaded into a directory named data at the root of the datastore."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#blob_datastore = Datastore(ws, blob_datastore_name)\n",
|
||||
"ds = ws.get_default_datastore()\n",
|
||||
"print(ds.datastore_type, ds.account_name, ds.container_name)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# ds.upload_files(\"data.tsv\")\n",
|
||||
"ds.upload(src_dir='./data', target_path='data', overwrite=True, show_progress=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Configure & Run\n",
|
||||
"\n",
|
||||
"First let's create a DataReferenceConfigruation object to inform the system what data folder to download to the compute target.\n",
|
||||
"The path_on_compute should be an absolute path to ensure that the data files are downloaded only once. The get_data method should use this same path to access the data files."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.runconfig import DataReferenceConfiguration\n",
|
||||
"dr = DataReferenceConfiguration(datastore_name=ds.name, \n",
|
||||
" path_on_datastore='data', \n",
|
||||
" path_on_compute='/tmp/azureml_runs',\n",
|
||||
" mode='download', # download files from datastore to compute target\n",
|
||||
" overwrite=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.runconfig import RunConfiguration\n",
|
||||
"from azureml.core.conda_dependencies import CondaDependencies\n",
|
||||
"\n",
|
||||
"# create a new RunConfig object\n",
|
||||
"conda_run_config = RunConfiguration(framework=\"python\")\n",
|
||||
"\n",
|
||||
"# Set compute target to the Linux DSVM\n",
|
||||
"conda_run_config.target = dsvm_compute\n",
|
||||
"# set the data reference of the run coonfiguration\n",
|
||||
"conda_run_config.data_references = {ds.name: dr}\n",
|
||||
"\n",
|
||||
"cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'], conda_packages=['numpy'])\n",
|
||||
"conda_run_config.environment.python.conda_dependencies = cd"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create Get Data File\n",
|
||||
"For remote executions you should author a get_data.py file containing a get_data() function. This file should be in the root directory of the project. You can encapsulate code to read data either from a blob storage or local disk in this file.\n",
|
||||
"\n",
|
||||
"The *get_data()* function returns a [dictionary](README.md#getdata).\n",
|
||||
"\n",
|
||||
"The read_csv uses the path_on_compute value specified in the DataReferenceConfiguration call plus the path_on_datastore folder and then the actual file name."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"if not os.path.exists(project_folder):\n",
|
||||
" os.makedirs(project_folder)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%writefile $project_folder/get_data.py\n",
|
||||
"\n",
|
||||
"import pandas as pd\n",
|
||||
"\n",
|
||||
"def get_data():\n",
|
||||
" X_train = pd.read_csv(\"/tmp/azureml_runs/data/X_train.tsv\", delimiter=\"\\t\", header=None, quotechar='\"')\n",
|
||||
" y_train = pd.read_csv(\"/tmp/azureml_runs/data/y_train.tsv\", delimiter=\"\\t\", header=None, quotechar='\"')\n",
|
||||
"\n",
|
||||
" return { \"X\" : X_train.values, \"y\" : y_train[0].values }"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Train\n",
|
||||
"\n",
|
||||
"You can specify automl_settings as **kwargs** as well. Also note that you can use the get_data() symantic for local excutions too. \n",
|
||||
"\n",
|
||||
"<i>Note: For Remote DSVM and Batch AI you cannot pass Numpy arrays directly to AutoMLConfig.</i>\n",
|
||||
"\n",
|
||||
"|Property|Description|\n",
|
||||
"|-|-|\n",
|
||||
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
|
||||
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration|\n",
|
||||
"|**iterations**|Number of iterations. In each iteration Auto ML trains a specific pipeline with the data|\n",
|
||||
"|**n_cross_validations**|Number of cross validation splits|\n",
|
||||
"|**max_concurrent_iterations**|Max number of iterations that would be executed in parallel. This should be less than the number of cores on the DSVM\n",
|
||||
"|**preprocess**| *True/False* <br>Setting this to *True* enables Auto ML to perform preprocessing <br>on the input to handle *missing data*, and perform some common *feature extraction*|\n",
|
||||
"|**enable_cache**|Setting this to *True* enables preprocess done once and reuse the same preprocessed data for all the iterations. Default value is True.|\n",
|
||||
"|**max_cores_per_iteration**| Indicates how many cores on the compute target would be used to train a single pipeline.<br> Default is *1*, you can set it to *-1* to use all cores|"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_settings = {\n",
|
||||
" \"iteration_timeout_minutes\": 60,\n",
|
||||
" \"iterations\": 4,\n",
|
||||
" \"n_cross_validations\": 5,\n",
|
||||
" \"primary_metric\": 'AUC_weighted',\n",
|
||||
" \"preprocess\": True,\n",
|
||||
" \"max_cores_per_iteration\": 1,\n",
|
||||
" \"verbosity\": logging.INFO\n",
|
||||
"}\n",
|
||||
"automl_config = AutoMLConfig(task = 'classification',\n",
|
||||
" debug_log = 'automl_errors.log',\n",
|
||||
" path=project_folder,\n",
|
||||
" run_configuration=conda_run_config,\n",
|
||||
" #compute_target = dsvm_compute,\n",
|
||||
" data_script = project_folder + \"/get_data.py\",\n",
|
||||
" **automl_settings\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For remote runs the execution is asynchronous, so you will see the iterations get populated as they complete. You can interact with the widgets/models even when the experiment is running to retreive the best model up to that point. Once you are satisfied with the model you can cancel a particular iteration or the whole run."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run = experiment.submit(automl_config, show_output=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Results\n",
|
||||
"#### Widget for monitoring runs\n",
|
||||
"\n",
|
||||
"The widget will sit on \"loading\" until the first iteration completed, then you will see an auto-updating graph and table show up. It refreshed once per minute, so you should see the graph update as child runs complete.\n",
|
||||
"\n",
|
||||
"You can click on a pipeline to see run properties and output logs. Logs are also available on the DSVM under /tmp/azureml_run/{iterationid}/azureml-logs\n",
|
||||
"\n",
|
||||
"NOTE: The widget displays a link at the bottom. This links to a web-ui to explore the individual run details."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.widgets import RunDetails\n",
|
||||
"RunDetails(remote_run).show() "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Wait until the run finishes.\n",
|
||||
"remote_run.wait_for_completion(show_output = True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\n",
|
||||
"#### Retrieve All Child Runs\n",
|
||||
"You can also use sdk methods to fetch all the child runs and see individual metrics that we log. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"children = list(remote_run.get_children())\n",
|
||||
"metricslist = {}\n",
|
||||
"for run in children:\n",
|
||||
" properties = run.get_properties()\n",
|
||||
" metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)} \n",
|
||||
" metricslist[int(properties['iteration'])] = metrics\n",
|
||||
"\n",
|
||||
"rundata = pd.DataFrame(metricslist).sort_index(1)\n",
|
||||
"rundata"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Canceling Runs\n",
|
||||
"You can cancel ongoing remote runs using the *cancel()* and *cancel_iteration()* functions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Cancel the ongoing experiment and stop scheduling new iterations\n",
|
||||
"# remote_run.cancel()\n",
|
||||
"\n",
|
||||
"# Cancel iteration 1 and move onto iteration 2\n",
|
||||
"# remote_run.cancel_iteration(1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Pre-process cache cleanup\n",
|
||||
"The preprocess data gets cache at user default file store. When the run is completed the cache can be cleaned by running below cell"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"remote_run.clean_preprocessor_cache()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Retrieve the Best Model\n",
|
||||
"\n",
|
||||
"Below we select the best pipeline from our iterations. The *get_output* method returns the best run and the fitted model. There are overloads on *get_output* that allow you to retrieve the best run and fitted model for *any* logged metric or a particular *iteration*."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"best_run, fitted_model = remote_run.get_output()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Best Model based on any other metric"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# lookup_metric = \"accuracy\"\n",
|
||||
"# best_run, fitted_model = remote_run.get_output(metric=lookup_metric)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Model from a specific iteration"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# iteration = 1\n",
|
||||
"# best_run, fitted_model = remote_run.get_output(iteration=iteration)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Test\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Load test data.\n",
|
||||
"from pandas_ml import ConfusionMatrix\n",
|
||||
"\n",
|
||||
"data_test = fetch_20newsgroups(subset = 'test', categories = categories,\n",
|
||||
" shuffle = True, random_state = 42,\n",
|
||||
" remove = remove)\n",
|
||||
"\n",
|
||||
"X_test = np.array(data_test.data).reshape((len(data_test.data),1))\n",
|
||||
"y_test = data_test.target\n",
|
||||
"\n",
|
||||
"# Test our best pipeline.\n",
|
||||
"\n",
|
||||
"y_pred = fitted_model.predict(X_test)\n",
|
||||
"y_pred_strings = [data_test.target_names[i] for i in y_pred]\n",
|
||||
"y_test_strings = [data_test.target_names[i] for i in y_test]\n",
|
||||
"\n",
|
||||
"cm = ConfusionMatrix(y_test_strings, y_pred_strings)\n",
|
||||
"print(cm)\n",
|
||||
"cm.plot()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "savitam"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -9,6 +9,13 @@
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -75,7 +82,7 @@
|
||||
"experiment_name = 'non_sample_weight_experiment'\n",
|
||||
"sample_weight_experiment_name = 'sample_weight_experiment'\n",
|
||||
"\n",
|
||||
"project_folder = './sample_projects/automl-local-classification'\n",
|
||||
"project_folder = './sample_projects/sample_weight'\n",
|
||||
"\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"sample_weight_experiment=Experiment(ws, sample_weight_experiment_name)\n",
|
||||
@@ -93,23 +100,6 @@
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Opt-in diagnostics for better experience, quality, and security of future releases."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.telemetry import set_diagnostics_collection\n",
|
||||
"set_diagnostics_collection(send_diagnostics = True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
|
||||
@@ -9,6 +9,13 @@
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -79,9 +86,9 @@
|
||||
"ws = Workspace.from_config()\n",
|
||||
"\n",
|
||||
"# choose a name for the experiment\n",
|
||||
"experiment_name = 'automl-local-missing-data'\n",
|
||||
"experiment_name = 'sparse-data-train-test-split'\n",
|
||||
"# project folder\n",
|
||||
"project_folder = './sample_projects/automl-local-missing-data'\n",
|
||||
"project_folder = './sample_projects/sparse-data-train-test-split'\n",
|
||||
"\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
@@ -98,23 +105,6 @@
|
||||
"outputDf.T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Opt-in diagnostics for better experience, quality, and security of future releases."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.telemetry import set_diagnostics_collection\n",
|
||||
"set_diagnostics_collection(send_diagnostics = True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -173,9 +163,9 @@
|
||||
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
|
||||
"|**preprocess**|Setting this to *True* enables AutoML to perform preprocessing on the input to handle *missing data*, and to perform some common *feature extraction*.<br>**Note:** If input data is sparse, you cannot use *True*.|\n",
|
||||
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
|
||||
"|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.|\n",
|
||||
"|**y**|(sparse) array-like, shape = [n_samples, ], Multi-class targets.|\n",
|
||||
"|**X_valid**|(sparse) array-like, shape = [n_samples, n_features] for the custom validation set.|\n",
|
||||
"|**y_valid**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification for the custom validation set.|\n",
|
||||
"|**y_valid**|(sparse) array-like, shape = [n_samples, ], Multi-class targets.|\n",
|
||||
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -0,0 +1,208 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Automated Machine Learning\n",
|
||||
"_**Classification with Local Compute**_\n",
|
||||
"\n",
|
||||
"## Contents\n",
|
||||
"1. [Introduction](#Introduction)\n",
|
||||
"1. [Setup](#Setup)\n",
|
||||
"1. [Data](#Data)\n",
|
||||
"1. [Train](#Train)\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Introduction\n",
|
||||
"\n",
|
||||
"In this example we will explore AutoML's subsampling feature. This is useful for training on large datasets to speed up the convergence.\n",
|
||||
"\n",
|
||||
"The setup is quiet similar to a normal classification, with the exception of the `enable_subsampling` option. Keep in mind that even with the `enable_subsampling` flag set, subsampling will only be run for large datasets (>= 50k rows) and large (>= 85) or no iteration restrictions.\n"
|
||||
]
|
||||
},
|
||||
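To make the gating conditions concrete, here is a hypothetical helper, not part of the SDK, that encodes the thresholds stated above:

```python
# Illustration only: subsampling is considered for datasets with at least 50k rows
# when the iteration count is unrestricted or at least 85.
def subsampling_would_apply(n_rows, iterations=None):
    return n_rows >= 50000 and (iterations is None or iterations >= 85)

print(subsampling_would_apply(60000, iterations=85))  # True
print(subsampling_would_apply(60000, iterations=10))  # False
```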
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"\n",
|
||||
"import numpy as np\n",
|
||||
"import pandas as pd\n",
|
||||
"\n",
|
||||
"import azureml.core\n",
|
||||
"from azureml.core.experiment import Experiment\n",
|
||||
"from azureml.core.workspace import Workspace\n",
|
||||
"from azureml.train.automl import AutoMLConfig\n",
|
||||
"from azureml.train.automl.run import AutoMLRun"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ws = Workspace.from_config()\n",
|
||||
"\n",
|
||||
"# Choose a name for the experiment and specify the project folder.\n",
|
||||
"experiment_name = 'automl-subsampling'\n",
|
||||
"project_folder = './sample_projects/automl-subsampling'\n",
|
||||
"\n",
|
||||
"experiment = Experiment(ws, experiment_name)\n",
|
||||
"\n",
|
||||
"output = {}\n",
|
||||
"output['SDK version'] = azureml.core.VERSION\n",
|
||||
"output['Subscription ID'] = ws.subscription_id\n",
|
||||
"output['Workspace Name'] = ws.name\n",
|
||||
"output['Resource Group'] = ws.resource_group\n",
|
||||
"output['Location'] = ws.location\n",
|
||||
"output['Project Directory'] = project_folder\n",
|
||||
"output['Experiment Name'] = experiment.name\n",
|
||||
"pd.set_option('display.max_colwidth', -1)\n",
|
||||
"pd.DataFrame(data = output, index = ['']).T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Data\n",
|
||||
"\n",
|
||||
"We will create a simple dataset using the numpy sin function just for this example. We need just over 50k rows."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"base = np.arange(60000)\n",
|
||||
"cos = np.cos(base)\n",
|
||||
"y = np.round(np.sin(base)).astype('int')\n",
|
||||
"\n",
|
||||
"# Exclude the first 100 rows from training so that they can be used for test.\n",
|
||||
"X_train = np.hstack((base.reshape(-1, 1), cos.reshape(-1, 1)))\n",
|
||||
"y_train = y"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Train\n",
|
||||
"\n",
|
||||
"Instantiate an `AutoMLConfig` object to specify the settings and data used to run the experiment.\n",
|
||||
"\n",
|
||||
"|Property|Description|\n",
|
||||
"|-|-|\n",
|
||||
"|**enable_subsampling**|This enables subsampling as an option. However it does not guarantee subsampling will be used. It also depends on how large the dataset is and how many iterations it's expected to run at a minimum.|\n",
|
||||
"|**iterations**|Number of iterations. Subsampling requires a lot of iterations at smaller percent so in order for subsampling to be used we need to set iterations to be a high number.|\n",
|
||||
"|**experiment_timeout_minutes**|The experiment timeout, it's set to 5 right now to shorten the demo but it should probably be higher if we want to finish all the iterations.|\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"automl_config = AutoMLConfig(task = 'classification',\n",
|
||||
" debug_log = 'automl_errors.log',\n",
|
||||
" primary_metric = 'accuracy',\n",
|
||||
" iterations = 85,\n",
|
||||
" experiment_timeout_minutes = 5,\n",
|
||||
" n_cross_validations = 2,\n",
|
||||
" verbosity = logging.INFO,\n",
|
||||
" X = X_train, \n",
|
||||
" y = y_train,\n",
|
||||
" enable_subsampling=True,\n",
|
||||
" path = project_folder)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while.\n",
|
||||
"In this example, we specify `show_output = True` to print currently running iterations to the console."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"local_run = experiment.submit(automl_config, show_output = True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "rogehe"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -21,6 +21,13 @@ Notebook 6 is an Automated ML sample notebook for Classification.
|
||||
|
||||
Learn more about [how to use Azure Databricks as a development environment](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-environment#azure-databricks) for Azure Machine Learning service.
|
||||
|
||||
**Databricks as a Compute Target from AML Pipelines**
|
||||
You can use Azure Databricks as a compute target from [Azure Machine Learning Pipelines](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-ml-pipelines). Take a look at this notebook for details: [aml-pipelines-use-databricks-as-compute-target.ipynb](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks/databricks-as-remote-compute-target/aml-pipelines-use-databricks-as-compute-target.ipynb).
|
||||
|
||||
For more on SDK concepts, please refer to [notebooks](https://github.com/Azure/MachineLearningNotebooks).
|
||||
|
||||
**Please let us know your feedback.**
|
||||
|
||||
|
||||
|
||||

|
||||
@@ -15,7 +15,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -60,14 +60,10 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# import the Workspace class and check the azureml SDK version\n",
|
||||
"from azureml.core import Workspace\n",
|
||||
"\n",
|
||||
"ws = Workspace.from_config(auth = auth)\n",
|
||||
"print('Workspace name: ' + ws.name, \n",
|
||||
" 'Azure region: ' + ws.location, \n",
|
||||
" 'Subscription id: ' + ws.subscription_id, \n",
|
||||
" 'Resource group: ' + ws.resource_group, sep = '\\n')"
|
||||
"# Set auth to be used by workspace related APIs.\n",
|
||||
"# For automation or CI/CD ServicePrincipalAuthentication can be used.\n",
|
||||
"# https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.authentication.serviceprincipalauthentication?view=azure-ml-py\n",
|
||||
"auth = None"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -79,7 +75,7 @@
|
||||
"# import the Workspace class and check the azureml SDK version\n",
|
||||
"from azureml.core import Workspace\n",
|
||||
"\n",
|
||||
"ws = Workspace.from_config()\n",
|
||||
"ws = Workspace.from_config(auth = auth)\n",
|
||||
"print('Workspace name: ' + ws.name, \n",
|
||||
" 'Azure region: ' + ws.location, \n",
|
||||
" 'Subscription id: ' + ws.subscription_id, \n",
|
||||
@@ -344,15 +340,19 @@
|
||||
"source": [
|
||||
"dbutils.notebook.exit(\"success\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "pasha"
|
||||
},
|
||||
{
|
||||
"name": "wamartin"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
@@ -370,9 +370,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.0"
|
||||
"version": "3.6.6"
|
||||
},
|
||||
"name": "03.Build_model_runHistory",
|
||||
"name": "build-model-run-history-03",
|
||||
"notebookId": 3836944406456339
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -15,16 +15,16 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Please ensure you have run all previous notebooks in sequence before running this.\n",
|
||||
"\n",
|
||||
"Please Register Azure Container Instance(ACI) using Azure Portal: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services#portal in your subscription before using the SDK to deploy your ML model to ACI."
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
"Please ensure you have run all previous notebooks in sequence before running this.\n",
|
||||
"\n",
|
||||
"Please Register Azure Container Instance(ACI) using Azure Portal: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services#portal in your subscription before using the SDK to deploy your ML model to ACI."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -45,15 +45,10 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Workspace\n",
|
||||
"\n",
|
||||
"#'''\n",
|
||||
"ws = Workspace.from_config(auth = auth)\n",
|
||||
"print('Workspace name: ' + ws.name, \n",
|
||||
" 'Azure region: ' + ws.location, \n",
|
||||
" 'Subscription id: ' + ws.subscription_id, \n",
|
||||
" 'Resource group: ' + ws.resource_group, sep = '\\n')\n",
|
||||
"#'''"
|
||||
"# Set auth to be used by workspace related APIs.\n",
|
||||
"# For automation or CI/CD ServicePrincipalAuthentication can be used.\n",
|
||||
"# https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.authentication.serviceprincipalauthentication?view=azure-ml-py\n",
|
||||
"auth = None"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -63,18 +58,12 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Workspace\n",
|
||||
"import azureml.core\n",
|
||||
"\n",
|
||||
"# Check core SDK version number\n",
|
||||
"print(\"SDK version:\", azureml.core.VERSION)\n",
|
||||
"\n",
|
||||
"#'''\n",
|
||||
"ws = Workspace.from_config()\n",
|
||||
"ws = Workspace.from_config(auth = auth)\n",
|
||||
"print('Workspace name: ' + ws.name, \n",
|
||||
" 'Azure region: ' + ws.location, \n",
|
||||
" 'Subscription id: ' + ws.subscription_id, \n",
|
||||
" 'Resource group: ' + ws.resource_group, sep = '\\n')\n",
|
||||
"#'''"
|
||||
" 'Resource group: ' + ws.resource_group, sep = '\\n')"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -293,24 +282,21 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#comment to not delete the web service\n",
|
||||
"#myservice.delete()"
|
||||
"myservice.delete()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "pasha"
|
||||
},
|
||||
{
|
||||
"name": "wamartin"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
@@ -328,9 +314,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.0"
|
||||
"version": "3.6.6"
|
||||
},
|
||||
"name": "04.DeploytoACI",
|
||||
"name": "deploy-to-aci-04",
|
||||
"notebookId": 3836944406456376
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -0,0 +1,250 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Azure ML & Azure Databricks notebooks by Parashar Shah.\n",
|
||||
"\n",
|
||||
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This notebook uses image from ACI notebook for deploying to AKS."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import azureml.core\n",
|
||||
"\n",
|
||||
"# Check core SDK version number\n",
|
||||
"print(\"SDK version:\", azureml.core.VERSION)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Set auth to be used by workspace related APIs.\n",
|
||||
"# For automation or CI/CD ServicePrincipalAuthentication can be used.\n",
|
||||
"# https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.authentication.serviceprincipalauthentication?view=azure-ml-py\n",
|
||||
"auth = None"
|
||||
]
|
||||
},
|
||||
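For the automation scenario the comment mentions, `auth` can be a service principal instead of `None`. A minimal sketch with hypothetical placeholder IDs, following the linked `ServicePrincipalAuthentication` documentation:

```python
# Non-interactive authentication for automation or CI/CD (placeholders throughout).
from azureml.core.authentication import ServicePrincipalAuthentication

auth = ServicePrincipalAuthentication(tenant_id='<tenant-id>',
                                      service_principal_id='<application-id>',
                                      service_principal_password='<client-secret>')
```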
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core import Workspace\n",
|
||||
"\n",
|
||||
"ws = Workspace.from_config(auth = auth)\n",
|
||||
"print('Workspace name: ' + ws.name, \n",
|
||||
" 'Azure region: ' + ws.location, \n",
|
||||
" 'Subscription id: ' + ws.subscription_id, \n",
|
||||
" 'Resource group: ' + ws.resource_group, sep = '\\n')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# List images by ws\n",
|
||||
"\n",
|
||||
"from azureml.core.image import ContainerImage\n",
|
||||
"for i in ContainerImage.list(workspace = ws):\n",
|
||||
" print('{}(v.{} [{}]) stored at {} with build log {}'.format(i.name, i.version, i.creation_state, i.image_location, i.image_build_log_uri))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.image import Image\n",
|
||||
"myimage = Image(workspace=ws, name=\"aciws\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#create AKS compute\n",
|
||||
"#it may take 20-25 minutes to create a new cluster\n",
|
||||
"\n",
|
||||
"from azureml.core.compute import AksCompute, ComputeTarget\n",
|
||||
"\n",
|
||||
"# Use the default configuration (can also provide parameters to customize)\n",
|
||||
"prov_config = AksCompute.provisioning_configuration()\n",
|
||||
"\n",
|
||||
"aks_name = 'ps-aks-demo2' \n",
|
||||
"\n",
|
||||
"# Create the cluster\n",
|
||||
"aks_target = ComputeTarget.create(workspace = ws, \n",
|
||||
" name = aks_name, \n",
|
||||
" provisioning_configuration = prov_config)\n",
|
||||
"\n",
|
||||
"aks_target.wait_for_completion(show_output = True)\n",
|
||||
"\n",
|
||||
"print(aks_target.provisioning_state)\n",
|
||||
"print(aks_target.provisioning_errors)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.webservice import Webservice\n",
|
||||
"help( Webservice.deploy_from_image)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from azureml.core.webservice import Webservice, AksWebservice\n",
|
||||
"from azureml.core.image import ContainerImage\n",
|
||||
"\n",
|
||||
"#Set the web service configuration (using default here with app insights)\n",
|
||||
"aks_config = AksWebservice.deploy_configuration(enable_app_insights=True)\n",
|
||||
"\n",
|
||||
"#unique service name\n",
|
||||
"service_name ='ps-aks-service'\n",
|
||||
"\n",
|
||||
"# Webservice creation using single command, there is a variant to use image directly as well.\n",
|
||||
"aks_service = Webservice.deploy_from_image(\n",
|
||||
" workspace=ws, \n",
|
||||
" name=service_name,\n",
|
||||
" deployment_config = aks_config,\n",
|
||||
" image = myimage,\n",
|
||||
" deployment_target = aks_target\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"aks_service.wait_for_deployment(show_output=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"aks_service.deployment_status"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#for using the Web HTTP API \n",
|
||||
"print(aks_service.scoring_uri)\n",
|
||||
"print(aks_service.get_keys())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"#get the some sample data\n",
|
||||
"test_data_path = \"AdultCensusIncomeTest\"\n",
|
||||
"test = spark.read.parquet(test_data_path).limit(5)\n",
|
||||
"\n",
|
||||
"test_json = json.dumps(test.toJSON().collect())\n",
|
||||
"\n",
|
||||
"print(test_json)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#using data defined above predict if income is >50K (1) or <=50K (0)\n",
|
||||
"aks_service.run(input_data=test_json)"
|
||||
]
|
||||
},
|
||||
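The same scoring call can also be made over plain HTTP using the URI and keys printed earlier, which is how external clients would consume the service. A minimal sketch, assuming `aks_service` and `test_json` from the cells above:

```python
# Equivalent of aks_service.run(), but through the raw HTTP endpoint.
import requests

key = aks_service.get_keys()[0]  # primary key
headers = {'Content-Type': 'application/json',
           'Authorization': 'Bearer ' + key}
resp = requests.post(aks_service.scoring_uri, data=test_json, headers=headers)
print(resp.status_code, resp.text)
```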
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#comment to not delete the web service\n",
|
||||
"aks_service.delete()\n",
|
||||
"#image.delete()\n",
|
||||
"#model.delete()\n",
|
||||
"aks_target.delete() "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "pasha"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6",
|
||||
"language": "python",
|
||||
"name": "python36"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.6"
|
||||
},
|
||||
"name": "deploy-to-aks-existingimage-05",
|
||||
"notebookId": 1030695628045968
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 1
|
||||
}
|
||||
@@ -15,7 +15,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -42,7 +42,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Download AdultCensusIncome.csv from Azure CDN. This file has 32,561 rows.\n",
|
||||
"basedataurl = \"https://amldockerdatasets.azureedge.net\"\n",
|
||||
"dataurl = \"https://amldockerdatasets.azureedge.net/AdultCensusIncome.csv\"\n",
|
||||
"datafile = \"AdultCensusIncome.csv\"\n",
|
||||
"datafile_dbfs = os.path.join(\"/dbfs\", datafile)\n",
|
||||
"\n",
|
||||
@@ -50,7 +50,7 @@
|
||||
" print(\"found {} at {}\".format(datafile, datafile_dbfs))\n",
|
||||
"else:\n",
|
||||
" print(\"downloading {} to {}\".format(datafile, datafile_dbfs))\n",
|
||||
" urllib.request.urlretrieve(os.path.join(basedataurl, datafile), datafile_dbfs)"
|
||||
" urllib.request.urlretrieve(dataurl, datafile_dbfs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -146,15 +146,19 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"authors": [
|
||||
{
|
||||
"name": "pasha"
|
||||
},
|
||||
{
|
||||
"name": "wamartin"
|
||||
}
|
||||
],
|
||||
"kernelspec": {
|
||||
@@ -172,9 +176,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.0"
|
||||
"version": "3.6.6"
|
||||
},
|
||||
"name": "02.Ingest_data",
|
||||
"name": "ingest-data-02",
|
||||
"notebookId": 3836944406456362
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||