Compare commits

871 Commits

Author SHA1 Message Date
amlrelsa-ms
faffb3fef7 update samples from Release-135 as a part of SDK release 2022-04-04 20:15:29 +00:00
Harneet Virk
6c6227c403 Merge pull request #1729 from rezasherafat/rl_notebook_update
add docker subfolder to pong notebook directly.
2022-03-30 16:05:10 -07:00
Reza Sherafat
e3be364e7a add docker subfolder to pong notebook directly. 2022-03-30 22:47:50 +00:00
Harneet Virk
90e20a60e9 Merge pull request #1726 from Azure/release_update/Release-131
update samples from Release-131 as a part of  SDK release
2022-03-29 19:32:11 -07:00
amlrelsa-ms
33a4eacf1d update samples from Release-131 as a part of SDK release 2022-03-30 02:26:53 +00:00
Harneet Virk
e30b53fddc Merge pull request #1725 from Azure/release_update/Release-130
update samples from Release-130 as a part of  SDK release
2022-03-29 15:41:28 -07:00
amlrelsa-ms
95b0392ed2 update samples from Release-130 as a part of SDK release 2022-03-29 22:33:38 +00:00
Harneet Virk
796798cb49 Merge pull request #1724 from Azure/release_update/Release-129
update samples from Release-129 as a part of  1.40.0 SDK release
2022-03-29 12:18:30 -07:00
amlrelsa-ms
08b0ba7854 update samples from Release-129 as a part of SDK release 2022-03-29 18:28:35 +00:00
Harneet Virk
ceaf82acc6 Merge pull request #1720 from Azure/release_update/Release-128
update samples from Release-128 as a part of  SDK release
2022-03-21 17:56:06 -07:00
amlrelsa-ms
dadc93cfe5 update samples from Release-128 as a part of SDK release 2022-03-22 00:51:19 +00:00
Harneet Virk
c7076bf95c Merge pull request #1715 from Azure/release_update/Release-127
update samples from Release-127 as a part of  SDK release
2022-03-15 17:02:41 -07:00
amlrelsa-ms
ebdffd5626 update samples from Release-127 as a part of SDK release 2022-03-16 00:00:00 +00:00
Harneet Virk
d123880562 Merge pull request #1711 from Azure/release_update/Release-126
update samples from Release-126 as a part of  SDK release
2022-03-11 16:53:06 -08:00
amlrelsa-ms
4864e8ea60 update samples from Release-126 as a part of SDK release 2022-03-12 00:47:46 +00:00
Harneet Virk
c86db0d7fd Merge pull request #1707 from Azure/release_update/Release-124
update samples from Release-124 as a part of  SDK release
2022-03-08 09:15:45 -08:00
amlrelsa-ms
ccfbbb3b14 update samples from Release-124 as a part of SDK release 2022-03-08 00:37:35 +00:00
Harneet Virk
c42ba64b15 Merge pull request #1700 from Azure/release_update/Release-123
update samples from Release-123 as a part of  SDK release
2022-03-01 16:33:02 -08:00
amlrelsa-ms
6d8bf32243 update samples from Release-123 as a part of SDK release 2022-02-28 17:20:57 +00:00
Harneet Virk
9094da4085 Merge pull request #1684 from Azure/release_update/Release-122
update samples from Release-122 as a part of  SDK release
2022-02-14 11:38:49 -08:00
amlrelsa-ms
ebf9d2855c update samples from Release-122 as a part of SDK release 2022-02-14 19:24:27 +00:00
v-pbavanari
1bbd78eb33 update samples from Release-121 as a part of SDK release (#1678)
Co-authored-by: amlrelsa-ms <amlrelsa@microsoft.com>
2022-02-02 12:28:49 -05:00
v-pbavanari
77f5a69e04 update samples from Release-120 as a part of SDK release (#1676)
Co-authored-by: amlrelsa-ms <amlrelsa@microsoft.com>
2022-01-28 12:51:49 -05:00
raja7592
ce82af2ab0 update samples from Release-118 as a part of SDK release (#1673)
Co-authored-by: amlrelsa-ms <amlrelsa@microsoft.com>
2022-01-24 20:07:35 -05:00
Harneet Virk
2a2d2efa17 Merge pull request #1658 from Azure/release_update/Release-117
Update samples from Release sdk 1.37.0 as a part of  SDK release
2021-12-13 10:36:08 -08:00
amlrelsa-ms
dd494e9cac update samples from Release-117 as a part of SDK release 2021-12-13 16:57:22 +00:00
Harneet Virk
352adb7487 Merge pull request #1629 from Azure/release_update/Release-116
Update samples from Release as a part of SDK release 1.36.0
2021-11-08 09:48:25 -08:00
amlrelsa-ms
aebe34b4e8 update samples from Release-116 as a part of SDK release 2021-11-08 16:09:41 +00:00
Harneet Virk
c7e1241e20 Merge pull request #1612 from Azure/release_update/Release-115
Update samples from Release-115 as a part of  SDK release
2021-10-11 12:01:59 -07:00
amlrelsa-ms
6529298c24 update samples from Release-115 as a part of SDK release 2021-10-11 16:09:57 +00:00
Harneet Virk
e2dddfde85 Merge pull request #1601 from Azure/release_update/Release-114
update samples from Release-114 as a part of  SDK release
2021-09-29 14:21:59 -07:00
amlrelsa-ms
36d96f96ec update samples from Release-114 as a part of SDK release 2021-09-29 20:16:51 +00:00
Harneet Virk
7ebcfea5a3 Merge pull request #1600 from Azure/release_update/Release-113
update samples from Release-113 as a part of  SDK release
2021-09-28 12:53:57 -07:00
amlrelsa-ms
b20bfed33a update samples from Release-113 as a part of SDK release 2021-09-28 19:44:58 +00:00
Harneet Virk
a66a92e338 Merge pull request #1597 from Azure/release_update/Release-112
update samples from Release-112 as a part of  SDK release
2021-09-24 14:44:53 -07:00
amlrelsa-ms
c56c2c3525 update samples from Release-112 as a part of SDK release 2021-09-24 21:40:44 +00:00
Harneet Virk
4cac072fa4 Merge pull request #1588 from Azure/release_update/Release-111
Update samples from Release-111 as a part of SDK 1.34.0 release
2021-09-09 09:02:38 -07:00
amlrelsa-ms
aeab6b3e28 update samples from Release-111 as a part of SDK release 2021-09-07 17:32:15 +00:00
Harneet Virk
015e261f29 Merge pull request #1581 from Azure/release_update/Release-110
update samples from Release-110 as a part of  SDK release
2021-08-20 09:21:08 -07:00
amlrelsa-ms
d2a423dde9 update samples from Release-110 as a part of SDK release 2021-08-20 00:28:42 +00:00
Harneet Virk
3ecbfd6532 Merge pull request #1578 from Azure/release_update/Release-109
update samples from Release-109 as a part of  SDK release
2021-08-18 18:16:31 -07:00
amlrelsa-ms
02ecb2d755 update samples from Release-109 as a part of SDK release 2021-08-18 22:07:12 +00:00
Harneet Virk
122df6e846 Merge pull request #1576 from Azure/release_update/Release-108
update samples from Release-108 as a part of  SDK release
2021-08-18 09:47:34 -07:00
amlrelsa-ms
7d6a0a2051 update samples from Release-108 as a part of SDK release 2021-08-18 00:33:54 +00:00
Harneet Virk
6cc8af80a2 Merge pull request #1565 from Azure/release_update/Release-107
update samples from Release-107 as a part of  SDK release 1.33
2021-08-02 13:14:30 -07:00
amlrelsa-ms
f61898f718 update samples from Release-107 as a part of SDK release 2021-08-02 18:01:38 +00:00
Harneet Virk
5cb465171e Merge pull request #1556 from Azure/update-spark-notebook
updating spark notebook
2021-07-26 17:09:42 -07:00
Shivani Santosh Sambare
0ce37dd18f updating spark notebook 2021-07-26 15:51:54 -07:00
Cody
d835b183a5 update README.md (#1552) 2021-07-15 10:43:22 -07:00
Cody
d3cafebff9 add code of conduct (#1551) 2021-07-15 08:08:44 -07:00
Harneet Virk
354b194a25 Merge pull request #1543 from Azure/release_update/Release-106
update samples from Release-106 as a part of  SDK release
2021-07-06 11:05:55 -07:00
amlrelsa-ms
a52d67bb84 update samples from Release-106 as a part of SDK release 2021-07-06 17:17:27 +00:00
Harneet Virk
421ea3d920 Merge pull request #1530 from Azure/release_update/Release-105
update samples from Release-105 as a part of  SDK release
2021-06-25 09:58:05 -07:00
amlrelsa-ms
24f53f1aa1 update samples from Release-105 as a part of SDK release 2021-06-24 23:00:13 +00:00
Harneet Virk
6fc5d11de2 Merge pull request #1518 from Azure/release_update/Release-104
update samples from Release-104 as a part of  SDK release
2021-06-21 10:29:53 -07:00
amlrelsa-ms
d17547d890 update samples from Release-104 as a part of SDK release 2021-06-21 17:16:09 +00:00
Harneet Virk
928e0d4327 Merge pull request #1510 from Azure/release_update/Release-103
update samples from Release-103 as a part of  SDK release
2021-06-14 10:33:34 -07:00
amlrelsa-ms
05327cfbb9 update samples from Release-103 as a part of SDK release 2021-06-14 17:30:30 +00:00
Harneet Virk
8f7717014b Merge pull request #1506 from Azure/release_update/Release-102
update samples from Release-102 as a part of  SDK release 1.30.0
2021-06-07 11:14:02 -07:00
amlrelsa-ms
a47e50b79a update samples from Release-102 as a part of SDK release 2021-06-07 17:34:51 +00:00
Harneet Virk
8f89d88def Merge pull request #1505 from Azure/release_update/Release-101
update samples from Release-101 as a part of  SDK release
2021-06-04 19:54:53 -07:00
amlrelsa-ms
ec97207bb1 update samples from Release-101 as a part of SDK release 2021-06-05 02:54:13 +00:00
Harneet Virk
a2d20b0f47 Merge pull request #1493 from Azure/release_update/Release-98
update samples from Release-98 as a part of  SDK release
2021-05-28 08:04:58 -07:00
amlrelsa-ms
8180cebd75 update samples from Release-98 as a part of SDK release 2021-05-28 03:44:25 +00:00
Harneet Virk
700ab2d782 Merge pull request #1489 from Azure/release_update/Release-97
update samples from Release-97 as a part of  SDK  1.29.0 release
2021-05-25 07:43:14 -07:00
amlrelsa-ms
ec9a5a061d update samples from Release-97 as a part of SDK release 2021-05-24 17:39:23 +00:00
Harneet Virk
467630f955 Merge pull request #1466 from Azure/release_update/Release-96
update samples from Release-96 as a part of  SDK release 1.28.0
2021-05-10 22:48:19 -07:00
amlrelsa-ms
eac6b69bae update samples from Release-96 as a part of SDK release 2021-05-10 18:38:34 +00:00
Harneet Virk
441a5b0141 Merge pull request #1440 from Azure/release_update/Release-95
update samples from Release-95 as a part of  SDK 1.27 release
2021-04-19 11:51:21 -07:00
amlrelsa-ms
70902df6da update samples from Release-95 as a part of SDK release 2021-04-19 18:42:58 +00:00
nikAI77
6f893ff0b4 update samples from Release-94 as a part of SDK release (#1418)
Co-authored-by: amlrelsa-ms <amlrelsa@microsoft.com>
2021-04-06 12:36:12 -04:00
Harneet Virk
bda592a236 Merge pull request #1406 from Azure/release_update/Release-93
update samples from Release-93 as a part of  SDK release
2021-03-24 11:25:00 -07:00
amlrelsa-ms
8b32e8d5ad update samples from Release-93 as a part of SDK release 2021-03-24 16:45:36 +00:00
Harneet Virk
54a065c698 Merge pull request #1386 from yunjie-hub/master
Add synapse sample notebooks
2021-03-09 18:05:10 -08:00
yunjie-hub
b9718678b3 Add files via upload 2021-03-09 18:02:27 -08:00
Harneet Virk
3fa40d2c6d Merge pull request #1385 from Azure/release_update/Release-92
update samples from Release-92 as a part of  SDK release
2021-03-09 17:51:27 -08:00
amlrelsa-ms
883e4a4c59 update samples from Release-92 as a part of SDK release 2021-03-10 01:48:54 +00:00
Harneet Virk
e90826b331 Merge pull request #1384 from yunjie-hub/master
Add synapse sample notebooks
2021-03-09 12:40:33 -08:00
yunjie-hub
ac04172f6d Add files via upload 2021-03-09 12:38:23 -08:00
Harneet Virk
8c0000beb4 Merge pull request #1382 from Azure/release_update/Release-91
update samples from Release-91 as a part of  SDK release
2021-03-08 21:43:10 -08:00
amlrelsa-ms
35287ab0d8 update samples from Release-91 as a part of SDK release 2021-03-09 05:36:08 +00:00
Harneet Virk
3fe4f8b038 Merge pull request #1375 from Azure/release_update/Release-90
update samples from Release-90 as a part of  SDK release
2021-03-01 09:15:14 -08:00
amlrelsa-ms
1722678469 update samples from Release-90 as a part of SDK release 2021-03-01 17:13:25 +00:00
Harneet Virk
17da7e8706 Merge pull request #1364 from Azure/release_update/Release-89
update samples from Release-89 as a part of  SDK release
2021-02-23 17:27:27 -08:00
amlrelsa-ms
d2e7213ff3 update samples from Release-89 as a part of SDK release 2021-02-24 01:26:17 +00:00
mx-iao
882cb76e8a Merge pull request #1361 from Azure/minxia/distr-pytorch
Update distributed pytorch example
2021-02-23 12:07:20 -08:00
mx-iao
37f37a46c1 Delete pytorch_mnist.py 2021-02-23 11:19:39 -08:00
mx-iao
0cd1412421 Delete distributed-pytorch-with-nccl-gloo.ipynb 2021-02-23 11:19:33 -08:00
mx-iao
c3ae9f00f6 Add files via upload 2021-02-23 11:19:02 -08:00
mx-iao
11b02c650c Rename how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-distributeddataparallel.ipynb to how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-distributeddataparallel/distributed-pytorch-with-distributeddataparallel.ipynb 2021-02-23 11:18:43 -08:00
mx-iao
606048c71f Add files via upload 2021-02-23 11:18:10 -08:00
Harneet Virk
cb1c354d44 Merge pull request #1353 from Azure/release_update/Release-88
update samples from Release-88 as a part of  SDK release 1.23.0
2021-02-22 11:49:02 -08:00
amlrelsa-ms
c868fff5a2 update samples from Release-88 as a part of SDK release 2021-02-22 19:23:04 +00:00
Harneet Virk
bc4e6611c4 Merge pull request #1342 from Azure/release_update/Release-87
update samples from Release-87 as a part of  SDK release
2021-02-16 18:43:49 -08:00
amlrelsa-ms
0a58881b70 update samples from Release-87 as a part of SDK release 2021-02-17 02:13:51 +00:00
Harneet Virk
2544e85c5f Merge pull request #1333 from Azure/release_update/Release-85
SDK release 1.22.0
2021-02-10 07:59:22 -08:00
amlrelsa-ms
7fe27501d1 update samples from Release-85 as a part of SDK release 2021-02-10 15:27:28 +00:00
Harneet Virk
624c46e7f9 Merge pull request #1321 from Azure/release_update/Release-84
update samples from Release-84 as a part of  SDK release
2021-02-05 19:10:29 -08:00
amlrelsa-ms
40fbadd85c update samples from Release-84 as a part of SDK release 2021-02-06 03:09:22 +00:00
Harneet Virk
0c1fc25542 Merge pull request #1317 from Azure/release_update/Release-83
update samples from Release-83 as a part of  SDK release
2021-02-03 14:31:31 -08:00
amlrelsa-ms
e8e1357229 update samples from Release-83 as a part of SDK release 2021-02-03 05:22:32 +00:00
Harneet Virk
ad44f8fa2b Merge pull request #1313 from zronaghi/contrib-rapids
Update RAPIDS README
2021-01-29 10:33:47 -08:00
Zahra Ronaghi
ee63e759f0 Update RAPIDS README 2021-01-28 22:19:27 -06:00
Harneet Virk
b81d97ebbf Merge pull request #1303 from Azure/release_update/Release-82
update samples from Release-82 as a part of  SDK release 1.21.0
2021-01-25 11:09:12 -08:00
amlrelsa-ms
249fb6bbb5 update samples from Release-82 as a part of SDK release 2021-01-25 19:03:14 +00:00
Harneet Virk
cda1f3e4cf Merge pull request #1289 from Azure/release_update/Release-81
update samples from Release-81 as a part of  SDK release
2021-01-11 12:52:48 -07:00
amlrelsa-ms
1d05efaac2 update samples from Release-81 as a part of SDK release 2021-01-11 19:35:54 +00:00
Harneet Virk
3adebd1127 Merge pull request #1262 from Azure/release_update/Release-80
update samples from Release-80 as a part of  SDK release
2020-12-11 16:49:33 -08:00
amlrelsa-ms
a6817063df update samples from Release-80 as a part of SDK release 2020-12-12 00:45:42 +00:00
Harneet Virk
a79f8c254a Merge pull request #1255 from Azure/release_update/Release-79
update samples from Release-79 as a part of  SDK release
2020-12-07 11:11:32 -08:00
amlrelsa-ms
fb4f287458 update samples from Release-79 as a part of SDK release 2020-12-07 19:09:59 +00:00
Harneet Virk
41366a4af0 Merge pull request #1238 from Azure/release_update/Release-78
update samples from Release-78 as a part of  SDK release
2020-11-11 13:00:22 -08:00
amlrelsa-ms
74deb14fac update samples from Release-78 as a part of SDK release 2020-11-11 19:32:32 +00:00
Harneet Virk
4ed1d445ae Merge pull request #1236 from Azure/release_update/Release-77
update samples from Release-77 as a part of  SDK release
2020-11-10 10:52:23 -08:00
amlrelsa-ms
b5c15db0b4 update samples from Release-77 as a part of SDK release 2020-11-10 18:46:23 +00:00
Harneet Virk
91d43bade6 Merge pull request #1235 from Azure/release_update_stablev2/Release-44
update samples from Release-44 as a part of 1.18.0 SDK stable release
2020-11-10 08:52:24 -08:00
amlrelsa-ms
bd750f5817 update samples from Release-44 as a part of 1.18.0 SDK stable release 2020-11-10 03:42:03 +00:00
mx-iao
637bcc5973 Merge pull request #1229 from Azure/lostmygithubaccount-patch-3
Update README.md
2020-11-03 15:18:37 -10:00
Cody
ba741fb18d Update README.md 2020-11-03 17:16:28 -08:00
Harneet Virk
ac0ad8d487 Merge pull request #1228 from Azure/release_update/Release-76
update samples from Release-76 as a part of  SDK release
2020-11-03 16:12:15 -08:00
amlrelsa-ms
5019ad6c5a update samples from Release-76 as a part of SDK release 2020-11-03 22:31:02 +00:00
Cody
41a2ebd2b3 Merge pull request #1226 from Azure/lostmygithubaccount-patch-3
Update README.md
2020-11-03 11:25:10 -08:00
Cody
53e3283d1d Update README.md 2020-11-03 11:17:41 -08:00
Harneet Virk
ba9c4c5465 Merge pull request #1225 from Azure/release_update/Release-75
update samples from Release-75 as a part of  SDK release
2020-11-03 11:11:11 -08:00
amlrelsa-ms
a6c65f00ec update samples from Release-75 as a part of SDK release 2020-11-03 19:07:12 +00:00
Cody
95072eabc2 Merge pull request #1221 from Azure/lostmygithubaccount-patch-2
Update README.md
2020-11-02 11:52:05 -08:00
Cody
12905ef254 Update README.md 2020-11-02 06:59:44 -08:00
Harneet Virk
4cf56eee91 Merge pull request #1217 from Azure/release_update/Release-74
update samples from Release-74 as a part of  SDK release
2020-10-30 17:27:02 -07:00
amlrelsa-ms
d345ff6c37 update samples from Release-74 as a part of SDK release 2020-10-30 22:20:10 +00:00
Harneet Virk
560dcac0a0 Merge pull request #1214 from Azure/release_update/Release-73
update samples from Release-73 as a part of  SDK release
2020-10-29 23:38:02 -07:00
amlrelsa-ms
322087a58c update samples from Release-73 as a part of SDK release 2020-10-30 06:37:05 +00:00
Harneet Virk
e255c000ab Merge pull request #1211 from Azure/release_update/Release-72
update samples from Release-72 as a part of  SDK release
2020-10-28 14:30:50 -07:00
amlrelsa-ms
7871e37ec0 update samples from Release-72 as a part of SDK release 2020-10-28 21:24:40 +00:00
Cody
58e584e7eb Update README.md (#1209) 2020-10-27 21:00:38 -04:00
Harneet Virk
1b0d75cb45 Merge pull request #1206 from Azure/release_update/Release-71
update samples from Release-71 as a part of  SDK 1.17.0 release
2020-10-26 22:29:48 -07:00
amlrelsa-ms
5c38272fb4 update samples from Release-71 as a part of SDK release 2020-10-27 04:11:39 +00:00
Harneet Virk
e026c56f19 Merge pull request #1200 from Azure/cody/add-new-repo-link
update readme
2020-10-22 10:50:03 -07:00
Cody
4aad830f1c update readme 2020-10-22 09:13:20 -07:00
Harneet Virk
c1b125025a Merge pull request #1198 from harneetvirk/master
Fixing/Removing broken links
2020-10-20 12:30:46 -07:00
Harneet Virk
9f364f7638 Update README.md 2020-10-20 12:30:03 -07:00
Harneet Virk
4beb749a76 Fixing/Removing the broken links 2020-10-20 12:28:45 -07:00
Harneet Virk
04fe8c4580 Merge pull request #1191 from savitamittal1/patch-4
Update README.md
2020-10-17 08:48:20 -07:00
Harneet Virk
498018451a Merge pull request #1193 from savitamittal1/patch-6
Update automl-databricks-local-with-deployment.ipynb
2020-10-17 08:47:54 -07:00
savitamittal1
04305e33f0 Update automl-databricks-local-with-deployment.ipynb 2020-10-16 23:58:12 -07:00
savitamittal1
d22e76d5e0 Update README.md 2020-10-16 23:53:41 -07:00
Harneet Virk
d71c482f75 Merge pull request #1184 from Azure/release_update/Release-70
update samples from Release-70 as a part of  SDK 1.16.0 release
2020-10-12 22:24:25 -07:00
amlrelsa-ms
5775f8a78f update samples from Release-70 as a part of SDK release 2020-10-13 05:19:49 +00:00
Cody
aae823ecd8 Merge pull request #1181 from samuel100/quickstart-notebook
quickstart nb added
2020-10-09 10:54:32 -07:00
Sam Kemp
f1126e07f9 quickstart nb added 2020-10-09 10:35:19 +01:00
Harneet Virk
0e4b27a233 Merge pull request #1171 from savitamittal1/patch-2
Update automl-databricks-local-01.ipynb
2020-10-02 09:41:14 -07:00
Harneet Virk
0a3d5f68a1 Merge pull request #1172 from savitamittal1/patch-3
Update automl-databricks-local-with-deployment.ipynb
2020-10-02 09:41:02 -07:00
savitamittal1
a6fe2affcb Update automl-databricks-local-with-deployment.ipynb
fixed link to readme
2020-10-01 19:38:11 -07:00
savitamittal1
ce469ddf6a Update automl-databricks-local-01.ipynb
fixed link for readme
2020-10-01 19:36:06 -07:00
mx-iao
9fe459be79 Merge pull request #1166 from Azure/minxia/patch
patch for resume training notebook
2020-09-29 17:30:24 -07:00
mx-iao
89c35c8ed6 Update train-tensorflow-resume-training.ipynb 2020-09-29 17:28:17 -07:00
mx-iao
33168c7f5d Update train-tensorflow-resume-training.ipynb 2020-09-29 17:27:23 -07:00
Cody
1d0766bd46 Merge pull request #1165 from samuel100/quickstart-add
quickstart added
2020-09-29 13:13:36 -07:00
Sam Kemp
9903e56882 quickstart added 2020-09-29 21:09:55 +01:00
Harneet Virk
a039166b90 Merge pull request #1162 from Azure/release_update/Release-69
update samples from Release-69 as a part of  SDK 1.15.0 release
2020-09-28 23:54:05 -07:00
amlrelsa-ms
4e4bf48013 update samples from Release-69 as a part of SDK release 2020-09-29 06:48:31 +00:00
Harneet Virk
0a2408300a Merge pull request #1158 from Azure/release_update/Release-68
update samples from Release-68 as a part of  SDK release
2020-09-25 09:23:59 -07:00
amlrelsa-ms
d99c3f5470 update samples from Release-68 as a part of SDK release 2020-09-25 16:10:59 +00:00
Harneet Virk
3f62fe7d47 Merge pull request #1157 from Azure/release_update/Release-67
update samples from Release-67 as a part of  SDK release
2020-09-23 15:51:20 -07:00
amlrelsa-ms
6059c1dc0c update samples from Release-67 as a part of SDK release 2020-09-23 22:48:56 +00:00
Harneet Virk
8e2032fcde Merge pull request #1153 from Azure/release_update/Release-66
update samples from Release-66 as a part of  SDK release
2020-09-21 16:04:23 -07:00
amlrelsa-ms
824d844cd7 update samples from Release-66 as a part of SDK release 2020-09-21 23:02:01 +00:00
Harneet Virk
bb1c7db690 Merge pull request #1148 from Azure/release_update/Release-65
update samples from Release-65 as a part of  SDK release
2020-09-16 18:23:12 -07:00
amlrelsa-ms
8dad09a42f update samples from Release-65 as a part of SDK release 2020-09-17 01:14:32 +00:00
Harneet Virk
db2bf8ae93 Merge pull request #1137 from Azure/release_update/Release-64
update samples from Release-64 as a part of  SDK release
2020-09-09 15:31:51 -07:00
amlrelsa-ms
820c09734f update samples from Release-64 as a part of SDK release 2020-09-09 22:30:45 +00:00
Cody
a2a33c70a6 Merge pull request #1123 from oliverw1/patch-2
docs: bring docs in line with code
2020-09-02 11:12:31 -07:00
Cody
2ff791968a Merge pull request #1122 from oliverw1/patch-1
docs: Move unintended side columns below the main rows
2020-09-02 11:11:58 -07:00
Harneet Virk
7186127804 Merge pull request #1128 from Azure/release_update/Release-63
update samples from Release-63 as a part of  SDK release
2020-08-31 13:23:08 -07:00
amlrelsa-ms
b01c52bfd6 update samples from Release-63 as a part of SDK release 2020-08-31 20:00:07 +00:00
Oliver W
28be7bcf58 docs: bring docs in line with code
A non-existant name was being referred to, which only serves confusion.
2020-08-28 10:24:24 +02:00
Oliver W
37a9350fde Properly format markdown table
Remove the unintended two columns that appeared on the right side
2020-08-28 09:29:46 +02:00
Harneet Virk
5080053a35 Merge pull request #1120 from Azure/release_update/Release-62
update samples from Release-62 as a part of  SDK release
2020-08-27 17:12:05 -07:00
amlrelsa-ms
3c02102691 update samples from Release-62 as a part of SDK release 2020-08-27 23:28:05 +00:00
Sheri Gilley
07e1676762 Merge pull request #1010 from GinSiuCheng/patch-1
Include additional details on user authentication
2020-08-25 11:45:58 -05:00
Sheri Gilley
919a3c078f fix code blocks 2020-08-25 11:13:24 -05:00
Sheri Gilley
9b53c924ed add code block for better formatting 2020-08-25 11:09:56 -05:00
Sheri Gilley
04ad58056f fix quotes 2020-08-25 11:06:18 -05:00
Sheri Gilley
576bf386b5 fix quotes 2020-08-25 11:05:25 -05:00
Cody
7e62d1cfd6 Merge pull request #891 from Fokko/patch-1
Don't print the access token
2020-08-22 18:28:33 -07:00
Cody
ec67a569af Merge pull request #804 from omartin2010/patch-3
typo
2020-08-17 14:35:55 -07:00
Cody
6d1e80bcef Merge pull request #1031 from hyoshioka0128/patch-1
Typo "Mircosoft"→"Microsoft"
2020-08-17 14:32:44 -07:00
mx-iao
db00d9ad3c Merge pull request #1100 from Azure/lostmygithubaccount-patch-1
fix minor typo in how-to-use-azureml/README.md
2020-08-17 14:30:18 -07:00
Harneet Virk
d33c75abc3 Merge pull request #1104 from Azure/release_update/Release-61
update samples from Release-61 as a part of  SDK release
2020-08-17 10:59:39 -07:00
amlrelsa-ms
d0dc4836ae update samples from Release-61 as a part of SDK release 2020-08-17 17:45:26 +00:00
Cody
982f8fcc1d Update README.md 2020-08-14 15:25:39 -07:00
Akshaya Annavajhala
79739b5e1b Remove broken links (#1095)
* Remove broken links

* Update README.md
2020-08-10 19:35:41 -04:00
Harneet Virk
aac4fa1fb9 Merge pull request #1081 from Azure/release_update/Release-60
update samples from Release-60 as a part of  SDK 1.11.0 release
2020-08-04 00:04:38 -07:00
amlrelsa-ms
5b684070e1 update samples from Release-60 as a part of SDK release 2020-08-04 06:12:06 +00:00
Harneet Virk
0ab8b141ee Merge pull request #1078 from Azure/release_update/Release-59
update samples from Release-59 as a part of  SDK release
2020-07-31 10:52:22 -07:00
amlrelsa-ms
b9ef23ad4b update samples from Release-59 as a part of SDK release 2020-07-31 17:23:17 +00:00
Harneet Virk
7e2c1ca152 Merge pull request #1063 from Azure/release_update/Release-58
update samples from Release-58 as a part of  SDK release
2020-07-20 13:46:37 -07:00
amlrelsa-ms
d096535e48 update samples from Release-58 as a part of SDK release 2020-07-20 20:44:42 +00:00
Harneet Virk
f80512a6db Merge pull request #1056 from wchill/wchill-patch-1
Update README.md with KeyError: brand workaround
2020-07-15 10:22:18 -07:00
Eric Ahn
b54111620e Update README.md 2020-07-14 17:47:23 -07:00
Harneet Virk
8dd52ee2df Merge pull request #1036 from Azure/release_update/Release-57
update samples from Release-57 as a part of  SDK release
2020-07-06 15:06:14 -07:00
amlrelsa-ms
6c629f1eda update samples from Release-57 as a part of SDK release 2020-07-06 22:05:24 +00:00
Hiroshi Yoshioka
9c32ca9db5 Typo "Mircosoft"→"Microsoft"
https://docs.microsoft.com/en-us/samples/azure/machinelearningnotebooks/azure-machine-learning-service-example-notebooks/
2020-06-29 12:21:23 +09:00
Harneet Virk
053efde8c9 Merge pull request #1022 from Azure/release_update/Release-56
update samples from Release-56 as a part of  SDK release
2020-06-22 11:12:31 -07:00
amlrelsa-ms
5189691f06 update samples from Release-56 as a part of SDK release 2020-06-22 18:11:40 +00:00
Gin
745b4f0624 Include additional details on user authentication
Additional details should be included for user authentication esp. for enterprise users who may have more than one single aad tenant linked to a user.
2020-06-13 21:24:56 -04:00
Harneet Virk
fb900916e3 Update README.md 2020-06-11 13:26:04 -07:00
Harneet Virk
738347f3da Merge pull request #996 from Azure/release_update/Release-55
update samples from Release-55 as a part of  SDK release
2020-06-08 15:31:35 -07:00
amlrelsa-ms
34a67c1f8b update samples from Release-55 as a part of SDK release 2020-06-08 22:28:25 +00:00
Harneet Virk
34898828be Merge pull request #992 from Azure/release_update/Release-54
update samples from Release-54 as a part of  SDK release
2020-06-02 14:42:02 -07:00
vizhur
a7c3a0fdb8 update samples from Release-54 as a part of SDK release 2020-06-02 21:34:10 +00:00
Harneet Virk
6d11cdfa0a Merge pull request #984 from Azure/release_update/Release-53
update samples from Release-53 as a part of  SDK release
2020-05-26 19:59:58 -07:00
vizhur
11e8ed2bab update samples from Release-53 as a part of SDK release 2020-05-27 02:45:07 +00:00
Harneet Virk
12c06a4168 Merge pull request #978 from ahcan76/patch-1
Fix image paths in tutorial-1st-experiment-sdk-train.ipynb
2020-05-18 12:58:21 -07:00
ahcan76
1f75dc9725 Update tutorial-1st-experiment-sdk-train.ipynb
Fix the image path
2020-05-18 22:40:54 +03:00
Harneet Virk
1a1a42d525 Merge pull request #977 from Azure/release_update/Release-52
update samples from Release-52 as a part of  SDK release
2020-05-18 12:22:48 -07:00
vizhur
879a272a8d update samples from Release-52 as a part of SDK release 2020-05-18 19:21:05 +00:00
Harneet Virk
bc65bde097 Merge pull request #971 from Azure/release_update/Release-51
update samples from Release-51 as a part of  SDK release
2020-05-13 22:17:45 -07:00
vizhur
690bdfbdbe update samples from Release-51 as a part of SDK release 2020-05-14 05:03:47 +00:00
Harneet Virk
3c02bd8782 Merge pull request #967 from Azure/release_update/Release-50
update samples from Release-50 as a part of  SDK release
2020-05-12 19:57:40 -07:00
vizhur
5c14610a1c update samples from Release-50 as a part of SDK release 2020-05-13 02:45:40 +00:00
Harneet Virk
4e3afae6fb Merge pull request #965 from Azure/release_update/Release-49
update samples from Release-49 as a part of  SDK release
2020-05-11 19:25:28 -07:00
vizhur
a2144aa083 update samples from Release-49 as a part of SDK release 2020-05-12 02:24:34 +00:00
Harneet Virk
0e6334178f Merge pull request #963 from Azure/release_update/Release-46
update samples from Release-46 as a part of  SDK release
2020-05-11 14:49:34 -07:00
vizhur
4ec9178d22 update samples from Release-46 as a part of SDK release 2020-05-11 21:48:31 +00:00
Harneet Virk
2aa7c53b0c Merge pull request #962 from Azure/release_update_stablev2/Release-11
update samples from Release-11 as a part of 1.5.0 SDK stable release
2020-05-11 12:42:32 -07:00
vizhur
553fa43e17 update samples from Release-11 as a part of 1.5.0 SDK stable release 2020-05-11 18:59:22 +00:00
Harneet Virk
e98131729e Merge pull request #949 from Azure/release_update_stablev2/Release-8
update samples from Release-8 as a part of 1.4.0 SDK stable release
2020-04-27 11:00:37 -07:00
vizhur
fd2b09e2c2 update samples from Release-8 as a part of 1.4.0 SDK stable release 2020-04-27 17:44:41 +00:00
Harneet Virk
7970209069 Merge pull request #930 from Azure/release_update/Release-44
update samples from Release-44 as a part of  SDK release
2020-04-17 12:46:29 -07:00
vizhur
24f8651bb5 update samples from Release-44 as a part of SDK release 2020-04-17 19:45:37 +00:00
Harneet Virk
b881f78e46 Merge pull request #918 from Azure/release_update_stablev2/Release-6
update samples from Release-6 as a part of 1.3.0 SDK stable release
2020-04-13 09:23:38 -07:00
vizhur
057e22b253 update samples from Release-6 as a part of 1.3.0 SDK stable release 2020-04-13 16:22:23 +00:00
Fokko Driesprong
119fd0a8f6 Don't print the access token
That's never a good idea, no exceptions :)
2020-03-31 08:14:05 +02:00
Harneet Virk
c520bd1d41 Merge pull request #884 from Azure/release_update/Release-43
update samples from Release-43 as a part of  SDK release
2020-03-23 16:49:27 -07:00
vizhur
d3f1212440 update samples from Release-43 as a part of SDK release 2020-03-23 23:39:45 +00:00
Harneet Virk
b95a65eef4 Merge pull request #883 from Azure/release_update_stablev2/Release-3
update samples from Release-3 as a part of 1.2.0 SDK stable release
2020-03-23 16:21:53 -07:00
vizhur
2218af619f update samples from Release-3 as a part of 1.2.0 SDK stable release 2020-03-23 23:11:53 +00:00
Harneet Virk
0401128638 Merge pull request #878 from Azure/release_update/Release-42
update samples from Release-42 as a part of  SDK release
2020-03-20 11:14:02 -07:00
vizhur
59fcb54998 update samples from Release-42 as a part of SDK release 2020-03-20 18:10:08 +00:00
Harneet Virk
e0ea99a6bb Merge pull request #862 from Azure/release_update/Release-41
update samples from Release-41 as a part of  SDK release
2020-03-13 14:57:58 -07:00
vizhur
b06f5ce269 update samples from Release-41 as a part of SDK release 2020-03-13 21:57:04 +00:00
Harneet Virk
ed0ce9e895 Merge pull request #856 from Azure/release_update/Release-40
update samples from Release-40 as a part of  SDK release
2020-03-12 12:28:18 -07:00
vizhur
71053d705b update samples from Release-40 as a part of SDK release 2020-03-12 19:25:26 +00:00
Harneet Virk
77f98bf75f Merge pull request #852 from Azure/release_update_stable/Release-6
update samples from Release-6 as a part of 1.1.5 SDK stable release
2020-03-11 15:37:59 -06:00
vizhur
e443fd1342 update samples from Release-6 as a part of 1.1.5rc0 SDK stable release 2020-03-11 19:51:02 +00:00
Harneet Virk
2165cf308e update samples from Release-25 as a part of 1.1.2rc0 SDK experimental release (#829)
Co-authored-by: vizhur <vizhur@live.com>
2020-03-02 15:42:04 -05:00
Olivier Martin
d4a486827d typo 2020-02-17 17:16:47 -05:00
Harneet Virk
3d6caa10a3 Merge pull request #801 from Azure/release_update/Release-39
update samples from Release-39 as a part of  SDK release
2020-02-13 19:03:36 -07:00
vizhur
4df079db1c update samples from Release-39 as a part of SDK release 2020-02-14 02:01:41 +00:00
Sander Vanhove
67d0b02ef9 Fix broken link in README (#797) 2020-02-13 08:20:28 -05:00
Harneet Virk
4e7b3784d5 Merge pull request #788 from Azure/release_update/Release-38
update samples from Release-38 as a part of  SDK release
2020-02-11 13:16:15 -07:00
vizhur
ed91e39d7e update samples from Release-38 as a part of SDK release 2020-02-11 20:00:16 +00:00
Harneet Virk
a09a1a16a7 Merge pull request #780 from Azure/release_update/Release-37
update samples from Release-37 as a part of  SDK release
2020-02-07 21:52:34 -07:00
vizhur
9662505517 update samples from Release-37 as a part of SDK release 2020-02-08 04:49:27 +00:00
Harneet Virk
8e103c02ff Merge pull request #779 from Azure/release_update/Release-36
update samples from Release-36 as a part of  SDK release
2020-02-07 21:40:57 -07:00
vizhur
ecb5157add update samples from Release-36 as a part of SDK release 2020-02-08 04:35:14 +00:00
Shané Winner
d7d23d5e7c Update index.md 2020-02-05 22:41:22 -08:00
Harneet Virk
83a21ba53a update samples from Release-35 as a part of SDK release (#765)
Co-authored-by: vizhur <vizhur@live.com>
2020-02-05 20:03:41 -05:00
Harneet Virk
3c9cb89c1a update samples from Release-18 as a part of 1.1.0rc0 SDK experimental release (#760)
Co-authored-by: vizhur <vizhur@live.com>
2020-02-04 22:19:52 -05:00
Sheri Gilley
cca7c2e26f add cell metadata 2020-02-04 11:31:07 -06:00
Harneet Virk
e895d7c2bf update samples - test (#758)
Co-authored-by: vizhur <vizhur@live.com>
2020-01-31 15:19:58 -05:00
Shané Winner
3588eb9665 Update index.md 2020-01-23 15:46:43 -08:00
Harneet Virk
a09e726f31 update samples - test (#748)
Co-authored-by: vizhur <vizhur@live.com>
2020-01-23 16:50:29 -05:00
Shané Winner
4fb1d9ee5b Update index.md 2020-01-22 11:38:24 -08:00
Harneet Virk
b05ff80e9d update samples from Release-169 as a part of 1.0.85 SDK release (#742)
Co-authored-by: vizhur <vizhur@live.com>
2020-01-21 18:00:15 -05:00
Shané Winner
512630472b Update index.md 2020-01-08 14:52:23 -08:00
vizhur
ae1337fe70 Merge pull request #724 from Azure/release_update/Release-167
update samples from Release-167 as a part of 1.0.83 SDK release
2020-01-06 15:38:25 -05:00
vizhur
c95f970dc8 update samples from Release-167 as a part of 1.0.83 SDK release 2020-01-06 20:16:21 +00:00
Shané Winner
9b9d112719 Update index.md 2019-12-24 07:40:48 -08:00
vizhur
fe8fcd4b48 Merge pull request #712 from Azure/release_update/Release-31
update samples - test
2019-12-23 20:28:02 -05:00
vizhur
296ae01587 update samples - test 2019-12-24 00:42:48 +00:00
Shané Winner
8f4efe15eb Update index.md 2019-12-10 09:05:23 -08:00
vizhur
d179080467 Merge pull request #690 from Azure/release_update/Release-163
update samples from Release-163 as a part of 1.0.79 SDK release
2019-12-09 15:41:03 -05:00
vizhur
0040644e7a update samples from Release-163 as a part of 1.0.79 SDK release 2019-12-09 20:09:30 +00:00
Shané Winner
8aa04307fb Update index.md 2019-12-03 10:24:18 -08:00
Shané Winner
a525da4488 Update index.md 2019-11-27 13:08:21 -08:00
Shané Winner
e149565a8a Merge pull request #679 from Azure/release_update/Release-30
update samples - test
2019-11-27 13:05:00 -08:00
vizhur
75610ec31c update samples - test 2019-11-27 21:02:21 +00:00
Shané Winner
0c2c450b6b Update index.md 2019-11-25 14:34:48 -08:00
Shané Winner
0d548eabff Merge pull request #677 from Azure/release_update/Release-29
update samples - test
2019-11-25 14:31:50 -08:00
vizhur
e4029801e6 update samples - test 2019-11-25 22:24:09 +00:00
Shané Winner
156974ee7b Update index.md 2019-11-25 11:42:53 -08:00
Shané Winner
1f05157d24 Merge pull request #676 from Azure/release_update/Release-160
update samples from Release-160 as a part of 1.0.76 SDK release
2019-11-25 11:39:27 -08:00
vizhur
2214ea8616 update samples from Release-160 as a part of 1.0.76 SDK release 2019-11-25 19:28:19 +00:00
Sheri Gilley
b54b2566de Merge pull request #667 from Azure/sdk-codetest
remove deprecated auto_prepare_environment
2019-11-21 09:25:15 -06:00
Sheri Gilley
57b0f701f8 remove deprecated auto_prepare_environment 2019-11-20 17:28:44 -06:00
Shané Winner
d658c85208 Update index.md 2019-11-12 14:59:15 -08:00
vizhur
a5f627a9b6 Merge pull request #655 from Azure/release_update/Release-28
update samples - test
2019-11-12 17:11:45 -05:00
vizhur
a8b08bdff0 update samples - test 2019-11-12 21:53:12 +00:00
Shané Winner
0dc3f34b86 Update index.md 2019-11-11 14:49:44 -08:00
Shané Winner
9ba7d5e5bb Update index.md 2019-11-11 14:48:05 -08:00
Shané Winner
c6ad2f8ec0 Merge pull request #654 from Azure/release_update/Release-158
update samples from Release-158 as a part of 1.0.74 SDK release
2019-11-11 10:25:18 -08:00
vizhur
33d6def8c3 update samples from Release-158 as a part of 1.0.74 SDK release 2019-11-11 16:57:02 +00:00
Shané Winner
69d4344dff Update index.md 2019-11-04 10:09:41 -08:00
Shané Winner
34aeec1439 Update index.md 2019-11-04 10:08:10 -08:00
Shané Winner
a9b9ebbf7d Merge pull request #641 from Azure/release_update/Release-27
update samples - test
2019-11-04 10:02:25 -08:00
vizhur
41fa508d53 update samples - test 2019-11-04 17:57:28 +00:00
Shané Winner
e1bfa98844 Update index.md 2019-11-04 08:41:15 -08:00
Shané Winner
2bcee9aa20 Update index.md 2019-11-04 08:40:29 -08:00
Shané Winner
37541b1071 Merge pull request #638 from Azure/release_update/Release-26
update samples - test
2019-11-04 08:31:59 -08:00
Shané Winner
4aff1310a7 Merge branch 'master' into release_update/Release-26 2019-11-04 08:31:37 -08:00
Shané Winner
51ecb7c54f Update index.md 2019-11-01 10:38:46 -07:00
Shané Winner
4e7fc7c82c Update index.md 2019-11-01 10:36:02 -07:00
vizhur
4ed3f0767a update samples - test 2019-11-01 14:48:01 +00:00
vizhur
46ec74f8df Merge pull request #627 from jingyanwangms/jingywa/lightgbm-notebook
add Lightgbm Estimator notebook
2019-10-22 20:54:33 -04:00
Jingyan Wang
8d2e362a10 add Lightgbm notebook 2019-10-22 17:40:32 -07:00
vizhur
86c1b3d760 adding missing files for rapids 2019-10-21 12:20:15 -04:00
Shané Winner
41dc05952f Update index.md 2019-10-15 16:37:53 -07:00
vizhur
df2e08e4a3 Merge pull request #622 from Azure/release_update/Release-25
update samples - test
2019-10-15 18:34:28 -04:00
vizhur
828a976907 update samples - test 2019-10-15 22:01:55 +00:00
vizhur
1a373f11a0 Merge pull request #621 from Azure/ak/revert-db-overwrite
Revert automatic overwrite of databricks content
2019-10-15 16:07:37 -04:00
Akshaya Annavajhala (AK)
60de701207 revert overwrites 2019-10-15 12:33:31 -07:00
Akshaya Annavajhala (AK)
5841fa4a42 revert overwrites 2019-10-15 12:27:56 -07:00
Shané Winner
659fb7abc3 Merge pull request #619 from Azure/release_update/Release-153
update samples from Release-153 as a part of 1.0.69 SDK release
2019-10-14 15:39:40 -07:00
vizhur
2e404cfc3a update samples from Release-153 as a part of 1.0.69 SDK release 2019-10-14 22:30:58 +00:00
Shané Winner
5fcf4887bc Update index.md 2019-10-06 11:44:35 -07:00
Shané Winner
1e7f3117ae Update index.md 2019-10-06 11:44:01 -07:00
Shané Winner
bbb3f85da9 Update README.md 2019-10-06 11:33:56 -07:00
Shané Winner
c816dfb479 Update index.md 2019-10-06 11:29:58 -07:00
Shané Winner
8c128640b1 Update index.md 2019-10-06 11:28:34 -07:00
vizhur
4d2b937846 Merge pull request #600 from Azure/release_update/Release-24
Fix for Tensorflow 2.0 related Notebook Failures
2019-10-02 16:27:31 -04:00
vizhur
5492f52faf update samples - test 2019-10-02 20:23:54 +00:00
Shané Winner
735db9ebe7 Update index.md 2019-10-01 09:59:10 -07:00
Shané Winner
573030b990 Update README.md 2019-10-01 09:52:10 -07:00
Shané Winner
392a059000 Update index.md 2019-10-01 09:44:37 -07:00
Shané Winner
3580e54fbb Update index.md 2019-10-01 09:42:20 -07:00
Shané Winner
2017bcd716 Update index.md 2019-10-01 09:41:33 -07:00
Roope Astala
4a3f8e7025 Merge pull request #594 from Azure/release_update/Release-149
update samples from Release-149 as a part of 1.0.65 SDK release
2019-09-30 13:29:57 -04:00
vizhur
45880114db update samples from Release-149 as a part of 1.0.65 SDK release 2019-09-30 17:08:52 +00:00
Roope Astala
314bad72a4 Merge pull request #588 from skaarthik/rapids
updating to use AML base image and system managed dependencies
2019-09-25 07:44:31 -04:00
Kaarthik Sivashanmugam
f252308005 updating to use AML base image and system managed dependencies 2019-09-24 20:47:15 -07:00
Kaarthik Sivashanmugam
6622a6c5f2 Merge pull request #1 from Azure/master
merge latest changes from Azure/MLNB repo
2019-09-24 20:40:43 -07:00
Roope Astala
6b19e2f263 Merge pull request #587 from Azure/akshaya-a-patch-3
Update README.md to remove confusing reference
2019-09-24 16:13:16 -04:00
Akshaya Annavajhala
42fd4598cb Update README.md 2019-09-24 15:28:30 -04:00
Roope Astala
476d945439 Merge pull request #580 from akshaya-a/master
Add documentation on the preview ADB linking experience
2019-09-24 09:31:45 -04:00
Shané Winner
e96bb9bef2 Delete manage-runs.yml 2019-09-22 20:37:17 -07:00
Shané Winner
2be4a5e54d Delete manage-runs.ipynb 2019-09-22 20:37:07 -07:00
Shané Winner
247a25f280 Delete hello_with_delay.py 2019-09-22 20:36:50 -07:00
Shané Winner
5d9d8eade6 Delete hello_with_children.py 2019-09-22 20:36:39 -07:00
Shané Winner
dba978e42a Delete hello.py 2019-09-22 20:36:29 -07:00
Shané Winner
7f4101c33e Delete run_details.PNG 2019-09-22 20:36:12 -07:00
Shané Winner
62b0d5df69 Delete run_history.png 2019-09-22 20:36:01 -07:00
Shané Winner
f10b55a1bc Delete logging-api.ipynb 2019-09-22 20:35:47 -07:00
Shané Winner
da9e86635e Delete logging-api.yml 2019-09-22 20:35:36 -07:00
Shané Winner
9ca6388996 Delete datasets-diff.ipynb 2019-09-19 14:14:59 -07:00
Akshaya Annavajhala
3ce779063b address PR feedback 2019-09-18 15:48:42 -04:00
Akshaya Annavajhala
ce635ce4fe add the word mlflow 2019-09-18 13:25:41 -04:00
Akshaya Annavajhala
f08e68c8e9 add linking docs 2019-09-18 11:08:46 -04:00
Shané Winner
93a1d232db Update index.md 2019-09-17 10:00:57 -07:00
Shané Winner
923483528c Update index.md 2019-09-17 09:59:23 -07:00
Shané Winner
cbeacb2ab2 Delete sklearn_regression_model.pkl 2019-09-17 09:37:44 -07:00
Shané Winner
c928c50707 Delete score.py 2019-09-17 09:37:34 -07:00
Shané Winner
efb42bacf9 Delete register-model-deploy-local.ipynb 2019-09-17 09:37:26 -07:00
Shané Winner
d8f349a1ae Delete register-model-deploy-local-advanced.ipynb 2019-09-17 09:37:17 -07:00
Shané Winner
96a61fdc78 Delete myenv.yml 2019-09-17 09:37:08 -07:00
Shané Winner
ff8128f023 Delete helloworld.txt 2019-09-17 09:36:59 -07:00
Shané Winner
8260302a68 Delete dockerSharedDrive.JPG 2019-09-17 09:36:50 -07:00
Shané Winner
fbd7f4a55b Delete README.md 2019-09-17 09:36:41 -07:00
Shané Winner
d4e4206179 Delete helloworld.txt 2019-09-17 09:35:38 -07:00
Shané Winner
a98b918feb Delete model-register-and-deploy.ipynb 2019-09-17 09:35:29 -07:00
Shané Winner
890490ec70 Delete model-register-and-deploy.yml 2019-09-17 09:35:17 -07:00
Shané Winner
c068c9b979 Delete myenv.yml 2019-09-17 09:34:54 -07:00
Shané Winner
f334a3516f Delete score.py 2019-09-17 09:34:44 -07:00
Shané Winner
96248d8dff Delete sklearn_regression_model.pkl 2019-09-17 09:34:27 -07:00
Shané Winner
c42e865700 Delete README.md 2019-09-17 09:29:20 -07:00
vizhur
9233ce089a Merge pull request #577 from Azure/release_update/Release-146
update samples from Release-146 as a part of 1.0.62 SDK release
2019-09-16 19:44:43 -04:00
vizhur
6bb1e2a3e3 update samples from Release-146 as a part of 1.0.62 SDK release 2019-09-16 23:21:57 +00:00
Shané Winner
e1724c8a89 Merge pull request #573 from lostmygithubaccount/master
adding timeseries dataset example notebook
2019-09-16 11:00:30 -07:00
Shané Winner
446e0768cc Delete datasets-diff.ipynb 2019-09-16 10:53:16 -07:00
Cody Peterson
8a2f114a16 adding timeseries dataset example notebook 2019-09-13 08:30:26 -07:00
Shané Winner
80c0d4d30f Merge pull request #570 from trevorbye/master
new pipeline tutorial
2019-09-11 09:28:40 -07:00
Trevor Bye
e8f4708a5a adding index metadata 2019-09-11 09:24:41 -07:00
Trevor Bye
fbaeb84204 adding tutorial 2019-09-11 09:02:06 -07:00
Trevor Bye
da1fab0a77 removing dprep file from old deleted tutorial 2019-09-10 12:31:57 -07:00
Shané Winner
94d2890bb5 Update index.md 2019-09-06 06:37:35 -07:00
Shané Winner
4d1ec4f7d4 Update index.md 2019-09-06 06:30:54 -07:00
Shané Winner
ace3153831 Update index.md 2019-09-06 06:28:50 -07:00
Shané Winner
58bbfe57b2 Update index.md 2019-09-06 06:15:36 -07:00
vizhur
11ea00b1d9 Update index.md 2019-09-06 09:14:30 -04:00
Shané Winner
b81efca3e5 Update index.md 2019-09-06 06:13:03 -07:00
vizhur
d7ceb9bca2 Update index.md 2019-09-06 09:08:02 -04:00
Shané Winner
17730dc69a Merge pull request #564 from MayMSFT/patch-1
Update file-dataset-img-classification.ipynb
2019-09-04 13:31:08 -07:00
May Hu
3a029d48a2 Update file-dataset-img-classification.ipynb
made edit on the sdk version
2019-09-04 13:25:10 -07:00
vizhur
06d43956f3 Merge pull request #558 from Azure/release_update/Release-144
update samples from Release-144 as a part of 1.0.60 SDK release
2019-09-03 22:09:33 -04:00
vizhur
a1cb9b33a5 update samples from Release-144 as a part of 1.0.60 SDK release 2019-09-03 22:39:55 +00:00
Shané Winner
fdc3fe2a53 Delete README.md 2019-08-29 10:22:24 -07:00
Shané Winner
628b35912c Delete train-remote.yml 2019-08-29 10:22:15 -07:00
Shané Winner
3f4cc22e94 Delete train-remote.ipynb 2019-08-29 10:22:07 -07:00
Shané Winner
18d7afb707 Delete train_diabetes.py 2019-08-29 10:21:59 -07:00
Shané Winner
cd35ca30d4 Delete train-local.ipynb 2019-08-29 10:21:48 -07:00
Shané Winner
30eae0b46c Delete train-local.yml 2019-08-29 10:21:40 -07:00
Shané Winner
f16951387f Delete train.py 2019-08-29 10:21:27 -07:00
Shané Winner
0d8de29147 Delete train-and-deploy-pytorch.ipynb 2019-08-29 10:21:16 -07:00
Shané Winner
836354640c Delete train-and-deploy-pytorch.yml 2019-08-29 10:21:08 -07:00
Shané Winner
6162e80972 Delete deploy-model.yml 2019-08-29 10:20:55 -07:00
Shané Winner
fe9fe3392d Delete deploy-model.ipynb 2019-08-29 10:20:46 -07:00
Shané Winner
5ec6d8861b Delete auto-ml-dataprep-remote-execution.yml 2019-08-27 11:19:38 -07:00
Shané Winner
ae188f324e Delete auto-ml-dataprep-remote-execution.ipynb 2019-08-27 11:19:27 -07:00
Shané Winner
4c30c2bdb9 Delete auto-ml-dataprep.yml 2019-08-27 11:19:00 -07:00
Shané Winner
b891440e2d Delete auto-ml-dataprep.ipynb 2019-08-27 11:18:50 -07:00
Shané Winner
784827cdd2 Update README.md 2019-08-27 09:23:40 -07:00
vizhur
0957af04ca Merge pull request #545 from Azure/imatiach-msft-patch-1
add dataprep dependency to notebook
2019-08-23 13:14:30 -04:00
Ilya Matiach
a3bdd193d1 add dataprep dependency to notebook
add dataprep dependency to train-explain-model-on-amlcompute-and-deploy.ipynb notebook for azureml-explain-model package
2019-08-23 13:11:36 -04:00
Shané Winner
dff09970ac Update README.md 2019-08-23 08:38:01 -07:00
Shané Winner
abc7d21711 Update README.md 2019-08-23 05:28:45 +00:00
Shané Winner
ec12ef635f Delete azure-ml-datadrift.ipynb 2019-08-21 10:32:40 -07:00
Shané Winner
81b3e6f09f Delete azure-ml-datadrift.yml 2019-08-21 10:32:32 -07:00
Shané Winner
cc167dceda Delete score.py 2019-08-21 10:32:23 -07:00
Shané Winner
bc52a6d8ee Delete datasets-diff.ipynb 2019-08-21 10:31:50 -07:00
Shané Winner
5bbbdbe73c Delete Titanic.csv 2019-08-21 10:31:38 -07:00
Shané Winner
fd4de05ddd Delete train.py 2019-08-21 10:31:26 -07:00
Shané Winner
9eaab2189d Delete datasets-tutorial.ipynb 2019-08-21 10:31:15 -07:00
Shané Winner
12147754b2 Delete datasets-diff.ipynb 2019-08-21 10:31:05 -07:00
Shané Winner
90ef263823 Delete README.md 2019-08-21 10:30:54 -07:00
Shané Winner
143590cfb4 Delete new-york-taxi_scale-out.ipynb 2019-08-21 10:30:39 -07:00
Shané Winner
40379014ad Delete new-york-taxi.ipynb 2019-08-21 10:30:29 -07:00
Shané Winner
f7b0e99fa1 Delete part-00000-34f8a7a7-c3cd-4926-92b2-ba2dcd3f95b7.gz.parquet 2019-08-21 10:30:18 -07:00
Shané Winner
7a7ac48411 Delete part-00000-34f8a7a7-c3cd-4926-92b2-ba2dcd3f95b7.gz.parquet 2019-08-21 10:30:04 -07:00
Shané Winner
50107c5b1e Delete part-00007-0b08e77b-f17a-4c20-972c-aa382e830fca-c000.csv 2019-08-21 10:29:51 -07:00
Shané Winner
e41d7e6819 Delete part-00006-0b08e77b-f17a-4c20-972c-aa382e830fca-c000.csv 2019-08-21 10:29:36 -07:00
Shané Winner
691e038e84 Delete part-00005-0b08e77b-f17a-4c20-972c-aa382e830fca-c000.csv 2019-08-21 10:29:18 -07:00
Shané Winner
426e79d635 Delete part-00004-0b08e77b-f17a-4c20-972c-aa382e830fca-c000.csv 2019-08-21 10:29:02 -07:00
Shané Winner
326677e87f Delete part-00003-0b08e77b-f17a-4c20-972c-aa382e830fca-c000.csv 2019-08-21 10:28:45 -07:00
Shané Winner
44988e30ae Delete part-00002-0b08e77b-f17a-4c20-972c-aa382e830fca-c000.csv 2019-08-21 10:28:31 -07:00
Shané Winner
646ae37384 Delete part-00001-0b08e77b-f17a-4c20-972c-aa382e830fca-c000.csv 2019-08-21 10:28:18 -07:00
Shané Winner
457e29a663 Delete part-00000-0b08e77b-f17a-4c20-972c-aa382e830fca-c000.csv 2019-08-21 10:28:03 -07:00
Shané Winner
2771edfb2c Delete _SUCCESS 2019-08-21 10:27:45 -07:00
Shané Winner
f0001ec322 Delete adls-dpreptestfiles.crt 2019-08-21 10:27:31 -07:00
Shané Winner
d3e02a017d Delete chicago-aldermen-2015.csv 2019-08-21 10:27:05 -07:00
Shané Winner
a0ebed6876 Delete crime-dirty.csv 2019-08-21 10:26:55 -07:00
Shané Winner
dc0ab6db47 Delete crime-spring.csv 2019-08-21 10:26:45 -07:00
Shané Winner
ea7900f82c Delete crime-winter.csv 2019-08-21 10:26:35 -07:00
Shané Winner
0cb3fd180d Delete crime.parquet 2019-08-21 10:26:26 -07:00
Shané Winner
b05c3e46bb Delete crime.txt 2019-08-21 10:26:17 -07:00
Shané Winner
a1b7d298d3 Delete crime.xlsx 2019-08-21 10:25:41 -07:00
Shané Winner
cc5516c3b3 Delete crime_duplicate_headers.csv 2019-08-21 10:25:32 -07:00
Shané Winner
4fb6070b89 Delete crime.zip 2019-08-21 10:25:23 -07:00
Shané Winner
1b926cdf53 Delete crime-full.csv 2019-08-21 10:25:13 -07:00
Shané Winner
72fc00fb65 Delete crime.dprep 2019-08-21 10:24:56 -07:00
Shané Winner
ddc6b57253 Delete ADLSgen2-datapreptest.crt 2019-08-21 10:24:47 -07:00
Shané Winner
e8b3b98338 Delete crime_fixed_width_file.txt 2019-08-21 10:24:38 -07:00
Shané Winner
66325a1405 Delete crime_multiple_separators.csv 2019-08-21 10:24:29 -07:00
Shané Winner
0efbeaf4b8 Delete json.json 2019-08-21 10:24:12 -07:00
Shané Winner
11d487fb28 Merge pull request #542 from Azure/sgilley/update-deploy
change deployment to model-centric approach
2019-08-21 10:22:13 -07:00
Shané Winner
073e319ef9 Delete large_dflow.json 2019-08-21 10:21:41 -07:00
Shané Winner
3ed75f28d1 Delete map_func.py 2019-08-21 10:21:23 -07:00
Shané Winner
bfc0367f54 Delete median_income.csv 2019-08-21 10:21:14 -07:00
Shané Winner
075eeb583f Delete median_income_transformed.csv 2019-08-21 10:21:05 -07:00
Shané Winner
b7531d3b9e Delete parquet.parquet 2019-08-21 10:20:55 -07:00
Shané Winner
41dc3bd1cf Delete secrets.dprep 2019-08-21 10:20:45 -07:00
Shané Winner
b790b385a4 Delete stream-path.csv 2019-08-21 10:20:36 -07:00
Shané Winner
8700328fe9 Delete summarize.ipynb 2019-08-21 10:17:21 -07:00
Shané Winner
adbd2c8200 Delete subsetting-sampling.ipynb 2019-08-21 10:17:12 -07:00
Shané Winner
7d552effb0 Delete split-column-by-example.ipynb 2019-08-21 10:17:01 -07:00
Shané Winner
bc81d2a5a7 Delete semantic-types.ipynb 2019-08-21 10:16:52 -07:00
Shané Winner
7620de2d91 Delete secrets.ipynb 2019-08-21 10:16:42 -07:00
Shané Winner
07a43a0444 Delete replace-fill-error.ipynb 2019-08-21 10:16:33 -07:00
Shané Winner
f4d5874e09 Delete replace-datasource-replace-reference.ipynb 2019-08-21 10:16:23 -07:00
Shané Winner
8a0b4d24bd Delete random-split.ipynb 2019-08-21 10:16:14 -07:00
Shané Winner
636f19be1f Delete quantile-transformation.ipynb 2019-08-21 10:16:04 -07:00
Shané Winner
0fd7f7d9b2 Delete open-save-dataflows.ipynb 2019-08-21 10:15:54 -07:00
Shané Winner
ab6c66534f Delete one-hot-encoder.ipynb 2019-08-21 10:15:45 -07:00
Shané Winner
faccf13759 Delete min-max-scaler.ipynb 2019-08-21 10:15:36 -07:00
Shané Winner
4c6a28e4ed Delete label-encoder.ipynb 2019-08-21 10:15:25 -07:00
Shané Winner
64ad88e2cb Delete join.ipynb 2019-08-21 10:15:17 -07:00
Shané Winner
969ac90d39 Delete impute-missing-values.ipynb 2019-08-21 10:12:12 -07:00
Shané Winner
fb977c1e95 Delete fuzzy-group.ipynb 2019-08-21 10:12:03 -07:00
Shané Winner
d5ba3916f7 Delete filtering.ipynb 2019-08-21 10:11:53 -07:00
Shané Winner
f7f1087337 Delete external-references.ipynb 2019-08-21 10:11:43 -07:00
Shané Winner
47ea2dbc03 Delete derive-column-by-example.ipynb 2019-08-21 10:11:33 -07:00
Shané Winner
bd2cf534e5 Delete datastore.ipynb 2019-08-21 10:11:24 -07:00
Shané Winner
65f1668d69 Delete data-profile.ipynb 2019-08-21 10:11:16 -07:00
Shané Winner
e0fb7df0aa Delete data-ingestion.ipynb 2019-08-21 10:11:06 -07:00
Shané Winner
7047f76299 Delete custom-python-transforms.ipynb 2019-08-21 10:10:56 -07:00
Shané Winner
c39f2d5eb6 Delete column-type-transforms.ipynb 2019-08-21 10:10:45 -07:00
Shané Winner
5fda69a388 Delete column-manipulations.ipynb 2019-08-21 10:10:36 -07:00
Shané Winner
87ce954eef Delete cache.ipynb 2019-08-21 10:10:26 -07:00
Shané Winner
ebbeac413a Delete auto-read-file.ipynb 2019-08-21 10:10:15 -07:00
Shané Winner
a68bbaaab4 Delete assertions.ipynb 2019-08-21 10:10:05 -07:00
Shané Winner
8784dc979f Delete append-columns-and-rows.ipynb 2019-08-21 10:09:55 -07:00
Shané Winner
f8047544fc Delete add-column-using-expression.ipynb 2019-08-21 10:09:44 -07:00
Shané Winner
eeb2a05e4f Delete working-with-file-streams.ipynb 2019-08-21 10:09:33 -07:00
Shané Winner
6db9d7bd8b Delete writing-data.ipynb 2019-08-21 10:09:19 -07:00
Shané Winner
80e2fde734 Delete getting-started.ipynb 2019-08-21 10:09:04 -07:00
Shané Winner
ae4f5d40ee Delete README.md 2019-08-21 10:08:53 -07:00
Shané Winner
5516edadfd Delete README.md 2019-08-21 10:08:13 -07:00
Sheri Gilley
475afbf44b change deployment to model-centric approach 2019-08-21 09:50:49 -05:00
Shané Winner
197eaf1aab Merge pull request #541 from Azure/sdgilley/update-tutorial
Update img-classification-part1-training.ipynb
2019-08-20 15:59:24 -07:00
Sheri Gilley
184680f1d2 Update img-classification-part1-training.ipynb
updated explanation of datastore
2019-08-20 17:52:45 -05:00
Shané Winner
474f58bd0b Merge pull request #540 from trevorbye/master
removing tutorials for single combined tutorial
2019-08-20 15:22:47 -07:00
Trevor Bye
22c8433897 removing tutorials for single combined tutorial 2019-08-20 12:09:21 -07:00
Josée Martens
822cdd0f01 Update issue templates 2019-08-20 08:35:00 -05:00
Josée Martens
6e65d42986 Update issue templates 2019-08-20 08:26:45 -05:00
Harneet Virk
4c0cbac834 Merge pull request #537 from Azure/release_update/Release-141
update samples from Release-141 as a part of 1.0.57 SDK release
2019-08-19 18:32:44 -07:00
vizhur
44a7481ed1 update samples from Release-141 as a part of 1.0.57 SDK release 2019-08-19 23:33:44 +00:00
Ilya Matiach
8f418b216d Merge pull request #526 from imatiach-msft/ilmat/remove-old-explain-dirs
removing old explain model directories
2019-08-13 12:37:00 -04:00
Ilya Matiach
2d549ecad3 removing old directories 2019-08-13 12:31:51 -04:00
Josée Martens
4dbb024529 Update issue templates 2019-08-11 18:02:17 -05:00
Josée Martens
142a1a510e Update issue templates 2019-08-11 18:00:12 -05:00
vizhur
2522486c26 Merge pull request #519 from wamartin-aml/master
Add dataprep dependency
2019-08-08 09:34:36 -04:00
Walter Martin
6d5226e47c Add dataprep dependency 2019-08-08 09:31:18 -04:00
Shané Winner
e7676d7cdc Delete README.md 2019-08-07 13:14:39 -07:00
Shané Winner
a84f6636f1 Delete README.md 2019-08-07 13:14:24 -07:00
Roope Astala
41be10d1c1 Delete authentication-in-azure-ml.ipynb 2019-08-07 10:12:48 -04:00
vizhur
429eb43914 Merge pull request #513 from Azure/release_update/Release-139
update samples from Release-139 as a part of 1.0.55 SDK release
2019-08-05 16:22:25 -04:00
vizhur
c0dae0c645 update samples from Release-139 as a part of 1.0.55 SDK release 2019-08-05 18:39:19 +00:00
Shané Winner
e4d9a2b4c5 Delete score.py 2019-07-29 09:33:11 -07:00
Shané Winner
7648e8f516 Delete readme.md 2019-07-29 09:32:55 -07:00
Shané Winner
b5ed94b4eb Delete azure-ml-datadrift.ipynb 2019-07-29 09:32:47 -07:00
Shané Winner
85e487f74f Delete new-york-taxi_scale-out.ipynb 2019-07-28 00:38:05 -07:00
Shané Winner
c0a5b2de79 Delete new-york-taxi.ipynb 2019-07-28 00:37:56 -07:00
Shané Winner
0a9e076e5f Delete stream-path.csv 2019-07-28 00:37:44 -07:00
Shané Winner
e3b974811d Delete secrets.dprep 2019-07-28 00:37:33 -07:00
Shané Winner
381d1a6f35 Delete parquet.parquet 2019-07-28 00:37:20 -07:00
Shané Winner
adaa55675e Delete median_income_transformed.csv 2019-07-28 00:37:12 -07:00
Shané Winner
5e3c592d4b Delete median_income.csv 2019-07-28 00:37:02 -07:00
Shané Winner
9c6f1e2571 Delete map_func.py 2019-07-28 00:36:52 -07:00
Shané Winner
bd1bedd563 Delete large_dflow.json 2019-07-28 00:36:43 -07:00
Shané Winner
9716f3614e Delete json.json 2019-07-28 00:36:30 -07:00
Shané Winner
d2c72ca149 Delete crime_multiple_separators.csv 2019-07-28 00:36:19 -07:00
Shané Winner
4f62f64207 Delete crime_fixed_width_file.txt 2019-07-28 00:36:10 -07:00
Shané Winner
16473eb33e Delete crime_duplicate_headers.csv 2019-07-28 00:36:01 -07:00
Shané Winner
d10474c249 Delete crime.zip 2019-07-28 00:35:51 -07:00
Shané Winner
6389cc16f9 Delete crime.xlsx 2019-07-28 00:35:41 -07:00
Shané Winner
bc0a8e0152 Delete crime.txt 2019-07-28 00:35:30 -07:00
Shané Winner
39384aea52 Delete crime.parquet 2019-07-28 00:35:20 -07:00
Shané Winner
5bf4b0bafe Delete crime.dprep 2019-07-28 00:35:11 -07:00
Shané Winner
f22adb7949 Delete crime-winter.csv 2019-07-28 00:35:00 -07:00
Shané Winner
8409ab7133 Delete crime-spring.csv 2019-07-28 00:34:50 -07:00
Shané Winner
32acd55774 Delete crime-full.csv 2019-07-28 00:34:39 -07:00
Shané Winner
7f65c1a255 Delete crime-dirty.csv 2019-07-28 00:34:27 -07:00
Shané Winner
bc7ccc7ef3 Delete chicago-aldermen-2015.csv 2019-07-28 00:34:17 -07:00
Shané Winner
1cc79a71e9 Delete adls-dpreptestfiles.crt 2019-07-28 00:34:05 -07:00
Shané Winner
c0bec5f110 Delete part-00000-34f8a7a7-c3cd-4926-92b2-ba2dcd3f95b7.gz.parquet 2019-07-28 00:33:51 -07:00
Shané Winner
77e5664482 Delete part-00000-34f8a7a7-c3cd-4926-92b2-ba2dcd3f95b7.gz.parquet 2019-07-28 00:33:38 -07:00
Shané Winner
e2eb64372a Delete part-00007-0b08e77b-f17a-4c20-972c-aa382e830fca-c000.csv 2019-07-28 00:33:23 -07:00
Shané Winner
03cbb6a3a2 Delete part-00006-0b08e77b-f17a-4c20-972c-aa382e830fca-c000.csv 2019-07-28 00:33:12 -07:00
Shané Winner
44d3d998a8 Delete part-00005-0b08e77b-f17a-4c20-972c-aa382e830fca-c000.csv 2019-07-28 00:33:00 -07:00
Shané Winner
c626f37057 Delete part-00004-0b08e77b-f17a-4c20-972c-aa382e830fca-c000.csv 2019-07-28 00:32:48 -07:00
Shané Winner
0175574864 Delete part-00003-0b08e77b-f17a-4c20-972c-aa382e830fca-c000.csv 2019-07-28 00:32:37 -07:00
Shané Winner
f6e8d57da3 Delete part-00002-0b08e77b-f17a-4c20-972c-aa382e830fca-c000.csv 2019-07-28 00:32:25 -07:00
Shané Winner
01cd31ce44 Delete part-00001-0b08e77b-f17a-4c20-972c-aa382e830fca-c000.csv 2019-07-28 00:32:13 -07:00
Shané Winner
eb2024b3e0 Delete part-00000-0b08e77b-f17a-4c20-972c-aa382e830fca-c000.csv 2019-07-28 00:32:01 -07:00
Shané Winner
6bce41b3d7 Delete _SUCCESS 2019-07-28 00:31:49 -07:00
Shané Winner
bbdabbb552 Delete writing-data.ipynb 2019-07-28 00:31:32 -07:00
Shané Winner
65343fc263 Delete working-with-file-streams.ipynb 2019-07-28 00:31:22 -07:00
Shané Winner
b6b27fded6 Delete summarize.ipynb 2019-07-28 00:26:56 -07:00
Shané Winner
7e492cbeb6 Delete subsetting-sampling.ipynb 2019-07-28 00:26:41 -07:00
Shané Winner
4cc8f4c6af Delete split-column-by-example.ipynb 2019-07-28 00:26:25 -07:00
Shané Winner
9fba46821b Delete semantic-types.ipynb 2019-07-28 00:26:11 -07:00
Shané Winner
a45954a58f Delete secrets.ipynb 2019-07-28 00:25:58 -07:00
Shané Winner
f16dfb0e5b Delete replace-fill-error.ipynb 2019-07-28 00:25:45 -07:00
Shané Winner
edabbf9031 Delete replace-datasource-replace-reference.ipynb 2019-07-28 00:25:32 -07:00
Shané Winner
63d1d57dfb Delete random-split.ipynb 2019-07-28 00:25:21 -07:00
Shané Winner
10f7004161 Delete quantile-transformation.ipynb 2019-07-28 00:25:10 -07:00
Shané Winner
86ba4e7406 Delete open-save-dataflows.ipynb 2019-07-28 00:24:54 -07:00
Shané Winner
33bda032b8 Delete one-hot-encoder.ipynb 2019-07-28 00:24:43 -07:00
Shané Winner
0fd4bfbc56 Delete min-max-scaler.ipynb 2019-07-28 00:24:32 -07:00
Shané Winner
3fe08c944e Delete label-encoder.ipynb 2019-07-28 00:24:21 -07:00
Shané Winner
d587ea5676 Delete join.ipynb 2019-07-28 00:24:08 -07:00
Shané Winner
edd8562102 Delete impute-missing-values.ipynb 2019-07-28 00:23:55 -07:00
Shané Winner
5ac2c63336 Delete fuzzy-group.ipynb 2019-07-28 00:23:41 -07:00
Shané Winner
1f4e4cdda2 Delete filtering.ipynb 2019-07-28 00:23:28 -07:00
Shané Winner
2e245c1691 Delete external-references.ipynb 2019-07-28 00:23:11 -07:00
Shané Winner
e1b09f71fa Delete derive-column-by-example.ipynb 2019-07-28 00:22:54 -07:00
Shané Winner
8e2220d397 Delete datastore.ipynb 2019-07-28 00:22:43 -07:00
Shané Winner
f74ccf5048 Delete data-profile.ipynb 2019-07-28 00:22:32 -07:00
Shané Winner
97a6d9ca43 Delete data-ingestion.ipynb 2019-07-28 00:22:21 -07:00
Shané Winner
a0ff1c6b64 Delete custom-python-transforms.ipynb 2019-07-28 00:22:11 -07:00
Shané Winner
08f15ef4cf Delete column-type-transforms.ipynb 2019-07-28 00:21:58 -07:00
Shané Winner
7160416c0b Delete column-manipulations.ipynb 2019-07-28 00:21:47 -07:00
Shané Winner
218fed3d65 Delete cache.ipynb 2019-07-28 00:21:35 -07:00
Shané Winner
b8499dfb98 Delete auto-read-file.ipynb 2019-07-28 00:21:22 -07:00
Shané Winner
6bfd472cc2 Delete assertions.ipynb 2019-07-28 00:20:55 -07:00
Shané Winner
ecefb229e9 Delete append-columns-and-rows.ipynb 2019-07-28 00:20:40 -07:00
Shané Winner
883ad806ba Delete add-column-using-expression.ipynb 2019-07-28 00:20:22 -07:00
Shané Winner
848b5bc302 Delete getting-started.ipynb 2019-07-28 00:19:59 -07:00
Shané Winner
58087b53a0 Delete README.md 2019-07-28 00:19:45 -07:00
Shané Winner
ff4d5450a7 Delete README.md 2019-07-28 00:19:29 -07:00
Shané Winner
e2b2b89842 Delete datasets-tutorial.ipynb 2019-07-28 00:19:13 -07:00
Shané Winner
390be2ba24 Delete train.py 2019-07-28 00:19:00 -07:00
Shané Winner
cd1258f81d Delete Titanic.csv 2019-07-28 00:18:41 -07:00
Shané Winner
8a0b48ea48 Delete README.md 2019-07-28 00:18:14 -07:00
Roope Astala
b0dc904189 Merge pull request #502 from msdavx/patch-1
Add demo notebook for datasets diff attribute.
2019-07-26 19:16:13 -04:00
msdavx
82bede239a Add demo notebook for datasets diff attribute. 2019-07-26 11:10:37 -07:00
vizhur
774517e173 Merge pull request #500 from Azure/release_update/Release-137
update samples from Release-137 as a part of 1.0.53 SDK release
2019-07-25 16:36:25 -04:00
Shané Winner
c3ce2bc7fe Delete README.md 2019-07-25 13:28:15 -07:00
Shané Winner
5dd09a1f7c Delete README.md 2019-07-25 13:28:01 -07:00
vizhur
ee1da0ee19 update samples from Release-137 as a part of 1.0.53 SDK release 2019-07-24 22:37:36 +00:00
Paula Ledgerwood
ddfce6b24c Merge pull request #498 from Azure/revert-461-master
Revert "Finetune SSD VGG"
2019-07-24 14:25:43 -07:00
Paula Ledgerwood
31dfc3dc55 Revert "Finetune SSD VGG" 2019-07-24 14:08:00 -07:00
Paula Ledgerwood
168c45b188 Merge pull request #461 from borisneal/master
Finetune SSD VGG
2019-07-24 14:07:15 -07:00
fierval
159948db67 moving notice.txt 2019-07-24 08:50:41 -07:00
fierval
d842731a3b remove tf prereq item 2019-07-23 14:58:51 -07:00
fierval
7822fd4c13 notice + attribution for anchors 2019-07-23 14:49:20 -07:00
fierval
d9fbe4cd87 new folder structure 2019-07-22 10:31:22 -07:00
Shané Winner
a64f4d331a Merge pull request #488 from trevorbye/master
adding new notebook
2019-07-18 10:40:36 -07:00
Trevor Bye
c41f449208 adding new notebook 2019-07-18 10:27:21 -07:00
vizhur
4fe8c1702d Merge pull request #486 from Azure/release_update/Release-22
Fix for automl remote env
2019-07-12 19:18:13 -04:00
vizhur
18cd152591 update samples - test 2019-07-12 22:51:17 +00:00
vizhur
4170a394ed Merge pull request #474 from Azure/release_update/Release-132
update samples from Release-132 as a part of 1.0.48 SDK release
2019-07-09 19:14:29 -04:00
vizhur
475ea36106 update samples from Release-132 as a part of 1.0.48 SDK release 2019-07-09 22:02:57 +00:00
Roope Astala
9e0fc4f0e7 Merge pull request #459 from datashinobi/yassine/datadrift2
fix link to config nb & settingwithcopywarning
2019-07-03 12:41:31 -04:00
fierval
b025816c92 remove config.json 2019-07-02 17:32:56 -07:00
fierval
c75e820107 ssd vgg 2019-07-02 17:23:56 -07:00
Yassine Khelifi
e97e4742ba fix link to config nb & settingwithcopywarning 2019-07-02 16:56:21 +00:00
Roope Astala
14ecfb0bf3 Merge pull request #448 from jeff-shepherd/master
Update new notebooks to use dataprep and add sql files
2019-06-27 09:07:47 -04:00
Jeff Shepherd
61b396be4f Added sql files 2019-06-26 14:26:01 -07:00
Jeff Shepherd
3d2552174d Updated notebooks to use dataprep 2019-06-26 14:23:20 -07:00
Roope Astala
cd3c980a6e Merge pull request #447 from Azure/release-1.0.45
Merged notebook changes from release 1.0.45
2019-06-26 16:32:09 -04:00
Heather Shapiro
249bcac3c7 Merged notebook changes from release 1.0.45 2019-06-26 14:39:09 -04:00
Roope Astala
4a6bcebccc Update configuration.ipynb 2019-06-21 09:35:13 -04:00
Roope Astala
56e0ebc5ac Merge pull request #438 from rastala/master
add pipeline scripts
2019-06-19 18:56:42 -04:00
rastala
2aa39f2f4a add pipeline scripts 2019-06-19 18:55:32 -04:00
Roope Astala
4d247c1877 Merge pull request #437 from rastala/master
pytorch with mlflow
2019-06-19 17:23:06 -04:00
rastala
f6682f6f6d pytorch with mlflow 2019-06-19 17:21:52 -04:00
Roope Astala
26ecf25233 Merge pull request #436 from rastala/master
Update readme
2019-06-19 11:52:23 -04:00
Roope Astala
44c3a486c0 update readme 2019-06-19 11:49:49 -04:00
Roope Astala
c574f429b8 update readme 2019-06-19 11:48:52 -04:00
Roope Astala
77d557a5dc Merge pull request #435 from ganzhi/jamgan/drift
Add demo notebook for AML Data Drift
2019-06-17 16:39:46 -04:00
James Gan
13dedec4a4 Make it in same folder as internal repo 2019-06-17 13:38:27 -07:00
James Gan
6f5c52676f Add notebook to demo data drift 2019-06-17 13:33:30 -07:00
James Gan
90c105537c Add demo notebook for AML Data Drift 2019-06-17 13:31:08 -07:00
Roope Astala
ef264b1073 Merge pull request #434 from rastala/master
update pytorch
2019-06-17 11:57:29 -04:00
Roope Astala
824ac5e021 update pytorch 2019-06-17 11:56:42 -04:00
Roope Astala
e9a7b95716 Merge pull request #421 from csteegz/csteegz-add-warning
Add warning for using prediction client on azure notebooks
2019-06-13 20:27:34 -04:00
Roope Astala
789ee26357 Merge pull request #431 from jeff-shepherd/master
Fixed path for auto-ml-remote-amlcompute notebook
2019-06-13 16:56:25 -04:00
Jeff Shepherd
fc541706e7 Fixed path for auto-ml-remote-amlcompute 2019-06-13 13:12:32 -07:00
Roope Astala
64b8aa2a55 Merge pull request #429 from jeff-shepherd/master
Removed deprecated notebooks from readme
2019-06-13 14:40:57 -04:00
Jeff Shepherd
d3dc35dbb6 Removed deprecated notebooks from readme 2019-06-13 11:03:25 -07:00
Roope Astala
b55ac368e7 Merge pull request #428 from rastala/master
update cluster creation
2019-06-13 12:16:30 -04:00
Roope Astala
de162316d7 update cluster creation 2019-06-13 12:14:58 -04:00
Roope Astala
4ecc58dfe2 Merge pull request #427 from rastala/master
dockerfile
2019-06-12 10:24:34 -04:00
Roope Astala
daf27a76e4 dockerfile 2019-06-12 10:23:34 -04:00
Roope Astala
a05444845b Merge pull request #426 from rastala/master
version 1.0.43
2019-06-12 10:09:08 -04:00
Roope Astala
79c9f50c15 version 1.0.43 2019-06-12 10:08:35 -04:00
Roope Astala
67e10e0f6b Merge pull request #417 from lan-tang/patch-1
Create readme.md in data-drift
2019-06-11 13:47:55 -04:00
Roope Astala
1ef0331a0f Merge pull request #423 from rastala/master
add sklearn estimator
2019-06-11 11:30:37 -04:00
Roope Astala
5e91c836b9 add sklearn estimator 2019-06-11 11:29:56 -04:00
Colin Versteeg
661762854a add warning to training 2019-06-10 16:51:33 -07:00
Colin Versteeg
fbc90ba74f add to quickstart 2019-06-10 16:50:59 -07:00
Colin Versteeg
0d9c83d0a8 Update accelerated-models-object-detection.ipynb 2019-06-10 16:48:17 -07:00
Colin Versteeg
ca4cab1de9 Merge pull request #1 from Azure/master
pull from master
2019-06-10 16:45:12 -07:00
Roope Astala
ddbb3c45f6 Merge pull request #420 from rastala/master
mlflow integration preview
2019-06-10 15:12:36 -04:00
rastala
8eed4e39d0 mlflow integration preview 2019-06-10 15:10:57 -04:00
Lan Tang
b37c0297db Create readme.md 2019-06-07 12:32:32 -07:00
Roope Astala
968cc798d0 Update README.md 2019-06-05 12:15:33 -04:00
Roope Astala
5c9ca452fb Create README.md 2019-06-05 12:15:19 -04:00
Shané Winner
5e82680272 Update README.md 2019-05-31 10:58:39 -07:00
Roope Astala
41841fc8c0 Update README.md 2019-05-31 13:00:41 -04:00
Roope Astala
896bf63736 Merge pull request #397 from rastala/master
dockerfile
2019-05-29 11:05:18 -04:00
Roope Astala
d4751bf6ec dockerfile 2019-05-29 11:04:19 -04:00
Roope Astala
3531fe8a21 Merge pull request #396 from rastala/master
version 1.0.41
2019-05-29 11:01:15 -04:00
Roope Astala
db6ae67940 version 1.0.41 2019-05-29 10:59:59 -04:00
Shané Winner
2a479bb01e Merge pull request #395 from imatiach-msft/ilmat/fix-typo
fix typo
2019-05-28 14:02:33 -07:00
Ilya Matiach
d05eec92af fix typo 2019-05-28 16:59:59 -04:00
Josée Martens
70fdab0a28 Update auto-ml-classification-with-deployment.ipynb 2019-05-24 13:45:04 -05:00
Josée Martens
7ce5a43b58 Update auto-ml-classification-with-deployment.ipynb 2019-05-24 13:44:35 -05:00
Josée Martens
d2a9dbb582 Update auto-ml-classification-with-deployment.ipynb 2019-05-24 13:43:38 -05:00
Roope Astala
a5d774683d Merge pull request #390 from rastala/master
fix default cluster creation in config notebook
2019-05-23 12:30:09 -04:00
Roope Astala
0e850f0917 fix default cluster creation in config notebook 2019-05-23 12:27:53 -04:00
Shané Winner
59f34b7179 Delete configtest.ipynb 2019-05-22 10:47:50 -07:00
Shané Winner
2a3cb69004 Create configtest.ipynb 2019-05-22 10:41:16 -07:00
Shané Winner
42894ff81a Delete LICENSE.txt 2019-05-22 10:22:05 -07:00
Shané Winner
2163cab50b Delete LICENSE.txt 2019-05-22 10:21:42 -07:00
Shané Winner
255edb04c0 Rename LICENSE.txt to LICENSE 2019-05-22 10:13:08 -07:00
Shané Winner
cfce079278 Rename LICENSES to LICENSE.txt 2019-05-22 10:06:31 -07:00
Shané Winner
ae6f067c81 Deleted index.html
cleaning up root directory
2019-05-22 10:04:23 -07:00
Shané Winner
1b7ff724f3 Deleted pr.md
Contents of this file moved to the README in the root directory.
2019-05-22 10:03:40 -07:00
Shané Winner
8bba850db1 moved the content in the pr.md file
moved the content in the pr.md file to under 'Projects using Azure Machine Learning'
2019-05-21 07:51:28 -07:00
Shané Winner
b9e35ea0cb Create LICENSE 2019-05-21 07:44:10 -07:00
Shané Winner
ffa28aa89c Delete sdk 2019-05-21 07:43:06 -07:00
Shané Winner
6ab85a20e3 Create LICENSES 2019-05-21 07:42:07 -07:00
Shané Winner
486c44d157 Create sdk 2019-05-21 07:39:43 -07:00
Shané Winner
cd80040dd8 Delete Licenses 2019-05-21 07:39:03 -07:00
Shané Winner
465a5b13b1 Create Licenses 2019-05-21 07:38:52 -07:00
Shané Winner
dcd2d58880 Added notice on the data/telemetry 2019-05-20 14:44:43 -07:00
Roope Astala
93bf4393f2 Merge pull request #381 from jeff-shepherd/master
Revert change to default amlcompute cluster
2019-05-16 15:35:43 -04:00
Jeff Shepherd
d6ebb484a6 Revert change to default amlcomputecluster to support existing resource
groups
2019-05-16 12:27:23 -07:00
Roope Astala
35afd43193 Merge pull request #372 from rogerhe/master
adding macOS specific yml. Install nomkl to workaround openmp issue
2019-05-14 19:07:42 -04:00
Roope Astala
2d68535de2 Merge pull request #376 from rastala/master
version 1.0.39
2019-05-14 16:04:09 -04:00
Roope Astala
0d448892a3 version check 2019-05-14 16:03:39 -04:00
Roope Astala
2d41c00488 version 1.0.39 2019-05-14 16:01:14 -04:00
Roger He
22597ac684 adding macOS specific yml. Install nomkl to workaround openmp issue 2019-05-09 16:51:51 -07:00
Josée Martens
8b1bffc200 Update README.md 2019-05-08 12:36:49 -05:00
Josée Martens
a240ac319f Update README.md 2019-05-08 12:27:57 -05:00
Josée Martens
83cfe3b9b3 Update README.md 2019-05-08 12:25:41 -05:00
Paula Ledgerwood
dcce6f227f Merge pull request #360 from Azure/paledger/update-readme
Update readme/cluster location from PM's instructions
2019-05-06 10:08:22 -07:00
Paula Ledgerwood
5328186d68 Update python kernel version 2019-05-06 09:45:20 -07:00
Paula Ledgerwood
7ccaa2cf57 Update readme from PM's instructions 2019-05-06 09:41:54 -07:00
Shané Winner
56b0664b6b Update img-classification-part1-training.ipynb 2019-05-05 17:47:31 -07:00
Shané Winner
4c1167edc4 Update img-classification-part1-training.ipynb 2019-05-05 17:45:48 -07:00
Shané Winner
eb643fe213 Update README.md 2019-05-05 17:26:29 -07:00
Shané Winner
5faa9d293c Update README.md 2019-05-05 15:34:27 -07:00
Shané Winner
32e2b5f647 Update train-hyperparameter-tune-deploy-with-tensorflow.ipynb 2019-05-05 15:32:19 -07:00
Shané Winner
ae25654882 Update train-hyperparameter-tune-deploy-with-pytorch.ipynb 2019-05-05 15:29:42 -07:00
Shané Winner
0ca05093bd Update train-hyperparameter-tune-deploy-with-keras.ipynb 2019-05-05 15:28:16 -07:00
Shané Winner
5e39582de3 Update train-hyperparameter-tune-deploy-with-chainer.ipynb 2019-05-05 15:24:14 -07:00
Shané Winner
6b6a6da9dc Update tensorboard.ipynb 2019-05-05 15:22:28 -07:00
Shané Winner
cba2c6b9e2 Update how-to-use-estimator.ipynb 2019-05-05 15:20:50 -07:00
Shané Winner
58557abd20 Update export-run-history-to-tensorboard.ipynb 2019-05-05 15:18:48 -07:00
Shané Winner
59452a3141 Update distributed-tensorflow-with-parameter-server.ipynb 2019-05-05 15:17:15 -07:00
Shané Winner
463718e26b Update distributed-tensorflow-with-horovod.ipynb 2019-05-05 15:15:13 -07:00
Shané Winner
9ea0ba5131 Update distributed-pytorch-with-horovod.ipynb 2019-05-05 15:13:28 -07:00
Shané Winner
2804a8d859 Update distributed-cntk-with-custom-docker.ipynb 2019-05-05 15:11:51 -07:00
Shané Winner
4761b668ff Update distributed-chainer.ipynb 2019-05-05 15:09:28 -07:00
Shané Winner
c4163017c2 Update using-environments.ipynb 2019-05-05 00:11:40 -07:00
Shané Winner
71e8e9bd23 Update train-within-notebook.ipynb 2019-05-05 00:09:26 -07:00
Shané Winner
6ff06dd137 Update train-on-remote-vm.ipynb 2019-05-05 00:06:23 -07:00
Shané Winner
73db8ae04d Update train-on-local.ipynb 2019-05-04 23:52:01 -07:00
Shané Winner
3637dce58a Update train-on-amlcompute.ipynb 2019-05-04 23:48:16 -07:00
Shané Winner
23771fc599 added tracking pixel and edited config text 2019-05-04 21:08:10 -07:00
Shané Winner
5f04a467b7 added tracking pixel 2019-05-04 21:03:08 -07:00
Shané Winner
532f65c998 added tracking pixel and edited config text 2019-05-04 20:59:50 -07:00
Shané Winner
f36dda0c2d added tracking pixel and edited the config text 2019-05-04 20:54:32 -07:00
Shané Winner
c7b56929bc added tracking pixel and edited config text 2019-05-04 20:50:57 -07:00
Shané Winner
5f19d75a42 added tracking pixel and edited the config text 2019-05-04 20:48:04 -07:00
Shané Winner
a1968aafa2 updated config text and added tracking pixel 2019-05-04 20:43:54 -07:00
Shané Winner
6b82991017 edited config text and added tracking pixel 2019-05-04 20:40:23 -07:00
Shané Winner
725013511e added tracking pixel 2019-05-04 20:34:58 -07:00
Shané Winner
6a20160173 added tracking pixel 2019-05-04 20:02:01 -07:00
Shané Winner
137db8aec0 added tracking pixel 2019-05-04 19:49:50 -07:00
Shané Winner
b7b10c394b added tracking pixel 2019-05-04 19:47:28 -07:00
Shané Winner
46206716a4 added tracking pixel 2019-05-04 19:44:23 -07:00
Shané Winner
92bb98ac62 added tracking pixel 2019-05-04 19:41:33 -07:00
Shané Winner
b398c24262 added tracking pixel 2019-05-04 19:38:28 -07:00
Shané Winner
e0618302e3 added tracking pixel 2019-05-04 19:35:57 -07:00
Shané Winner
b6cddafa3e edited config text and added the pixel tracker 2019-05-04 19:31:59 -07:00
Shané Winner
4188bd2474 updated the config text and added the tracking pixel 2019-05-04 19:25:26 -07:00
Shané Winner
69126edfcb update config text and added tracking pixel 2019-05-04 19:20:46 -07:00
Shané Winner
4e14c35b9b added pixel tracker 2019-05-04 16:31:07 -07:00
Shané Winner
1608c19aa6 updated tracking pixel and and config text 2019-05-04 15:12:53 -07:00
Shané Winner
46b8611b74 tracking pixel and edited config text 2019-05-04 15:08:57 -07:00
Shané Winner
fbb01bde70 update the config text and added pixel tracker server 2019-05-04 15:01:35 -07:00
Shané Winner
cefe2f0811 updated the config text and added the tracking pixel 2019-05-04 14:58:45 -07:00
Shané Winner
42e0a31f88 updated the config text and the tracking pixel 2019-05-04 14:54:37 -07:00
Shané Winner
8b0998ac9f updated the config text and the tracking pixel 2019-05-04 14:49:29 -07:00
Shané Winner
046c6051fb updated config text and added tracking pixel 2019-05-04 14:38:39 -07:00
Shané Winner
bdb7db15ef updated tracking pixel and the config text 2019-05-04 14:35:28 -07:00
Shané Winner
b13139f103 update the config text and the tracking pixel 2019-05-04 14:31:25 -07:00
Shané Winner
8adb206ae3 updated config text and pixel tracker 2019-05-04 13:56:09 -07:00
Shané Winner
484b6bbb7a updated the config text and pixel server 2019-05-04 13:51:12 -07:00
Shané Winner
55ef0bda6a updated config text 2019-05-04 13:46:43 -07:00
Shané Winner
1401cdef33 updated config text 2019-05-04 13:41:34 -07:00
Shané Winner
5d02206cbd updated with tracking pixel 2019-05-04 13:34:11 -07:00
Shané Winner
c24b65d4ae updated with tracking pixel 2019-05-04 13:32:14 -07:00
Shané Winner
57c5ef318f updated with pixel tracker 2019-05-04 13:25:11 -07:00
Shané Winner
ba033d72f8 Update train-in-spark.ipynb 2019-05-04 09:33:07 -07:00
Shané Winner
aa657ac528 Update manage-runs.ipynb 2019-05-04 09:29:00 -07:00
Shané Winner
7d8289679d added the tracking pixel and the edited the config text 2019-05-04 08:40:18 -07:00
Shané Winner
a7c3db0560 Update model-register-and-deploy.ipynb 2019-05-03 23:21:58 -07:00
Shané Winner
e548847881 pixel text and config text update 2019-05-03 23:20:57 -07:00
Shané Winner
08c6b1f4ed tracking pixel test 2019-05-03 23:15:28 -07:00
Shané Winner
78abb65f5e updated configuration text 2019-05-03 23:08:55 -07:00
Shané Winner
3c6c090732 Update README.md 2019-05-03 22:54:31 -07:00
Shané Winner
513e36d9b2 updated the config verbiage and tracking pixel 2019-05-03 22:54:02 -07:00
Ilya Matiach
9db91a7fb8 Merge pull request #351 from imatiach-msft/ilmat/update-raw-features-notebook
Update raw features explanation notebook
2019-05-03 12:47:28 -04:00
Roope Astala
d9b26b655b Merge pull request #356 from rastala/master
how to use environments
2019-05-03 10:27:33 -04:00
Roope Astala
cb8dc41766 how to use environments 2019-05-03 10:25:39 -04:00
Ilya Matiach
9c9b4bb122 Update raw features explanation notebook 2019-05-02 14:29:53 -04:00
Roope Astala
f5c896c70f Merge pull request #345 from csteegz/add-gpu-deploy
Create production-deploy-to-aks-gpu.ipynb
2019-05-02 14:13:50 -04:00
Colleen Forbes
3b572eddb2 Merge pull request #350 from MayMSFT/master
add dataset tutorial
2019-05-02 09:33:25 -07:00
May Hu
51523db294 add dataset tutorial 2019-05-02 09:07:11 -07:00
Ilya Matiach
3b4998941c Merge pull request #348 from imatiach-msft/ilmat/update-explain-model-nb
updating model explanation notebooks
2019-04-30 17:27:44 -04:00
Ilya Matiach
6cdbfb8722 updating model explanation notebooks 2019-04-30 17:12:54 -04:00
Colin Versteeg
c086bd69c7 Create production-deploy-to-aks-gpu.ipynb
Add deploy to aks GPU notebook
2019-04-29 16:26:42 -07:00
Shané Winner
279c9b8dc4 Pixel Tracker 2019-04-29 11:27:03 -07:00
Shané Winner
98589fe335 Testing Pixel Tracker 2019-04-29 11:16:08 -07:00
Shané Winner
77f21058a2 Testing Pixel Tracker 2019-04-29 11:04:05 -07:00
Roope Astala
baa65d0886 Merge pull request #343 from Azure/paledger/add-accel-models
Initial commit to add AccelModels notebooks from AzureMlCli repo
2019-04-29 13:56:06 -04:00
Paula Ledgerwood
0fffa11b2a Update links and code formatting 2019-04-29 10:20:55 -07:00
Paula Ledgerwood
20ec225343 Initial commit to add notebooks from AzureMlCli repo 2019-04-26 11:16:33 -07:00
Roope Astala
845e9d653e Merge pull request #342 from rastala/master
dockerfile 1.0.33
2019-04-26 14:01:55 -04:00
Roope Astala
639ef81636 dockerfile 1.0.33 2019-04-26 13:57:46 -04:00
Roope Astala
60158bf41a Merge pull request #341 from rastala/master
version 1.0.33
2019-04-26 13:45:47 -04:00
Roope Astala
8dbbb01b8a version 1.0.33 2019-04-26 13:44:15 -04:00
Roope Astala
6e6b2b0c48 Merge pull request #340 from rastala/master
add readme
2019-04-26 09:41:49 -04:00
Roope Astala
85f5721bf8 add readme 2019-04-26 09:40:24 -04:00
Shané Winner
6a7dd741e7 Pixel server added 2019-04-23 13:48:23 -07:00
Shané Winner
314218fc89 Added pixel server 2019-04-23 13:47:06 -07:00
Shané Winner
b50d2725c7 Added pixel server 2019-04-23 13:46:06 -07:00
Shané Winner
9a2f448792 Added pixel server 2019-04-23 13:45:05 -07:00
Shané Winner
dd620f19fd Pixel server added 2019-04-23 13:43:41 -07:00
Shané Winner
8116d31da4 Pixel Server added 2019-04-23 13:40:26 -07:00
Shané Winner
ef29dc1fa5 Added Pixel Server 2019-04-23 13:39:18 -07:00
Shané Winner
97b345cb33 Implemented Pixel Server 2019-04-23 13:37:41 -07:00
Shané Winner
282250e670 Implementing Pixel Server 2019-04-23 13:36:24 -07:00
Shané Winner
acef60c5b3 Testing pixel web app 2019-04-23 13:15:04 -07:00
Shané Winner
bfb444eb15 Testing Pixel Tracker 2019-04-23 13:07:48 -07:00
Shané Winner
6277659bf2 Testing Pixel Server 2019-04-23 11:48:55 -07:00
Shané Winner
1645e12712 Testing Tracking Pixel 2019-04-23 11:15:53 -07:00
Roope Astala
cc4a32e70b Merge pull request #337 from jeff-shepherd/master
Updated automl_setup scripts
2019-04-23 13:50:09 -04:00
Jeff Shepherd
997a35aed5 Updated automl_setup scripts 2019-04-23 10:40:33 -07:00
Roope Astala
dd6317a4a0 Merge pull request #336 from rastala/master
adding work-with-data
2019-04-23 10:05:08 -04:00
Roope Astala
82d8353d54 adding work-with-data 2019-04-23 10:04:32 -04:00
Shané Winner
59a01c17a0 Testing the pixel tracker 2019-04-22 14:45:09 -07:00
Shané Winner
e31e1d9af3 Implemented a test pixel tracker 2019-04-22 14:41:32 -07:00
Roope Astala
d38b9db255 Merge pull request #334 from rastala/master
docker update
2019-04-22 15:43:28 -04:00
Roope Astala
761ad88c93 docker update 2019-04-22 15:43:02 -04:00
Roope Astala
644729e5db Merge pull request #333 from rastala/master
version 1.0.30
2019-04-22 15:40:11 -04:00
Roope Astala
e2b1b3fcaa version 1.0.30 2019-04-22 15:39:18 -04:00
Roope Astala
dc692589a9 Merge pull request #326 from rastala/master
update aks notebook
2019-04-18 16:19:51 -04:00
Roope Astala
624b4595b5 update aks notebook 2019-04-18 16:18:33 -04:00
Roope Astala
0ed85c33c2 Delete release.json 2019-04-18 10:01:50 -04:00
Roope Astala
5b01de605f Merge pull request #318 from savitamittal1/hdinotebook
Sample HDI notebook
2019-04-18 10:01:26 -04:00
Savitam
c351ac988a Sample HDI notebook
sample HDI notebook
2019-04-15 12:35:34 -07:00
Josée Martens
759ec3934c Delete yt_cover.png 2019-04-15 12:06:25 -05:00
Josée Martens
b499b88a85 Delete python36.png 2019-04-15 12:06:16 -05:00
Josée Martens
5f4edac3c1 Update NBSETUP.md 2019-04-15 12:00:31 -05:00
Josée Martens
edfce0d936 Update README.md 2019-04-12 17:28:16 -05:00
Josée Martens
1516c7fc24 Update README.md
testing for search
2019-04-12 17:19:55 -05:00
Roope Astala
389fb668ce Add files via upload 2019-04-10 11:12:55 -04:00
Josée Martens
647d5e72a5 Merge pull request #307 from Azure/vizhur-patch-2
Create googled8147fb6c0788258.html
2019-04-09 15:21:51 -05:00
vizhur
43ac4c84bb Create googled8147fb6c0788258.html 2019-04-09 16:19:47 -04:00
Roope Astala
8a1a82b50a Merge pull request #303 from rastala/master
dockerfile and missing config update
2019-04-08 15:38:13 -04:00
Roope Astala
72f386298c dockerfile and missing config update 2019-04-08 15:37:48 -04:00
Roope Astala
41d697e298 Merge pull request #302 from rastala/master
version 1.0.23
2019-04-08 15:35:50 -04:00
Roope Astala
c3ce932029 version 1.0.23 2019-04-08 15:34:51 -04:00
Roope Astala
a956162114 Merge pull request #290 from rastala/master
update aks deployment notebook
2019-04-03 10:53:51 -04:00
Roope Astala
cb5a178e40 Merge branch 'master' of github.com:rastala/MachineLearningNotebooks 2019-04-03 10:52:40 -04:00
Roope Astala
d81c336c59 update production deploy to aks 2019-04-03 10:52:15 -04:00
Roope Astala
4244a24d81 Merge pull request #287 from jeff-shepherd/master
Fixed line termination on automl_setup_linux.sh
2019-04-03 09:21:35 -04:00
Jeff Shepherd
3b488555e5 Added back automl_setup_linux.sh with correct line termination 2019-04-02 16:24:05 -07:00
Jeff Shepherd
6abc478f33 Removed automl_setup_linux.sh 2019-04-02 16:23:11 -07:00
Roope Astala
666c2579eb Merge pull request #285 from jeff-shepherd/master
Corrected line termination for automl_setup_mac.sh
2019-04-02 09:19:53 -04:00
Jeff Shepherd
5af3aa4231 Fixed line termination 2019-04-01 16:19:00 -07:00
Jeff Shepherd
e48d828ab0 Removed automl_setup_mac.sh 2019-04-01 16:17:56 -07:00
Jeff Shepherd
44aa636c21 Merge branch 'master' of https://github.com/Azure/MachineLearningNotebooks 2019-04-01 16:07:11 -07:00
Jeff Shepherd
4678f9adc3 Merge branch 'master' of https://github.com/jeff-shepherd/MachineLearningNotebooks 2019-04-01 16:04:46 -07:00
Jeff Shepherd
5bf85edade Added automl_setup_mac.sh with correct line termination 2019-04-01 16:03:39 -07:00
Jeff Shepherd
94f381e884 Removed automl_setup_mac.sh 2019-04-01 16:02:53 -07:00
Roope Astala
ea1b7599c3 Merge pull request #267 from rastala/master
add automl files
2019-03-25 19:26:07 -04:00
Roope Astala
6b8a6befde add automl files 2019-03-25 19:25:38 -04:00
Roope Astala
c1511b7b74 Merge pull request #266 from rastala/master
1.0.21 dockerfile
2019-03-25 15:10:05 -04:00
Roope Astala
8f007a3333 1.0.21 dockerfile 2019-03-25 15:09:39 -04:00
Roope Astala
5ad3ca00e8 Merge pull request #265 from rastala/master
version 1.0.21
2019-03-25 15:07:09 -04:00
Roope Astala
556a41e223 version 1.0.21 2019-03-25 15:06:08 -04:00
Roope Astala
407b8929d0 Merge pull request #259 from jeff-shepherd/master
Added example of printing model hyperparameters
2019-03-19 09:40:25 -04:00
Jeff Shepherd
18a11bbd8d Added model printing example 2019-03-18 16:31:48 -07:00
Roope Astala
8b439a9f7c Merge pull request #256 from rastala/master
update RAPIDS 2
2019-03-18 12:09:33 -04:00
rastala
75c393a221 update RAPIDS 2 2019-03-18 12:08:10 -04:00
Roope Astala
be7176fe06 Merge pull request #255 from rastala/master
update RAPIDS sample
2019-03-18 11:42:51 -04:00
rastala
7b41675355 update RAPIDS sample 2019-03-18 11:40:43 -04:00
Jeff Shepherd
fa7685f6fa Added example of printing model hyperparameters 2019-03-15 13:18:17 -07:00
Roope Astala
6b444b1467 Merge pull request #248 from rastala/master
dockerfile 1.0.18
2019-03-11 15:33:07 -04:00
Roope Astala
c9767473ae dockerfile 1.0.18 2019-03-11 15:32:30 -04:00
Sheri Gilley
7db93bcb1d update comments 2019-01-22 17:18:19 -06:00
Sheri Gilley
fcbe925640 Merge branch 'sdk-codetest' of https://github.com/Azure/MachineLearningNotebooks into sdk-codetest 2019-01-07 13:06:12 -06:00
Sheri Gilley
bedfbd649e fix files 2019-01-07 13:06:02 -06:00
Sheri Gilley
fb760f648d Delete temp.py 2019-01-07 12:58:32 -06:00
Sheri Gilley
a9a0713d2f Delete donotupload.py 2019-01-07 12:57:58 -06:00
Sheri Gilley
c9d018b52c remove prepare environment 2019-01-07 12:56:54 -06:00
Sheri Gilley
53dbd0afcf hdi run config code 2019-01-07 11:29:40 -06:00
Sheri Gilley
e3a64b1f16 code for remote vm 2019-01-04 12:51:11 -06:00
Sheri Gilley
732eecfc7c update names 2019-01-04 12:45:28 -06:00
Sheri Gilley
6995c086ff change snippet names 2019-01-03 22:39:06 -06:00
Sheri Gilley
80bba4c7ae code for amlcompute section 2019-01-03 18:55:31 -06:00
Sheri Gilley
3c581b533f for local computer 2019-01-03 18:07:12 -06:00
Sheri Gilley
cc688caa4e change names 2019-01-03 08:53:49 -06:00
Sheri Gilley
da225e116e new code 2019-01-03 08:02:35 -06:00
Sheri Gilley
73c5d02880 Update quickstart.py 2018-12-17 12:23:03 -06:00
Sheri Gilley
e472b54f1b Update quickstart.py 2018-12-17 12:22:40 -06:00
Sheri Gilley
716c6d8bb1 add quickstart code 2018-11-06 11:27:58 -06:00
Sheri Gilley
23189c6f40 move folder 2018-10-17 16:24:46 -05:00
Sheri Gilley
361b57ed29 change all names to camelCase 2018-10-17 11:47:09 -05:00
Sheri Gilley
3f531fd211 try camelCase 2018-10-17 11:09:46 -05:00
Sheri Gilley
111f5e8d73 playing around 2018-10-17 10:46:33 -05:00
Sheri Gilley
96c59d5c2b testing 2018-10-17 09:56:04 -05:00
Sheri Gilley
ce3214b7c6 fix name 2018-10-16 17:33:24 -05:00
Sheri Gilley
53199d17de add delete 2018-10-16 16:54:08 -05:00
Sheri Gilley
54c883412c add test service 2018-10-16 16:49:41 -05:00
711 changed files with 106940 additions and 71143 deletions

CODE_OF_CONDUCT.md

@@ -0,0 +1,9 @@
# Microsoft Open Source Code of Conduct
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
Resources:
- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns


@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11
# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git
# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6
# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]
# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.18"]
# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.18" --single-branch https://github.com/Azure/MachineLearningNotebooks.git
# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]
# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py
# open up port 8887 on the container
EXPOSE 8887
# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"
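Each of the Dockerfiles in this comparison (this one and the version-pinned variants that follow) is used the same way; a minimal sketch of building and running it, assuming the file is saved locally as `Dockerfile` and using an illustrative image tag:

```sh
# build the image from the directory containing the Dockerfile
docker build -t azureml-notebooks:1.0.18 .

# run it, publishing the Jupyter port the image exposes
docker run -it -p 8887:8887 azureml-notebooks:1.0.18

# then point a browser at http://localhost:8887 to open the cloned sample notebooks
```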


@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11
# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git
# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6
# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]
# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.21"]
# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.21" --single-branch https://github.com/Azure/MachineLearningNotebooks.git
# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]
# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py
# open up port 8887 on the container
EXPOSE 8887
# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"


@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11
# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git
# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6
# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]
# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.23"]
# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.23" --single-branch https://github.com/Azure/MachineLearningNotebooks.git
# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]
# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py
# open up port 8887 on the container
EXPOSE 8887
# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"


@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11
# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git
# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6
# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]
# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.30"]
# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.30" --single-branch https://github.com/Azure/MachineLearningNotebooks.git
# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]
# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py
# open up port 8887 on the container
EXPOSE 8887
# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"


@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11
# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git
# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6
# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]
# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.33"]
# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.33" --single-branch https://github.com/Azure/MachineLearningNotebooks.git
# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]
# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py
# open up port 8887 on the container
EXPOSE 8887
# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"


@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11
# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git
# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6
# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]
# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.41"]
# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.41" --single-branch https://github.com/Azure/MachineLearningNotebooks.git
# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]
# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py
# open up port 8887 on the container
EXPOSE 8887
# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"


@@ -0,0 +1,29 @@
FROM continuumio/miniconda:4.5.11
# install git
RUN apt-get update && apt-get upgrade -y && apt-get install -y git
# create a new conda environment named azureml
RUN conda create -n azureml -y -q Python=3.6
# install additional packages used by sample notebooks. this is optional
RUN ["/bin/bash", "-c", "source activate azureml && conda install -y tqdm cython matplotlib scikit-learn"]
# install azureml-sdk components
RUN ["/bin/bash", "-c", "source activate azureml && pip install azureml-sdk[notebooks]==1.0.43"]
# clone Azure ML GitHub sample notebooks
RUN cd /home && git clone -b "azureml-sdk-1.0.43" --single-branch https://github.com/Azure/MachineLearningNotebooks.git
# generate jupyter configuration file
RUN ["/bin/bash", "-c", "source activate azureml && mkdir ~/.jupyter && cd ~/.jupyter && jupyter notebook --generate-config"]
# set an empty token for Jupyter to remove authentication.
# this is NOT recommended for a production environment
RUN echo "c.NotebookApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py
# open up port 8887 on the container
EXPOSE 8887
# start Jupyter notebook server on port 8887 when the container starts
CMD /bin/bash -c "cd /home/MachineLearningNotebooks && source activate azureml && jupyter notebook --port 8887 --no-browser --ip 0.0.0.0 --allow-root"


@@ -1,3 +1,4 @@
This software is made available to you on the condition that you agree to
[your agreement][1] governing your use of Azure.
If you do not have an existing agreement governing your use of Azure, you agree that


@@ -1,6 +1,4 @@
- # Setting up environment
+ # Set up your notebook environment for Azure Machine Learning
- ---
To run the notebooks in this repository use one of following options.
@@ -12,9 +10,7 @@ Azure Notebooks is a hosted Jupyter-based notebook service in the Azure cloud. A
1. Follow the instructions in the [Configuration](configuration.ipynb) notebook to create and connect to a workspace
1. Open one of the sample notebooks
- **Make sure the Azure Notebook kernel is set to `Python 3.6`** when you open a notebook
- ![set kernel to Python 3.6](images/python36.png)
+ **Make sure the Azure Notebook kernel is set to `Python 3.6`** when you open a notebook by choosing Kernel > Change Kernel > Python 3.6 from the menus.
## **Option 2: Use your own notebook server**
@@ -28,14 +24,11 @@ pip install azureml-sdk
git clone https://github.com/Azure/MachineLearningNotebooks.git
# below steps are optional
- # install the base SDK and a Jupyter notebook server
+ # install the base SDK, Jupyter notebook server and tensorboard
- pip install azureml-sdk[notebooks]
+ pip install azureml-sdk[notebooks,tensorboard]
- # install the data prep component
- pip install azureml-dataprep
# install model explainability component
- pip install azureml-sdk[explain]
+ pip install azureml-sdk[interpret]
# install automated ml components
pip install azureml-sdk[automl]
@@ -58,8 +51,7 @@ Please make sure you start with the [Configuration](configuration.ipynb) noteboo
### Video walkthrough:
- [![Get Started video](images/yt_cover.png)](https://youtu.be/VIsXeTuW3FU)
+ [!VIDEO https://youtu.be/VIsXeTuW3FU]
## **Option 3: Use Docker**
@@ -90,14 +82,11 @@ Now you can point your browser to http://localhost:8887. We recommend that you s
If you need additional Azure ML SDK components, you can either modify the Docker files before you build the Docker images to add additional steps, or install them through command line in the live container after you build the Docker image. For example:
```sh
- # install dataprep components
- pip install azureml-dataprep
# install the core SDK and automated ml components
pip install azureml-sdk[automl]
# install the core SDK and model explainability component
- pip install azureml-sdk[explain]
+ pip install azureml-sdk[interpret]
# install the core SDK and experimental components
pip install azureml-sdk[contrib]
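For a local (non-Docker) setup with the updated extras shown above, a minimal sketch; the environment name and Python version simply follow the Dockerfiles earlier in this comparison, and the extras you actually need may differ:

```sh
# create and activate a conda environment (name and Python version follow the Dockerfiles above)
conda create -n azureml -y python=3.6
conda activate azureml

# base SDK with notebook server and tensorboard support, plus optional extras
pip install "azureml-sdk[notebooks,tensorboard]"
pip install "azureml-sdk[interpret]"
pip install "azureml-sdk[automl]"

# start Jupyter and run the Configuration notebook first
jupyter notebook
```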


@@ -1,56 +1,43 @@
- # Azure Machine Learning service example notebooks
+ # Azure Machine Learning Python SDK notebooks
- This repository contains example notebooks demonstrating the [Azure Machine Learning](https://azure.microsoft.com/en-us/services/machine-learning-service/) Python SDK which allows you to build, train, deploy and manage machine learning solutions using Azure. The AML SDK allows you the choice of using local or cloud compute resources, while managing and maintaining the complete data science workflow from the cloud.
+ > a community-driven repository of examples using mlflow for tracking can be found at https://github.com/Azure/azureml-examples
- ![Azure ML workflow](https://raw.githubusercontent.com/MicrosoftDocs/azure-docs/master/articles/machine-learning/service/media/overview-what-is-azure-ml/aml.png)
+ Welcome to the Azure Machine Learning Python SDK notebooks repository!
+ ## Getting started
+ These notebooks are recommended for use in an Azure Machine Learning [Compute Instance](https://docs.microsoft.com/azure/machine-learning/concept-compute-instance), where you can run them without any additional set up.
+ However, the notebooks can be run in any development environment with the correct `azureml` packages installed.
+ Install the `azureml.core` Python package:
- ## Quick installation
```sh
- pip install azureml-sdk
+ pip install azureml-core
```
- Read more detailed instructions on [how to set up your environment](./NBSETUP.md) using Azure Notebook service, your own Jupyter notebook server, or Docker.
- ## How to navigate and use the example notebooks?
+ Install additional packages as needed:
- You should always run the [Configuration](./configuration.ipynb) notebook first when setting up a notebook library on a new machine or in a new environment. It configures your notebook library to connect to an Azure Machine Learning workspace, and sets up your workspace and compute to be used by many of the other examples.
- If you want to...
+ ```sh
+ pip install azureml-mlflow
+ pip install azureml-dataset-runtime
+ pip install azureml-automl-runtime
+ pip install azureml-pipeline
+ pip install azureml-pipeline-steps
+ ...
+ ```
- * ...try out and explore Azure ML, start with image classification tutorials: [Part 1 (Training)](./tutorials/img-classification-part1-training.ipynb) and [Part 2 (Deployment)](./tutorials/img-classification-part2-deploy.ipynb).
+ We recommend starting with one of the [quickstarts](tutorials/compute-instance-quickstarts).
- * ...prepare your data and do automated machine learning, start with regression tutorials: [Part 1 (Data Prep)](./tutorials/regression-part1-data-prep.ipynb) and [Part 2 (Automated ML)](./tutorials/regression-part2-automated-ml.ipynb).
- * ...learn about experimentation and tracking run history, first [train within Notebook](./how-to-use-azureml/training/train-within-notebook/train-within-notebook.ipynb), then try [training on remote VM](./how-to-use-azureml/training/train-on-remote-vm/train-on-remote-vm.ipynb) and [using logging APIs](./how-to-use-azureml/training/logging-api/logging-api.ipynb).
- * ...train deep learning models at scale, first learn about [Machine Learning Compute](./how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb), and then try [distributed hyperparameter tuning](./how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb) and [distributed training](./how-to-use-azureml/training-with-deep-learning/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb).
- * ...deploy models as a realtime scoring service, first learn the basics by [training within Notebook and deploying to Azure Container Instance](./how-to-use-azureml/training/train-within-notebook/train-within-notebook.ipynb), then learn how to [register and manage models, and create Docker images](./how-to-use-azureml/deployment/register-model-create-image-deploy-service/register-model-create-image-deploy-service.ipynb), and [production deploy models on Azure Kubernetes Cluster](./how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb).
- * ...deploy models as a batch scoring service, first [train a model within Notebook](./how-to-use-azureml/training/train-within-notebook/train-within-notebook.ipynb), learn how to [register and manage models](./how-to-use-azureml/deployment/register-model-create-image-deploy-service/register-model-create-image-deploy-service.ipynb), then [create Machine Learning Compute for scoring compute](./how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb), and [use Machine Learning Pipelines to deploy your model](./how-to-use-azureml/machine-learning-pipelines/pipeline-mpi-batch-prediction.ipynb).
- * ...monitor your deployed models, learn about using [App Insights](./how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb) and [model data collection](./how-to-use-azureml/deployment/enable-data-collection-for-models-in-aks/enable-data-collection-for-models-in-aks.ipynb).
- ## Tutorials
+ ## Contributing
- The [Tutorials](./tutorials) folder contains notebooks for the tutorials described in the [Azure Machine Learning documentation](https://aka.ms/aml-docs).
+ This repository is a push-only mirror. Pull requests are ignored.
- ## How to use Azure ML
+ ## Code of Conduct
- The [How to use Azure ML](./how-to-use-azureml) folder contains specific examples demonstrating the features of the Azure Machine Learning SDK
+ This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). Please see the [code of conduct](CODE_OF_CONDUCT.md) for details.
- - [Training](./how-to-use-azureml/training) - Examples of how to build models using Azure ML's logging and execution capabilities on local and remote compute targets
+ ## Reference
- - [Training with Deep Learning](./how-to-use-azureml/training-with-deep-learning) - Examples demonstrating how to build deep learning models using estimators and parameter sweeps
- - [Manage Azure ML Service](./how-to-use-azureml/manage-azureml-service) - Examples how to perform tasks, such as authenticate against Azure ML service in different ways.
- - [Automated Machine Learning](./how-to-use-azureml/automated-machine-learning) - Examples using Automated Machine Learning to automatically generate optimal machine learning pipelines and models
- - [Machine Learning Pipelines](./how-to-use-azureml/machine-learning-pipelines) - Examples showing how to create and use reusable pipelines for training and batch scoring
- - [Deployment](./how-to-use-azureml/deployment) - Examples showing how to deploy and manage machine learning models and solutions
- - [Azure Databricks](./how-to-use-azureml/azure-databricks) - Examples showing how to use Azure ML with Azure Databricks
- ---
+ - [Documentation](https://docs.microsoft.com/azure/machine-learning)
- ## Documentation
- * Quickstarts, end-to-end tutorials, and how-tos on the [official documentation site for Azure Machine Learning service](https://docs.microsoft.com/en-us/azure/machine-learning/service/).
- * [Python SDK reference](https://docs.microsoft.com/en-us/python/api/overview/azure/ml/intro?view=azure-ml-py)
- * Azure ML Data Prep SDK [overview](https://aka.ms/data-prep-sdk), [Python SDK reference](https://aka.ms/aml-data-prep-apiref), and [tutorials and how-tos](https://aka.ms/aml-data-prep-notebooks).
- ---
- ## Projects using Azure Machine Learning
- Visit following repos to see projects contributed by Azure ML users:
- - [Fine tune natural language processing models using Azure Machine Learning service](https://github.com/Microsoft/AzureML-BERT)
- - [Fashion MNIST with Azure ML SDK](https://github.com/amynic/azureml-sdk-fashion)
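Whichever of these packages you install, a quick way to confirm the SDK version you ended up with is the same check the configuration notebook runs, for example:

```sh
python -c "import azureml.core; print(azureml.core.VERSION)"
```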


@@ -9,6 +9,13 @@
"Licensed under the MIT License." "Licensed under the MIT License."
] ]
}, },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/configuration.png)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -51,7 +58,7 @@
"\n", "\n",
"### What is an Azure Machine Learning workspace\n", "### What is an Azure Machine Learning workspace\n",
"\n", "\n",
"An Azure ML Workspace is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an Azure ML Workspace coordinates storage, databases, and compute resources providing added functionality for machine learning experimentation, deployment, inferencing, and the monitoring of deployed models." "An Azure ML Workspace is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an Azure ML Workspace coordinates storage, databases, and compute resources providing added functionality for machine learning experimentation, deployment, inference, and the monitoring of deployed models."
] ]
}, },
{ {
@@ -96,7 +103,7 @@
"source": [ "source": [
"import azureml.core\n", "import azureml.core\n",
"\n", "\n",
"print(\"This notebook was created using version 1.0.18 of the Azure ML SDK\")\n", "print(\"This notebook was created using version 1.40.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
] ]
}, },
@@ -207,7 +214,10 @@
"* You do not have permission to create a resource group if it's non-existing.\n", "* You do not have permission to create a resource group if it's non-existing.\n",
"* You are not a subscription owner or contributor and no Azure ML workspaces have ever been created in this subscription\n", "* You are not a subscription owner or contributor and no Azure ML workspaces have ever been created in this subscription\n",
"\n", "\n",
"If workspace creation fails, please work with your IT admin to provide you with the appropriate permissions or to provision the required resources." "If workspace creation fails, please work with your IT admin to provide you with the appropriate permissions or to provision the required resources.\n",
"\n",
"**Note**: A Basic workspace is created by default. If you would like to create an Enterprise workspace, please specify sku = 'enterprise'.\n",
"Please visit our [pricing page](https://azure.microsoft.com/en-us/pricing/details/machine-learning/) for more details on our Enterprise edition.\n"
] ]
}, },
{ {
@@ -228,6 +238,7 @@
" resource_group = resource_group, \n", " resource_group = resource_group, \n",
" location = workspace_region,\n", " location = workspace_region,\n",
" create_resource_group = True,\n", " create_resource_group = True,\n",
" sku = 'basic',\n",
" exist_ok = True)\n", " exist_ok = True)\n",
"ws.get_details()\n", "ws.get_details()\n",
"\n", "\n",
@@ -243,6 +254,8 @@
"\n", "\n",
"Many of the sample notebooks use Azure ML managed compute (AmlCompute) to train models using a dynamically scalable pool of compute. In this section you will create default compute clusters for use by the other notebooks and any other operations you choose.\n", "Many of the sample notebooks use Azure ML managed compute (AmlCompute) to train models using a dynamically scalable pool of compute. In this section you will create default compute clusters for use by the other notebooks and any other operations you choose.\n",
"\n", "\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
"\n",
"To create a cluster, you need to specify a compute configuration that specifies the type of machine to be used and the scalability behaviors. Then you choose a name for the cluster that is unique within the workspace that can be used to address the cluster later.\n", "To create a cluster, you need to specify a compute configuration that specifies the type of machine to be used and the scalability behaviors. Then you choose a name for the cluster that is unique within the workspace that can be used to address the cluster later.\n",
"\n", "\n",
"The cluster parameters are:\n", "The cluster parameters are:\n",
@@ -251,7 +264,7 @@
"```shell\n", "```shell\n",
"az vm list-skus -o tsv\n", "az vm list-skus -o tsv\n",
"```\n", "```\n",
"* min_nodes - this sets the minimum size of the cluster. If you set the minimum to 0 the cluster will shut down all nodes while note in use. Setting this number to a value higher than 0 will allow for faster start-up times, but you will also be billed when the cluster is not in use.\n", "* min_nodes - this sets the minimum size of the cluster. If you set the minimum to 0 the cluster will shut down all nodes while not in use. Setting this number to a value higher than 0 will allow for faster start-up times, but you will also be billed when the cluster is not in use.\n",
"* max_nodes - this sets the maximum size of the cluster. Setting this to a larger number allows for more concurrency and a greater distributed processing of scale-out jobs.\n", "* max_nodes - this sets the maximum size of the cluster. Setting this to a larger number allows for more concurrency and a greater distributed processing of scale-out jobs.\n",
"\n", "\n",
"\n", "\n",
@@ -268,14 +281,14 @@
"from azureml.core.compute_target import ComputeTargetException\n", "from azureml.core.compute_target import ComputeTargetException\n",
"\n", "\n",
"# Choose a name for your CPU cluster\n", "# Choose a name for your CPU cluster\n",
"cpu_cluster_name = \"cpucluster\"\n", "cpu_cluster_name = \"cpu-cluster\"\n",
"\n", "\n",
"# Verify that cluster does not exist already\n", "# Verify that cluster does not exist already\n",
"try:\n", "try:\n",
" cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n", " cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
" print(\"Found existing cpucluster\")\n", " print(\"Found existing cpu-cluster\")\n",
"except ComputeTargetException:\n", "except ComputeTargetException:\n",
" print(\"Creating new cpucluster\")\n", " print(\"Creating new cpu-cluster\")\n",
" \n", " \n",
" # Specify the configuration for the new cluster\n", " # Specify the configuration for the new cluster\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size=\"STANDARD_D2_V2\",\n", " compute_config = AmlCompute.provisioning_configuration(vm_size=\"STANDARD_D2_V2\",\n",
@@ -306,14 +319,14 @@
"from azureml.core.compute_target import ComputeTargetException\n", "from azureml.core.compute_target import ComputeTargetException\n",
"\n", "\n",
"# Choose a name for your GPU cluster\n", "# Choose a name for your GPU cluster\n",
"gpu_cluster_name = \"gpucluster\"\n", "gpu_cluster_name = \"gpu-cluster\"\n",
"\n", "\n",
"# Verify that cluster does not exist already\n", "# Verify that cluster does not exist already\n",
"try:\n", "try:\n",
" gpu_cluster = ComputeTarget(workspace=ws, name=gpu_cluster_name)\n", " gpu_cluster = ComputeTarget(workspace=ws, name=gpu_cluster_name)\n",
" print(\"Found existing gpu cluster\")\n", " print(\"Found existing gpu cluster\")\n",
"except ComputeTargetException:\n", "except ComputeTargetException:\n",
" print(\"Creating new gpucluster\")\n", " print(\"Creating new gpu-cluster\")\n",
" \n", " \n",
" # Specify the configuration for the new cluster\n", " # Specify the configuration for the new cluster\n",
" compute_config = AmlCompute.provisioning_configuration(vm_size=\"STANDARD_NC6\",\n", " compute_config = AmlCompute.provisioning_configuration(vm_size=\"STANDARD_NC6\",\n",
@@ -336,7 +349,7 @@
"\n", "\n",
"In this notebook you configured this notebook library to connect easily to an Azure ML workspace. You can copy this notebook to your own libraries to connect them to you workspace, or use it to bootstrap new workspaces completely.\n", "In this notebook you configured this notebook library to connect easily to an Azure ML workspace. You can copy this notebook to your own libraries to connect them to you workspace, or use it to bootstrap new workspaces completely.\n",
"\n", "\n",
"If you came here from another notebook, you can return there and complete that exercise, or you can try out the [Tutorials](./tutorials) or jump into \"how-to\" notebooks and start creating and deploying models. A good place to start is the [train in notebook](./how-to-use-azureml/training/train-in-notebook) example that walks through a simplified but complete end to end machine learning process." "If you came here from another notebook, you can return there and complete that exercise, or you can try out the [Tutorials](./tutorials) or jump into \"how-to\" notebooks and start creating and deploying models. A good place to start is the [train within notebook](./how-to-use-azureml/training/train-within-notebook) example that walks through a simplified but complete end to end machine learning process."
] ]
}, },
{ {
@@ -350,7 +363,7 @@
"metadata": { "metadata": {
"authors": [ "authors": [
{ {
"name": "roastala" "name": "ninhu"
} }
], ],
"kernelspec": { "kernelspec": {

4
configuration.yml Normal file
View File

@@ -0,0 +1,4 @@
name: configuration
dependencies:
- pip:
- azureml-sdk

305
contrib/RAPIDS/README.md Normal file
View File

@@ -0,0 +1,305 @@
## How to use the RAPIDS on AzureML materials
### Setting up requirements
The material requires the Azure ML SDK and the Jupyter Notebook server for interactive execution. Please refer to the instructions to [set up the environment](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-environment#local "Local Computer Set Up"). Follow the instructions under **Local Computer**, and make sure to run the last step, <span style="font-family: Courier New;">pip install \<new package\></span>, with <span style="font-family: Courier New;">new package = progressbar2</span> (<span style="font-family: Courier New;">pip install progressbar2</span>).
After following the directions, the user should end up with a conda environment (<span style="font-family: Courier New;">myenv</span>) that can be activated in an Anaconda prompt.
The user also needs an Azure subscription with a Machine Learning Services quota of 24 nodes or more in the desired region (enough to select a vmSize with 4 GPUs, as used in the notebook) for the desired VM family ([NC\_v3](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ncv3-series), [NC\_v2](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ncv2-series), [ND](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#nd-series) or [ND_v2](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ndv2-series-preview)). The specific vmSize used within the chosen family also needs to be whitelisted for Machine Learning Services usage.
&nbsp;
### Getting and running the material
Clone the AzureML Notebooks repository from GitHub by running the following command in a local_directory:
* C:\local_directory>git clone https://github.com/Azure/MachineLearningNotebooks.git
From a conda prompt, navigate to the local directory, activate the conda environment (<span style="font-family: Courier New;">myenv</span>) where the Azure ML SDK was installed, and launch Jupyter Notebook:
* (<span style="font-family: Courier New;">myenv</span>) C:\local_directory>jupyter notebook
From the resulting browser at http://localhost:8888/tree, navigate to the master notebook:
* http://localhost:8888/tree/MachineLearningNotebooks/contrib/RAPIDS/azure-ml-with-nvidia-rapids.ipynb
&nbsp;
The following notebook will appear:
![](imgs/NotebookHome.png)
&nbsp;
### Master Jupyter Notebook
The notebook can be executed interactively, step by step, by pressing the Run button (circled in red in the image above).
The first couple of functional steps import the necessary AzureML libraries. If you experience any errors, please refer back to the [environment setup](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-environment#local "Local Computer Set Up") instructions.
&nbsp;
#### Setting up a Workspace
The following step gathers the information necessary to set up a workspace in which to execute the RAPIDS script. This needs to be done only once, or not at all if you already have a workspace set up in the Azure Portal that you can use:
![](imgs/WorkSpaceSetUp.png)
Be sure to set the correct values for subscription\_id, resource\_group, workspace\_name, and region before executing the step. An example:
subscription_id = os.environ.get("SUBSCRIPTION_ID", "1358e503-xxxx-4043-xxxx-65b83xxxx32d")
resource_group = os.environ.get("RESOURCE_GROUP", "AML-Rapids-Testing")
workspace_name = os.environ.get("WORKSPACE_NAME", "AML_Rapids_Tester")
workspace_region = os.environ.get("WORKSPACE_REGION", "West US 2")
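The following is a minimal sketch of what the workspace-creation step does with values like the ones above; the placeholder subscription id and variable names are illustrative, and the actual notebook cell may differ slightly:
```python
import os
from azureml.core import Workspace

subscription_id = os.environ.get("SUBSCRIPTION_ID", "<your subscription id>")
resource_group = os.environ.get("RESOURCE_GROUP", "AML-Rapids-Testing")
workspace_name = os.environ.get("WORKSPACE_NAME", "AML_Rapids_Tester")
workspace_region = os.environ.get("WORKSPACE_REGION", "West US 2")

# The first run redirects to the Azure Portal (interactive login) to validate credentials.
ws = Workspace.create(name=workspace_name,
                      subscription_id=subscription_id,
                      resource_group=resource_group,
                      location=workspace_region,
                      create_resource_group=True,
                      exist_ok=True)

# Persist the workspace details locally so later steps can simply reload them.
ws.write_config()
```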
&nbsp;
The resource\_group and workspace_name can take any value; the region should match the region in which the subscription has the required Machine Learning Services node quota.
The first time the code is executed it will redirect to the Azure Portal to validate subscription credentials. After the workspace is created, its related information is stored in a local file so that this step can subsequently be skipped. The next step simply loads the saved workspace:
![](imgs/saved_workspace.png)
Once a workspace has been created, the user can skip its creation and jump directly to this step. The configuration file resides at:
* C:\local_directory\MachineLearningNotebooks\contrib\RAPIDS\aml_config\config.json
&nbsp;
#### Creating an AML Compute Target
The following step creates an AML compute target:
![](imgs/target_creation.png)
The vm\_size parameter of the AmlCompute.provisioning\_configuration() call has to belong to one of the VM families ([NC\_v3](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ncv3-series), [NC\_v2](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ncv2-series), [ND](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#nd-series) or [ND_v2](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#ndv2-series-preview)), which are the families equipped with the P40 or V100 GPUs supported by RAPIDS. In this particular case a Standard\_NC24s\_V2 was used.
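As a rough sketch of this step (the cluster name and node counts below are illustrative, not necessarily the ones the notebook uses), the creation looks something like this:
```python
from azureml.core.compute import AmlCompute, ComputeTarget
from azureml.core.compute_target import ComputeTargetException

gpu_cluster_name = "gpucluster"  # illustrative name

try:
    # Reuse the compute target if it already exists in the workspace.
    gpu_cluster = ComputeTarget(workspace=ws, name=gpu_cluster_name)
    print("Found existing compute target:", gpu_cluster_name)
except ComputeTargetException:
    # vm_size must come from the NC_v2, NC_v3, ND or ND_v2 families (P40/V100 GPUs).
    compute_config = AmlCompute.provisioning_configuration(vm_size="Standard_NC24s_v2",
                                                           min_nodes=0,
                                                           max_nodes=1)
    gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, compute_config)
    gpu_cluster.wait_for_completion(show_output=True)
```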
&nbsp;
If the output of running the step has an error of the form:
![](imgs/targeterror1.png)
It is an indication that, even though the subscription has a node quota for VMs of that family, it does not have a node quota for Machine Learning Services for that family.
You will need to request an increased node quota for that family in that region for **Machine Learning Services**.
&nbsp;
Another possible error is the following:
![](imgs/targeterror2.png)
which indicates that the specified vmSize has not been whitelisted for use with Machine Learning Services, and a request to whitelist it should be filed.
A successful creation of the compute target produces output like the following:
![](imgs/targetsuccess.png)
&nbsp;
#### RAPIDS script uploading and viewing
The next step copies the RAPIDS script process_data.py, which is a slightly modified implementation of the [RAPIDS E2E example](https://github.com/rapidsai/notebooks/blob/master/mortgage/E2E.ipynb), into a script folder and presents its contents to the user (the script is discussed in detail in a later section).
If the user wants to use a different RAPIDS script, the references to the <span style="font-family: Courier New;">process_data.py</span> script have to be changed.
![](imgs/scriptuploading.png)
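In essence (the folder name below is illustrative), this step does the following:
```python
import os
import shutil

scripts_folder = "script_folder"  # illustrative name for the folder submitted with the run
os.makedirs(scripts_folder, exist_ok=True)

# Copy the RAPIDS script next to the run configuration and show its contents.
shutil.copy('./process_data.py', os.path.join(scripts_folder, 'process_data.py'))
with open(os.path.join(scripts_folder, 'process_data.py')) as script_file:
    print(script_file.read())
```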
&nbsp;
#### Data Uploading
The RAPIDS script loads and extracts features from Fannie Mae's Single-Family Loan Performance dataset to train an XGBoost prediction model. The script uses two years of data.
The next few steps download and decompress the data and make it available to the script as an [Azure Machine Learning Datastore](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-access-data).
&nbsp;
The following functions are used to download and decompress the input data:
![](imgs/dcf1.png)
![](imgs/dcf2.png)
![](imgs/dcf3.png)
![](imgs/dcf4.png)
&nbsp;
The next step uses those functions to download the file
http://rapidsai-data.s3-website.us-east-2.amazonaws.com/notebook-mortgage-data/mortgage_2000-2001.tgz
and to decompress it into the local folder path = .\mortgage_2000-2001.
The step takes several minutes; the intermediate outputs provide progress indicators.
![](imgs/downamddecom.png)
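Stripped of the progress bars and MD5 validation used in the notebook, these download-and-decompress steps boil down to something like:
```python
import tarfile
from urllib.request import urlretrieve

fileroot = 'mortgage_2000-2001'
filename = fileroot + '.tgz'
path = './{0}'.format(fileroot)
url = 'http://rapidsai-data.s3-website.us-east-2.amazonaws.com/notebook-mortgage-data/{0}.tgz'.format(fileroot)

urlretrieve(url, filename)           # download the two-year mortgage data archive
with tarfile.open(filename) as tar:  # decompress into ./mortgage_2000-2001
    tar.extractall(path=path)
```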
&nbsp;
The decompressed data should have the following structure:
* .\mortgage_2000-2001\acq\Acquisition_<year>Q<num>.txt
* .\mortgage_2000-2001\perf\Performance_<year>Q<num>.txt
* .\mortgage_2000-2001\names.csv
The data is divided into partitions that roughly correspond to yearly quarters. RAPIDS includes support for multi-node, multi-GPU deployments, enabling scaling up and out to much larger dataset sizes. The user will be able to verify that the number of partitions the script can process increases with the number of GPUs used. The RAPIDS script here is implemented for single-machine scenarios; an example supporting multiple nodes will be published later.
&nbsp;
The next step uploads the data into the [Azure Machine Learning Datastore](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-access-data) under the reference <span style="font-family: Courier New;">fileroot = mortgage_2000-2001</span>.
The step takes several minutes to load the data; the output provides a progress indicator.
![](imgs/datastore.png)
Once the data has been loaded into the Azure Machine Learning Datastore, in subsequent runs the user can comment out the ds.upload line and just use the <span style="font-family: Courier New;">mortgage_2000-2001</span> datastore reference.
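A minimal sketch of this step, assuming the workspace object `ws` and the local folder from the previous step:
```python
from azureml.data.data_reference import DataReference

fileroot = 'mortgage_2000-2001'
ds = ws.get_default_datastore()

# Upload once; on subsequent runs this line can be commented out.
ds.upload(src_dir='./{0}'.format(fileroot), target_path=fileroot,
          overwrite=True, show_progress=True)

# Reference the uploaded data so the training script can consume it.
data_ref = DataReference(data_reference_name='data', datastore=ds,
                         path_on_datastore=fileroot)
```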
&nbsp;
#### Setting up required libraries and environment to run RAPIDS code
There are two options to set up the environment for running RAPIDS code. The following step shows how to use a prebuilt conda environment. A recommended alternative is to specify a base Docker image and package dependencies; you can find sample code for that in the notebook, and a sketch of it follows the image below.
![](imgs/install2.png)
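For reference, the recommended alternative (letting AML build an image from a base Docker image plus conda/pip dependencies) looks roughly like the following; the `rapids.yml` file, the cluster name and the `data_ref` variable are assumed to be the ones defined in the notebook:
```python
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.runconfig import RunConfiguration

# rapids.yml lists the conda/pip packages needed to run the RAPIDS script.
cd = CondaDependencies(conda_dependencies_file_path='rapids.yml')

run_config = RunConfiguration(conda_dependencies=cd)
run_config.framework = 'python'
run_config.target = gpu_cluster_name          # the AML compute target created earlier
run_config.environment.docker.enabled = True
run_config.environment.docker.gpu_support = True
run_config.environment.docker.base_image = "mcr.microsoft.com/azureml/openmpi4.1.0-cuda11.1-cudnn8-ubuntu20.04"
run_config.environment.spark.precache_packages = False
run_config.data_references = {'data': data_ref.to_config()}
```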
&nbsp;
#### Wrapper function to submit the RAPIDS script as an Azure Machine Learning experiment
The next step defines a wrapper function used to run the RAPIDS script with different arguments. It takes as arguments: <span style="font-family: Times New Roman;">*cpu\_training*</span>, a flag indicating whether the run should be processed with the CPU only; <span style="font-family: Times New Roman;">*gpu\_count*</span>, the number of GPUs to use when GPUs are used; and <span style="font-family: Times New Roman;">*part_count*</span>, the number of data partitions to use.
![](imgs/wrapper.png)
&nbsp;
The core of the function is configuring the run by instantiating a ScriptRunConfig object, which defines the source_directory of the script to be executed, the name of the script, and the arguments to be passed to it.
In addition to the wrapper function arguments, two other arguments are passed: <span style="font-family: Times New Roman;">*data\_dir*</span>, the directory where the data is stored, and <span style="font-family: Times New Roman;">*end_year*</span>, the largest year to use partitions from.
As mentioned earlier, the size of the data that can be processed increases with the number of GPUs. In the function, the dictionary <span style="font-family: Times New Roman;">*max\_gpu\_count\_data\_partition_mapping*</span> maps each GPU count to the maximum number of partitions that we empirically found the system can handle. The function prints a warning when the number of partitions for a given number of GPUs exceeds that maximum, but the script is still executed; in that case the user should expect the run to fail with an out-of-memory error.
If the user wants to use a different RAPIDS script, the reference to the process_data.py script has to be changed.
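A simplified sketch of the wrapper's core, assuming the `scripts_folder`, `run_config` and `data_ref` objects from the previous steps (the partition-count checks and the exact argument list are omitted and may differ from the notebook's):
```python
from azureml.core import Experiment, ScriptRunConfig

def run_rapids_experiment(cpu_training, gpu_count, part_count, end_year=2000):
    # Configure the run: script location, script name and script arguments.
    src = ScriptRunConfig(source_directory=scripts_folder,
                          script='process_data.py',
                          arguments=['--num_gpu', str(gpu_count),
                                     '--part_count', str(part_count),
                                     '--end_year', str(end_year),
                                     '--cpu_predictor', str(cpu_training),
                                     '--data_dir', str(data_ref)],
                          run_config=run_config)

    run = Experiment(ws, 'rapidstest').submit(config=src)
    return run
```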
&nbsp;
#### Submitting Experiments
We are ready to submit experiments: launching the RAPIDS script with different sets of parameters.
&nbsp;
The following couple of steps submit experiments under different conditions.
![](imgs/submission1.png)
&nbsp;
The user can set the variable num\_gpu to any value between one and the number of GPUs supported by the chosen vmSize. The variable part\_count can take any value between 1 and 11, but if it exceeds the maximum for num_gpu, the run will result in an error.
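For example, a submission could look like this (variable names follow the notebook; the call to wait_for_completion is optional and simply streams the logs):
```python
cpu_predictor = False    # use GPUs for both ETL and training
num_gpu = 1              # must not exceed the number of GPUs in the chosen vmSize
data_part_count = 1      # 1-11; larger values need more GPUs to fit in memory

run = run_rapids_experiment(cpu_predictor, num_gpu, data_part_count)
run.wait_for_completion(show_output=True)
```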
&nbsp;
If the experiment is successfully submitted, it is placed on a queue for processing, its status appears as Queued, and output like the following appears:
![](imgs/queue.png)
&nbsp;
When the experiment starts running, its status appears as Running and the output changes to something like this:
![](imgs/running.png)
&nbsp;
#### Reproducing the performance gains plot results on the Blog Post
When the run has finished successfully, its status appears as Completed and the output changes to something like this:
&nbsp;
![](imgs/completed.png)
This is the output for an experiment run with three partitions and one GPU; notice that the reported processing time is 49.16 seconds, just as depicted in the performance gains plot in the blog post.
&nbsp;
![](imgs/2GPUs.png)
This output corresponds to a run with three partitions and two GPUs; notice that the reported processing time is 37.50 seconds, just as depicted in the performance gains plot in the blog post.
&nbsp;
![](imgs/3GPUs.png)
This output corresponds to an experiment run with three partitions and three GPUs; notice that the reported processing time is 24.40 seconds, just as depicted in the performance gains plot in the blog post.
&nbsp;
![](imgs/4gpus.png)
This output corresponds to an experiment run with three partitions and four GPUs; notice that the reported processing time is 23.33 seconds, just as depicted in the performance gains plot in the blog post.
&nbsp;
![](imgs/CPUBase.png)
This output corresponds to an experiment run with three partitions using only the CPU; notice that the reported processing time is 9 minutes and 1.21 seconds, or 541.21 seconds, just as depicted in the performance gains plot in the blog post.
&nbsp;
![](imgs/OOM.png)
This output corresponds to an experiment run with nine partitions and four GPUs; notice that the notebook throws a warning signaling that the number of partitions exceeds the maximum the system can handle with that many GPUs, and the run ends up failing, hence its status of Failed.
&nbsp;
#### Freeing Resources
In the last step the notebook deletes the compute target. (This step is optional, especially if min_nodes for the cluster is set to 0, in which case the cluster scales down to 0 nodes when it is not in use.)
![](imgs/clusterdelete.png)
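Assuming the compute target object from the earlier step, the clean-up is a single call:
```python
# Optional clean-up: remove the AML compute target when it is no longer needed.
gpu_cluster.delete()
```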
&nbsp;
### RAPIDS Script
The Master Notebook runs experiments by launching a RAPIDS script with different sets of parameters. In this section the RAPIDS script provided with the material, process_data.py, is analyzed.
The script first imports all the necessary libraries and parses the arguments passed by the Master Notebook.
Then all the internal functions used by the script are defined.
&nbsp;
#### Wrapper Auxiliary Functions:
The functions below are wrappers around the configuration module of librmm, the RAPIDS Memory Manager Python interface:
![](imgs/wap1.png)![](imgs/wap2.png)
&nbsp;
A couple of other functions are wrappers for the submission of jobs to the DASK client:
![](imgs/wap3.png)
![](imgs/wap4.png)
&nbsp;
#### Data Loading Functions:
The data is loaded through the use of the following three functions:
![](imgs/DLF1.png)![](imgs/DLF2.png)![](imgs/DLF3.png)
All three functions use the library function cudf.read_csv(), the cuDF version of the well-known Pandas counterpart.
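As an illustration of the pattern (mirroring the script's gpu_load_names() helper; the path below assumes the decompressed data layout described earlier):
```python
from collections import OrderedDict
import cudf

# Load the pipe-delimited seller-name mapping file with cuDF, the GPU DataFrame library.
dtypes = OrderedDict([('seller_name', 'category'), ('new', 'category')])
names_gdf = cudf.read_csv('./mortgage_2000-2001/names.csv',
                          names=list(dtypes.keys()),
                          dtype=list(dtypes.values()),
                          delimiter='|', skiprows=1)
```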
&nbsp;
#### Data Transformation and Feature Extraction Functions:
The raw data is transformed and processed to extract features by joining, slicing, grouping, aggregating, factorizing, etc., the original dataframes, just as is done with Pandas. The following functions in the script are used for that purpose:
![](imgs/fef1.png)![](imgs/fef2.png)![](imgs/fef3.png)![](imgs/fef4.png)![](imgs/fef5.png)
![](imgs/fef6.png)![](imgs/fef7.png)![](imgs/fef8.png)![](imgs/fef9.png)
&nbsp;
#### Main() Function
The previous functions are used in the main() function to accomplish several steps: set up the DASK client, run all the ETL operations, and set up and train an XGBoost model. The function also assigns which data is processed by each DASK worker.
&nbsp;
##### Setting Up DASK client:
The following lines:
![](imgs/daskini.png)
&nbsp;
Initialize and set up a DASK client with a number of workers corresponding to the number of GPUs to be used in the run. A successful execution of the setup results in the following output:
![](imgs/daskoutput.png)
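This mirrors the dask_cuda-based setup in the current version of the script (IPADDR and num_gpu are assumed to be computed earlier in the script):
```python
from dask.distributed import Client
from dask_cuda import LocalCUDACluster

# One Dask worker per GPU used in the run.
cluster = LocalCUDACluster(ip=IPADDR, n_workers=num_gpu)
client = Client(cluster)
print(client.ncores())
```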
##### All ETL functions are invoked through single calls to process\_quarter_gpu, one per data partition
![](imgs/ETL.png)
&nbsp;
##### Concatenating the data assigned to each DASK worker
The partitions assigned to each worker are concatenated and set up for training.
![](imgs/Dask2.png)
&nbsp;
##### Setting Training Parameters
The parameters used for the training of a gradient boosted decision tree model are set up in the following code block:
![](imgs/PArameters.png)
Notice how the parameters are modified when using the CPU-only mode.
&nbsp;
##### Launching the training of a gradient boosted decision tree model using XGBoost.
![](imgs/training.png)
The outputs of the script can be observed in the master notebook as the script executes.

View File

@@ -9,6 +9,13 @@
"Licensed under the MIT License." "Licensed under the MIT License."
] ]
}, },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/contrib/RAPIDS/azure-ml-with-nvidia-rapids/azure-ml-with-nvidia-rapids.png)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -20,7 +27,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"The [RAPIDS](https://www.developer.nvidia.com/rapids) suite of software libraries from NVIDIA enables the execution of end-to-end data science and analytics pipelines entirely on GPUs. In many machine learning projects, a significant portion of the model training time is spent in setting up the data; this stage of the process is known as Extraction, Transformation and Loading, or ETL. By using the DataFrame API for ETL\u00c2\u00a0and GPU-capable ML algorithms in RAPIDS, data preparation and training models can be done in GPU-accelerated end-to-end pipelines without incurring serialization costs between the pipeline stages. This notebook demonstrates how to use NVIDIA RAPIDS to prepare data and train model\u00c2\u00a0in Azure.\n", "The [RAPIDS](https://www.developer.nvidia.com/rapids) suite of software libraries from NVIDIA enables the execution of end-to-end data science and analytics pipelines entirely on GPUs. In many machine learning projects, a significant portion of the model training time is spent in setting up the data; this stage of the process is known as Extraction, Transformation and Loading, or ETL. By using the DataFrame API for ETL\u00c2\u00a0and GPU-capable ML algorithms in RAPIDS, data preparation and training models can be done in GPU-accelerated end-to-end pipelines without incurring serialization costs between the pipeline stages. This notebook demonstrates how to use NVIDIA RAPIDS to prepare data and train model\u00c3\u201a\u00c2\u00a0in Azure.\n",
" \n", " \n",
"In this notebook, we will do the following:\n", "In this notebook, we will do the following:\n",
" \n", " \n",
@@ -62,6 +69,7 @@
"source": [ "source": [
"import os\n", "import os\n",
"from azureml.core import Workspace, Experiment\n", "from azureml.core import Workspace, Experiment\n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"from azureml.core.compute import AmlCompute, ComputeTarget\n", "from azureml.core.compute import AmlCompute, ComputeTarget\n",
"from azureml.data.data_reference import DataReference\n", "from azureml.data.data_reference import DataReference\n",
"from azureml.core.runconfig import RunConfiguration\n", "from azureml.core.runconfig import RunConfiguration\n",
@@ -118,8 +126,10 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"ws = Workspace.from_config()\n", "ws = Workspace.from_config()\n",
"\n",
"# if a locally-saved configuration file for the workspace is not available, use the following to load workspace\n", "# if a locally-saved configuration file for the workspace is not available, use the following to load workspace\n",
"# ws = Workspace(subscription_id=subscription_id, resource_group=resource_group, workspace_name=workspace_name)\n", "# ws = Workspace(subscription_id=subscription_id, resource_group=resource_group, workspace_name=workspace_name)\n",
"\n",
"print('Workspace name: ' + ws.name, \n", "print('Workspace name: ' + ws.name, \n",
" 'Azure region: ' + ws.location, \n", " 'Azure region: ' + ws.location, \n",
" 'Subscription id: ' + ws.subscription_id, \n", " 'Subscription id: ' + ws.subscription_id, \n",
@@ -160,7 +170,7 @@
"if gpu_cluster_name in ws.compute_targets:\n", "if gpu_cluster_name in ws.compute_targets:\n",
" gpu_cluster = ws.compute_targets[gpu_cluster_name]\n", " gpu_cluster = ws.compute_targets[gpu_cluster_name]\n",
" if gpu_cluster and type(gpu_cluster) is AmlCompute:\n", " if gpu_cluster and type(gpu_cluster) is AmlCompute:\n",
" print('found compute target. just use it. ' + gpu_cluster_name)\n", " print('Found compute target. Will use {0} '.format(gpu_cluster_name))\n",
"else:\n", "else:\n",
" print(\"creating new cluster\")\n", " print(\"creating new cluster\")\n",
" # vm_size parameter below could be modified to one of the RAPIDS-supported VM types\n", " # vm_size parameter below could be modified to one of the RAPIDS-supported VM types\n",
@@ -178,13 +188,6 @@
"### Script to process data and train model" "### Script to process data and train model"
] ]
}, },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The _process&#95;data.py_ script used in the step below is a slightly modified implementation of [RAPIDS E2E example](https://github.com/rapidsai/notebooks/blob/master/mortgage/E2E.ipynb)."
]
},
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
@@ -193,10 +196,7 @@
"source": [ "source": [
"# copy process_data.py into the script folder\n", "# copy process_data.py into the script folder\n",
"import shutil\n", "import shutil\n",
"shutil.copy('./process_data.py', os.path.join(scripts_folder, 'process_data.py'))\n", "shutil.copy('./process_data.py', os.path.join(scripts_folder, 'process_data.py'))"
"\n",
"with open(os.path.join(scripts_folder, './process_data.py'), 'r') as process_data_script:\n",
" print(process_data_script.read())"
] ]
}, },
{ {
@@ -210,21 +210,97 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"This sample uses [Fannie Mae\u00e2\u20ac\u2122s Single-Family Loan Performance Data](http://www.fanniemae.com/portal/funding-the-market/data/loan-performance-data.html). Refer to the 'Available mortgage datasets' section in [instructions](https://rapidsai.github.io/demos/datasets/mortgage-data) to get sample data.\n", "This sample uses [Fannie Mae's Single-Family Loan Performance Data](http://www.fanniemae.com/portal/funding-the-market/data/loan-performance-data.html). Once you obtain access to the data, you will need to make this data available in an [Azure Machine Learning Datastore](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-access-data), for use in this sample. The following code shows how to do that."
"\n",
"Once you obtain access to the data, you will need to make this data available in an [Azure Machine Learning Datastore](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-access-data), for use in this sample."
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"<font color='red'>Important</font>: The following step assumes the data is uploaded to the Workspace's default data store under a folder named 'mortgagedata2000_01'. Note that uploading data to the Workspace's default data store is not necessary and the data can be referenced from any datastore, e.g., from Azure Blob or File service, once it is added as a datastore to the workspace. The path_on_datastore parameter needs to be updated, depending on where the data is available. The directory where the data is available should have the following folder structure, as the process_data.py script expects this directory structure:\n", "### Downloading Data"
"* _&lt;data directory>_/acq\n", ]
"* _&lt;data directory>_/perf\n", },
"* _names.csv_\n", {
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import tarfile\n",
"import hashlib\n",
"from urllib.request import urlretrieve\n",
"\n", "\n",
"The 'acq' and 'perf' refer to directories containing data files. The _&lt;data directory>_ is the path specified in _path&#95;on&#95;datastore_ parameter in the step below." "def validate_downloaded_data(path):\n",
" if(os.path.isdir(path) and os.path.exists(path + '//names.csv')) :\n",
" if(os.path.isdir(path + '//acq' ) and len(os.listdir(path + '//acq')) == 8):\n",
" if(os.path.isdir(path + '//perf' ) and len(os.listdir(path + '//perf')) == 11):\n",
" print(\"Data has been downloaded and decompressed at: {0}\".format(path))\n",
" return True\n",
" print(\"Data has not been downloaded and decompressed\")\n",
" return False\n",
"\n",
"def show_progress(count, block_size, total_size):\n",
" global pbar\n",
" global processed\n",
" \n",
" if count == 0:\n",
" pbar = ProgressBar(maxval=total_size)\n",
" processed = 0\n",
" \n",
" processed += block_size\n",
" processed = min(processed,total_size)\n",
" pbar.update(processed)\n",
"\n",
" \n",
"def download_file(fileroot):\n",
" filename = fileroot + '.tgz'\n",
" if(not os.path.exists(filename) or hashlib.md5(open(filename, 'rb').read()).hexdigest() != '82dd47135053303e9526c2d5c43befd5' ):\n",
" url_format = 'http://rapidsai-data.s3-website.us-east-2.amazonaws.com/notebook-mortgage-data/{0}.tgz'\n",
" url = url_format.format(fileroot)\n",
" print(\"...Downloading file :{0}\".format(filename))\n",
" urlretrieve(url, filename)\n",
" pbar.finish()\n",
" print(\"...File :{0} finished downloading\".format(filename))\n",
" else:\n",
" print(\"...File :{0} has been downloaded already\".format(filename))\n",
" return filename\n",
"\n",
"def decompress_file(filename,path):\n",
" tar = tarfile.open(filename)\n",
" print(\"...Getting information from {0} about files to decompress\".format(filename))\n",
" members = tar.getmembers()\n",
" numFiles = len(members)\n",
" so_far = 0\n",
" for member_info in members:\n",
" tar.extract(member_info,path=path)\n",
" so_far += 1\n",
" print(\"...All {0} files have been decompressed\".format(numFiles))\n",
" tar.close()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fileroot = 'mortgage_2000-2001'\n",
"path = '.\\\\{0}'.format(fileroot)\n",
"pbar = None\n",
"processed = 0\n",
"\n",
"if(not validate_downloaded_data(path)):\n",
" print(\"Downloading and Decompressing Input Data\")\n",
" filename = download_file(fileroot)\n",
" decompress_file(filename,path)\n",
" print(\"Input Data has been Downloaded and Decompressed\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Uploading Data to Workspace"
] ]
}, },
{ {
@@ -237,10 +313,12 @@
"\n", "\n",
"# download and uncompress data in a local directory before uploading to data store\n", "# download and uncompress data in a local directory before uploading to data store\n",
"# directory specified in src_dir parameter below should have the acq, perf directories with data and names.csv file\n", "# directory specified in src_dir parameter below should have the acq, perf directories with data and names.csv file\n",
"# ds.upload(src_dir='<local directory that has data>', target_path='mortgagedata2000_01', overwrite=True, show_progress=True)\n", "\n",
"# ---->>>> UNCOMMENT THE BELOW LINE TO UPLOAD YOUR DATA IF NOT DONE SO ALREADY <<<<----\n",
"# ds.upload(src_dir=path, target_path=fileroot, overwrite=True, show_progress=True)\n",
"\n", "\n",
"# data already uploaded to the datastore\n", "# data already uploaded to the datastore\n",
"data_ref = DataReference(data_reference_name='data', datastore=ds, path_on_datastore='mortgagedata2000_01')" "data_ref = DataReference(data_reference_name='data', datastore=ds, path_on_datastore=fileroot)"
] ]
}, },
{ {
@@ -254,7 +332,26 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"AML allows the option of using existing Docker images with prebuilt conda environments. The following step use an existing image from [Docker Hub](https://hub.docker.com/r/rapidsai/rapidsai/)." "RunConfiguration is used to submit jobs to Azure Machine Learning service. When creating RunConfiguration for a job, users can either \n",
"1. specify a Docker image with prebuilt conda environment and use it without any modifications to run the job, or \n",
"2. specify a Docker image as the base image and conda or pip packages as dependnecies to let AML build a new Docker image with a conda environment containing specified dependencies to use in the job\n",
"\n",
"The second option is the recommended option in AML. \n",
"The following steps have code for both options. You can pick the one that is more appropriate for your requirements. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Specify prebuilt conda environment"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The following code shows how to install RAPIDS using conda. The `rapids.yml` file contains the list of packages necessary to run this tutorial. **NOTE:** Initial build of the image might take up to 20 minutes as the service needs to build and cache the new image; once the image is built the subequent runs use the cached image and the overhead is minimal."
] ]
}, },
{ {
@@ -263,21 +360,52 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"run_config = RunConfiguration()\n", "cd = CondaDependencies(conda_dependencies_file_path='rapids.yml')\n",
"run_config = RunConfiguration(conda_dependencies=cd)\n",
"run_config.framework = 'python'\n", "run_config.framework = 'python'\n",
"run_config.environment.python.user_managed_dependencies = True\n",
"# use conda environment named 'rapids' available in the Docker image\n",
"# this conda environment does not include azureml-defaults package that is required for using AML functionality like metrics tracking, model management etc.\n",
"run_config.environment.python.interpreter_path = '/conda/envs/rapids/bin/python'\n",
"run_config.target = gpu_cluster_name\n", "run_config.target = gpu_cluster_name\n",
"run_config.environment.docker.enabled = True\n", "run_config.environment.docker.enabled = True\n",
"run_config.environment.docker.gpu_support = True\n", "run_config.environment.docker.gpu_support = True\n",
"# if registry is not mentioned the image is pulled from Docker Hub\n", "run_config.environment.docker.base_image = \"mcr.microsoft.com/azureml/openmpi4.1.0-cuda11.1-cudnn8-ubuntu20.04\"\n",
"run_config.environment.docker.base_image = \"rapidsai/rapidsai:cuda9.2_ubuntu16.04_root\"\n",
"run_config.environment.spark.precache_packages = False\n", "run_config.environment.spark.precache_packages = False\n",
"run_config.data_references={'data':data_ref.to_config()}" "run_config.data_references={'data':data_ref.to_config()}"
] ]
}, },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Using Docker"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Alternatively, you can specify RAPIDS Docker image."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# run_config = RunConfiguration()\n",
"# run_config.framework = 'python'\n",
"# run_config.environment.python.user_managed_dependencies = True\n",
"# run_config.environment.python.interpreter_path = '/conda/envs/rapids/bin/python'\n",
"# run_config.target = gpu_cluster_name\n",
"# run_config.environment.docker.enabled = True\n",
"# run_config.environment.docker.gpu_support = True\n",
"# run_config.environment.docker.base_image = \"rapidsai/rapidsai:cuda9.2-runtime-ubuntu18.04\"\n",
"# # run_config.environment.docker.base_image_registry.address = '<registry_url>' # not required if the base_image is in Docker hub\n",
"# # run_config.environment.docker.base_image_registry.username = '<user_name>' # needed only for private images\n",
"# # run_config.environment.docker.base_image_registry.password = '<password>' # needed only for private images\n",
"# run_config.environment.spark.precache_packages = False\n",
"# run_config.data_references={'data':data_ref.to_config()}"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@@ -293,17 +421,24 @@
"source": [ "source": [
"# parameter cpu_predictor indicates if training should be done on CPU. If set to true, GPUs are used *only* for ETL and *not* for training\n", "# parameter cpu_predictor indicates if training should be done on CPU. If set to true, GPUs are used *only* for ETL and *not* for training\n",
"# parameter num_gpu indicates number of GPUs to use among the GPUs available in the VM for ETL and if cpu_predictor is false, for training as well \n", "# parameter num_gpu indicates number of GPUs to use among the GPUs available in the VM for ETL and if cpu_predictor is false, for training as well \n",
"def run_rapids_experiment(cpu_training, gpu_count):\n", "def run_rapids_experiment(cpu_training, gpu_count, part_count):\n",
" # any value between 1-4 is allowed here depending the type of VMs available in gpu_cluster\n", " # any value between 1-4 is allowed here depending the type of VMs available in gpu_cluster\n",
" if gpu_count not in [1, 2, 3, 4]:\n", " if gpu_count not in [1, 2, 3, 4]:\n",
" raise Exception('Value specified for the number of GPUs to use {0} is invalid'.format(gpu_count))\n", " raise Exception('Value specified for the number of GPUs to use {0} is invalid'.format(gpu_count))\n",
"\n", "\n",
" # following data partition mapping is empirical (specific to GPUs used and current data partitioning scheme) and may need to be tweaked\n", " # following data partition mapping is empirical (specific to GPUs used and current data partitioning scheme) and may need to be tweaked\n",
" gpu_count_data_partition_mapping = {1: 2, 2: 4, 3: 5, 4: 7}\n", " max_gpu_count_data_partition_mapping = {1: 3, 2: 4, 3: 6, 4: 8}\n",
" part_count = gpu_count_data_partition_mapping[gpu_count]\n", " \n",
"\n", " if part_count > max_gpu_count_data_partition_mapping[gpu_count]:\n",
" print(\"Too many partitions for the number of GPUs, exceeding memory threshold\")\n",
" \n",
" if part_count > 11:\n",
" print(\"Warning: Maximum number of partitions available is 11\")\n",
" part_count = 11\n",
" \n",
" end_year = 2000\n", " end_year = 2000\n",
" if gpu_count > 2:\n", " \n",
" if part_count > 4:\n",
" end_year = 2001 # use more data with more GPUs\n", " end_year = 2001 # use more data with more GPUs\n",
"\n", "\n",
" src = ScriptRunConfig(source_directory=scripts_folder, \n", " src = ScriptRunConfig(source_directory=scripts_folder, \n",
@@ -317,7 +452,8 @@
"\n", "\n",
" exp = Experiment(ws, 'rapidstest')\n", " exp = Experiment(ws, 'rapidstest')\n",
" run = exp.submit(config=src)\n", " run = exp.submit(config=src)\n",
" RunDetails(run).show()" " RunDetails(run).show()\n",
" return run"
] ]
}, },
{ {
@@ -335,9 +471,10 @@
"source": [ "source": [
"cpu_predictor = False\n", "cpu_predictor = False\n",
"# the value for num_gpu should be less than or equal to the number of GPUs available in the VM\n", "# the value for num_gpu should be less than or equal to the number of GPUs available in the VM\n",
"num_gpu = 1 \n", "num_gpu = 1\n",
"data_part_count = 1\n",
"# train using CPU, use GPU for both ETL and training\n", "# train using CPU, use GPU for both ETL and training\n",
"run_rapids_experiment(cpu_predictor, num_gpu)" "run = run_rapids_experiment(cpu_predictor, num_gpu, data_part_count)"
] ]
}, },
{ {
@@ -358,8 +495,9 @@
"cpu_predictor = True\n", "cpu_predictor = True\n",
"# the value for num_gpu should be less than or equal to the number of GPUs available in the VM\n", "# the value for num_gpu should be less than or equal to the number of GPUs available in the VM\n",
"num_gpu = 1\n", "num_gpu = 1\n",
"data_part_count = 1\n",
"# train using CPU, use GPU for ETL\n", "# train using CPU, use GPU for ETL\n",
"run_rapids_experiment(cpu_predictor, num_gpu)" "run = run_rapids_experiment(cpu_predictor, num_gpu, data_part_count)"
] ]
}, },
{ {
@@ -401,9 +539,9 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.6.6" "version": "3.6.8"
} }
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 2 "nbformat_minor": 4
} }

BIN — new binary image files added (including contrib/RAPIDS/imgs/ETL.png and contrib/RAPIDS/imgs/OOM.png, referenced by the README above); binary files not shown.

View File

@@ -1,9 +1,9 @@
# License Info: https://github.com/rapidsai/notebooks/blob/master/LICENSE
import numpy as np import numpy as np
import datetime import datetime
import dask_xgboost as dxgb_gpu import dask_xgboost as dxgb_gpu
import dask import dask
import dask_cudf import dask_cudf
from dask_cuda import LocalCUDACluster
from dask.delayed import delayed from dask.delayed import delayed
from dask.distributed import Client, wait from dask.distributed import Client, wait
import xgboost as xgb import xgboost as xgb
@@ -15,81 +15,21 @@ from glob import glob
import os import os
import argparse import argparse
parser = argparse.ArgumentParser("rapidssample")
parser.add_argument("--data_dir", type=str, help="location of data")
parser.add_argument("--num_gpu", type=int, help="Number of GPUs to use", default=1)
parser.add_argument("--part_count", type=int, help="Number of data files to train against", default=2)
parser.add_argument("--end_year", type=int, help="Year to end the data load", default=2000)
parser.add_argument("--cpu_predictor", type=str, help="Flag to use CPU for prediction", default='False')
parser.add_argument('-f', type=str, default='') # added for notebook execution scenarios
args = parser.parse_args()
data_dir = args.data_dir
num_gpu = args.num_gpu
part_count = args.part_count
end_year = args.end_year
cpu_predictor = args.cpu_predictor.lower() in ('yes', 'true', 't', 'y', '1')
print('data_dir = {0}'.format(data_dir))
print('num_gpu = {0}'.format(num_gpu))
print('part_count = {0}'.format(part_count))
part_count = part_count + 1 # adding one because the usage below is not inclusive
print('end_year = {0}'.format(end_year))
print('cpu_predictor = {0}'.format(cpu_predictor))
import subprocess
cmd = "hostname --all-ip-addresses"
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
IPADDR = str(output.decode()).split()[0]
print('IPADDR is {0}'.format(IPADDR))
cmd = "/rapids/notebooks/utils/dask-setup.sh 0"
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
cmd = "/rapids/notebooks/utils/dask-setup.sh rapids " + str(num_gpu) + " 8786 8787 8790 " + str(IPADDR) + " MASTER"
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
print(output.decode())
import dask
from dask.delayed import delayed
from dask.distributed import Client, wait
_client = IPADDR + str(":8786")
client = dask.distributed.Client(_client)
def initialize_rmm_pool():
from librmm_cffi import librmm_config as rmm_cfg
rmm_cfg.use_pool_allocator = True
#rmm_cfg.initial_pool_size = 2<<30 # set to 2GiB. Default is 1/2 total GPU memory
import cudf
return cudf._gdf.rmm_initialize()
def initialize_rmm_no_pool():
from librmm_cffi import librmm_config as rmm_cfg
rmm_cfg.use_pool_allocator = False
import cudf
return cudf._gdf.rmm_initialize()
def run_dask_task(func, **kwargs): def run_dask_task(func, **kwargs):
task = func(**kwargs) task = func(**kwargs)
return task return task
def process_quarter_gpu(year=2000, quarter=1, perf_file=""): def process_quarter_gpu(client, col_names_path, acq_data_path, year=2000, quarter=1, perf_file=""):
dask_client = client
ml_arrays = run_dask_task(delayed(run_gpu_workflow), ml_arrays = run_dask_task(delayed(run_gpu_workflow),
col_path=col_names_path,
acq_path=acq_data_path,
quarter=quarter, quarter=quarter,
year=year, year=year,
perf_file=perf_file) perf_file=perf_file)
return client.compute(ml_arrays, return dask_client.compute(ml_arrays,
optimize_graph=False, optimize_graph=False,
fifo_timeout="0ms" fifo_timeout="0ms")
)
def null_workaround(df, **kwargs): def null_workaround(df, **kwargs):
for column, data_type in df.dtypes.items(): for column, data_type in df.dtypes.items():
@@ -99,9 +39,9 @@ def null_workaround(df, **kwargs):
df[column] = df[column].fillna(-1) df[column] = df[column].fillna(-1)
return df return df
def run_gpu_workflow(quarter=1, year=2000, perf_file="", **kwargs): def run_gpu_workflow(col_path, acq_path, quarter=1, year=2000, perf_file="", **kwargs):
names = gpu_load_names() names = gpu_load_names(col_path=col_path)
acq_gdf = gpu_load_acquisition_csv(acquisition_path= acq_data_path + "/Acquisition_" acq_gdf = gpu_load_acquisition_csv(acquisition_path= acq_path + "/Acquisition_"
+ str(year) + "Q" + str(quarter) + ".txt") + str(year) + "Q" + str(quarter) + ".txt")
acq_gdf = acq_gdf.merge(names, how='left', on=['seller_name']) acq_gdf = acq_gdf.merge(names, how='left', on=['seller_name'])
acq_gdf.drop_column('seller_name') acq_gdf.drop_column('seller_name')
@@ -231,7 +171,7 @@ def gpu_load_acquisition_csv(acquisition_path, **kwargs):
return cudf.read_csv(acquisition_path, names=cols, delimiter='|', dtype=list(dtypes.values()), skiprows=1) return cudf.read_csv(acquisition_path, names=cols, delimiter='|', dtype=list(dtypes.values()), skiprows=1)
def gpu_load_names(**kwargs): def gpu_load_names(col_path):
""" Loads names used for renaming the banks """ Loads names used for renaming the banks
Returns Returns
@@ -248,30 +188,30 @@ def gpu_load_names(**kwargs):
("new", "category"), ("new", "category"),
]) ])
return cudf.read_csv(col_names_path, names=cols, delimiter='|', dtype=list(dtypes.values()), skiprows=1) return cudf.read_csv(col_path, names=cols, delimiter='|', dtype=list(dtypes.values()), skiprows=1)
def create_ever_features(gdf, **kwargs): def create_ever_features(gdf, **kwargs):
everdf = gdf[['loan_id', 'current_loan_delinquency_status']] everdf = gdf[['loan_id', 'current_loan_delinquency_status']]
everdf = everdf.groupby('loan_id', method='hash').max() everdf = everdf.groupby('loan_id', method='hash').max().reset_index()
del(gdf) del(gdf)
everdf['ever_30'] = (everdf['max_current_loan_delinquency_status'] >= 1).astype('int8') everdf['ever_30'] = (everdf['current_loan_delinquency_status'] >= 1).astype('int8')
everdf['ever_90'] = (everdf['max_current_loan_delinquency_status'] >= 3).astype('int8') everdf['ever_90'] = (everdf['current_loan_delinquency_status'] >= 3).astype('int8')
everdf['ever_180'] = (everdf['max_current_loan_delinquency_status'] >= 6).astype('int8') everdf['ever_180'] = (everdf['current_loan_delinquency_status'] >= 6).astype('int8')
everdf.drop_column('max_current_loan_delinquency_status') everdf.drop_column('current_loan_delinquency_status')
return everdf return everdf
def create_delinq_features(gdf, **kwargs): def create_delinq_features(gdf, **kwargs):
delinq_gdf = gdf[['loan_id', 'monthly_reporting_period', 'current_loan_delinquency_status']] delinq_gdf = gdf[['loan_id', 'monthly_reporting_period', 'current_loan_delinquency_status']]
del(gdf) del(gdf)
delinq_30 = delinq_gdf.query('current_loan_delinquency_status >= 1')[['loan_id', 'monthly_reporting_period']].groupby('loan_id', method='hash').min() delinq_30 = delinq_gdf.query('current_loan_delinquency_status >= 1')[['loan_id', 'monthly_reporting_period']].groupby('loan_id', method='hash').min().reset_index()
delinq_30['delinquency_30'] = delinq_30['min_monthly_reporting_period'] delinq_30['delinquency_30'] = delinq_30['monthly_reporting_period']
delinq_30.drop_column('min_monthly_reporting_period') delinq_30.drop_column('monthly_reporting_period')
delinq_90 = delinq_gdf.query('current_loan_delinquency_status >= 3')[['loan_id', 'monthly_reporting_period']].groupby('loan_id', method='hash').min() delinq_90 = delinq_gdf.query('current_loan_delinquency_status >= 3')[['loan_id', 'monthly_reporting_period']].groupby('loan_id', method='hash').min().reset_index()
delinq_90['delinquency_90'] = delinq_90['min_monthly_reporting_period'] delinq_90['delinquency_90'] = delinq_90['monthly_reporting_period']
delinq_90.drop_column('min_monthly_reporting_period') delinq_90.drop_column('monthly_reporting_period')
delinq_180 = delinq_gdf.query('current_loan_delinquency_status >= 6')[['loan_id', 'monthly_reporting_period']].groupby('loan_id', method='hash').min() delinq_180 = delinq_gdf.query('current_loan_delinquency_status >= 6')[['loan_id', 'monthly_reporting_period']].groupby('loan_id', method='hash').min().reset_index()
delinq_180['delinquency_180'] = delinq_180['min_monthly_reporting_period'] delinq_180['delinquency_180'] = delinq_180['monthly_reporting_period']
delinq_180.drop_column('min_monthly_reporting_period') delinq_180.drop_column('monthly_reporting_period')
del(delinq_gdf) del(delinq_gdf)
delinq_merge = delinq_30.merge(delinq_90, how='left', on=['loan_id'], type='hash') delinq_merge = delinq_30.merge(delinq_90, how='left', on=['loan_id'], type='hash')
delinq_merge['delinquency_90'] = delinq_merge['delinquency_90'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]')) delinq_merge['delinquency_90'] = delinq_merge['delinquency_90'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
@@ -324,16 +264,15 @@ def create_joined_df(gdf, everdf, **kwargs):
def create_12_mon_features(joined_df, **kwargs): def create_12_mon_features(joined_df, **kwargs):
testdfs = [] testdfs = []
n_months = 12 n_months = 12
for y in range(1, n_months + 1): for y in range(1, n_months + 1):
tmpdf = joined_df[['loan_id', 'timestamp_year', 'timestamp_month', 'delinquency_12', 'upb_12']] tmpdf = joined_df[['loan_id', 'timestamp_year', 'timestamp_month', 'delinquency_12', 'upb_12']]
tmpdf['josh_months'] = tmpdf['timestamp_year'] * 12 + tmpdf['timestamp_month'] tmpdf['josh_months'] = tmpdf['timestamp_year'] * 12 + tmpdf['timestamp_month']
tmpdf['josh_mody_n'] = ((tmpdf['josh_months'].astype('float64') - 24000 - y) / 12).floor() tmpdf['josh_mody_n'] = ((tmpdf['josh_months'].astype('float64') - 24000 - y) / 12).floor()
tmpdf = tmpdf.groupby(['loan_id', 'josh_mody_n'], method='hash').agg({'delinquency_12': 'max','upb_12': 'min'}) tmpdf = tmpdf.groupby(['loan_id', 'josh_mody_n'], method='hash').agg({'delinquency_12': 'max','upb_12': 'min'}).reset_index()
tmpdf['delinquency_12'] = (tmpdf['max_delinquency_12']>3).astype('int32') tmpdf['delinquency_12'] = (tmpdf['delinquency_12']>3).astype('int32')
tmpdf['delinquency_12'] +=(tmpdf['min_upb_12']==0).astype('int32') tmpdf['delinquency_12'] +=(tmpdf['upb_12']==0).astype('int32')
tmpdf.drop_column('max_delinquency_12') tmpdf['upb_12'] = tmpdf['upb_12']
tmpdf['upb_12'] = tmpdf['min_upb_12']
tmpdf.drop_column('min_upb_12')
tmpdf['timestamp_year'] = (((tmpdf['josh_mody_n'] * n_months) + 24000 + (y - 1)) / 12).floor().astype('int16') tmpdf['timestamp_year'] = (((tmpdf['josh_mody_n'] * n_months) + 24000 + (y - 1)) / 12).floor().astype('int16')
tmpdf['timestamp_month'] = np.int8(y) tmpdf['timestamp_month'] = np.int8(y)
tmpdf.drop_column('josh_mody_n') tmpdf.drop_column('josh_mody_n')
@@ -374,6 +313,7 @@ def last_mile_cleaning(df, **kwargs):
'delinquency_30', 'delinquency_90', 'delinquency_180', 'upb_12',
'zero_balance_effective_date','foreclosed_after', 'disposition_date','timestamp'
]
for column in drop_list:
df.drop_column(column)
for col, dtype in df.dtypes.iteritems():
@@ -384,32 +324,67 @@ def last_mile_cleaning(df, **kwargs):
df['delinquency_12'] = df['delinquency_12'].fillna(False).astype('int32')
for column in df.columns:
df[column] = df[column].fillna(-1)
-return df.to_arrow(index=False)
+return df.to_arrow(preserve_index=False)
def main():
parser = argparse.ArgumentParser("rapidssample")
parser.add_argument("--data_dir", type=str, help="location of data")
parser.add_argument("--num_gpu", type=int, help="Number of GPUs to use", default=1)
parser.add_argument("--part_count", type=int, help="Number of data files to train against", default=2)
parser.add_argument("--end_year", type=int, help="Year to end the data load", default=2000)
parser.add_argument("--cpu_predictor", type=str, help="Flag to use CPU for prediction", default='False')
parser.add_argument('-f', type=str, default='') # added for notebook execution scenarios
args = parser.parse_args()
data_dir = args.data_dir
num_gpu = args.num_gpu
part_count = args.part_count
end_year = args.end_year
cpu_predictor = args.cpu_predictor.lower() in ('yes', 'true', 't', 'y', '1')
-# to download data for this notebook, visit https://rapidsai.github.io/demos/datasets/mortgage-data and update the following paths accordingly
-acq_data_path = "{0}/acq".format(data_dir) #"/rapids/data/mortgage/acq"
-perf_data_path = "{0}/perf".format(data_dir) #"/rapids/data/mortgage/perf"
-col_names_path = "{0}/names.csv".format(data_dir) # "/rapids/data/mortgage/names.csv"
-start_year = 2000
-#end_year = 2000 # end_year is inclusive -- converted to parameter
-#part_count = 2 # the number of data files to train against -- converted to parameter
-client.run(initialize_rmm_pool)
-# NOTE: The ETL calculates additional features which are then dropped before creating the XGBoost DMatrix.
-# This can be optimized to avoid calculating the dropped features.
-print("Reading ...")
-t1 = datetime.datetime.now()
-gpu_dfs = []
-gpu_time = 0
-quarter = 1
-year = start_year
-count = 0
-while year <= end_year:
+if cpu_predictor:
+print('Training with CPUs require num gpu = 1')
+num_gpu = 1
+print('data_dir = {0}'.format(data_dir))
+print('num_gpu = {0}'.format(num_gpu))
+print('part_count = {0}'.format(part_count))
+print('end_year = {0}'.format(end_year))
+print('cpu_predictor = {0}'.format(cpu_predictor))
+import subprocess
+cmd = "hostname --all-ip-addresses"
+process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
+output, error = process.communicate()
+IPADDR = str(output.decode()).split()[0]
+cluster = LocalCUDACluster(ip=IPADDR,n_workers=num_gpu)
+client = Client(cluster)
+client
+print(client.ncores())
+# to download data for this notebook, visit https://rapidsai.github.io/demos/datasets/mortgage-data and update the following paths accordingly
+acq_data_path = "{0}/acq".format(data_dir) #"/rapids/data/mortgage/acq"
+perf_data_path = "{0}/perf".format(data_dir) #"/rapids/data/mortgage/perf"
+col_names_path = "{0}/names.csv".format(data_dir) # "/rapids/data/mortgage/names.csv"
+start_year = 2000
+client
+print('--->>> Workers used: {0}'.format(client.ncores()))
+# NOTE: The ETL calculates additional features which are then dropped before creating the XGBoost DMatrix.
+# This can be optimized to avoid calculating the dropped features.
+print("Reading ...")
+t1 = datetime.datetime.now()
+gpu_dfs = []
+gpu_time = 0
+quarter = 1
+year = start_year
+count = 0
+while year <= end_year:
for file in glob(os.path.join(perf_data_path + "/Performance_" + str(year) + "Q" + str(quarter) + "*")):
if count < part_count:
-gpu_dfs.append(process_quarter_gpu(year=year, quarter=quarter, perf_file=file))
+gpu_dfs.append(process_quarter_gpu(client, col_names_path, acq_data_path, year=year, quarter=quarter, perf_file=file))
count += 1
print('file: {0}'.format(file))
print('count: {0}'.format(count))
@@ -418,16 +393,12 @@ while year <= end_year:
year += 1
quarter = 1
wait(gpu_dfs)
t2 = datetime.datetime.now()
-print("Reading time ...")
-print(t2-t1)
-print('len(gpu_dfs) is {0}'.format(len(gpu_dfs)))
-client.run(cudf._gdf.rmm_finalize)
-client.run(initialize_rmm_no_pool)
+print("Reading time: {0}".format(str(t2-t1)))
+print('--->>> Number of data parts: {0}'.format(len(gpu_dfs)))
dxgb_gpu_params = {
'nround': 100,
'max_depth': 8,
'max_leaves': 2**8,
@@ -443,58 +414,57 @@ dxgb_gpu_params = {
'n_gpus': 1,
'distributed_dask': True,
'loss': 'ls',
-'objective': 'gpu:reg:linear',
+'objective': 'reg:squarederror',
'max_features': 'auto',
'criterion': 'friedman_mse',
'grow_policy': 'lossguide',
'verbose': True
}
if cpu_predictor:
-print('Training using CPUs')
+print('\n---->>>> Training using CPUs <<<<----\n')
dxgb_gpu_params['predictor'] = 'cpu_predictor'
dxgb_gpu_params['tree_method'] = 'hist'
dxgb_gpu_params['objective'] = 'reg:linear'
else:
-print('Training using GPUs')
+print('\n---->>>> Training using GPUs <<<<----\n')
print('Training parameters are {0}'.format(dxgb_gpu_params))
gpu_dfs = [delayed(DataFrame.from_arrow)(gpu_df) for gpu_df in gpu_dfs[:part_count]]
gpu_dfs = [gpu_df for gpu_df in gpu_dfs]
wait(gpu_dfs)
tmp_map = [(gpu_df, list(client.who_has(gpu_df).values())[0]) for gpu_df in gpu_dfs]
new_map = {}
for key, value in tmp_map:
if value not in new_map:
new_map[value] = [key]
else:
new_map[value].append(key)
del(tmp_map)
gpu_dfs = []
for list_delayed in new_map.values():
gpu_dfs.append(delayed(cudf.concat)(list_delayed))
del(new_map)
gpu_dfs = [(gpu_df[['delinquency_12']], gpu_df[delayed(list)(gpu_df.columns.difference(['delinquency_12']))]) for gpu_df in gpu_dfs]
gpu_dfs = [(gpu_df[0].persist(), gpu_df[1].persist()) for gpu_df in gpu_dfs]
gpu_dfs = [dask.delayed(xgb.DMatrix)(gpu_df[1], gpu_df[0]) for gpu_df in gpu_dfs]
gpu_dfs = [gpu_df.persist() for gpu_df in gpu_dfs]
gc.collect()
-labels = None
-print('str(gpu_dfs) is {0}'.format(str(gpu_dfs)))
wait(gpu_dfs)
+# TRAIN THE MODEL
+labels = None
t1 = datetime.datetime.now()
bst = dxgb_gpu.train(client, dxgb_gpu_params, gpu_dfs, labels, num_boost_round=dxgb_gpu_params['nround'])
t2 = datetime.datetime.now()
-print("Training time ...")
-print(t2-t1)
-print('str(bst) is {0}'.format(str(bst)))
+print('\n---->>>> Training time: {0} <<<<----\n'.format(str(t2-t1)))
print('Exiting script')
+if __name__ == '__main__':
+main()
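The updated training script above brings up a single-node Dask-CUDA cluster before the ETL loop. Below is a minimal, hedged sketch of that bring-up pattern; it assumes a Linux host (for `hostname --all-ip-addresses`) with `dask_cuda` and `dask.distributed` installed and at least one visible GPU, and the function name `start_local_gpu_cluster` is illustrative rather than part of the script.

```python
# Minimal sketch of the cluster bring-up pattern used by the script above;
# assumes a Linux host, dask_cuda, dask.distributed and at least one CUDA GPU.
import subprocess

from dask.distributed import Client
from dask_cuda import LocalCUDACluster


def start_local_gpu_cluster(num_gpu=1):
    # Discover the host's first IP address, mirroring the subprocess call in main().
    output = subprocess.check_output(["hostname", "--all-ip-addresses"])
    ip_addr = output.decode().split()[0]
    print("Host IP:", ip_addr)

    # One Dask worker per GPU; newer dask_cuda releases generally do not need an
    # explicit ip= argument, so it is omitted here.
    cluster = LocalCUDACluster(n_workers=num_gpu)
    client = Client(cluster)
    print("Workers:", client.ncores())
    return client


if __name__ == "__main__":
    client = start_local_gpu_cluster(num_gpu=1)
```

With the client in hand, the per-quarter ETL results can be scheduled with `delayed` and gathered with `wait`, as the script does.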


@@ -0,0 +1,621 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/contrib/fairness/fairlearn-azureml-mitigation.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Unfairness Mitigation with Fairlearn and Azure Machine Learning\n",
"**This notebook shows how to upload results from Fairlearn's GridSearch mitigation algorithm into a dashboard in Azure Machine Learning Studio**\n",
"\n",
"## Table of Contents\n",
"\n",
"1. [Introduction](#Introduction)\n",
"1. [Loading the Data](#LoadingData)\n",
"1. [Training an Unmitigated Model](#UnmitigatedModel)\n",
"1. [Mitigation with GridSearch](#Mitigation)\n",
"1. [Uploading a Fairness Dashboard to Azure](#AzureUpload)\n",
" 1. Registering models\n",
" 1. Computing Fairness Metrics\n",
" 1. Uploading to Azure\n",
"1. [Conclusion](#Conclusion)\n",
"\n",
"<a id=\"Introduction\"></a>\n",
"## Introduction\n",
"This notebook shows how to use [Fairlearn (an open source fairness assessment and unfairness mitigation package)](http://fairlearn.org) and Azure Machine Learning Studio for a binary classification problem. This example uses the well-known adult census dataset. For the purposes of this notebook, we shall treat this as a loan decision problem. We will pretend that the label indicates whether or not each individual repaid a loan in the past. We will use the data to train a predictor to predict whether previously unseen individuals will repay a loan or not. The assumption is that the model predictions are used to decide whether an individual should be offered a loan. Its purpose is purely illustrative of a workflow including a fairness dashboard - in particular, we do **not** include a full discussion of the detailed issues which arise when considering fairness in machine learning. For such discussions, please [refer to the Fairlearn website](http://fairlearn.org/).\n",
"\n",
"We will apply the [grid search algorithm](https://fairlearn.org/v0.4.6/api_reference/fairlearn.reductions.html#fairlearn.reductions.GridSearch) from the Fairlearn package using a specific notion of fairness called Demographic Parity. This produces a set of models, and we will view these in a dashboard both locally and in the Azure Machine Learning Studio.\n",
"\n",
"### Setup\n",
"\n",
"To use this notebook, an Azure Machine Learning workspace is required.\n",
"Please see the [configuration notebook](../../configuration.ipynb) for information about creating one, if required.\n",
"This notebook also requires the following packages:\n",
"* `azureml-contrib-fairness`\n",
"* `fairlearn>=0.6.2` (pre-v0.5.0 will work with minor modifications)\n",
"* `joblib`\n",
"* `liac-arff`\n",
"* `raiwidgets`\n",
"\n",
"Fairlearn relies on features introduced in v0.22.1 of `scikit-learn`. If you have an older version already installed, please uncomment and run the following cell:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# !pip install --upgrade scikit-learn>=0.22.1"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, please ensure that when you downloaded this notebook, you also downloaded the `fairness_nb_utils.py` file from the same location, and placed it in the same directory as this notebook."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"LoadingData\"></a>\n",
"## Loading the Data\n",
"We use the well-known `adult` census dataset, which we will fetch from the OpenML website. We start with a fairly unremarkable set of imports:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from fairlearn.reductions import GridSearch, DemographicParity, ErrorRate\n",
"from raiwidgets import FairnessDashboard\n",
"\n",
"from sklearn.compose import ColumnTransformer\n",
"from sklearn.impute import SimpleImputer\n",
"from sklearn.linear_model import LogisticRegression\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.preprocessing import StandardScaler, OneHotEncoder\n",
"from sklearn.compose import make_column_selector as selector\n",
"from sklearn.pipeline import Pipeline\n",
"\n",
"import pandas as pd"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can now load and inspect the data:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from fairness_nb_utils import fetch_census_dataset\n",
"\n",
"data = fetch_census_dataset()\n",
" \n",
"# Extract the items we want\n",
"X_raw = data.data\n",
"y = (data.target == '>50K') * 1\n",
"\n",
"X_raw[\"race\"].value_counts().to_dict()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We are going to treat the sex and race of each individual as protected attributes, and in this particular case we are going to remove these attributes from the main data (this is not always the best option - see the [Fairlearn website](http://fairlearn.github.io/) for further discussion). Protected attributes are often denoted by 'A' in the literature, and we follow that convention here:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"A = X_raw[['sex','race']]\n",
"X_raw = X_raw.drop(labels=['sex', 'race'], axis = 1)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We now preprocess our data. To avoid the problem of data leakage, we split our data into training and test sets before performing any other transformations. Subsequent transformations (such as scalings) will be fit to the training data set, and then applied to the test dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"(X_train, X_test, y_train, y_test, A_train, A_test) = train_test_split(\n",
" X_raw, y, A, test_size=0.3, random_state=12345, stratify=y\n",
")\n",
"\n",
"# Ensure indices are aligned between X, y and A,\n",
"# after all the slicing and splitting of DataFrames\n",
"# and Series\n",
"\n",
"X_train = X_train.reset_index(drop=True)\n",
"X_test = X_test.reset_index(drop=True)\n",
"y_train = y_train.reset_index(drop=True)\n",
"y_test = y_test.reset_index(drop=True)\n",
"A_train = A_train.reset_index(drop=True)\n",
"A_test = A_test.reset_index(drop=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We have two types of column in the dataset - categorical columns which will need to be one-hot encoded, and numeric ones which will need to be rescaled. We also need to take care of missing values. We use a simple approach here, but please bear in mind that this is another way that bias could be introduced (especially if one subgroup tends to have more missing values).\n",
"\n",
"For this preprocessing, we make use of `Pipeline` objects from `sklearn`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"numeric_transformer = Pipeline(\n",
" steps=[\n",
" (\"impute\", SimpleImputer()),\n",
" (\"scaler\", StandardScaler()),\n",
" ]\n",
")\n",
"\n",
"categorical_transformer = Pipeline(\n",
" [\n",
" (\"impute\", SimpleImputer(strategy=\"most_frequent\")),\n",
" (\"ohe\", OneHotEncoder(handle_unknown=\"ignore\", sparse=False)),\n",
" ]\n",
")\n",
"\n",
"preprocessor = ColumnTransformer(\n",
" transformers=[\n",
" (\"num\", numeric_transformer, selector(dtype_exclude=\"category\")),\n",
" (\"cat\", categorical_transformer, selector(dtype_include=\"category\")),\n",
" ]\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now, the preprocessing pipeline is defined, we can run it on our training data, and apply the generated transform to our test data:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_train = preprocessor.fit_transform(X_train)\n",
"X_test = preprocessor.transform(X_test)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"UnmitigatedModel\"></a>\n",
"## Training an Unmitigated Model\n",
"\n",
"So we have a point of comparison, we first train a model (specifically, logistic regression from scikit-learn) on the raw data, without applying any mitigation algorithm:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"unmitigated_predictor = LogisticRegression(solver='liblinear', fit_intercept=True)\n",
"\n",
"unmitigated_predictor.fit(X_train, y_train)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can view this model in the fairness dashboard, and see the disparities which appear:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"FairnessDashboard(sensitive_features=A_test,\n",
" y_true=y_test,\n",
" y_pred={\"unmitigated\": unmitigated_predictor.predict(X_test)})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Looking at the disparity in accuracy when we select 'Sex' as the sensitive feature, we see that males have an error rate about three times greater than the females. More interesting is the disparity in opportunitiy - males are offered loans at three times the rate of females.\n",
"\n",
"Despite the fact that we removed the feature from the training data, our predictor still discriminates based on sex. This demonstrates that simply ignoring a protected attribute when fitting a predictor rarely eliminates unfairness. There will generally be enough other features correlated with the removed attribute to lead to disparate impact."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"Mitigation\"></a>\n",
"## Mitigation with GridSearch\n",
"\n",
"The `GridSearch` class in `Fairlearn` implements a simplified version of the exponentiated gradient reduction of [Agarwal et al. 2018](https://arxiv.org/abs/1803.02453). The user supplies a standard ML estimator, which is treated as a blackbox - for this simple example, we shall use the logistic regression estimator from scikit-learn. `GridSearch` works by generating a sequence of relabellings and reweightings, and trains a predictor for each.\n",
"\n",
"For this example, we specify demographic parity (on the protected attribute of sex) as the fairness metric. Demographic parity requires that individuals are offered the opportunity (a loan in this example) independent of membership in the protected class (i.e., females and males should be offered loans at the same rate). *We are using this metric for the sake of simplicity* in this example; the appropriate fairness metric can only be selected after *careful examination of the broader context* in which the model is to be used."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sweep = GridSearch(LogisticRegression(solver='liblinear', fit_intercept=True),\n",
" constraints=DemographicParity(),\n",
" grid_size=71)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"With our estimator created, we can fit it to the data. After `fit()` completes, we extract the full set of predictors from the `GridSearch` object.\n",
"\n",
"The following cell trains a many copies of the underlying estimator, and may take a minute or two to run:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sweep.fit(X_train, y_train,\n",
" sensitive_features=A_train.sex)\n",
"\n",
"# For Fairlearn pre-v0.5.0, need sweep._predictors\n",
"predictors = sweep.predictors_"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We could load these predictors into the Fairness dashboard now. However, the plot would be somewhat confusing due to their number. In this case, we are going to remove the predictors which are dominated in the error-disparity space by others from the sweep (note that the disparity will only be calculated for the protected attribute; other potentially protected attributes will *not* be mitigated). In general, one might not want to do this, since there may be other considerations beyond the strict optimisation of error and disparity (of the given protected attribute)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"errors, disparities = [], []\n",
"for predictor in predictors:\n",
" error = ErrorRate()\n",
" error.load_data(X_train, pd.Series(y_train), sensitive_features=A_train.sex)\n",
" disparity = DemographicParity()\n",
" disparity.load_data(X_train, pd.Series(y_train), sensitive_features=A_train.sex)\n",
" \n",
" errors.append(error.gamma(predictor.predict)[0])\n",
" disparities.append(disparity.gamma(predictor.predict).max())\n",
" \n",
"all_results = pd.DataFrame( {\"predictor\": predictors, \"error\": errors, \"disparity\": disparities})\n",
"\n",
"dominant_models_dict = dict()\n",
"base_name_format = \"census_gs_model_{0}\"\n",
"row_id = 0\n",
"for row in all_results.itertuples():\n",
" model_name = base_name_format.format(row_id)\n",
" errors_for_lower_or_eq_disparity = all_results[\"error\"][all_results[\"disparity\"]<=row.disparity]\n",
" if row.error <= errors_for_lower_or_eq_disparity.min():\n",
" dominant_models_dict[model_name] = row.predictor\n",
" row_id = row_id + 1"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can construct predictions for the dominant models (we include the unmitigated predictor as well, for comparison):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"predictions_dominant = {\"census_unmitigated\": unmitigated_predictor.predict(X_test)}\n",
"models_dominant = {\"census_unmitigated\": unmitigated_predictor}\n",
"for name, predictor in dominant_models_dict.items():\n",
" value = predictor.predict(X_test)\n",
" predictions_dominant[name] = value\n",
" models_dominant[name] = predictor"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"These predictions may then be viewed in the fairness dashboard. We include the race column from the dataset, as an alternative basis for assessing the models. However, since we have not based our mitigation on it, the variation in the models with respect to race can be large."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"FairnessDashboard(sensitive_features=A_test, \n",
" y_true=y_test.tolist(),\n",
" y_pred=predictions_dominant)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"When using sex as the sensitive feature and accuracy as the metric, we see a Pareto front forming - the set of predictors which represent optimal tradeoffs between accuracy and disparity in predictions. In the ideal case, we would have a predictor at (1,0) - perfectly accurate and without any unfairness under demographic parity (with respect to the protected attribute \"sex\"). The Pareto front represents the closest we can come to this ideal based on our data and choice of estimator. Note the range of the axes - the disparity axis covers more values than the accuracy, so we can reduce disparity substantially for a small loss in accuracy. Finally, we also see that the unmitigated model is towards the top right of the plot, with high accuracy, but worst disparity.\n",
"\n",
"By clicking on individual models on the plot, we can inspect their metrics for disparity and accuracy in greater detail. In a real example, we would then pick the model which represented the best trade-off between accuracy and disparity given the relevant business constraints."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"AzureUpload\"></a>\n",
"## Uploading a Fairness Dashboard to Azure\n",
"\n",
"Uploading a fairness dashboard to Azure is a two stage process. The `FairnessDashboard` invoked in the previous section relies on the underlying Python kernel to compute metrics on demand. This is obviously not available when the fairness dashboard is rendered in AzureML Studio. By default, the dashboard in Azure Machine Learning Studio also requires the models to be registered. The required stages are therefore:\n",
"1. Register the dominant models\n",
"1. Precompute all the required metrics\n",
"1. Upload to Azure\n",
"\n",
"Before that, we need to connect to Azure Machine Learning Studio:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Workspace, Experiment, Model\n",
"\n",
"ws = Workspace.from_config()\n",
"ws.get_details()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"RegisterModels\"></a>\n",
"### Registering Models\n",
"\n",
"The fairness dashboard is designed to integrate with registered models, so we need to do this for the models we want in the Studio portal. The assumption is that the names of the models specified in the dashboard dictionary correspond to the `id`s (i.e. `<name>:<version>` pairs) of registered models in the workspace. We register each of the models in the `models_dominant` dictionary into the workspace. For this, we have to save each model to a file, and then register that file:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import joblib\n",
"import os\n",
"\n",
"os.makedirs('models', exist_ok=True)\n",
"def register_model(name, model):\n",
" print(\"Registering \", name)\n",
" model_path = \"models/{0}.pkl\".format(name)\n",
" joblib.dump(value=model, filename=model_path)\n",
" registered_model = Model.register(model_path=model_path,\n",
" model_name=name,\n",
" workspace=ws)\n",
" print(\"Registered \", registered_model.id)\n",
" return registered_model.id\n",
"\n",
"model_name_id_mapping = dict()\n",
"for name, model in models_dominant.items():\n",
" m_id = register_model(name, model)\n",
" model_name_id_mapping[name] = m_id"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now, produce new predictions dictionaries, with the updated names:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"predictions_dominant_ids = dict()\n",
"for name, y_pred in predictions_dominant.items():\n",
" predictions_dominant_ids[model_name_id_mapping[name]] = y_pred"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"PrecomputeMetrics\"></a>\n",
"### Precomputing Metrics\n",
"\n",
"We create a _dashboard dictionary_ using Fairlearn's `metrics` package. The `_create_group_metric_set` method has arguments similar to the Dashboard constructor, except that the sensitive features are passed as a dictionary (to ensure that names are available), and we must specify the type of prediction. Note that we use the `predictions_dominant_ids` dictionary we just created:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sf = { 'sex': A_test.sex, 'race': A_test.race }\n",
"\n",
"from fairlearn.metrics._group_metric_set import _create_group_metric_set\n",
"\n",
"\n",
"dash_dict = _create_group_metric_set(y_true=y_test,\n",
" predictions=predictions_dominant_ids,\n",
" sensitive_features=sf,\n",
" prediction_type='binary_classification')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"DashboardUpload\"></a>\n",
"### Uploading the Dashboard\n",
"\n",
"Now, we import our `contrib` package which contains the routine to perform the upload:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.fairness import upload_dashboard_dictionary, download_dashboard_by_upload_id"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we can create an Experiment, then a Run, and upload our dashboard to it:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"exp = Experiment(ws, \"Test_Fairlearn_GridSearch_Census_Demo\")\n",
"print(exp)\n",
"\n",
"run = exp.start_logging()\n",
"try:\n",
" dashboard_title = \"Dominant Models from GridSearch\"\n",
" upload_id = upload_dashboard_dictionary(run,\n",
" dash_dict,\n",
" dashboard_name=dashboard_title)\n",
" print(\"\\nUploaded to id: {0}\\n\".format(upload_id))\n",
"\n",
" downloaded_dict = download_dashboard_by_upload_id(run, upload_id)\n",
"finally:\n",
" run.complete()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The dashboard can be viewed in the Run Details page.\n",
"\n",
"Finally, we can verify that the dashboard dictionary which we downloaded matches our upload:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(dash_dict == downloaded_dict)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"Conclusion\"></a>\n",
"## Conclusion\n",
"\n",
"In this notebook we have demonstrated how to use the `GridSearch` algorithm from Fairlearn to generate a collection of models, and then present them in the fairness dashboard in Azure Machine Learning Studio. Please remember that this notebook has not attempted to discuss the many considerations which should be part of any approach to unfairness mitigation. The [Fairlearn website](http://fairlearn.org/) provides that discussion"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"authors": [
{
"name": "riedgar"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.10"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
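As a complement to the dashboard workflow in the notebook above, per-group metrics can also be inspected programmatically with Fairlearn's `MetricFrame`. The sketch below is not part of the notebook; it assumes the notebook's `unmitigated_predictor`, `X_test`, `y_test` and `A_test` are in scope and that a recent Fairlearn release is installed (the `metrics=` keyword is the v0.7+ spelling).

```python
# A small programmatic check to accompany the dashboard; not part of the notebook.
# Assumes unmitigated_predictor, X_test, y_test and A_test exist as defined above.
from fairlearn.metrics import MetricFrame, selection_rate
from sklearn.metrics import accuracy_score

y_pred = unmitigated_predictor.predict(X_test)

mf = MetricFrame(
    metrics={"accuracy": accuracy_score, "selection_rate": selection_rate},
    y_true=y_test,
    y_pred=y_pred,
    sensitive_features=A_test["sex"],
)

print(mf.overall)       # metrics over the whole test set
print(mf.by_group)      # the same metrics broken down by sex
print(mf.difference())  # largest between-group gap for each metric
```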


@@ -0,0 +1,11 @@
name: fairlearn-azureml-mitigation
dependencies:
- pip:
- azureml-sdk
- azureml-contrib-fairness
- fairlearn>=0.6.2
- joblib
- liac-arff
- raiwidgets~=0.17.0
- itsdangerous==2.0.1
- markupsafe<2.1.0


@@ -0,0 +1,111 @@
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Utilities for azureml-contrib-fairness notebooks."""
import arff
from collections import OrderedDict
from contextlib import closing
import gzip
import pandas as pd
from sklearn.datasets import fetch_openml
from sklearn.utils import Bunch
import time
def fetch_openml_with_retries(data_id, max_retries=4, retry_delay=60):
"""Fetch a given dataset from OpenML with retries as specified."""
for i in range(max_retries):
try:
print("Download attempt {0} of {1}".format(i + 1, max_retries))
data = fetch_openml(data_id=data_id, as_frame=True)
break
except Exception as e: # noqa: B902
print("Download attempt failed with exception:")
print(e)
if i + 1 != max_retries:
print("Will retry after {0} seconds".format(retry_delay))
time.sleep(retry_delay)
retry_delay = retry_delay * 2
else:
raise RuntimeError("Unable to download dataset from OpenML")
return data
_categorical_columns = [
'workclass',
'education',
'marital-status',
'occupation',
'relationship',
'race',
'sex',
'native-country'
]
def fetch_census_dataset():
"""Fetch the Adult Census Dataset.
This uses a particular URL for the Adult Census dataset. The code
is a simplified version of fetch_openml() in sklearn.
The data are copied from:
https://openml.org/data/v1/download/1595261.gz
(as of 2021-03-31)
"""
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
filename = "1595261.gz"
data_url = "https://rainotebookscdn.blob.core.windows.net/datasets/"
remaining_attempts = 5
sleep_duration = 10
while remaining_attempts > 0:
try:
urlretrieve(data_url + filename, filename)
http_stream = gzip.GzipFile(filename=filename, mode='rb')
with closing(http_stream):
def _stream_generator(response):
for line in response:
yield line.decode('utf-8')
stream = _stream_generator(http_stream)
data = arff.load(stream)
except Exception as exc: # noqa: B902
remaining_attempts -= 1
print("Error downloading dataset from {} ({} attempt(s) remaining)"
.format(data_url, remaining_attempts))
print(exc)
time.sleep(sleep_duration)
sleep_duration *= 2
continue
else:
# dataset successfully downloaded
break
else:
raise Exception("Could not retrieve dataset from {}.".format(data_url))
attributes = OrderedDict(data['attributes'])
arff_columns = list(attributes)
raw_df = pd.DataFrame(data=data['data'], columns=arff_columns)
target_column_name = 'class'
target = raw_df.pop(target_column_name)
for col_name in _categorical_columns:
dtype = pd.api.types.CategoricalDtype(attributes[col_name])
raw_df[col_name] = raw_df[col_name].astype(dtype, copy=False)
result = Bunch()
result.data = raw_df
result.target = target
return result
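A short usage sketch for the helpers above may be useful; it assumes `fairness_nb_utils.py` sits next to the calling script and that `liac-arff`, `pandas` and `scikit-learn` are installed. The OpenML `data_id=1590` in the last line is an assumption (the id commonly used for the `adult` dataset), not something the module itself specifies.

```python
# Hedged usage sketch for the helper module above; not part of the repository file.
from fairness_nb_utils import fetch_census_dataset, fetch_openml_with_retries

# Preferred path in the notebooks: the Azure-hosted copy of the Adult census data.
census = fetch_census_dataset()
print(census.data.shape)
print(census.target.value_counts())

# Alternative path with retries against OpenML; data_id=1590 is assumed to be
# the 'adult' dataset and may need to be adjusted.
openml_census = fetch_openml_with_retries(data_id=1590)
```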


@@ -0,0 +1,545 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved. \n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/contrib/fairness/upload-fairness-dashboard.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Upload a Fairness Dashboard to Azure Machine Learning Studio\n",
"**This notebook shows how to generate and upload a fairness assessment dashboard from Fairlearn to AzureML Studio**\n",
"\n",
"## Table of Contents\n",
"\n",
"1. [Introduction](#Introduction)\n",
"1. [Loading the Data](#LoadingData)\n",
"1. [Processing the Data](#ProcessingData)\n",
"1. [Training Models](#TrainingModels)\n",
"1. [Logging in to AzureML](#LoginAzureML)\n",
"1. [Registering the Models](#RegisterModels)\n",
"1. [Using the Fairness Dashboard](#LocalDashboard)\n",
"1. [Uploading a Fairness Dashboard to Azure](#AzureUpload)\n",
" 1. Computing Fairness Metrics\n",
" 1. Uploading to Azure\n",
"1. [Conclusion](#Conclusion)\n",
" \n",
"\n",
"<a id=\"Introduction\"></a>\n",
"## Introduction\n",
"\n",
"In this notebook, we walk through a simple example of using the `azureml-contrib-fairness` package to upload a collection of fairness statistics for a fairness dashboard. It is an example of integrating the [open source Fairlearn package](https://www.github.com/fairlearn/fairlearn) with Azure Machine Learning. This is not an example of fairness analysis or mitigation - this notebook simply shows how to get a fairness dashboard into the Azure Machine Learning portal. We will load the data and train a couple of simple models. We will then use Fairlearn to generate data for a Fairness dashboard, which we can upload to Azure Machine Learning portal and view there.\n",
"\n",
"### Setup\n",
"\n",
"To use this notebook, an Azure Machine Learning workspace is required.\n",
"Please see the [configuration notebook](../../configuration.ipynb) for information about creating one, if required.\n",
"This notebook also requires the following packages:\n",
"* `azureml-contrib-fairness`\n",
"* `fairlearn>=0.6.2` (also works for pre-v0.5.0 with slight modifications)\n",
"* `joblib`\n",
"* `liac-arff`\n",
"* `raiwidgets`\n",
"\n",
"Fairlearn relies on features introduced in v0.22.1 of `scikit-learn`. If you have an older version already installed, please uncomment and run the following cell:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# !pip install --upgrade scikit-learn>=0.22.1"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, please ensure that when you downloaded this notebook, you also downloaded the `fairness_nb_utils.py` file from the same location, and placed it in the same directory as this notebook."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"LoadingData\"></a>\n",
"## Loading the Data\n",
"We use the well-known `adult` census dataset, which we fetch from the OpenML website. We start with a fairly unremarkable set of imports:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn import svm\n",
"from sklearn.compose import ColumnTransformer\n",
"from sklearn.impute import SimpleImputer\n",
"from sklearn.linear_model import LogisticRegression\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.preprocessing import StandardScaler, OneHotEncoder\n",
"from sklearn.compose import make_column_selector as selector\n",
"from sklearn.pipeline import Pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we can load the data:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from fairness_nb_utils import fetch_census_dataset\n",
"\n",
"data = fetch_census_dataset()\n",
" \n",
"# Extract the items we want\n",
"X_raw = data.data\n",
"y = (data.target == '>50K') * 1"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can take a look at some of the data. For example, the next cells shows the counts of the different races identified in the dataset:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(X_raw[\"race\"].value_counts().to_dict())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"ProcessingData\"></a>\n",
"## Processing the Data\n",
"\n",
"With the data loaded, we process it for our needs. First, we extract the sensitive features of interest into `A` (conventionally used in the literature) and leave the rest of the feature data in `X_raw`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"A = X_raw[['sex','race']]\n",
"X_raw = X_raw.drop(labels=['sex', 'race'],axis = 1)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We now preprocess our data. To avoid the problem of data leakage, we split our data into training and test sets before performing any other transformations. Subsequent transformations (such as scalings) will be fit to the training data set, and then applied to the test dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"(X_train, X_test, y_train, y_test, A_train, A_test) = train_test_split(\n",
" X_raw, y, A, test_size=0.3, random_state=12345, stratify=y\n",
")\n",
"\n",
"# Ensure indices are aligned between X, y and A,\n",
"# after all the slicing and splitting of DataFrames\n",
"# and Series\n",
"\n",
"X_train = X_train.reset_index(drop=True)\n",
"X_test = X_test.reset_index(drop=True)\n",
"y_train = y_train.reset_index(drop=True)\n",
"y_test = y_test.reset_index(drop=True)\n",
"A_train = A_train.reset_index(drop=True)\n",
"A_test = A_test.reset_index(drop=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We have two types of column in the dataset - categorical columns which will need to be one-hot encoded, and numeric ones which will need to be rescaled. We also need to take care of missing values. We use a simple approach here, but please bear in mind that this is another way that bias could be introduced (especially if one subgroup tends to have more missing values).\n",
"\n",
"For this preprocessing, we make use of `Pipeline` objects from `sklearn`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"numeric_transformer = Pipeline(\n",
" steps=[\n",
" (\"impute\", SimpleImputer()),\n",
" (\"scaler\", StandardScaler()),\n",
" ]\n",
")\n",
"\n",
"categorical_transformer = Pipeline(\n",
" [\n",
" (\"impute\", SimpleImputer(strategy=\"most_frequent\")),\n",
" (\"ohe\", OneHotEncoder(handle_unknown=\"ignore\", sparse=False)),\n",
" ]\n",
")\n",
"\n",
"preprocessor = ColumnTransformer(\n",
" transformers=[\n",
" (\"num\", numeric_transformer, selector(dtype_exclude=\"category\")),\n",
" (\"cat\", categorical_transformer, selector(dtype_include=\"category\")),\n",
" ]\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now, the preprocessing pipeline is defined, we can run it on our training data, and apply the generated transform to our test data:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_train = preprocessor.fit_transform(X_train)\n",
"X_test = preprocessor.transform(X_test)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"TrainingModels\"></a>\n",
"## Training Models\n",
"\n",
"We now train a couple of different models on our data. The `adult` census dataset is a classification problem - the goal is to predict whether a particular individual exceeds an income threshold. For the purpose of generating a dashboard to upload, it is sufficient to train two basic classifiers. First, a logistic regression classifier:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"lr_predictor = LogisticRegression(solver='liblinear', fit_intercept=True)\n",
"\n",
"lr_predictor.fit(X_train, y_train)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"And for comparison, a support vector classifier:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"svm_predictor = svm.SVC()\n",
"\n",
"svm_predictor.fit(X_train, y_train)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"LoginAzureML\"></a>\n",
"## Logging in to AzureML\n",
"\n",
"With our two classifiers trained, we can log into our AzureML workspace:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Workspace, Experiment, Model\n",
"\n",
"ws = Workspace.from_config()\n",
"ws.get_details()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"RegisterModels\"></a>\n",
"## Registering the Models\n",
"\n",
"Next, we register our models. By default, the subroutine which uploads the models checks that the names provided correspond to registered models in the workspace. We define a utility routine to do the registering:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import joblib\n",
"import os\n",
"\n",
"os.makedirs('models', exist_ok=True)\n",
"def register_model(name, model):\n",
" print(\"Registering \", name)\n",
" model_path = \"models/{0}.pkl\".format(name)\n",
" joblib.dump(value=model, filename=model_path)\n",
" registered_model = Model.register(model_path=model_path,\n",
" model_name=name,\n",
" workspace=ws)\n",
" print(\"Registered \", registered_model.id)\n",
" return registered_model.id"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now, we register the models. For convenience in subsequent method calls, we store the results in a dictionary, which maps the `id` of the registered model (a string in `name:version` format) to the predictor itself:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_dict = {}\n",
"\n",
"lr_reg_id = register_model(\"fairness_linear_regression\", lr_predictor)\n",
"model_dict[lr_reg_id] = lr_predictor\n",
"svm_reg_id = register_model(\"fairness_svm\", svm_predictor)\n",
"model_dict[svm_reg_id] = svm_predictor"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"LocalDashboard\"></a>\n",
"## Using the Fairlearn Dashboard\n",
"\n",
"We can now examine the fairness of the two models we have training, both as a function of race and (binary) sex. Before uploading the dashboard to the AzureML portal, we will first instantiate a local instance of the Fairlearn dashboard.\n",
"\n",
"Regardless of the viewing location, the dashboard is based on three things - the true values, the model predictions and the sensitive feature values. The dashboard can use predictions from multiple models and multiple sensitive features if desired (as we are doing here).\n",
"\n",
"Our first step is to generate a dictionary mapping the `id` of the registered model to the corresponding array of predictions:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ys_pred = {}\n",
"for n, p in model_dict.items():\n",
" ys_pred[n] = p.predict(X_test)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can examine these predictions in a locally invoked Fairlearn dashboard. This can be compared to the dashboard uploaded to the portal (in the next section):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from raiwidgets import FairnessDashboard\n",
"\n",
"FairnessDashboard(sensitive_features=A_test, \n",
" y_true=y_test.tolist(),\n",
" y_pred=ys_pred)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"AzureUpload\"></a>\n",
"## Uploading a Fairness Dashboard to Azure\n",
"\n",
"Uploading a fairness dashboard to Azure is a two stage process. The `FairnessDashboard` invoked in the previous section relies on the underlying Python kernel to compute metrics on demand. This is obviously not available when the fairness dashboard is rendered in AzureML Studio. The required stages are therefore:\n",
"1. Precompute all the required metrics\n",
"1. Upload to Azure\n",
"\n",
"\n",
"### Computing Fairness Metrics\n",
"We use Fairlearn to create a dictionary which contains all the data required to display a dashboard. This includes both the raw data (true values, predicted values and sensitive features), and also the fairness metrics. The API is similar to that used to invoke the Dashboard locally. However, there are a few minor changes to the API, and the type of problem being examined (binary classification, regression etc.) needs to be specified explicitly:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sf = { 'Race': A_test.race, 'Sex': A_test.sex }\n",
"\n",
"from fairlearn.metrics._group_metric_set import _create_group_metric_set\n",
"\n",
"dash_dict = _create_group_metric_set(y_true=y_test,\n",
" predictions=ys_pred,\n",
" sensitive_features=sf,\n",
" prediction_type='binary_classification')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `_create_group_metric_set()` method is currently underscored since its exact design is not yet final in Fairlearn."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Uploading to Azure\n",
"\n",
"We can now import the `azureml.contrib.fairness` package itself. We will round-trip the data, so there are two required subroutines:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.contrib.fairness import upload_dashboard_dictionary, download_dashboard_by_upload_id"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, we can upload the generated dictionary to AzureML. The upload method requires a run, so we first create an experiment and a run. The uploaded dashboard can be seen on the corresponding Run Details page in AzureML Studio. For completeness, we also download the dashboard dictionary which we uploaded."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"exp = Experiment(ws, \"notebook-01\")\n",
"print(exp)\n",
"\n",
"run = exp.start_logging()\n",
"try:\n",
" dashboard_title = \"Sample notebook upload\"\n",
" upload_id = upload_dashboard_dictionary(run,\n",
" dash_dict,\n",
" dashboard_name=dashboard_title)\n",
" print(\"\\nUploaded to id: {0}\\n\".format(upload_id))\n",
"\n",
" downloaded_dict = download_dashboard_by_upload_id(run, upload_id)\n",
"finally:\n",
" run.complete()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, we can verify that the dashboard dictionary which we downloaded matches our upload:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(dash_dict == downloaded_dict)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"Conclusion\"></a>\n",
"## Conclusion\n",
"\n",
"In this notebook we have demonstrated how to generate and upload a fairness dashboard to AzureML Studio. We have not discussed how to analyse the results and apply mitigations. Those topics will be covered elsewhere."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"authors": [
{
"name": "riedgar"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.10"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
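Because the Studio dashboard matches prediction keys against registered model ids, a quick sanity check of those ids before uploading can save a failed run. A minimal sketch follows; it is not part of the notebook, and it assumes the notebook's `ws` and `model_dict` are in scope and that `azureml-core` is installed.

```python
# Optional sanity check, not part of the notebook: confirm every key in
# model_dict resolves to a registered model in the workspace.
from azureml.core import Model

for model_id in model_dict:
    name, version = model_id.split(":")
    registered = Model(ws, name=name, version=int(version))
    print("Found registered model:", registered.id, "version", registered.version)
```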


@@ -0,0 +1,11 @@
name: upload-fairness-dashboard
dependencies:
- pip:
- azureml-sdk
- azureml-contrib-fairness
- fairlearn>=0.6.2
- joblib
- liac-arff
- raiwidgets~=0.17.0
- itsdangerous==2.0.1
- markupsafe<2.1.0


@@ -4,14 +4,12 @@ Learn how to use Azure Machine Learning services for experimentation and model m
As a pre-requisite, run the [configuration Notebook](../configuration.ipynb) notebook first to set up your Azure ML Workspace. Then, run the notebooks in following recommended order.
-* [train-within-notebook](./training/train-within-notebook): Train a model hile tracking run history, and learn how to deploy the model as web service to Azure Container Instance.
+* [train-within-notebook](./training/train-within-notebook): Train a model while tracking run history, and learn how to deploy the model as web service to Azure Container Instance.
* [train-on-local](./training/train-on-local): Learn how to submit a run to local computer and use Azure ML managed run configuration.
* [train-on-amlcompute](./training/train-on-amlcompute): Use a 1-n node Azure ML managed compute cluster for remote runs on Azure CPU or GPU infrastructure.
* [train-on-remote-vm](./training/train-on-remote-vm): Use Data Science Virtual Machine as a target for remote runs.
-* [logging-api](./training/logging-api): Learn about the details of logging metrics to run history.
+* [logging-api](./track-and-monitor-experiments/logging-api): Learn about the details of logging metrics to run history.
-* [register-model-create-image-deploy-service](./deployment/register-model-create-image-deploy-service): Learn about the details of model management.
* [production-deploy-to-aks](./deployment/production-deploy-to-aks) Deploy a model to production at scale on Azure Kubernetes Service.
-* [enable-data-collection-for-models-in-aks](./deployment/enable-data-collection-for-models-in-aks) Learn about data collection APIs for deployed model.
* [enable-app-insights-in-production-service](./deployment/enable-app-insights-in-production-service) Learn how to use App Insights with production web service.
Find quickstarts, end-to-end tutorials, and how-tos on the [official documentation site for Azure Machine Learning service](https://docs.microsoft.com/en-us/azure/machine-learning/service/).


@@ -1,8 +1,8 @@
# Table of Contents
1. [Automated ML Introduction](#introduction)
-1. [Running samples in Azure Notebooks](#jupyter)
-1. [Running samples in Azure Databricks](#databricks)
-1. [Running samples in a Local Conda environment](#localconda)
+1. [Setup using Compute Instances](#jupyter)
+1. [Setup using a Local Conda environment](#localconda)
+1. [Setup using Azure Databricks](#databricks)
1. [Automated ML SDK Sample Notebooks](#samples)
1. [Documentation](#documentation)
1. [Running using python command](#pythoncommand)
@@ -13,61 +13,43 @@
Automated machine learning (automated ML) builds high quality machine learning models for you by automating model and hyperparameter selection. Bring a labelled dataset that you want to build a model for, automated ML will give you a high quality machine learning model that you can use for predictions. Automated machine learning (automated ML) builds high quality machine learning models for you by automating model and hyperparameter selection. Bring a labelled dataset that you want to build a model for, automated ML will give you a high quality machine learning model that you can use for predictions.
If you are new to Data Science, AutoML will help you get jumpstarted by simplifying machine learning model building. It abstracts you from needing to perform model selection, hyperparameter selection and in one step creates a high quality trained model for you to use. If you are new to Data Science, automated ML will help you get jumpstarted by simplifying machine learning model building. It abstracts you from needing to perform model selection, hyperparameter selection and in one step creates a high quality trained model for you to use.
If you are an experienced data scientist, AutoML will help increase your productivity by intelligently performing the model and hyperparameter selection for your training and generates high quality models much quicker than manually specifying several combinations of the parameters and running training jobs. AutoML provides visibility and access to all the training jobs and the performance characteristics of the models to help you further tune the pipeline if you desire. If you are an experienced data scientist, automated ML will help increase your productivity by intelligently performing the model and hyperparameter selection for your training and generates high quality models much quicker than manually specifying several combinations of the parameters and running training jobs. Automated ML provides visibility and access to all the training jobs and the performance characteristics of the models to help you further tune the pipeline if you desire.
Below are the three execution environments supported by AutoML. Below are the three execution environments supported by automated ML.
<a name="jupyter"></a> <a name="jupyter"></a>
## Running samples in Azure Notebooks - Jupyter based notebooks in the Azure cloud ## Setup using Compute Instances - Jupyter based notebooks from a Azure Virtual Machine
1. [![Azure Notebooks](https://notebooks.azure.com/launch.png)](https://aka.ms/aml-clone-azure-notebooks) 1. Open the [ML Azure portal](https://ml.azure.com)
[Import sample notebooks ](https://aka.ms/aml-clone-azure-notebooks) into Azure Notebooks. 1. Select Compute
1. Follow the instructions in the [configuration](../../configuration.ipynb) notebook to create and connect to a workspace. 1. Select Compute Instances
1. Open one of the sample notebooks. 1. Click New
1. Type a Compute Name, select a Virtual Machine type and select a Virtual Machine size
<a name="databricks"></a> 1. Click Create
## Running samples in Azure Databricks
**NOTE**: Please create your Azure Databricks cluster as v4.x (high concurrency preferred) with **Python 3** (dropdown).
**NOTE**: You should at least have contributor access to your Azure subscription to run the notebook.
- Please remove the previous SDK version if there is any and install the latest SDK by installing **azureml-sdk[automl_databricks]** as a PyPi library in the Azure Databricks workspace.
- You can find the detailed Readme instructions at [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks).
- Download the sample notebook automl-databricks-local-01.ipynb from [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks) and import it into the Azure Databricks workspace.
- Attach the notebook to the cluster.
<a name="localconda"></a> <a name="localconda"></a>
## Running samples in a Local Conda environment ## Setup using a Local Conda environment
To run these notebooks on your own notebook server, use these installation instructions.
The instructions below will install everything you need and then start a Jupyter notebook. To start your Jupyter notebook manually, use:
```
conda activate azure_automl
jupyter notebook
```
or on Mac:
```
source activate azure_automl
jupyter notebook
```
### 1. Install mini-conda from [here](https://conda.io/miniconda.html), choose 64-bit Python 3.7 or higher.
- **Note**: if you already have conda installed, you can keep using it but it should be version 4.4.10 or later (as shown by: conda -V). If you have a previous version installed, you can update it using the command: conda update conda.
There's no need to install mini-conda specifically.
### 2. Downloading the sample notebooks
- Download the sample notebooks from [GitHub](https://github.com/Azure/MachineLearningNotebooks) as zip and extract the contents to a local directory. The AutoML sample notebooks are in the "automl" folder. - Download the sample notebooks from [GitHub](https://github.com/Azure/MachineLearningNotebooks) as zip and extract the contents to a local directory. The automated ML sample notebooks are in the "automated-machine-learning" folder.
### 3. Setup a new conda environment
The **automl_setup** script creates a new conda environment, installs the necessary packages, configures the widget and starts a Jupyter notebook. It takes the conda environment name as an optional parameter. The default conda environment name is azure_automl. The exact command depends on the operating system. See the specific sections below for Windows, Mac and Linux. It can take about 10 minutes to execute.
Packages installed by the **automl_setup** script:
<ul><li>python</li><li>nb_conda</li><li>matplotlib</li><li>numpy</li><li>cython</li><li>urllib3</li><li>scipy</li><li>scikit-learn</li><li>pandas</li><li>tensorflow</li><li>py-xgboost</li><li>azureml-sdk</li><li>azureml-widgets</li><li>pandas-ml</li></ul>
For more details refer to the [automl_env.yml](./automl_env.yml)
## Windows
Start an **Anaconda Prompt** window, cd to the **how-to-use-azureml/automated-machine-learning** folder where the sample notebooks were extracted and then run:
``` ```
@@ -95,98 +77,116 @@ bash automl_setup_linux.sh
### 5. Running Samples
- Please make sure you use the Python [conda env:azure_automl] kernel when trying the sample notebooks.
- Follow the instructions in the individual notebooks to explore various features in automated ML; a minimal end-to-end sketch is shown below for orientation.
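As a quick orientation before opening the notebooks, here is a minimal sketch of the pattern they follow: configure an automated ML run with `AutoMLConfig` and submit it to an `Experiment`. The experiment name is illustrative; the dataset URL and settings mirror the credit card classification sample shown later in this repository, and a workspace config file (created by configuration.ipynb) is assumed.
```
import logging

from azureml.core import Experiment, Workspace
from azureml.core.dataset import Dataset
from azureml.train.automl import AutoMLConfig

# Connect to the workspace described by the local config.json (see configuration.ipynb).
ws = Workspace.from_config()
experiment = Experiment(ws, "automl-readme-sketch")  # illustrative experiment name

# Tabular dataset with features plus a label column named "Class"
# (same public sample data used by the credit card fraud notebook).
data = "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv"
training_data = Dataset.Tabular.from_delimited_files(data)

automl_config = AutoMLConfig(
    task="classification",
    training_data=training_data,
    label_column_name="Class",
    primary_metric="average_precision_score_weighted",
    n_cross_validations=3,
    enable_early_stopping=True,
    experiment_timeout_hours=0.25,  # short limit for a quick trial; remove for real runs
    verbosity=logging.INFO,
)

# Submit the run; add compute_target=... to train on a remote AML Compute cluster instead.
run = experiment.submit(automl_config, show_output=True)
best_run, fitted_model = run.get_output()
```
The individual sample notebooks add remote compute, featurization options, explainability and deployment on top of this basic pattern.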
### 6. Starting jupyter notebook manually
To start your Jupyter notebook manually, use:
```
conda activate azure_automl
jupyter notebook
```
or on Mac or Linux:
```
source activate azure_automl
jupyter notebook
```
<a name="databricks"></a>
## Setup using Azure Databricks
**NOTE**: Please create your Azure Databricks cluster as v7.1 (high concurrency preferred) with **Python 3** (dropdown).
**NOTE**: You should at least have contributor access to your Azure subscription to run the notebook.
- You can find the detailed Readme instructions at [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks/automl).
- Download the sample notebook automl-databricks-local-01.ipynb from [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/azure-databricks/automl) and import it into the Azure Databricks workspace.
- Attach the notebook to the cluster.
<a name="samples"></a> <a name="samples"></a>
# Automated ML SDK Sample Notebooks # Automated ML SDK Sample Notebooks
- [auto-ml-classification.ipynb](classification/auto-ml-classification.ipynb) ## Classification
- Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits) - **Classify Credit Card Fraud**
- Simple example of using Auto ML for classification - Dataset: [Kaggle's credit card fraud detection dataset](https://www.kaggle.com/mlg-ulb/creditcardfraud)
- Uses local compute for training - **[Jupyter Notebook (remote run)](classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb)**
- run the experiment remotely on AML Compute cluster
- test the performance of the best model in the local environment
- **[Jupyter Notebook (local run)](local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb)**
- run experiment in the local environment
- use Mimic Explainer for computing feature importance
- deploy the best model along with the explainer to an Azure Kubernetes (AKS) cluster, which will compute the raw and engineered feature importances at inference time
- **Predict Term Deposit Subscriptions in a Bank**
- Dataset: [UCI's bank marketing dataset](https://www.kaggle.com/janiobachmann/bank-marketing-dataset)
- **[Jupyter Notebook](classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb)**
- run experiment remotely on AML Compute cluster to generate ONNX compatible models
- view the featurization steps that were applied during training
- view feature importance for the best model
- download the best model in ONNX format and use it for inferencing using ONNXRuntime
- deploy the best model in PKL format to Azure Container Instance (ACI)
- **Predict Newsgroup based on Text from News Article**
- Dataset: [20 newsgroups text dataset](https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html)
- **[Jupyter Notebook](classification-text-dnn/auto-ml-classification-text-dnn.ipynb)**
- AutoML highlights here include using deep neural networks (DNNs) to create embedded features from text data
- AutoML will use Bidirectional Encoder Representations from Transformers (BERT) when a GPU compute is used
- Bidirectional Long-Short Term neural network (BiLSTM) will be utilized when a CPU compute is used, thereby optimizing the choice of DNN
- [auto-ml-regression.ipynb](regression/auto-ml-regression.ipynb) ## Regression
- Dataset: scikit learn's [diabetes dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_diabetes.html) - **Predict Performance of Hardware Parts**
- Simple example of using Auto ML for regression - Dataset: Hardware Performance Dataset
- Uses local compute for training - **[Jupyter Notebook](regression/auto-ml-regression.ipynb)**
- run the experiment remotely on AML Compute cluster
- get best trained model for a different metric than the one the experiment was optimized for
- test the performance of the best model in the local environment
- **[Jupyter Notebook (advanced)](regression/auto-ml-regression.ipynb)**
- run the experiment remotely on AML Compute cluster
- customize featurization: override column purpose within the dataset, configure transformer parameters
- get best trained model for a different metric than the one the experiment was optimized for
- run a model explanation experiment on the remote cluster
- deploy the model along the explainer and run online inferencing
- [auto-ml-remote-execution.ipynb](remote-execution/auto-ml-remote-execution.ipynb) ## Time Series Forecasting
- Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits) - **Forecast Energy Demand**
- Example of using Auto ML for classification using a remote linux DSVM for training - Dataset: [NYC energy demand data](http://mis.nyiso.com/public/P-58Blist.htm)
- Parallel execution of iterations - **[Jupyter Notebook](forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb)**
- Async tracking of progress - run experiment remotely on AML Compute cluster
- Cancelling individual iterations or entire run - use lags and rolling window features
- Retrieving models for any iteration or logged metric - view the featurization steps that were applied during training
- Specify automl settings as kwargs - get the best model, use it to forecast on test data and compare the accuracy of predictions against real data
- **Forecast Orange Juice Sales (Multi-Series)**
- [auto-ml-remote-amlcompute.ipynb](remote-batchai/auto-ml-remote-amlcompute.ipynb) - Dataset: [Dominick's grocery sales of orange juice](forecasting-orange-juice-sales/dominicks_OJ.csv)
- Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits) - **[Jupyter Notebook](forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb)**
- Example of using automated ML for classification using remote AmlCompute for training - run experiment remotely on AML Compute cluster
- Parallel execution of iterations - customize time-series featurization, change column purpose and override transformer hyper parameters
- Async tracking of progress - evaluate locally the performance of the generated best model
- Cancelling individual iterations or entire run - deploy the best model as a webservice on Azure Container Instance (ACI)
- Retrieving models for any iteration or logged metric - get online predictions from the deployed model
- Specify automl settings as kwargs - **Forecast Demand of a Bike-Sharing Service**
- Dataset: [Bike demand data](forecasting-bike-share/bike-no.csv)
- [auto-ml-remote-attach.ipynb](remote-attach/auto-ml-remote-attach.ipynb) - **[Jupyter Notebook](forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb)**
- Dataset: Scikit learn's [20newsgroup](http://scikit-learn.org/stable/datasets/twenty_newsgroups.html) - run experiment remotely on AML Compute cluster
- handling text data with preprocess flag - integrate holiday features
- Reading data from a blob store for remote executions - run rolling forecast for test set that is longer than the forecast horizon
- using pandas dataframes for reading data - compute metrics on the predictions from the remote forecast
- **The Forecast Function Interface**
- [auto-ml-missing-data-blacklist-early-termination.ipynb](missing-data-blacklist-early-termination/auto-ml-missing-data-blacklist-early-termination.ipynb) - Dataset: Generated for sample purposes
- Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits) - **[Jupyter Notebook](forecasting-forecast-function/auto-ml-forecasting-function.ipynb)**
- Blacklist certain pipelines - train a forecaster using a remote AML Compute cluster
- Specify a target metrics to indicate stopping criteria - capabilities of forecast function (e.g. forecast farther into the horizon)
- Handling Missing Data in the input - generate confidence intervals
- **Forecast Beverage Production**
- [auto-ml-sparse-data-train-test-split.ipynb](sparse-data-train-test-split/auto-ml-sparse-data-train-test-split.ipynb) - Dataset: [Monthly beer production data](forecasting-beer-remote/Beer_no_valid_split_train.csv)
- Dataset: Scikit learn's [20newsgroup](http://scikit-learn.org/stable/datasets/twenty_newsgroups.html) - **[Jupyter Notebook](forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb)**
- Handle sparse datasets - train using a remote AML Compute cluster
- Specify custom train and validation set - enable the DNN learning model
- forecast on a remote compute cluster and compare different model performance
- [auto-ml-exploring-previous-runs.ipynb](exploring-previous-runs/auto-ml-exploring-previous-runs.ipynb) - **Continuous Retraining with NOAA Weather Data**
- List all projects for the workspace - Dataset: [NOAA weather data from Azure Open Datasets](https://azure.microsoft.com/en-us/services/open-datasets/)
- List all AutoML Runs for a given project - **[Jupyter Notebook](continuous-retraining/auto-ml-continuous-retraining.ipynb)**
- Get details for a AutoML Run. (Automl settings, run widget & all metrics) - continuously retrain a model using Pipelines and AutoML
- Download fitted pipeline for any iteration - create a Pipeline to upload a time series dataset to an Azure blob
- create a Pipeline to run an AutoML experiment and register the best resulting model in the Workspace
- [auto-ml-remote-execution-with-datastore.ipynb](remote-execution-with-datastore/auto-ml-remote-execution-with-datastore.ipynb) - publish the training pipeline created and schedule it to run daily
- Dataset: Scikit learn's [20newsgroup](http://scikit-learn.org/stable/datasets/twenty_newsgroups.html)
- Download the data and store it in DataStore.
- [auto-ml-classification-with-deployment.ipynb](classification-with-deployment/auto-ml-classification-with-deployment.ipynb)
- Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits)
- Simple example of using Auto ML for classification
- Registering the model
- Creating Image and creating aci service
- Testing the aci service
- [auto-ml-sample-weight.ipynb](sample-weight/auto-ml-sample-weight.ipynb)
- How to specifying sample_weight
- The difference that it makes to test results
- [auto-ml-subsampling-local.ipynb](subsampling/auto-ml-subsampling-local.ipynb)
- How to enable subsampling
- [auto-ml-dataprep.ipynb](dataprep/auto-ml-dataprep.ipynb)
- Using DataPrep for reading data
- [auto-ml-dataprep-remote-execution.ipynb](dataprep-remote-execution/auto-ml-dataprep-remote-execution.ipynb)
- Using DataPrep for reading data with remote execution
- [auto-ml-classification-with-whitelisting.ipynb](classification-with-whitelisting/auto-ml-classification-with-whitelisting.ipynb)
- Dataset: scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits)
- Simple example of using Auto ML for classification with whitelisting tensorflow models.
- Uses local compute for training
- [auto-ml-forecasting-energy-demand.ipynb](forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb)
- Dataset: [NYC energy demand data](forecasting-a/nyc_energy.csv)
- Example of using AutoML for training a forecasting model
- [auto-ml-forecasting-orange-juice-sales.ipynb](forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb)
- Dataset: [Dominick's grocery sales of orange juice](forecasting-b/dominicks_OJ.csv)
- Example of training an AutoML forecasting model on multiple time-series
<a name="documentation"></a> <a name="documentation"></a>
See [Configure automated machine learning experiments](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-auto-train) to learn how more about the the settings and features available for automated machine learning experiments. See [Configure automated machine learning experiments](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-auto-train) to learn how more about the the settings and features available for automated machine learning experiments.
@@ -205,10 +205,18 @@ The main code of the file must be indented so that it is under this condition.
<a name="troubleshooting"></a> <a name="troubleshooting"></a>
# Troubleshooting # Troubleshooting
## automl_setup fails ## automl_setup fails
1. On Windows, make sure that you are running automl_setup from an Anaconda Prompt window rather than a regular cmd window. You can launch the "Anaconda Prompt" window by hitting the Start button and typing "Anaconda Prompt". If you don't see the application "Anaconda Prompt", you might not have conda or Miniconda installed. In that case, you can install it [here](https://conda.io/miniconda.html).
2. Check that you have 64-bit conda installed rather than 32-bit. You can check this with the command `conda info`. The `platform` should be `win-64` for Windows or `osx-64` for Mac.
3. Check that you have conda 4.7.8 or later. You can check the version with the command `conda -V`. If you have a previous version installed, you can update it using the command: `conda update conda`.
4. On Linux, if the error is `gcc: error trying to exec 'cc1plus': execvp: No such file or directory`, install build essentials using the command `sudo apt-get install build-essential`.
5. Pass a new name as the first parameter to automl_setup so that it creates a new conda environment. You can view existing conda environments using `conda env list` and remove them with `conda env remove -n <environmentname>`.
## automl_setup_linux.sh fails
If automl_setup_linux.sh fails on Ubuntu Linux with the error: `unable to execute 'gcc': No such file or directory`
1. Make sure that outbound ports 53 and 80 are enabled. On an Azure VM, you can do this from the Azure Portal by selecting the VM and clicking on Networking.
2. Run the command: `sudo apt-get update`
3. Run the command: `sudo apt-get install build-essential --fix-missing`
4. Run `automl_setup_linux.sh` again.
## configuration.ipynb fails
1) For local conda, make sure that you have successfully run automl_setup first.
@@ -217,6 +225,17 @@ The main code of the file must be indented so that it is under this condition.
4) Check that the region is one of the supported regions: `eastus2`, `eastus`, `westcentralus`, `southeastasia`, `westeurope`, `australiaeast`, `westus2`, `southcentralus`
5) Check that you have access to the region using the Azure Portal.
## import AutoMLConfig fails after upgrade from before 1.0.76 to 1.0.76 or later
There were package changes in automated machine learning version 1.0.76, which require the previous version to be uninstalled before upgrading to the new version.
If you have manually upgraded from a version of automated machine learning before 1.0.76 to 1.0.76 or later, you may get the error:
`ImportError: cannot import name 'AutoMLConfig'`
This can be resolved by running:
`pip uninstall azureml-train-automl` and then
`pip install azureml-train-automl`
The automl_setup.cmd script does this automatically.
## workspace.from_config fails
If the call `ws = Workspace.from_config()` fails:
1) Make sure that you have run the `configuration.ipynb` notebook successfully.
@@ -232,13 +251,29 @@ If a sample notebook fails with an error that property, method or library does n
## Numpy import fails on Windows
Some Windows environments see an error loading numpy with the latest Python version 3.6.8. If you see this issue, try with Python version 3.6.7.
## Numpy import fails
Check the tensorflow version in the automated ML conda environment. Supported versions are < 1.13. Uninstall tensorflow from the environment if its version is >= 1.13.
You may check the version of tensorflow and uninstall it as follows:
1) Start a command shell and activate the conda environment where the automated ML packages are installed.
2) Enter `pip freeze` and look for `tensorflow`; if found, the version listed should be < 1.13.
3) If the listed version is not a supported version, run `pip uninstall tensorflow` in the command shell and enter y for confirmation.
## KeyError: 'brand' when running AutoML on local compute or Azure Databricks cluster
If a new environment was created after 10 June 2020 using SDK 1.7.0 or lower, training may fail with the above error due to an update in the py-cpuinfo package. (Environments created on or before 10 June 2020 are unaffected, as well as experiments run on remote compute as cached training images are used.) To work around this issue, either of the two following steps can be taken:
1) Update the SDK version to 1.8.0 or higher (this will also downgrade py-cpuinfo to 5.0.0):
`pip install --upgrade azureml-sdk[automl]`
2) Downgrade the installed version of py-cpuinfo to 5.0.0:
`pip install py-cpuinfo==5.0.0`
## Remote run: DsvmCompute.create fails
There are several reasons why DsvmCompute.create can fail. The reason is usually in the error message, but you have to look at the end of the error message for the detailed reason. Some common reasons are:
1) `Compute name is invalid, it should start with a letter, be between 2 and 16 character, and only include letters (a-zA-Z), numbers (0-9) and \'-\'.` Note that underscore is not allowed in the name.
2) `The requested VM size xxxxx is not available in the current region.` You can select a different region or vm_size.
## Remote run: Unable to establish SSH connection
Automated ML uses the SSH protocol to communicate with remote DSVMs. This defaults to port 22. Possible causes for this error are:
1) The DSVM is not ready for SSH connections. When DSVM creation completes, the DSVM might still not be ready to accept SSH connections. The sample notebooks have a one minute delay to allow for this.
2) Your Azure Subscription may restrict the IP address ranges that can access the DSVM on port 22. You can check this in the Azure Portal by selecting the Virtual Machine and then clicking Networking. The Virtual Machine name is the name that you provided in the notebook plus 10 alphanumeric characters to make the name unique. The Inbound Port Rules define what can access the VM on specific ports. Note that there is a priority order, so a Deny entry with a low priority number will override an Allow entry with a higher priority number.
@@ -249,13 +284,13 @@ This is often an issue with the `get_data` method.
3) You can get to the error log for the setup iteration by clicking the `Click here to see the run in Azure portal` link, clicking `Back to Experiment`, clicking on the highest run number and then clicking on Logs.
## Remote run: disk full
Automated ML creates files under /tmp/azureml_runs for each iteration that it runs. It creates a folder with the iteration id. For example: AutoML_9a038a18-77cc-48f1-80fb-65abdbc33abe_93. Under this, there is an azureml-logs folder, which contains logs. If you run too many iterations on the same DSVM, these files can fill the disk.
You can delete the files under /tmp/azureml_runs or just delete the VM and create a new one.
If your get_data downloads files, make sure to delete them or they can use disk space as well.
When using DataStore, it is good to specify an absolute path for the files so that they are downloaded just once. If you specify a relative path, it will download a file for each iteration.
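For illustration, here is a minimal sketch (the datastore name, prefix and target path are hypothetical) of downloading datastore files to a fixed absolute path so that repeated iterations reuse one local copy instead of re-downloading:
```
from azureml.core import Datastore, Workspace

ws = Workspace.from_config()

# "workspaceblobstore" is the default blob datastore; substitute your own registered datastore name.
datastore = Datastore.get(ws, datastore_name="workspaceblobstore")

# Download once to an absolute path; iterations that reference this path reuse the same files.
datastore.download(
    target_path="/data/automl_inputs",  # absolute local path (hypothetical)
    prefix="training-data/",            # folder within the datastore (hypothetical)
    overwrite=False,
    show_progress=True,
)
```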
## Remote run: Iterations fail and the log contains "MemoryError"
This can be caused by insufficient memory on the DSVM. Automated ML loads all training data into memory, so the available memory should be more than the training data size.
If you are using a remote DSVM, memory is needed for each concurrent iteration. The max_concurrent_iterations setting specifies the maximum concurrent iterations. For example, if the training data size is 8 GB and max_concurrent_iterations is set to 10, the minimum memory required is at least 80 GB.
To resolve this issue, allocate a DSVM with more memory or reduce the value specified for max_concurrent_iterations.
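For example, here is a minimal sketch (values are illustrative; `training_data`, `label_column_name` and `compute_target` are the objects created earlier in the sample notebooks) of lowering `max_concurrent_iterations` so that fewer iterations hold the training data in memory at the same time:
```
from azureml.train.automl import AutoMLConfig

automl_settings = {
    "primary_metric": "average_precision_score_weighted",
    "n_cross_validations": 3,
    "enable_early_stopping": True,
    # Each concurrent iteration loads the training data into memory, so a lower
    # value reduces the peak memory needed on the DSVM or compute cluster.
    "max_concurrent_iterations": 2,
}

automl_config = AutoMLConfig(
    task="classification",
    training_data=training_data,          # dataset created earlier in the notebook
    label_column_name=label_column_name,  # label column name used in the notebook
    compute_target=compute_target,        # remote compute created earlier
    **automl_settings,
)
```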
@@ -1,22 +1,30 @@
name: azure_automl name: azure_automl
channels:
- conda-forge
- pytorch
- main
dependencies: dependencies:
# The python interpreter version. # The python interpreter version.
# Currently Azure ML only supports 3.5.2 and later. # Currently Azure ML only supports 3.6.0 and later.
- python>=3.5.2,<3.6.8 - pip==20.2.4
- nb_conda - python>=3.6,<3.9
- matplotlib==2.1.0 - matplotlib==3.2.1
- numpy>=1.11.0,<1.15.0 - py-xgboost==1.3.3
- cython - pytorch::pytorch=1.4.0
- urllib3<1.24 - conda-forge::fbprophet==0.7.1
- scipy>=1.0.0,<=1.1.0 - cudatoolkit=10.1.243
- scikit-learn>=0.18.0,<=0.19.1 - tqdm==4.63.1
- pandas>=0.22.0,<0.23.0 - notebook
- tensorflow>=1.12.0 - pywin32==225
- py-xgboost<=0.80 - PySocks==1.7.1
- conda-forge::pyqt==5.12.3
- pip: - pip:
# Required packages for AzureML execution, history, and data preparation. # Required packages for AzureML execution, history, and data preparation.
- azureml-sdk[automl,explain] - azureml-widgets~=1.40.0
- azureml-widgets - pytorch-transformers==1.0.0
- pandas_ml - spacy==2.2.4
- pystan==2.19.1.1
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.40.0/validated_win32_requirements.txt [--no-deps]
- arch==4.14
@@ -0,0 +1,33 @@
name: azure_automl
channels:
- conda-forge
- pytorch
- main
dependencies:
# The python interpreter version.
# Currently Azure ML only supports 3.6.0 and later.
- pip==20.2.4
- python>=3.6,<3.9
- boto3==1.20.19
- botocore<=1.23.19
- matplotlib==3.2.1
- numpy==1.19.5
- cython==0.29.14
- urllib3==1.26.7
- scipy>=1.4.1,<=1.5.2
- scikit-learn==0.22.1
- py-xgboost<=1.3.3
- holidays==0.10.3
- conda-forge::fbprophet==0.7.1
- pytorch::pytorch=1.4.0
- cudatoolkit=10.1.243
- pip:
# Required packages for AzureML execution, history, and data preparation.
- azureml-widgets~=1.40.0
- pytorch-transformers==1.0.0
- spacy==2.2.4
- pystan==2.19.1.1
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.40.0/validated_linux_requirements.txt [--no-deps]
- arch==4.14
@@ -1,23 +1,34 @@
name: azure_automl name: azure_automl
channels:
- conda-forge
- pytorch
- main
dependencies: dependencies:
# The python interpreter version. # The python interpreter version.
# Currently Azure ML only supports 3.5.2 and later. # Currently Azure ML only supports 3.6.0 and later.
- python>=3.5.2,<3.6.8 - pip==20.2.4
- nb_conda - nomkl
- matplotlib==2.1.0 - python>=3.6,<3.9
- numpy>=1.15.3 - boto3==1.20.19
- cython - botocore<=1.23.19
- urllib3<1.24 - matplotlib==3.2.1
- scipy>=1.0.0,<=1.1.0 - numpy==1.19.5
- scikit-learn>=0.18.0,<=0.19.1 - cython==0.29.14
- pandas>=0.22.0,<0.23.0 - urllib3==1.26.7
- tensorflow>=1.12.0 - scipy>=1.4.1,<=1.5.2
- py-xgboost<=0.80 - scikit-learn==0.22.1
- py-xgboost<=1.3.3
- holidays==0.10.3
- conda-forge::fbprophet==0.7.1
- pytorch::pytorch=1.4.0
- cudatoolkit=9.0
- pip: - pip:
# Required packages for AzureML execution, history, and data preparation. # Required packages for AzureML execution, history, and data preparation.
- azureml-sdk[automl,explain] - azureml-widgets~=1.40.0
- azureml-widgets - pytorch-transformers==1.0.0
- pandas_ml - spacy==2.2.4
- pystan==2.19.1.1
- https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz
- -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.40.0/validated_darwin_requirements.txt [--no-deps]
- arch==4.14
@@ -6,14 +6,28 @@ set PIP_NO_WARN_SCRIPT_LOCATION=0
IF "%conda_env_name%"=="" SET conda_env_name="azure_automl" IF "%conda_env_name%"=="" SET conda_env_name="azure_automl"
IF "%automl_env_file%"=="" SET automl_env_file="automl_env.yml" IF "%automl_env_file%"=="" SET automl_env_file="automl_env.yml"
SET check_conda_version_script="check_conda_version.py"
IF NOT EXIST %automl_env_file% GOTO YmlMissing IF NOT EXIST %automl_env_file% GOTO YmlMissing
IF "%CONDA_EXE%"=="" GOTO CondaMissing
IF NOT EXIST %check_conda_version_script% GOTO VersionCheckMissing
python "%check_conda_version_script%"
IF errorlevel 1 GOTO ErrorExit:
SET replace_version_script="replace_latest_version.ps1"
IF EXIST %replace_version_script% (
powershell -file %replace_version_script% %automl_env_file%
)
call conda activate %conda_env_name% 2>nul: call conda activate %conda_env_name% 2>nul:
if not errorlevel 1 ( if not errorlevel 1 (
echo Upgrading azureml-sdk[automl,notebooks,explain] in existing conda environment %conda_env_name% echo Upgrading existing conda environment %conda_env_name%
call pip install --upgrade azureml-sdk[automl,notebooks,explain] call pip uninstall azureml-train-automl -y -q
call conda env update --name %conda_env_name% --file %automl_env_file%
if errorlevel 1 goto ErrorExit if errorlevel 1 goto ErrorExit
) else ( ) else (
call conda env create -f %automl_env_file% -n %conda_env_name% call conda env create -f %automl_env_file% -n %conda_env_name%
@@ -42,6 +56,19 @@ IF NOT "%options%"=="nolaunch" (
goto End goto End
:CondaMissing
echo Please run this script from an Anaconda Prompt window.
echo You can start an Anaconda Prompt window by
echo typing Anaconda Prompt on the Start menu.
echo If you don't see the Anaconda Prompt app, install Miniconda.
echo If you are running an older version of Miniconda or Anaconda,
echo you can upgrade using the command: conda update conda
goto End
:VersionCheckMissing
echo File %check_conda_version_script% not found.
goto End
:YmlMissing :YmlMissing
echo File %automl_env_file% not found. echo File %automl_env_file% not found.
@@ -4,6 +4,7 @@ CONDA_ENV_NAME=$1
AUTOML_ENV_FILE=$2 AUTOML_ENV_FILE=$2
OPTIONS=$3 OPTIONS=$3
PIP_NO_WARN_SCRIPT_LOCATION=0 PIP_NO_WARN_SCRIPT_LOCATION=0
CHECK_CONDA_VERSION_SCRIPT="check_conda_version.py"
if [ "$CONDA_ENV_NAME" == "" ] if [ "$CONDA_ENV_NAME" == "" ]
then then
@@ -12,7 +13,7 @@ fi
if [ "$AUTOML_ENV_FILE" == "" ] if [ "$AUTOML_ENV_FILE" == "" ]
then then
AUTOML_ENV_FILE="automl_env.yml" AUTOML_ENV_FILE="automl_env_linux.yml"
fi fi
if [ ! -f $AUTOML_ENV_FILE ]; then if [ ! -f $AUTOML_ENV_FILE ]; then
@@ -20,10 +21,23 @@ if [ ! -f $AUTOML_ENV_FILE ]; then
exit 1 exit 1
fi fi
if [ ! -f $CHECK_CONDA_VERSION_SCRIPT ]; then
echo "File $CHECK_CONDA_VERSION_SCRIPT not found"
exit 1
fi
python "$CHECK_CONDA_VERSION_SCRIPT"
if [ $? -ne 0 ]; then
exit 1
fi
sed -i 's/AZUREML-SDK-VERSION/latest/' $AUTOML_ENV_FILE
if source activate $CONDA_ENV_NAME 2> /dev/null if source activate $CONDA_ENV_NAME 2> /dev/null
then then
echo "Upgrading azureml-sdk[automl,notebooks,explain] in existing conda environment" $CONDA_ENV_NAME echo "Upgrading existing conda environment" $CONDA_ENV_NAME
pip install --upgrade azureml-sdk[automl,notebooks,explain] && pip uninstall azureml-train-automl -y -q
conda env update --name $CONDA_ENV_NAME --file $AUTOML_ENV_FILE &&
jupyter nbextension uninstall --user --py azureml.widgets jupyter nbextension uninstall --user --py azureml.widgets
else else
conda env create -f $AUTOML_ENV_FILE -n $CONDA_ENV_NAME && conda env create -f $AUTOML_ENV_FILE -n $CONDA_ENV_NAME &&
@@ -4,6 +4,7 @@ CONDA_ENV_NAME=$1
AUTOML_ENV_FILE=$2 AUTOML_ENV_FILE=$2
OPTIONS=$3 OPTIONS=$3
PIP_NO_WARN_SCRIPT_LOCATION=0 PIP_NO_WARN_SCRIPT_LOCATION=0
CHECK_CONDA_VERSION_SCRIPT="check_conda_version.py"
if [ "$CONDA_ENV_NAME" == "" ] if [ "$CONDA_ENV_NAME" == "" ]
then then
@@ -20,10 +21,24 @@ if [ ! -f $AUTOML_ENV_FILE ]; then
exit 1 exit 1
fi fi
if [ ! -f $CHECK_CONDA_VERSION_SCRIPT ]; then
echo "File $CHECK_CONDA_VERSION_SCRIPT not found"
exit 1
fi
python "$CHECK_CONDA_VERSION_SCRIPT"
if [ $? -ne 0 ]; then
exit 1
fi
sed -i '' 's/AZUREML-SDK-VERSION/latest/' $AUTOML_ENV_FILE
brew install libomp
if source activate $CONDA_ENV_NAME 2> /dev/null if source activate $CONDA_ENV_NAME 2> /dev/null
then then
echo "Upgrading azureml-sdk[automl,notebooks,explain] in existing conda environment" $CONDA_ENV_NAME echo "Upgrading existing conda environment" $CONDA_ENV_NAME
pip install --upgrade azureml-sdk[automl,notebooks,explain] && pip uninstall azureml-train-automl -y -q
conda env update --name $CONDA_ENV_NAME --file $AUTOML_ENV_FILE &&
jupyter nbextension uninstall --user --py azureml.widgets jupyter nbextension uninstall --user --py azureml.widgets
else else
conda env create -f $AUTOML_ENV_FILE -n $CONDA_ENV_NAME && conda env create -f $AUTOML_ENV_FILE -n $CONDA_ENV_NAME &&
@@ -31,7 +46,6 @@ else
conda install lightgbm -c conda-forge -y && conda install lightgbm -c conda-forge -y &&
python -m ipykernel install --user --name $CONDA_ENV_NAME --display-name "Python ($CONDA_ENV_NAME)" && python -m ipykernel install --user --name $CONDA_ENV_NAME --display-name "Python ($CONDA_ENV_NAME)" &&
jupyter nbextension uninstall --user --py azureml.widgets && jupyter nbextension uninstall --user --py azureml.widgets &&
pip install numpy==1.15.3 &&
echo "" && echo "" &&
echo "" && echo "" &&
echo "***************************************" && echo "***************************************" &&
@@ -0,0 +1,26 @@
from distutils.version import LooseVersion
import platform
try:
import conda
except Exception:
print('Failed to import conda.')
print('This setup is usually run from the base conda environment.')
print('You can activate the base environment using the command "conda activate base"')
exit(1)
architecture = platform.architecture()[0]
if architecture != "64bit":
print('This setup requires 64bit Anaconda or Miniconda. Found: ' + architecture)
exit(1)
minimumVersion = "4.7.8"
versionInvalid = (LooseVersion(conda.__version__) < LooseVersion(minimumVersion))
if versionInvalid:
print('Setup requires conda version ' + minimumVersion + ' or higher.')
print('You can use the command "conda update conda" to upgrade conda.')
exit(versionInvalid)
@@ -0,0 +1,4 @@
name: auto-ml-classification-bank-marketing-all-features
dependencies:
- pip:
- azureml-sdk
@@ -0,0 +1,487 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated Machine Learning\n",
"_**Classification of credit card fraudulent transactions on remote compute **_\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Train](#Train)\n",
"1. [Results](#Results)\n",
"1. [Test](#Test)\n",
"1. [Acknowledgements](#Acknowledgements)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"\n",
"In this example we use the associated credit card dataset to showcase how you can use AutoML for a simple classification problem. The goal is to predict if a credit card transaction is considered a fraudulent charge.\n",
"\n",
"This notebook is using remote compute to train the model.\n",
"\n",
"If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n",
"\n",
"In this notebook you will learn how to:\n",
"1. Create an experiment using an existing workspace.\n",
"2. Configure AutoML using `AutoMLConfig`.\n",
"3. Train the model using remote compute.\n",
"4. Explore the results.\n",
"5. Test the fitted model."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"from matplotlib import pyplot as plt\n",
"import pandas as pd\n",
"import os\n",
"\n",
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core.dataset import Dataset\n",
"from azureml.train.automl import AutoMLConfig"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for experiment\n",
"experiment_name = \"automl-classification-ccard-remote\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Experiment Name\"] = experiment.name\n",
"pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create or Attach existing AmlCompute\n",
"A compute target is required to execute the Automated ML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
"\n",
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your CPU cluster\n",
"cpu_cluster_name = \"cpu-cluster-1\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
" )\n",
" compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
"compute_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load Data\n",
"\n",
"Load the credit card dataset from a csv file containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. Next, we'll split the data using random_split and extract the training data for the model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"name": "load-data"
},
"outputs": [],
"source": [
"data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv\"\n",
"dataset = Dataset.Tabular.from_delimited_files(data)\n",
"training_data, validation_data = dataset.random_split(percentage=0.8, seed=223)\n",
"label_column_name = \"Class\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train\n",
"\n",
"Instantiate a AutoMLConfig object. This defines the settings and data used to run the experiment.\n",
"\n",
"|Property|Description|\n",
"|-|-|\n",
"|**task**|classification or regression|\n",
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
"|**enable_early_stopping**|Stop the run if the metric score is not showing improvement.|\n",
"|**n_cross_validations**|Number of cross validation splits.|\n",
"|**training_data**|Input dataset, containing both features and label column.|\n",
"|**label_column_name**|The name of the label column.|\n",
"\n",
"**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"name": "automl-config"
},
"outputs": [],
"source": [
"automl_settings = {\n",
" \"n_cross_validations\": 3,\n",
" \"primary_metric\": \"average_precision_score_weighted\",\n",
" \"enable_early_stopping\": True,\n",
" \"max_concurrent_iterations\": 2, # This is a limit for testing purpose, please increase it as per cluster size\n",
" \"experiment_timeout_hours\": 0.25, # This is a time limit for testing purposes, remove it for real use cases, this will drastically limit ablity to find the best model possible\n",
" \"verbosity\": logging.INFO,\n",
"}\n",
"\n",
"automl_config = AutoMLConfig(\n",
" task=\"classification\",\n",
" debug_log=\"automl_errors.log\",\n",
" compute_target=compute_target,\n",
" training_data=training_data,\n",
" label_column_name=label_column_name,\n",
" **automl_settings,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Call the `submit` method on the experiment object and pass the run configuration. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"remote_run = experiment.submit(automl_config, show_output=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# If you need to retrieve a run that already started, use the following code\n",
"# from azureml.train.automl.run import AutoMLRun\n",
"# remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Results"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Widget for Monitoring Runs\n",
"\n",
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
"\n",
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"widget-rundetails-sample"
]
},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"\n",
"RunDetails(remote_run).show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"remote_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Explain model\n",
"\n",
"Automated ML models can be explained and visualized using the SDK Explainability library. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Analyze results\n",
"\n",
"### Retrieve the Best Model\n",
"\n",
"Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = remote_run.get_output()\n",
"fitted_model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Print the properties of the model\n",
"The fitted_model is a python object and you can read the different properties of the object.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test the fitted model\n",
"\n",
"Now that the model is trained, split the data in the same way the data was split for training (The difference here is the data is being split locally) and then run the test data through the trained model to get the predicted values."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# convert the test data to dataframe\n",
"X_test_df = validation_data.drop_columns(\n",
" columns=[label_column_name]\n",
").to_pandas_dataframe()\n",
"y_test_df = validation_data.keep_columns(\n",
" columns=[label_column_name], validate=True\n",
").to_pandas_dataframe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# call the predict functions on the model\n",
"y_pred = fitted_model.predict(X_test_df)\n",
"y_pred"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Calculate metrics for the prediction\n",
"\n",
"Now visualize the data on a scatter plot to show what our truth (actual) values are compared to the predicted values \n",
"from the trained model that was returned."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.metrics import confusion_matrix\n",
"import numpy as np\n",
"import itertools\n",
"\n",
"cf = confusion_matrix(y_test_df.values, y_pred)\n",
"plt.imshow(cf, cmap=plt.cm.Blues, interpolation=\"nearest\")\n",
"plt.colorbar()\n",
"plt.title(\"Confusion Matrix\")\n",
"plt.xlabel(\"Predicted\")\n",
"plt.ylabel(\"Actual\")\n",
"class_labels = [\"False\", \"True\"]\n",
"tick_marks = np.arange(len(class_labels))\n",
"plt.xticks(tick_marks, class_labels)\n",
"plt.yticks([-0.5, 0, 1, 1.5], [\"\", \"False\", \"True\", \"\"])\n",
"# plotting text value inside cells\n",
"thresh = cf.max() / 2.0\n",
"for i, j in itertools.product(range(cf.shape[0]), range(cf.shape[1])):\n",
" plt.text(\n",
" j,\n",
" i,\n",
" format(cf[i, j], \"d\"),\n",
" horizontalalignment=\"center\",\n",
" color=\"white\" if cf[i, j] > thresh else \"black\",\n",
" )\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Acknowledgements"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This Credit Card fraud Detection dataset is made available under the Open Database License: http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/ and is available at: https://www.kaggle.com/mlg-ulb/creditcardfraud\n",
"\n",
"The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Universit\u00c3\u00a9 Libre de Bruxelles) on big data mining and fraud detection.\n",
"More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project\n",
"\n",
"Please cite the following works:\n",
"\n",
"Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015\n",
"\n",
"Dal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. Learned lessons in credit card fraud detection from a practitioner perspective, Expert systems with applications,41,10,4915-4928,2014, Pergamon\n",
"\n",
"Dal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE\n",
"\n",
"Dal Pozzolo, Andrea Adaptive Machine learning for credit card fraud detection ULB MLG PhD thesis (supervised by G. Bontempi)\n",
"\n",
"Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-A\u00c3\u00abl; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information fusion,41, 182-194,2018,Elsevier\n",
"\n",
"Carcillo, Fabrizio; Le Borgne, Yann-A\u00c3\u00abl; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing\n",
"\n",
"Bertrand Lebichot, Yann-A\u00c3\u00abl Le Borgne, Liyun He, Frederic Obl\u00c3\u00a9, Gianluca Bontempi Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection, INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019\n",
"\n",
"Fabrizio Carcillo, Yann-A\u00c3\u00abl Le Borgne, Olivier Caelen, Frederic Obl\u00c3\u00a9, Gianluca Bontempi Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection Information Sciences, 2019"
]
}
],
"metadata": {
"authors": [
{
"name": "ratanase"
}
],
"category": "tutorial",
"compute": [
"AML Compute"
],
"datasets": [
"Creditcard"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"file_extension": ".py",
"framework": [
"None"
],
"friendly_name": "Classification of credit card fraudulent transactions using Automated ML",
"index_order": 5,
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
},
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"tags": [
"remote_run",
"AutomatedML"
],
"task": "Classification",
"version": "3.6.7"
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -0,0 +1,4 @@
name: auto-ml-classification-credit-card-fraud
dependencies:
- pip:
- azureml-sdk
@@ -0,0 +1,592 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated Machine Learning\n",
"_**Text Classification Using Deep Learning**_\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Data](#Data)\n",
"1. [Train](#Train)\n",
"1. [Evaluate](#Evaluate)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"This notebook demonstrates classification with text data using deep learning in AutoML.\n",
"\n",
"AutoML highlights here include using deep neural networks (DNNs) to create embedded features from text data. Depending on the compute cluster the user provides, AutoML tried out Bidirectional Encoder Representations from Transformers (BERT) when a GPU compute is used, and Bidirectional Long-Short Term neural network (BiLSTM) when a CPU compute is used, thereby optimizing the choice of DNN for the uesr's setup.\n",
"\n",
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
"\n",
"Notebook synopsis:\n",
"\n",
"1. Creating an Experiment in an existing Workspace\n",
"2. Configuration and remote run of AutoML for a text dataset (20 Newsgroups dataset from scikit-learn) for classification\n",
"3. Registering the best model for future use\n",
"4. Evaluating the final model on a test set"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import logging\n",
"import os\n",
"import shutil\n",
"\n",
"import pandas as pd\n",
"\n",
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.core.dataset import Dataset\n",
"from azureml.core.compute import AmlCompute\n",
"from azureml.core.compute import ComputeTarget\n",
"from azureml.core.run import Run\n",
"from azureml.widgets import RunDetails\n",
"from azureml.core.model import Model\n",
"from helper import run_inference, get_result_df\n",
"from azureml.train.automl import AutoMLConfig\n",
"from sklearn.datasets import fetch_20newsgroups"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
]
},
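{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an optional sanity check, the next cell prints the Azure ML SDK version installed in the current environment. This is only a minimal sketch; the notebook does not pin a specific minimum version here."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: print the installed Azure ML SDK version for reference.\n",
"print(\"Azure ML SDK version:\", azureml.core.VERSION)"
]
},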
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As part of the setup you have already created a <b>Workspace</b>. To run AutoML, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve, while a Run corresponds to a specific approach to the problem."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# Choose an experiment name.\n",
"experiment_name = \"automl-classification-text-dnn\"\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace Name\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Experiment Name\"] = experiment.name\n",
"pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Set up a compute cluster\n",
"This section uses a user-provided compute cluster (named \"dnntext-cluster\" in this example). If a cluster with this name does not exist in the user's workspace, the below code will create a new cluster. You can choose the parameters of the cluster as mentioned in the comments.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
"\n",
"Whether you provide/select a CPU or GPU cluster, AutoML will choose the appropriate DNN for that setup - BiLSTM or BERT text featurizer will be included in the candidate featurizers on CPU and GPU respectively. If your goal is to obtain the most accurate model, we recommend you use GPU clusters since BERT featurizers usually outperform BiLSTM featurizers."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"num_nodes = 2\n",
"\n",
"# Choose a name for your cluster.\n",
"amlcompute_cluster_name = \"dnntext-cluster\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_NC6\", # CPU for BiLSTM, such as \"STANDARD_D2_V2\"\n",
" # To use BERT (this is recommended for best performance), select a GPU such as \"STANDARD_NC6\"\n",
" # or similar GPU option\n",
" # available in your workspace\n",
" idle_seconds_before_scaledown=60,\n",
" max_nodes=num_nodes,\n",
" )\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"\n",
"compute_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Get data\n",
"For this notebook we will use 20 Newsgroups data from scikit-learn. We filter the data to contain four classes and take a sample as training data. Please note that for accuracy improvement, more data is needed. For this notebook we provide a small-data example so that you can use this template to use with your larger sized data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data_dir = \"text-dnn-data\" # Local directory to store data\n",
"blobstore_datadir = data_dir # Blob store directory to store data in\n",
"target_column_name = \"y\"\n",
"feature_column_name = \"X\"\n",
"\n",
"\n",
"def get_20newsgroups_data():\n",
" \"\"\"Fetches 20 Newsgroups data from scikit-learn\n",
" Returns them in form of pandas dataframes\n",
" \"\"\"\n",
" remove = (\"headers\", \"footers\", \"quotes\")\n",
" categories = [\n",
" \"rec.sport.baseball\",\n",
" \"rec.sport.hockey\",\n",
" \"comp.graphics\",\n",
" \"sci.space\",\n",
" ]\n",
"\n",
" data = fetch_20newsgroups(\n",
" subset=\"train\",\n",
" categories=categories,\n",
" shuffle=True,\n",
" random_state=42,\n",
" remove=remove,\n",
" )\n",
" data = pd.DataFrame(\n",
" {feature_column_name: data.data, target_column_name: data.target}\n",
" )\n",
"\n",
" data_train = data[:200]\n",
" data_test = data[200:300]\n",
"\n",
" data_train = remove_blanks_20news(\n",
" data_train, feature_column_name, target_column_name\n",
" )\n",
" data_test = remove_blanks_20news(data_test, feature_column_name, target_column_name)\n",
"\n",
" return data_train, data_test\n",
"\n",
"\n",
"def remove_blanks_20news(data, feature_column_name, target_column_name):\n",
"\n",
" data[feature_column_name] = (\n",
" data[feature_column_name]\n",
" .replace(r\"\\n\", \" \", regex=True)\n",
" .apply(lambda x: x.strip())\n",
" )\n",
" data = data[data[feature_column_name] != \"\"]\n",
"\n",
" return data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Fetch data and upload to datastore for use in training"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data_train, data_test = get_20newsgroups_data()\n",
"\n",
"if not os.path.isdir(data_dir):\n",
" os.mkdir(data_dir)\n",
"\n",
"train_data_fname = data_dir + \"/train_data.csv\"\n",
"test_data_fname = data_dir + \"/test_data.csv\"\n",
"\n",
"data_train.to_csv(train_data_fname, index=False)\n",
"data_test.to_csv(test_data_fname, index=False)\n",
"\n",
"datastore = ws.get_default_datastore()\n",
"datastore.upload(src_dir=data_dir, target_path=blobstore_datadir, overwrite=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, blobstore_datadir + \"/train_data.csv\")]\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prepare AutoML run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook uses the blocked_models parameter to exclude some models that can take a longer time to train on some text datasets. You can choose to remove models from the blocked_models list but you may need to increase the experiment_timeout_hours parameter value to get results."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_settings = {\n",
" \"experiment_timeout_minutes\": 30,\n",
" \"primary_metric\": \"accuracy\",\n",
" \"max_concurrent_iterations\": num_nodes,\n",
" \"max_cores_per_iteration\": -1,\n",
" \"enable_dnn\": True,\n",
" \"enable_early_stopping\": True,\n",
" \"validation_size\": 0.3,\n",
" \"verbosity\": logging.INFO,\n",
" \"enable_voting_ensemble\": False,\n",
" \"enable_stack_ensemble\": False,\n",
"}\n",
"\n",
"automl_config = AutoMLConfig(\n",
" task=\"classification\",\n",
" debug_log=\"automl_errors.log\",\n",
" compute_target=compute_target,\n",
" training_data=train_dataset,\n",
" label_column_name=target_column_name,\n",
" blocked_models=[\"LightGBM\", \"XGBoostClassifier\"],\n",
" **automl_settings,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Submit AutoML Run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_run = experiment.submit(automl_config, show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Displaying the run objects gives you links to the visual tools in the Azure Portal. Go try them!"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrieve the Best Model\n",
"Below we select the best model pipeline from our iterations, use it to test on test data on the same compute cluster."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For local inferencing, you can load the model locally via. the method `remote_run.get_output()`. For more information on the arguments expected by this method, you can run `remote_run.get_output??`.\n",
"Note that when the model contains BERT, this step will require pytorch and pytorch-transformers installed in your local environment. The exact versions of these packages can be found in the **automl_env.yml** file located in the local copy of your azureml-examples folder here: \"azureml-examples/python-sdk/tutorials/automl-with-azureml\""
]
},
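{
"cell_type": "markdown",
"metadata": {},
"source": [
"The next cell is an optional, minimal sketch of such local inferencing. It assumes the required packages are installed locally (for example, pytorch and pytorch-transformers when the best model contains BERT) and that the `data_test` dataframe created earlier is still in memory."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional local inferencing sketch (see the assumptions in the cell above).\n",
"# get_output() downloads and deserializes the best fitted model into the local environment.\n",
"local_best_run, fitted_model = automl_run.get_output()\n",
"local_predictions = fitted_model.predict(data_test[[feature_column_name]])\n",
"local_predictions[:10]"
]
},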
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Retrieve the best Run object\n",
"best_run = automl_run.get_best_child()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can now see what text transformations are used to convert text data to features for this dataset, including deep learning transformations based on BiLSTM or Transformer (BERT is one implementation of a Transformer) models."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Download the featurization summary JSON file locally\n",
"best_run.download_file(\n",
" \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
")\n",
"\n",
"# Render the JSON as a pandas DataFrame\n",
"with open(\"featurization_summary.json\", \"r\") as f:\n",
" records = json.load(f)\n",
"\n",
"featurization_summary = pd.DataFrame.from_records(records)\n",
"featurization_summary[\"Transformations\"].tolist()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Registering the best model\n",
"We now register the best fitted model from the AutoML Run for use in future deployments. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Get results stats, extract the best model from AutoML run, download and register the resultant best model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"summary_df = get_result_df(automl_run)\n",
"best_dnn_run_id = summary_df[\"run_id\"].iloc[0]\n",
"best_dnn_run = Run(experiment, best_dnn_run_id)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_dir = \"Model\" # Local folder where the model will be stored temporarily\n",
"if not os.path.isdir(model_dir):\n",
" os.mkdir(model_dir)\n",
"\n",
"best_dnn_run.download_file(\"outputs/model.pkl\", model_dir + \"/model.pkl\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Register the model in your Azure Machine Learning Workspace. If you previously registered a model, please make sure to delete it so as to replace it with this new model."
]
},
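{
"cell_type": "markdown",
"metadata": {},
"source": [
"The following cell is an optional cleanup sketch: it deletes any previously registered versions of the model so that the new registration replaces them. It assumes the model name `textDNN-20News`, matching the name used in the registration cell below."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional cleanup sketch: delete previously registered versions of this model, if any exist.\n",
"for old_model in Model.list(ws, name=\"textDNN-20News\"):\n",
"    print(\"Deleting model\", old_model.name, \"version\", old_model.version)\n",
"    old_model.delete()"
]
},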
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Register the model\n",
"model_name = \"textDNN-20News\"\n",
"model = Model.register(\n",
" model_path=model_dir + \"/model.pkl\", model_name=model_name, tags=None, workspace=ws\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Evaluate on Test Data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We now use the best fitted model from the AutoML Run to make predictions on the test set. \n",
"\n",
"Test set schema should match that of the training set."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_dataset = Dataset.Tabular.from_delimited_files(\n",
" path=[(datastore, blobstore_datadir + \"/test_data.csv\")]\n",
")\n",
"\n",
"# preview the first 3 rows of the dataset\n",
"test_dataset.take(3).to_pandas_dataframe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_experiment = Experiment(ws, experiment_name + \"_test\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"script_folder = os.path.join(os.getcwd(), \"inference\")\n",
"os.makedirs(script_folder, exist_ok=True)\n",
"shutil.copy(\"infer.py\", script_folder)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_run = run_inference(\n",
" test_experiment,\n",
" compute_target,\n",
" script_folder,\n",
" best_dnn_run,\n",
" test_dataset,\n",
" target_column_name,\n",
" model_name,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Display computed metrics"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"RunDetails(test_run).show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_run.wait_for_completion()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pd.Series(test_run.get_metrics())"
]
}
],
"metadata": {
"authors": [
{
"name": "anshirga"
}
],
"compute": [
"AML Compute"
],
"datasets": [
"None"
],
"deployment": [
"None"
],
"exclude_from_index": false,
"framework": [
"None"
],
"friendly_name": "DNN Text Featurization",
"index_order": 2,
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
},
"tags": [
"None"
],
"task": "Text featurization using DNNs for classification"
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -0,0 +1,4 @@
name: auto-ml-classification-text-dnn
dependencies:
- pip:
- azureml-sdk

View File

@@ -0,0 +1,68 @@
import pandas as pd
from azureml.core import Environment
from azureml.train.estimator import Estimator
from azureml.core.run import Run


def run_inference(
    test_experiment,
    compute_target,
    script_folder,
    train_run,
    test_dataset,
    target_column_name,
    model_name,
):
    """Submit a run in test_experiment that scores test_dataset with the model trained in train_run."""
    inference_env = train_run.get_environment()

    est = Estimator(
        source_directory=script_folder,
        entry_script="infer.py",
        script_params={
            "--target_column_name": target_column_name,
            "--model_name": model_name,
        },
        inputs=[test_dataset.as_named_input("test_data")],
        compute_target=compute_target,
        environment_definition=inference_env,
    )

    run = test_experiment.submit(
        est,
        tags={
            "training_run_id": train_run.id,
            "run_algorithm": train_run.properties["run_algorithm"],
            "valid_score": train_run.properties["score"],
            "primary_metric": train_run.properties["primary_metric"],
        },
    )

    run.log("run_algorithm", run.tags["run_algorithm"])
    return run


def get_result_df(remote_run):
    """Summarize the child runs of remote_run in a DataFrame indexed by algorithm, best score first."""
    children = list(remote_run.get_children(recursive=True))
    summary_df = pd.DataFrame(
        index=["run_id", "run_algorithm", "primary_metric", "Score"]
    )
    goal_minimize = False
    for run in children:
        if "run_algorithm" in run.properties and "score" in run.properties:
            summary_df[run.id] = [
                run.id,
                run.properties["run_algorithm"],
                run.properties["primary_metric"],
                float(run.properties["score"]),
            ]
            if "goal" in run.properties:
                goal_minimize = run.properties["goal"].split("_")[-1] == "min"

    summary_df = summary_df.T.sort_values(
        "Score", ascending=goal_minimize
    ).drop_duplicates(["run_algorithm"])
    summary_df = summary_df.set_index("run_algorithm")
    return summary_df

View File

@@ -0,0 +1,68 @@
import argparse

import pandas as pd
import numpy as np
from sklearn.externals import joblib

from azureml.automl.runtime.shared.score import scoring, constants
from azureml.core import Run
from azureml.core.model import Model

parser = argparse.ArgumentParser()
parser.add_argument(
    "--target_column_name",
    type=str,
    dest="target_column_name",
    help="Target Column Name",
)
parser.add_argument(
    "--model_name", type=str, dest="model_name", help="Name of registered model"
)

args = parser.parse_args()
target_column_name = args.target_column_name
model_name = args.model_name

print("args passed are: ")
print("Target column name: ", target_column_name)
print("Name of registered model: ", model_name)

model_path = Model.get_model_path(model_name)
# deserialize the model file back into a sklearn model
model = joblib.load(model_path)

run = Run.get_context()
# get input dataset by name
test_dataset = run.input_datasets["test_data"]

X_test_df = test_dataset.drop_columns(
    columns=[target_column_name]
).to_pandas_dataframe()
y_test_df = (
    test_dataset.with_timestamp_columns(None)
    .keep_columns(columns=[target_column_name])
    .to_pandas_dataframe()
)

predicted = model.predict_proba(X_test_df)
if isinstance(predicted, pd.DataFrame):
    predicted = predicted.values

# Use the AutoML scoring module
train_labels = model.classes_
class_labels = np.unique(
    np.concatenate((y_test_df.values, np.reshape(train_labels, (-1, 1))))
)
classification_metrics = list(constants.CLASSIFICATION_SCALAR_SET)
scores = scoring.score_classification(
    y_test_df.values, predicted, classification_metrics, class_labels, train_labels
)

print("scores:")
print(scores)

for key, value in scores.items():
    run.log(key, value)

View File

@@ -1,503 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated Machine Learning\n",
"_**Classification with Deployment**_\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Train](#Train)\n",
"1. [Deploy](#Deploy)\n",
"1. [Test](#Test)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"\n",
"In this example we use the scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html) to showcase how you can use AutoML for a simple classification problem and deploy it to an Azure Container Instance (ACI).\n",
"\n",
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
"\n",
"In this notebook you will learn how to:\n",
"1. Create an experiment using an existing workspace.\n",
"2. Configure AutoML using `AutoMLConfig`.\n",
"3. Train the model using local compute.\n",
"4. Explore the results.\n",
"5. Register the model.\n",
"6. Create a container image.\n",
"7. Create an Azure Container Instance (ACI) service.\n",
"8. Test the ACI service."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import logging\n",
"\n",
"from matplotlib import pyplot as plt\n",
"import numpy as np\n",
"import pandas as pd\n",
"from sklearn import datasets\n",
"\n",
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.train.automl import AutoMLConfig\n",
"from azureml.train.automl.run import AutoMLRun"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# choose a name for experiment\n",
"experiment_name = 'automl-classification-deployment'\n",
"# project folder\n",
"project_folder = './sample_projects/automl-classification-deployment'\n",
"\n",
"experiment=Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['SDK version'] = azureml.core.VERSION\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train\n",
"\n",
"Instantiate a AutoMLConfig object. This defines the settings and data used to run the experiment.\n",
"\n",
"|Property|Description|\n",
"|-|-|\n",
"|**task**|classification or regression|\n",
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
"|**n_cross_validations**|Number of cross validation splits.|\n",
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
"|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.|\n",
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"digits = datasets.load_digits()\n",
"X_train = digits.data[10:,:]\n",
"y_train = digits.target[10:]\n",
"\n",
"automl_config = AutoMLConfig(task = 'classification',\n",
" name = experiment_name,\n",
" debug_log = 'automl_errors.log',\n",
" primary_metric = 'AUC_weighted',\n",
" iteration_timeout_minutes = 20,\n",
" iterations = 10,\n",
" n_cross_validations = 2,\n",
" verbosity = logging.INFO,\n",
" X = X_train, \n",
" y = y_train,\n",
" path = project_folder)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while.\n",
"In this example, we specify `show_output = True` to print currently running iterations to the console."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"local_run = experiment.submit(automl_config, show_output = True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"local_run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Deploy\n",
"\n",
"### Retrieve the Best Model\n",
"\n",
"Below we select the best pipeline from our iterations. The `get_output` method on `automl_classifier` returns the best run and the fitted model for the last invocation. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = local_run.get_output()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Register the Fitted Model for Deployment\n",
"If neither `metric` nor `iteration` are specified in the `register_model` call, the iteration with the best primary metric is registered."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"description = 'AutoML Model'\n",
"tags = None\n",
"model = local_run.register_model(description = description, tags = tags)\n",
"\n",
"print(local_run.model_id) # This will be written to the script file later in the notebook."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create Scoring Script"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%writefile score.py\n",
"import pickle\n",
"import json\n",
"import numpy\n",
"import azureml.train.automl\n",
"from sklearn.externals import joblib\n",
"from azureml.core.model import Model\n",
"\n",
"\n",
"def init():\n",
" global model\n",
" model_path = Model.get_model_path(model_name = '<<modelid>>') # this name is model.id of model that we want to deploy\n",
" # deserialize the model file back into a sklearn model\n",
" model = joblib.load(model_path)\n",
"\n",
"def run(rawdata):\n",
" try:\n",
" data = json.loads(rawdata)['data']\n",
" data = numpy.array(data)\n",
" result = model.predict(data)\n",
" except Exception as e:\n",
" result = str(e)\n",
" return json.dumps({\"error\": result})\n",
" return json.dumps({\"result\":result.tolist()})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a YAML File for the Environment"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To ensure the fit results are consistent with the training results, the SDK dependency versions need to be the same as the environment that trains the model. Details about retrieving the versions can be found in notebook [12.auto-ml-retrieve-the-training-sdk-versions](12.auto-ml-retrieve-the-training-sdk-versions.ipynb)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"experiment = Experiment(ws, experiment_name)\n",
"ml_run = AutoMLRun(experiment = experiment, run_id = local_run.id)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dependencies = ml_run.get_run_sdk_dependencies(iteration = 7)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for p in ['azureml-train-automl', 'azureml-sdk', 'azureml-core']:\n",
" print('{}\\t{}'.format(p, dependencies[p]))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.conda_dependencies import CondaDependencies\n",
"\n",
"myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'], pip_packages=['azureml-sdk[automl]'])\n",
"\n",
"conda_env_file_name = 'myenv.yml'\n",
"myenv.save_to_file('.', conda_env_file_name)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Substitute the actual version number in the environment file.\n",
"# This is not strictly needed in this notebook because the model should have been generated using the current SDK version.\n",
"# However, we include this in case this code is used on an experiment from a previous SDK version.\n",
"\n",
"with open(conda_env_file_name, 'r') as cefr:\n",
" content = cefr.read()\n",
"\n",
"with open(conda_env_file_name, 'w') as cefw:\n",
" cefw.write(content.replace(azureml.core.VERSION, dependencies['azureml-sdk']))\n",
"\n",
"# Substitute the actual model id in the script file.\n",
"\n",
"script_file_name = 'score.py'\n",
"\n",
"with open(script_file_name, 'r') as cefr:\n",
" content = cefr.read()\n",
"\n",
"with open(script_file_name, 'w') as cefw:\n",
" cefw.write(content.replace('<<modelid>>', local_run.model_id))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a Container Image"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.image import Image, ContainerImage\n",
"\n",
"image_config = ContainerImage.image_configuration(runtime= \"python\",\n",
" execution_script = script_file_name,\n",
" conda_file = conda_env_file_name,\n",
" tags = {'area': \"digits\", 'type': \"automl_classification\"},\n",
" description = \"Image for automl classification sample\")\n",
"\n",
"image = Image.create(name = \"automlsampleimage\",\n",
" # this is the model object \n",
" models = [model],\n",
" image_config = image_config, \n",
" workspace = ws)\n",
"\n",
"image.wait_for_creation(show_output = True)\n",
"\n",
"if image.creation_state == 'Failed':\n",
" print(\"Image build log at: \" + image.image_build_log_uri)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Deploy the Image as a Web Service on Azure Container Instance"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.webservice import AciWebservice\n",
"\n",
"aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1, \n",
" memory_gb = 1, \n",
" tags = {'area': \"digits\", 'type': \"automl_classification\"}, \n",
" description = 'sample service for Automl Classification')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.webservice import Webservice\n",
"\n",
"aci_service_name = 'automl-sample-01'\n",
"print(aci_service_name)\n",
"aci_service = Webservice.deploy_from_image(deployment_config = aciconfig,\n",
" image = image,\n",
" name = aci_service_name,\n",
" workspace = ws)\n",
"aci_service.wait_for_deployment(True)\n",
"print(aci_service.state)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Delete a Web Service"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#aci_service.delete()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Get Logs from a Deployed Web Service"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#aci_service.get_logs()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Randomly select digits and test\n",
"digits = datasets.load_digits()\n",
"X_test = digits.data[:10, :]\n",
"y_test = digits.target[:10]\n",
"images = digits.images[:10]\n",
"\n",
"for index in np.random.choice(len(y_test), 3, replace = False):\n",
" print(index)\n",
" test_sample = json.dumps({'data':X_test[index:index + 1].tolist()})\n",
" predicted = aci_service.run(input_data = test_sample)\n",
" label = y_test[index]\n",
" predictedDict = json.loads(predicted)\n",
" title = \"Label value = %d Predicted value = %s \" % ( label,predictedDict['result'][0])\n",
" fig = plt.figure(1, figsize = (3,3))\n",
" ax1 = fig.add_axes((0,0,.8,.8))\n",
" ax1.set_title(title)\n",
" plt.imshow(images[index], cmap = plt.cm.gray_r, interpolation = 'nearest')\n",
" plt.show()"
]
}
],
"metadata": {
"authors": [
{
"name": "savitam"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,381 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated Machine Learning\n",
"_**Classification using whitelist models**_\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Data](#Data)\n",
"1. [Train](#Train)\n",
"1. [Results](#Results)\n",
"1. [Test](#Test)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"\n",
"In this example we use the scikit-learn's [digit dataset](http://scikit-learn.org/stable/datasets/index.html#optical-recognition-of-handwritten-digits-dataset) to showcase how you can use AutoML for a simple classification problem.\n",
"\n",
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
"This notebooks shows how can automl can be trained on a a selected list of models,see the readme.md for the models.\n",
"This trains the model exclusively on tensorflow based models.\n",
"\n",
"In this notebook you will learn how to:\n",
"1. Create an `Experiment` in an existing `Workspace`.\n",
"2. Configure AutoML using `AutoMLConfig`.\n",
"3. Train the model on a whilelisted models using local compute. \n",
"4. Explore the results.\n",
"5. Test the best fitted model."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"from matplotlib import pyplot as plt\n",
"import numpy as np\n",
"import pandas as pd\n",
"from sklearn import datasets\n",
"\n",
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.train.automl import AutoMLConfig"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# Choose a name for the experiment and specify the project folder.\n",
"experiment_name = 'automl-local-whitelist'\n",
"project_folder = './sample_projects/automl-local-whitelist'\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['SDK version'] = azureml.core.VERSION\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace Name'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Data\n",
"\n",
"This uses scikit-learn's [load_digits](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html) method."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"digits = datasets.load_digits()\n",
"\n",
"# Exclude the first 100 rows from training so that they can be used for test.\n",
"X_train = digits.data[100:,:]\n",
"y_train = digits.target[100:]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train\n",
"\n",
"Instantiate an `AutoMLConfig` object to specify the settings and data used to run the experiment.\n",
"\n",
"|Property|Description|\n",
"|-|-|\n",
"|**task**|classification or regression|\n",
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>balanced_accuracy</i><br><i>average_precision_score_weighted</i><br><i>precision_score_weighted</i>|\n",
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
"|**n_cross_validations**|Number of cross validation splits.|\n",
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
"|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.|\n",
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|\n",
"|**whitelist_models**|List of models that AutoML should use. The possible values are listed [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#configure-your-experiment-settings).|"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_config = AutoMLConfig(task = 'classification',\n",
" debug_log = 'automl_errors.log',\n",
" primary_metric = 'AUC_weighted',\n",
" iteration_timeout_minutes = 60,\n",
" iterations = 10,\n",
" n_cross_validations = 3,\n",
" verbosity = logging.INFO,\n",
" X = X_train, \n",
" y = y_train,\n",
" enable_tf=True,\n",
" whitelist_models=[\"TensorFlowLinearClassifier\", \"TensorFlowDNN\"],\n",
" path = project_folder)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while.\n",
"In this example, we specify `show_output = True` to print currently running iterations to the console."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"local_run = experiment.submit(automl_config, show_output = True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"local_run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Results"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Widget for Monitoring Runs\n",
"\n",
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
"\n",
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"RunDetails(local_run).show() "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n",
"#### Retrieve All Child Runs\n",
"You can also use SDK methods to fetch all the child runs and see individual metrics that we log."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"children = list(local_run.get_children())\n",
"metricslist = {}\n",
"for run in children:\n",
" properties = run.get_properties()\n",
" metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}\n",
" metricslist[int(properties['iteration'])] = metrics\n",
"\n",
"rundata = pd.DataFrame(metricslist).sort_index(1)\n",
"rundata"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrieve the Best Model\n",
"\n",
"Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. The Model includes the pipeline and any pre-processing. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = local_run.get_output()\n",
"print(best_run)\n",
"print(fitted_model)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Best Model Based on Any Other Metric\n",
"Show the run and the model that has the smallest `log_loss` value:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"lookup_metric = \"log_loss\"\n",
"best_run, fitted_model = local_run.get_output(metric = lookup_metric)\n",
"print(best_run)\n",
"print(fitted_model)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Model from a Specific Iteration\n",
"Show the run and the model from the third iteration:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"iteration = 3\n",
"third_run, third_model = local_run.get_output(iteration = iteration)\n",
"print(third_run)\n",
"print(third_model)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test\n",
"\n",
"#### Load Test Data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"digits = datasets.load_digits()\n",
"X_test = digits.data[:10, :]\n",
"y_test = digits.target[:10]\n",
"images = digits.images[:10]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Testing Our Best Fitted Model\n",
"We will try to predict 2 digits and see how our model works."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Randomly select digits and test.\n",
"for index in np.random.choice(len(y_test), 2, replace = False):\n",
" print(index)\n",
" predicted = fitted_model.predict(X_test[index:index + 1])[0]\n",
" label = y_test[index]\n",
" title = \"Label value = %d Predicted value = %d \" % (label, predicted)\n",
" fig = plt.figure(1, figsize = (3,3))\n",
" ax1 = fig.add_axes((0,0,.8,.8))\n",
" ax1.set_title(title)\n",
" plt.imshow(images[index], cmap = plt.cm.gray_r, interpolation = 'nearest')\n",
" plt.show()"
]
}
],
"metadata": {
"authors": [
{
"name": "savitam"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,396 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated Machine Learning\n",
"_**Classification with Local Compute**_\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Data](#Data)\n",
"1. [Train](#Train)\n",
"1. [Results](#Results)\n",
"1. [Test](#Test)\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"\n",
"In this example we use the scikit-learn's [digit dataset](http://scikit-learn.org/stable/datasets/index.html#optical-recognition-of-handwritten-digits-dataset) to showcase how you can use AutoML for a simple classification problem.\n",
"\n",
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
"\n",
"In this notebook you will learn how to:\n",
"1. Create an `Experiment` in an existing `Workspace`.\n",
"2. Configure AutoML using `AutoMLConfig`.\n",
"3. Train the model using local compute.\n",
"4. Explore the results.\n",
"5. Test the best fitted model."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"from matplotlib import pyplot as plt\n",
"import numpy as np\n",
"import pandas as pd\n",
"from sklearn import datasets\n",
"\n",
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.train.automl import AutoMLConfig"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"\n",
"# Choose a name for the experiment and specify the project folder.\n",
"experiment_name = 'automl-classification'\n",
"project_folder = './sample_projects/automl-classification'\n",
"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output['SDK version'] = azureml.core.VERSION\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace Name'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Data\n",
"\n",
"This uses scikit-learn's [load_digits](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html) method."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"digits = datasets.load_digits()\n",
"\n",
"# Exclude the first 100 rows from training so that they can be used for test.\n",
"X_train = digits.data[100:,:]\n",
"y_train = digits.target[100:]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train\n",
"\n",
"Instantiate an `AutoMLConfig` object to specify the settings and data used to run the experiment.\n",
"\n",
"|Property|Description|\n",
"|-|-|\n",
"|**task**|classification or regression|\n",
"|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n",
"|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|\n",
"|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|\n",
"|**n_cross_validations**|Number of cross validation splits.|\n",
"|**X**|(sparse) array-like, shape = [n_samples, n_features]|\n",
"|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.|\n",
"|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_config = AutoMLConfig(task = 'classification',\n",
" debug_log = 'automl_errors.log',\n",
" primary_metric = 'AUC_weighted',\n",
" iteration_timeout_minutes = 60,\n",
" iterations = 25,\n",
" n_cross_validations = 3,\n",
" verbosity = logging.INFO,\n",
" X = X_train, \n",
" y = y_train,\n",
" path = project_folder)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while.\n",
"In this example, we specify `show_output = True` to print currently running iterations to the console."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"local_run = experiment.submit(automl_config, show_output = True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"local_run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optionally, you can continue an interrupted local run by calling `continue_experiment` without the `iterations` parameter, or run more iterations for a completed run by specifying the `iterations` parameter:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"local_run = local_run.continue_experiment(X = X_train, \n",
" y = y_train, \n",
" show_output = True,\n",
" iterations = 5)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Results"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Widget for Monitoring Runs\n",
"\n",
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
"\n",
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"RunDetails(local_run).show() "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n",
"#### Retrieve All Child Runs\n",
"You can also use SDK methods to fetch all the child runs and see individual metrics that we log."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"children = list(local_run.get_children())\n",
"metricslist = {}\n",
"for run in children:\n",
" properties = run.get_properties()\n",
" metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}\n",
" metricslist[int(properties['iteration'])] = metrics\n",
"\n",
"rundata = pd.DataFrame(metricslist).sort_index(1)\n",
"rundata"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrieve the Best Model\n",
"\n",
"Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. The Model includes the pipeline and any pre-processing. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = local_run.get_output()\n",
"print(best_run)\n",
"print(fitted_model)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Best Model Based on Any Other Metric\n",
"Show the run and the model that has the smallest `log_loss` value:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"lookup_metric = \"log_loss\"\n",
"best_run, fitted_model = local_run.get_output(metric = lookup_metric)\n",
"print(best_run)\n",
"print(fitted_model)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Model from a Specific Iteration\n",
"Show the run and the model from the third iteration:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"iteration = 3\n",
"third_run, third_model = local_run.get_output(iteration = iteration)\n",
"print(third_run)\n",
"print(third_model)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test \n",
"\n",
"#### Load Test Data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"digits = datasets.load_digits()\n",
"X_test = digits.data[:10, :]\n",
"y_test = digits.target[:10]\n",
"images = digits.images[:10]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Testing Our Best Fitted Model\n",
"We will try to predict 2 digits and see how our model works."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Randomly select digits and test.\n",
"for index in np.random.choice(len(y_test), 2, replace = False):\n",
" print(index)\n",
" predicted = fitted_model.predict(X_test[index:index + 1])[0]\n",
" label = y_test[index]\n",
" title = \"Label value = %d Predicted value = %d \" % (label, predicted)\n",
" fig = plt.figure(1, figsize = (3,3))\n",
" ax1 = fig.add_axes((0,0,.8,.8))\n",
" ax1.set_title(title)\n",
" plt.imshow(images[index], cmap = plt.cm.gray_r, interpolation = 'nearest')\n",
" plt.show()"
]
}
],
"metadata": {
"authors": [
{
"name": "savitam"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -0,0 +1,585 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated Machine Learning \n",
"**Continuous retraining using Pipelines and Time-Series TabularDataset**\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"2. [Setup](#Setup)\n",
"3. [Compute](#Compute)\n",
"4. [Run Configuration](#Run-Configuration)\n",
"5. [Data Ingestion Pipeline](#Data-Ingestion-Pipeline)\n",
"6. [Training Pipeline](#Training-Pipeline)\n",
"7. [Publish Retraining Pipeline and Schedule](#Publish-Retraining-Pipeline-and-Schedule)\n",
"8. [Test Retraining](#Test-Retraining)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"In this example we use AutoML and Pipelines to enable contious retraining of a model based on updates to the training dataset. We will create two pipelines, the first one to demonstrate a training dataset that gets updated over time. We leverage time-series capabilities of `TabularDataset` to achieve this. The second pipeline utilizes pipeline `Schedule` to trigger continuous retraining. \n",
"Make sure you have executed the [configuration notebook](../../../configuration.ipynb) before running this notebook.\n",
"In this notebook you will learn how to:\n",
"* Create an Experiment in an existing Workspace.\n",
"* Configure AutoML using AutoMLConfig.\n",
"* Create data ingestion pipeline to update a time-series based TabularDataset\n",
"* Create training pipeline to prepare data, run AutoML, register the model and setup pipeline triggers.\n",
"\n",
"## Setup\n",
"As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"from matplotlib import pyplot as plt\n",
"import numpy as np\n",
"import pandas as pd\n",
"from sklearn import datasets\n",
"\n",
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"from azureml.train.automl import AutoMLConfig"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Accessing the Azure ML workspace requires authentication with Azure.\n",
"\n",
"The default authentication is interactive authentication using the default tenant. Executing the ws = Workspace.from_config() line in the cell below will prompt for authentication the first time that it is run.\n",
"\n",
"If you have multiple Azure tenants, you can specify the tenant by replacing the ws = Workspace.from_config() line in the cell below with the following:\n",
"```\n",
"from azureml.core.authentication import InteractiveLoginAuthentication\n",
"auth = InteractiveLoginAuthentication(tenant_id = 'mytenantid')\n",
"ws = Workspace.from_config(auth = auth)\n",
"```\n",
"If you need to run in an environment where interactive login is not possible, you can use Service Principal authentication by replacing the ws = Workspace.from_config() line in the cell below with the following:\n",
"```\n",
"from azureml.core.authentication import ServicePrincipalAuthentication\n",
"auth = auth = ServicePrincipalAuthentication('mytenantid', 'myappid', 'mypassword')\n",
"ws = Workspace.from_config(auth = auth)\n",
"```\n",
"For more details, see aka.ms/aml-notebook-auth"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
"dstor = ws.get_default_datastore()\n",
"\n",
"# Choose a name for the run history container in the workspace.\n",
"experiment_name = \"retrain-noaaweather\"\n",
"experiment = Experiment(ws, experiment_name)\n",
"\n",
"output = {}\n",
"output[\"Subscription ID\"] = ws.subscription_id\n",
"output[\"Workspace\"] = ws.name\n",
"output[\"Resource Group\"] = ws.resource_group\n",
"output[\"Location\"] = ws.location\n",
"output[\"Run History Name\"] = experiment_name\n",
"pd.set_option(\"display.max_colwidth\", None)\n",
"outputDf = pd.DataFrame(data=output, index=[\"\"])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Compute \n",
"\n",
"#### Create or Attach existing AmlCompute\n",
"\n",
"You will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n",
"\n",
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
"\n",
"#### Creation of AmlCompute takes approximately 5 minutes. \n",
"If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n",
"As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
"from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# Choose a name for your CPU cluster\n",
"amlcompute_cluster_name = \"cont-cluster\"\n",
"\n",
"# Verify that cluster does not exist already\n",
"try:\n",
" compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
" print(\"Found existing cluster, use it.\")\n",
"except ComputeTargetException:\n",
" compute_config = AmlCompute.provisioning_configuration(\n",
" vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n",
" )\n",
" compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
"compute_target.wait_for_completion(show_output=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Run Configuration"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.runconfig import CondaDependencies, RunConfiguration\n",
"\n",
"# create a new RunConfig object\n",
"conda_run_config = RunConfiguration(framework=\"python\")\n",
"\n",
"# Set compute target to AmlCompute\n",
"conda_run_config.target = compute_target\n",
"\n",
"conda_run_config.environment.docker.enabled = True\n",
"\n",
"cd = CondaDependencies.create(\n",
" pip_packages=[\n",
" \"azureml-sdk[automl]\",\n",
" \"applicationinsights\",\n",
" \"azureml-opendatasets\",\n",
" \"azureml-defaults\",\n",
" ],\n",
" conda_packages=[\"numpy==1.16.2\"],\n",
" pin_sdk_version=False,\n",
")\n",
"conda_run_config.environment.python.conda_dependencies = cd\n",
"\n",
"print(\"run config is ready\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Data Ingestion Pipeline \n",
"For this demo, we will use NOAA weather data from [Azure Open Datasets](https://azure.microsoft.com/services/open-datasets/). You can replace this with your own dataset, or you can skip this pipeline if you already have a time-series based `TabularDataset`.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# The name and target column of the Dataset to create\n",
"dataset = \"NOAA-Weather-DS4\"\n",
"target_column_name = \"temperature\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n",
"### Upload Data Step\n",
"The data ingestion pipeline has a single step with a script to query the latest weather data and upload it to the blob store. During the first run, the script will create and register a time-series based `TabularDataset` with the past one week of weather data. For each subsequent run, the script will create a partition in the blob store by querying NOAA for new weather data since the last modified time of the dataset (`dataset.data_changed_time`) and creating a data.csv file."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Pipeline, PipelineParameter\n",
"from azureml.pipeline.steps import PythonScriptStep\n",
"\n",
"ds_name = PipelineParameter(name=\"ds_name\", default_value=dataset)\n",
"upload_data_step = PythonScriptStep(\n",
" script_name=\"upload_weather_data.py\",\n",
" allow_reuse=False,\n",
" name=\"upload_weather_data\",\n",
" arguments=[\"--ds_name\", ds_name],\n",
" compute_target=compute_target,\n",
" runconfig=conda_run_config,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit Pipeline Run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data_pipeline = Pipeline(\n",
" description=\"pipeline_with_uploaddata\", workspace=ws, steps=[upload_data_step]\n",
")\n",
"data_pipeline_run = experiment.submit(\n",
" data_pipeline, pipeline_parameters={\"ds_name\": dataset}\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data_pipeline_run.wait_for_completion(show_output=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Training Pipeline\n",
"### Prepare Training Data Step\n",
"\n",
"Script to check if new data is available since the model was last trained. If no new data is available, we cancel the remaining pipeline steps. We need to set allow_reuse flag to False to allow the pipeline to run even when inputs don't change. We also need the name of the model to check the time the model was last trained."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import PipelineData\n",
"\n",
"# The model name with which to register the trained model in the workspace.\n",
"model_name = PipelineParameter(\"model_name\", default_value=\"noaaweatherds\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data_prep_step = PythonScriptStep(\n",
" script_name=\"check_data.py\",\n",
" allow_reuse=False,\n",
" name=\"check_data\",\n",
" arguments=[\"--ds_name\", ds_name, \"--model_name\", model_name],\n",
" compute_target=compute_target,\n",
" runconfig=conda_run_config,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core import Dataset\n",
"\n",
"train_ds = Dataset.get_by_name(ws, dataset)\n",
"train_ds = train_ds.drop_columns([\"partition_date\"])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### AutoMLStep\n",
"Create an AutoMLConfig and a training step."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.train.automl import AutoMLConfig\n",
"from azureml.pipeline.steps import AutoMLStep\n",
"\n",
"automl_settings = {\n",
" \"iteration_timeout_minutes\": 10,\n",
" \"experiment_timeout_hours\": 0.25,\n",
" \"n_cross_validations\": 3,\n",
" \"primary_metric\": \"r2_score\",\n",
" \"max_concurrent_iterations\": 3,\n",
" \"max_cores_per_iteration\": -1,\n",
" \"verbosity\": logging.INFO,\n",
" \"enable_early_stopping\": True,\n",
"}\n",
"\n",
"automl_config = AutoMLConfig(\n",
" task=\"regression\",\n",
" debug_log=\"automl_errors.log\",\n",
" path=\".\",\n",
" compute_target=compute_target,\n",
" training_data=train_ds,\n",
" label_column_name=target_column_name,\n",
" **automl_settings,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import PipelineData, TrainingOutput\n",
"\n",
"metrics_output_name = \"metrics_output\"\n",
"best_model_output_name = \"best_model_output\"\n",
"\n",
"metrics_data = PipelineData(\n",
" name=\"metrics_data\",\n",
" datastore=dstor,\n",
" pipeline_output_name=metrics_output_name,\n",
" training_output=TrainingOutput(type=\"Metrics\"),\n",
")\n",
"model_data = PipelineData(\n",
" name=\"model_data\",\n",
" datastore=dstor,\n",
" pipeline_output_name=best_model_output_name,\n",
" training_output=TrainingOutput(type=\"Model\"),\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_step = AutoMLStep(\n",
" name=\"automl_module\",\n",
" automl_config=automl_config,\n",
" outputs=[metrics_data, model_data],\n",
" allow_reuse=False,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Register Model Step\n",
"Script to register the model to the workspace. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"register_model_step = PythonScriptStep(\n",
" script_name=\"register_model.py\",\n",
" name=\"register_model\",\n",
" allow_reuse=False,\n",
" arguments=[\n",
" \"--model_name\",\n",
" model_name,\n",
" \"--model_path\",\n",
" model_data,\n",
" \"--ds_name\",\n",
" ds_name,\n",
" ],\n",
" inputs=[model_data],\n",
" compute_target=compute_target,\n",
" runconfig=conda_run_config,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Submit Pipeline Run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_pipeline = Pipeline(\n",
" description=\"training_pipeline\",\n",
" workspace=ws,\n",
" steps=[data_prep_step, automl_step, register_model_step],\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_pipeline_run = experiment.submit(\n",
" training_pipeline,\n",
" pipeline_parameters={\"ds_name\": dataset, \"model_name\": \"noaaweatherds\"},\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"training_pipeline_run.wait_for_completion(show_output=False)"
]
},
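{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optionally, you can pull the metrics produced by the AutoML step through the named pipeline output defined earlier. This is a sketch rather than a required step, and it assumes the run completed (i.e. it was not cancelled by the data check step)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics_output = training_pipeline_run.get_pipeline_output(metrics_output_name)\n",
"metrics_output.download(\".\", show_progress=True)"
]
},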
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Publish Retraining Pipeline and Schedule\n",
"Once we are happy with the pipeline, we can publish the training pipeline to the workspace and create a schedule to trigger on blob change. The schedule polls the blob store where the data is being uploaded and runs the retraining pipeline if there is a data change. A new version of the model will be registered to the workspace once the run is complete."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_name = \"Retraining-Pipeline-NOAAWeather\"\n",
"\n",
"published_pipeline = training_pipeline.publish(\n",
" name=pipeline_name, description=\"Pipeline that retrains AutoML model\"\n",
")\n",
"\n",
"published_pipeline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Schedule\n",
"\n",
"schedule = Schedule.create(\n",
" workspace=ws,\n",
" name=\"RetrainingSchedule\",\n",
" pipeline_parameters={\"ds_name\": dataset, \"model_name\": \"noaaweatherds\"},\n",
" pipeline_id=published_pipeline.id,\n",
" experiment_name=experiment_name,\n",
" datastore=dstor,\n",
" wait_for_provisioning=True,\n",
" polling_interval=1440,\n",
")"
]
},
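{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optionally, you can list the active schedules in the workspace and, when retraining should stop, disable the schedule created above. This is a sketch of schedule management rather than a required step."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for s in Schedule.list(ws):\n",
"    print(s.name, s.id, s.pipeline_id)\n",
"\n",
"# Disable the retraining schedule when it is no longer needed:\n",
"# schedule.disable(wait_for_provisioning=True)"
]
},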
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test Retraining\n",
"Here we setup the data ingestion pipeline to run on a schedule, to verify that the retraining pipeline runs as expected. \n",
"\n",
"Note: \n",
"* Azure NOAA Weather data is updated daily and retraining will not trigger if there is no new data available. \n",
"* Depending on the polling interval set in the schedule, the retraining may take some time trigger after data ingestion pipeline completes."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pipeline_name = \"DataIngestion-Pipeline-NOAAWeather\"\n",
"\n",
"published_pipeline = training_pipeline.publish(\n",
" name=pipeline_name, description=\"Pipeline that updates NOAAWeather Dataset\"\n",
")\n",
"\n",
"published_pipeline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.pipeline.core import Schedule\n",
"\n",
"schedule = Schedule.create(\n",
" workspace=ws,\n",
" name=\"RetrainingSchedule-DataIngestion\",\n",
" pipeline_parameters={\"ds_name\": dataset},\n",
" pipeline_id=published_pipeline.id,\n",
" experiment_name=experiment_name,\n",
" datastore=dstor,\n",
" wait_for_provisioning=True,\n",
" polling_interval=1440,\n",
")"
]
}
],
"metadata": {
"authors": [
{
"name": "vivijay"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}


@@ -0,0 +1,4 @@
name: auto-ml-continuous-retraining
dependencies:
- pip:
- azureml-sdk


@@ -0,0 +1,46 @@
import argparse
import os
import azureml.core
from datetime import datetime
import pandas as pd
import pytz
from azureml.core import Dataset, Model
from azureml.core.run import Run, _OfflineRun
from azureml.core import Workspace
run = Run.get_context()
ws = None
if type(run) == _OfflineRun:
    ws = Workspace.from_config()
else:
    ws = run.experiment.workspace
print("Check for new data.")
parser = argparse.ArgumentParser("split")
parser.add_argument("--ds_name", help="input dataset name")
parser.add_argument("--model_name", help="name of the deployed model")
args = parser.parse_args()
print("Argument 1(ds_name): %s" % args.ds_name)
print("Argument 2(model_name): %s" % args.model_name)
# Get the latest registered model
try:
    model = Model(ws, args.model_name)
    last_train_time = model.created_time
    print("Model was last trained on {0}.".format(last_train_time))
except Exception:
    print("Could not get last model train time.")
    last_train_time = datetime.min.replace(tzinfo=pytz.UTC)
train_ds = Dataset.get_by_name(ws, args.ds_name)
dataset_changed_time = train_ds.data_changed_time
if not dataset_changed_time > last_train_time:
    print("Cancelling run since there is no new data.")
    run.parent.cancel()
else:
    # New data is available since the model was last trained
    print("Dataset was last updated on {0}. Retraining...".format(dataset_changed_time))


@@ -0,0 +1,35 @@
from azureml.core import Dataset, Model
from azureml.core.run import Run, _OfflineRun
from azureml.core import Workspace
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--model_name")
parser.add_argument("--model_path")
parser.add_argument("--ds_name")
args = parser.parse_args()
print("Argument 1(model_name): %s" % args.model_name)
print("Argument 2(model_path): %s" % args.model_path)
print("Argument 3(ds_name): %s" % args.ds_name)
run = Run.get_context()
ws = None
if type(run) == _OfflineRun:
    ws = Workspace.from_config()
else:
    ws = run.experiment.workspace
train_ds = Dataset.get_by_name(ws, args.ds_name)
datasets = [(Dataset.Scenario.TRAINING, train_ds)]
# Register model with training dataset
model = Model.register(
workspace=ws,
model_path=args.model_path,
model_name=args.model_name,
datasets=datasets,
)
print("Registered version {0} of model {1}".format(model.version, model.name))


@@ -0,0 +1,157 @@
import argparse
import os
from datetime import datetime
from dateutil.relativedelta import relativedelta
import pandas as pd
import traceback
from azureml.core import Dataset
from azureml.core.run import Run, _OfflineRun
from azureml.core import Workspace
from azureml.opendatasets import NoaaIsdWeather
run = Run.get_context()
ws = None
if type(run) == _OfflineRun:
    ws = Workspace.from_config()
else:
    ws = run.experiment.workspace
usaf_list = [
"725724",
"722149",
"723090",
"722159",
"723910",
"720279",
"725513",
"725254",
"726430",
"720381",
"723074",
"726682",
"725486",
"727883",
"723177",
"722075",
"723086",
"724053",
"725070",
"722073",
"726060",
"725224",
"725260",
"724520",
"720305",
"724020",
"726510",
"725126",
"722523",
"703333",
"722249",
"722728",
"725483",
"722972",
"724975",
"742079",
"727468",
"722193",
"725624",
"722030",
"726380",
"720309",
"722071",
"720326",
"725415",
"724504",
"725665",
"725424",
"725066",
]
def get_noaa_data(start_time, end_time):
    columns = [
        "usaf",
        "wban",
        "datetime",
        "latitude",
        "longitude",
        "elevation",
        "windAngle",
        "windSpeed",
        "temperature",
        "stationName",
        "p_k",
    ]
    isd = NoaaIsdWeather(start_time, end_time, cols=columns)
    noaa_df = isd.to_pandas_dataframe()
    df_filtered = noaa_df[noaa_df["usaf"].isin(usaf_list)]
    df_filtered = df_filtered.reset_index(drop=True)
    print(
        "Received {0} rows of training data between {1} and {2}".format(
            df_filtered.shape[0], start_time, end_time
        )
    )
    return df_filtered
print("Check for new data and prepare the data")
parser = argparse.ArgumentParser("split")
parser.add_argument("--ds_name", help="name of the Dataset to update")
args = parser.parse_args()
print("Argument 1(ds_name): %s" % args.ds_name)
dstor = ws.get_default_datastore()
register_dataset = False
end_time = datetime.utcnow()
try:
    ds = Dataset.get_by_name(ws, args.ds_name)
    end_time_last_slice = ds.data_changed_time.replace(tzinfo=None)
    print("Dataset {0} last updated on {1}".format(args.ds_name, end_time_last_slice))
except Exception:
    print(traceback.format_exc())
    print(
        "Dataset with name {0} not found, registering new dataset.".format(args.ds_name)
    )
    register_dataset = True
    end_time = datetime(2021, 5, 1, 0, 0)
    end_time_last_slice = end_time - relativedelta(weeks=2)
train_df = get_noaa_data(end_time_last_slice, end_time)
if train_df.size > 0:
    print(
        "Received {0} rows of new data after {1}.".format(
            train_df.shape[0], end_time_last_slice
        )
    )
    folder_name = "{}/{:04d}/{:02d}/{:02d}/{:02d}/{:02d}/{:02d}".format(
        args.ds_name,
        end_time.year,
        end_time.month,
        end_time.day,
        end_time.hour,
        end_time.minute,
        end_time.second,
    )
    file_path = "{0}/data.csv".format(folder_name)
    # Add a new partition to the registered dataset
    os.makedirs(folder_name, exist_ok=True)
    train_df.to_csv(file_path, index=False)
    dstor.upload_files(
        files=[file_path], target_path=folder_name, overwrite=True, show_progress=True
    )
else:
    print("No new data since {0}.".format(end_time_last_slice))
if register_dataset:
    ds = Dataset.Tabular.from_delimited_files(
        dstor.path("{}/**/*.csv".format(args.ds_name)),
        partition_format="/{partition_date:yyyy/MM/dd/HH/mm/ss}/data.csv",
    )
    ds.register(ws, name=args.ds_name)


@@ -1,498 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated Machine Learning\n",
"_**Prepare Data using `azureml.dataprep` for Remote Execution (DSVM)**_\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Data](#Data)\n",
"1. [Train](#Train)\n",
"1. [Results](#Results)\n",
"1. [Test](#Test)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"In this example we showcase how you can use the `azureml.dataprep` SDK to load and prepare data for AutoML. `azureml.dataprep` can also be used standalone; full documentation can be found [here](https://github.com/Microsoft/PendletonDocs).\n",
"\n",
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
"\n",
"In this notebook you will learn how to:\n",
"1. Define data loading and preparation steps in a `Dataflow` using `azureml.dataprep`.\n",
"2. Pass the `Dataflow` to AutoML for a local run.\n",
"3. Pass the `Dataflow` to AutoML for a remote run."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"Currently, Data Prep only supports __Ubuntu 16__ and __Red Hat Enterprise Linux 7__. We are working on supporting more linux distros."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"import time\n",
"\n",
"import pandas as pd\n",
"\n",
"import azureml.core\n",
"from azureml.core.compute import DsvmCompute\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"import azureml.dataprep as dprep\n",
"from azureml.train.automl import AutoMLConfig"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
" \n",
"# choose a name for experiment\n",
"experiment_name = 'automl-dataprep-remote-dsvm'\n",
"# project folder\n",
"project_folder = './sample_projects/automl-dataprep-remote-dsvm'\n",
" \n",
"experiment = Experiment(ws, experiment_name)\n",
" \n",
"output = {}\n",
"output['SDK version'] = azureml.core.VERSION\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace Name'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# You can use `auto_read_file` which intelligently figures out delimiters and datatypes of a file.\n",
"# The data referenced here was pulled from `sklearn.datasets.load_digits()`.\n",
"simple_example_data_root = 'https://dprepdata.blob.core.windows.net/automl-notebook-data/'\n",
"X = dprep.auto_read_file(simple_example_data_root + 'X.csv').skip(1) # Remove the header row.\n",
"\n",
"# You can also use `read_csv` and `to_*` transformations to read (with overridable delimiter)\n",
"# and convert column types manually.\n",
"# Here we read a comma delimited file and convert all columns to integers.\n",
"y = dprep.read_csv(simple_example_data_root + 'y.csv').to_long(dprep.ColumnSelector(term='.*', use_regex = True))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can peek the result of a Dataflow at any range using `skip(i)` and `head(j)`. Doing so evaluates only `j` records for all the steps in the Dataflow, which makes it fast even against large datasets."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X.skip(1).head(5)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train\n",
"\n",
"This creates a general AutoML settings object applicable for both local and remote runs."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_settings = {\n",
" \"iteration_timeout_minutes\" : 10,\n",
" \"iterations\" : 2,\n",
" \"primary_metric\" : 'AUC_weighted',\n",
" \"preprocess\" : False,\n",
" \"verbosity\" : logging.INFO,\n",
" \"n_cross_validations\": 3\n",
"}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create or Attach a Remote Linux DSVM"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dsvm_name = 'mydsvmc'\n",
"\n",
"try:\n",
" while ws.compute_targets[dsvm_name].provisioning_state == 'Creating':\n",
" time.sleep(1)\n",
" \n",
" dsvm_compute = DsvmCompute(ws, dsvm_name)\n",
" print('Found existing DVSM.')\n",
"except:\n",
" print('Creating a new DSVM.')\n",
" dsvm_config = DsvmCompute.provisioning_configuration(vm_size = \"Standard_D2_v2\")\n",
" dsvm_compute = DsvmCompute.create(ws, name = dsvm_name, provisioning_configuration = dsvm_config)\n",
" dsvm_compute.wait_for_completion(show_output = True)\n",
" print(\"Waiting one minute for ssh to be accessible\")\n",
" time.sleep(90) # Wait for ssh to be accessible"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.core.runconfig import RunConfiguration\n",
"from azureml.core.conda_dependencies import CondaDependencies\n",
"\n",
"conda_run_config = RunConfiguration(framework=\"python\")\n",
"\n",
"conda_run_config.target = dsvm_compute\n",
"\n",
"cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'], conda_packages=['numpy'])\n",
"conda_run_config.environment.python.conda_dependencies = cd"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Pass Data with `Dataflow` Objects\n",
"\n",
"The `Dataflow` objects captured above can also be passed to the `submit` method for a remote run. AutoML will serialize the `Dataflow` object and send it to the remote compute target. The `Dataflow` will not be evaluated locally."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_config = AutoMLConfig(task = 'classification',\n",
" debug_log = 'automl_errors.log',\n",
" path = project_folder,\n",
" run_configuration=conda_run_config,\n",
" X = X,\n",
" y = y,\n",
" **automl_settings)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"remote_run = experiment.submit(automl_config, show_output = True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"remote_run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Results"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Widget for Monitoring Runs\n",
"\n",
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
"\n",
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"RunDetails(remote_run).show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Retrieve All Child Runs\n",
"You can also use SDK methods to fetch all the child runs and see individual metrics that we log."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"children = list(remote_run.get_children())\n",
"metricslist = {}\n",
"for run in children:\n",
" properties = run.get_properties()\n",
" metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}\n",
" metricslist[int(properties['iteration'])] = metrics\n",
" \n",
"rundata = pd.DataFrame(metricslist).sort_index(1)\n",
"rundata"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrieve the Best Model\n",
"\n",
"Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = remote_run.get_output()\n",
"print(best_run)\n",
"print(fitted_model)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Best Model Based on Any Other Metric\n",
"Show the run and the model that has the smallest `log_loss` value:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"lookup_metric = \"log_loss\"\n",
"best_run, fitted_model = remote_run.get_output(metric = lookup_metric)\n",
"print(best_run)\n",
"print(fitted_model)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Model from a Specific Iteration\n",
"Show the run and the model from the first iteration:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"iteration = 0\n",
"best_run, fitted_model = remote_run.get_output(iteration = iteration)\n",
"print(best_run)\n",
"print(fitted_model)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test\n",
"\n",
"#### Load Test Data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn import datasets\n",
"\n",
"digits = datasets.load_digits()\n",
"X_test = digits.data[:10, :]\n",
"y_test = digits.target[:10]\n",
"images = digits.images[:10]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Testing Our Best Fitted Model\n",
"We will try to predict 2 digits and see how our model works."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Randomly select digits and test\n",
"from matplotlib import pyplot as plt\n",
"import numpy as np\n",
"\n",
"for index in np.random.choice(len(y_test), 2, replace = False):\n",
" print(index)\n",
" predicted = fitted_model.predict(X_test[index:index + 1])[0]\n",
" label = y_test[index]\n",
" title = \"Label value = %d Predicted value = %d \" % (label, predicted)\n",
" fig = plt.figure(1, figsize=(3,3))\n",
" ax1 = fig.add_axes((0,0,.8,.8))\n",
" ax1.set_title(title)\n",
" plt.imshow(images[index], cmap = plt.cm.gray_r, interpolation = 'nearest')\n",
" plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Appendix"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Capture the `Dataflow` Objects for Later Use in AutoML\n",
"\n",
"`Dataflow` objects are immutable and are composed of a list of data preparation steps. A `Dataflow` object can be branched at any point for further usage."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sklearn.digits.data + target\n",
"digits_complete = dprep.auto_read_file('https://dprepdata.blob.core.windows.net/automl-notebook-data/digits-complete.csv')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"`digits_complete` (sourced from `sklearn.datasets.load_digits()`) is forked into `dflow_X` to capture all the feature columns and `dflow_y` to capture the label column."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(digits_complete.to_pandas_dataframe().shape)\n",
"labels_column = 'Column64'\n",
"dflow_X = digits_complete.drop_columns(columns = [labels_column])\n",
"dflow_y = digits_complete.keep_columns(columns = [labels_column])"
]
}
],
"metadata": {
"authors": [
{
"name": "savitam"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}


@@ -1,449 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
"\n",
"Licensed under the MIT License."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Automated Machine Learning\n",
"_**Prepare Data using `azureml.dataprep` for Local Execution**_\n",
"\n",
"## Contents\n",
"1. [Introduction](#Introduction)\n",
"1. [Setup](#Setup)\n",
"1. [Data](#Data)\n",
"1. [Train](#Train)\n",
"1. [Results](#Results)\n",
"1. [Test](#Test)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"In this example we showcase how you can use the `azureml.dataprep` SDK to load and prepare data for AutoML. `azureml.dataprep` can also be used standalone; full documentation can be found [here](https://github.com/Microsoft/PendletonDocs).\n",
"\n",
"Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n",
"\n",
"In this notebook you will learn how to:\n",
"1. Define data loading and preparation steps in a `Dataflow` using `azureml.dataprep`.\n",
"2. Pass the `Dataflow` to AutoML for a local run.\n",
"3. Pass the `Dataflow` to AutoML for a remote run."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"Currently, Data Prep only supports __Ubuntu 16__ and __Red Hat Enterprise Linux 7__. We are working on supporting more linux distros."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"import pandas as pd\n",
"\n",
"import azureml.core\n",
"from azureml.core.experiment import Experiment\n",
"from azureml.core.workspace import Workspace\n",
"import azureml.dataprep as dprep\n",
"from azureml.train.automl import AutoMLConfig"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
" \n",
"# choose a name for experiment\n",
"experiment_name = 'automl-dataprep-local'\n",
"# project folder\n",
"project_folder = './sample_projects/automl-dataprep-local'\n",
" \n",
"experiment = Experiment(ws, experiment_name)\n",
" \n",
"output = {}\n",
"output['SDK version'] = azureml.core.VERSION\n",
"output['Subscription ID'] = ws.subscription_id\n",
"output['Workspace Name'] = ws.name\n",
"output['Resource Group'] = ws.resource_group\n",
"output['Location'] = ws.location\n",
"output['Project Directory'] = project_folder\n",
"output['Experiment Name'] = experiment.name\n",
"pd.set_option('display.max_colwidth', -1)\n",
"outputDf = pd.DataFrame(data = output, index = [''])\n",
"outputDf.T"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# You can use `auto_read_file` which intelligently figures out delimiters and datatypes of a file.\n",
"# The data referenced here was pulled from `sklearn.datasets.load_digits()`.\n",
"simple_example_data_root = 'https://dprepdata.blob.core.windows.net/automl-notebook-data/'\n",
"X = dprep.auto_read_file(simple_example_data_root + 'X.csv').skip(1) # Remove the header row.\n",
"\n",
"# You can also use `read_csv` and `to_*` transformations to read (with overridable delimiter)\n",
"# and convert column types manually.\n",
"# Here we read a comma delimited file and convert all columns to integers.\n",
"y = dprep.read_csv(simple_example_data_root + 'y.csv').to_long(dprep.ColumnSelector(term='.*', use_regex = True))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Review the Data Preparation Result\n",
"\n",
"You can peek the result of a Dataflow at any range using `skip(i)` and `head(j)`. Doing so evaluates only `j` records for all the steps in the Dataflow, which makes it fast even against large datasets."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X.skip(1).head(5)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train\n",
"\n",
"This creates a general AutoML settings object applicable for both local and remote runs."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_settings = {\n",
" \"iteration_timeout_minutes\" : 10,\n",
" \"iterations\" : 2,\n",
" \"primary_metric\" : 'AUC_weighted',\n",
" \"preprocess\" : False,\n",
" \"verbosity\" : logging.INFO,\n",
" \"n_cross_validations\": 3\n",
"}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Pass Data with `Dataflow` Objects\n",
"\n",
"The `Dataflow` objects captured above can be passed to the `submit` method for a local run. AutoML will retrieve the results from the `Dataflow` for model training."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"automl_config = AutoMLConfig(task = 'classification',\n",
" debug_log = 'automl_errors.log',\n",
" X = X,\n",
" y = y,\n",
" **automl_settings)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"local_run = experiment.submit(automl_config, show_output = True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"local_run"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Results"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Widget for Monitoring Runs\n",
"\n",
"The widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n",
"\n",
"**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.widgets import RunDetails\n",
"RunDetails(local_run).show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Retrieve All Child Runs\n",
"You can also use SDK methods to fetch all the child runs and see individual metrics that we log."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"children = list(local_run.get_children())\n",
"metricslist = {}\n",
"for run in children:\n",
" properties = run.get_properties()\n",
" metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}\n",
" metricslist[int(properties['iteration'])] = metrics\n",
" \n",
"rundata = pd.DataFrame(metricslist).sort_index(1)\n",
"rundata"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrieve the Best Model\n",
"\n",
"Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"best_run, fitted_model = local_run.get_output()\n",
"print(best_run)\n",
"print(fitted_model)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Best Model Based on Any Other Metric\n",
"Show the run and the model that has the smallest `log_loss` value:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"lookup_metric = \"log_loss\"\n",
"best_run, fitted_model = local_run.get_output(metric = lookup_metric)\n",
"print(best_run)\n",
"print(fitted_model)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Model from a Specific Iteration\n",
"Show the run and the model from the first iteration:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"iteration = 0\n",
"best_run, fitted_model = local_run.get_output(iteration = iteration)\n",
"print(best_run)\n",
"print(fitted_model)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test\n",
"\n",
"#### Load Test Data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn import datasets\n",
"\n",
"digits = datasets.load_digits()\n",
"X_test = digits.data[:10, :]\n",
"y_test = digits.target[:10]\n",
"images = digits.images[:10]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Testing Our Best Fitted Model\n",
"We will try to predict 2 digits and see how our model works."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Randomly select digits and test\n",
"from matplotlib import pyplot as plt\n",
"import numpy as np\n",
"\n",
"for index in np.random.choice(len(y_test), 2, replace = False):\n",
" print(index)\n",
" predicted = fitted_model.predict(X_test[index:index + 1])[0]\n",
" label = y_test[index]\n",
" title = \"Label value = %d Predicted value = %d \" % (label, predicted)\n",
" fig = plt.figure(1, figsize=(3,3))\n",
" ax1 = fig.add_axes((0,0,.8,.8))\n",
" ax1.set_title(title)\n",
" plt.imshow(images[index], cmap = plt.cm.gray_r, interpolation = 'nearest')\n",
" plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Appendix"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Capture the `Dataflow` Objects for Later Use in AutoML\n",
"\n",
"`Dataflow` objects are immutable and are composed of a list of data preparation steps. A `Dataflow` object can be branched at any point for further usage."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sklearn.digits.data + target\n",
"digits_complete = dprep.auto_read_file('https://dprepdata.blob.core.windows.net/automl-notebook-data/digits-complete.csv')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"`digits_complete` (sourced from `sklearn.datasets.load_digits()`) is forked into `dflow_X` to capture all the feature columns and `dflow_y` to capture the label column."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(digits_complete.to_pandas_dataframe().shape)\n",
"labels_column = 'Column64'\n",
"dflow_X = digits_complete.drop_columns(columns = [labels_column])\n",
"dflow_y = digits_complete.keep_columns(columns = [labels_column])"
]
}
],
"metadata": {
"authors": [
{
"name": "savitam"
}
],
"kernelspec": {
"display_name": "Python 3.6",
"language": "python",
"name": "python36"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}


@@ -0,0 +1,92 @@
# Experimental Notebooks for Automated ML
Notebooks listed in this folder use experimental features. Namespaces or function signatures may change in future SDK releases. The notebooks published here will reflect the latest supported APIs. All of these notebooks can run on a client-only installation of the Automated ML SDK.
The client-only installation doesn't contain any of the machine learning libraries, such as scikit-learn, xgboost, or tensorflow, making it much faster to install and less likely to conflict with packages in an existing environment. However, since the ML libraries are not available locally, models cannot be downloaded and loaded directly in the client. To replace the functionality of having models locally, these notebooks also demonstrate the ModelProxy feature, which lets you submit a predict/forecast call to the training environment, as sketched below.
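As a rough sketch of the ModelProxy pattern (assuming `remote_run` is a completed AutoML run and `test_data` is a registered `TabularDataset`; see the regression-model-proxy notebook for the full walkthrough):
```
from azureml.train.automl.model_proxy import ModelProxy

# Work against the best child run without downloading the model locally.
best_run = remote_run.get_best_child()
model_proxy = ModelProxy(best_run)

# predict() executes on the training compute and returns the predictions,
# so no ML libraries are needed on the client.
predictions = model_proxy.predict(test_data).to_pandas_dataframe()
```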
<a name="localconda"></a>
## Setup using a Local Conda environment
To run these notebook on your own notebook server, use these installation instructions.
The instructions below will install everything you need and then start a Jupyter notebook.
If you would like to use a lighter-weight version of the client that does not install all of the machine learning libraries locally, you can use the [experimental notebooks](experimental/README.md).
### 1. Install mini-conda from [here](https://conda.io/miniconda.html), choose 64-bit Python 3.7 or higher.
- **Note**: if you already have conda installed, you can keep using it but it should be version 4.4.10 or later (as shown by: conda -V). If you have a previous version installed, you can update it using the command: conda update conda.
There's no need to install mini-conda specifically.
### 2. Downloading the sample notebooks
- Download the sample notebooks from [GitHub](https://github.com/Azure/MachineLearningNotebooks) as zip and extract the contents to a local directory. The automated ML sample notebooks are in the "automated-machine-learning" folder.
### 3. Setup a new conda environment
The **automl_setup_thin_client** script creates a new conda environment, installs the necessary packages, configures the widget and starts a Jupyter notebook. It takes the conda environment name as an optional parameter. The default conda environment name is azure_automl_experimental. The exact command depends on the operating system. See the specific sections below for Windows, Mac and Linux. It can take about 10 minutes to execute.
Packages installed by the **automl_setup_thin_client** script:
<ul><li>python</li><li>nb_conda</li><li>matplotlib</li><li>numpy</li><li>cython</li><li>urllib3</li><li>pandas</li><li>azureml-sdk</li><li>azureml-widgets</li><li>pandas-ml</li></ul>
For more details refer to the [automl_env_thin_client.yml](./automl_env_thin_client.yml)
## Windows
Start an **Anaconda Prompt** window, cd to the **how-to-use-azureml/automated-machine-learning/experimental** folder where the sample notebooks were extracted and then run:
```
automl_setup_thin_client
```
## Mac
Install "Command line developer tools" if it is not already installed (you can use the command: `xcode-select --install`).
Start a Terminal window, cd to the **how-to-use-azureml/automated-machine-learning/experimental** folder where the sample notebooks were extracted and then run:
```
bash automl_setup_thin_client_mac.sh
```
## Linux
cd to the **how-to-use-azureml/automated-machine-learning/experimental** folder where the sample notebooks were extracted and then run:
```
bash automl_setup_thin_client_linux.sh
```
### 4. Running configuration.ipynb
- Before running any samples, you first need to run the configuration notebook. Open the [configuration](../../configuration.ipynb) notebook.
- Execute the cells in the notebook to register the Machine Learning Services resource provider and create a workspace. (*instructions in notebook*)
### 5. Running Samples
- Please make sure you use the Python [conda env:azure_automl_experimental] kernel when trying the sample notebooks.
- Follow the instructions in the individual notebooks to explore various features in automated ML.
### 6. Starting jupyter notebook manually
To start your Jupyter notebook manually, use:
```
conda activate azure_automl_experimental
jupyter notebook
```
or on Mac or Linux:
```
source activate azure_automl_experimental
jupyter notebook
```
<a name="samples"></a>
# Automated ML SDK Sample Notebooks
- [auto-ml-regression-model-proxy.ipynb](regression-model-proxy/auto-ml-regression-model-proxy.ipynb)
- Dataset: Hardware Performance Dataset
- Simple example of using automated ML for regression
- Uses azure compute for training
- Uses ModelProxy for submitting prediction to training environment on azure compute
<a name="documentation"></a>
See [Configure automated machine learning experiments](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-auto-train) to learn more about the settings and features available for automated machine learning experiments.
<a name="pythoncommand"></a>
# Running using python command
Jupyter notebook provides a File / Download as / Python (.py) option for saving the notebook as a Python file.
You can then run this file using the python command.
However, on Windows the file needs to be modified before it can be run.
The following condition must be added to the main code in the file:
if __name__ == "__main__":
The main code of the file must be indented so that it is under this condition.
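A minimal sketch of the required change (the print statement stands in for the exported notebook code):
```
if __name__ == "__main__":
    # All top-level code from the exported .py file goes here,
    # indented under the guard.
    print("notebook code runs here")
```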


@@ -0,0 +1,63 @@
@echo off
set conda_env_name=%1
set automl_env_file=%2
set options=%3
set PIP_NO_WARN_SCRIPT_LOCATION=0
IF "%conda_env_name%"=="" SET conda_env_name="azure_automl_experimental"
IF "%automl_env_file%"=="" SET automl_env_file="automl_thin_client_env.yml"
IF NOT EXIST %automl_env_file% GOTO YmlMissing
IF "%CONDA_EXE%"=="" GOTO CondaMissing
call conda activate %conda_env_name% 2>nul:
if not errorlevel 1 (
echo Upgrading existing conda environment %conda_env_name%
call pip uninstall azureml-train-automl -y -q
call conda env update --name %conda_env_name% --file %automl_env_file%
if errorlevel 1 goto ErrorExit
) else (
call conda env create -f %automl_env_file% -n %conda_env_name%
)
call conda activate %conda_env_name% 2>nul:
if errorlevel 1 goto ErrorExit
call python -m ipykernel install --user --name %conda_env_name% --display-name "Python (%conda_env_name%)"
REM azureml.widgets is now installed as part of the pip install under the conda env.
REM Removing the old user install so that the notebooks will use the latest widget.
call jupyter nbextension uninstall --user --py azureml.widgets
echo.
echo.
echo ***************************************
echo * AutoML setup completed successfully *
echo ***************************************
IF NOT "%options%"=="nolaunch" (
echo.
echo Starting jupyter notebook - please run the configuration notebook
echo.
jupyter notebook --log-level=50 --notebook-dir='..\..'
)
goto End
:CondaMissing
echo Please run this script from an Anaconda Prompt window.
echo You can start an Anaconda Prompt window by
echo typing Anaconda Prompt on the Start menu.
echo If you don't see the Anaconda Prompt app, install Miniconda.
echo If you are running an older version of Miniconda or Anaconda,
echo you can upgrade using the command: conda update conda
goto End
:YmlMissing
echo File %automl_env_file% not found.
:ErrorExit
echo Install failed
:End
