Mirror of https://github.com/getredash/redash.git (synced 2025-12-25 10:00:45 -05:00)

Compare commits: v0.3.6+b37 ... v0.5.0+b65 (963 commits)
(Commit list not reproduced: the table rendered with only abbreviated commit SHA1s; the Author, Date, and message columns were empty in this snapshot.)
.gitignore (vendored; 9 changed lines)
@@ -4,10 +4,17 @@
.coverage
rd_ui/dist
.DS_Store
celerybeat-schedule*
.#*
\#*#
*~

# Vagrant related
.vagrant
Berksfile.lock
redash/dump.rdb
.env
.ruby-version
.ruby-version
venv

dump.rdb
.landscape.yaml (new file; 2 lines)
@@ -0,0 +1,2 @@
ignore-paths:
  - migrations
Makefile (3 changed lines)
@@ -6,7 +6,7 @@ FILENAME=$(CIRCLE_ARTIFACTS)/$(NAME).$(VERSION).tar.gz

deps:
	cd rd_ui && npm install
	cd rd_ui && npm install grunt-cli bower
	cd rd_ui && npm install -g bower grunt-cli
	cd rd_ui && bower install
	cd rd_ui && grunt build

@@ -19,3 +19,4 @@ upload:

test:
	nosetests --with-coverage --cover-package=redash tests/*.py
	cd rd_ui && grunt test
Two process files in Procfile format (file names not captured in this snapshot):

@@ -1,2 +1,2 @@
web: ./manage.py runserver -p $PORT
worker: ./manage.py runworkers
web: ./manage.py runserver -p $PORT --host 0.0.0.0
worker: ./bin/run celery worker --app=redash.worker --beat -Qqueries,celery,scheduled_queries

@@ -1,2 +1,2 @@
web: ./manage.py runserver -p $PORT --host 0.0.0.0 -d -r
worker: ./manage.py runworkers
worker: ./bin/run celery worker --app=redash.worker --beat -Qqueries,celery,scheduled_queries
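Both worker lines now hand the process to Celery's own CLI instead of the old `runworkers` loop. For orientation, `--app=redash.worker` points Celery at a module exposing a `Celery` application object; the sketch below is an illustrative assumption of what such a module minimally looks like, not redash's actual code (the broker URL and the task are placeholders):

# Hypothetical sketch of a module loadable via `celery worker --app=redash.worker`.
# The broker URL and the task are illustrative placeholders, not redash's real code.
from celery import Celery

celery = Celery('worker', broker='redis://localhost:6379/0')

@celery.task
def refresh_query(query_id):
    # The real project would execute the query and store its result here.
    return query_id

Given such a module, `--beat` runs the periodic-task scheduler inside the worker process, and `-Qqueries,celery,scheduled_queries` subscribes the worker to those three queues.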
README.md (68 changed lines)
@@ -1,72 +1,46 @@
# [_re:dash_](https://github.com/everythingme/redash)

<p align="center">
<img title="re:dash" src='https://raw.githubusercontent.com/EverythingMe/redash/screenshots/redash_logo.png' />

</p>
<p align="center">
<img title="Build Status" src='https://circleci.com/gh/EverythingMe/redash.png?circle-token=8a695aa5ec2cbfa89b48c275aea298318016f040'/>
</p>

**_re:dash_** is our take on freeing the data within our company in a way that will better fit our culture and usage patterns.

Prior to **_re:dash_**, we tried to use tranditional BI suites and discovered a set of bloated, technically challenged and slow tools/flows. What we were looking for was a more hacker'ish way to look at data, so we built one.
Prior to **_re:dash_**, we tried to use traditional BI suites and discovered a set of bloated, technically challenged and slow tools/flows. What we were looking for was a more hacker'ish way to look at data, so we built one.

**_re:dash_** was built to allow fast and easy access to billions of records, that we process and collect using Amazon Redshift ("petabyte scale data warehouse" that "speaks" PostgreSQL).
Today **_re:dash_** has support for querying multiple databases, including: Redshift, Google BigQuery, PostgreSQL, MySQL, Graphite and custom scripts.

**_re:dash_** consists of two parts:

1. **Query Editor**: think of [JS Fiddle](http://jsfiddle.net) for SQL queries. It's your way to share data in the organization in an open way, by sharing both the dataset and the query that generated it. This way everyone can peer review not only the resulting dataset but also the process that generated it. Also it's possible to fork it and generate new datasets and reach new insights.
2. **Dashboards/Visualizations**: once you have a dataset, you can create different visualizations out of it, and then combine several visualizations into a single dashboard. Currently it supports bar charts, pivot table and cohorts.
1. **Query Editor**: think of [JS Fiddle](http://jsfiddle.net) for SQL queries. It's your way to share data in the organization in an open way, by sharing both the dataset and the query that generated it. This way everyone can peer review not only the resulting dataset but also the process that generated it. Also it's possible to fork it and generate new datasets and reach new insights.
2. **Dashboards/Visualizations**: once you have a dataset, you can create different visualizations out of it, and then combine several visualizations into a single dashboard. Currently it supports charts, pivot table and cohorts.

This is the first release, which is more than usable but still has its rough edges and way to go to fulfill its full potential. The Query Editor part is quite solid, but the visualizations need more work to enrich them and to make them more user friendly.
**_re:dash_** is a work in progress and has its rough edges and way to go to fulfill its full potential. The Query Editor part is quite solid, but the visualizations need more work to enrich them and to make them more user friendly.

## Demo


You can try out the demo instance: http://rd-demo.herokuapp.com/ (login with any Google account).
You can try out the demo instance: http://demo.redash.io/ (login with any Google account).

## Getting Started

* [Setting up re:dash instance](https://github.com/EverythingMe/redash/wiki/Setting-up-re:dash-instance) (includes links to ready made AWS/GCE images).
* Additional documentation in the [Wiki](https://github.com/everythingme/redash/wiki).

Due to Heroku dev plan limits, it has a small database of flights (see schema [here](http://rd-demo.herokuapp.com/dashboard/schema)). Also due to another Heroku limitation, it is running with the regular user, hence you can DELETE or INSERT data/tables. Please be nice and don't do this.

## Getting help

* [Google Group (mailing list)](https://groups.google.com/forum/#!forum/redash-users): the best place to get updates about new releases or ask general questions.
* #redash IRC channel on [Freenode](http://www.freenode.net/).

## Technology

* Python
* [AngularJS](http://angularjs.org/)
* [PostgreSQL](http://www.postgresql.org/) / [AWS Redshift](http://aws.amazon.com/redshift/)
* [Redis](http://redis.io)

PostgreSQL is used both as the operatinal database for the system, but also as the data store that is being queried. To be exact, we built this system to use on top of Amazon's Redshift, which supports the PG driver. But it's quite simple to add support for other datastores, and we do plan to do so.

This is our first large scale AngularJS project, and we learned a lot during the development of it. There are still things we need to iron out, and comments on the way we use AngularJS are more than welcome (and pull requests just as well).

### HighCharts

HighCharts is really great, but it's not free for commercial use. Please refer to their [licensing options](http://shop.highsoft.com/highcharts.html), to see what applies for your use.

It's very likely that in the future we will switch to [D3.js](http://d3js.org/) instead.

## Getting Started

* [Setting up re:dash on Heroku in 5 minutes](https://github.com/EverythingMe/redash/wiki/Setting-up-re:dash-on-Heroku-in-5-minutes)
* [Setting re:dash on your own server (Ubuntu)](https://github.com/EverythingMe/redash/wiki/Setting-re:dash-on-your-own-server-(Ubuntu))

**Need help setting re:dash or one of the dependencies up?** Ping @arikfr on the IRC #redash channel or send a message to the [mailing list](https://groups.google.com/forum/#!forum/redash-users), and he will gladly help.
* Find us [on gitter](https://gitter.im/EverythingMe/redash#) (chat).
* Contact Arik, the maintainer directly: arik@everything.me.

## Roadmap

Below you can see the "big" features of the next 3 releases (for full list, click on the link):

### [v0.3](https://github.com/EverythingMe/redash/issues?milestone=2&state=open)

- Dashboard filters: ability to filter/slice the data you see in a single dashboard using filters (date or selectors).
- Multiple databases support (including other database type than PostgreSQL).
- Scheduled reports by email.
- Comments on queries.

### [v0.4](https://github.com/EverythingMe/redash/issues?milestone=3&state=open)

- Query versioning.
- More "realtime" UI (using websockets).
- More visualizations.
TBD.

## Reporting Bugs and Contributing Code
Vagrantfile (vendored; new file; 11 lines)
@@ -0,0 +1,11 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = "redash/dev"
  config.vm.synced_folder "./", "/opt/redash/current"
  config.vm.network "forwarded_port", guest: 5000, host: 9001
end
A CI configuration file (file name not captured; indentation restored):

@@ -1,7 +1,7 @@
machine:
  node:
    version: 0.10.22
    version: 0.10.24
  python:
    version: 2.7.3

@@ -17,9 +17,12 @@ test:
  override:
    - make test
  post:
    - make pack
    - make pack
deployment:
  github:
    branch: master
    commands:
      - make upload
notify:
  webhooks:
    - url: https://webhooks.gitter.im/e/895d09c3165a0913ac2f
manage.py (131 changed lines)
@@ -2,17 +2,19 @@
"""
CLI to manage redash.
"""
import signal
import logging
import time
from redash import settings, app, db, models, data_manager, __version__
from flask.ext.script import Manager

from redash import settings, models, __version__
from redash.wsgi import app
from redash.import_export import import_manager
from flask.ext.script import Manager, prompt_pass
from redash.cli import users, database, data_sources

manager = Manager(app)
database_manager = Manager(help="Manages the database (create/drop tables).")
users_manager = Manager(help="Users management commands.")
data_sources_manager = Manager(help="Data sources management commands.")
manager.add_command("database", database.manager)
manager.add_command("users", users.manager)
manager.add_command("import", import_manager)
manager.add_command("ds", data_sources.manager)


@manager.command
def version():

@@ -22,40 +24,19 @@ def version():

@manager.command
def runworkers():
    """Starts the re:dash query executors/workers."""

    def stop_handler(signum, frame):
        logging.warning("Exiting; waiting for workers")
        data_manager.stop_workers()
        exit()

    signal.signal(signal.SIGTERM, stop_handler)
    signal.signal(signal.SIGINT, stop_handler)

    old_workers = data_manager.redis_connection.smembers('workers')
    data_manager.redis_connection.delete('workers')

    logging.info("Cleaning old workers: %s", old_workers)

    data_manager.start_workers(settings.WORKERS_COUNT)
    logging.info("Workers started.")

    while True:
        try:
            data_manager.refresh_queries()
            data_manager.report_status()
        except Exception as e:
            logging.error("Something went wrong with refreshing queries...")
            logging.exception(e)
        time.sleep(60)
    """Start workers (deprecated)."""
    print "** This command is deprecated. Please use Celery's CLI to control the workers. **"


@manager.shell
def make_shell_context():
    from redash.models import db
    return dict(app=app, db=db, models=models)


@manager.command
def check_settings():
    """Show the settings as re:dash sees them (useful for debugging)."""
    from types import ModuleType

    for name in dir(settings):

@@ -63,86 +44,6 @@ def check_settings():
        if not callable(item) and not name.startswith("__") and not isinstance(item, ModuleType):
            print "{} = {}".format(name, item)

@database_manager.command
def create_tables():
    """Creates the database tables."""
    from redash.models import create_db

    create_db(True, False)

@database_manager.command
def drop_tables():
    """Drop the database tables."""
    from redash.models import create_db

    create_db(False, True)


@users_manager.option('email', help="User's email")
@users_manager.option('name', help="User's full name")
@users_manager.option('--admin', dest='is_admin', action="store_true", default=False, help="set user as admin")
@users_manager.option('--google', dest='google_auth', action="store_true", default=False, help="user uses Google Auth to login")
@users_manager.option('--password', dest='password', default=None, help="Password for users who don't use Google Auth (leave blank for prompt).")
@users_manager.option('--permissions', dest='permissions', default=models.User.DEFAULT_PERMISSIONS, help="Comma seperated list of permissions (leave blank for default).")
def create(email, name, permissions, is_admin=False, google_auth=False, password=None):
    print "Creating user (%s, %s)..." % (email, name)
    print "Admin: %r" % is_admin
    print "Login with Google Auth: %r\n" % google_auth
    if isinstance(permissions, basestring):
        permissions = permissions.split(',')
        permissions.remove('')  # in case it was empty string

    if is_admin:
        permissions += ['admin']

    user = models.User(email=email, name=name, permissions=permissions)
    if not google_auth:
        password = password or prompt_pass("Password")
        user.hash_password(password)

    try:
        user.save()
    except Exception, e:
        print "Failed creating user: %s" % e.message


@users_manager.option('email', help="email address of user to delete")
def delete(email):
    deleted_count = models.User.delete().where(models.User.email == email).execute()
    print "Deleted %d users." % deleted_count

@data_sources_manager.command
def import_from_settings(name=None):
    """Import data source from settings (env variables)."""
    name = name or "Default"
    data_source = models.DataSource.create(name=name,
                                           type=settings.CONNECTION_ADAPTER,
                                           options=settings.CONNECTION_STRING)

    print "Imported data source from settings (id={}).".format(data_source.id)


@data_sources_manager.command
def list():
    """List currently configured data sources"""
    for ds in models.DataSource.select():
        print "Name: {}\nType: {}\nOptions: {}".format(ds.name, ds.type, ds.options)

@data_sources_manager.command
def new(name, type, options):
    """Create new data source"""
    # TODO: validate it's a valid type and in the future, validate the options.
    print "Creating {} data source ({}) with options:\n{}".format(type, name, options)
    data_source = models.DataSource.create(name=name,
                                           type=type,
                                           options=options)
    print "Id: {}".format(data_source.id)


manager.add_command("database", database_manager)
manager.add_command("users", users_manager)
manager.add_command("import", import_manager)
manager.add_command("ds", data_sources_manager)

if __name__ == '__main__':
    manager.run()
    manager.run()
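The restructured manage.py moves each command group into redash.cli and registers it through Flask-Script sub-managers. For orientation, here is a minimal self-contained sketch of that registration pattern; the Flask app and the `greet` command are illustrative placeholders, not redash's actual modules:

# Minimal sketch of the Flask-Script sub-manager pattern used by manage.py.
# The Flask app and the `greet` command are illustrative placeholders.
from flask import Flask
from flask.ext.script import Manager

app = Flask(__name__)
manager = Manager(app)
users_manager = Manager(help="Users management commands.")

@users_manager.command
def greet(name):
    """Example sub-command; invoked as: ./manage.py users greet Alice"""
    print "Hello, %s" % name

manager.add_command("users", users_manager)

if __name__ == '__main__':
    manager.run()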
migrations/0001_allow_delete_query.py (new file; 12 lines)
@@ -0,0 +1,12 @@
from playhouse.migrate import Migrator
from redash.models import db
from redash import models

if __name__ == '__main__':
    db.connect_db()
    migrator = Migrator(db.database)

    with db.database.transaction():
        migrator.add_column(models.Query, models.Query.is_archived, 'is_archived')

    db.close_db(None)
Another migration script (file name not captured):

@@ -1,6 +1,6 @@
from playhouse.migrate import Migrator
from redash import db
from redash import models
from redash.models import db


if __name__ == '__main__':

@@ -9,4 +9,4 @@ if __name__ == '__main__':
    with db.database.transaction():
        migrator.add_column(models.Dashboard, models.Dashboard.dashboard_filters_enabled, 'dashboard_filters_enabled')

    db.close_db(None)
    db.close_db(None)
migrations/add_queue_name_to_data_source.py (new file; 13 lines)
@@ -0,0 +1,13 @@
from playhouse.migrate import Migrator
from redash.models import db
from redash import models


if __name__ == '__main__':
    db.connect_db()
    migrator = Migrator(db.database)
    with db.database.transaction():
        migrator.add_column(models.DataSource, models.DataSource.queue_name, 'queue_name')
        migrator.add_column(models.DataSource, models.DataSource.scheduled_queue_name, 'scheduled_queue_name')

    db.close_db(None)
Another migration script (file name not captured):

@@ -1,5 +1,5 @@
from playhouse.migrate import Migrator
from redash import db
from redash.models import db
from redash import models


@@ -10,4 +10,4 @@ if __name__ == '__main__':
    migrator.add_column(models.Widget, models.Widget.text, 'text')
    migrator.set_nullable(models.Widget, models.Widget.visualization, True)

    db.close_db(None)
    db.close_db(None)
migrations/create_events.py (new file; 12 lines)
@@ -0,0 +1,12 @@
from redash.models import db
from redash import models


if __name__ == '__main__':
    db.connect_db()

    if not models.Event.table_exists():
        print "Creating events table..."
        models.Event.create_table()

    db.close_db(None)
migrations/permissions_migration.py (new file; 29 lines)
@@ -0,0 +1,29 @@
import peewee
from playhouse.migrate import Migrator
from redash import models
from redash.models import db


if __name__ == '__main__':
    db.connect_db()
    migrator = Migrator(db.database)

    if not models.Group.table_exists():
        print "Creating groups table..."
        models.Group.create_table()

    with db.database.transaction():
        models.Group.insert(name='admin', permissions=['admin'], tables=['*']).execute()
        models.Group.insert(name='api', permissions=['view_query'], tables=['*']).execute()
        models.Group.insert(name='default', permissions=models.Group.DEFAULT_PERMISSIONS, tables=['*']).execute()

        migrator.add_column(models.User, models.User.groups, 'groups')

        models.User.update(groups=['admin', 'default']).where(peewee.SQL("is_admin = true")).execute()
        models.User.update(groups=['admin', 'default']).where(peewee.SQL("'admin' = any(permissions)")).execute()
        models.User.update(groups=['default']).where(peewee.SQL("is_admin = false")).execute()

        migrator.drop_column(models.User, 'permissions')
        migrator.drop_column(models.User, 'is_admin')

    db.close_db(None)
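Each of the migration scripts above is a stand-alone program following one skeleton: connect, apply the schema change inside a transaction, disconnect. A generic restatement of that shared pattern, with hypothetical model and field names (SomeModel and new_field are placeholders, not real redash models):

# Shared skeleton of the one-off migration scripts above; each is run
# directly, e.g. `python migrations/create_events.py`.
# SomeModel and new_field are hypothetical placeholders.
from playhouse.migrate import Migrator
from redash.models import db
from redash import models


if __name__ == '__main__':
    db.connect_db()
    migrator = Migrator(db.database)

    with db.database.transaction():
        # peewee 2.x signature: add_column(model, field_instance, column_name)
        migrator.add_column(models.SomeModel, models.SomeModel.new_field, 'new_field')

    db.close_db(None)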
A Travis CI configuration (file name not captured):

@@ -1,6 +1,5 @@
language: node_js
node_js:
  - '0.8'
  - '0.10'
before_script:
  - 'npm install -g bower grunt-cli'
A Grunt build configuration (file name not captured; indentation restored):

@@ -1,10 +1,5 @@
// Generated on 2013-08-25 using generator-angular 0.4.0
// Generated on 2014-07-30 using generator-angular 0.9.2
'use strict';
var LIVERELOAD_PORT = 35729;
var lrSnippet = require('connect-livereload')({ port: LIVERELOAD_PORT });
var mountFolder = function (connect, dir) {
  return connect.static(require('path').resolve(dir));
};

// # Globbing
// for performance reasons we're only matching one level down:

@@ -13,48 +8,148 @@ var mountFolder = function (connect, dir) {
// 'test/spec/**/*.js'

module.exports = function (grunt) {

  // Load grunt tasks automatically
  require('load-grunt-tasks')(grunt);

  // Time how long tasks take. Can help when optimizing build times
  require('time-grunt')(grunt);

  // configurable paths
  var yeomanConfig = {
    app: 'app',
  // Configurable paths for the application
  var appConfig = {
    app: require('./bower.json').appPath || 'app',
    dist: 'dist'
  };

  try {
    yeomanConfig.app = require('./bower.json').appPath || yeomanConfig.app;
  } catch (e) {}

  // Define the configuration for all the tasks
  grunt.initConfig({
    yeoman: yeomanConfig,

    // Project settings
    yeoman: appConfig,

    // Watches files for changes and runs tasks based on the changed files
    watch: {
      coffee: {
        files: ['<%= yeoman.app %>/scripts/{,*/}*.coffee'],
        tasks: ['coffee:dist']
      bower: {
        files: ['bower.json'],
        tasks: ['wiredep']
      },
      coffeeTest: {
        files: ['test/spec/{,*/}*.coffee'],
        tasks: ['coffee:test']
      js: {
        files: ['<%= yeoman.app %>/scripts/{,*/}*.js'],
        tasks: ['newer:jshint:all'],
        options: {
          livereload: '<%= connect.options.livereload %>'
        }
      },
      jsTest: {
        files: ['test/spec/{,*/}*.js'],
        tasks: ['newer:jshint:test', 'karma']
      },
      styles: {
        files: ['<%= yeoman.app %>/styles/{,*/}*.css'],
        tasks: ['copy:styles', 'autoprefixer']
        tasks: ['newer:copy:styles', 'autoprefixer']
      },
      gruntfile: {
        files: ['Gruntfile.js']
      },
      livereload: {
        options: {
          livereload: LIVERELOAD_PORT
          livereload: '<%= connect.options.livereload %>'
        },
        files: [
          '<%= yeoman.app %>/{,*/}*.html',
          '.tmp/styles/{,*/}*.css',
          '{.tmp,<%= yeoman.app %>}/scripts/{,*/}*.js',
          '<%= yeoman.app %>/images/{,*/}*.{png,jpg,jpeg,gif,webp,svg}'
        ]
      }
    },

    // The actual grunt server settings
    connect: {
      options: {
        port: 9000,
        // Change this to '0.0.0.0' to access the server from outside.
        hostname: 'localhost',
        livereload: 35729
      },
      livereload: {
        options: {
          open: true,
          middleware: function (connect) {
            return [
              connect.static('.tmp'),
              connect().use(
                '/bower_components',
                connect.static('./bower_components')
              ),
              connect.static(appConfig.app)
            ];
          }
        }
      },
      test: {
        options: {
          port: 9001,
          middleware: function (connect) {
            return [
              connect.static('.tmp'),
              connect.static('test'),
              connect().use(
                '/bower_components',
                connect.static('./bower_components')
              ),
              connect.static(appConfig.app)
            ];
          }
        }
      },
      dist: {
        options: {
          open: true,
          base: '<%= yeoman.dist %>'
        }
      }
    },

    // Make sure code styles are up to par and there are no obvious mistakes
    jshint: {
      options: {
        jshintrc: '.jshintrc',
        reporter: require('jshint-stylish')
      },
      all: {
        src: [
          'Gruntfile.js',
          '<%= yeoman.app %>/scripts/{,*/}*.js'
        ]
      },
      test: {
        options: {
          jshintrc: 'test/.jshintrc'
        },
        src: ['test/spec/{,*/}*.js']
      }
    },

    // Empties folders to start fresh
    clean: {
      dist: {
        files: [{
          dot: true,
          src: [
            '.tmp',
            '<%= yeoman.dist %>/{,*/}*',
            '!<%= yeoman.dist %>/.git*'
          ]
        }]
      },
      server: '.tmp'
    },

    // Add vendor prefixed styles
    autoprefixer: {
      options: ['last 1 version'],
      options: {
        browsers: ['last 1 version']
      },
      dist: {
        files: [{
          expand: true,

@@ -64,134 +159,94 @@ module.exports = function (grunt) {
        }]
      }
    },
    connect: {

    // Automatically inject Bower components into the app
    wiredep: {
      options: {
        port: 9000,
        // Change this to '0.0.0.0' to access the server from outside.
        hostname: 'localhost'
      },
      livereload: {
        options: {
          middleware: function (connect) {
            return [
              lrSnippet,
              mountFolder(connect, '.tmp'),
              mountFolder(connect, yeomanConfig.app)
            ];
          }
        }
      },
      test: {
        options: {
          middleware: function (connect) {
            return [
              mountFolder(connect, '.tmp'),
              mountFolder(connect, 'test')
            ];
          }
        }
      },
      dist: {
        options: {
          middleware: function (connect) {
            return [
              mountFolder(connect, yeomanConfig.dist)
            ];
          }
        }
      app: {
        src: ['<%= yeoman.app %>/index.html'],
        ignorePath: /\.\.\//
      }
    },
    open: {
      server: {
        url: 'http://localhost:<%= connect.options.port %>'
      }
    },
    clean: {
      dist: {
        files: [{
          dot: true,
          src: [
            '.tmp',
            '<%= yeoman.dist %>/*',
            '!<%= yeoman.dist %>/.git*'
          ]
        }]
      },
      server: '.tmp'
    },
    jshint: {
      options: {
        jshintrc: '.jshintrc'
      },
      all: [
        'Gruntfile.js',
        '<%= yeoman.app %>/scripts/{,*/}*.js'
      ]
    },
    coffee: {
      options: {
        sourceMap: true,
        sourceRoot: ''
      },
      dist: {
        files: [{
          expand: true,
          cwd: '<%= yeoman.app %>/scripts',
          src: '{,*/}*.coffee',
          dest: '.tmp/scripts',
          ext: '.js'
        }]
      },
      test: {
        files: [{
          expand: true,
          cwd: 'test/spec',
          src: '{,*/}*.coffee',
          dest: '.tmp/spec',
          ext: '.js'
        }]
      }
    },
    // not used since Uglify task does concat,
    // but still available if needed
    /*concat: {
      dist: {}
    },*/
    rev: {
      dist: {
        files: {
          src: [
            '<%= yeoman.dist %>/scripts/{,*/}*.js',
            '<%= yeoman.dist %>/styles/{,*/}*.css',
            '<%= yeoman.dist %>/images/{,*/}*.{png,jpg,jpeg,gif,webp,svg}',
            '<%= yeoman.dist %>/styles/fonts/*'
          ]
        }

    // Renames files for browser caching purposes
    filerev: {
      dist: {
        src: [
          '<%= yeoman.dist %>/scripts/{,*/}*.js',
          '<%= yeoman.dist %>/styles/{,*/}*.css',
          '<%= yeoman.dist %>/images/{,*/}*.{png,jpg,jpeg,gif,webp,svg}',
          '<%= yeoman.dist %>/styles/fonts/*'
        ]
      }
    },

    // Reads HTML for usemin blocks to enable smart builds that automatically
    // concat, minify and revision files. Creates configurations in memory so
    // additional tasks can operate on them
    useminPrepare: {
      html: ['<%= yeoman.app %>/index.html', '<%= yeoman.app %>/login.html'],
      options: {
        dest: '<%= yeoman.dist %>'
        dest: '<%= yeoman.dist %>',
        flow: {
          html: {
            steps: {
              js: ['concat', 'uglifyjs'],
              css: ['cssmin']
            },
            post: {}
          }
        }
      }
    },

    // Performs rewrites based on filerev and the useminPrepare configuration
    usemin: {
      html: ['<%= yeoman.dist %>/{,*/}*.html'],
      css: ['<%= yeoman.dist %>/styles/{,*/}*.css'],
      options: {
        dirs: ['<%= yeoman.dist %>']
        assetsDirs: ['<%= yeoman.dist %>','<%= yeoman.dist %>/images']
      }
    },

    // The following *-min tasks will produce minified files in the dist folder
    // By default, your `index.html`'s <!-- Usemin block --> will take care of
    // minification. These next options are pre-configured if you do not wish
    // to use the Usemin blocks.
    // cssmin: {
    // dist: {
    // files: {
    // '<%= yeoman.dist %>/styles/main.css': [
    // '.tmp/styles/{,*/}*.css'
    // ]
    // }
    // }
    // },
    // uglify: {
    // dist: {
    // files: {
    // '<%= yeoman.dist %>/scripts/scripts.js': [
    // '<%= yeoman.dist %>/scripts/scripts.js'
    // ]
    // }
    // }
    // },
    // concat: {
    // dist: {}
    // },

    imagemin: {
      dist: {
        files: [{
          expand: true,
          cwd: '<%= yeoman.app %>/images',
          src: '{,*/}*.{png,jpg,jpeg}',
          src: '{,*/}*.{png,jpg,jpeg,gif}',
          dest: '<%= yeoman.dist %>/images'
        }]
      }
    },

    svgmin: {
      dist: {
        files: [{

@@ -202,41 +257,47 @@ module.exports = function (grunt) {
        }]
      }
    },
    cssmin: {
      // By default, your `index.html` <!-- Usemin Block --> will take care of
      // minification. This option is pre-configured if you do not wish to use
      // Usemin blocks.
      // dist: {
      // files: {
      // '<%= yeoman.dist %>/styles/main.css': [
      // '.tmp/styles/{,*/}*.css',
      // '<%= yeoman.app %>/styles/{,*/}*.css'
      // ]
      // }
      // }
    },

    htmlmin: {
      dist: {
        options: {
          /*removeCommentsFromCDATA: true,
          // https://github.com/yeoman/grunt-usemin/issues/44
          //collapseWhitespace: true,
          collapseWhitespace: true,
          conservativeCollapse: true,
          collapseBooleanAttributes: true,
          removeAttributeQuotes: true,
          removeRedundantAttributes: true,
          useShortDoctype: true,
          removeEmptyAttributes: true,
          removeOptionalTags: true*/
          removeCommentsFromCDATA: true,
          removeOptionalTags: true
        },
        files: [{
          expand: true,
          cwd: '<%= yeoman.app %>',
          src: ['*.html', 'views/**/*.html'],
          cwd: '<%= yeoman.dist %>',
          src: ['*.html', 'views/{,*/}*.html'],
          dest: '<%= yeoman.dist %>'
        }]
      }
    },
    // Put files not handled in other tasks here

    // ngmin tries to make the code safe for minification automatically by
    // using the Angular long form for dependency injection. It doesn't work on
    // things like resolve or inject so those have to be done manually.
    ngmin: {
      dist: {
        files: [{
          expand: true,
          cwd: '.tmp/concat/scripts',
          src: '*.js',
          dest: '.tmp/concat/scripts'
        }]
      }
    },

    // Replace Google CDN references
    cdnify: {
      dist: {
        html: ['<%= yeoman.dist %>/*.html']
      }
    },

    // Copies remaining files to places other tasks can use
    copy: {
      dist: {
        files: [{

@@ -247,18 +308,21 @@ module.exports = function (grunt) {
          src: [
            '*.{ico,png,txt}',
            '.htaccess',
            'bower_components/**/*',
            'images/{,*/}*.{gif,webp}',
            'styles/{,*/}*.{png,gif}',
            '*.html',
            'views/{,*/}*.html',
            'images/{,*/}*.{webp}',
            'fonts/*'
          ]
        }, {
          expand: true,
          cwd: '.tmp/images',
          dest: '<%= yeoman.dist %>/images',
          src: [
            'generated/*'
          ]
          src: ['generated/*']
        }, {
          expand: true,
          cwd: 'bower_components/bootstrap/dist',
          src: 'fonts/*',
          dest: '<%= yeoman.dist %>'
        }]
      },
      styles: {

@@ -268,70 +332,52 @@ module.exports = function (grunt) {
        src: '{,*/}*.css'
      }
    },

    // Run some tasks in parallel to speed up the build process
    concurrent: {
      server: [
        'coffee:dist',
        'copy:styles'
      ],
      test: [
        'coffee',
        'copy:styles'
      ],
      dist: [
        'coffee',
        'copy:styles',
        'imagemin',
        'svgmin',
        'htmlmin'
        'svgmin'
      ]
    },

    // Test settings
    karma: {
      unit: {
        configFile: 'karma.conf.js',
        configFile: 'test/karma.conf.js',
        singleRun: true
      }
    },
    cdnify: {
      dist: {
        html: ['<%= yeoman.dist %>/*.html']
      }
    },
    ngmin: {
      dist: {
        files: [{
          expand: true,
          cwd: '<%= yeoman.dist %>/scripts',
          src: '*.js',
          dest: '<%= yeoman.dist %>/scripts'
        }]
      }
    },
    uglify: {
      dist: {
        files: {
          '<%= yeoman.dist %>/scripts/scripts.js': [
            '<%= yeoman.dist %>/scripts/scripts.js'
          ]
        }
      }
    }
  });

  grunt.registerTask('server', function (target) {

  grunt.registerTask('serve', 'Compile then start a connect web server', function (target) {
    if (target === 'dist') {
      return grunt.task.run(['build', 'open', 'connect:dist:keepalive']);
      return grunt.task.run(['build', 'connect:dist:keepalive']);
    }

    grunt.task.run([
      'clean:server',
      'wiredep',
      'concurrent:server',
      'autoprefixer',
      'connect:livereload',
      'open',
      'watch'
    ]);
  });

  grunt.registerTask('server', 'DEPRECATED TASK. Use the "serve" task instead', function (target) {
    grunt.log.warn('The `server` task has been deprecated. Use `grunt serve` to start a server.');
    grunt.task.run(['serve:' + target]);
  });

  grunt.registerTask('test', [
    'clean:server',
    'concurrent:test',

@@ -342,21 +388,23 @@ module.exports = function (grunt) {

  grunt.registerTask('build', [
    'clean:dist',
    'wiredep',
    'useminPrepare',
    'concurrent:dist',
    'autoprefixer',
    'concat',
    'ngmin',
    'copy:dist',
    'cdnify',
    'ngmin',
    'cssmin',
    'uglify',
    'rev',
    'usemin'
    'filerev',
    'usemin',
    'htmlmin'
  ]);

  grunt.registerTask('default', [
    'jshint',
    'newer:jshint',
    'test',
    'build'
  ]);
Binary files added (contents not shown):
rd_ui/app/favicon.ico (new file; 1.1 KiB)
rd_ui/app/google_login.png (new file; 18 KiB)
An HTML template (file name not captured; the content is the app's main page shell):

@@ -12,9 +12,12 @@
  <link rel="stylesheet" href="/bower_components/bootstrap/dist/css/bootstrap.css">
  <link rel="stylesheet" href="/bower_components/codemirror/lib/codemirror.css">
  <link rel="stylesheet" href="/bower_components/gridster/dist/jquery.gridster.css">
  <link rel="stylesheet" href="/bower_components/pivottable/examples/pivot.css">
  <link rel="stylesheet" href="/bower_components/pivottable/dist/pivot.css">
  <link rel="stylesheet" href="/bower_components/cornelius/src/cornelius.css">
  <link rel="stylesheet" href="/bower_components/select2/select2.css">
  <link rel="stylesheet" href="/bower_components/angular-ui-select/dist/select.css">
  <link rel="stylesheet" href="/bower_components/pace/themes/pace-theme-minimal.css">
  <link rel="stylesheet" href="/bower_components/font-awesome/css/font-awesome.css">
  <link rel="stylesheet" href="/styles/redash.css">
  <!-- endbuild -->
</head>

@@ -36,7 +39,7 @@
<div class="collapse navbar-collapse navbar-ex1-collapse">
  <ul class="nav navbar-nav">
    <li class="active" ng-show="pageTitle"><a class="page-title" ng-bind="pageTitle"></a></li>
    <li class="dropdown">
    <li class="dropdown" ng-show="groupedDashboards.length > 0 || otherDashboards.length > 0 || currentUser.hasPermission('create_dashboard')">
      <a href="#" class="dropdown-toggle" data-toggle="dropdown"><span class="glyphicon glyphicon-th-large"></span> <b class="caret"></b></a>
      <ul class="dropdown-menu">
        <span ng-repeat="(name, group) in groupedDashboards">

@@ -52,7 +55,7 @@
        <li ng-repeat="dashboard in otherDashboards">
          <a role="menu-item" ng-href="/dashboard/{{dashboard.slug}}" ng-bind="dashboard.name"></a>
        </li>
        <li class="divider" ng-show="currentUser.hasPermission('create_dashboard')"></li>
        <li class="divider" ng-show="currentUser.hasPermission('create_dashboard') && (groupedDashboards.length > 0 || otherDashboards.length > 0)"></li>
        <li><a data-toggle="modal" href="#new_dashboard_dialog" ng-show="currentUser.hasPermission('create_dashboard')">New Dashboard</a></li>
      </ul>
    </li>

@@ -64,6 +67,12 @@
      </ul>
    </li>
  </ul>
  <form class="navbar-form navbar-left" role="search" ng-submit="searchQueries()">
    <div class="form-group">
      <input type="text" ng-model="term" class="form-control" placeholder="Search queries...">
    </div>
    <button type="submit" class="btn btn-default"><span class="glyphicon glyphicon-search"></span></button>
  </form>
  <ul class="nav navbar-nav navbar-right">
    <p class="navbar-text avatar" ng-show="currentUser.id" ng-cloak>
      <img ng-src="{{currentUser.gravatar_url}}" class="img-circle" alt="{{currentUser.name}}"/>

@@ -103,18 +112,20 @@
<script src="/bower_components/highcharts/modules/exporting.js"></script>
<script src="/bower_components/gridster/dist/jquery.gridster.js"></script>
<script src="/bower_components/angular-growl/build/angular-growl.js"></script>
<script src="/bower_components/pivottable/examples/pivot.js"></script>
<script src="/bower_components/pivottable/dist/pivot.js"></script>
<script src="/bower_components/cornelius/src/cornelius.js"></script>
<script src="/bower_components/mousetrap/mousetrap.js"></script>
<script src="/bower_components/mousetrap/plugins/global-bind/mousetrap-global-bind.js"></script>
<script src="/bower_components/select2/select2.js"></script>
<script src="/bower_components/angular-ui-select2/src/select2.js"></script>
<script src="/bower_components/angular-ui-select/dist/select.js"></script>
<script src="/bower_components/underscore.string/lib/underscore.string.js"></script>
<script src="/bower_components/marked/lib/marked.js"></script>
<script src="/scripts/ng_highchart.js"></script>
<script src="/scripts/ng_smart_table.js"></script>
<script src="/scripts/ui-bootstrap-tpls-0.5.0.min.js"></script>
<script src="/bower_components/bucky/bucky.js"></script>
<script src="/bower_components/pace/pace.js"></script>
<!-- endbuild -->

<!-- build:js({.tmp,app}) /scripts/scripts.js -->

@@ -131,6 +142,7 @@
<script src="/scripts/visualizations/base.js"></script>
<script src="/scripts/visualizations/chart.js"></script>
<script src="/scripts/visualizations/cohort.js"></script>
<script src="/scripts/visualizations/counter.js"></script>
<script src="/scripts/visualizations/table.js"></script>
<script src="/scripts/visualizations/pivot.js"></script>
<script src="/scripts/directives/directives.js"></script>

@@ -157,4 +169,4 @@
</script>

</body>
</html>
</html>
@@ -35,6 +35,19 @@
<div class="row">

<div class="main">
{% if show_google_openid %}

<div class="row">
<a href="/oauth/google?next={{next}}"><img src="/google_login.png" class="login-button"/></a>
</div>

<div class="login-or">
<hr class="hr-or">
<span class="span-or">or</span>
</div>

{% endif %}

<form role="form" method="post" name="login">
<div class="form-group">
<label for="inputUsernameEmail">Username or email</label>
@@ -56,20 +69,7 @@
</button>
</form>

{% if show_google_openid %}

<div class="login-or">
<hr class="hr-or">
<span class="span-or">or</span>
</div>

<div class="row">
<div class="col-xs-6 col-sm-6 col-md-6">
<a href="/google_auth/login?next={{next}}" class="btn btn-lg btn-info btn-block">Google</a>
</div>
</div>

{% endif %}
</div>

</div>

@@ -14,7 +14,8 @@ angular.module('redash', [
'ui.bootstrap',
'smartTable.table',
'ngResource',
'ngRoute'
'ngRoute',
'ui.select'
]).config(['$routeProvider', '$locationProvider', '$compileProvider', 'growlProvider',
function ($routeProvider, $locationProvider, $compileProvider, growlProvider) {
if (featureFlags.clientSideMetrics) {
@@ -37,7 +38,8 @@ angular.module('redash', [

$routeProvider.when('/dashboard/:dashboardSlug', {
templateUrl: '/views/dashboard.html',
controller: 'DashboardCtrl'
controller: 'DashboardCtrl',
reloadOnSearch: false
});
$routeProvider.when('/queries', {
templateUrl: '/views/queries.html',
@@ -54,6 +56,11 @@ angular.module('redash', [
}]
}
});
$routeProvider.when('/queries/search', {
templateUrl: '/views/queries_search_results.html',
controller: 'QuerySearchCtrl',
reloadOnSearch: true,
});
$routeProvider.when('/queries/:queryId', {
templateUrl: '/views/query.html',
controller: 'QueryViewCtrl',
@@ -74,14 +81,23 @@ angular.module('redash', [
templateUrl: '/views/admin_status.html',
controller: 'AdminStatusCtrl'
});
$routeProvider.when('/admin/workers', {
templateUrl: '/views/admin_workers.html',
controller: 'AdminWorkersCtrl'
});

$routeProvider.when('/', {
templateUrl: '/views/index.html',
controller: 'IndexCtrl'
});
$routeProvider.when('/personal', {
templateUrl: '/views/personal.html',
controller: 'PersonalIndexCtrl'
});
$routeProvider.otherwise({
redirectTo: '/'
});


}
]);
]);

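With reloadOnSearch: false on the dashboard route, query-string changes (such as dashboard filter state) no longer re-instantiate DashboardCtrl; a controller that needs to react can listen for Angular's $routeUpdate event instead. A minimal sketch (the handler body is illustrative, not from the repo):

// Hypothetical controller fragment: react to ?filters=... changes without a reload.
$scope.$on('$routeUpdate', function () {
  var search = $location.search();      // current query-string params
  applyFilterState(search.filters);     // assumed helper, not part of the repo
});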
@@ -16,9 +16,16 @@
$timeout(refresh, 59 * 1000);
};

$scope.flowerUrl = featureFlags.flowerUrl;

refresh();
}

var AdminWorkersCtrl = function ($scope, $sce) {
$scope.flowerUrl = $sce.trustAsResourceUrl(featureFlags.flowerUrl);
};

angular.module('redash.admin_controllers', [])
.controller('AdminStatusCtrl', ['$scope', 'Events', '$http', '$timeout', AdminStatusCtrl])
.controller('AdminWorkersCtrl', ['$scope', '$sce', AdminWorkersCtrl])
})();

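trustAsResourceUrl is what lets the workers page embed the Flower dashboard in an iframe: without it, Angular's Strict Contextual Escaping rejects an interpolated resource URL. A sketch of the consuming side (the markup is an assumption, not quoted from the repo):

// In admin_workers.html (assumed markup):
//   <iframe ng-src="{{flowerUrl}}" width="100%" height="600"></iframe>
// ng-src on an iframe requires a $sce-trusted resource URL, hence:
$scope.flowerUrl = $sce.trustAsResourceUrl(featureFlags.flowerUrl);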
@@ -1,12 +1,71 @@
(function () {
var QuerySearchCtrl = function($scope, $location, $filter, Events, Query) {
$scope.$parent.pageTitle = "Queries Search";

$scope.gridConfig = {
isPaginationEnabled: true,
itemsByPage: 50,
maxSize: 8,
};

var dateFormatter = function (value) {
if (!value) return "-";
return value.format("DD/MM/YY HH:mm");
}

$scope.gridColumns = [
{
"label": "Name",
"map": "name",
"cellTemplateUrl": "/views/queries_query_name_cell.html"
},
{
'label': 'Created By',
'map': 'user.name'
},
{
'label': 'Created At',
'map': 'created_at',
'formatFunction': dateFormatter
},
{
'label': 'Update Schedule',
'map': 'ttl',
'formatFunction': function (value) {
return $filter('refreshRateHumanize')(value);
}
}
];

$scope.queries = [];
$scope.$parent.term = $location.search().q;

Query.search({q: $scope.term }, function(results) {
$scope.queries = _.map(results, function(query) {
query.created_at = moment(query.created_at);
return query;
});
});

$scope.search = function() {
if (!angular.isString($scope.term) || $scope.term.trim() == "") {
$scope.queries = [];
return;
}

$location.search({q: $scope.term});
};

Events.record(currentUser, "search", "query", "", {"term": $scope.term});
};

var QueriesCtrl = function ($scope, $http, $location, $filter, Query) {
$scope.$parent.pageTitle = "All Queries";
$scope.gridConfig = {
isPaginationEnabled: true,
itemsByPage: 50,
maxSize: 8,
isGlobalSearchActivated: true
}
isGlobalSearchActivated: true};

$scope.allQueries = [];
$scope.queries = [];
@@ -35,7 +94,7 @@
Query.query(function (queries) {
$scope.allQueries = _.map(queries, function (query) {
query.created_at = moment(query.created_at);
query.last_retrieved_at = moment(query.last_retrieved_at);
query.retrieved_at = moment(query.retrieved_at);
return query;
});

@@ -58,35 +117,17 @@
'formatFunction': dateFormatter
},
{
'label': 'Runtime (avg)',
'map': 'avg_runtime',
'formatFunction': function (value) {
return $filter('durationHumanize')(value);
}
},
{
'label': 'Runtime (min)',
'map': 'min_runtime',
'formatFunction': function (value) {
return $filter('durationHumanize')(value);
}
},
{
'label': 'Runtime (max)',
'map': 'max_runtime',
'label': 'Runtime',
'map': 'runtime',
'formatFunction': function (value) {
return $filter('durationHumanize')(value);
}
},
{
'label': 'Last Executed At',
'map': 'last_retrieved_at',
'map': 'retrieved_at',
'formatFunction': dateFormatter
},
{
'label': 'Times Executed',
'map': 'times_retrieved'
},
{
'label': 'Update Schedule',
'map': 'ttl',
@@ -95,6 +136,7 @@
}
}
]

$scope.tabs = [
{"name": "My Queries", "key": "my"},
{"key": "all", "name": "All Queries"},
@@ -110,7 +152,7 @@
});
}

var MainCtrl = function ($scope, Dashboard, notifications) {
var MainCtrl = function ($scope, $location, Dashboard, notifications) {
if (featureFlags.clientSideMetrics) {
$scope.$on('$locationChangeSuccess', function(event, newLocation, oldLocation) {
// This will be called once per actual page load.
@@ -133,7 +175,11 @@
$scope.otherDashboards = $scope.allDashboards['Other'] || [];
$scope.groupedDashboards = _.omit($scope.allDashboards, 'Other');
});
}
};

$scope.searchQueries = function() {
$location.path('/queries/search').search({q: $scope.term});
};

$scope.reloadDashboards();

@@ -146,7 +192,7 @@
$(window).click(function () {
notifications.getPermissions();
});
}
};

var IndexCtrl = function ($scope, Events, Dashboard) {
Events.record(currentUser, "view", "page", "homepage");
@@ -160,10 +206,29 @@
});
}
}
}
};

var PersonalIndexCtrl = function ($scope, Events, Dashboard, Query) {
Events.record(currentUser, "view", "page", "personal_homepage");
$scope.$parent.pageTitle = "Home";

$scope.recentQueries = Query.recent();
$scope.recentDashboards = Dashboard.recent();

$scope.archiveDashboard = function (dashboard) {
if (confirm('Are you sure you want to delete "' + dashboard.name + '" dashboard?')) {
Events.record(currentUser, "archive", "dashboard", dashboard.id);
dashboard.$delete(function () {
$scope.$parent.reloadDashboards();
});
}
}
};

angular.module('redash.controllers', [])
.controller('QueriesCtrl', ['$scope', '$http', '$location', '$filter', 'Query', QueriesCtrl])
.controller('IndexCtrl', ['$scope', 'Events', 'Dashboard', IndexCtrl])
.controller('MainCtrl', ['$scope', 'Dashboard', 'notifications', MainCtrl]);
.controller('PersonalIndexCtrl', ['$scope', 'Events', 'Dashboard', 'Query', PersonalIndexCtrl])
.controller('MainCtrl', ['$scope', '$location', 'Dashboard', 'notifications', MainCtrl])
.controller('QuerySearchCtrl', ['$scope', '$location', '$filter', 'Events', 'Query', QuerySearchCtrl]);
})();

@@ -1,46 +1,67 @@
(function() {
var DashboardCtrl = function($scope, Events, Widget, $routeParams, $http, $timeout, Dashboard) {
Events.record(currentUser, "view", "dashboard", dashboard.id);

var DashboardCtrl = function($scope, Events, Widget, $routeParams, $location, $http, $timeout, $q, Dashboard) {
$scope.refreshEnabled = false;
$scope.refreshRate = 60;
$scope.dashboard = Dashboard.get({ slug: $routeParams.dashboardSlug }, function (dashboard) {
$scope.$parent.pageTitle = dashboard.name;
var filters = {};

$scope.dashboard.widgets = _.map($scope.dashboard.widgets, function (row) {
return _.map(row, function (widget) {
var w = new Widget(widget);
var loadDashboard = _.throttle(function() {
$scope.dashboard = Dashboard.get({ slug: $routeParams.dashboardSlug }, function (dashboard) {
Events.record(currentUser, "view", "dashboard", dashboard.id);

if (w.visualization && dashboard.dashboard_filters_enabled) {
var queryFilters = w.getQuery().getQueryResult().getFilters();
$scope.$parent.pageTitle = dashboard.name;

var promises = [];

$scope.dashboard.widgets = _.map($scope.dashboard.widgets, function (row) {
return _.map(row, function (widget) {
var w = new Widget(widget);

if (w.visualization) {
promises.push(w.getQuery().getQueryResultPromise());
}

return w;
});
});

$q.all(promises).then(function(queryResults) {
var filters = {};
_.each(queryResults, function(queryResult) {
var queryFilters = queryResult.getFilters();
_.each(queryFilters, function (filter) {
if (!_.has(filters, filter.name)) {
// TODO: first object should be a copy, otherwise one of the chart filters behaves different than the others.
filters[filter.name] = filter;
filters[filter.name].originFilters = [];
if (_.has($location.search(), filter.name)) {
filter.current = $location.search()[filter.name];
}

$scope.$watch(function() { return filter.current }, function (value) {
_.each(filter.originFilters, function(originFilter) {
$scope.$watch(function () { return filter.current }, function (value) {
_.each(filter.originFilters, function (originFilter) {
originFilter.current = value;
})
});
});

};
}

// TODO: merge values.
filters[filter.name].originFilters.push(filter);
});
}
});

return w;
$scope.filters = _.values(filters);
});
});

if (dashboard.dashboard_filters_enabled) {
$scope.filters = _.values(filters);
}
});

}, function () {
// error...
// try again. we wrap loadDashboard with throttle so it doesn't happen too often.\
// we might want to consider exponential backoff and also move this as a general solution in $http/$resource for
// all AJAX calls.
loadDashboard();
});
}, 1000);

loadDashboard();
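// The comment above floats exponential backoff as an alternative to the
// fixed 1s throttle. A minimal sketch of that idea (not from the repo;
// retryDelay and the 30s cap are illustrative assumptions):
var retryDelay = 1000;
var loadDashboardWithBackoff = function () {
  Dashboard.get({ slug: $routeParams.dashboardSlug }, function (dashboard) {
    retryDelay = 1000; // reset the delay on success
    // ... same success handler as above ...
  }, function () {
    retryDelay = Math.min(retryDelay * 2, 30000); // double, capped at 30s
    $timeout(loadDashboardWithBackoff, retryDelay);
  });
};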

var autoRefresh = function() {
if ($scope.refreshEnabled) {
@@ -54,7 +75,7 @@
_.each(row, function(widget, i) {
var newWidget = newWidgets[widget.id];
if (newWidget && newWidget[0].visualization.query.latest_query_data_id != widget.visualization.query.latest_query_data_id) {
row[i] = newWidget[0];
row[i] = new Widget(newWidget[0]);
}
});
});
@@ -63,8 +84,8 @@
});

}, $scope.refreshRate);
};
}
}
};

$scope.triggerRefresh = function() {
$scope.refreshEnabled = !$scope.refreshEnabled;
@@ -117,7 +138,7 @@
};

angular.module('redash.controllers')
.controller('DashboardCtrl', ['$scope', 'Events', 'Widget', '$routeParams', '$http', '$timeout', 'Dashboard', DashboardCtrl])
.controller('DashboardCtrl', ['$scope', 'Events', 'Widget', '$routeParams', '$location', '$http', '$timeout', '$q', 'Dashboard', DashboardCtrl])
.controller('WidgetCtrl', ['$scope', 'Events', 'Query', WidgetCtrl])

})();
})();

@@ -21,8 +21,13 @@
$scope.saveQuery();
}
},
// Cmd+Enter for Mac
'meta+enter': function () {
$scope.executeQuery();
},
// Ctrl+Enter for PC
'ctrl+enter': function () {
$scope.executeQuery();
}
};

@@ -32,6 +37,14 @@

$scope.newVisualization = undefined;

// @override
Object.defineProperty($scope, 'showDataset', {
get: function() {
return $scope.queryResult && $scope.queryResult.getStatus() == 'done';
}
});


KeyboardShortcuts.bind(shortcuts);

// @override
@@ -109,4 +122,4 @@
'Events', 'growl', '$controller', '$scope', '$location', 'Query',
'Visualization', 'KeyboardShortcuts', QuerySourceCtrl
]);
})();
})();

@@ -16,6 +16,10 @@
$scope.query.data_source_id = $scope.query.data_source_id || dataSources[0].id;
});

// in view mode, latest dataset is always visible
// source mode changes this behavior
$scope.showDataset = true;

$scope.lockButton = function(lock) {
$scope.queryExecuting = lock;
};
@@ -24,7 +28,7 @@
if (data) {
data.id = $scope.query.id;
} else {
data = $scope.query;
data = _.clone($scope.query);
}

options = _.extend({}, {
@@ -32,8 +36,8 @@
errorMessage: 'Query could not be saved'
}, options);

delete $scope.query.latest_query_data;
delete $scope.query.queryResult;
delete data.latest_query_data;
delete data.queryResult;

return Query.save(data, function() {
growl.addSuccessMessage(options.successMessage);
@@ -64,16 +68,45 @@
$scope.queryResult.cancelExecution();
Events.record(currentUser, 'cancel_execute', 'query', $scope.query.id);
};

$scope.archiveQuery = function(options, data) {
if (data) {
data.id = $scope.query.id;
} else {
data = $scope.query;
}

$scope.isDirty = false;

options = _.extend({}, {
successMessage: 'Query archived',
errorMessage: 'Query could not be archived'
}, options);

return Query.delete({id: data.id}, function() {
$scope.query.is_archived = true;
$scope.query.ttl = -1;
growl.addSuccessMessage(options.successMessage);
// This feels dirty.
$('#archive-confirmation-modal').modal('hide');
}, function(httpResponse) {
growl.addErrorMessage(options.errorMessage);
}).$promise;
}

$scope.updateDataSource = function() {
Events.record(currentUser, 'update_data_source', 'query', $scope.query.id);

$scope.query.latest_query_data = null;
$scope.query.latest_query_data_id = null;
Query.save({

if ($scope.query.id) {
Query.save({
'id': $scope.query.id,
'data_source_id': $scope.query.data_source_id,
'latest_query_data_id': null
});
});
}

$scope.executeQuery();
};
@@ -87,35 +120,12 @@
$scope.$parent.pageTitle = $scope.query.name;
});

$scope.$watch('queryResult && queryResult.getError()', function(newError, oldError) {
if (newError == undefined) {
return;
}

if (oldError == undefined && newError != undefined) {
$scope.lockButton(false);
}
});

$scope.$watch('queryResult && queryResult.getData()', function(data, oldData) {
if (!data) {
return;
}

$scope.filters = $scope.queryResult.getFilters();

if ($scope.queryResult.getId() == null) {
$scope.dataUri = "";
} else {
$scope.dataUri =
'/api/queries/' + $scope.query.id + '/results/' +
$scope.queryResult.getId() + '.csv';

$scope.dataFilename =
$scope.query.name.replace(" ", "_") +
moment($scope.queryResult.getUpdatedAt()).format("_YYYY_MM_DD") +
".csv";
}
});

$scope.$watch("queryResult && queryResult.getStatus()", function(status) {
@@ -123,7 +133,7 @@
return;
}

if (status == "done") {
if (status == 'done') {
if ($scope.query.id &&
$scope.query.latest_query_data_id != $scope.queryResult.getId() &&
$scope.query.query_hash == $scope.queryResult.query_result.query_hash) {
@@ -133,9 +143,12 @@
})
}
$scope.query.latest_query_data_id = $scope.queryResult.getId();
$scope.query.queryResult = $scope.queryResult;

notifications.showNotification("re:dash", $scope.query.name + " updated.");
}

if (status === 'done' || status === 'failed') {
$scope.lockButton(false);
}
});
@@ -153,4 +166,4 @@
angular.module('redash.controllers')
.controller('QueryViewCtrl',
['$scope', 'Events', '$route', '$location', 'notifications', 'growl', 'Query', 'DataSource', QueryViewCtrl]);
})();
})();

@@ -96,6 +96,11 @@
'name': $scope.dashboard.name
}).success(function(response) {
$(element).modal('hide');
$scope.dashboard = {
'name': null,
'layout': null
};
$scope.saveInProgress = false;
$location.path('/dashboard/' + response.slug).replace();
});
Events.record(currentUser, 'create', 'dashboard');
@@ -142,22 +147,22 @@
var reset = function() {
$scope.saveInProgress = false;
$scope.widgetSize = 1;
$scope.queryId = null;
$scope.selectedVis = null;
$scope.query = null;
$scope.query = {};
$scope.selected_query = undefined;
$scope.text = "";
};

reset();

$scope.loadVisualizations = function () {
if (!$scope.queryId) {
if (!$scope.query.selected) {
return;
}

Query.get({ id: $scope.queryId }, function(query) {
Query.get({ id: $scope.query.selected.id }, function(query) {
if (query) {
$scope.query = query;
$scope.selected_query = query;
if (query.visualizations.length) {
$scope.selectedVis = query.visualizations[0];
}
@@ -165,6 +170,20 @@
});
};

$scope.searchQueries = function (term) {
if (!term || term.length < 3) {
return;
}

Query.search({q: term}, function(results) {
$scope.queries = results;
});
};

$scope.$watch('query', function () {
$scope.loadVisualizations();
}, true);

$scope.saveWidget = function() {
$scope.saveInProgress = true;

@@ -1,219 +1,250 @@
(function() {
'use strict';
(function () {
'use strict';

var directives = angular.module('redash.directives', []);
var directives = angular.module('redash.directives', []);

directives.directive('alertUnsavedChanges', ['$window', function($window) {
return {
restrict: 'E',
replace: true,
scope: {
'isDirty': '='
},
link: function($scope) {
var
directives.directive('alertUnsavedChanges', ['$window', function ($window) {
return {
restrict: 'E',
replace: true,
scope: {
'isDirty': '='
},
link: function ($scope) {
var

unloadMessage = "You will lose your changes if you leave",
confirmMessage = unloadMessage + "\n\nAre you sure you want to leave this page?",
unloadMessage = "You will lose your changes if you leave",
confirmMessage = unloadMessage + "\n\nAre you sure you want to leave this page?",

// store original handler (if any)
_onbeforeunload = $window.onbeforeunload;
// store original handler (if any)
_onbeforeunload = $window.onbeforeunload;

$window.onbeforeunload = function() {
return $scope.isDirty ? unloadMessage : null;
}

$scope.$on('$locationChangeStart', function(event, next, current) {
if (next.split("#")[0] == current.split("#")[0]) {
return;
}

if ($scope.isDirty && !confirm(confirmMessage)) {
event.preventDefault();
}
});

$scope.$on('$destroy', function() {
$window.onbeforeunload = _onbeforeunload;
});
}
$window.onbeforeunload = function () {
return $scope.isDirty ? unloadMessage : null;
}
}]);

directives.directive('rdTab', function() {
return {
restrict: 'E',
scope: {
'tabId': '@',
'name': '@'
},
transclude: true,
template: '<li class="rd-tab" ng-class="{active: tabId==selectedTab}"><a href="#{{tabId}}">{{name}}<span ng-transclude></span></a></li>',
replace: true,
link: function(scope) {
scope.$watch(function(){return scope.$parent.selectedTab}, function(tab) {
scope.selectedTab = tab;
});
}
$scope.$on('$locationChangeStart', function (event, next, current) {
if (next.split("#")[0] == current.split("#")[0]) {
return;
}

if ($scope.isDirty && !confirm(confirmMessage)) {
event.preventDefault();
}
});

$scope.$on('$destroy', function () {
$window.onbeforeunload = _onbeforeunload;
});
}
}
}]);

directives.directive('rdTab', function () {
return {
restrict: 'E',
scope: {
'tabId': '@',
'name': '@'
},
transclude: true,
template: '<li class="rd-tab" ng-class="{active: tabId==selectedTab}"><a href="#{{tabId}}">{{name}}<span ng-transclude></span></a></li>',
replace: true,
link: function (scope) {
scope.$watch(function () {
return scope.$parent.selectedTab
}, function (tab) {
scope.selectedTab = tab;
});
}
}
});

directives.directive('rdTabs', ['$location', function ($location) {
return {
restrict: 'E',
scope: {
tabsCollection: '=',
selectedTab: '='
},
template: '<ul class="nav nav-tabs"><li ng-class="{active: tab==selectedTab}" ng-repeat="tab in tabsCollection"><a href="#{{tab.key}}">{{tab.name}}</a></li></ul>',
replace: true,
link: function ($scope, element, attrs) {
$scope.selectTab = function (tabKey) {
$scope.selectedTab = _.find($scope.tabsCollection, function (tab) {
return tab.key == tabKey;
});
}
});

directives.directive('rdTabs', ['$location', function($location) {
return {
restrict: 'E',
scope: {
tabsCollection: '=',
selectedTab: '='
},
template: '<ul class="nav nav-tabs"><li ng-class="{active: tab==selectedTab}" ng-repeat="tab in tabsCollection"><a href="#{{tab.key}}">{{tab.name}}</a></li></ul>',
replace: true,
link: function($scope, element, attrs) {
$scope.selectTab = function(tabKey) {
$scope.selectedTab = _.find($scope.tabsCollection, function(tab) { return tab.key == tabKey; });
}
$scope.$watch(function () {
return $location.hash()
}, function (hash) {
if (hash) {
$scope.selectTab($location.hash());
} else {
$scope.selectTab($scope.tabsCollection[0].key);
}
});
}
}
}]);

$scope.$watch(function() { return $location.hash()}, function(hash) {
if (hash) {
$scope.selectTab($location.hash());
} else {
$scope.selectTab($scope.tabsCollection[0].key);
}
});
}
// From: http://jsfiddle.net/joshdmiller/NDFHg/
directives.directive('editInPlace', function () {
return {
restrict: 'E',
scope: {
value: '=',
ignoreBlanks: '=',
editable: '=',
done: '=',
},
template: function (tElement, tAttrs) {
var elType = tAttrs.editor || 'input';
var placeholder = tAttrs.placeholder || 'Click to edit';

var viewMode = '';

if (tAttrs.markdown == "true") {
viewMode = '<span ng-click="editable && edit()" ng-bind-html="value|markdown" ng-class="{editable: editable}"></span>';
} else {
viewMode = '<span ng-click="editable && edit()" ng-bind="value" ng-class="{editable: editable}"></span>';
}
}]);

// From: http://jsfiddle.net/joshdmiller/NDFHg/
directives.directive('editInPlace', function () {
return {
restrict: 'E',
scope: {
value: '=',
ignoreBlanks: '=',
editable: '=',
done: '='
},
template: function(tElement, tAttrs) {
var elType = tAttrs.editor || 'input';
var placeholder = tAttrs.placeholder || 'Click to edit';
return '<span ng-click="editable && edit()" ng-bind="value" ng-class="{editable: editable}"></span>' +
'<span ng-click="editable && edit()" ng-show="editable && !value" ng-class="{editable: editable}">' + placeholder + '</span>' +
'<{elType} ng-model="value" class="rd-form-control"></{elType}>'.replace('{elType}', elType);
},
link: function ($scope, element, attrs) {
// Let's get a reference to the input element, as we'll want to reference it.
var inputElement = angular.element(element.children()[2]);
var placeholderSpan = '<span ng-click="editable && edit()" ng-show="editable && !value" ng-class="{editable: editable}">' + placeholder + '</span>';
var editor = '<{elType} ng-model="value" class="rd-form-control"></{elType}>'.replace('{elType}', elType);

// This directive should have a set class so we can style it.
element.addClass('edit-in-place');
return viewMode + placeholderSpan + editor;
},
link: function ($scope, element, attrs) {
// Let's get a reference to the input element, as we'll want to reference it.
var inputElement = angular.element(element.children()[2]);

// Initially, we're not editing.
$scope.editing = false;
// This directive should have a set class so we can style it.
element.addClass('edit-in-place');

// ng-click handler to activate edit-in-place
$scope.edit = function () {
$scope.oldValue = $scope.value;
// Initially, we're not editing.
$scope.editing = false;

$scope.editing = true;
// ng-click handler to activate edit-in-place
$scope.edit = function () {
$scope.oldValue = $scope.value;

// We control display through a class on the directive itself. See the CSS.
element.addClass('active');
$scope.editing = true;

// And we must focus the element.
// `angular.element()` provides a chainable array, like jQuery so to access a native DOM function,
// we have to reference the first element in the array.
inputElement[0].focus();
};
// We control display through a class on the directive itself. See the CSS.
element.addClass('active');

function save() {
if ($scope.editing) {
if ($scope.ignoreBlanks && _.isEmpty($scope.value)) {
$scope.value = $scope.oldValue;
}
$scope.editing = false;
element.removeClass('active');

if ($scope.value !== $scope.oldValue) {
$scope.done && $scope.done();
}
}
}

$(inputElement).keydown(function(e) {
// 'return' or 'enter' key pressed
// allow 'shift' to break lines
if (e.which === 13 && !e.shiftKey) {
save();
} else if (e.which === 27) {
$scope.value = $scope.oldValue;
$scope.$apply(function() {
$(inputElement[0]).blur();
});
}
}).blur(function() {
save();
});
}
// And we must focus the element.
// `angular.element()` provides a chainable array, like jQuery so to access a native DOM function,
// we have to reference the first element in the array.
inputElement[0].focus();
};
});

// http://stackoverflow.com/a/17904092/1559840
directives.directive('jsonText', function() {
return {
restrict: 'A',
require: 'ngModel',
link: function(scope, element, attr, ngModel) {
function into(input) {
return JSON.parse(input);
}
function out(data) {
return JSON.stringify(data, undefined, 2);
}
ngModel.$parsers.push(into);
ngModel.$formatters.push(out);

scope.$watch(attr.ngModel, function(newValue) {
element[0].value = out(newValue);
}, true);
function save() {
if ($scope.editing) {
if ($scope.ignoreBlanks && _.isEmpty($scope.value)) {
$scope.value = $scope.oldValue;
}
};
});
$scope.editing = false;
element.removeClass('active');

directives.directive('rdTimer', [function () {
return {
restrict: 'E',
scope: { timestamp: '=' },
template: '{{currentTime}}',
controller: ['$scope' ,function ($scope) {
$scope.currentTime = "00:00:00";

// We're using setInterval directly instead of $timeout, to avoid using $apply, to
// prevent the digest loop being run every second.
var currentTimer = setInterval(function() {
$scope.currentTime = moment(moment() - moment($scope.timestamp)).utc().format("HH:mm:ss");
$scope.$digest();
}, 1000);

$scope.$on('$destroy', function () {
if (currentTimer) {
clearInterval(currentTimer);
currentTimer = null;
}
});
}]
};
}]);

directives.directive('rdTimeAgo', function() {
return {
restrict: 'E',
scope: {
value: '='
},
template: '<span>' +
'<span ng-show="value" am-time-ago="value"></span>' +
'<span ng-hide="value">-</span>' +
'</span>'
if ($scope.value !== $scope.oldValue) {
$scope.done && $scope.done();
}
}
}
});

$(inputElement).keydown(function (e) {
// 'return' or 'enter' key pressed
// allow 'shift' to break lines
if (e.which === 13 && !e.shiftKey) {
save();
} else if (e.which === 27) {
$scope.value = $scope.oldValue;
$scope.$apply(function () {
$(inputElement[0]).blur();
});
}
}).blur(function () {
save();
});
}
};
});

// http://stackoverflow.com/a/17904092/1559840
directives.directive('jsonText', function () {
return {
restrict: 'A',
require: 'ngModel',
link: function (scope, element, attr, ngModel) {
function into(input) {
return JSON.parse(input);
}

function out(data) {
return JSON.stringify(data, undefined, 2);
}

ngModel.$parsers.push(into);
ngModel.$formatters.push(out);

scope.$watch(attr.ngModel, function (newValue) {
element[0].value = out(newValue);
}, true);
}
};
});

directives.directive('rdTimer', [function () {
return {
restrict: 'E',
scope: { timestamp: '=' },
template: '{{currentTime}}',
controller: ['$scope' , function ($scope) {
$scope.currentTime = "00:00:00";

// We're using setInterval directly instead of $timeout, to avoid using $apply, to
// prevent the digest loop being run every second.
var currentTimer = setInterval(function () {
$scope.currentTime = moment(moment() - moment($scope.timestamp)).utc().format("HH:mm:ss");
$scope.$digest();
}, 1000);

$scope.$on('$destroy', function () {
if (currentTimer) {
clearInterval(currentTimer);
currentTimer = null;
}
});
}]
};
}]);

directives.directive('rdTimeAgo', function () {
return {
restrict: 'E',
scope: {
value: '='
},
template: '<span>' +
'<span ng-show="value" am-time-ago="value"></span>' +
'<span ng-hide="value">-</span>' +
'</span>'
}
});

// Used instead of autofocus attribute, which doesn't work in Angular as there is no real page load.
directives.directive('autofocus',
['$timeout', function ($timeout) {
return {
link: function (scope, element) {
$timeout(function () {
element[0].focus();
});
}
};
}]
);
})();

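rdTimer's comment explains why it pairs a raw setInterval with a scoped $digest instead of using Angular's timer services, which would run a full root-scope digest every second. For contrast, the conventional version would look like this (a sketch; $interval is the stock Angular service, simpler but heavier):

// Equivalent using $interval (runs a full digest on each tick):
var tick = $interval(function () {
  $scope.currentTime = moment(moment() - moment($scope.timestamp)).utc().format("HH:mm:ss");
}, 1000);

$scope.$on('$destroy', function () {
  $interval.cancel(tick);
});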
@@ -38,6 +38,26 @@
}
}

function queryResultCSVLink() {
return {
restrict: 'A',
link: function (scope, element) {
scope.$watch('queryResult && queryResult.getData()', function(data) {
if (!data) {
return;
}

if (scope.queryResult.getId() == null) {
element.attr('href', '');
} else {
element.attr('href', '/api/queries/' + scope.query.id + '/results/' + scope.queryResult.getId() + '.csv');
element.attr('download', scope.query.name.replace(" ", "_") + moment(scope.queryResult.getUpdatedAt()).format("_YYYY_MM_DD") + ".csv");
}
});
}
}
}

function queryEditor() {
return {
restrict: 'E',
@@ -135,6 +155,7 @@
angular.module('redash.directives')
.directive('queryLink', queryLink)
.directive('querySourceLink', querySourceLink)
.directive('queryResultLink', queryResultCSVLink)
.directive('queryEditor', queryEditor)
.directive('queryRefreshSelect', queryRefreshSelect)
.directive('queryFormatter', ['$http', queryFormatter]);

@@ -15,7 +15,9 @@ var durationHumanize = function (duration) {
humanized = minutes + "m";
}
return humanized;
}
};

var urlPattern = /(^|[\s\n]|<br\/?>)((?:https?|ftp):\/\/[\-A-Z0-9+\u0026\u2019@#\/%?=()~_|!:,.;]*[\-A-Z0-9+\u0026@#\/%=~()_|])/gi;

angular.module('redash.filters', []).
filter('durationHumanize', function () {
@@ -60,8 +62,26 @@ angular.module('redash.filters', []).
}
})

.filter('markdown', ['$sce', function($sce) {
return function(text) {
.filter('linkify', function () {
return function (text) {
return text.replace(urlPattern, "$1<a href='$2' target='_blank'>$2</a>");
};
})

.filter('markdown', ['$sce', function ($sce) {
return function (text) {
if (!text) {
return "";
}
return $sce.trustAsHtml(marked(text));
}
}]);
}])

.filter('trustAsHtml', ['$sce', function ($sce) {
return function (text) {
if (!text) {
return "";
}
return $sce.trustAsHtml(text);
}
}]);

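The new linkify filter wraps bare URLs in anchor tags using the urlPattern regex above. A quick illustration of what it produces (the input string is made up):

// Illustrative only:
var linkify = $filter('linkify');
linkify('docs at https://redash.io/help');
// -> "docs at <a href='https://redash.io/help' target='_blank'>https://redash.io/help</a>"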
@@ -1,9 +1,20 @@
(function () {
'use strict';

var ColorPalette = {
'Blue':'#4572A7',
'Red':'#AA4643',
'Green': '#89A54E',
'Purple': '#80699B',
'Cyan': '#3D96AE',
'Orange': '#DB843D',
'Light Blue': '#92A8CD',
'Lilac': '#A47D7C',
'Light Green': '#B5CA92',
};

Highcharts.setOptions({
colors: ["#4572A7", "#AA4643", "#89A54E", "#80699B", "#3D96AE",
"#DB843D", "#92A8CD", "#A47D7C", "#B5CA92"]
colors: _.values(ColorPalette)
});

var defaultOptions = {
@@ -13,11 +24,23 @@
xAxis: {
type: 'datetime'
},
yAxis: {
title: {
text: null
yAxis: [
{
title: {
text: null
},
// showEmpty: true // by default
},
{
title: {
text: null
},
opposite: true,
showEmpty: false
}
},
],


tooltip: {
valueDecimals: 2,
formatter: function () {
@@ -81,6 +104,55 @@
series.update({stacking: newStacking}, true);
});
}
},
{
text: 'Select All',
onclick: function () {
_.each(this.series, function (s) {
s.setVisible(true, false);
});
this.redraw();
}
},
{
text: 'Unselect All',
onclick: function () {
_.each(this.series, function (s) {
s.setVisible(false, false);
});
this.redraw();
}
},
{
text: 'Show Total',
onclick: function () {
var hasTotalsAlready = _.some(this.series, function (s) {
var res = (s.name == 'Total');
//if 'Total' already exists - just make it visible
if (res) s.setVisible(true, false);
return res;
})
var data = {};
_.each(this.series, function (s) {
if (s.name != 'Total') s.setVisible(false, false);
if (!hasTotalsAlready) {
_.each(s.data, function (p) {
data[p.x] = data[p.x] || {'x': p.x, 'y': 0};
data[p.x].y = data[p.x].y + p.y;
});
}
});

if (!hasTotalsAlready) {
this.addSeries({
data: _.values(data),
type: 'line',
name: 'Total'
}, false)
}

this.redraw();
}
}
]
}
@@ -153,6 +225,7 @@
};

angular.module('highchart', [])
.constant('ColorPalette', ColorPalette)
.directive('chart', ['$timeout', function ($timeout) {
return {
restrict: 'E',
@@ -287,4 +360,4 @@
};

}]);
})();
})();

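The defaultOptions change introduces a second, right-hand y-axis with showEmpty: false, so it is only rendered when some series actually targets it. How a series opts in (a sketch; the series name and data points are made up):

// Illustrative Highcharts series config against the two-axis setup above:
$scope.chartSeries.push({
  name: 'Error Rate',
  yAxis: 1,            // index into the yAxis array -> the right-hand axis
  type: 'line',
  data: [[Date.UTC(2014, 0, 1), 0.02], [Date.UTC(2014, 0, 2), 0.05]]
});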
@@ -217,7 +217,7 @@
element.html('<div editable-cell="" row="dataRow" column="column" type="column.type"></div>');
compile(element.contents())(scope);
} else {
element.text(scope.formatedValue);
element.html(scope.formatedValue);
}
}

@@ -1,6 +1,12 @@
(function () {
var Dashboard = function($resource) {
var resource = $resource('/api/dashboards/:slug', {slug: '@slug'});
var resource = $resource('/api/dashboards/:slug', {slug: '@slug'}, {
recent: {
method: 'get',
isArray: true,
url: "/api/dashboards/recent"
}});

resource.prototype.canEdit = function() {
return currentUser.hasPermission('admin') || currentUser.canEdit(this);
}

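The extra `recent` action on the $resource definition is what PersonalIndexCtrl invokes as Dashboard.recent(): it issues GET /api/dashboards/recent and returns an array of resource instances. Roughly (the callback body is illustrative):

// Illustrative call site:
$scope.recentDashboards = Dashboard.recent(function (dashboards) {
  console.log('loaded', dashboards.length, 'recently viewed dashboards');
});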
@@ -1,10 +1,9 @@
(function () {
var notifications = function (Events) {
var notificationService = {};
var lastNotification = null;

notificationService.isSupported = function () {
if (window.webkitNotifications) {
if ("Notification" in window) {
return true;
} else {
console.log("HTML5 notifications are not supported.");
@@ -17,8 +16,12 @@
return;
}

if (!window.webkitNotifications.checkPermission() == 0) { // 0 is PERMISSION_ALLOWED
window.webkitNotifications.requestPermission();
if (Notification.permission !== "granted") {
Notification.requestPermission(function (status) {
if (Notification.permission !== status) {
Notification.permission = status;
}
});
}
}

@@ -27,23 +30,13 @@
return;
}

if (document.webkitVisibilityState && document.webkitVisibilityState == 'visible') {
return;
}

if (lastNotification) {
lastNotification.cancel();
}

var notification = window.webkitNotifications.createNotification('', title, content);
lastNotification = notification;
//using the 'tag' to avoid showing duplicate notifications
var notification = new Notification(title, {'tag': title+content, 'body': content});
notification.onclick = function () {
window.focus();
this.cancel();
Events.record(currentUser, 'click', 'notification');
};

notification.show()
}

return notificationService;

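This hunk migrates from the deprecated webkitNotifications API to the standard HTML5 Notification API; the tag option is what deduplicates repeated notifications for the same title/body pair. A self-contained sketch of the same pattern outside Angular:

// Minimal standalone version of the pattern used above:
function notify(title, body) {
  if (!("Notification" in window)) return;        // unsupported browser
  if (Notification.permission !== "granted") {
    Notification.requestPermission();             // ask, notify next time
    return;
  }
  var n = new Notification(title, { tag: title + body, body: body });
  n.onclick = function () { window.focus(); };
}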
@@ -1,5 +1,5 @@
(function () {
var QueryResult = function ($resource, $timeout) {
var QueryResult = function ($resource, $timeout, $q) {
var QueryResultResource = $resource('/api/query_results/:id', {id: '@id'}, {'post': {'method': 'POST'}});
var Job = $resource('/api/jobs/:id', {id: '@id'});

@@ -10,13 +10,31 @@
this.filters = undefined;
this.filterFreeze = undefined;

var columnTypes = {};

_.each(this.query_result.data.rows, function (row) {
_.each(row, function (v, k) {
if (_.isString(v) && v.match(/^\d{4}-\d{2}-\d{2}/)) {
if (angular.isNumber(v)) {
columnTypes[k] = 'float';
} else if (_.isString(v) && v.match(/^\d{4}-\d{2}-\d{2}T/)) {
row[k] = moment(v);
columnTypes[k] = 'datetime';
} else if (_.isString(v) && v.match(/^\d{4}-\d{2}-\d{2}/)) {
row[k] = moment(v);
columnTypes[k] = 'date';
} else if (typeof(v) == 'object' && v !== null) {
row[k] = JSON.stringify(v);
}
});
}, this);
}, this);

_.each(this.query_result.data.columns, function(column) {
if (columnTypes[column.name]) {
column.type = columnTypes[column.name];
}
});
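// The branch order in the added hunk matters: the stricter datetime pattern
// (YYYY-MM-DDT...) must be tested before the plain date pattern (YYYY-MM-DD),
// or every timestamp would be classified as a date. The same inference as a
// standalone helper (an illustrative sketch, not from the repo):
function inferType(v) {
  if (angular.isNumber(v)) return 'float';
  if (_.isString(v) && v.match(/^\d{4}-\d{2}-\d{2}T/)) return 'datetime';
  if (_.isString(v) && v.match(/^\d{4}-\d{2}-\d{2}/)) return 'date';
  return undefined; // leave the column type unset
}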

this.deferred.resolve(this);
} else if (this.job.status == 3) {
this.status = "processing";
} else {
@@ -25,6 +43,7 @@
}

function QueryResult(props) {
this.deferred = $q.defer();
this.job = {};
this.query_result = {};
this.status = "waiting";
@@ -133,7 +152,7 @@
return this.filteredData;
}

QueryResult.prototype.getChartData = function () {
QueryResult.prototype.getChartData = function (mapping) {
var series = {};

_.each(this.getData(), function (row) {
@@ -143,8 +162,15 @@
var yValues = {};

_.each(row, function (value, definition) {
var type = definition.split("::")[1];
var name = definition.split("::")[0];
var type = definition.split("::")[1];
if (mapping) {
type = mapping[definition];
}

if (type == 'unused') {
return;
}

if (type == 'x') {
xValue = value;
@@ -199,7 +225,7 @@
if (this.columns == undefined && this.query_result.data) {
this.columns = this.query_result.data.columns;
}


return this.columns;
}

@@ -215,6 +241,9 @@

QueryResult.prototype.getColumnNameWithoutType = function (column) {
var parts = column.split('::');
if (parts[0] == "" && parts.length == 2) {
return parts[1];
}
return parts[0];
};

@@ -224,7 +253,9 @@
'__qm': /\?/g,
'__brkt': /[\(\)\[\]]/g,
'__dash': /-/g,
'__amp': /&/g
'__amp': /&/g,
'__sl': /\//g,
'__fsl': /\\/g,
};

QueryResult.prototype.getColumnCleanName = function (column) {
@@ -322,6 +353,10 @@
});

return queryResult;
};

QueryResult.prototype.toPromise = function() {
return this.deferred.promise;
}

QueryResult.get = function (data_source_id, query, ttl) {
@@ -342,7 +377,18 @@
};

var Query = function ($resource, QueryResult, DataSource) {
var Query = $resource('/api/queries/:id', {id: '@id'});
var Query = $resource('/api/queries/:id', {id: '@id'},
{
search: {
method: 'get',
isArray: true,
url: "/api/queries/search"
},
recent: {
method: 'get',
isArray: true,
url: "/api/queries/recent"
}});

Query.newQuery = function () {
return new Query({
@@ -362,24 +408,30 @@
ttl = this.ttl;
}

var queryResult = null;
if (this.latest_query_data && ttl != 0) {
if (!this.queryResult) {
this.queryResult = new QueryResult({'query_result': this.latest_query_data});
}
queryResult = this.queryResult;
} else if (this.latest_query_data_id && ttl != 0) {
queryResult = QueryResult.getById(this.latest_query_data_id);
if (!this.queryResult) {
this.queryResult = QueryResult.getById(this.latest_query_data_id);
}
} else if (this.data_source_id) {
queryResult = QueryResult.get(this.data_source_id, this.query, ttl);
this.queryResult = QueryResult.get(this.data_source_id, this.query, ttl);
}

return queryResult;
return this.queryResult;
};

Query.prototype.getQueryResultPromise = function() {
return this.getQueryResult().toPromise();
}

return Query;
};



var DataSource = function ($resource) {
var DataSourceResource = $resource('/api/data_sources/:id', {id: '@id'}, {'get': {'method': 'GET', 'cache': true, 'isArray': true}});

@@ -408,7 +460,7 @@
}

angular.module('redash.services')
.factory('QueryResult', ['$resource', '$timeout', QueryResult])
.factory('QueryResult', ['$resource', '$timeout', '$q', QueryResult])
.factory('Query', ['$resource', 'QueryResult', 'DataSource', Query])
.factory('DataSource', ['$resource', DataSource])
.factory('Widget', ['$resource', 'Query', Widget]);

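getColumnCleanName applies the substitution map extended above, so column names survive in contexts that cannot handle special characters; the new '__sl'/'__fsl' entries cover forward and backslashes too. The effect, roughly (the column name is made up, and the exact output format is not shown in this diff):

// Illustrative: each special character is replaced by its placeholder from
// the map above, e.g. '/' -> '__sl', '?' -> '__qm', parentheses -> '__brkt'.
var clean = queryResult.getColumnCleanName('price ($/unit)?');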
@@ -55,7 +55,7 @@
}];
};

var VisualizationRenderer = function (Visualization) {
var VisualizationRenderer = function ($location, Visualization) {
return {
restrict: 'E',
scope: {
@@ -70,10 +70,44 @@
link: function (scope) {
scope.select2Options = {
width: '50%'
};

function readURL() {
var searchFilters = angular.fromJson($location.search().filters);
if (searchFilters) {
_.forEach(scope.filters, function(filter) {
var value = searchFilters[filter.friendlyName];
if (value) {
filter.current = value;
}
});
}
}

function updateURL(filters) {
var current = {};
_.each(filters, function(filter) {
if (filter.current) {
current[filter.friendlyName] = filter.current;
}
});

var newSearch = angular.extend($location.search(), {
filters: angular.toJson(current)
});
$location.search(newSearch);
}

scope.$watch('queryResult && queryResult.getFilters()', function (filters) {
if (filters) {
scope.filters = filters;

if (filters.length && false) {
readURL();

// start watching for changes and update URL
scope.$watch('filters', updateURL, true);
}
}
});
}
@@ -111,26 +145,23 @@
scope.editRawOptions = currentUser.hasPermission('edit_raw_chart');
scope.visTypes = Visualization.visualizationTypes;

scope.newVisualization = function (q) {
scope.newVisualization = function () {
return {
'query_id': q.id,
'type': Visualization.defaultVisualization.type,
'name': Visualization.defaultVisualization.name,
'description': q.description || '',
'description': '',
'options': Visualization.defaultVisualization.defaultOptions
};
}

if (!scope.visualization) {
// create new visualization
// wait for query to load to populate with defaults
var unwatch = scope.$watch('query', function (q) {
if (q && q.id) {
var unwatch = scope.$watch('query.id', function (queryId) {
if (queryId) {
unwatch();

scope.visualization = scope.newVisualization(q);
scope.visualization = scope.newVisualization();
}
}, true);
});
}

scope.$watch('visualization.type', function (type, oldType) {
@@ -148,6 +179,8 @@
Events.record(currentUser, "create", "visualization", null, {'type': scope.visualization.type});
}

scope.visualization.query_id = scope.query.id;

Visualization.save(scope.visualization, function success(result) {
growl.addSuccessMessage("Visualization saved");

@@ -173,7 +206,7 @@

angular.module('redash.visualization', [])
.provider('Visualization', VisualizationProvider)
.directive('visualizationRenderer', ['Visualization', VisualizationRenderer])
.directive('visualizationRenderer', ['$location', 'Visualization', VisualizationRenderer])
.directive('visualizationOptionsEditor', ['Visualization', VisualizationOptionsEditor])
.directive('filters', Filters)
.directive('editVisulatizationForm', ['Events', 'Visualization', 'growl', EditVisualizationForm])

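readURL/updateURL serialize the current filter selections into a ?filters=... query-string parameter as JSON keyed by friendly name (note the `filters.length && false` guard currently disables the feature). The round trip looks roughly like this (the dashboard slug and values are illustrative):

// updateURL(...) on a filter like {friendlyName: 'Country', current: 'US'}
// yields e.g.:  /dashboard/sales?filters=%7B%22Country%22%3A%22US%22%7D
// and readURL() parses it back into the filters on scope:
var searchFilters = angular.fromJson($location.search().filters); // {"Country": "US"}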
@@ -6,7 +6,7 @@
|
||||
var editTemplate = '<chart-editor></chart-editor>';
|
||||
var defaultOptions = {
|
||||
'series': {
|
||||
'type': 'column',
|
||||
// 'type': 'column',
|
||||
'stacking': null
|
||||
}
|
||||
};
|
||||
@@ -33,31 +33,54 @@
|
||||
$scope.chartSeries = [];
|
||||
$scope.chartOptions = {};
|
||||
|
||||
var reloadData = function(data) {
|
||||
if (!data || ($scope.queryResult && $scope.queryResult.getData()) == null) {
|
||||
$scope.chartSeries.splice(0, $scope.chartSeries.length);
|
||||
} else {
|
||||
$scope.chartSeries.splice(0, $scope.chartSeries.length);
|
||||
|
||||
_.each($scope.queryResult.getChartData($scope.options.columnMapping), function (s) {
|
||||
var additional = {'stacking': 'normal'};
|
||||
if ($scope.options.seriesOptions && $scope.options.seriesOptions[s.name]) {
|
||||
additional = $scope.options.seriesOptions[s.name];
|
||||
if (!additional.name || additional.name == "") {
|
||||
additional.name = s.name;
|
||||
}
|
||||
}
|
||||
$scope.chartSeries.push(_.extend(s, additional));
|
||||
});
|
||||
};
|
||||
};
|
||||
|
||||
$scope.$watch('options', function (chartOptions) {
|
||||
if (chartOptions) {
|
||||
$scope.chartOptions = chartOptions;
|
||||
}
|
||||
});
|
||||
$scope.$watch('queryResult && queryResult.getData()', function (data) {
|
||||
if (!data || $scope.queryResult.getData() == null) {
|
||||
$scope.chartSeries.splice(0, $scope.chartSeries.length);
|
||||
} else {
|
||||
$scope.chartSeries.splice(0, $scope.chartSeries.length);
|
||||
|
||||
_.each($scope.queryResult.getChartData(), function (s) {
|
||||
$scope.chartSeries.push(_.extend(s, {'stacking': 'normal'}));
|
||||
});
|
||||
}
|
||||
$scope.$watch('options.seriesOptions', function () {
|
||||
reloadData(true);
|
||||
}, true);
|
||||
|
||||
|
||||
$scope.$watchCollection('options.columnMapping', function (chartOptions) {
|
||||
reloadData(true);
|
||||
});
|
||||
|
||||
$scope.$watch('queryResult && queryResult.getData()', function (data) {
|
||||
reloadData(data);
|
||||
});
|
||||
}]
|
||||
}
|
||||
};
|
||||
});
|
||||
|
||||
chartVisualization.directive('chartEditor', function () {
chartVisualization.directive('chartEditor', function (ColorPalette) {
  return {
    restrict: 'E',
    templateUrl: '/views/visualizations/chart_editor.html',
    link: function (scope, element, attrs) {
      scope.palette = ColorPalette;

      scope.seriesTypes = {
        'Line': 'line',
        'Column': 'column',
@@ -66,6 +89,8 @@
        'Pie': 'pie'
      };

      scope.globalSeriesType = 'column';

      scope.stackingOptions = {
        "None": "none",
        "Normal": "normal",
@@ -81,10 +106,33 @@
      scope.xAxisType = "datetime";
      scope.stacking = "none";

      var chartOptionsUnwatch = null;

      scope.$watch('visualization', function (visualization) {
        if (visualization && visualization.type == 'CHART') {
          scope.columnTypes = {
            "X": "x",
            // "X (Date time)": "x",
            // "X (Linear)": "x-linear",
            // "X (Category)": "x-category",
            "Y": "y",
            "Series": "series",
            "Unused": "unused"
          };

          scope.series = [];

          scope.columnTypeSelection = {};

          var chartOptionsUnwatch = null,
              columnsWatch = null;

          scope.$watch('globalSeriesType', function(type, old) {
            if (type && old && type !== old && scope.visualization.options.seriesOptions) {
              _.each(scope.visualization.options.seriesOptions, function(sOptions) {
                sOptions.type = type;
              });
            }
          });
      scope.$watch('visualization.type', function (visualizationType) {
        if (visualizationType == 'CHART') {
          if (scope.visualization.options.series.stacking === null) {
            scope.stacking = "none";
          } else if (scope.visualization.options.series.stacking === undefined) {
@@ -93,6 +141,74 @@
            scope.stacking = scope.visualization.options.series.stacking;
          }

          var refreshSeries = function() {
            scope.series = _.map(scope.queryResult.getChartData(scope.visualization.options.columnMapping), function (s) { return s.name; });

            // TODO: remove uneeded ones?
            if (scope.visualization.options.seriesOptions == undefined) {
              scope.visualization.options.seriesOptions = {
                type: scope.globalSeriesType
              };
            };

            _.each(scope.series, function(s, i) {
              if (scope.visualization.options.seriesOptions[s] == undefined) {
                scope.visualization.options.seriesOptions[s] = {'type': 'column', 'yAxis': 0};
              }
              scope.visualization.options.seriesOptions[s].zIndex = i;
            });
            scope.zIndexes = _.range(scope.series.length);
            scope.yAxes = [[0, 'left'], [1, 'right']];
          };

          var initColumnMapping = function() {
            scope.columns = scope.queryResult.getColumns();

            if (scope.visualization.options.columnMapping == undefined) {
              scope.visualization.options.columnMapping = {};
            }

            scope.columnTypeSelection = scope.visualization.options.columnMapping;

            _.each(scope.columns, function(column) {
              var definition = column.name.split("::"),
                  definedColumns = _.keys(scope.visualization.options.columnMapping);

              if (_.indexOf(definedColumns, column.name) != -1) {
                // Skip already defined columns.
                return;
              };

              if (definition.length == 1) {
                scope.columnTypeSelection[column.name] = scope.visualization.options.columnMapping[column.name] = 'unused';
              } else if (definition == 'multi-filter') {
                scope.columnTypeSelection[column.name] = scope.visualization.options.columnMapping[column.name] = 'series';
              } else if (_.indexOf(_.values(scope.columnTypes), definition[1]) != -1) {
                scope.columnTypeSelection[column.name] = scope.visualization.options.columnMapping[column.name] = definition[1];
              } else {
                scope.columnTypeSelection[column.name] = scope.visualization.options.columnMapping[column.name] = 'unused';
              }
            });
          };

          columnsWatch = scope.$watch('queryResult.getId()', function(id) {
            if (!id) {
              return;
            }

            initColumnMapping();
            refreshSeries();
          });

          scope.$watchCollection('columnTypeSelection', function(selections) {
            _.each(scope.columnTypeSelection, function(type, name) {
              scope.visualization.options.columnMapping[name] = type;
            });

            refreshSeries();
          });

          chartOptionsUnwatch = scope.$watch("stacking", function (stacking) {
            if (stacking == "none") {
              scope.visualization.options.series.stacking = null;
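For orientation only (not part of the commit): initColumnMapping above keys off a "::" suffix in the column alias to pick a default mapping. A sketch with hypothetical column names:

// Sketch: how split("::") drives the default column mapping above.
'created_at'.split('::');       // ['created_at']        -> length 1, mapped to 'unused'
'total::y'.split('::');         // ['total', 'y']        -> 'y' is a known column type, used as-is
'country::series'.split('::');  // ['country', 'series'] -> mapped to 'series'
'weird::zzz'.split('::');       // ['weird', 'zzz']      -> unknown suffix, falls back to 'unused'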
@@ -113,6 +229,11 @@
          chartOptionsUnwatch = null;
        }

        if (columnsWatch) {
          columnWatch();
          columnWatch = null;
        }

        if (xAxisUnwatch) {
          xAxisUnwatch();
          xAxisUnwatch = null;
@@ -122,4 +243,4 @@
      }
    }
  });
}());
}());

@@ -28,9 +28,13 @@
} else {
  var sortedData = _.sortBy($scope.queryResult.getData(), "date");
  var grouped = _.groupBy(sortedData, "date");
  var maxColumns = _.reduce(grouped, function(memo, data){
    return (data.length > memo)? data.length : memo;
  }, 0);
  var data = _.map(grouped, function(values, date) {
    var row = [values[0].total];
    _.each(values, function(value) { row.push(value.value); });
    _.each(_.range(values.length, maxColumns), function() { row.push(null); });
    return row;
  });
rd_ui/app/scripts/visualizations/counter.js (new file, 61 lines)
@@ -0,0 +1,61 @@
'use strict';

(function() {
  var module = angular.module('redash.visualization');

  module.config(['VisualizationProvider', function(VisualizationProvider) {
    var renderTemplate =
      '<counter-renderer ' +
      'options="visualization.options" query-result="queryResult">' +
      '</counter-renderer>';

    var editTemplate = '<counter-editor></counter-editor>';
    var defaultOptions = {};

    VisualizationProvider.registerVisualization({
      type: 'COUNTER',
      name: 'Counter',
      renderTemplate: renderTemplate,
      editorTemplate: editTemplate,
      defaultOptions: defaultOptions
    });
  }
  ]);

  module.directive('counterRenderer', function() {
    return {
      restrict: 'E',
      templateUrl: '/views/visualizations/counter.html',
      link: function($scope, elm, attrs) {
        $scope.visualization.options.rowNumber =
          $scope.visualization.options.rowNumber || 0;

        $scope.$watch('[queryResult && queryResult.getData(), visualization.options]',
          function() {
            var queryData = $scope.queryResult.getData();
            if (queryData) {
              var rowNumber = $scope.visualization.options.rowNumber || 0;
              var counterColName = $scope.visualization.options.counterColName || 'counter';
              var targetColName = $scope.visualization.options.targetColName || 'target';

              $scope.counterValue = queryData[rowNumber][counterColName];
              $scope.targetValue = queryData[rowNumber][targetColName];

              if ($scope.targetValue) {
                $scope.delta = $scope.counterValue - $scope.targetValue;
                $scope.trendPositive = $scope.delta >= 0;
              }
            }
          }, true);
      }
    }
  });

  module.directive('counterEditor', function() {
    return {
      restrict: 'E',
      templateUrl: '/views/visualizations/counter_editor.html'
    }
  });

})();
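For orientation only (not part of the commit): a sketch of the row shape the counter renderer above expects, with invented values. 'counter' and 'target' are only the default column names; the editor can override them:

// Sketch: what getData() would return for a counter with a target.
var queryData = [
  {counter: 1250, target: 1000}   // row 0 is used when options.rowNumber is 0
];
var counterValue = queryData[0]['counter'];  // 1250
var targetValue = queryData[0]['target'];    // 1000
var delta = counterValue - targetValue;      // 250 -> rendered as a positive trend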
@@ -19,7 +19,7 @@
  },
  templateUrl: "/views/grid_renderer.html",
  replace: false,
  controller: ['$scope', function ($scope) {
  controller: ['$scope', '$filter', function ($scope, $filter) {
    $scope.gridColumns = [];
    $scope.gridData = [];
    $scope.gridConfig = {
@@ -63,37 +63,40 @@

var columnType = columns[i].type;

if (!columnType) {
  var rawData = $scope.queryResult.getRawData();

  if (rawData.length > 0) {
    var exampleData = rawData[0][col];
    if (angular.isNumber(exampleData)) {
      columnType = 'float';
    } else if (moment.isMoment(exampleData)) {
      if (exampleData._i.match(/^\d{4}-\d{2}-\d{2}T/)) {
        columnType = 'datetime';
      } else {
        columnType = 'date';
      }
    }
  }
}

if (columnType === 'integer') {
  columnDefinition.formatFunction = 'number';
  columnDefinition.formatParameter = 0;
} else if (columnType === 'float') {
  columnDefinition.formatFunction = 'number';
  columnDefinition.formatParameter = 2;
} else if (columnType === 'boolean') {
  columnDefinition.formatFunction = function (value) {
    if (value !== undefined) {
      return "" + value;
    }
    return value;
  };
} else if (columnType === 'date') {
  columnDefinition.formatFunction = function (value) {
    return value.format("DD/MM/YY");
    if (value) {
      return value.format("DD/MM/YY");
    }
    return value;
  };
} else if (columnType === 'datetime') {
  columnDefinition.formatFunction = function (value) {
    return value.format("DD/MM/YY HH:mm");
    if (value) {
      return value.format("DD/MM/YY HH:mm");
    }
    return value;
  };
} else {
  columnDefinition.formatFunction = function (value) {
    if (angular.isString(value)) {
      value = $filter('linkify')(value);
    }
    return value;
  }
}

return columnDefinition;
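For orientation only (not part of the commit): the change above wraps moment's format call in a null guard so empty cells no longer throw. A sketch of the before/after behavior:

// Sketch: the null-safe date formatter introduced by this hunk.
var format = function (value) {
  if (value) {
    return value.format('DD/MM/YY');  // value is a moment instance here
  }
  return value;                       // null/undefined cells pass through unformatted
};
format(moment('2014-08-05'));         // '05/08/14'
format(null);                         // null, instead of a TypeError as before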
@@ -1,14 +1,15 @@
.main {
  max-width: 320px;
  margin: 0 auto;
  margin-top:20px;
}

.login-or {
  position: relative;
  font-size: 18px;
  color: #aaa;
  margin-top: 10px;
  margin-bottom: 10px;
  margin-top: 20px;
  margin-bottom: 20px;
  padding-top: 10px;
  padding-bottom: 10px;
}
@@ -31,7 +32,9 @@
  margin-bottom: 0px !important;
}

/*h3 {*/
/*text-align: center;*/
/*line-height: 300%;*/
/*}*/
img.login-button {
  width: 250px;
  display: block;
  margin-left: auto;
  margin-right: auto;
}
@@ -245,6 +245,9 @@ to add those CSS styles here. */
  background-color: #FF8080;
  border-radius: 50%;
}
.nav-tabs > li.rd-tab-btn {
  float: right;
}

/* light version of bootstrap's form-control */
.rd-form-control {
@@ -264,9 +267,46 @@ to add those CSS styles here. */
.rd-form-control {
  width: 100%;
}
visualization-renderer > div {
pivot-table-renderer > table, grid-renderer > div, visualization-renderer > div {
  overflow: auto;
}
counter-renderer {
  display: block;
  text-align: center;
}
counter-renderer counter {
  margin: 0 auto;
  background: #f9f9f9;
  padding: 15px 50px;
  display: block;;
}
counter-renderer value,
counter-renderer counter-target {
  font-size: 80px;
  display: block;
}
counter-renderer counter-target {
  color: #ccc;
}
counter-renderer counter.positive value {
  color: #5cb85c;
}
counter-renderer counter.negative value {
  color: #d9534f;
  margin-right: 15px;
}
counter-renderer counter-name {
  font-size: 40px;
  display: block;
}

.rd-widget-textbox p {
  margin-bottom: 0;
}

.iframe-container {
  height: 100%;
}

/*
bootstrap's hidden-xs class adds display:block when not hidden
@@ -276,4 +316,4 @@ use this class when you need to keep the original display value
.rd-hidden-xs {
  display: none !important;
}
}
}
@@ -21,30 +21,20 @@
      Started
    </li>
    <li class="list-group-item">
      <span class="badge">{{manager.queue_size}}</span>
      Queue Size
      <span class="badge">{{manager.outdated_queries_count}}</span>
      Outdated Queries Count
    </li>

    <li class="list-group-item" ng-if="flowerUrl">
      <a href="/admin/workers">Workers' Status</a>
    </li>
  </ul>
  <ul class="list-group col-lg-4">
    <div ng-repeat="worker in workers">
      <li class="list-group-item active">Worker {{$index+1}}</li>
      <li class="list-group-item">
        <span class="badge" am-time-ago="worker.updated_at*1000.0"></span>
        Updated
    <li class="list-group-item active">Queues</li>
    <li class="list-group-item" ng-repeat="(name, value) in manager.queues">
      <span class="badge">{{value.size}}</span>
      {{name}} ({{value.data_sources}})
    </li>
      <li class="list-group-item">
        <span class="badge" am-time-ago="worker.started_at*1000.0"></span>
        Started
      </li>
      <li class="list-group-item">
        <span class="badge">{{worker.jobs_count}}</span>
        Jobs Received
      </li>
      <li class="list-group-item">
        <span class="badge">{{worker.done_jobs_count}}</span>
        Jobs Done
      </li>
    </div>
  </ul>
</div>
<div class="panel-footer">Next refresh: <span am-time-ago="refresh_time"></span></div>

rd_ui/app/views/admin_workers.html (new file, 3 lines)
@@ -0,0 +1,3 @@
<div class="container-fluid iframe-container">
  <iframe src="{{flowerUrl}}" style="width:100%; height:100%; background-color:transparent;"></iframe>
</div>
@@ -14,7 +14,7 @@
      </button>
    </span>
  </h2>
  <filters></filters>
  <filters ng-if="dashboard.dashboard_filters_enabled"></filters>
</div>

<div class="container" id="dashboard">
@@ -29,7 +29,7 @@
  <span ng-hide="currentUser.hasPermission('view_query')">{{query.name}}</span>
  <query-link query="query" visualization="widget.visualization" ng-show="currentUser.hasPermission('view_query')"></query-link>
</p>
<div class="text-muted" ng-bind="query.description"></div>
<div class="text-muted" ng-bind-html="query.description | markdown"></div>
</h3>
</div>

@@ -44,18 +44,29 @@
  <a class="btn btn-default btn-xs" ng-href="/queries/{{query.id}}#{{widget.visualization.id}}" ng-show="currentUser.hasPermission('view_query')"><span class="glyphicon glyphicon-link"></span></a>
  <button type="button" class="btn btn-default btn-xs" ng-show="dashboard.canEdit()" ng-click="deleteWidget()" title="Remove Widget"><span class="glyphicon glyphicon-trash"></span></button>
</span>

<span class="pull-right">
  <a class="btn btn-default btn-xs" ng-disabled="!queryResult.getData()" query-result-link target="_self">
    <span class="glyphicon glyphicon-cloud-download"></span>
  </a>
</span>
</div>
</div>

<div class="panel panel-default" ng-if="type=='textbox'" ng-mouseenter="showControls = true" ng-mouseleave="showControls = false">
<div class="panel panel-default rd-widget-textbox" ng-if="type=='textbox'" ng-mouseenter="showControls = true" ng-mouseleave="showControls = false">
  <div class="panel-body">
    <p ng-bind-html="widget.text | markdown"></p>

    <span class="pull-right" ng-show="showControls">
      <button type="button" class="btn btn-default btn-xs" ng-show="dashboard.canEdit()" ng-click="deleteWidget()" title="Remove Widget"><span class="glyphicon glyphicon-trash"></span></button>
    </span>
    <div class="row">
      <div class="col-lg-11">
        <p ng-bind-html="widget.text | markdown"></p>
      </div>
      <div class="col-lg-1">
        <span class="pull-right" ng-show="showControls">
          <button type="button" class="btn btn-default btn-xs" ng-show="dashboard.canEdit()" ng-click="deleteWidget()" title="Remove Widget"><span class="glyphicon glyphicon-trash"></span></button>
        </span>
      </div>
    </div>
  </div>
</div>
</div>
</div>
</div>
</div>
@@ -22,22 +22,22 @@
</div>

<div ng-show="isVisualization()">
  <p>
    <form class="form-inline" role="form" ng-submit="loadVisualizations()">
      <div class="form-group">
        <input class="form-control" placeholder="Query Id" ng-model="queryId">
      </div>
      <button type="submit" class="btn btn-primary" ng-disabled="!queryId">
        Load visualizations
      </button>
    </form>
  </p>
  <div class="form-group">
    <ui-select ng-model="query.selected" theme="bootstrap" reset-search-input="false">
      <ui-select-match placeholder="Search a query by name">{{$select.selected.name}}</ui-select-match>
      <ui-select-choices repeat="q in queries"
                         refresh="searchQueries($select.search)"
                         refresh-delay="0">
        <div ng-bind-html="q.name | highlight: $select.search | trustAsHtml"></div>
      </ui-select-choices>
    </ui-select>
  </div>

  <div ng-show="query">
    <div class="form-group">
      <label for="">Choose Visualation</label>
      <select ng-model="selectedVis" ng-options="vis as vis.name group by vis.type for vis in query.visualizations" class="form-control"></select>
    </div>
  <div ng-show="selected_query">
    <div class="form-group">
      <label for="">Choose Visualization</label>
      <select ng-model="selectedVis" ng-options="vis as vis.name group by vis.type for vis in selected_query.visualizations" class="form-control"></select>
    </div>
  </div>
</div>

@@ -56,4 +56,4 @@
<!-- /.modal-content -->
</div>
<!-- /.modal-dialog -->
</div>
</div>
rd_ui/app/views/personal.html (new file, 28 lines)
@@ -0,0 +1,28 @@
<div class="container">
  <div class="row">
    <div class="list-group col-md-6">
      <div class="list-group-item active">
        Recent Dashboards
        <button ng-show="currentUser.hasPermission('create_dashboard')" type="button" class="btn btn-sm btn-link" data-toggle="modal" href="#new_dashboard_dialog" tooltip="New Dashboard"><span class="glyphicon glyphicon-plus-sign"></span></button>
      </div>
      <div class="list-group-item" ng-repeat="dashboard in recentDashboards" >
        <button type="button" class="close delete-button" aria-hidden="true" ng-show="dashboard.canEdit()" ng-click="archiveDashboard(dashboard)" tooltip="Delete Dashboard">×</button>
        <a ng-href="/dashboard/{{dashboard.slug}}">{{dashboard.name}}</a>
      </div>
    </div>

    <div class="list-group col-md-6">
      <div class="list-group-item active">
        Recent Queries
      </div>
      <a ng-href="/queries/{{query.id}}" class="list-group-item" ng-repeat="query in recentQueries">{{query.name}}</a>
    </div>
  </div>

  <div ng-show="currentUser.hasPermission('admin')" class="row">
    <div class="list-group">
      <div class="list-group-item active">Admin</div>
      <a href="/admin/status" class="list-group-item">Status</a>
    </div>
  </div>
</div>

rd_ui/app/views/queries_search_results.html (new file, 19 lines)
@@ -0,0 +1,19 @@
<div class="container">
  <div class="row">
    <p>
      <form class="form-inline" role="form" ng-submit="search()">
        <div class="form-group">
          <input class="form-control" placeholder="Search..." ng-model="term" autofocus>
        </div>
        <button type="submit" class="btn btn-primary">
          <span class="glyphicon glyphicon-search"></span>
        </button>
      </form>
    </p>

    <smart-table rows="queries" columns="gridColumns"
                 config="gridConfig"
                 class="table table-condensed table-hover"></smart-table>
  </div>

</div>
@@ -1,6 +1,7 @@

<div class="container">

  <p class="alert alert-warning" ng-if="query.is_archived">This query is archived and can't be used in dashboards, and won't appear in search results.</p>
  <alert-unsaved-changes ng-if="canEdit" is-dirty="isDirty"></alert-unsaved-changes>

  <div class="row">
@@ -12,7 +13,14 @@
    </h2>
    <p>
      <em>
        <edit-in-place editable="isQueryOwner" done="saveDescription" editor="textarea" placeholder="No description" ignore-blanks='false' value="query.description"></edit-in-place>
        <edit-in-place editable="isQueryOwner"
                       done="saveDescription"
                       editor="textarea"
                       placeholder="No description"
                       ignore-blanks='false'
                       value="query.description"
                       markdown="true">
        </edit-in-place>
      </em>
    </p>
  </div>
@@ -122,10 +130,33 @@
  <hr>

  <p>
    <a class="btn btn-primary btn-sm" ng-disabled="queryExecuting || !queryResult.getData()" ng-href="{{dataUri}}" download="{{dataFilename}}" target="_self">
    <a class="btn btn-primary btn-sm" ng-disabled="queryExecuting || !queryResult.getData()" query-result-link target="_self">
      <span class="glyphicon glyphicon-cloud-download"></span>
      <span class="rd-hidden-xs">Download Dataset</span>
    </a>

    <a class="btn btn-warning btn-sm" ng-disabled="queryExecuting" data-toggle="modal" data-target="#archive-confirmation-modal"
       ng-show="!query.is_archived && query.id != undefined && (isQueryOwner || currentUser.hasPermission('admin'))">
      <i class="fa fa-archive" title="Archive Query"></i>
    </a>

    <div class="modal fade" id="archive-confirmation-modal" tabindex="-1" role="dialog" aria-labelledby="archiveConfirmationModal" aria-hidden="true">
      <div class="modal-dialog">
        <div class="modal-content">
          <div class="modal-header">
            <h4 class="modal-title">Query Archive</h4>
          </div>
          <div class="modal-body">
            Are you sure you want to archive this query? <br/>
            All dashboard widgets created with its visualizations will be deleted.
          </div>
          <div class="modal-footer">
            <button type="button" class="btn btn-default" data-dismiss="modal">No</button>
            <button type="button" class="btn btn-primary" ng-click="archiveQuery()">Yes, archive.</button>
          </div>
        </div>
      </div>
    </div>
  </p>
</div>

@@ -142,7 +173,7 @@
<div class="alert alert-danger" ng-show="queryResult.getError()">Error running query: <strong>{{queryResult.getError()}}</strong></div>

<!-- tabs and data -->
<div ng-show="queryResult.getStatus() == 'done'">
<div ng-show="showDataset">
  <div class="row">
    <div class="col-lg-12">
      <ul class="nav nav-tabs">
@@ -152,6 +183,7 @@
        <span class="remove" ng-click="deleteVisualization($event, vis)" ng-show="canEdit"> ×</span>
      </rd-tab>
      <rd-tab tab-id="add" name="+ New" removeable="true" ng-show="canEdit"></rd-tab>
      <li ng-if="!sourceMode" class="rd-tab-btn"><button class="btn btn-sm btn-default" ng-click="executeQuery()" ng-disabled="queryExecuting" title="Refresh Dataset"><span class="glyphicon glyphicon-refresh"></span></button></li>
    </ul>
  </div>
</div>
@@ -170,9 +202,9 @@
<edit-visulatization-form visualization="vis" query="query" query-result="queryResult" ng-show="canEdit"></edit-visulatization-form>
</div>

<div ng-show="selectedTab == 'add'">
<div ng-if="canEdit" ng-show="selectedTab == 'add'">
  <visualization-renderer visualization="newVisualization" query-result="queryResult"></visualization-renderer>
  <edit-visulatization-form visualization="newVisualization" query="query" ng-show="canEdit" open-editor="true" on-new-success="setVisualizationTab"></edit-visulatization-form>
  <edit-visulatization-form visualization="newVisualization" query="query" query-result="queryResult" ng-show="canEdit" open-editor="true" on-new-success="setVisualizationTab"></edit-visulatization-form>
</div>
</div>
</div>
@@ -1,14 +1,107 @@
<div>
  <div class="form-group">
    <label class="control-label">Chart Type</label>
    <select required ng-model="visualization.options.series.type" ng-options="value as key for (key, value) in seriesTypes" class="form-control"></select>
  </div>
<div class="form-horizontal">
  <div class="panel panel-default">
    <div class="panel-body">
      <div class="form-group">
        <label class="control-label col-sm-2">Stacking</label>

  <div class="form-group">
    <label class="control-label">Stacking</label>
    <select required ng-model="stacking" ng-options="value as key for (key, value) in stackingOptions" class="form-control"></select>
        <div class="col-sm-10">
          <select required ng-model="stacking"
                  ng-options="value as key for (key, value) in stackingOptions"
                  class="form-control"></select>
        </div>
      </div>
      <div class="form-group">
        <label class="control-label col-sm-2">X Axis Type</label>

    <label class="control-label">X Axis Type</label>
    <select required ng-model="xAxisType" ng-options="value as key for (key, value) in xAxisOptions" class="form-control"></select>
  </div>
</div>
        <div class="col-sm-10">
          <select required ng-model="xAxisType" ng-options="value as key for (key, value) in xAxisOptions"
                  class="form-control"></select>
        </div>
      </div>

      <div class="form-group">
        <label class="control-label col-sm-2">Series Type</label>

        <div class="col-sm-10">
          <select required ng-options="value as key for (key, value) in seriesTypes"
                  ng-model="globalSeriesType" class="form-control"></select>
        </div>
      </div>
    </div>
  </div>

  <div class="row">
    <div class="col-lg-6">
      <div class="list-group">
        <div class="list-group-item active">
          Columns Mapping
        </div>
        <div class="list-group-item">
          <div class="form-group" ng-repeat="column in columns">
            <label class="control-label col-sm-4">{{column.name}}</label>

            <div class="col-sm-8">
              <select ng-options="value as key for (key, value) in columnTypes" class="form-control"
                      ng-model="columnTypeSelection[column.name]"></select>
            </div>
          </div>
        </div>
      </div>
    </div>

    <div class="col-lg-6" ng-if="series.length > 0">
      <div class="list-group" ng-repeat="seriesName in series">
        <div class="list-group-item active">
          {{seriesName}}
        </div>
        <div class="list-group-item">
          <div class="form-group">
            <label class="control-label col-sm-3">Type</label>

            <div class="col-sm-9">
              <select required ng-model="visualization.options.seriesOptions[seriesName].type"
                      ng-options="value as key for (key, value) in seriesTypes"
                      class="form-control"></select>
            </div>
          </div>
          <div class="form-group">
            <label class="control-label col-sm-3">zIndex</label>

            <div class="col-sm-9">
              <select required ng-model="visualization.options.seriesOptions[seriesName].zIndex"
                      ng-options="o as o for o in zIndexes"
                      class="form-control"></select>
            </div>
          </div>
          <div class="form-group">
            <label class="control-label col-sm-3">y Axis</label>

            <div class="col-sm-9">
              <select required ng-model="visualization.options.seriesOptions[seriesName].yAxis"
                      ng-options="o[0] as o[1] for o in yAxes"
                      class="form-control"></select>
            </div>
          </div>
          <div class="form-group">
            <label class="control-label col-sm-3">Name</label>

            <div class="col-sm-9">
              <input name="seriesName" type="text" class="form-control"
                     ng-model="visualization.options.seriesOptions[seriesName].name"
                     placeholder="{{seriesName}}">
            </div>
          </div>
          <div class="form-group">
            <label class="control-label col-sm-3">Color</label>

            <div class="col-sm-9">
              <select class="form-control" ng-model="visualization.options.seriesOptions[seriesName].color" ng-options="val as key for (key,val) in palette"></select>
            </div>
          </div>
        </div>
      </div>

    </div>
  </div>
</div>
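For orientation only (not part of the commit, values hypothetical): a sketch of the visualization.options object these form controls bind to through their ng-model expressions:

// Sketch: one possible options payload after editing a chart.
var options = {
  series: {type: 'column', stacking: null},   // global stacking; null means "none"
  columnMapping: {date: 'x', total: 'y', country: 'series'},
  seriesOptions: {
    total: {type: 'line', yAxis: 0, zIndex: 0, name: 'Total', color: '#4572A7'}
  }
};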
rd_ui/app/views/visualizations/counter.html (new file, 5 lines)
@@ -0,0 +1,5 @@
<counter ng-class="{'positive': targetValue && trendPositive, 'negative': targetValue && !trendPositive}">
  <value>{{counterValue|number}}</value>
  <counter-target ng-if="targetValue">({{targetValue|number}})</counter-target>
  <counter-name>{{visualization.name}}</counter-name>
</counter>

rd_ui/app/views/visualizations/counter_editor.html (new file, 20 lines)
@@ -0,0 +1,20 @@
<div class="form-horizontal">
  <div class="form-group">
    <label class="col-lg-6">Row Number</label>
    <div class="col-lg-6">
      <input type="number" ng-model="visualization.options.rowNumber" class="form-control">
    </div>
  </div>
  <div class="form-group">
    <label class="col-lg-6">Counter Column Name</label>
    <div class="col-lg-6">
      <select ng-options="name for name in queryResult.columnNames" ng-model="visualization.options.counterColName" class="form-control"></select>
    </div>
  </div>
  <div class="form-group">
    <label class="col-lg-6">Target Column Name</label>
    <div class="col-lg-6">
      <select ng-options="name for name in queryResult.columnNames" ng-model="visualization.options.targetColName" class="form-control"></select>
    </div>
  </div>
</div>

@@ -24,4 +24,4 @@
</div>

</form>
</div>
</div>
@@ -2,7 +2,10 @@
  "name": "rdUi",
  "version": "0.1.0",
  "dependencies": {
    "angular": "1.2.7",
    "angular": "1.2.18",
    "angular-resource": "1.2.18",
    "angular-route": "1.2.18",
    "angular-growl": "0.4.0",
    "json3": "3.2.4",
    "jquery": "1.9.1",
    "bootstrap": "3.0.0",
@@ -11,25 +14,26 @@
    "moment": "2.1.0",
    "angular-ui-bootstrap": "0.5.0",
    "angular-ui-codemirror": "0.0.5",
    "highcharts": "3.0.1",
    "highcharts": "3.0.10",
    "underscore": "1.5.1",
    "angular-resource": "1.2.15",
    "angular-growl": "0.3.1",
    "angular-route": "1.2.7",
    "pivottable": "https://github.com/arikfr/pivottable.git",
    "pivottable": "~1.1.1",
    "cornelius": "https://github.com/restorando/cornelius.git",
    "gridster": "0.2.0",
    "mousetrap": "~1.4.6",
    "angular-ui-select2": "~0.0.5",
    "jquery-ui": "~1.10.4",
    "underscore.string": "~2.3.3",
    "marked": "~0.3.2",
    "bucky": "~0.2.6"
    "bucky": "~0.2.6",
    "pace": "~0.5.1",
    "angular-ui-select": "0.8.2",
    "font-awesome": "~4.2.0"
  },
  "devDependencies": {
    "angular-mocks": "~1.0.7",
    "angular-scenario": "~1.0.7"
    "angular-mocks": "1.2.18",
    "angular-scenario": "1.2.18"
  },
  "resolutions": {
    "angular": "1.2.7"
    "angular": "1.2.18"
  }
}
@@ -1,58 +0,0 @@
// Karma E2E configuration

// base path, that will be used to resolve files and exclude
basePath = '';

// list of files / patterns to load in the browser
files = [
  ANGULAR_SCENARIO,
  ANGULAR_SCENARIO_ADAPTER,
  'test/e2e/**/*.js'
];

// list of files to exclude
exclude = [];

// test results reporter to use
// possible values: dots || progress || growl
reporters = ['progress'];

// web server port
port = 8080;

// cli runner port
runnerPort = 9100;

// enable / disable colors in the output (reporters and logs)
colors = true;

// level of logging
// possible values: LOG_DISABLE || LOG_ERROR || LOG_WARN || LOG_INFO || LOG_DEBUG
logLevel = LOG_INFO;

// enable / disable watching file and executing tests whenever any file changes
autoWatch = false;

// Start these browsers, currently available:
// - Chrome
// - ChromeCanary
// - Firefox
// - Opera
// - Safari (only Mac)
// - PhantomJS
// - IE (only Windows)
browsers = ['Chrome'];

// If browser does not capture in given timeout [ms], kill it
captureTimeout = 5000;

// Continuous Integration mode
// if true, it capture browsers, run tests and exit
singleRun = false;

// Uncomment the following lines if you are using grunt's server to run the tests
// proxies = {
//   '/': 'http://localhost:9000/'
// };
// URL root prevent conflicts with the site root
// urlRoot = '_karma_';
@@ -1,56 +0,0 @@
// Karma configuration

// base path, that will be used to resolve files and exclude
basePath = '';

// list of files / patterns to load in the browser
files = [
  JASMINE,
  JASMINE_ADAPTER,
  'app/bower_components/angular/angular.js',
  'app/bower_components/angular-mocks/angular-mocks.js',
  'app/scripts/*.js',
  'app/scripts/**/*.js',
  'test/mock/**/*.js',
  'test/spec/**/*.js'
];

// list of files to exclude
exclude = [];

// test results reporter to use
// possible values: dots || progress || growl
reporters = ['progress'];

// web server port
port = 8080;

// cli runner port
runnerPort = 9100;

// enable / disable colors in the output (reporters and logs)
colors = true;

// level of logging
// possible values: LOG_DISABLE || LOG_ERROR || LOG_WARN || LOG_INFO || LOG_DEBUG
logLevel = LOG_INFO;

// enable / disable watching file and executing tests whenever any file changes
autoWatch = false;

// Start these browsers, currently available:
// - Chrome
// - ChromeCanary
// - Firefox
// - Opera
// - Safari (only Mac)
// - PhantomJS
// - IE (only Windows)
browsers = ['Chrome'];

// If browser does not capture in given timeout [ms], kill it
captureTimeout = 5000;

// Continuous Integration mode
// if true, it capture browsers, run tests and exit
singleRun = false;
@@ -1,38 +1,39 @@
{
  "name": "rd-ui",
  "version": "0.1.0",
  "dependencies": {
  },
  "name": "rdui",
  "version": "0.0.0",
  "dependencies": {},
  "devDependencies": {
    "grunt": "git+https://github.com/gruntjs/grunt.git#08a3af5",
    "grunt-contrib-copy": "~0.4.1",
    "grunt-contrib-concat": "~0.3.0",
    "grunt-contrib-coffee": "~0.7.0",
    "grunt-contrib-uglify": "~0.2.0",
    "grunt-contrib-compass": "~0.5.0",
    "grunt-contrib-jshint": "~0.6.0",
    "grunt-contrib-cssmin": "~0.6.0",
    "grunt-contrib-connect": "~0.3.0",
    "grunt-contrib-clean": "~0.5.0",
    "grunt-contrib-htmlmin": "~0.1.3",
    "grunt-contrib-imagemin": "~0.2.0",
    "grunt-contrib-watch": "~0.5.2",
    "grunt-autoprefixer": "~0.2.0",
    "grunt-usemin": "~0.1.11",
    "grunt-svgmin": "~0.2.0",
    "grunt-rev": "~0.1.0",
    "grunt-open": "~0.2.0",
    "grunt-concurrent": "~0.3.0",
    "load-grunt-tasks": "~0.1.0",
    "connect-livereload": "~0.2.0",
    "grunt-google-cdn": "~0.2.0",
    "grunt-ngmin": "~0.0.2",
    "time-grunt": "~0.1.0",
    "bower": "~1.2.7",
    "grunt-cli": "~0.1.9"
    "grunt": "^0.4.1",
    "grunt-autoprefixer": "^0.7.3",
    "grunt-concurrent": "^0.5.0",
    "grunt-contrib-clean": "^0.5.0",
    "grunt-contrib-concat": "^0.4.0",
    "grunt-contrib-connect": "^0.7.1",
    "grunt-contrib-copy": "^0.5.0",
    "grunt-contrib-cssmin": "^0.9.0",
    "grunt-contrib-htmlmin": "^0.3.0",
    "grunt-contrib-imagemin": "^0.7.0",
    "grunt-contrib-jshint": "^0.10.0",
    "grunt-contrib-uglify": "^0.4.0",
    "grunt-contrib-watch": "^0.6.1",
    "grunt-filerev": "^0.2.1",
    "grunt-google-cdn": "^0.4.0",
    "grunt-newer": "^0.7.0",
    "grunt-ngmin": "^0.0.3",
    "grunt-svgmin": "^0.4.0",
    "grunt-usemin": "^2.1.1",
    "grunt-wiredep": "^1.7.0",
    "jshint-stylish": "^0.2.0",
    "load-grunt-tasks": "^0.4.0",
    "time-grunt": "^0.3.1",
    "karma-jasmine": "~0.1.5",
    "grunt-karma": "~0.8.3",
    "karma-phantomjs-launcher": "~0.1.4",
    "karma": "~0.12.19",
    "karma-ng-html2js-preprocessor": "~0.1.0"
  },
  "engines": {
    "node": ">=0.8.0"
    "node": ">=0.10.0"
  },
  "scripts": {
    "test": "grunt test"

@@ -29,6 +29,7 @@
  "expect": false,
  "inject": false,
  "it": false,
  "jasmine": false,
  "spyOn": false
}
}
rd_ui/test/karma.conf.js (new file, 132 lines)
@@ -0,0 +1,132 @@
// Karma configuration
// http://karma-runner.github.io/0.12/config/configuration-file.html
// Generated on 2014-07-30 using
// generator-karma 0.8.3

module.exports = function(config) {
  'use strict';

  config.set({
    // enable / disable watching file and executing tests whenever any file changes
    autoWatch: true,

    // base path, that will be used to resolve files and exclude
    basePath: '../',

    // testing framework to use (jasmine/mocha/qunit/...)
    frameworks: ['jasmine'],

    // list of files / patterns to load in the browser
    files: [
      'app/bower_components/jquery/jquery.js',
      'app/bower_components/jquery-ui/ui/jquery-ui.js',

      'app/bower_components/angular/angular.js',
      'app/bower_components/angular-route/angular-route.js',
      'app/bower_components/angular-mocks/angular-mocks.js',

      'app/bower_components/bootstrap/js/collapse.js',
      'app/bower_components/bootstrap/js/modal.js',
      'app/bower_components/angular-resource/angular-resource.js',
      'app/bower_components/underscore/underscore.js',
      'app/bower_components/moment/moment.js',
      'app/bower_components/angular-moment/angular-moment.js',
      'app/bower_components/codemirror/lib/codemirror.js',
      'app/bower_components/codemirror/addon/edit/matchbrackets.js',
      'app/bower_components/codemirror/addon/edit/closebrackets.js',
      'app/bower_components/codemirror/mode/sql/sql.js',
      'app/bower_components/codemirror/mode/javascript/javascript.js',
      'app/bower_components/angular-ui-codemirror/ui-codemirror.js',
      'app/bower_components/highcharts/highcharts.js',
      'app/bower_components/highcharts/modules/exporting.js',
      'app/bower_components/gridster/dist/jquery.gridster.js',
      'app/bower_components/angular-growl/build/angular-growl.js',
      'app/bower_components/pivottable/dist/pivot.js',
      'app/bower_components/cornelius/src/cornelius.js',
      'app/bower_components/mousetrap/mousetrap.js',
      'app/bower_components/mousetrap/plugins/global-bind/mousetrap-global-bind.js',
      'app/bower_components/select2/select2.js',
      'app/bower_components/angular-ui-select2/src/select2.js',
      'app/bower_components/angular-ui-select/dist/select.js',
      'app/bower_components/underscore.string/lib/underscore.string.js',
      'app/bower_components/marked/lib/marked.js',
      'app/scripts/ng_highchart.js',
      'app/scripts/ng_smart_table.js',
      'app/scripts/ui-bootstrap-tpls-0.5.0.min.js',
      'app/bower_components/bucky/bucky.js',
      'app/bower_components/pace/pace.js',

      'app/scripts/app.js',
      'app/scripts/services/services.js',
      'app/scripts/services/resources.js',
      'app/scripts/services/notifications.js',
      'app/scripts/services/dashboards.js',
      'app/scripts/controllers/controllers.js',
      'app/scripts/controllers/dashboard.js',
      'app/scripts/controllers/admin_controllers.js',
      'app/scripts/controllers/query_view.js',
      'app/scripts/controllers/query_source.js',
      'app/scripts/visualizations/base.js',
      'app/scripts/visualizations/chart.js',
      'app/scripts/visualizations/cohort.js',
      'app/scripts/visualizations/table.js',
      'app/scripts/visualizations/pivot.js',
      'app/scripts/directives/directives.js',
      'app/scripts/directives/query_directives.js',
      'app/scripts/directives/dashboard_directives.js',
      'app/scripts/filters.js',

      'app/views/**/*.html',

      'test/mocks/*.js',
      'test/unit/*.js'
    ],

    // generate js files from html templates
    preprocessors: {
      'app/views/**/*.html': 'ng-html2js'
    },

    // list of files / patterns to exclude
    exclude: [],

    // web server port
    port: 8080,

    // Start these browsers, currently available:
    // - Chrome
    // - ChromeCanary
    // - Firefox
    // - Opera
    // - Safari (only Mac)
    // - PhantomJS
    // - IE (only Windows)
    browsers: [
      'PhantomJS'
    ],

    // Which plugins to enable
    plugins: [
      'karma-phantomjs-launcher',
      'karma-jasmine',
      'karma-ng-html2js-preprocessor'
    ],

    // Continuous Integration mode
    // if true, it capture browsers, run tests and exit
    singleRun: false,

    colors: true,

    // level of logging
    // possible values: LOG_DISABLE || LOG_ERROR || LOG_WARN || LOG_INFO || LOG_DEBUG
    logLevel: config.LOG_INFO,

    // Uncomment the following lines if you are using grunt's server to run the tests
    // proxies: {
    //   '/': 'http://localhost:9000/'
    // },
    // URL root prevent conflicts with the site root
    // urlRoot: '_karma_'
  });
};
rd_ui/test/mocks/redash_mocks.js (new file, 108 lines)
@@ -0,0 +1,108 @@
featureFlags = [];
currentUser = {
  id: 1,
  name: 'John Mock',
  email: 'john@example.com',
  groups: ['default'],
  permissions: [],
  canEdit: function(object) {
    var user_id = object.user_id || (object.user && object.user.id);
    return user_id && (user_id == currentUser.id);
  },
  hasPermission: function(permission) {
    return this.permissions.indexOf(permission) != -1;
  }
};


angular.module('redashMocks', [])
  .value('MockData', {
    query: {
      "ttl": -1,
      "query": "select name from users;",
      "id": 1803,
      "description": "",
      "name": "my test query",
      "created_at": "2014-01-07T16:11:31.859528+02:00",
      "query_hash": "c89c235bc73e462e9702debc56adc309",

      "user": {
        "email": "amirn@everything.me",
        "id": 48,
        "name": "Amir Nissim"
      },

      "visualizations": [{
        "description": "",
        "options": {},
        "type": "TABLE",
        "id": 636,
        "name": "Table"
      }],

      "api_key": "123456789",

      "data_source_id": 1,

      "latest_query_data_id": 106632,

      "latest_query_data": {
        "retrieved_at": "2014-07-29T10:49:10.951364+03:00",
        "query_hash": "c89c235bc73e462e9702debc56adc309",
        "query": "select name from users;",
        "runtime": 0.0139260292053223,
        "data": {
          "rows": [{
            "name": "Amir Nissim"
          }, {
            "name": "Arik Fraimovich"
          }],
          "columns": [{
            "friendly_name": "name",
            "type": null,
            "name": "name"
          }, {
            "friendly_name": "mail::filter",
            "type": null,
            "name": "mail::filter"
          }]
        },
        "id": 106632,
        "data_source_id": 1
      }

    },

    queryResult: {
      "job": {},
      "query_result": {
        "retrieved_at": "2014-08-04T13:33:45.563486+03:00",
        "query_hash": "9951c38c9cf00e6ee8aecce026b51c19",
        "query": "select name as \"name::filter\" from users",
        "runtime": 0.00896096229553223,
        "data": {
          "rows": [],
          "columns": [{
            "friendly_name": "name::filter",
            "type": null,
            "name": "name::filter"
          }]
        },
        "id": 106673,
        "data_source_id": 1
      },
      "status": "done",
      "filters": [],
      "filterFreeze": "test@example.com",
      "updatedAt": "2014-08-05T13:13:40.833Z",
      "columnNames": ["name::filter"],
      "filteredData": [{
        "name::filter": "test@example.com"
      }],
      "columns": [{
        "friendly_name": "name::filter",
        "type": null,
        "name": "name::filter"
      }]
    }
  });
rd_ui/test/unit/example_test.js (new file, 5 lines)
@@ -0,0 +1,5 @@
describe('example test', function() {
  it('should expect the obvious', function() {
    expect(0).toBe(0);
  });
});

rd_ui/test/unit/test_query_view.js (new file, 34 lines)
@@ -0,0 +1,34 @@
'use strict';

describe('QueryViewCtrl', function() {
  var scope;
  var MockData;

  beforeEach(module('redash', 'redashMocks'));

  beforeEach(inject(function($injector, $controller, $rootScope, Query, _MockData_) {
    MockData = _MockData_;
    scope = $rootScope.$new();

    var route = {
      current: {
        locals: {
          query: new Query(MockData.query)
        }
      }
    };

    $controller('QueryViewCtrl', {$scope: scope, $route: route});
  }));

  it('should have a query', function() {
    expect(scope.query).toBeDefined();
  });

  it('should update the executing state', function() {
    expect(scope.queryExecuting).toBe(false);
    scope.executeQuery();
    expect(scope.queryExecuting).toBe(true);
  });

});

rd_ui/test/unit/test_visualization_renderer.js (new file, 89 lines)
@@ -0,0 +1,89 @@
'use strict';

describe('VisualizationRenderer', function() {
  var element;
  var scope;

  var filters = [{
    "name": "name::filter",
    "friendlyName": "Name",
    "values": ["test@example.com", "amirn@example.com"],
    "multiple": false
  }];

  beforeEach(module('redash', 'redashMocks'));

  // loading templates
  beforeEach(module('app/views/grid_renderer.html',
                    'app/views/visualizations/filters.html'));

  // serving templates
  beforeEach(inject(function($httpBackend, $templateCache) {
    $httpBackend.whenGET('/views/grid_renderer.html')
      .respond($templateCache.get('app/views/grid_renderer.html'));

    $httpBackend.whenGET('/views/visualizations/filters.html')
      .respond($templateCache.get('app/views/visualizations/filters.html'));
  }));

  // directive setup
  beforeEach(inject(function($rootScope, $compile, MockData, QueryResult) {
    var qr = new QueryResult(MockData.queryResult)
    qr.filters = filters;

    $rootScope.queryResult = qr;

    element = angular.element(
      '<visualization-renderer query-result="queryResult">' +
      '</visualization-renderer>');
  }));


  describe('scope', function() {
    beforeEach(inject(function($rootScope, $compile) {
      $compile(element)($rootScope);

      // we will test the isolated scope of the directive
      scope = element.isolateScope();
      scope.$digest();
    }));

    it('should have filters', function() {
      expect(scope.filters).toBeDefined();
    });
  });


  /*describe('URL binding', function() {

    beforeEach(inject(function($rootScope, $compile, $location) {
      spyOn($location, 'search').andCallThrough();

      // set initial search
      var initialSearch = {};
      initialSearch[filters[0].friendlyName] = filters[0].values[0];
      $location.search('filters', initialSearch);

      $compile(element)($rootScope);

      // we will test the isolated scope of the directive
      scope = element.isolateScope();
      scope.$digest();
    }));

    it('should update scope from URL',
      inject(function($location) {
        expect($location.search).toHaveBeenCalled();
        expect(scope.filters[0].current).toEqual(filters[0].values[0]);
      }));

    it('should update URL from scope',
      inject(function($location) {
        scope.filters[0].current = 'newValue';
        scope.$digest();

        var searchFilters = angular.fromJson($location.search().filters);
        expect(searchFilters[filters[0].friendlyName]).toEqual('newValue');
      }));
  });*/
});
@@ -1,16 +1,11 @@
import json
import urlparse
import logging
from flask import Flask, make_response
from flask.ext.restful import Api
from flask_peewee.db import Database

import urlparse
import redis
from statsd import StatsClient
import events
from redash import settings, utils

__version__ = '0.3.6'
from redash import settings

__version__ = '0.5.0'


def setup_logging():
@@ -19,43 +14,21 @@ def setup_logging():
    handler.setFormatter(formatter)
    logging.getLogger().addHandler(handler)
    logging.getLogger().setLevel(settings.LOG_LEVEL)
    logging.getLogger("passlib").setLevel("ERROR")


def create_redis_connection():
    redis_url = urlparse.urlparse(settings.REDIS_URL)
    if redis_url.path:
        redis_db = redis_url.path[1]
    else:
        redis_db = 0

    r = redis.StrictRedis(host=redis_url.hostname, port=redis_url.port, db=redis_db, password=redis_url.password)

    return r

events.setup_logging(settings.EVENTS_LOG_PATH, settings.EVENTS_CONSOLE_OUTPUT)

setup_logging()

app = Flask(__name__,
            template_folder=settings.STATIC_ASSETS_PATH,
            static_folder=settings.STATIC_ASSETS_PATH,
            static_path='/static')

api = Api(app)

# configure our database
settings.DATABASE_CONFIG.update({'threadlocals': True})
app.config['DATABASE'] = settings.DATABASE_CONFIG
db = Database(app)

from redash.authentication import setup_authentication
auth = setup_authentication(app)

@api.representation('application/json')
def json_representation(data, code, headers=None):
    resp = make_response(json.dumps(data, cls=utils.JSONEncoder), code)
    resp.headers.extend(headers or {})
    return resp


redis_url = urlparse.urlparse(settings.REDIS_URL)
if redis_url.path:
    redis_db = redis_url.path[1]
else:
    redis_db = 0

redis_connection = redis.StrictRedis(host=redis_url.hostname, port=redis_url.port, db=redis_db, password=redis_url.password)
statsd_client = StatsClient(host=settings.STATSD_HOST, port=settings.STATSD_PORT, prefix=settings.STATSD_PREFIX)

from redash import data
data_manager = data.Manager(redis_connection, statsd_client)

from redash import controllers
redis_connection = create_redis_connection()
statsd_client = StatsClient(host=settings.STATSD_HOST, port=settings.STATSD_PORT, prefix=settings.STATSD_PREFIX)
@@ -5,13 +5,9 @@ import time
import logging

from flask import request, make_response, redirect, url_for
from flask.ext.googleauth import GoogleAuth, login
from flask.ext.login import LoginManager, login_user, current_user
from werkzeug.contrib.fixers import ProxyFix

from models import AnonymousUser
from redash import models, settings

from redash import models, settings, google_oauth

login_manager = LoginManager()
logger = logging.getLogger('authentication')
@@ -59,49 +55,15 @@ class HMACAuthentication(object):
        return decorated


def validate_email(email):
    if not settings.GOOGLE_APPS_DOMAIN:
        return True

    return email in settings.ALLOWED_EXTERNAL_USERS or email.endswith("@%s" % settings.GOOGLE_APPS_DOMAIN)


def create_and_login_user(app, user):
    if not validate_email(user.email):
        return

    try:
        user_object = models.User.get(models.User.email == user.email)
        if user_object.name != user.name:
            logger.debug("Updating user name (%r -> %r)", user_object.name, user.name)
            user_object.name = user.name
            user_object.save()
    except models.User.DoesNotExist:
        logger.debug("Creating user object (%r)", user.name)
        user_object = models.User.create(name=user.name, email=user.email,
                                         is_admin=(user.email in settings.ADMINS))

    login_user(user_object, remember=True)

login.connect(create_and_login_user)


@login_manager.user_loader
def load_user(user_id):
    return models.User.select().where(models.User.id == user_id).first()


def setup_authentication(app):
    if settings.GOOGLE_OPENID_ENABLED:
        openid_auth = GoogleAuth(app, url_prefix="/google_auth")
        # If we don't have a list of external users, we can use Google's federated login, which limits
        # the domain with which you can sign in.
        if not settings.ALLOWED_EXTERNAL_USERS and settings.GOOGLE_APPS_DOMAIN:
            openid_auth._OPENID_ENDPOINT = "https://www.google.com/a/%s/o8/ud?be=o8" % settings.GOOGLE_APPS_DOMAIN

    login_manager.init_app(app)
    login_manager.anonymous_user = AnonymousUser
    app.wsgi_app = ProxyFix(app.wsgi_app)
    login_manager.anonymous_user = models.AnonymousUser
    app.secret_key = settings.COOKIE_SECRET
    app.register_blueprint(google_oauth.blueprint)

    return HMACAuthentication()
8
redash/cache.py
Normal file
8
redash/cache.py
Normal file
@@ -0,0 +1,8 @@
|
||||
from flask import make_response
|
||||
from functools import update_wrapper
|
||||
|
||||
ONE_YEAR = 60 * 60 * 24 * 365.25
|
||||
|
||||
headers = {
|
||||
'Cache-Control': 'max-age=%d' % ONE_YEAR
|
||||
}
|
||||
0
redash/cli/__init__.py
Normal file
0
redash/cli/__init__.py
Normal file
60
redash/cli/data_sources.py
Normal file
60
redash/cli/data_sources.py
Normal file
@@ -0,0 +1,60 @@
|
||||
from flask.ext.script import Manager
from redash import models

manager = Manager(help="Data sources management commands.")


@manager.command
def list():
    """List currently configured data sources"""
    for i, ds in enumerate(models.DataSource.select()):
        if i > 0:
            print "-" * 20

        print "Id: {}\nName: {}\nType: {}\nOptions: {}".format(ds.id, ds.name, ds.type, ds.options)


@manager.command
def new(name, type, options):
    """Create new data source"""
    # TODO: validate it's a valid type and in the future, validate the options.
    print "Creating {} data source ({}) with options:\n{}".format(type, name, options)
    data_source = models.DataSource.create(name=name,
                                           type=type,
                                           options=options)
    print "Id: {}".format(data_source.id)


@manager.command
def delete(name):
    """Deletes data source by name"""
    try:
        data_source = models.DataSource.get(models.DataSource.name == name)
        print "Deleting data source: {} (id={})".format(name, data_source.id)
        data_source.delete_instance()
    except models.DataSource.DoesNotExist:
        print "Couldn't find data source named: {}".format(name)


def update_attr(obj, attr, new_value):
    if new_value is not None:
        old_value = getattr(obj, attr)
        print "Updating {}: {} -> {}".format(attr, old_value, new_value)
        setattr(obj, attr, new_value)


@manager.option('name', default=None, help="name of data source to edit")
@manager.option('--name', dest='new_name', default=None, help="new name for the data source")
@manager.option('--options', dest='options', default=None, help="updated options for the data source")
@manager.option('--type', dest='type', default=None, help="new type for the data source")
def edit(name, new_name=None, options=None, type=None):
    """Edit data source settings (name, options, type)"""
    try:
        data_source = models.DataSource.get(models.DataSource.name == name)
        update_attr(data_source, "name", new_name)
        update_attr(data_source, "type", type)
        update_attr(data_source, "options", options)
        data_source.save()

    except models.DataSource.DoesNotExist:
        print "Couldn't find data source named: {}".format(name)
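update_attr only touches attributes for which a value was actually supplied, which is what lets edit() be called with any subset of --name/--type/--options. A small illustration using a plain object (the object and values are made up):

class Obj(object):
    pass

obj = Obj()
obj.name, obj.type = "events", "mysql"

update_attr(obj, "name", "events_prod")  # prints: Updating name: events -> events_prod
update_attr(obj, "type", None)           # no-op: None means "option not provided"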
19  redash/cli/database.py  Normal file
@@ -0,0 +1,19 @@
from flask.ext.script import Manager

manager = Manager(help="Manages the database (create/drop tables).")


@manager.command
def create_tables():
    """Creates the database tables."""
    from redash.models import create_db, init_db

    create_db(True, False)
    init_db()


@manager.command
def drop_tables():
    """Drops the database tables."""
    from redash.models import create_db

    create_db(False, True)
74  redash/cli/users.py  Normal file
@@ -0,0 +1,74 @@
from flask.ext.script import Manager, prompt_pass
from redash import models

manager = Manager(help="Users management commands.")


@manager.option('email', help="email address of the user to grant admin to")
def grant_admin(email):
    try:
        user = models.User.get_by_email(email)

        user.groups.append('admin')
        user.save()

        print "User updated."
    except models.User.DoesNotExist:
        print "User [%s] not found." % email


@manager.option('email', help="User's email")
@manager.option('name', help="User's full name")
@manager.option('--admin', dest='is_admin', action="store_true", default=False, help="set user as admin")
@manager.option('--google', dest='google_auth', action="store_true", default=False, help="user uses Google Auth to login")
@manager.option('--password', dest='password', default=None, help="Password for users who don't use Google Auth (leave blank for prompt).")
@manager.option('--groups', dest='groups', default=models.User.DEFAULT_GROUPS, help="Comma separated list of groups (leave blank for default).")
def create(email, name, groups, is_admin=False, google_auth=False, password=None):
    print "Creating user (%s, %s)..." % (email, name)
    print "Admin: %r" % is_admin
    print "Login with Google Auth: %r\n" % google_auth
    if isinstance(groups, basestring):
        groups = groups.split(',')
        if '' in groups:
            groups.remove('')  # in case it was an empty string

    if is_admin:
        groups += ['admin']

    user = models.User(email=email, name=name, groups=groups)
    if not google_auth:
        password = password or prompt_pass("Password")
        user.hash_password(password)

    try:
        user.save()
    except Exception, e:
        print "Failed creating user: %s" % e.message


@manager.option('email', help="email address of user to delete")
def delete(email):
    deleted_count = models.User.delete().where(models.User.email == email).execute()
    print "Deleted %d users." % deleted_count


@manager.option('password', help="new password for the user")
@manager.option('email', help="email address of the user to change password for")
def password(email, password):
    try:
        user = models.User.get_by_email(email)

        user.hash_password(password)
        user.save()

        print "User updated."
    except models.User.DoesNotExist:
        print "User [%s] not found." % email


@manager.command
def list():
    """List all users"""
    for i, user in enumerate(models.User.select()):
        if i > 0:
            print "-" * 20

        print "Id: {}\nName: {}\nEmail: {}".format(user.id, user.name.encode('utf-8'), user.email)
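The --groups handling above splits a comma separated string and discards the empty entry that a trailing comma (or an empty string) would produce:

groups = "admin,default,".split(',')   # ['admin', 'default', '']
if '' in groups:
    groups.remove('')                  # ['admin', 'default']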
@@ -10,20 +10,19 @@ import json
import numbers
import cStringIO
import datetime
import logging

from flask import render_template, send_from_directory, make_response, request, jsonify, redirect, \
    session, url_for
from flask.ext.restful import Resource, abort
from flask_login import current_user, login_user, logout_user

import sqlparse
import events
from permissions import require_permission
from redash import settings, utils, __version__, statsd_client
from redash import data

from redash import app, auth, api, redis_connection, data_manager
from redash import models
from redash import redis_connection, statsd_client, models, settings, utils, __version__
from redash.wsgi import app, auth, api
from redash.tasks import QueryTask, record_event
from redash.cache import headers as cache_headers
from redash.permissions import require_permission


@app.route('/ping', methods=['GET'])
@@ -36,6 +35,7 @@ def ping():
@app.route('/queries')
@app.route('/queries/<query_id>')
@app.route('/queries/<query_id>/<anything>')
@app.route('/personal')
@app.route('/')
@auth.required
def index(**kwargs):
@@ -44,15 +44,16 @@ def index(**kwargs):

    user = {
        'gravatar_url': gravatar_url,
        'is_admin': current_user.is_admin,
        'id': current_user.id,
        'name': current_user.name,
        'email': current_user.email,
        'groups': current_user.groups,
        'permissions': current_user.permissions
    }

    features = {
        'clientSideMetrics': settings.CLIENT_SIDE_METRICS
        'clientSideMetrics': settings.CLIENT_SIDE_METRICS,
        'flowerUrl': settings.CELERY_FLOWER_URL
    }

    return render_template("index.html", user=json.dumps(user), name=settings.NAME,
@@ -66,8 +67,7 @@ def login():
        return redirect(request.args.get('next') or '/')

    if not settings.PASSWORD_LOGIN_ENABLED:
        blueprint = app.extensions['googleauth'].blueprint
        return redirect(url_for("%s.login" % blueprint.name, next=request.args.get('next')))
        return redirect(url_for("google_oauth.authorize", next=request.args.get('next')))

    if request.method == 'POST':
        user = models.User.select().where(models.User.email == request.form['username']).first()
@@ -81,7 +81,7 @@
                           analytics=settings.ANALYTICS,
                           next=request.args.get('next'),
                           username=request.form.get('username', ''),
                           show_google_openid=settings.GOOGLE_OPENID_ENABLED)
                           show_google_openid=settings.GOOGLE_OAUTH_ENABLED)


@app.route('/logout')
@@ -101,15 +101,28 @@ def status_api():
    status['version'] = __version__
    status['queries_count'] = models.Query.select().count()
    status['query_results_count'] = models.QueryResult.select().count()
    status['unused_query_results_count'] = models.QueryResult.unused().count()
    status['dashboards_count'] = models.Dashboard.select().count()
    status['widgets_count'] = models.Widget.select().count()

    status['workers'] = [redis_connection.hgetall(w)
                         for w in redis_connection.smembers('workers')]
    status['workers'] = []

    manager_status = redis_connection.hgetall('manager:status')
    manager_status = redis_connection.hgetall('redash:status')
    status['manager'] = manager_status
    status['manager']['queue_size'] = redis_connection.zcard('jobs')
    status['manager']['outdated_queries_count'] = models.Query.outdated_queries().count()

    queues = {}
    for ds in models.DataSource.select():
        for queue in (ds.queue_name, ds.scheduled_queue_name):
            queues.setdefault(queue, set())
            queues[queue].add(ds.name)

    status['manager']['queues'] = {}
    for queue, sources in queues.iteritems():
        status['manager']['queues'][queue] = {
            'data_sources': ', '.join(sources),
            'size': redis_connection.llen(queue)
        }

    return jsonify(status)
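For reference, the queues loop above groups data source names by the queue they map onto, so the resulting entry has roughly this shape (source names, queue names and sizes are made up):

# status['manager']['queues'] after the loop, with two data sources
# sharing the "queries" queue (illustrative values only):
{
    "queries": {"data_sources": "prod db, replica db", "size": 3},
    "scheduled_queries": {"data_sources": "prod db", "size": 0}
}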
@@ -144,7 +157,7 @@ class EventAPI(BaseResource):
    def post(self):
        events_list = request.get_json(force=True)
        for event in events_list:
            events.record_event(event)
            record_event.delay(event)


api.add_resource(EventAPI, '/api/events', endpoint='events')
@@ -163,12 +176,17 @@ api.add_resource(MetricsAPI, '/api/metrics/v1/send', endpoint='metrics')

class DataSourceListAPI(BaseResource):
    def get(self):
        data_sources = [ds.to_dict() for ds in models.DataSource.select()]
        data_sources = [ds.to_dict() for ds in models.DataSource.all()]
        return data_sources

api.add_resource(DataSourceListAPI, '/api/data_sources', endpoint='data_sources')


class DashboardRecentAPI(BaseResource):
    def get(self):
        return [d.to_dict() for d in models.Dashboard.recent(current_user.id).limit(20)]


class DashboardListAPI(BaseResource):
    def get(self):
        dashboards = [d.to_dict() for d in
@@ -213,6 +231,7 @@ class DashboardAPI(BaseResource):
        dashboard.save()

api.add_resource(DashboardListAPI, '/api/dashboards', endpoint='dashboards')
api.add_resource(DashboardRecentAPI, '/api/dashboards/recent', endpoint='recent_dashboards')
api.add_resource(DashboardAPI, '/api/dashboards/<dashboard_slug>', endpoint='dashboard')


@@ -252,19 +271,26 @@ class WidgetAPI(BaseResource):
    @require_permission('edit_dashboard')
    def delete(self, widget_id):
        widget = models.Widget.get(models.Widget.id == widget_id)
        # TODO: reposition existing ones
        layout = json.loads(widget.dashboard.layout)
        layout = map(lambda row: filter(lambda w: w != widget_id, row), layout)
        layout = filter(lambda row: len(row) > 0, layout)
        widget.dashboard.layout = json.dumps(layout)
        widget.dashboard.save()

        widget.delete_instance()

api.add_resource(WidgetListAPI, '/api/widgets', endpoint='widgets')
api.add_resource(WidgetAPI, '/api/widgets/<int:widget_id>', endpoint='widget')


class QuerySearchAPI(BaseResource):
    @require_permission('view_query')
    def get(self):
        term = request.args.get('q', '')

        return [q.to_dict() for q in models.Query.search(term)]


class QueryRecentAPI(BaseResource):
    @require_permission('view_query')
    def get(self):
        return [q.to_dict() for q in models.Query.recent(current_user.id).limit(20)]


class QueryListAPI(BaseResource):
    @require_permission('create_query')
    def post(self):
@@ -279,16 +305,18 @@ class QueryListAPI(BaseResource):

        query.create_default_visualizations()

        return query.to_dict(with_result=False)
        return query.to_dict()

    @require_permission('view_query')
    def get(self):
        return [q.to_dict(with_result=False, with_stats=True) for q in models.Query.all_queries()]
        return [q.to_dict(with_stats=True) for q in models.Query.all_queries()]


class QueryAPI(BaseResource):
    @require_permission('edit_query')
    def post(self, query_id):
        query = models.Query.get_by_id(query_id)

        query_def = request.get_json(force=True)
        for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'user']:
            query_def.pop(field, None)
@@ -303,7 +331,7 @@ class QueryAPI(BaseResource):

        query = models.Query.get_by_id(query_id)

        return query.to_dict(with_result=False, with_visualizations=True)
        return query.to_dict(with_visualizations=True)

    @require_permission('view_query')
    def get(self, query_id):
@@ -313,6 +341,20 @@ class QueryAPI(BaseResource):
        else:
            abort(404, message="Query not found.")

    # TODO: move to resource of its own? (POST /queries/{id}/archive)
    def delete(self, query_id):
        q = models.Query.get(models.Query.id == query_id)

        if q:
            if q.user.id == self.current_user.id or self.current_user.has_permission('admin'):
                q.archive()
            else:
                self.delete_others_query(query_id)
        else:
            abort(404, message="Query not found.")

api.add_resource(QuerySearchAPI, '/api/queries/search', endpoint='queries_search')
api.add_resource(QueryRecentAPI, '/api/queries/recent', endpoint='recent_queries')
api.add_resource(QueryListAPI, '/api/queries', endpoint='queries')
api.add_resource(QueryAPI, '/api/queries/<query_id>', endpoint='query')

@@ -323,7 +365,7 @@ class VisualizationListAPI(BaseResource):
        kwargs = request.get_json(force=True)
        kwargs['options'] = json.dumps(kwargs['options'])
        kwargs['query'] = kwargs.pop('query_id')

        vis = models.Visualization(**kwargs)
        vis.save()

@@ -337,6 +379,7 @@ class VisualizationAPI(BaseResource):
        if 'options' in kwargs:
            kwargs['options'] = json.dumps(kwargs['options'])
        kwargs.pop('id', None)
        kwargs.pop('query_id', None)

        update = models.Visualization.update(**kwargs).where(models.Visualization.id == visualization_id)
        update.execute()
@@ -359,6 +402,24 @@ class QueryResultListAPI(BaseResource):
    def post(self):
        params = request.json

        if settings.FEATURE_TABLES_PERMISSIONS:
            metadata = utils.SQLMetaData(params['query'])

            if metadata.has_non_select_dml_statements or metadata.has_ddl_statements:
                return {
                    'job': {
                        'error': 'Only SELECT statements are allowed'
                    }
                }

            if len(metadata.used_tables - current_user.allowed_tables) > 0 and '*' not in current_user.allowed_tables:
                logging.warning('Permission denied for user %s to table %s', self.current_user.name, metadata.used_tables)
                return {
                    'job': {
                        'error': 'Access denied for table(s): %s' % (metadata.used_tables)
                    }
                }

        models.ActivityLog(
            user=self.current_user,
            type=models.ActivityLog.QUERY_EXECUTION,
@@ -374,62 +435,67 @@ class QueryResultListAPI(BaseResource):
            return {'query_result': query_result.to_dict()}
        else:
            data_source = models.DataSource.get_by_id(params['data_source_id'])
            job = data_manager.add_job(params['query'], data.Job.HIGH_PRIORITY, data_source)
            job = QueryTask.add_task(params['query'], data_source)
            return {'job': job.to_dict()}


class QueryResultAPI(BaseResource):
    @require_permission('view_query')
    def get(self, query_result_id):
        query_result = models.QueryResult.get_by_id(query_result_id)
        if query_result:
            return {'query_result': query_result.to_dict()}
        else:
            abort(404)
    @staticmethod
    def csv_response(query_result):
        s = cStringIO.StringIO()

        query_data = json.loads(query_result.data)
        writer = csv.DictWriter(s, fieldnames=[col['name'] for col in query_data['columns']])
        writer.writer = utils.UnicodeWriter(s)
        writer.writeheader()
        for row in query_data['rows']:
            for k, v in row.iteritems():
                if isinstance(v, numbers.Number) and (v > 1000 * 1000 * 1000 * 100):
                    row[k] = datetime.datetime.fromtimestamp(v/1000.0)

            writer.writerow(row)

        headers = {'Content-Type': "text/csv; charset=UTF-8"}
        headers.update(cache_headers)
        return make_response(s.getvalue(), 200, headers)

class CsvQueryResultsAPI(BaseResource):
    @require_permission('view_query')
    def get(self, query_id, query_result_id=None):
        if not query_result_id:
    def get(self, query_id=None, query_result_id=None, filetype='json'):
        if query_result_id is None and query_id is not None:
            query = models.Query.get(models.Query.id == query_id)
            if query:
                query_result_id = query._data['latest_query_data']

        query_result = query_result_id and models.QueryResult.get_by_id(query_result_id)
        if query_result_id:
            query_result = models.QueryResult.get_by_id(query_result_id)

        if query_result:
            s = cStringIO.StringIO()
            if filetype == 'json':
                data = json.dumps({'query_result': query_result.to_dict()}, cls=utils.JSONEncoder)
                return make_response(data, 200, cache_headers)
            else:
                return self.csv_response(query_result)

            query_data = json.loads(query_result.data)
            writer = csv.DictWriter(s, fieldnames=[col['name'] for col in query_data['columns']])
            writer.writer = utils.UnicodeWriter(s)
            writer.writeheader()
            for row in query_data['rows']:
                for k, v in row.iteritems():
                    if isinstance(v, numbers.Number) and (v > 1000 * 1000 * 1000 * 100):
                        row[k] = datetime.datetime.fromtimestamp(v/1000.0)

                writer.writerow(row)

            return make_response(s.getvalue(), 200, {'Content-Type': "text/csv; charset=UTF-8"})
        else:
            abort(404)

api.add_resource(CsvQueryResultsAPI, '/api/queries/<query_id>/results/<query_result_id>.csv',
                 '/api/queries/<query_id>/results.csv',
                 endpoint='csv_query_results')

api.add_resource(QueryResultListAPI, '/api/query_results', endpoint='query_results')
api.add_resource(QueryResultAPI, '/api/query_results/<query_result_id>', endpoint='query_result')
api.add_resource(QueryResultAPI,
                 '/api/query_results/<query_result_id>',
                 '/api/queries/<query_id>/results.<filetype>',
                 '/api/queries/<query_id>/results/<query_result_id>.<filetype>',
                 endpoint='query_result')


class JobAPI(BaseResource):
    def get(self, job_id):
        # TODO: if finished, include the query result
        job = data.Job.load(data_manager.redis_connection, job_id)
        job = QueryTask(job_id=job_id)
        return {'job': job.to_dict()}

    def delete(self, job_id):
        job = data.Job.load(data_manager.redis_connection, job_id)
        job = QueryTask(job_id=job_id)
        job.cancel()

api.add_resource(JobAPI, '/api/jobs/<job_id>', endpoint='job')
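Together, POST /api/query_results and GET /api/jobs/<job_id> give the usual submit-then-poll flow. A rough client-side sketch; the host, the lack of authentication, and the terminal-state check are assumptions for illustration, not taken from this diff:

import json
import time
import requests  # assumed HTTP client, for illustration only

base = "http://localhost:5000"  # hypothetical host
resp = requests.post(base + "/api/query_results",
                     data=json.dumps({"query": "SELECT 1", "data_source_id": 1}),
                     headers={"Content-Type": "application/json"})
job = resp.json()["job"]

while True:  # poll the job endpoint until it reports a result or an error
    job = requests.get(base + "/api/jobs/%s" % job["id"]).json()["job"]
    if job.get("query_result_id") or job.get("error"):
        break
    time.sleep(1)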
@@ -1,2 +0,0 @@
from manager import Manager
from worker import Job
@@ -1,159 +0,0 @@
"""
Data manager. Used to manage and coordinate execution of queries.
"""
import time
import logging
import peewee
import qr
import redis
import json
from redash import models
from redash.data import worker
from redash.utils import gen_query_hash


class JSONPriorityQueue(qr.PriorityQueue):
    """Use a JSON serializer to help with cross language support"""
    def __init__(self, key, **kwargs):
        super(qr.PriorityQueue, self).__init__(key, **kwargs)
        self.serializer = json


class Manager(object):
    def __init__(self, redis_connection, statsd_client):
        self.statsd_client = statsd_client
        self.redis_connection = redis_connection
        self.workers = []
        self.queue = JSONPriorityQueue("jobs", **self.redis_connection.connection_pool.connection_kwargs)
        self.max_retries = 5
        self.status = {
            'last_refresh_at': 0,
            'started_at': time.time()
        }

        self._save_status()

    def add_job(self, query, priority, data_source):
        query_hash = gen_query_hash(query)
        logging.info("[Manager][%s] Inserting job with priority=%s", query_hash, priority)
        try_count = 0
        job = None

        while try_count < self.max_retries:
            try_count += 1

            pipe = self.redis_connection.pipeline()
            try:
                pipe.watch('query_hash_job:%s' % query_hash)
                job_id = pipe.get('query_hash_job:%s' % query_hash)
                if job_id:
                    logging.info("[Manager][%s] Found existing job: %s", query_hash, job_id)
                    job = worker.Job.load(self.redis_connection, job_id)
                else:
                    job = worker.Job(self.redis_connection, query=query, priority=priority,
                                     data_source_id=data_source.id,
                                     data_source_name=data_source.name,
                                     data_source_type=data_source.type,
                                     data_source_options=data_source.options)
                    pipe.multi()
                    job.save(pipe)
                    logging.info("[Manager][%s] Created new job: %s", query_hash, job.id)
                    self.queue.push(job.id, job.priority)
                break

            except redis.WatchError:
                continue

        if not job:
            logging.error("[Manager][%s] Failed adding job for query.", query_hash)

        return job

    def report_status(self):
        workers = [self.redis_connection.hgetall(w)
                   for w in self.redis_connection.smembers('workers')]

        for w in workers:
            self.statsd_client.gauge('worker_{}.seconds_since_update'.format(w['id']),
                                     time.time() - float(w['updated_at']))
            self.statsd_client.gauge('worker_{}.jobs_received'.format(w['id']), int(w['jobs_count']))
            self.statsd_client.gauge('worker_{}.jobs_done'.format(w['id']), int(w['done_jobs_count']))

        manager_status = self.redis_connection.hgetall('manager:status')
        self.statsd_client.gauge('manager.seconds_since_refresh',
                                 time.time() - float(manager_status['last_refresh_at']))

    def refresh_queries(self):
        # TODO: this will only execute scheduled queries that were executed before. I think this is
        # a reasonable assumption, but worth revisiting.

        # TODO: move this logic to the model.
        outdated_queries = models.Query.select(peewee.Func('first_value', models.Query.id)\
            .over(partition_by=[models.Query.query_hash, models.Query.data_source]))\
            .join(models.QueryResult)\
            .where(models.Query.ttl > 0,
                   (models.QueryResult.retrieved_at +
                    (models.Query.ttl * peewee.SQL("interval '1 second'"))) <
                   peewee.SQL("(now() at time zone 'utc')"))

        queries = models.Query.select(models.Query, models.DataSource).join(models.DataSource)\
            .where(models.Query.id << outdated_queries)

        self.status['last_refresh_at'] = time.time()
        self._save_status()

        logging.info("Refreshing queries...")

        outdated_queries_count = 0
        for query in queries:
            self.add_job(query.query, worker.Job.LOW_PRIORITY, query.data_source)
            outdated_queries_count += 1

        self.statsd_client.gauge('manager.outdated_queries', outdated_queries_count)
        self.statsd_client.gauge('manager.queue_size', self.redis_connection.zcard('jobs'))

        logging.info("Done refreshing queries... %d" % outdated_queries_count)

    def store_query_result(self, data_source_id, query, data, run_time, retrieved_at):
        query_hash = gen_query_hash(query)

        query_result = models.QueryResult.create(query_hash=query_hash,
                                                 query=query,
                                                 runtime=run_time,
                                                 data_source=data_source_id,
                                                 retrieved_at=retrieved_at,
                                                 data=data)

        logging.info("[Manager][%s] Inserted query data; id=%s", query_hash, query_result.id)

        # TODO: move this logic to the model?
        updated_count = models.Query.update(latest_query_data=query_result).\
            where(models.Query.query_hash == query_hash, models.Query.data_source == data_source_id).\
            execute()

        logging.info("[Manager][%s] Updated %s queries.", query_hash, updated_count)

        return query_result.id

    def start_workers(self, workers_count):
        if self.workers:
            return self.workers

        redis_connection_params = self.redis_connection.connection_pool.connection_kwargs
        self.workers = [worker.Worker(worker_id, self, redis_connection_params)
                        for worker_id in xrange(workers_count)]

        for w in self.workers:
            w.start()

        return self.workers

    def stop_workers(self):
        for w in self.workers:
            w.terminate()

        for w in self.workers:
            w.join()

    def _save_status(self):
        self.redis_connection.hmset('manager:status', self.status)
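The retry loop in Manager.add_job (removed above) is a standard redis optimistic-locking pattern: WATCH the dedup key, and if another writer touches it before MULTI/EXEC commits, redis raises WatchError and the loop retries. Stripped to its core, with a hypothetical key and value:

import redis

r = redis.StrictRedis()

def set_if_absent(key, value, retries=5):
    for _ in range(retries):
        pipe = r.pipeline()
        try:
            pipe.watch(key)            # transaction aborts if key changes meanwhile
            existing = pipe.get(key)
            if existing is not None:
                return existing        # someone else won the race; reuse theirs
            pipe.multi()
            pipe.set(key, value)
            pipe.execute()
            return value
        except redis.WatchError:
            continue                   # key changed under us; retry
    return None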
@@ -23,8 +23,12 @@ def get_query_runner(connection_type, connection_string):
    elif connection_type == 'url':
        from redash.data import query_runner_url
        runner = query_runner_url.url(connection_string)
    elif connection_type == "mongo":
        from redash.data import query_runner_mongodb
        connection_params = json.loads(connection_string)
        runner = query_runner_mongodb.mongodb(connection_params)
    else:
        from redash.data import query_runner_pg
        runner = query_runner_pg.pg(connection_string)

    return runner
    return runner
@@ -1,7 +1,9 @@
import datetime
import httplib2
import json
import logging
import sys
import time

try:
    import apiclient.errors
@@ -14,6 +16,39 @@ except ImportError:

from redash.utils import JSONEncoder

types_map = {
    'INTEGER': 'integer',
    'FLOAT': 'float',
    'BOOLEAN': 'boolean',
    'STRING': 'string',
    'TIMESTAMP': 'datetime',
}


def transform_row(row, fields):
    column_index = 0
    row_data = {}

    for cell in row["f"]:
        field = fields[column_index]
        cell_value = cell['v']

        if cell_value is None:
            pass
        # Otherwise just cast the value
        elif field['type'] == 'INTEGER':
            cell_value = int(cell_value)
        elif field['type'] == 'FLOAT':
            cell_value = float(cell_value)
        elif field['type'] == 'BOOLEAN':
            cell_value = cell_value.lower() == "true"
        elif field['type'] == 'TIMESTAMP':
            cell_value = datetime.datetime.fromtimestamp(float(cell_value))

        row_data[field["name"]] = cell_value
        column_index += 1

    return row_data


def bigquery(connection_string):
    def load_key(filename):
        f = file(filename, "rb")
@@ -27,12 +62,22 @@ def bigquery(connection_string):
            "https://www.googleapis.com/auth/bigquery",
        ]

        credentials = SignedJwtAssertionCredentials(connection_string["serviceAccount"], load_key(connection_string["privateKey"]), scope=scope)
        credentials = SignedJwtAssertionCredentials(connection_string["serviceAccount"],
                                                    load_key(connection_string["privateKey"]), scope=scope)
        http = httplib2.Http()
        http = credentials.authorize(http)

        return build("bigquery", "v2", http=http)

    def get_query_results(jobs, project_id, job_id, start_index):
        query_reply = jobs.getQueryResults(projectId=project_id, jobId=job_id, startIndex=start_index).execute()
        logging.debug('query_reply %s', query_reply)
        if not query_reply['jobComplete']:
            time.sleep(10)
            return get_query_results(jobs, project_id, job_id, start_index)

        return query_reply

    def query_runner(query):
        bigquery_service = get_bigquery_service()

@@ -52,44 +97,39 @@ def bigquery(connection_string):
        try:
            insert_response = jobs.insert(projectId=project_id, body=job_data).execute()
            current_row = 0
            query_reply = jobs.getQueryResults(projectId=project_id, jobId=insert_response['jobReference']['jobId'], startIndex=current_row).execute()
            query_reply = get_query_results(jobs, project_id=project_id,
                                            job_id=insert_response['jobReference']['jobId'], start_index=current_row)

            logging.debug("bigquery replied: %s", query_reply)

            rows = []
            field_names = []
            for f in query_reply["schema"]["fields"]:
                field_names.append(f["name"])

            while(("rows" in query_reply) and current_row < query_reply['totalRows']):
            while ("rows" in query_reply) and current_row < query_reply['totalRows']:
                for row in query_reply["rows"]:
                    row_data = {}
                    column_index = 0
                    for cell in row["f"]:
                        row_data[field_names[column_index]] = cell["v"]
                        column_index += 1

                    rows.append(row_data)
                    rows.append(transform_row(row, query_reply["schema"]["fields"]))

                current_row += len(query_reply['rows'])
                query_reply = jobs.getQueryResults(projectId=project_id, jobId=query_reply['jobReference']['jobId'], startIndex=current_row).execute()
                query_reply = jobs.getQueryResults(projectId=project_id, jobId=query_reply['jobReference']['jobId'],
                                                   startIndex=current_row).execute()

            columns = [{'name': name,
                        'friendly_name': name,
                        'type': None} for name in field_names]
            columns = [{'name': f["name"],
                        'friendly_name': f["name"],
                        'type': types_map.get(f['type'], "string")} for f in query_reply["schema"]["fields"]]

            data = {
                "columns" : columns,
                "rows" : rows
                "columns": columns,
                "rows": rows
            }
            error = None

            json_data = json.dumps(data, cls=JSONEncoder)
        except apiclient.errors.HttpError, e:
            json_data = None
            error = e.args[1]
            error = e.content
        except KeyboardInterrupt:
            error = "Query cancelled by user."
            json_data = None
        except Exception as e:
        except Exception:
            raise sys.exc_info()[1], None, sys.exc_info()[2]

        return json_data, error
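transform_row (added above) maps BigQuery's positional cell list onto the schema fields and casts each value by its declared type. A worked example of the input/output shapes, with made-up values:

fields = [{'name': 'ts', 'type': 'TIMESTAMP'}, {'name': 'hits', 'type': 'INTEGER'}]
row = {'f': [{'v': '1400000000.0'}, {'v': '42'}]}

print transform_row(row, fields)
# {'ts': datetime.datetime(2014, 5, 13, ...), 'hits': 42}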
242  redash/data/query_runner_mongodb.py  Normal file
@@ -0,0 +1,242 @@
import datetime
import logging
import json
import sys
import re
import time
from redash.utils import JSONEncoder

try:
    import pymongo
    from bson.objectid import ObjectId
    from bson.son import SON
except ImportError:
    print "Missing dependencies. Please install pymongo."
    print "You can use pip: pip install pymongo"
    raise

TYPES_MAP = {
    ObjectId: "string",
    str: "string",
    unicode: "string",
    int: "integer",
    long: "integer",
    float: "float",
    bool: "boolean",
    datetime.datetime: "datetime",
}

date_regex = re.compile("ISODate\(\"(.*)\"\)", re.IGNORECASE)

# Simple query example:
#
# {
#     "collection" : "my_collection",
#     "query" : {
#         "date" : {
#             "$gt" : "ISODate(\"2015-01-15 11:41\")",
#         },
#         "type" : 1
#     },
#     "fields" : {
#         "_id" : 1,
#         "name" : 2
#     },
#     "sort" : [
#         {
#             "name" : "date",
#             "direction" : -1
#         }
#     ]
# }
#
#
# Aggregation
# ===========
# Uses a syntax similar to the one used in PyMongo, however to support the
# correct order of sorting, it uses a regular list for the "$sort" operation
# that converts into a SON (sorted dictionary) object before execution.
#
# Aggregation query example:
#
# {
#     "collection" : "things",
#     "aggregate" : [
#         {
#             "$unwind" : "$tags"
#         },
#         {
#             "$group" : {
#                 "_id" : "$tags",
#                 "count" : { "$sum" : 1 }
#             }
#         },
#         {
#             "$sort" : [
#                 {
#                     "name" : "count",
#                     "direction" : -1
#                 },
#                 {
#                     "name" : "_id",
#                     "direction" : -1
#                 }
#             ]
#         }
#     ]
# }
#
#
def mongodb(connection_string):
    def _get_column_by_name(columns, column_name):
        for c in columns:
            if "name" in c and c["name"] == column_name:
                return c

        return None

    def _convert_date(q, field_name):
        m = date_regex.findall(q[field_name])
        if len(m) > 0:
            if q[field_name].find(":") == -1:
                q[field_name] = datetime.datetime.fromtimestamp(time.mktime(time.strptime(m[0], "%Y-%m-%d")))
            else:
                q[field_name] = datetime.datetime.fromtimestamp(time.mktime(time.strptime(m[0], "%Y-%m-%d %H:%M")))

    def query_runner(query):
        if not "dbName" in connection_string or not connection_string["dbName"]:
            return None, "dbName is missing from connection string JSON or is empty"

        db_name = connection_string["dbName"]

        if not "connectionString" in connection_string or not connection_string["connectionString"]:
            return None, "connectionString is missing from connection string JSON or is empty"

        is_replica_set = True if "replicaSetName" in connection_string and connection_string["replicaSetName"] else False

        if is_replica_set:
            if not connection_string["replicaSetName"]:
                return None, "replicaSetName is set in the connection string JSON but is empty"

            db_connection = pymongo.MongoReplicaSetClient(connection_string["connectionString"], replicaSet=connection_string["replicaSetName"])
        else:
            db_connection = pymongo.MongoClient(connection_string["connectionString"])

        if db_name not in db_connection.database_names():
            return None, "Unknown database name '%s'" % db_name

        db = db_connection[db_name]

        logging.debug("mongodb connection string: %s", connection_string)
        logging.debug("mongodb got query: %s", query)

        try:
            query_data = json.loads(query)
        except:
            return None, "Invalid query format. The query is not a valid JSON."

        if "query" in query_data and "aggregate" in query_data:
            return None, "'query' and 'aggregate' sections cannot be used at the same time"

        collection = None
        if not "collection" in query_data:
            return None, "'collection' must be set"
        else:
            collection = query_data["collection"]

        q = None
        if "query" in query_data:
            q = query_data["query"]
            for k in q:
                if q[k] and type(q[k]) in [str, unicode]:
                    logging.debug(q[k])
                    _convert_date(q, k)
                elif q[k] and type(q[k]) is dict:
                    for k2 in q[k]:
                        if type(q[k][k2]) in [str, unicode]:
                            _convert_date(q[k], k2)

        f = None

        aggregate = None
        if "aggregate" in query_data:
            aggregate = query_data["aggregate"]
            for step in aggregate:
                if "$sort" in step:
                    sort_list = []
                    for sort_item in step["$sort"]:
                        sort_list.append((sort_item["name"], sort_item["direction"]))

                    step["$sort"] = SON(sort_list)

        if aggregate:
            pass
        else:
            s = None
            if "sort" in query_data and query_data["sort"]:
                s = []
                for field in query_data["sort"]:
                    s.append((field["name"], field["direction"]))

            if "fields" in query_data:
                f = query_data["fields"]

        columns = []
        rows = []

        error = None
        json_data = None

        cursor = None
        if q or (not q and not aggregate):
            if s:
                cursor = db[collection].find(q, f).sort(s)
            else:
                cursor = db[collection].find(q, f)

            if "skip" in query_data:
                cursor = cursor.skip(query_data["skip"])

            if "limit" in query_data:
                cursor = cursor.limit(query_data["limit"])

        elif aggregate:
            r = db[collection].aggregate(aggregate)
            cursor = r["result"]

        for r in cursor:
            for k in r:
                if _get_column_by_name(columns, k) is None:
                    columns.append({
                        "name": k,
                        "friendly_name": k,
                        "type": TYPES_MAP[type(r[k])] if type(r[k]) in TYPES_MAP else None
                    })

                # Convert ObjectId to string
                if type(r[k]) == ObjectId:
                    r[k] = str(r[k])

            rows.append(r)

        if f:
            ordered_columns = []
            for k in sorted(f, key=f.get):
                ordered_columns.append(_get_column_by_name(columns, k))

            columns = ordered_columns

        data = {
            "columns": columns,
            "rows": rows
        }
        error = None
        json_data = json.dumps(data, cls=JSONEncoder)

        return json_data, error

    query_runner.annotate_query = False
    return query_runner
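get_query_runner (earlier in this diff) parses the data source options JSON and hands the resulting dict to mongodb(); the returned closure takes the query text and returns (json_data, error). A minimal sketch of driving it directly; the connection details and query are hypothetical:

runner = mongodb({
    "connectionString": "mongodb://localhost:27017",  # made-up server
    "dbName": "analytics",
})

json_data, error = runner(json.dumps({
    "collection": "events",
    "query": {"type": 1},
    "limit": 10,
}))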
@@ -18,7 +18,7 @@ def mysql(connection_string):

    def query_runner(query):
        connections_params = [entry.split('=')[1] for entry in connection_string.split(';')]
        connection = MySQLdb.connect(*connections_params)
        connection = MySQLdb.connect(*connections_params, charset="utf8", use_unicode=True)
        cursor = connection.cursor()

        logging.debug("mysql got query: %s", query)
@@ -61,4 +61,4 @@ def mysql(connection_string):
        return json_data, error


    return query_runner
    return query_runner
@@ -88,11 +88,12 @@ def pg(connection_string):
            json_data = json.dumps(data, cls=JSONEncoder)
            error = None
            cursor.close()
        except (select.error, OSError, psycopg2.OperationalError) as e:
        except (select.error, OSError) as e:
            logging.exception(e)
            error = "Query interrupted. Please retry."
            json_data = None
        except psycopg2.DatabaseError as e:
            logging.exception(e)
            json_data = None
            error = e.message
        except KeyboardInterrupt:
@@ -17,6 +17,9 @@ def script(connection_string):
        json_data = None
        error = None

        if connection_string is None:
            return None, "script execution path is not set. Please reconfigure the data source"

        # Poor man's protection against running scripts from outside the scripts directory
        if connection_string.find("../") > -1:
            return None, "Scripts can only be run from the configured scripts directory"
@@ -1,370 +0,0 @@
"""
Worker implementation to execute incoming queries.
"""
import json
import logging
import multiprocessing
import os
import uuid
import datetime
import time
import signal
import setproctitle
import redis
from statsd import StatsClient
from redash.utils import gen_query_hash
from redash.data.query_runner import get_query_runner
from redash import settings


class RedisObject(object):
    # The following should be overridden in the inheriting class:
    fields = {}
    conversions = {}
    id_field = ''
    name = ''

    def __init__(self, redis_connection, **kwargs):
        self.redis_connection = redis_connection
        self.values = {}

        if not self.fields:
            raise ValueError("You must set the fields dictionary, before using RedisObject.")

        if not self.name:
            raise ValueError("You must set the name, before using RedisObject")

        self.update(**kwargs)

    def __getattr__(self, name):
        if name in self.values:
            return self.values[name]
        else:
            raise AttributeError

    def update(self, **kwargs):
        for field, default_value in self.fields.iteritems():
            value = kwargs.get(field, self.values.get(field, default_value))
            if callable(value):
                value = value()

            if value == 'None':
                value = None

            if field in self.conversions and value:
                value = self.conversions[field](value)

            self.values[field] = value

    @classmethod
    def _redis_key(cls, object_id):
        return '{}:{}'.format(cls.name, object_id)

    def save(self, pipe):
        if not pipe:
            pipe = self.redis_connection.pipeline()

        pipe.sadd('{}_set'.format(self.name), self.id)
        pipe.hmset(self._redis_key(self.id), self.values)
        pipe.publish(self._redis_key(self.id), json.dumps(self.to_dict()))

        pipe.execute()

    @classmethod
    def load(cls, redis_connection, object_id):
        object_dict = redis_connection.hgetall(cls._redis_key(object_id))
        obj = None
        if object_dict:
            obj = cls(redis_connection, **object_dict)

        return obj


def fix_unicode(string):
    if isinstance(string, unicode):
        return string

    return string.decode('utf-8')


class Job(RedisObject):
    HIGH_PRIORITY = 1
    LOW_PRIORITY = 2

    WAITING = 1
    PROCESSING = 2
    DONE = 3
    FAILED = 4

    fields = {
        'id': lambda: str(uuid.uuid1()),
        'query': None,
        'priority': None,
        'query_hash': None,
        'wait_time': 0,
        'query_time': 0,
        'error': None,
        'updated_at': time.time,
        'status': WAITING,
        'process_id': None,
        'query_result_id': None,
        'data_source_id': None,
        'data_source_name': None,
        'data_source_type': None,
        'data_source_options': None
    }

    conversions = {
        'query': fix_unicode,
        'priority': int,
        'updated_at': float,
        'status': int,
        'wait_time': float,
        'query_time': float,
        'process_id': int,
        'query_result_id': int
    }

    name = 'job'

    def __init__(self, redis_connection, query, priority, **kwargs):
        kwargs['query'] = fix_unicode(query)
        kwargs['priority'] = priority
        kwargs['query_hash'] = gen_query_hash(kwargs['query'])
        self.new_job = 'id' not in kwargs
        super(Job, self).__init__(redis_connection, **kwargs)

    def to_dict(self):
        return {
            'query': self.query,
            'priority': self.priority,
            'id': self.id,
            'wait_time': self.wait_time,
            'query_time': self.query_time,
            'updated_at': self.updated_at,
            'status': self.status,
            'error': self.error,
            'query_result_id': self.query_result_id,
            'process_id': self.process_id,
            'data_source_name': self.data_source_name,
            'data_source_type': self.data_source_type
        }

    def cancel(self):
        # TODO: Race condition:
        # it's possible that it will be picked up by a worker while processing the cancel order
        if self.is_finished():
            return

        if self.status == self.PROCESSING:
            try:
                os.kill(self.process_id, signal.SIGINT)
            except OSError as e:
                logging.warning("[%s] Tried to cancel job but os.kill failed (pid=%d, error=%s)",
                                self.id, self.process_id, e)

        self.done(None, "Interrupted/Cancelled while running.")

    def save(self, pipe=None):
        if not pipe:
            pipe = self.redis_connection.pipeline()

        if self.new_job:
            pipe.set('query_hash_job:%s' % self.query_hash, self.id)

        if self.is_finished():
            pipe.delete('query_hash_job:%s' % self.query_hash)

        super(Job, self).save(pipe)

    def expire(self, expire_time):
        self.redis_connection.expire(self._redis_key(self.id), expire_time)

    def processing(self, process_id):
        self.update(status=self.PROCESSING,
                    process_id=process_id,
                    wait_time=time.time() - self.updated_at,
                    updated_at=time.time())

        self.save()

    def is_finished(self):
        return self.status in (self.FAILED, self.DONE)

    def done(self, query_result_id, error):
        if error:
            new_status = self.FAILED
        else:
            new_status = self.DONE

        self.update(status=new_status,
                    query_result_id=query_result_id,
                    error=error,
                    query_time=time.time() - self.updated_at,
                    updated_at=time.time())

        self.save()

    def __str__(self):
        return "<Job:%s,priority:%d,status:%d>" % (self.id, self.priority, self.status)


class Worker(multiprocessing.Process):
    def __init__(self, worker_id, manager, redis_connection_params, sleep_time=0.1):
        self.manager = manager

        self.statsd_client = StatsClient(host=settings.STATSD_HOST, port=settings.STATSD_PORT,
                                         prefix=settings.STATSD_PREFIX)

        self.redis_connection_params = {k: v for k, v in redis_connection_params.iteritems()
                                        if k in ('host', 'db', 'password', 'port')}

        self.worker_id = None
        self.continue_working = True
        self.sleep_time = sleep_time
        self.child_pid = None
        self.current_job_id = None
        self.status = {
            'jobs_count': 0,
            'cancelled_jobs_count': 0,
            'done_jobs_count': 0,
            'updated_at': time.time(),
            'started_at': time.time()
        }

        super(Worker, self).__init__(name="Worker")

    def set_title(self, title=None):
        base_title = "redash worker:%s" % self.worker_id
        if title:
            full_title = "%s - %s" % (base_title, title)
        else:
            full_title = base_title

        setproctitle.setproctitle(full_title)

    def run(self):
        self.worker_id = os.getpid()
        self.status['id'] = self.worker_id
        self.name = "Worker:%d" % self.worker_id
        self.manager.redis_connection.sadd('workers', self._key)
        self._save_status()
        self.set_title()

        logging.info("[%s] started.", self.name)

        signal.signal(signal.SIGINT, self._stop)
        signal.signal(signal.SIGTERM, self._stop)

        self._wait_for_jobs()

    def _stop(self, signum, frame):
        self.continue_working = False
        if self.current_job_id:
            job = Job.load(self.manager.redis_connection, self.current_job_id)
            if job:
                job.cancel()

    def _wait_for_jobs(self):
        while self.continue_working:
            job_id = self.manager.queue.pop()
            if job_id:
                self._update_status('jobs_count')
                logging.info("[%s] Processing %s", self.name, job_id)
                self._fork_and_process(job_id)
                if self.child_pid == 0:
                    return
            else:
                time.sleep(self.sleep_time)

    def _update_status(self, counter):
        self.status['updated_at'] = time.time()
        self.status[counter] += 1
        self._save_status()

    @property
    def _key(self):
        return 'worker:%s' % self.worker_id

    def _save_status(self):
        self.manager.redis_connection.hmset(self._key, self.status)

    def _fork_and_process(self, job_id):
        self.current_job_id = job_id
        self.child_pid = os.fork()
        if self.child_pid == 0:
            self.set_title("processing %s" % job_id)
            self._process(job_id)
        else:
            logging.info("[%s] Waiting for pid: %d", self.name, self.child_pid)

            try:
                _, status = os.waitpid(self.child_pid, 0)
            except OSError:
                logging.info("[%s] OSError while waiting for child to finish", self.name)
                # setting status to >0, so the job cleanup is triggered
                status = 1

            self._update_status('done_jobs_count')

            job = Job.load(self.manager.redis_connection, job_id)
            if status > 0 and not job.is_finished():
                self._update_status('cancelled_jobs_count')
                logging.info("[%s] process interrupted and job %s hasn't finished; registering interruption in job",
                             self.name, job_id)
                job.done(None, "Interrupted/Cancelled while running.")

            job.expire(settings.JOB_EXPIRY_TIME)

            logging.info("[%s] Finished Processing %s (pid: %d status: %d)",
                         self.name, job_id, self.child_pid, status)

            self.child_pid = None
            self.current_job_id = None

    def _process(self, job_id):
        redis_connection = redis.StrictRedis(**self.redis_connection_params)
        job = Job.load(redis_connection, job_id)
        if job.is_finished():
            logging.warning("[%s][%s] tried to process finished job.", self.name, job)
            return

        pid = os.getpid()
        job.processing(pid)

        logging.info("[%s][%s] running query...", self.name, job.id)
        start_time = time.time()
        self.set_title("running query %s" % job_id)

        logging.info("[%s][%s] Loading query runner (%s, %s)...", self.name, job.id,
                     job.data_source_name, job.data_source_type)

        query_runner = get_query_runner(job.data_source_type, job.data_source_options)

        if getattr(query_runner, 'annotate_query', True):
            annotated_query = "/* Pid: %s, Job Id: %s, Query hash: %s, Priority: %s */ %s" % \
                              (pid, job.id, job.query_hash, job.priority, job.query)
        else:
            annotated_query = job.query

        # TODO: here's the part that needs to be forked, not all of the worker process...
        with self.statsd_client.timer('worker_{}.query_runner.{}.{}.run_time'.format(self.worker_id,
                                                                                     job.data_source_type,
                                                                                     job.data_source_name)):
            data, error = query_runner(annotated_query)

        run_time = time.time() - start_time
        logging.info("[%s][%s] query finished... data length=%s, error=%s",
                     self.name, job.id, data and len(data), error)

        # TODO: it is possible that storing the data will fail, and we will need to retry
        # while we already marked the job as done
        query_result_id = None
        if not error:
            self.set_title("storing results %s" % job_id)
            query_result_id = self.manager.store_query_result(job.data_source_id,
                                                              job.query, data, run_time,
                                                              datetime.datetime.utcnow())

        self.set_title("marking job as done %s" % job_id)
        job.done(query_result_id, error)
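Job is the only RedisObject subclass in the removed file; the base class contract is just fields/conversions/name plus a to_dict() used when publishing. A minimal hypothetical subclass, for illustration only:

class Heartbeat(RedisObject):  # hypothetical example, not part of the diff
    fields = {'id': lambda: str(uuid.uuid1()), 'updated_at': time.time}
    conversions = {'updated_at': float}
    name = 'heartbeat'

    def to_dict(self):
        return {'id': self.id, 'updated_at': self.updated_at}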
@@ -1,23 +0,0 @@
import logging
import json

logger = logging.getLogger("redash.events")
logger.propagate = False


def setup_logging(log_path, console_output=False):
    if log_path:
        fh = logging.FileHandler(log_path)
        formatter = logging.Formatter('%(message)s')
        fh.setFormatter(formatter)
        logger.addHandler(fh)

    if console_output:
        handler = logging.StreamHandler()
        formatter = logging.Formatter('[%(name)s] %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)


def record_event(event):
    logger.info(json.dumps(event))
81  redash/google_oauth.py  Normal file
@@ -0,0 +1,81 @@
import logging
from flask.ext.login import login_user
import requests
from flask import redirect, url_for, Blueprint
from flask_oauth import OAuth
from redash import models, settings

logger = logging.getLogger('google_oauth')
oauth = OAuth()

request_token_params = {'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile', 'response_type': 'code'}

if settings.GOOGLE_APPS_DOMAIN:
    request_token_params['hd'] = settings.GOOGLE_APPS_DOMAIN
else:
    logger.warning("No Google Apps domain defined, all Google accounts allowed.")

google = oauth.remote_app('google',
                          base_url='https://www.google.com/accounts/',
                          authorize_url='https://accounts.google.com/o/oauth2/auth',
                          request_token_url=None,
                          request_token_params=request_token_params,
                          access_token_url='https://accounts.google.com/o/oauth2/token',
                          access_token_method='POST',
                          access_token_params={'grant_type': 'authorization_code'},
                          consumer_key=settings.GOOGLE_CLIENT_ID,
                          consumer_secret=settings.GOOGLE_CLIENT_SECRET)


blueprint = Blueprint('google_oauth', __name__)


def get_user_profile(access_token):
    headers = {'Authorization': 'OAuth ' + access_token}
    response = requests.get('https://www.googleapis.com/oauth2/v1/userinfo', headers=headers)

    if response.status_code == 401:
        logger.warning("Failed getting user profile (response code 401).")
        return None

    return response.json()


def create_and_login_user(name, email):
    try:
        user_object = models.User.get(models.User.email == email)
        if user_object.name != name:
            logger.debug("Updating user name (%r -> %r)", user_object.name, name)
            user_object.name = name
            user_object.save()
    except models.User.DoesNotExist:
        logger.debug("Creating user object (%r)", name)
        user_object = models.User.create(name=name, email=email, groups=models.User.DEFAULT_GROUPS)

    login_user(user_object, remember=True)


@blueprint.route('/oauth/google', endpoint="authorize")
def login():
    # TODO: support next
    callback = url_for('.callback', _external=True)
    logger.debug("Callback url: %s", callback)
    return google.authorize(callback=callback)


@blueprint.route('/oauth/google_callback', endpoint="callback")
@google.authorized_handler
def authorized(resp):
    access_token = resp['access_token']

    if access_token is None:
        logger.warning("Access token missing in callback request.")
        return redirect(url_for('login'))

    profile = get_user_profile(access_token)
    if profile is None:
        return redirect(url_for('login'))

    create_and_login_user(profile['name'], profile['email'])

    return redirect(url_for('index'))
@@ -1,8 +1,11 @@
 import contextlib
 import json
 import logging
 import os
 from redash import models
 from flask.ext.script import Manager

 logger = logging.getLogger()

 class Importer(object):
     def __init__(self, object_mapping=None, data_source=None):
@@ -22,22 +25,17 @@ class Importer(object):
         return query_result

     def import_query(self, user, query):
         query_result = self.import_query_result(query['latest_query_data'])

         new_query = self._get_or_create(models.Query, query['id'], name=query['name'],
                                         user=user,
                                         ttl=-1,
                                         query=query['query'],
                                         query_hash=query['query_hash'],
                                         description=query['description'],
                                         latest_query_data=query_result,
                                         data_source=self.data_source)

         return new_query

     def import_visualization(self, user, visualization):
         query = self.import_query(user, visualization['query'])
@@ -50,9 +48,13 @@ class Importer(object):
         return new_visualization

     def import_widget(self, dashboard, widget):
-        visualization = self.import_visualization(dashboard.user, widget['visualization'])
+        if 'visualization' in widget:
+            visualization = self.import_visualization(dashboard.user, widget['visualization'])
+        else:
+            visualization = None

         new_widget = self._get_or_create(models.Widget, widget['id'],
                                          text=widget.get('text', None),
                                          dashboard=dashboard,
                                          width=widget['width'],
                                          options=json.dumps(widget['options']),
@@ -91,6 +93,7 @@ class Importer(object):
     def _get_or_create(self, object_type, external_id, **properties):
         internal_id = self._get_mapping(object_type, external_id)
+        logger.info("Creating %s with external id: %s and internal id: %s", object_type, external_id, internal_id)
         if internal_id:
             update = object_type.update(**properties).where(object_type.id == internal_id)
             update.execute()
@@ -114,11 +117,21 @@ export_manager = Manager(help="export utilities")

 @contextlib.contextmanager
-def importer_with_mapping_file(mapping_filename):
+def importer_with_mapping_file(mapping_filename, data_source_id=None):
     # Touch the file in case it doesn't exist
     if not os.path.isfile(mapping_filename):
         with open(mapping_filename, 'w') as f:
             f.write("{}")

     with open(mapping_filename) as f:
         mapping = json.loads(f.read())

-    importer = Importer(object_mapping=mapping, data_source=get_data_source())
+    if data_source_id is not None:
+        data_source = models.DataSource.get_by_id(data_source_id)
+    else:
+        data_source = get_data_source()
+
+    importer = Importer(object_mapping=mapping, data_source=data_source)
     yield importer

     with open(mapping_filename, 'w') as f:
@@ -146,12 +159,13 @@ def query(mapping_filename, query_filename, user_id):

 @import_manager.command
-def dashboard(mapping_filename, dashboard_filename, user_id):
+def dashboard(mapping_filename, dashboard_filename, user_id, data_source_id=None):
     user = models.User.get_by_id(user_id)

     with open(dashboard_filename) as f:
         dashboard = json.loads(f.read())

-    with importer_with_mapping_file(mapping_filename) as importer:
+    with importer_with_mapping_file(mapping_filename, data_source_id) as importer:
         importer.import_dashboard(user, dashboard)
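Taken together, the change threads an optional data source id from the CLI command down into the Importer. A minimal usage sketch (file names and ids are illustrative, not from the diff):

# Import a previously exported dashboard into data source 3,
# recording external->internal id mappings in mapping.json.
user = models.User.get_by_id(1)
with open("dashboard.json") as f:
    dashboard = json.loads(f.read())

with importer_with_mapping_file("mapping.json", data_source_id=3) as importer:
    importer.import_dashboard(user, dashboard)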
321 redash/models.py

@@ -1,28 +1,85 @@
 import json
 import hashlib
 import logging
 import os
 import threading
 import time
 import datetime
-from flask.ext.peewee.utils import slugify
-from flask.ext.login import UserMixin, AnonymousUserMixin
-from passlib.apps import custom_app_context as pwd_context
+import itertools
+
+import peewee
+from passlib.apps import custom_app_context as pwd_context
+from playhouse.postgres_ext import ArrayField
-from redash import db, utils
+from flask.ext.login import UserMixin, AnonymousUserMixin
+
+from redash import utils, settings


-class BaseModel(db.Model):
+class Database(object):
+    def __init__(self):
+        self.database_config = dict(settings.DATABASE_CONFIG)
+        self.database_name = self.database_config.pop('name')
+        self.database = peewee.PostgresqlDatabase(self.database_name, **self.database_config)
+        self.app = None
+        self.pid = os.getpid()
+
+    def init_app(self, app):
+        self.app = app
+        self.register_handlers()
+
+    def connect_db(self):
+        self._check_pid()
+        self.database.connect()
+
+    def close_db(self, exc):
+        self._check_pid()
+        if not self.database.is_closed():
+            self.database.close()
+
+    def _check_pid(self):
+        current_pid = os.getpid()
+        if self.pid != current_pid:
+            logging.info("New pid detected (%d!=%d); resetting database lock.", self.pid, current_pid)
+            self.pid = os.getpid()
+            self.database._conn_lock = threading.Lock()
+
+    def register_handlers(self):
+        self.app.before_request(self.connect_db)
+        self.app.teardown_request(self.close_db)
+
+
+db = Database()
+
+
+class BaseModel(peewee.Model):
     class Meta:
         database = db.database

     @classmethod
     def get_by_id(cls, model_id):
         return cls.get(cls.id == model_id)


-class AnonymousUser(AnonymousUserMixin):
+class PermissionsCheckMixin(object):
+    def has_permission(self, permission):
+        return self.has_permissions((permission,))
+
+    def has_permissions(self, permissions):
+        has_permissions = reduce(lambda a, b: a and b,
+                                 map(lambda permission: permission in self.permissions,
+                                     permissions),
+                                 True)
+
+        return has_permissions
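The reduce/map chain above simply asserts that every requested permission is present. For readers, an equivalent spelling using the built-in all() -- shown only as a clarification, not as code from this diff:

def has_permissions(self, permissions):
    # Behaviorally equivalent to the reduce(...) above.
    return all(permission in self.permissions for permission in permissions)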
+class AnonymousUser(AnonymousUserMixin, PermissionsCheckMixin):
     @property
     def permissions(self):
         return []


-class ApiUser(UserMixin):
+class ApiUser(UserMixin, PermissionsCheckMixin):
     def __init__(self, api_key):
         self.id = api_key

@@ -31,16 +88,40 @@ class ApiUser(UserMixin):
         return ['view_query']


-class User(BaseModel, UserMixin):
+class Group(BaseModel):
+    DEFAULT_PERMISSIONS = ['create_dashboard', 'create_query', 'edit_dashboard', 'edit_query',
+                           'view_query', 'view_source', 'execute_query']
+
+    id = peewee.PrimaryKeyField()
+    name = peewee.CharField(max_length=100)
+    permissions = ArrayField(peewee.CharField, default=DEFAULT_PERMISSIONS)
+    tables = ArrayField(peewee.CharField)
+    created_at = peewee.DateTimeField(default=datetime.datetime.now)
+
+    class Meta:
+        db_table = 'groups'
+
+    def to_dict(self):
+        return {
+            'id': self.id,
+            'name': self.name,
+            'permissions': self.permissions,
+            'tables': self.tables,
+            'created_at': self.created_at
+        }
+
+    def __unicode__(self):
+        return unicode(self.id)
+
+
+class User(BaseModel, UserMixin, PermissionsCheckMixin):
+    DEFAULT_GROUPS = ['default']

     id = peewee.PrimaryKeyField()
     name = peewee.CharField(max_length=320)
     email = peewee.CharField(max_length=320, index=True, unique=True)
     password_hash = peewee.CharField(max_length=128, null=True)
-    is_admin = peewee.BooleanField(default=False)
-    permissions = ArrayField(peewee.CharField, default=DEFAULT_PERMISSIONS)
+    groups = ArrayField(peewee.CharField, default=DEFAULT_GROUPS)

     class Meta:
         db_table = 'users'
@@ -49,10 +130,32 @@ class User(BaseModel, UserMixin):
         return {
             'id': self.id,
             'name': self.name,
-            'email': self.email,
-            'is_admin': self.is_admin
+            'email': self.email
         }

+    def __init__(self, *args, **kwargs):
+        super(User, self).__init__(*args, **kwargs)
+        self._allowed_tables = None
+
+    @property
+    def permissions(self):
+        # TODO: this should be cached.
+        return list(itertools.chain(*[g.permissions for g in
+                                      Group.select().where(Group.name << self.groups)]))
+
+    @property
+    def allowed_tables(self):
+        # TODO: cache this as well
+        if self._allowed_tables is None:
+            self._allowed_tables = set([t.lower() for t in itertools.chain(*[g.tables for g in
+                                        Group.select().where(Group.name << self.groups)])])
+
+        return self._allowed_tables
+
     @classmethod
     def get_by_email(cls, email):
         return cls.get(cls.email == email)

     def __unicode__(self):
         return '%r, %r' % (self.name, self.email)
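Under the new model, a user's effective permissions are the union over the groups named in users.groups. An illustrative session (the names are made up, and it assumes init_db further below has created the 'default' and 'admin' groups):

user = User.create(name="Alice", email="alice@example.com",
                   groups=['default', 'admin'])
user.has_permission('create_query')   # True, via the 'default' group
user.has_permission('admin')          # True, via the 'admin' group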
@@ -65,7 +168,7 @@ class User(BaseModel, UserMixin):

 class ActivityLog(BaseModel):
     QUERY_EXECUTION = 1

     id = peewee.PrimaryKeyField()
     user = peewee.ForeignKeyField(User)
     type = peewee.IntegerField()
@@ -93,6 +196,8 @@ class DataSource(BaseModel):
     name = peewee.CharField()
     type = peewee.CharField()
     options = peewee.TextField()
+    queue_name = peewee.CharField(default="queries")
+    scheduled_queue_name = peewee.CharField(default="queries")
     created_at = peewee.DateTimeField(default=datetime.datetime.now)

     class Meta:
@@ -105,6 +210,10 @@ class DataSource(BaseModel):
             'type': self.type
         }

+    @classmethod
+    def all(cls):
+        return cls.select().order_by(cls.id.asc())
+

 class QueryResult(BaseModel):
     id = peewee.PrimaryKeyField()
@@ -129,6 +238,15 @@ class QueryResult(BaseModel):
             'retrieved_at': self.retrieved_at
         }

+    @classmethod
+    def unused(cls):
+        week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
+
+        unused_results = cls.select().where(Query.id == None, cls.retrieved_at < week_ago)\
+            .join(Query, join_type=peewee.JOIN_LEFT_OUTER)
+
+        return unused_results
+
     @classmethod
     def get_latest(cls, data_source, query, ttl=0):
         query_hash = utils.gen_query_hash(query)
@@ -143,6 +261,25 @@ class QueryResult(BaseModel):

         return query.first()

+    @classmethod
+    def store_result(cls, data_source_id, query_hash, query, data, run_time, retrieved_at):
+        query_result = cls.create(query_hash=query_hash,
+                                  query=query,
+                                  runtime=run_time,
+                                  data_source=data_source_id,
+                                  retrieved_at=retrieved_at,
+                                  data=data)
+
+        logging.info("Inserted query (%s) data; id=%s", query_hash, query_result.id)
+
+        updated_count = Query.update(latest_query_data=query_result).\
+            where(Query.query_hash==query_hash, Query.data_source==data_source_id).\
+            execute()
+
+        logging.info("Updated %s queries with result (%s).", updated_count, query_hash)
+
+        return query_result
+
     def __unicode__(self):
         return u"%d | %s | %s" % (self.id, self.query_hash, self.retrieved_at)
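store_result persists the new result row and then repoints every query sharing the same hash on the same data source at it. A usage sketch from a worker's perspective (all values illustrative):

import datetime

query = "SELECT 1"
result = QueryResult.store_result(data_source_id=1,
                                  query_hash=utils.gen_query_hash(query),
                                  query=query,
                                  data='{"columns": [], "rows": []}',
                                  run_time=0.02,
                                  retrieved_at=datetime.datetime.utcnow())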
@@ -159,6 +296,7 @@ class Query(BaseModel):
     ttl = peewee.IntegerField()
     user_email = peewee.CharField(max_length=360, null=True)
     user = peewee.ForeignKeyField(User)
+    is_archived = peewee.BooleanField(default=False, index=True)
     created_at = peewee.DateTimeField(default=datetime.datetime.now)

     class Meta:
@@ -170,7 +308,7 @@ class Query(BaseModel):
                                          type="TABLE", options="{}")
         table_visualization.save()

-    def to_dict(self, with_result=True, with_stats=False, with_visualizations=False, with_user=True):
+    def to_dict(self, with_stats=False, with_visualizations=False, with_user=True):
         d = {
             'id': self.id,
             'latest_query_data_id': self._data.get('latest_query_data', None),
@@ -180,6 +318,7 @@ class Query(BaseModel):
             'query_hash': self.query_hash,
             'ttl': self.ttl,
             'api_key': self.api_key,
+            'is_archived': self.is_archived,
             'created_at': self.created_at,
             'data_source_id': self._data.get('data_source', None)
         }
@@ -190,35 +329,79 @@ class Query(BaseModel):
             d['user_id'] = self._data['user']

         if with_stats:
-            d['avg_runtime'] = self.avg_runtime
-            d['min_runtime'] = self.min_runtime
-            d['max_runtime'] = self.max_runtime
-            d['last_retrieved_at'] = self.last_retrieved_at
-            d['times_retrieved'] = self.times_retrieved
+            d['retrieved_at'] = self.retrieved_at
+            d['runtime'] = self.runtime

         if with_visualizations:
             d['visualizations'] = [vis.to_dict(with_query=False)
                                    for vis in self.visualizations]

-        if with_result and self.latest_query_data:
-            d['latest_query_data'] = self.latest_query_data.to_dict()
-
         return d

+    def archive(self):
+        self.is_archived = True
+        self.ttl = -1
+
+        for vis in self.visualizations:
+            for w in vis.widgets:
+                w.delete_instance()
+
+        self.save()
+
     @classmethod
     def all_queries(cls):
-        q = Query.select(Query, User,
-                         peewee.fn.Count(QueryResult.id).alias('times_retrieved'),
-                         peewee.fn.Avg(QueryResult.runtime).alias('avg_runtime'),
-                         peewee.fn.Min(QueryResult.runtime).alias('min_runtime'),
-                         peewee.fn.Max(QueryResult.runtime).alias('max_runtime'),
-                         peewee.fn.Max(QueryResult.retrieved_at).alias('last_retrieved_at'))\
+        q = Query.select(Query, User, QueryResult.retrieved_at, QueryResult.runtime)\
             .join(QueryResult, join_type=peewee.JOIN_LEFT_OUTER)\
             .switch(Query).join(User)\
-            .group_by(Query.id, User.id)
+            .where(Query.is_archived==False)\
+            .group_by(Query.id, User.id, QueryResult.id, QueryResult.retrieved_at, QueryResult.runtime)\
+            .order_by(cls.created_at.desc())

         return q

+    @classmethod
+    def outdated_queries(cls):
+        # TODO: this will only find scheduled queries that were executed before. I think this is
+        # a reasonable assumption, but worth revisiting.
+        outdated_queries_ids = cls.select(
+            peewee.Func('first_value', cls.id).over(partition_by=[cls.query_hash, cls.data_source])) \
+            .join(QueryResult) \
+            .where(cls.ttl > 0,
+                   cls.is_archived==False,
+                   (QueryResult.retrieved_at +
+                    (cls.ttl * peewee.SQL("interval '1 second'"))) <
+                   peewee.SQL("(now() at time zone 'utc')"))
+
+        queries = cls.select(cls, DataSource).join(DataSource) \
+            .where(cls.id << outdated_queries_ids)
+
+        return queries
+
+    @classmethod
+    def search(cls, term):
+        # This is a very naive implementation of search, to be replaced with a PostgreSQL full-text-search solution.
+        where = (cls.name**"%{}%".format(term)) | (cls.description**"%{}%".format(term))
+
+        if term.isdigit():
+            where |= cls.id == term
+
+        where &= cls.is_archived == False
+
+        return cls.select().where(where).order_by(cls.created_at.desc())
+
+    @classmethod
+    def recent(cls, user_id):
+        return cls.select().where(Event.created_at > peewee.SQL("current_date - 7")).\
+            join(Event, on=(Query.id == peewee.SQL("t2.object_id::integer"))).\
+            where(Event.action << ('edit', 'execute', 'edit_name', 'edit_description', 'view_source')).\
+            where(Event.user == user_id).\
+            where(~(Event.object_id >> None)).\
+            where(Event.object_type == 'query').\
+            where(cls.is_archived == False).\
+            group_by(Event.object_id, Query.id).\
+            order_by(peewee.SQL("count(0) desc"))
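The SQL in outdated_queries encodes "the latest result is older than the query's ttl allows", with first_value picking one representative query per (query_hash, data_source) partition. The same predicate restated in plain Python, for clarity only (the real check runs inside PostgreSQL):

import datetime

def is_outdated(query, now=None):
    # Mirrors: retrieved_at + ttl seconds < now (UTC).
    now = now or datetime.datetime.utcnow()
    if query.ttl <= 0 or query.latest_query_data is None:
        return False
    expires_at = query.latest_query_data.retrieved_at + datetime.timedelta(seconds=query.ttl)
    return expires_at < now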
     @classmethod
     def update_instance(cls, query_id, **kwargs):
         if 'query' in kwargs:
@@ -237,6 +420,14 @@ class Query(BaseModel):
         self.api_key = hashlib.sha1(
             u''.join((str(time.time()), self.query, str(self._data['user']), self.name)).encode('utf-8')).hexdigest()

+    @property
+    def runtime(self):
+        return self.latest_query_data.runtime
+
+    @property
+    def retrieved_at(self):
+        return self.latest_query_data.retrieved_at
+
     def __unicode__(self):
         return unicode(self.id)

@@ -259,13 +450,12 @@ class Dashboard(BaseModel):
         layout = json.loads(self.layout)

         if with_widgets:
-            widgets = Widget.select(Widget, Visualization, Query, QueryResult, User)\
+            widgets = Widget.select(Widget, Visualization, Query, User)\
                 .where(Widget.dashboard == self.id)\
+                .where(Query.is_archived == False)\
                 .join(Visualization, join_type=peewee.JOIN_LEFT_OUTER)\
                 .join(Query, join_type=peewee.JOIN_LEFT_OUTER)\
-                .join(User, join_type=peewee.JOIN_LEFT_OUTER)\
-                .switch(Query)\
-                .join(QueryResult, join_type=peewee.JOIN_LEFT_OUTER)
+                .join(User, join_type=peewee.JOIN_LEFT_OUTER)
             widgets = {w.id: w.to_dict() for w in widgets}

             # The following is a workaround for cases when the widget object gets deleted without the dashboard layout
@@ -301,13 +491,24 @@ class Dashboard(BaseModel):
     def get_by_slug(cls, slug):
         return cls.get(cls.slug == slug)

+    @classmethod
+    def recent(cls, user_id):
+        return cls.select().where(Event.created_at > peewee.SQL("current_date - 7")).\
+            join(Event, on=(Dashboard.id == peewee.SQL("t2.object_id::integer"))).\
+            where(Event.action << ('edit', 'view')).\
+            where(Event.user == user_id).\
+            where(~(Event.object_id >> None)).\
+            where(Event.object_type == 'dashboard').\
+            group_by(Event.object_id, Dashboard.id).\
+            order_by(peewee.SQL("count(0) desc"))
+
     def save(self, *args, **kwargs):
         if not self.slug:
-            self.slug = slugify(self.name)
+            self.slug = utils.slugify(self.name)

             tries = 1
             while self.select().where(Dashboard.slug == self.slug).first() is not None:
-                self.slug = slugify(self.name) + "_{0}".format(tries)
+                self.slug = utils.slugify(self.name) + "_{0}".format(tries)
                 tries += 1

         super(Dashboard, self).save(*args, **kwargs)
@@ -374,11 +575,54 @@ class Widget(BaseModel):
             d['visualization'] = self.visualization.to_dict()

         return d

     def __unicode__(self):
         return u"%s" % self.id

-all_models = (DataSource, User, QueryResult, Query, Dashboard, Visualization, Widget, ActivityLog)
+    def delete_instance(self, *args, **kwargs):
+        layout = json.loads(self.dashboard.layout)
+        layout = map(lambda row: filter(lambda w: w != self.id, row), layout)
+        layout = filter(lambda row: len(row) > 0, layout)
+        self.dashboard.layout = json.dumps(layout)
+        self.dashboard.save()
+        super(Widget, self).delete_instance(*args, **kwargs)
+
+
+class Event(BaseModel):
+    user = peewee.ForeignKeyField(User, related_name="events")
+    action = peewee.CharField()
+    object_type = peewee.CharField()
+    object_id = peewee.CharField(null=True)
+    additional_properties = peewee.TextField(null=True)
+    created_at = peewee.DateTimeField(default=datetime.datetime.now)
+
+    class Meta:
+        db_table = 'events'
+
+    def __unicode__(self):
+        return u"%s,%s,%s,%s" % (self._data['user'], self.action, self.object_type, self.object_id)
+
+    @classmethod
+    def record(cls, event):
+        user = event.pop('user_id')
+        action = event.pop('action')
+        object_type = event.pop('object_type')
+        object_id = event.pop('object_id', None)
+
+        created_at = datetime.datetime.utcfromtimestamp(event.pop('timestamp'))
+        additional_properties = json.dumps(event)
+
+        event = cls.create(user=user, action=action, object_type=object_type, object_id=object_id,
+                           additional_properties=additional_properties, created_at=created_at)
+
+        return event
+
+
+all_models = (DataSource, User, QueryResult, Query, Dashboard, Visualization, Widget, ActivityLog, Group, Event)
+
+
+def init_db():
+    Group.insert(name='admin', permissions=['admin'], tables=['*']).execute()
+    Group.insert(name='default', permissions=Group.DEFAULT_PERMISSIONS, tables=['*']).execute()


 def create_db(create_tables, drop_tables):
@@ -388,9 +632,8 @@ def create_db(create_tables, drop_tables):
         if drop_tables and model.table_exists():
             # TODO: submit a PR to peewee to allow passing a cascade option to drop_table.
             db.database.execute_sql('DROP TABLE %s CASCADE' % model._meta.db_table)
-            #model.drop_table()

         if create_tables and not model.table_exists():
             model.create_table()

-        db.close_db(None)
+    db.close_db(None)
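Event.record consumes the fields it knows about and serializes whatever remains into additional_properties. An illustrative payload (all values made up):

Event.record({
    'user_id': 1,
    'action': 'execute',
    'object_type': 'query',
    'object_id': '42',
    'timestamp': 1400000000,    # becomes created_at (UTC)
    'referer': '/dashboard/1'   # leftover keys end up in additional_properties
})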
@@ -10,10 +10,7 @@ class require_permissions(object):
     def __call__(self, fn):
         @functools.wraps(fn)
         def decorated(*args, **kwargs):
-            has_permissions = reduce(lambda a, b: a and b,
-                                     map(lambda permission: permission in current_user.permissions,
-                                         self.permissions),
-                                     True)
+            has_permissions = current_user.has_permissions(self.permissions)

             if has_permissions:
                 return fn(*args, **kwargs)
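With the check centralized on PermissionsCheckMixin, the decorator body shrinks to a single call. Typical usage on a handler, assuming the (unshown) constructor stores the given tuple on self.permissions:

class QueryListAPI(object):          # illustrative handler
    @require_permissions(('create_query',))
    def post(self):
        # Reached only when current_user.has_permissions(('create_query',)) is truthy.
        pass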
@@ -5,9 +5,7 @@ import urlparse

 def parse_db_url(url):
     url_parts = urlparse.urlparse(url)
-    connection = {
-        'engine': 'peewee.PostgresqlDatabase',
-    }
+    connection = {'threadlocals': True}

     if url_parts.hostname and not url_parts.path:
         connection['name'] = url_parts.hostname
@@ -38,14 +36,14 @@ def parse_boolean(str):
     return json.loads(str.lower())


-REDIS_URL = os.environ.get('REDASH_REDIS_URL', "redis://localhost:6379")
-NAME = os.environ.get('REDASH_NAME', 're:dash')
+REDIS_URL = os.environ.get('REDASH_REDIS_URL', "redis://localhost:6379/0")

 STATSD_HOST = os.environ.get('REDASH_STATSD_HOST', "127.0.0.1")
 STATSD_PORT = int(os.environ.get('REDASH_STATSD_PORT', "8125"))
 STATSD_PREFIX = os.environ.get('REDASH_STATSD_PREFIX', "redash")

+NAME = os.environ.get('REDASH_NAME', 're:dash')
+
 # The following is kept for backward compatibility, and shouldn't be used any more.
 CONNECTION_ADAPTER = os.environ.get("REDASH_CONNECTION_ADAPTER", "pg")
 CONNECTION_STRING = os.environ.get("REDASH_CONNECTION_STRING", "user= password= host= port=5439 dbname=")
@@ -53,20 +51,31 @@ CONNECTION_STRING = os.environ.get("REDASH_CONNECTION_STRING", "user= password=
 # Connection settings for re:dash's own database (where we store the queries, results, etc.)
 DATABASE_CONFIG = parse_db_url(os.environ.get("REDASH_DATABASE_URL", "postgresql://postgres"))

+# Celery related settings
+CELERY_BROKER = os.environ.get("REDASH_CELERY_BROKER", REDIS_URL)
+CELERY_BACKEND = os.environ.get("REDASH_CELERY_BACKEND", REDIS_URL)
+CELERY_FLOWER_URL = os.environ.get("REDASH_CELERY_FLOWER_URL", "/flower")
+
+# The following enables a periodic job (every 5 minutes) that removes unused query results. Kept behind this
+# "feature flag" until proven to be safe.
+QUERY_RESULTS_CLEANUP_ENABLED = parse_boolean(os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_ENABLED", "false"))
+
 # Google Apps domain to allow access from; any user with an email in this Google Apps domain will be allowed
 # access
 GOOGLE_APPS_DOMAIN = os.environ.get("REDASH_GOOGLE_APPS_DOMAIN", "")
 GOOGLE_OPENID_ENABLED = parse_boolean(os.environ.get("REDASH_GOOGLE_OPENID_ENABLED", "true"))
-PASSWORD_LOGIN_ENABLED = parse_boolean(os.environ.get("REDASH_PASSWORD_LOGIN_ENABLED", "false"))
 # Email addresses of admin users (comma separated)
 ADMINS = array_from_string(os.environ.get("REDASH_ADMINS", ''))
 ALLOWED_EXTERNAL_USERS = array_from_string(os.environ.get("REDASH_ALLOWED_EXTERNAL_USERS", ''))

+GOOGLE_CLIENT_ID = os.environ.get("REDASH_GOOGLE_CLIENT_ID", "")
+GOOGLE_CLIENT_SECRET = os.environ.get("REDASH_GOOGLE_CLIENT_SECRET", "")
+GOOGLE_OAUTH_ENABLED = GOOGLE_CLIENT_ID and GOOGLE_CLIENT_SECRET
+
+PASSWORD_LOGIN_ENABLED = parse_boolean(os.environ.get("REDASH_PASSWORD_LOGIN_ENABLED", "true"))

 STATIC_ASSETS_PATH = fix_assets_path(os.environ.get("REDASH_STATIC_ASSETS_PATH", "../rd_ui/app/"))
 WORKERS_COUNT = int(os.environ.get("REDASH_WORKERS_COUNT", "2"))
-JOB_EXPIRY_TIME = int(os.environ.get("REDASH_JOB_EXPIRY_TIME", 3600*24))
+JOB_EXPIRY_TIME = int(os.environ.get("REDASH_JOB_EXPIRY_TIME", 3600*6))
 COOKIE_SECRET = os.environ.get("REDASH_COOKIE_SECRET", "c292a0a3aa32397cdb050e233733900f")
 LOG_LEVEL = os.environ.get("REDASH_LOG_LEVEL", "INFO")
 EVENTS_LOG_PATH = os.environ.get("REDASH_EVENTS_LOG_PATH", "")
 EVENTS_CONSOLE_OUTPUT = parse_boolean(os.environ.get("REDASH_EVENTS_CONSOLE_OUTPUT", "false"))
 CLIENT_SIDE_METRICS = parse_boolean(os.environ.get("REDASH_CLIENT_SIDE_METRICS", "false"))
 ANALYTICS = os.environ.get("REDASH_ANALYTICS", "")

+# Features:
+FEATURE_TABLES_PERMISSIONS = parse_boolean(os.environ.get("REDASH_FEATURE_TABLES_PERMISSIONS", "false"))
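parse_db_url turns a DATABASE_URL-style string into the keyword dict that models.Database hands to peewee.PostgresqlDatabase, now seeded with 'threadlocals': True. The portion of the function that fills in the remaining keys is elided by this hunk, so the exact output shape below is an assumption, shown only for orientation:

config = parse_db_url("postgresql://redash:secret@localhost:5432/redash")
# Assumed shape (the mapping code is outside this hunk):
# {'threadlocals': True, 'name': 'redash', 'user': 'redash',
#  'password': 'secret', 'host': 'localhost', 'port': 5432}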
268 redash/tasks.py (new file)

@@ -0,0 +1,268 @@
import time
import datetime
import logging
import redis
from celery import Task
from celery.result import AsyncResult
from celery.utils.log import get_task_logger
from redash import redis_connection, models, statsd_client, settings
from redash.utils import gen_query_hash
from redash.worker import celery
from redash.data.query_runner import get_query_runner

logger = get_task_logger(__name__)


class BaseTask(Task):
    abstract = True

    def after_return(self, *args, **kwargs):
        models.db.close_db(None)

    def __call__(self, *args, **kwargs):
        models.db.connect_db()
        return super(BaseTask, self).__call__(*args, **kwargs)


class QueryTask(object):
    MAX_RETRIES = 5

    # TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this.
    STATUSES = {
        'PENDING': 1,
        'STARTED': 2,
        'SUCCESS': 3,
        'FAILURE': 4,
        'REVOKED': 4
    }

    def __init__(self, job_id=None, async_result=None):
        if async_result:
            self._async_result = async_result
        else:
            self._async_result = AsyncResult(job_id, app=celery)

    @property
    def id(self):
        return self._async_result.id

    @classmethod
    def add_task(cls, query, data_source, scheduled=False):
        query_hash = gen_query_hash(query)
        logging.info("[Manager][%s] Inserting job", query_hash)
        try_count = 0
        job = None

        while try_count < cls.MAX_RETRIES:
            try_count += 1

            pipe = redis_connection.pipeline()
            try:
                pipe.watch(cls._job_lock_id(query_hash, data_source.id))
                job_id = pipe.get(cls._job_lock_id(query_hash, data_source.id))
                if job_id:
                    logging.info("[Manager][%s] Found existing job: %s", query_hash, job_id)

                    job = cls(job_id=job_id)
                    if job.ready():
                        logging.info("[%s] found job is ready (%s), removing lock", query_hash, job.celery_status)
                        redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))
                        job = None

                if not job:
                    pipe.multi()

                    if scheduled:
                        queue_name = data_source.scheduled_queue_name
                    else:
                        queue_name = data_source.queue_name

                    result = execute_query.apply_async(args=(query, data_source.id), queue=queue_name)
                    job = cls(async_result=result)
                    logging.info("[Manager][%s] Created new job: %s", query_hash, job.id)
                    pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
                    pipe.execute()
                break

            except redis.WatchError:
                continue

        if not job:
            logging.error("[Manager][%s] Failed adding job for query.", query_hash)

        return job
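The WATCH/MULTI dance above is the whole concurrency story: one Redis key per (data source, query hash) either names the in-flight job or is absent. A condensed restatement, comments only; the authoritative logic is in add_task above:

lock_key = QueryTask._job_lock_id(query_hash, data_source.id)  # "query_hash_job:<ds>:<hash>"
# WATCH lock_key, then GET it:
#   - if it names a job that isn't ready yet, reuse that job (deduplication);
#   - otherwise MULTI: enqueue execute_query on the data source's queue and
#     SET lock_key to the new job id with a JOB_EXPIRY_TIME TTL, so a crashed
#     worker cannot hold the lock forever.
# A concurrent SET by another process raises redis.WatchError, and the loop
# retries, up to MAX_RETRIES times.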
    def to_dict(self):
        if self._async_result.status == 'STARTED':
            updated_at = self._async_result.result.get('start_time', 0)
        else:
            updated_at = 0

        if self._async_result.failed() and isinstance(self._async_result.result, Exception):
            error = self._async_result.result.message
        elif self._async_result.status == 'REVOKED':
            error = 'Query execution cancelled.'
        else:
            error = ''

        if self._async_result.successful():
            query_result_id = self._async_result.result
        else:
            query_result_id = None

        return {
            'id': self._async_result.id,
            'updated_at': updated_at,
            'status': self.STATUSES[self._async_result.status],
            'error': error,
            'query_result_id': query_result_id,
        }

    @property
    def is_cancelled(self):
        return self._async_result.status == 'REVOKED'

    @property
    def celery_status(self):
        return self._async_result.status

    def ready(self):
        return self._async_result.ready()

    def cancel(self):
        return self._async_result.revoke(terminate=True)

    @staticmethod
    def _job_lock_id(query_hash, data_source_id):
        return "query_hash_job:%s:%s" % (data_source_id, query_hash)


@celery.task(base=BaseTask)
def refresh_queries():
    # self.status['last_refresh_at'] = time.time()
    # self._save_status()

    logger.info("Refreshing queries...")

    outdated_queries_count = 0
    for query in models.Query.outdated_queries():
        # TODO: this should go into a lower priority queue
        QueryTask.add_task(query.query, query.data_source, scheduled=True)
        outdated_queries_count += 1

    statsd_client.gauge('manager.outdated_queries', outdated_queries_count)
    # TODO: decide if we still need this
    # statsd_client.gauge('manager.queue_size', self.redis_connection.zcard('jobs'))

    logger.info("Done refreshing queries. Found %d outdated queries." % outdated_queries_count)

    status = redis_connection.hgetall('redash:status')
    now = time.time()

    redis_connection.hmset('redash:status', {
        'outdated_queries_count': outdated_queries_count,
        'last_refresh_at': now
    })

    statsd_client.gauge('manager.seconds_since_refresh', now - float(status.get('last_refresh_at', now)))


@celery.task(base=BaseTask)
def cleanup_tasks():
    # In case of a cold restart of the workers, there might be jobs that still have their "lock" object, but aren't really
    # going to run. This job removes them.

    lock_keys = redis_connection.keys("query_hash_job:*")  # TODO: use a set instead of the keys command
    query_tasks = [QueryTask(job_id=j) for j in redis_connection.mget(lock_keys)]

    logger.info("Found %d locks", len(query_tasks))

    inspect = celery.control.inspect()
    active_tasks = inspect.active()
    if active_tasks is None:
        active_tasks = []
    else:
        active_tasks = active_tasks.values()

    all_tasks = set()
    for task_list in active_tasks:
        for task in task_list:
            all_tasks.add(task['id'])

    logger.info("Active jobs count: %d", len(all_tasks))

    for i, t in enumerate(query_tasks):
        if t.ready():
            # if the locked task is already ready (failed, finished, revoked), we don't need the lock anymore
            logger.warning("%s is ready (%s), removing lock.", lock_keys[i], t.celery_status)
            redis_connection.delete(lock_keys[i])

        if t.celery_status == 'STARTED' and t.id not in all_tasks:
            logger.warning("Couldn't find active job for: %s, removing lock.", lock_keys[i])
            redis_connection.delete(lock_keys[i])


@celery.task(base=BaseTask)
def cleanup_query_results():
    """
    Job to clean up unused query results -- those that no query links to anymore, and that are older than a week
    (so they are less likely to be open in someone's browser and in use).

    Each run the job deletes only 100 query results, so it won't choke the database when there are many such results.
    """

    unused_query_results = models.QueryResult.unused().limit(100)
    total_unused_query_results = models.QueryResult.unused().count()
    deleted_count = models.QueryResult.delete().where(models.QueryResult.id << unused_query_results).execute()

    logger.info("Deleted %d unused query results out of a total of %d." % (deleted_count, total_unused_query_results))


@celery.task(bind=True, base=BaseTask, track_started=True)
def execute_query(self, query, data_source_id):
    # TODO: maybe this should be a class?
    start_time = time.time()

    logger.info("Loading data source (%d)...", data_source_id)

    # TODO: we should probably cache data sources in Redis
    data_source = models.DataSource.get_by_id(data_source_id)

    self.update_state(state='STARTED', meta={'start_time': start_time, 'custom_message': ''})

    logger.info("Executing query:\n%s", query)

    query_hash = gen_query_hash(query)
    query_runner = get_query_runner(data_source.type, data_source.options)

    if getattr(query_runner, 'annotate_query', True):
        # TODO: annotate with the queue name
        annotated_query = "/* Task Id: %s, Query hash: %s */ %s" % \
                          (self.request.id, query_hash, query)
    else:
        annotated_query = query

    with statsd_client.timer('query_runner.{}.{}.run_time'.format(data_source.type, data_source.name)):
        data, error = query_runner(annotated_query)

    run_time = time.time() - start_time
    logger.info("Query finished... data length=%s, error=%s", data and len(data), error)

    self.update_state(state='STARTED', meta={'start_time': start_time, 'error': error, 'custom_message': ''})

    # Delete the query_hash lock
    redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))

    # TODO: it is possible that storing the data will fail, and we will need to retry
    # while we already marked the job as done
    if not error:
        query_result = models.QueryResult.store_result(data_source.id, query_hash, query, data, run_time, datetime.datetime.utcnow())
    else:
        raise Exception(error)

    return query_result.id


@celery.task(base=BaseTask)
def record_event(event):
    models.Event.record(event)
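Callers are expected to go through QueryTask.add_task rather than calling execute_query directly, so that the Redis lock bookkeeping stays consistent. An illustrative enqueue from the web process (the id is made up):

data_source = models.DataSource.get_by_id(1)
job = QueryTask.add_task("SELECT count(*) FROM events", data_source)
if job is not None:
    print job.to_dict()   # {'id': ..., 'status': 1, 'query_result_id': None, ...}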
@@ -6,10 +6,66 @@ import datetime
 import json
 import re
 import hashlib
+import sqlparse

 COMMENTS_REGEX = re.compile("/\*.*?\*/")


+class SQLMetaData(object):
+    TABLE_SELECTION_KEYWORDS = ('FROM', 'JOIN', 'LEFT JOIN', 'FULL JOIN', 'RIGHT JOIN', 'CROSS JOIN', 'INNER JOIN',
+                                'OUTER JOIN', 'LEFT OUTER JOIN', 'RIGHT OUTER JOIN', 'FULL OUTER JOIN')
+
+    def __init__(self, sql):
+        self.sql = sql
+        self.parsed_sql = sqlparse.parse(self.sql)
+
+        self.has_ddl_statements = self._find_ddl_statements()
+        self.has_non_select_dml_statements = self._find_dml_statements()
+        self.used_tables = self._find_tables()
+
+    def _find_ddl_statements(self):
+        for statement in self.parsed_sql:
+            if len([x for x in statement.flatten() if x.ttype == sqlparse.tokens.DDL]):
+                return True
+
+        return False
+
+    def _find_tables(self):
+        tables = set()
+        for statement in self.parsed_sql:
+            tables.update(self.extract_table_names(statement.tokens))
+
+        return tables
+
+    def extract_table_names(self, tokens):
+        tables = set()
+        tokens = [t for t in tokens if t.ttype not in (sqlparse.tokens.Whitespace, sqlparse.tokens.Newline)]
+
+        for i in range(len(tokens)):
+            if tokens[i].is_group():
+                tables.update(self.extract_table_names(tokens[i].tokens))
+            else:
+                if tokens[i].ttype == sqlparse.tokens.Keyword and tokens[i].normalized in self.TABLE_SELECTION_KEYWORDS:
+                    if isinstance(tokens[i + 1], sqlparse.sql.Identifier):
+                        tables.add(tokens[i + 1].value)
+
+                    if isinstance(tokens[i + 1], sqlparse.sql.IdentifierList):
+                        tables.update(set([t.value for t in tokens[i + 1].get_identifiers()]))
+        return tables
+
+    def _find_dml_statements(self):
+        for statement in self.parsed_sql:
+            for token in statement.flatten():
+                if token.ttype == sqlparse.tokens.DML and token.normalized != 'SELECT':
+                    return True
+
+        return False
+
+
+def slugify(s):
+    return re.sub('[^a-z0-9_\-]+', '-', s.lower())


 def gen_query_hash(sql):
     """Returns a hash of the given query after stripping all comments, line breaks and multiple
     spaces, and lower-casing all text.
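SQLMetaData is what the tables-permissions feature (FEATURE_TABLES_PERMISSIONS above) can lean on: it flags DDL and non-SELECT DML statements and collects the referenced table names. An illustrative use:

metadata = SQLMetaData("SELECT id FROM users JOIN events ON events.user_id = users.id")
metadata.has_ddl_statements             # False
metadata.has_non_select_dml_statements  # False
metadata.used_tables                    # set(['users', 'events'])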
32 redash/worker.py (new file)

@@ -0,0 +1,32 @@
from celery import Celery
from datetime import timedelta
from redash import settings


celery = Celery('redash',
                broker=settings.CELERY_BROKER,
                include='redash.tasks')

celery_schedule = {
    'refresh_queries': {
        'task': 'redash.tasks.refresh_queries',
        'schedule': timedelta(seconds=30)
    },
    'cleanup_tasks': {
        'task': 'redash.tasks.cleanup_tasks',
        'schedule': timedelta(minutes=5)
    }
}

if settings.QUERY_RESULTS_CLEANUP_ENABLED:
    celery_schedule['cleanup_query_results'] = {
        'task': 'redash.tasks.cleanup_query_results',
        'schedule': timedelta(minutes=5)
    }

celery.conf.update(CELERY_RESULT_BACKEND=settings.CELERY_BACKEND,
                   CELERYBEAT_SCHEDULE=celery_schedule,
                   CELERY_TIMEZONE='UTC')

if __name__ == '__main__':
    celery.start()
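Because celery_schedule is a plain dict assembled before celery.conf.update runs, the same pattern used for cleanup_query_results can register further periodic work. A hypothetical entry (the task name is made up, not part of this release):

celery_schedule['refresh_schemas'] = {          # hypothetical task
    'task': 'redash.tasks.refresh_schemas',
    'schedule': timedelta(hours=1)
}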
32 redash/wsgi.py (new file)

@@ -0,0 +1,32 @@
import json
from flask import Flask, make_response
from flask.ext.restful import Api

from redash import settings, utils
from redash.models import db

__version__ = '0.4.0'

app = Flask(__name__,
            template_folder=settings.STATIC_ASSETS_PATH,
            static_folder=settings.STATIC_ASSETS_PATH,
            static_path='/static')


api = Api(app)

# configure our database
settings.DATABASE_CONFIG.update({'threadlocals': True})
app.config['DATABASE'] = settings.DATABASE_CONFIG
db.init_app(app)

from redash.authentication import setup_authentication
auth = setup_authentication(app)

@api.representation('application/json')
def json_representation(data, code, headers=None):
    resp = make_response(json.dumps(data, cls=utils.JSONEncoder), code)
    resp.headers.extend(headers or {})
    return resp

from redash import controllers
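The @api.representation hook means every Flask-RESTful response is serialized with utils.JSONEncoder (presumably to handle datetimes and similar types). A quick illustrative check using Flask's test request context:

import datetime

with app.test_request_context():
    resp = json_representation({'retrieved_at': datetime.datetime(2014, 1, 1)}, 200)
    print resp.data   # JSON string; the datetime is handled by utils.JSONEncoder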
@@ -1,28 +1,25 @@
 Flask==0.10.1
-Flask-GoogleAuth==0.4
 Flask-RESTful==0.2.10
 Flask-Login==0.2.9
+Flask-OAuth==0.12
+passlib==1.6.2
 Jinja2==2.7.2
 MarkupSafe==0.18
 WTForms==1.0.5
 Werkzeug==0.9.4
 aniso8601==0.82
 blinker==1.3
 flask-peewee==0.6.5
 itsdangerous==0.23
 peewee==2.2.2
-psycopg2==2.5.1
+psycopg2==2.5.2
 python-dateutil==2.1
 pytz==2013.9
 qr==0.6.0
 redis==2.7.5
 requests==2.2.0
 setproctitle==1.1.8
 six==1.5.2
 sqlparse==0.1.8
 wsgiref==0.1.2
 wtf-peewee==0.2.2
 Flask-Script==0.6.6
 honcho==0.5.0
 statsd==2.1.2
 gunicorn==18.0
+celery==3.1.11
12 setup/Vagrantfile_debian (new file)

@@ -0,0 +1,12 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  # Every Vagrant virtual environment requires a box to build off of.
  config.vm.box = "box-cutter/debian76"
  config.vm.provision "shell", path: "setup.sh"
  config.vm.network "forwarded_port", guest: 80, host: 9001
end
177 setup/bootstrap.sh (new file)

@@ -0,0 +1,177 @@
#!/bin/bash
set -eu

REDASH_BASE_PATH=/opt/redash
FILES_BASE_URL=https://raw.githubusercontent.com/EverythingMe/redash/docs_setup/setup/files/

# Verify running as root:
if [ "$(id -u)" != "0" ]; then
    if [ $# -ne 0 ]; then
        echo "Failed running with sudo. Exiting." 1>&2
        exit 1
    fi
    echo "This script must be run as root. Trying to run with sudo."
    sudo bash $0 --with-sudo
    exit 0
fi

# Base packages
apt-get update
apt-get install -y python-pip python-dev nginx curl build-essential pwgen

# redash user
# TODO: check that the user doesn't exist yet?
adduser --system --no-create-home --disabled-login --gecos "" redash

# PostgreSQL
pg_available=0
psql --version || pg_available=$?
if [ $pg_available -ne 0 ]; then
    wget $FILES_BASE_URL"postgres_apt.sh" -O /tmp/postgres_apt.sh
    bash /tmp/postgres_apt.sh
    apt-get update
    apt-get -y install postgresql-9.3 postgresql-server-dev-9.3
fi

add_service() {
    service_name=$1
    service_command="/etc/init.d/$service_name"

    echo "Adding service: $service_name (/etc/init.d/$service_name)."
    chmod +x $service_command

    if command -v chkconfig >/dev/null 2>&1; then
        # we're on a chkconfig system, so let's add to chkconfig and put in runlevels 345
        chkconfig --add $service_name && echo "Successfully added to chkconfig!"
        chkconfig --level 345 $service_name on && echo "Successfully added to runlevels 345!"
    elif command -v update-rc.d >/dev/null 2>&1; then
        # if we're not a chkconfig box, assume we're able to use update-rc.d
        update-rc.d $service_name defaults && echo "Success!"
    else
        echo "No supported init tool found."
    fi

    $service_command start
}

# Redis
redis_available=0
redis-cli --version || redis_available=$?
if [ $redis_available -ne 0 ]; then
    wget http://download.redis.io/releases/redis-2.8.17.tar.gz
    tar xzf redis-2.8.17.tar.gz
    rm redis-2.8.17.tar.gz
    cd redis-2.8.17
    make
    make install

    # Setup process init & configuration

    REDIS_PORT=6379
    REDIS_CONFIG_FILE="/etc/redis/$REDIS_PORT.conf"
    REDIS_LOG_FILE="/var/log/redis_$REDIS_PORT.log"
    REDIS_DATA_DIR="/var/lib/redis/$REDIS_PORT"

    mkdir -p `dirname "$REDIS_CONFIG_FILE"` || die "Could not create redis config directory"
    mkdir -p `dirname "$REDIS_LOG_FILE"` || die "Could not create redis log dir"
    mkdir -p "$REDIS_DATA_DIR" || die "Could not create redis data directory"

    wget -O /etc/init.d/redis_6379 $FILES_BASE_URL"redis_init"
    wget -O $REDIS_CONFIG_FILE $FILES_BASE_URL"redis.conf"

    add_service "redis_$REDIS_PORT"

    cd ..
    rm -rf redis-2.8.17
fi

# Directories
if [ ! -d "$REDASH_BASE_PATH" ]; then
    sudo mkdir /opt/redash
    sudo chown redash /opt/redash
    sudo -u redash mkdir /opt/redash/logs
fi

# Default config file
if [ ! -f "/opt/redash/.env" ]; then
    sudo -u redash wget $FILES_BASE_URL"env" -O /opt/redash/.env
fi

# Install latest version
REDASH_VERSION=${REDASH_VERSION-0.4.0.b589}
LATEST_URL="https://github.com/EverythingMe/redash/releases/download/v${REDASH_VERSION/.b/%2Bb}/redash.$REDASH_VERSION.tar.gz"
VERSION_DIR="/opt/redash/redash.$REDASH_VERSION"
REDASH_TARBALL=/tmp/redash.tar.gz

if [ ! -d "$VERSION_DIR" ]; then
    sudo -u redash wget $LATEST_URL -O $REDASH_TARBALL
    sudo -u redash mkdir $VERSION_DIR
    sudo -u redash tar -C $VERSION_DIR -xvf $REDASH_TARBALL
    ln -nfs $VERSION_DIR /opt/redash/current
    ln -nfs /opt/redash/.env /opt/redash/current/.env

    cd /opt/redash/current

    # TODO: venv?
    pip install -r requirements.txt
fi

# Create database / tables
pg_user_exists=0
sudo -u postgres psql postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='redash'" | grep -q 1 || pg_user_exists=$?
if [ $pg_user_exists -ne 0 ]; then
    echo "Creating redash postgres user & database."
    sudo -u postgres createuser redash --no-superuser --no-createdb --no-createrole
    sudo -u postgres createdb redash --owner=redash

    cd /opt/redash/current
    sudo -u redash bin/run ./manage.py database create_tables
fi

# Create default admin user
cd /opt/redash/current
# TODO: make sure the user is created only once
# TODO: generate a temp password and print it to the screen
sudo -u redash bin/run ./manage.py users create --admin --password admin "Admin" "admin"

# Create re:dash read-only pg user & setup data source
pg_user_exists=0
sudo -u postgres psql postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='redash_reader'" | grep -q 1 || pg_user_exists=$?
if [ $pg_user_exists -ne 0 ]; then
    echo "Creating redash reader postgres user."
    REDASH_READER_PASSWORD=$(pwgen -1)
    sudo -u postgres psql -c "CREATE ROLE redash_reader WITH PASSWORD '$REDASH_READER_PASSWORD' NOCREATEROLE NOCREATEDB NOSUPERUSER LOGIN"
    sudo -u redash psql -c "grant select(id,name,type) ON data_sources to redash_reader;" redash
    sudo -u redash psql -c "grant select on activity_log, events, queries, dashboards, widgets, visualizations, query_results to redash_reader;" redash

    cd /opt/redash/current
    sudo -u redash bin/run ./manage.py ds new "re:dash metadata" "pg" "user=redash_reader password=$REDASH_READER_PASSWORD host=localhost dbname=redash"
fi

# BigQuery dependencies:
apt-get install -y libffi-dev libssl-dev
pip install google-api-python-client==1.2 pyOpenSSL==0.14 oauth2client==1.2

# MySQL dependencies:
apt-get install -y libmysqlclient-dev
pip install MySQL-python==1.2.5

# Mongo dependencies:
pip install pymongo==2.7.2

# Setup supervisord + sysv init startup script
sudo -u redash mkdir -p /opt/redash/supervisord
pip install supervisor==3.1.2  # TODO: move to requirements.txt

# Get the supervisord startup script
sudo -u redash wget -O /opt/redash/supervisord/supervisord.conf $FILES_BASE_URL"supervisord.conf"

wget -O /etc/init.d/redash_supervisord $FILES_BASE_URL"redash_supervisord_init"
add_service "redash_supervisord"

# Nginx setup
rm /etc/nginx/sites-enabled/default
wget -O /etc/nginx/sites-available/redash $FILES_BASE_URL"nginx_redash_site"
ln -nfs /etc/nginx/sites-available/redash /etc/nginx/sites-enabled/redash
service nginx restart
9 setup/files/env (new file)

@@ -0,0 +1,9 @@
export REDASH_CONNECTION_ADAPTER=pg
export REDASH_CONNECTION_STRING="dbname=redash"
export REDASH_STATIC_ASSETS_PATH="../rd_ui/dist/"
export REDASH_LOG_LEVEL="INFO"
export REDASH_WORKERS_COUNT=6
export REDASH_REDIS_URL=redis://localhost:6379/1
export REDASH_DATABASE_URL="postgresql://redash"
export REDASH_COOKIE_SECRET=veryverysecret
export REDASH_GOOGLE_APPS_DOMAIN=
20 setup/files/nginx_redash_site (new file)

@@ -0,0 +1,20 @@
upstream rd_servers {
  server 127.0.0.1:5000;
}

server {
  listen 80 default;

  access_log /var/log/nginx/rd.access.log;

  gzip on;
  gzip_types *;
  gzip_proxied any;

  location / {
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_pass http://rd_servers;
  }
}
162 setup/files/postgres_apt.sh (new file)

@@ -0,0 +1,162 @@
#!/bin/sh

# script to add apt.postgresql.org to sources.list

# from command line
CODENAME="$1"
# lsb_release is the best interface, but not always available
if [ -z "$CODENAME" ]; then
    CODENAME=$(lsb_release -cs 2>/dev/null)
fi
# parse os-release (unreliable, does not work on Ubuntu)
if [ -z "$CODENAME" -a -f /etc/os-release ]; then
    . /etc/os-release
    # Debian: VERSION="7.0 (wheezy)"
    # Ubuntu: VERSION="13.04, Raring Ringtail"
    CODENAME=$(echo $VERSION | sed -ne 's/.*(\(.*\)).*/\1/')
fi
# guess from sources.list
if [ -z "$CODENAME" ]; then
    CODENAME=$(grep '^deb ' /etc/apt/sources.list | head -n1 | awk '{ print $3 }')
fi
# complain if no result yet
if [ -z "$CODENAME" ]; then
    cat <<EOF
Could not determine the distribution codename. Please report this as a bug to
pgsql-pkg-debian@postgresql.org. As a workaround, you can call this script with
the proper codename as parameter, e.g. "$0 squeeze".
EOF
    exit 1
fi

# errors are non-fatal above
set -e

cat <<EOF
This script will enable the PostgreSQL APT repository on apt.postgresql.org on
your system. The distribution codename used will be $CODENAME-pgdg.

EOF

case $CODENAME in
    # known distributions
    sid|wheezy|squeeze|lenny|etch) ;;
    precise|lucid) ;;
    *) # unknown distribution, verify on the web
        DISTURL="http://apt.postgresql.org/pub/repos/apt/dists/"
        if [ -x /usr/bin/curl ]; then
            DISTHTML=$(curl -s $DISTURL)
        elif [ -x /usr/bin/wget ]; then
            DISTHTML=$(wget --quiet -O - $DISTURL)
        fi
        if [ "$DISTHTML" ]; then
            if ! echo "$DISTHTML" | grep -q "$CODENAME-pgdg"; then
                cat <<EOF
Your system is using the distribution codename $CODENAME, but $CODENAME-pgdg
does not seem to be a valid distribution on
$DISTURL

We abort the installation here. If you want to use a distribution different
from your system, you can call this script with an explicit codename, e.g.
"$0 precise".

Specifically, if you are using a non-LTS Ubuntu release, refer to
https://wiki.postgresql.org/wiki/Apt/FAQ#I_am_using_a_non-LTS_release_of_Ubuntu

For more information, refer to https://wiki.postgresql.org/wiki/Apt
or ask on the mailing list for assistance: pgsql-pkg-debian@postgresql.org
EOF
                exit 1
            fi
        fi
        ;;
esac

echo "Writing /etc/apt/sources.list.d/pgdg.list ..."
cat > /etc/apt/sources.list.d/pgdg.list <<EOF
deb http://apt.postgresql.org/pub/repos/apt/ $CODENAME-pgdg main
#deb-src http://apt.postgresql.org/pub/repos/apt/ $CODENAME-pgdg main
EOF

echo "Importing repository signing key ..."
KEYRING="/etc/apt/trusted.gpg.d/apt.postgresql.org.gpg"
test -e $KEYRING || touch $KEYRING
apt-key --keyring $KEYRING add - <<EOF
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1

mQINBE6XR8IBEACVdDKT2HEH1IyHzXkb4nIWAY7echjRxo7MTcj4vbXAyBKOfjja
UrBEJWHN6fjKJXOYWXHLIYg0hOGeW9qcSiaa1/rYIbOzjfGfhE4x0Y+NJHS1db0V
G6GUj3qXaeyqIJGS2z7m0Thy4Lgr/LpZlZ78Nf1fliSzBlMo1sV7PpP/7zUO+aA4
bKa8Rio3weMXQOZgclzgeSdqtwKnyKTQdXY5MkH1QXyFIk1nTfWwyqpJjHlgtwMi
c2cxjqG5nnV9rIYlTTjYG6RBglq0SmzF/raBnF4Lwjxq4qRqvRllBXdFu5+2pMfC
IZ10HPRdqDCTN60DUix+BTzBUT30NzaLhZbOMT5RvQtvTVgWpeIn20i2NrPWNCUh
hj490dKDLpK/v+A5/i8zPvN4c6MkDHi1FZfaoz3863dylUBR3Ip26oM0hHXf4/2U
A/oA4pCl2W0hc4aNtozjKHkVjRx5Q8/hVYu+39csFWxo6YSB/KgIEw+0W8DiTII3
RQj/OlD68ZDmGLyQPiJvaEtY9fDrcSpI0Esm0i4sjkNbuuh0Cvwwwqo5EF1zfkVj
Tqz2REYQGMJGc5LUbIpk5sMHo1HWV038TWxlDRwtOdzw08zQA6BeWe9FOokRPeR2
AqhyaJJwOZJodKZ76S+LDwFkTLzEKnYPCzkoRwLrEdNt1M7wQBThnC5z6wARAQAB
tBxQb3N0Z3JlU1FMIERlYmlhbiBSZXBvc2l0b3J5iQI9BBMBCAAnAhsDBQsJCAcD
BRUKCQgLBRYCAwEAAh4BAheABQJS6RUZBQkOhCctAAoJEH/MfUaszEz4zmQP/2ad
HtuaXL5Xu3C3NGLha/aQb9iSJC8z5vN55HMCpsWlmslCBuEr+qR+oZvPkvwh0Io/
8hQl/qN54DMNifRwVL2n2eG52yNERie9BrAMK2kNFZZCH4OxlMN0876BmDuNq2U6
7vUtCv+pxT+g9R1LvlPgLCTjS3m+qMqUICJ310BMT2cpYlJx3YqXouFkdWBVurI0
pGU/+QtydcJALz5eZbzlbYSPWbOm2ZSS2cLrCsVNFDOAbYLtUn955yXB5s4rIscE
vTzBxPgID1iBknnPzdu2tCpk07yJleiupxI1yXstCtvhGCbiAbGFDaKzhgcAxSIX
0ZPahpaYLdCkcoLlfgD+ar4K8veSK2LazrhO99O0onRG0p7zuXszXphO4E/WdbTO
yDD35qCqYeAX6TaB+2l4kIdVqPgoXT/doWVLUK2NjZtd3JpMWI0OGYDFn2DAvgwP
xqKEoGTOYuoWKssnwLlA/ZMETegak27gFAKfoQlmHjeA/PLC2KRYd6Wg2DSifhn+
2MouoE4XFfeekVBQx98rOQ5NLwy/TYlsHXm1n0RW86ETN3chj/PPWjsi80t5oepx
82azRoVu95LJUkHpPLYyqwfueoVzp2+B2hJU2Rg7w+cJq64TfeJG8hrc93MnSKIb
zTvXfdPtvYdHhhA2LYu4+5mh5ASlAMJXD7zIOZt2iEYEEBEIAAYFAk6XSO4ACgkQ
xa93SlhRC1qmjwCg9U7U+XN7Gc/dhY/eymJqmzUGT/gAn0guvoX75Y+BsZlI6dWn
qaFU6N8HiQIcBBABCAAGBQJOl0kLAAoJEExaa6sS0qeuBfEP/3AnLrcKx+dFKERX
o4NBCGWr+i1CnowupKS3rm2xLbmiB969szG5TxnOIvnjECqPz6skK3HkV3jTZaju
v3sR6M2ItpnrncWuiLnYcCSDp9TEMpCWzTEgtrBlKdVuTNTeRGILeIcvqoZX5w+u
i0eBvvbeRbHEyUsvOEnYjrqoAjqUJj5FUZtR1+V9fnZp8zDgpOSxx0LomnFdKnhj
uyXAQlRCA6/roVNR9ruRjxTR5ubteZ9ubTsVYr2/eMYOjQ46LhAgR+3Alblu/WHB
MR/9F9//RuOa43R5Sjx9TiFCYol+Ozk8XRt3QGweEH51YkSYY3oRbHBb2Fkql6N6
YFqlLBL7/aiWnNmRDEs/cdpo9HpFsbjOv4RlsSXQfvvfOayHpT5nO1UQFzoyMVpJ
615zwmQDJT5Qy7uvr2eQYRV9AXt8t/H+xjQsRZCc5YVmeAo91qIzI/tA2gtXik49
6yeziZbfUvcZzuzjjxFExss4DSAwMgorvBeIbiz2k2qXukbqcTjB2XqAlZasd6Ll
nLXpQdqDV3McYkP/MvttWh3w+J/woiBcA7yEI5e3YJk97uS6+ssbqLEd0CcdT+qz
+Waw0z/ZIU99Lfh2Qm77OT6vr//Zulw5ovjZVO2boRIcve7S97gQ4KC+G/+QaRS+
VPZ67j5UMxqtT/Y4+NHcQGgwF/1iiQI9BBMBCAAnAhsDBQsJCAcDBRUKCQgLBRYC
AwEAAh4BAheABQJQeSssBQkDwxbfAAoJEH/MfUaszEz4bgkP/0AI0UgDgkNNqplA
IpE/pkwem2jgGpJGKurh2xDu6j2ZL+BPzPhzyCeMHZwTXkkI373TXGQQP8dIa+RD
HAZ3iijw4+ISdKWpziEUJjUk04UMPTlN+dYJt2EHLQDD0VLtX0yQC/wLmVEH/REp
oclbVjZR/+ehwX2IxOIlXmkZJDSycl975FnSUjMAvyzty8P9DN0fIrQ7Ju+BfMOM
TnUkOdp0kRUYez7pxbURJfkM0NxAP1geACI91aISBpFg3zxQs1d3MmUIhJ4wHvYB
uaR7Fx1FkLAxWddre/OCYJBsjucE9uqc04rgKVjN5P/VfqNxyUoB+YZ+8Lk4t03p
RBcD9XzcyOYlFLWXbcWxTn1jJ2QMqRIWi5lzZIOMw5B+OK9LLPX0dAwIFGr9WtuV
J2zp+D4CBEMtn4Byh8EaQsttHeqAkpZoMlrEeNBDz2L7RquPQNmiuom15nb7xU/k
7PGfqtkpBaaGBV9tJkdp7BdH27dZXx+uT+uHbpMXkRrXliHjWpAw+NGwADh/Pjmq
ExlQSdgAiXy1TTOdzxKH7WrwMFGDK0fddKr8GH3f+Oq4eOoNRa6/UhTCmBPbryCS
IA7EAd0Aae9YaLlOB+eTORg/F1EWLPm34kKSRtae3gfHuY2cdUmoDVnOF8C9hc0P
bL65G4NWPt+fW7lIj+0+kF19s2PviQI9BBMBCAAnAhsDBQsJCAcDBRUKCQgLBRYC
AwEAAh4BAheABQJRKm2VBQkINsBBAAoJEH/MfUaszEz4RTEP/1sQHyjHaUiAPaCA
v8jw/3SaWP/g8qLjpY6ROjLnDMvwKwRAoxUwcIv4/TWDOMpwJN+CJIbjXsXNYvf9
OX+UTOvq4iwi4ADrAAw2xw+Jomc6EsYla+hkN2FzGzhpXfZFfUsuphjY3FKL+4hX
H+R8ucNwIz3yrkfc17MMn8yFNWFzm4omU9/JeeaafwUoLxlULL2zY7H3+QmxCl0u
6t8VvlszdEFhemLHzVYRY0Ro/ISrR78CnANNsMIy3i11U5uvdeWVCoWV1BXNLzOD
4+BIDbMB/Do8PQCWiliSGZi8lvmj/sKbumMFQonMQWOfQswTtqTyQ3yhUM1LaxK5
PYq13rggi3rA8oq8SYb/KNCQL5pzACji4TRVK0kNpvtxJxe84X8+9IB1vhBvF/Ji
/xDd/3VDNPY+k1a47cON0S8Qc8DA3mq4hRfcgvuWy7ZxoMY7AfSJOhleb9+PzRBB
n9agYgMxZg1RUWZazQ5KuoJqbxpwOYVFja/stItNS4xsmi0lh2I4MNlBEDqnFLUx
SvTDc22c3uJlWhzBM/f2jH19uUeqm4jaggob3iJvJmK+Q7Ns3WcfhuWwCnc1+58d
iFAMRUCRBPeFS0qd56QGk1r97B6+3UfLUslCfaaA8IMOFvQSHJwDO87xWGyxeRTY
IIP9up4xwgje9LB7fMxsSkCDTHOk
=s3DI
-----END PGP PUBLIC KEY BLOCK-----
EOF

echo "Running apt-get update ..."
apt-get update

cat <<EOF

You can now start installing packages from apt.postgresql.org.

Have a look at https://wiki.postgresql.org/wiki/Apt for more information;
most notably the FAQ at https://wiki.postgresql.org/wiki/Apt/FAQ
EOF
129
setup/files/redash_supervisord_init
Normal file
129
setup/files/redash_supervisord_init
Normal file
@@ -0,0 +1,129 @@
|
||||
#!/bin/sh
# /etc/init.d/redash_supervisord
### BEGIN INIT INFO
# Provides:          supervisord
# Required-Start:    $remote_fs $syslog
# Required-Stop:     $remote_fs $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: process supervisor
### END INIT INFO

# Author: Ron DuPlain <ron.duplain@gmail.com>

# Do NOT "set -e"

# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin
NAME=supervisord
DESC="process supervisor"
DAEMON=/usr/local/bin/$NAME
DAEMON_ARGS="--configuration /opt/redash/supervisord/supervisord.conf "
PIDFILE=/opt/redash/supervisord/supervisord.pid
SCRIPTNAME=/etc/init.d/redash_supervisord
USER=redash

# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME

# Load the VERBOSE setting and other rcS variables
. /lib/init/vars.sh

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions

#
# Function that starts the daemon/service
#
do_start()
{
    # Return
    #   0 if daemon has been started
    #   1 if daemon was already running
    #   2 if daemon could not be started
    start-stop-daemon --start --quiet --pidfile $PIDFILE --user $USER --chuid $USER --exec $DAEMON --test > /dev/null \
        || return 1
    start-stop-daemon --start --quiet --pidfile $PIDFILE --user $USER --chuid $USER --exec $DAEMON -- \
        $DAEMON_ARGS \
        || return 2
    # Add code here, if necessary, that waits for the process to be ready
    # to handle requests from services started subsequently which depend
    # on this one. As a last resort, sleep for some time.
}

#
# Function that stops the daemon/service
#
do_stop()
{
    # Return
    #   0 if daemon has been stopped
    #   1 if daemon was already stopped
    #   2 if daemon could not be stopped
    #   other if a failure occurred
    start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --user $USER --chuid $USER --name $NAME
    RETVAL="$?"
    [ "$RETVAL" = 2 ] && return 2
    # Wait for children to finish too if this is a daemon that forks
    # and if the daemon is only ever run from this initscript.
    # If the above conditions are not satisfied then add some other code
    # that waits for the process to drop all resources that could be
    # needed by services started subsequently. A last resort is to
    # sleep for some time.
    start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --user $USER --chuid $USER --exec $DAEMON
    [ "$?" = 2 ] && return 2
    # Many daemons don't delete their pidfiles when they exit.
    rm -f $PIDFILE
    return "$RETVAL"
}

case "$1" in
    start)
        [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
        do_start
        case "$?" in
            0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
            2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
        esac
        ;;
    stop)
        [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
        do_stop
        case "$?" in
            0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
            2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
        esac
        ;;
    status)
        status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
        ;;
    restart)
        log_daemon_msg "Restarting $DESC" "$NAME"
        do_stop
        case "$?" in
            0|1)
                do_start
                case "$?" in
                    0) log_end_msg 0 ;;
                    1) log_end_msg 1 ;; # Old process is still running
                    *) log_end_msg 1 ;; # Failed to start
                esac
                ;;
            *)
                # Failed to stop
                log_end_msg 1
                ;;
        esac
        ;;
    *)
        echo "Usage: $SCRIPTNAME {start|stop|status|restart}" >&2
        exit 3
        ;;
esac

:
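A sketch of how the init script above would typically be wired up on a Debian-family host; the install path comes from the script's own SCRIPTNAME variable, the rest is standard sysvinit plumbing:

    sudo cp setup/files/redash_supervisord_init /etc/init.d/redash_supervisord
    sudo chmod +x /etc/init.d/redash_supervisord
    sudo update-rc.d redash_supervisord defaults   # register for the runlevels declared in the LSB header
    sudo service redash_supervisord start
    sudo service redash_supervisord status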
setup/files/redis.conf (Normal file, 785 lines)
@@ -0,0 +1,785 @@
## Generated by install_server.sh ##
# Redis configuration file example

# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.

################################## INCLUDES ###################################

# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
# from admin or Redis Sentinel. Since Redis always uses the last processed
# line as value of a configuration directive, you'd better put includes
# at the beginning of this file to avoid overwriting config changes at runtime.
#
# If instead you are interested in using includes to override configuration
# options, it is better to use include as the last line.
#
# include /path/to/local.conf
# include /path/to/other.conf

################################ GENERAL #####################################

# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize yes

# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
# default. You can specify a custom pid file location here.
pidfile /var/run/redis_6379.pid

# Accept connections on the specified port, default is 6379.
# If port 0 is specified Redis will not listen on a TCP socket.
port 6379

# TCP listen() backlog.
#
# In high requests-per-second environments you need a high backlog in order
# to avoid slow client connection issues. Note that the Linux kernel
# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
# in order to get the desired effect.
tcp-backlog 511

# By default Redis listens for connections from all the network interfaces
# available on the server. It is possible to listen to just one or multiple
# interfaces using the "bind" configuration directive, followed by one or
# more IP addresses.
#
# Examples:
#
# bind 192.168.1.100 10.0.0.1
# bind 127.0.0.1

# Specify the path for the Unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
# unixsocketperm 700

# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0

# TCP keepalive.
#
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
# of communication. This is useful for two reasons:
#
# 1) Detect dead peers.
# 2) Keep the connection alive from the point of view of network
#    equipment in the middle.
#
# On Linux, the specified value (in seconds) is the period used to send ACKs.
# Note that to close the connection the double of the time is needed.
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 60 seconds.
tcp-keepalive 0

# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice

# Specify the log file name. Also the empty string can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile /var/log/redis_6379.log

# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
# syslog-enabled no

# Specify the syslog identity.
# syslog-ident redis

# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0

# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16

################################ SNAPSHOTTING ################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving entirely by commenting out all the "save" lines.
#
# It is also possible to remove all the previously configured save
# points by adding a save directive with a single empty string argument
# like in the following example:
#
# save ""

save 900 1
save 300 10
save 60 10000

# By default Redis will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process starts working again Redis will
# automatically allow writes again.
#
# However if you have set up proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# continue to work as usual even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes

# Compress string objects using LZF when dumping .rdb databases?
# By default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes

# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
# This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
# for maximum performance.
#
# RDB files created with checksum disabled have a checksum of zero that will
# tell the loading code to skip the check.
rdbchecksum yes

# The filename where to dump the DB
dbfilename dump.rdb

# The working directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir /var/lib/redis/6379

################################# REPLICATION #################################

# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. A few things to understand ASAP about Redis replication.
#
# 1) Redis replication is asynchronous, but you can configure a master to
#    stop accepting writes if it appears to be not connected with at least
#    a given number of slaves.
# 2) Redis slaves are able to perform a partial resynchronization with the
#    master if the replication link is lost for a relatively small amount of
#    time. You may want to configure the replication backlog size (see the next
#    sections of this file) with a sensible value depending on your needs.
# 3) Replication is automatic and does not need user intervention. After a
#    network partition slaves automatically try to reconnect to masters
#    and resynchronize with them.
#
# slaveof <masterip> <masterport>

# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>

# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
#    still reply to client requests, possibly with out of date data, or the
#    data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
#    an error "SYNC with master in progress" to all kinds of commands
#    except INFO and SLAVEOF.
#
slave-serve-stale-data yes

# You can configure a slave instance to accept writes or not. Writing against
# a slave instance may be useful to store some ephemeral data (because data
# written on a slave will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
# Since Redis 2.6 by default slaves are read-only.
#
# Note: read only slaves are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
# Still a read only slave exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
# security of read only slaves using 'rename-command' to shadow all the
# administrative / dangerous commands.
slave-read-only yes

# Slaves send PINGs to the server at a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10

# The following option sets the replication timeout for:
#
# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
# 2) Master timeout from the point of view of slaves (data, pings).
# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
# every time there is low traffic between the master and the slave.
#
# repl-timeout 60

# Disable TCP_NODELAY on the slave socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# less bandwidth to send data to slaves. But this can add a delay for
# the data to appear on the slave side, up to 40 milliseconds with
# Linux kernels using a default configuration.
#
# If you select "no" the delay for data to appear on the slave side will
# be reduced but more bandwidth will be used for replication.
#
# By default we optimize for low latency, but in very high traffic conditions
# or when the master and slaves are many hops away, turning this to "yes" may
# be a good idea.
repl-disable-tcp-nodelay no

# Set the replication backlog size. The backlog is a buffer that accumulates
# slave data when slaves are disconnected for some time, so that when a slave
# wants to reconnect again, often a full resync is not needed, but a partial
# resync is enough, just passing the portion of data the slave missed while
# disconnected.
#
# The bigger the replication backlog, the longer the slave can be
# disconnected and still later be able to perform a partial resynchronization.
#
# The backlog is only allocated once there is at least one slave connected.
#
# repl-backlog-size 1mb

# After a master has had no connected slaves for some time, the backlog
# will be freed. The following option configures the amount of seconds that
# need to elapse, starting from the time the last slave disconnected, for
# the backlog buffer to be freed.
#
# A value of 0 means to never release the backlog.
#
# repl-backlog-ttl 3600

# The slave priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100

# It is possible for a master to stop accepting writes if there are fewer than
# N slaves connected, with a lag less than or equal to M seconds.
#
# The N slaves need to be in "online" state.
#
# The lag in seconds, which must be <= the specified value, is calculated from
# the last ping received from the slave, which is usually sent every second.
#
# This option does not GUARANTEE that N replicas will accept the write, but
# will limit the window of exposure for lost writes in case not enough slaves
# are available, to the specified number of seconds.
#
# For example to require at least 3 slaves with a lag <= 10 seconds use:
#
# min-slaves-to-write 3
# min-slaves-max-lag 10
#
# Setting one or the other to 0 disables the feature.
#
# By default min-slaves-to-write is set to 0 (feature disabled) and
# min-slaves-max-lag is set to 10.

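# As a concrete (hypothetical) illustration of the slaveof/masterauth pair
# documented above, not part of the shipped file: a slave of a
# password-protected master at 10.0.0.5 would combine them as
#
# slaveof 10.0.0.5 6379
# masterauth s3cret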
################################## SECURITY ###################################

# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
#
# requirepass foobared

# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to slaves may cause problems.

################################### LIMITS ####################################

# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
# maxclients 10000

# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU cache, or to set
# a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves is subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
# buffer of slaves is filled with DELs of keys evicted triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have slaves attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for slave
# output buffers (but this is not needed if the policy is 'noeviction').
#
# maxmemory <bytes>

# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select among the following behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key according to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
#
# Note: with any of the above policies, Redis will return an error on write
# operations when there are no suitable keys for eviction.
#
# At the date of writing these commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
# getset mset msetnx exec sort
#
# The default is:
#
# maxmemory-policy volatile-lru

# LRU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can select as well the sample
# size to check. For instance by default Redis will check three keys and
# pick the one that was used least recently; you can change the sample size
# using the following configuration directive.
#
# maxmemory-samples 3

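# A concrete (hypothetical) illustration of the two directives documented
# above, not part of the shipped file: capping this instance at 256 MB and
# evicting the least recently used key regardless of TTL would read
#
# maxmemory 256mb
# maxmemory-policy allkeys-lru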
############################## APPEND ONLY MODE ###############################

# By default Redis asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the Redis process or
# a power outage may result in a few minutes of writes lost (depending on
# the configured save points).
#
# The Append Only File is an alternative persistence mode that provides
# much better durability. For instance using the default data fsync policy
# (see later in the config file) Redis can lose just one second of writes in a
# dramatic event like a server power outage, or a single write if something
# goes wrong with the Redis process itself, but the operating system is
# still running correctly.
#
# AOF and RDB persistence can be enabled at the same time without problems.
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# with the better durability guarantees.
#
# Please check http://redis.io/topics/persistence for more information.

appendonly no

# The name of the append only file (default: "appendonly.aof")

appendfilename "appendonly.aof"

# The fsync() call tells the Operating System to actually write data on disk
# instead of waiting for more data in the output buffer. Some OSes will really
# flush data to disk, others will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec", as that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will let the operating system flush the output buffer when
# it wants, for better performance (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# For more details please check the following article:
# http://antirez.com/post/redis-persistence-demystified.html
#
# If unsure, use "everysec".

# appendfsync always
appendfsync everysec
# appendfsync no

# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.

no-appendfsync-on-rewrite no

# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
# bigger than the specified percentage, the rewrite is triggered. Also
# you need to specify a minimal size for the AOF file to be rewritten, this
# is useful to avoid rewriting the AOF file even if the percentage increase
# is reached but it is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.

auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb

# An AOF file may be found to be truncated at the end during the Redis
# startup process, when the AOF data gets loaded back into memory.
# This may happen when the system where Redis is running
# crashes, especially when an ext4 filesystem is mounted without the
# data=ordered option (however this can't happen when Redis itself
# crashes or aborts but the operating system still works correctly).
#
# Redis can either exit with an error when this happens, or load as much
# data as possible (the default now) and start if the AOF file is found
# to be truncated at the end. The following option controls this behavior.
#
# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
# the Redis server starts emitting a log to inform the user of the event.
# Otherwise if the option is set to no, the server aborts with an error
# and refuses to start. When the option is set to no, the user is required
# to fix the AOF file using the "redis-check-aof" utility before restarting
# the server.
#
# Note that if the AOF file is found to be corrupted in the middle
# the server will still exit with an error. This option only applies when
# Redis tries to read more data from the AOF file but not enough bytes
# are found.
aof-load-truncated yes

################################ LUA SCRIPTING  ###############################

# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceeds the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that has not yet called any write commands. The second
# is the only way to shut down the server in case a write command was
# already issued by the script but the user doesn't want to wait for the
# natural termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000

################################## SLOW LOG ###################################

# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.

# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000

# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128

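# Once these thresholds are active, the log can be inspected at runtime; an
# illustrative redis-cli session (not part of the shipped file):
#
#   SLOWLOG GET 10    fetch the ten most recent slow entries
#   SLOWLOG RESET     discard them and reclaim the memory they used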
################################ LATENCY MONITOR ##############################

# The Redis latency monitoring subsystem samples different operations
# at runtime in order to collect data related to possible sources of
# latency of a Redis instance.
#
# Via the LATENCY command this information is available to the user that can
# print graphs and obtain reports.
#
# The system only logs operations that were performed in a time equal or
# greater than the amount of milliseconds specified via the
# latency-monitor-threshold configuration directive. When its value is set
# to zero, the latency monitor is turned off.
#
# By default latency monitoring is disabled since it is mostly not needed
# if you don't have latency issues, and collecting data has a performance
# impact that, while very small, can be measured under big load. Latency
# monitoring can easily be enabled at runtime using the command
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
latency-monitor-threshold 0

############################# Event notification ##############################

# Redis can notify Pub/Sub clients about events happening in the key space.
# This feature is documented at http://redis.io/topics/notifications
#
# For instance if keyspace events notification is enabled, and a client
# performs a DEL operation on key "foo" stored in the Database 0, two
# messages will be published via Pub/Sub:
#
# PUBLISH __keyspace@0__:foo del
# PUBLISH __keyevent@0__:del foo
#
# It is possible to select the events that Redis will notify among a set
# of classes. Every class is identified by a single character:
#
#  K     Keyspace events, published with __keyspace@<db>__ prefix.
#  E     Keyevent events, published with __keyevent@<db>__ prefix.
#  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
#  $     String commands
#  l     List commands
#  s     Set commands
#  h     Hash commands
#  z     Sorted set commands
#  x     Expired events (events generated every time a key expires)
#  e     Evicted events (events generated when a key is evicted for maxmemory)
#  A     Alias for g$lshzxe, so that the "AKE" string means all the events.
#
# The "notify-keyspace-events" directive takes as argument a string composed
# of zero or more of the characters above. The empty string means that
# notifications are disabled entirely.
#
# Example: to enable list and generic events, from the point of view of the
# event name, use:
#
# notify-keyspace-events Elg
#
# Example 2: to get the stream of the expired keys subscribing to channel
# name __keyevent@0__:expired use:
#
# notify-keyspace-events Ex
#
# By default all notifications are disabled because most users don't need
# this feature and the feature has some overhead. Note that if you don't
# specify at least one of K or E, no events will be delivered.
notify-keyspace-events ""

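# As an illustrative companion to Example 2 above (not part of the shipped
# file): with "notify-keyspace-events Ex" set, key expirations can be watched
# live from another terminal with
#
#   redis-cli psubscribe '__keyevent@0__:expired'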
############################### ADVANCED CONFIG ###############################

# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64

# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64

# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit in the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512

# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64

# HyperLogLog sparse representation bytes limit. The limit includes the
# 16 bytes header. When a HyperLogLog using the sparse representation crosses
# this limit, it is converted into the dense representation.
#
# A value greater than 16000 is totally useless, since at that point the
# dense representation is more memory efficient.
#
# The suggested value is ~ 3000 in order to have the benefits of
# the space efficient encoding without slowing down too much PFADD,
# which is O(N) with the sparse encoding. The value can be raised to
# ~ 10000 when CPU is not a concern, but space is, and the data set is
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000

# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run against a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes

# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients including MONITOR clients
# slave  -> slave clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reaches 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously overcomes
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can be read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard and the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60

# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10

# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
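A quick way to sanity-check a running instance against the file above, assuming it is installed at /etc/redis/6379.conf (the path the companion init script below expects):

    redis-server /etc/redis/6379.conf               # start with this configuration
    redis-cli -p 6379 CONFIG GET maxmemory-policy   # confirm a directive took effect
    redis-cli -p 6379 CONFIG GET save               # inspect the configured save points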
setup/files/redis_init (Normal file, 66 lines)
@@ -0,0 +1,66 @@
#!/bin/sh

EXEC=/usr/local/bin/redis-server
CLIEXEC=/usr/local/bin/redis-cli
PIDFILE=/var/run/redis_6379.pid
CONF="/etc/redis/6379.conf"
REDISPORT="6379"
###############
# SysV Init Information
# chkconfig: - 58 74
# description: redis_6379 is the redis daemon.
### BEGIN INIT INFO
# Provides: redis_6379
# Required-Start: $network $local_fs $remote_fs
# Required-Stop: $network $local_fs $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Should-Start: $syslog $named
# Should-Stop: $syslog $named
# Short-Description: start and stop redis_6379
# Description: Redis daemon
### END INIT INFO


case "$1" in
    start)
        if [ -f $PIDFILE ]
        then
            echo "$PIDFILE exists, process is already running or crashed"
        else
            echo "Starting Redis server..."
            $EXEC $CONF
        fi
        ;;
    stop)
        if [ ! -f $PIDFILE ]
        then
            echo "$PIDFILE does not exist, process is not running"
        else
            PID=$(cat $PIDFILE)
            echo "Stopping ..."
            $CLIEXEC -p $REDISPORT shutdown
            while [ -x /proc/${PID} ]
            do
                echo "Waiting for Redis to shutdown ..."
                sleep 1
            done
            echo "Redis stopped"
        fi
        ;;
    status)
        if [ ! -f $PIDFILE ]
        then
            echo 'Redis is not running'
        else
            # Use cat rather than the bash-only $(<file) form, since this
            # script declares #!/bin/sh and may run under dash.
            echo "Redis is running ($(cat $PIDFILE))"
        fi
        ;;
    restart)
        $0 stop
        $0 start
        ;;
    *)
        echo "Please use start, stop, restart or status as first argument"
        ;;
esac
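A sketch of installing the script above as a service; the redis_6379 name is taken from its own LSB header, and update-rc.d is the Debian-family registration tool (chkconfig being the Red Hat equivalent hinted at in the header):

    sudo cp setup/files/redis_init /etc/init.d/redis_6379
    sudo chmod +x /etc/init.d/redis_6379
    sudo update-rc.d redis_6379 defaults
    sudo service redis_6379 start
    sudo service redis_6379 status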
Some files were not shown because too many files have changed in this diff.