Compare commits

...

112 Commits

Author SHA1 Message Date
Arik Fraimovich
939aae086f Add changelog entry for favicons fix 2017-04-18 15:22:05 +03:00
Arik Fraimovich
742e38b08d Update CHANGELOG and bump version 2017-04-18 15:20:21 +03:00
Arik Fraimovich
3c7c93fc9f Fix: favicon wasn't showing up.
Closes #1719.
2017-04-18 15:19:57 +03:00
Arik Fraimovich
53ffff9759 Merge pull request #1716 from deecay/dashboard-tag-m17n
Fix: Non-ASCII dashboard tag
2017-04-18 15:02:27 +03:00
Arik Fraimovich
2e7fafc4d8 CHANGELOG update. 2017-04-18 14:59:44 +03:00
Arik Fraimovich
c66b09effe Merge pull request #1717 from getredash/fix_embeds
Fix: page freezes when rendering large result set.
2017-04-11 18:33:11 +03:00
Arik Fraimovich
a087fe4bcd Fix: page freezes when rendering large result set.
Closes #1711.
2017-04-11 18:05:43 +03:00
Arik Fraimovich
1f4946cc04 Merge pull request #1710 from getredash/fix_embeds
Fix: embeds were not rendering in PhantomJS.
2017-04-05 12:58:05 +03:00
Arik Fraimovich
08505a2208 Add changelog entry 2017-04-05 12:40:56 +03:00
Arik Fraimovich
e1c186bbf8 Fix: embeds were not rendering in PhantomJS.
Include polyfill for missing ArrayView functions.

Closes #1708.
2017-04-05 12:38:21 +03:00
Arik Fraimovich
c83d354eed Merge pull request #1707 from getredash/docker-compose
Update docker-compose configuration:
2017-04-03 18:30:55 +03:00
Arik Fraimovich
81063731c9 Update docker-compose configuration:
* Use newer versions of Redis & PostgreSQL
* Use image for production docker-compose.
2017-04-03 18:28:46 +03:00
Arik Fraimovich
f66fe5ff80 Update packer configuration to create GCE image 2017-04-03 18:07:19 +03:00
Arik Fraimovich
8425698583 Update env 2017-04-03 13:18:34 +03:00
Arik Fraimovich
8b08b1a563 Merge pull request #1704 from getredash/new_bootstrap
New bootstrap script for Ubuntu 16.04
2017-04-03 13:16:53 +03:00
Arik Fraimovich
15b228b754 Update README 2017-04-03 12:54:23 +03:00
Arik Fraimovich
1db4157b29 Fix bootstrap script to be headless 2017-04-03 12:54:17 +03:00
Arik Fraimovich
079530cf63 Remove unused files 2017-04-03 12:54:05 +03:00
Arik Fraimovich
d2370a94c7 New bootstrap script 2017-04-03 10:30:06 +03:00
Arik Fraimovich
903463972b Fix: handle the case when a scheduled query wasn't run before 2017-04-02 15:20:45 +03:00
Arik Fraimovich
2707e24f30 Update CHANGELOG & version 2017-04-02 14:43:02 +03:00
Arik Fraimovich
3df826692c Merge pull request #1703 from getredash/queries
Fix: optimize queries to avoid N+1 cases
2017-04-02 14:09:12 +03:00
Arik Fraimovich
1142a441fc Fix: optimize queries to avoid N+1 cases 2017-04-02 14:01:23 +03:00
Arik Fraimovich
53268989c5 Merge pull request #1701 from akiray03/refactor-next-to-next_path
Change: rename local variable `next` to `next_path`
2017-04-02 11:15:46 +03:00
deecay
83ed9fdc51 Fix: Dashboard tag for unicode dashboard names 2017-04-01 23:28:53 +09:00
Akira Yumiyama
0dc98e87a6 rename local variable next to next_path, because it shadows a built-in. 2017-04-01 20:25:23 +09:00
Arik Fraimovich
0cf4db1137 Update atsd_client version. 2017-03-30 15:19:57 +03:00
Arik Fraimovich
4e27069d07 Merge pull request #1696 from spasovski/percentstack
Fix: wrong percent stacking math
2017-03-30 12:15:05 +03:00
Davor Spasovski
3fcd07bc1c fix percent stacking math (issue 41) 2017-03-29 15:07:37 -04:00
Arik Fraimovich
3414ff7331 Update CHANGELOG.md 2017-03-27 15:07:11 +03:00
Arik Fraimovich
04cd798c48 Update changelog 2017-03-27 15:07:00 +03:00
Arik Fraimovich
50dcf23b1a Merge pull request #1665 from benmagro/filter_params
Fix: Set query filter to match dashboard level filters
2017-03-27 13:13:30 +03:00
Arik Fraimovich
1bb4d6d534 Fix condition to only take effect when there is a value in the query string. 2017-03-27 13:12:50 +03:00
Arik Fraimovich
66a5e394de Merge pull request #1688 from akiray03/refactoring-query-results-export
[Refactoring] move make_{csv,excel}_content from handlers to models
2017-03-27 12:42:58 +03:00
Arik Fraimovich
c4ab0916cc Merge pull request #1682 from denisov-vlad/clickhouse-types-fix
[Clickhouse] Fix: better support for types
2017-03-27 12:18:17 +03:00
Arik Fraimovich
73cb6925d3 Merge pull request #1689 from getredash/feature/bubble
Fix: user can't edit their own alert
2017-03-26 11:55:39 +03:00
Arik Fraimovich
aaf0da4b70 Fix: user can't edit their own alert 2017-03-26 11:45:13 +03:00
Arik Fraimovich
c99bd03d99 Merge pull request #1666 from deecay/pivot-getdata
Change: add support for filtered data in Pivot table visualization
2017-03-26 11:33:57 +03:00
Akira Yumiyama
7fbb1b9229 [Refactoring] move make_{csv,excel}_content from handlers to models
I want to be able to use it like: `python manage.py queries export <query_id>`
2017-03-26 12:24:10 +09:00
Arik Fraimovich
ba54d68513 Merge pull request #1686 from msnider/salesforce-sandbox
[Salesforce] Change: Sandbox cannot be required or it will be forced to True.
2017-03-25 21:08:22 +03:00
Matt Snider
f73cbf3b51 Sandbox cannot be required or it will be forced to True. Also, don't annotate SOQL queries as they don't allow comments. 2017-03-25 12:19:53 -05:00
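
The point here: in a query runner's configuration schema, marking a boolean as required effectively forces it to true in the setup form, so the sandbox flag has to stay optional. A minimal, hypothetical sketch of such a schema (property names and structure are illustrative assumptions, not the actual Salesforce runner code):

```python
# Hypothetical query-runner configuration schema (illustrative only).
# "sandbox" is a boolean and is deliberately NOT listed under "required":
# per the commit message, a required boolean would be forced to True.
def configuration_schema():
    return {
        'type': 'object',
        'properties': {
            'username': {'type': 'string'},
            'password': {'type': 'string'},
            'security_token': {'type': 'string'},
            'sandbox': {'type': 'boolean'},
        },
        'required': ['username', 'password', 'security_token'],
    }
```
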
deecay
3f047348e2 Pivottable shows filtered data 2017-03-23 21:08:57 +09:00
deecay
10fe3c5373 Revert "Pivottable shows filtered and formatted data"
This reverts commit f011d3060a.
2017-03-23 21:07:56 +09:00
Vladislav Denisov
9c8755c9ae clickhouse: added support for nullable columns 2017-03-22 10:28:44 +03:00
Vladislav Denisov
e8908d04bb issues/1616: fixed clickhouse types 2017-03-22 09:44:21 +03:00
Arik Fraimovich
293f9dcaf6 Merge pull request #1680 from getredash/feature/bubble
Add: bubble charts support
2017-03-21 10:58:36 +02:00
Arik Fraimovich
ce31b13ff6 Add: bubble charts support 2017-03-21 10:46:11 +02:00
Arik Fraimovich
a033dc4569 Fix: angular minification issue in textbox editor 2017-03-20 17:41:52 +02:00
Arik Fraimovich
6ff338964b Fix: angular minification issue in schema browser 2017-03-20 17:37:32 +02:00
Arik Fraimovich
97a7701879 Merge pull request #1617 from 44px/refresh-schema-button
Add "Refresh Schema" button to the datasource
2017-03-20 14:11:05 +02:00
Arik Fraimovich
7558b391a9 Merge pull request #1673 from 44px/editorconfig
Add: .editorconfig to keep code style consistent
2017-03-20 10:54:58 +02:00
Alexander Shepelin
b6bed112ee Add .editorconfig to keep code style consistent 2017-03-17 01:09:44 +03:00
Alexander Shepelin
9417dcb2c2 preserve old schema if we get an error on refresh 2017-03-16 23:56:56 +03:00
Alexander Shepelin
5f106a1eee Merge remote-tracking branch 'origin/master' into refresh-schema-button 2017-03-16 23:20:21 +03:00
Arik Fraimovich
cda05c73c7 Merge pull request #1657 from deecay/ie-scrollbar
Show vertical scrollbar for IE
2017-03-16 11:59:42 +02:00
deecay
95398697cb Set ms-overflow-style to auto for html 2017-03-16 18:29:25 +09:00
Arik Fraimovich
dc019cc37a Merge pull request #1649 from getredash/fixes201602
Fix: datetime parameters were not using a date picker.
2017-03-15 18:08:32 +02:00
Arik Fraimovich
72cb5babe6 Change datetime serialization format 2017-03-15 18:00:25 +02:00
Arik Fraimovich
ebc2e12621 Merge pull request #1622 from axibase/master
[Data Sources] Add: ATSD query runner
2017-03-15 16:31:03 +02:00
deecay
f011d3060a Pivottable shows filtered and formatted data 2017-03-10 19:10:31 +09:00
Ben Magro
8c5f71a0a1 set filter params in query to match dashboard-level filters when they are present 2017-03-10 11:28:39 +11:00
Arik Fraimovich
da00e74491 Merge pull request #1660 from akiray03/docker-entrypoint-support-manage-py
Add: docker-entrypoint for manage.py's commands.
2017-03-08 15:49:14 +02:00
Akira Yumiyama
b56ff1357e docker-entrypoint supports any of manage.py's commands. 2017-03-08 22:41:15 +09:00
Arik Fraimovich
ecd4d659a8 Merge pull request #1494 from yuua/impala-schema
[Impala] enable schema browser
2017-03-07 14:37:42 +02:00
Arik Fraimovich
fec5565396 Merge pull request #1650 from deecay/v1-ie11
Add babel-plugin-transform-object-assign for IE support
2017-03-07 14:32:41 +02:00
Arik Fraimovich
6ec5ea5c28 Resume building Docker images 2017-03-07 12:26:00 +02:00
Arik Fraimovich
3f8e32cc1f Merge pull request #1656 from getredash/docker
Fix Docker file ownership issues:
2017-03-07 11:40:18 +02:00
Arik Fraimovich
be6426014d Fix Docker file ownership issues:
1. Simplify user creation to create a non system user (so the uid will usually
   match the host user).
2. Set the user to redash & remove the need to change user in docker entrypoint.
2017-03-07 11:37:31 +02:00
Arik Fraimovich
8b4643d6ac Remove noisy log 2017-03-06 21:22:41 +02:00
Arik Fraimovich
d8a0885953 Fix: tests were using old method signature 2017-03-06 21:22:29 +02:00
Arik Fraimovich
83e6b6f50c Tests use the same session as the tested code, and we can't use the same
objects after the tested code calls commit() without disabling expire
on commit.

It seems like a safe thing in our case.
2017-03-06 13:49:29 +02:00
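
The actual change appears further down in this diff as `db = SQLAlchemy(session_options={'expire_on_commit': False})`. A minimal sketch of what that option does, assuming a plain Flask-SQLAlchemy app (the database URI below is illustrative, not Redash's):

```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'  # illustrative only

# By default (expire_on_commit=True) every object held by a test is expired
# when the code under test calls session.commit(), so the next attribute
# access re-queries the database or fails on a detached instance.
# Disabling it lets tests keep reading the same objects after a commit.
db = SQLAlchemy(app, session_options={'expire_on_commit': False})
```
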
rmakulov
928bd83967 minor change 2017-03-06 13:16:02 +03:00
Arik Fraimovich
230fe15cde Merge pull request #1653 from r0fls/fix-embed-close
Fix: query embed dialog close button wasn't working
2017-03-05 20:08:14 +02:00
Arik Fraimovich
72ad16a8b3 Fix: use correct format string 2017-03-05 20:03:44 +02:00
Arik Fraimovich
23cc632d5a Duplicate favicons instead of symlinks 2017-03-05 09:15:11 +02:00
Raphael Deem
1cf2bb1bb2 fix query embed close button 2017-03-04 14:51:00 -08:00
yuua
181031957f impala get_table: remove filter and str to unicode 2017-03-03 17:58:13 +09:00
deecay
cfa9a45fc8 Add babel-plugin-transform-object-assign 2017-03-03 09:26:56 +09:00
Arik Fraimovich
9bb87e711a Fix: datetime parameters were not using a date picker. 2017-03-02 15:56:40 +02:00
Rustam Makulov
255a01f786 Merge branch 'master' into master 2017-03-01 12:29:09 +04:00
Arik Fraimovich
69c26f2c0d Merge pull request #1643 from msnider/salesforce
[Data Sources] Add: SalesForce query runner
2017-03-01 09:51:43 +02:00
Matt Snider
3650e21458 Move import to top of file 2017-02-28 22:06:34 -06:00
Matt Snider
8eefd0e9c4 Format to PEP8 2017-02-28 21:54:32 -06:00
Matt Snider
c72a097808 Added Salesforce SOQL query runner 2017-02-28 21:44:38 -06:00
rmakulov
2ffda6f5c5 code revised 2017-02-28 19:03:17 +03:00
Arik Fraimovich
ce8ffae152 Merge pull request #1584 from washort/scheduled-query-backoff
Scheduled query backoff
2017-02-28 13:19:34 +02:00
Arik Fraimovich
b54dd27959 Merge pull request #1624 from washort/presto-errors
Fix: make errors from Presto runner JSON-serializable
2017-02-28 13:04:46 +02:00
Arik Fraimovich
3e807e5b41 Merge pull request #1623 from washort/jobs-race
Bugfix: race condition in query task status reporting
2017-02-28 13:04:10 +02:00
Arik Fraimovich
20f1a60f90 Merge pull request #1619 from deecay/count-rows
Add: use results row count as the value for the counter visualization.
2017-02-28 13:03:36 +02:00
Arik Fraimovich
9d2619b856 Merge pull request #1641 from getredash/fixes201602
UI Fixes
2017-02-28 12:59:41 +02:00
Arik Fraimovich
a2c7f6df7a Friendlier labels for archived state of dashboard/query 2017-02-28 12:50:27 +02:00
Arik Fraimovich
15a87db5d5 Fix: remove $$hashKey from Pivot table 2017-02-28 12:46:58 +02:00
Arik Fraimovich
2f86466309 Merge pull request #1639 from getredash/fixes201602
Small UI fixes
2017-02-28 12:10:22 +02:00
Arik Fraimovich
bccfef533e Fix: wrong timestamps in admin tasks page 2017-02-27 17:51:00 +02:00
Arik Fraimovich
ef020e88e7 Fix: word cloud visualization didn't show column names 2017-02-27 17:42:44 +02:00
Arik Fraimovich
222a6069cb Fix: permission dialog wasn't rendering.
Closes #1633.
2017-02-27 15:44:19 +02:00
Arik Fraimovich
6b6df84bce Fix: map visualization had severe performance issue.
Close #1603.
2017-02-27 15:30:30 +02:00
rmakulov
fcfd204ec6 atsd query runner 2017-02-24 17:31:01 +03:00
Arik Fraimovich
57e6c5f05e Merge pull request #1630 from MichaelJAndy/MichaelJAndy-sorting-patch
Fix: dashboard-list.js to sort dashboards and update page-header
2017-02-23 15:10:34 +02:00
Michael Andy
683e369d86 dashboard-list.js sorts dashboards and updates page-header 2017-02-23 23:41:58 +11:00
Arik Fraimovich
f12596a6fc Merge pull request #1629 from getredash/fix_too_many_connections
Fix: keyboard shortcuts didn't work in parameter inputs
2017-02-23 14:38:22 +02:00
Arik Fraimovich
09239439ae Fix: keyboard shortcuts didn't work in parameter inputs 2017-02-23 12:51:38 +02:00
Arik Fraimovich
2bb11dffca add v1-rc.2 release date. 2017-02-22 22:11:26 +02:00
Allen Short
2407b115e4 Exponential backoff for failing queries 2017-02-22 10:29:08 -06:00
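
The model change for this shows up later in the diff: a `schedule_failures` column plus an extra delay of `2**failures` minutes in `should_schedule_next`. A standalone sketch of that arithmetic (not the actual Redash function, just the backoff idea):

```python
import datetime


def next_run(previous_iteration, ttl_seconds, failures):
    """Backoff sketch: base TTL plus 2**failures minutes after repeated failures."""
    next_iteration = previous_iteration + datetime.timedelta(seconds=ttl_seconds)
    if failures:
        next_iteration += datetime.timedelta(minutes=2 ** failures)
    return next_iteration


# A query on a 300-second schedule that has failed 3 times in a row
# is pushed back by an extra 2**3 = 8 minutes.
print(next_run(datetime.datetime(2017, 2, 22, 10, 0), 300, 3))
# -> 2017-02-22 10:13:00
```
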
Allen Short
ca3e125da8 Refactor outdated_queries/refresh_queries tests 2017-02-22 10:28:35 -06:00
Allen Short
2a447137d4 Fix: make errors from Presto runner JSON-serializable 2017-02-21 13:11:32 -06:00
deecay
3864f11694 Add: Counter option to use result set row count as the value. 2017-02-21 16:56:45 +09:00
Alexander Shepelin
8b59815bf2 add 'refresh schema' button to schema-browser 2017-02-20 23:56:21 +03:00
Allen Short
a98df94399 Fix race condition in query task status reporting 2017-02-20 12:20:55 -06:00
Alexander Shepelin
b2e747caef refactor schema-browser directive to component style 2017-02-18 23:10:15 +03:00
deecay
af978e966d Show vertical scrollbar for IE 2017-02-14 16:53:11 +09:00
yuua
78408e50c5 impala query_runner get_tables error fix 2016-12-29 10:54:57 +09:00
78 changed files with 1247 additions and 1698 deletions

.editorconfig (new file, 14 lines)
View File

@@ -0,0 +1,14 @@
root = true
[*]
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
[*.py]
indent_style = space
indent_size = 4
[*.{js,css,html}]
indent_style = space
indent_size = 2

View File

@@ -1,6 +1,54 @@
# Change Log
## v1.0.0-rc.2 - Unreleased
## v1.0.2 - 2017-04-18
### Fixed
- Fix: favicon wasn't showing up.
- Fix: support for unicode in dashboard tags. @deecay
- Fix: page freezes when rendering large result set.
- Fix: chart embeds were not rendering in PhantomJS.
## v1.0.1 - 2017-04-02
### Added
- Add: bubble charts support.
- Add "Refresh Schema" button to the datasource @44px
- [Data Sources] Add: ATSD query runner @rmakulov
- [Data Sources] Add: SalesForce query runner @msnider
- Add: scheduled query backoff in case of errors @washort
- Add: use results row count as the value for the counter visualization. @deecay
### Changed
- Moved CSV/Excel query results generation code to models. @akiray03
- Add support for filtered data in Pivot table visualization @deecay
- Friendlier labels for archived state of dashboard/query
### Fixed
- Fix: optimize queries to avoid N+1 queries.
- Fix: percent stacking math was wrong. @spasovski
- Fix: set query filter to match value from URL query string. @benmagro
- [Clickhouse] Fix: detection of various data types. @denisov-vlad
- Fix: user can't edit their own alert.
- Fix: angular minification issue in textbox editor and schema browser.
- Fixes to better support IE11 (add polyfill for Object.assign and show vertical scrollbar). @deecay
- Fix: datetime parameters were not using a date picker.
- Fix: Impala schema wasn't loading.
- Fix: query embed dialog close button wasn't working @r0fls
- Fix: make errors from Presto runner JSON-serializable @washort
- Fix: race condition in query task status reporting @washort
- Fix: remove $$hashKey from Pivot table
- Fix: map visualization had severe performance issue.
- Fix: permission dialog wasn't rendering.
- Fix: word cloud visualization didn't show column names.
- Fix: wrong timestamps in admin tasks page.
- Fix: page header wasn't updating on dashboards page @MichaelJAndy
- Fix: keyboard shortcuts didn't work in parameter inputs
## v1.0.0-rc.2 - 2017-02-22
### Changed

View File

@@ -8,5 +8,6 @@ RUN pip install -r requirements.txt -r requirements_dev.txt -r requirements_all_
COPY . ./
RUN npm install && npm run build && rm -rf node_modules
RUN chown -R redash /app
USER redash
ENTRYPOINT ["/app/bin/docker-entrypoint"]

View File

@@ -1,19 +1,12 @@
#!/bin/bash
set -e
# When the /app folder is a host volume, on Linux machines the redash user can't
# change files in the mounted volume if it has a different uid than the host user.
#
# In most cases the host uid will be 1000...
# This feels hackish, if anyone got a better solution I'll be happy to hear.
usermod -u 1000 redash
worker() {
WORKERS_COUNT=${WORKERS_COUNT:-2}
QUEUES=${QUEUES:-queries,scheduled_queries,celery}
echo "Starting $WORKERS_COUNT workers for queues: $QUEUES..."
exec sudo -E -u redash /usr/local/bin/celery worker --app=redash.worker -c$WORKERS_COUNT -Q$QUEUES -linfo --maxtasksperchild=10 -Ofair
exec /usr/local/bin/celery worker --app=redash.worker -c$WORKERS_COUNT -Q$QUEUES -linfo --maxtasksperchild=10 -Ofair
}
scheduler() {
@@ -22,11 +15,11 @@ scheduler() {
echo "Starting scheduler and $WORKERS_COUNT workers for queues: $QUEUES..."
exec sudo -E -u redash /usr/local/bin/celery worker --app=redash.worker --beat -c$WORKERS_COUNT -Q$QUEUES -linfo --maxtasksperchild=10 -Ofair
exec /usr/local/bin/celery worker --app=redash.worker --beat -c$WORKERS_COUNT -Q$QUEUES -linfo --maxtasksperchild=10 -Ofair
}
server() {
exec sudo -E -u redash /usr/local/bin/gunicorn -b 0.0.0.0:5000 --name redash -w4 redash.wsgi:app
exec /usr/local/bin/gunicorn -b 0.0.0.0:5000 --name redash -w4 redash.wsgi:app
}
help() {
@@ -42,11 +35,12 @@ help() {
echo "shell -- open shell"
echo "dev_server -- start Flask development server with debugger and auto reload"
echo "create_db -- create database tables"
echo "manage -- CLI to manage redash"
}
tests() {
export REDASH_DATABASE_URL="postgresql://postgres@postgres/tests"
exec sudo -E -u redash make test
exec make test
}
case "$1" in
@@ -63,13 +57,17 @@ case "$1" in
scheduler
;;
dev_server)
exec sudo -E -u redash /app/manage.py runserver --debugger --reload -h 0.0.0.0
exec /app/manage.py runserver --debugger --reload -h 0.0.0.0
;;
shell)
exec sudo -E -u redash /app/manage.py shell
exec /app/manage.py shell
;;
create_db)
exec sudo -E -u redash /app/manage.py database create_tables
exec /app/manage.py database create_tables
;;
manage)
shift
exec /app/manage.py $*
;;
tests)
tests

View File

@@ -25,9 +25,9 @@ deployment:
# - make upload
#- echo "client/app" >> .dockerignore
#- docker pull redash/redash:latest
#- docker build -t redash/redash:$(./manage.py version | sed -e "s/\+/./") .
#- docker login -e $DOCKER_EMAIL -u $DOCKER_USER -p $DOCKER_PASS
#- docker push redash/redash:$(./manage.py version | sed -e "s/\+/./")
- docker login -e $DOCKER_EMAIL -u $DOCKER_USER -p $DOCKER_PASS
- docker build -t redash/redash:$(./manage.py version | sed -e "s/\+/./") .
- docker push redash/redash:$(./manage.py version | sed -e "s/\+/./")
notify:
webhooks:
- url: https://webhooks.gitter.im/e/895d09c3165a0913ac2f

View File

@@ -1,3 +1,4 @@
{
"presets": ["es2015", "stage-2"]
"presets": ["es2015", "stage-2"],
"plugins": ["transform-object-assign"]
}

View File

@@ -4,12 +4,17 @@ body {
body.headless {
padding-top: 0px;
padding-bottom: 0px;
}
body.headless nav.app-header {
display: none;
}
body.headless div#footer {
display: none;
}
a[ng-click] {
cursor: pointer;
}
@@ -415,6 +420,16 @@ counter-renderer counter-name {
background-color: white;
}
.schema-control {
display: flex;
padding: 5px 0;
}
.schema-control .form-control {
height: 30px;
margin-right: 5px;
}
.schema-browser {
height: calc(100% - 45px);
overflow-y: auto;

View File

@@ -6430,7 +6430,7 @@ a {
}
html {
overflow-x: hidden\0/;
-ms-overflow-style: none;
-ms-overflow-style: auto;
}
html,
body {

View File

@@ -9,7 +9,7 @@
</thead>
<tbody>
<tr ng-repeat="row in $ctrl.rows">
<tr ng-repeat="row in $ctrl.rowsToDisplay">
<td ng-repeat="column in $ctrl.columns" ng-bind-html="$ctrl.sanitize(column.formatFunction(row[column.name]))">
</td>
</tr>

View File

@@ -15,7 +15,7 @@ function DynamicTable($sanitize) {
const first = this.count * (this.page - 1);
const last = this.count * (this.page);
this.rows = this.allRows.slice(first, last);
this.rowsToDisplay = this.rows.slice(first, last);
};
this.$onChanges = (changes) => {
@@ -24,10 +24,10 @@ function DynamicTable($sanitize) {
}
if (changes.rows) {
this.allRows = changes.rows.currentValue;
this.rows = changes.rows.currentValue;
}
this.rowsCount = this.allRows.length;
this.rowsCount = this.rows.length;
this.pageChanged();
};

View File

@@ -3,10 +3,10 @@
<label>{{param.title}}</label>
<button class="btn btn-default btn-xs" ng-click="showParameterSettings(param)" ng-if="editable"><i class="zmdi zmdi-settings"></i></button>
<span ng-switch="param.type">
<input ng-switch-when="datetime-with-seconds" type="datetime-local" step="1" class="form-control" ng-model="param.value">
<input ng-switch-when="datetime" type="text" class="form-control" ng-model="param.value">
<input ng-switch-when="date" type="text" class="form-control" ng-model="param.value">
<input ng-switch-default type="{{param.type}}" class="form-control" ng-model="param.value">
<input ng-switch-when="datetime-with-seconds" type="datetime-local" step="1" class="form-control" ng-model="param.ngModel">
<input ng-switch-when="datetime-local" type="datetime-local" class="form-control" ng-model="param.ngModel">
<input ng-switch-when="date" type="date" class="form-control" ng-model="param.ngModel">
<input ng-switch-default type="{{param.type}}" class="form-control" ng-model="param.ngModel">
</span>
</div>
</div>

View File

@@ -9,6 +9,8 @@ const PermissionsEditorComponent = {
dismiss: '&',
},
controller($http, User) {
'ngInject';
this.grantees = [];
this.newGrantees = {};
this.aclUrl = this.resolve.aclUrl.url;

View File

@@ -1,6 +1,8 @@
import moment from 'moment';
export default function (ngModule) {
ngModule.filter('toMilliseconds', () => value => value * 1000.0);
ngModule.filter('dateTime', clientConfig =>
function dateTime(value) {
if (!value) {

View File

@@ -6,9 +6,9 @@
<base href="/">
<title>Redash</title>
<link rel="icon" type="image/png" sizes="32x32" href="./assets/images/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="96x96" href="./assets/images/favicon-96x96.png">
<link rel="icon" type="image/png" sizes="16x16" href="./assets/images/favicon-16x16.png">
<link rel="icon" type="image/png" sizes="32x32" href="/images/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="96x96" href="/images/favicon-96x96.png">
<link rel="icon" type="image/png" sizes="16x16" href="/images/favicon-16x16.png">
</head>
<body>

View File

@@ -1,3 +1,6 @@
// This polyfill is needed to support PhantomJS which we use to generate PNGs from embeds.
import 'core-js/fn/typed/array-buffer';
import 'material-design-iconic-font/dist/css/material-design-iconic-font.css';
import 'font-awesome/css/font-awesome.css';
import 'ui-select/dist/select.css';

View File

@@ -38,9 +38,9 @@
<td>{{row.query_id}}</td>
<td>{{row.query_hash}}</td>
<td>{{row.run_time | durationHumanize}}</td>
<td>{{row.created_at | dateTime }}</td>
<td>{{row.started_at | dateTime }}</td>
<td>{{row.updated_at | dateTime }}</td>
<td>{{row.created_at | toMilliseconds | dateTime }}</td>
<td>{{row.started_at | toMilliseconds | dateTime }}</td>
<td>{{row.updated_at | toMilliseconds | dateTime }}</td>
<td ng-if="selectedTab === 'in_progress'">
<cancel-query-button query-id="dataRow.query_id" task-id="dataRow.task_id"></cancel-query-button>
</td>

View File

@@ -27,8 +27,8 @@ function AlertCtrl($routeParams, $location, $sce, toastr, currentUser, Query, Ev
} else {
this.alert = Alert.get({ id: this.alertId }, (alert) => {
this.onQuerySelected(new Query(alert.query));
this.canEdit = currentUser.canEdit(this.alert);
});
this.canEdit = currentUser.canEdit(this.alert);
}
this.ops = ['greater than', 'less than', 'equals'];

View File

@@ -24,7 +24,7 @@
<td>
<a href="dashboard/{{ dashboard.slug }}">
<span class="label label-primary m-2" ng-bind="tag" ng-repeat="tag in dashboard.tags"></span> {{ dashboard.untagged_name }}
<span class="label label-warning" ng-if="dashboard.is_draft">Unpublished</span>
<span class="label label-default" ng-if="dashboard.is_draft">Unpublished</span>
</a>
</td>
<td>{{ dashboard.created_at | dateTime }}</td>

View File

@@ -6,7 +6,7 @@ import './dashboard-list.css';
function DashboardListCtrl(Dashboard, $location, clientConfig) {
const TAGS_REGEX = /(^[\w\s]+):|(#[\w-]+)/ig;
const TAGS_REGEX = /(^([\w\s]|[^\u0000-\u007F])+):|(#([\w-]|[^\u0000-\u007F])+)/ig;
this.logoUrl = clientConfig.logoUrl;
const page = parseInt($location.search().page || 1, 10);
@@ -39,6 +39,7 @@ function DashboardListCtrl(Dashboard, $location, clientConfig) {
this.dashboards.$promise.then((data) => {
const out = data.map(dashboard => dashboard.name.match(TAGS_REGEX));
this.allTags = _.unique(_.flatten(out)).filter(e => e).map(tag => tag.replace(/:$/, ''));
this.allTags.sort();
});
this.paginator = new Paginator([], { page });
@@ -82,6 +83,7 @@ export default function (ngModule) {
const route = {
template: '<page-dashboard-list></page-dashboard-list>',
reloadOnSearch: false,
title: 'Dashboards',
};
return {

View File

@@ -1,7 +1,10 @@
<div class="container">
<div class="row bg-white p-t-10 p-b-10 m-b-10">
<div class="col-sm-9">
<h3>{{$ctrl.dashboard.name}} <span class="label label-warning" ng-if="$ctrl.dashboard.is_draft">Unpublished</span></h3>
<h3>{{$ctrl.dashboard.name}}
<span class="label label-default" ng-if="$ctrl.dashboard.is_draft && !$ctrl.dashboard.is_archived">Unpublished</span>
<span class="label label-warning" ng-if="$ctrl.dashboard.is_archived" uib-popover="This dashboard is archived and and won't appear in the dashboards list or search results." popover-placement="right" popover-trigger="'mouseenter'">Archived</span>
</h3>
</div>
<div class="col-sm-3 text-right">
<h3>
@@ -33,7 +36,7 @@
<span class="zmdi zmdi-share"></span>
</button>
</span>
<div class="btn-group hidden-print" role="group" ng-show="$ctrl.dashboard.canEdit()" uib-dropdown>
<div class="btn-group hidden-print" role="group" ng-show="$ctrl.dashboard.canEdit()" uib-dropdown ng-if="!$ctrl.dashboard.is_archived">
<button class="btn btn-default btn-sm dropdown-toggle" uib-dropdown-toggle>
<span class="zmdi zmdi-more"></span>
</button>
@@ -50,10 +53,6 @@
</div>
</div>
<div class="col-lg-12 p-5 m-b-10 bg-orange c-white" ng-if="$ctrl.dashboard.is_archived">
This dashboard is archived and won't appear in the dashboards list or search results.
</div>
<div class="m-b-5">
<parameters parameters="$ctrl.globalParameters" on-change="$ctrl.onGlobalParametersChange()"></parameters>
</div>

View File

@@ -34,7 +34,7 @@ function DashboardCtrl($rootScope, $routeParams, $location, $timeout, $q, $uibMo
if (widget.getQuery()) {
widget.getQuery().getParametersDefs().filter(p => p.global).forEach((param) => {
const defaults = {};
defaults[param.name] = _.clone(param);
defaults[param.name] = _.create(Object.getPrototypeOf(param), param);
defaults[param.name].locals = [];
globalParams = _.defaults(globalParams, defaults);
globalParams[param.name].locals.push(param);
@@ -84,13 +84,14 @@ function DashboardCtrl($rootScope, $routeParams, $location, $timeout, $q, $uibMo
return;
}
if (hasQueryStringValue) {
queryFilter.current = $location.search()[queryFilter.name];
}
if (!_.has(filters, queryFilter.name)) {
const filter = _.extend({}, queryFilter);
filters[filter.name] = filter;
filters[filter.name].originFilters = [];
if (hasQueryStringValue) {
filter.current = $location.search()[filter.name];
}
}
// TODO: merge values.

View File

@@ -9,6 +9,8 @@ const EditTextBoxComponent = {
dismiss: '&',
},
controller(toastr) {
'ngInject';
this.saveInProgress = false;
this.widget = this.resolve.widget;
this.saveWidget = () => {

View File

@@ -15,7 +15,7 @@
<p class="f-500 m-b-20 c-black">Recent Dashboards</p>
<div class="list-group">
<a ng-href="dashboard/{{dashboard.slug}}" class="list-group-item" ng-repeat="dashboard in $ctrl.recentDashboards">
{{dashboard.name}} <span class="label label-warning" ng-if="dashboard.is_draft">Unpublished</span>
{{dashboard.name}} <span class="label label-default" ng-if="dashboard.is_draft">Unpublished</span>
</a>
</div>
</div>
@@ -24,7 +24,7 @@
<p class="f-500 m-b-20 c-black">Recent Queries</p>
<div class="list-group">
<a ng-href="queries/{{query.id}}" class="list-group-item"
ng-repeat="query in $ctrl.recentQueries">{{query.name}} <span class="label label-warning" ng-if="query.is_draft">Unpublished</span></a>
ng-repeat="query in $ctrl.recentQueries">{{query.name}} <span class="label label-default" ng-if="query.is_draft">Unpublished</span></a>
</div>
</div>
</div>

View File

@@ -16,7 +16,7 @@
</thead>
<tbody>
<tr ng-repeat="query in $ctrl.paginator.getPageRows()">
<td><a href="queries/{{query.id}}">{{query.name}}</a> <span class="label label-warning" ng-if="query.is_draft">Unpublished</span></td>
<td><a href="queries/{{query.id}}">{{query.name}}</a> <span class="label label-default" ng-if="query.is_draft">Unpublished</span></td>
<td>{{query.user.name}}</td>
<td>{{query.created_at | dateTime}}</td>
<td>{{query.runtime | durationHumanize}}</td>

View File

@@ -1,5 +1,5 @@
<div class="modal-header">
<button type="button" class="close" aria-label="Close" ng-click="close()"><span aria-hidden="true">&times;</span></button>
<button type="button" class="close" aria-label="Close" ng-click="$ctrl.close()"><span aria-hidden="true">&times;</span></button>
<h4 class="modal-title">Embed Code</h4>
</div>
<div class="modal-body">

View File

@@ -21,7 +21,7 @@
</thead>
<tbody>
<tr ng-repeat="query in $ctrl.paginator.getPageRows()">
<td><a href="queries/{{query.id}}">{{query.name}}</a> <span class="label label-warning" ng-if="query.is_draft">Unpublished</span></td>
<td><a href="queries/{{query.id}}">{{query.name}}</a> <span class="label label-default" ng-if="query.is_draft">Unpublished</span></td>
<td>{{query.user.name}}</td>
<td>{{query.created_at | dateTime}}</td>
<td>{{query.schedule | scheduleHumanize}}</td>

View File

@@ -3,7 +3,7 @@ import 'brace/mode/python';
import 'brace/mode/sql';
import 'brace/mode/json';
import 'brace/ext/language_tools';
import { each, map } from 'underscore';
import { map } from 'underscore';
// By default Ace will try to load snippet files for the different modes and fail.
// We don't need them, so we use these placeholders until we define our own.
@@ -25,7 +25,6 @@ function queryEditor(QuerySnippet) {
query: '=',
schema: '=',
syntax: '=',
shortcuts: '=',
},
template: '<div ui-ace="editorOptions" ng-model="query.query"></div>',
link: {
@@ -47,11 +46,6 @@ function queryEditor(QuerySnippet) {
editor.commands.bindKey('Cmd+L', null);
editor.commands.bindKey('Ctrl+L', null);
each($scope.shortcuts, (fn, key) => {
key = key.replace('meta', 'Cmd').replace('ctrl', 'Ctrl');
editor.commands.bindKey(key, () => fn());
});
QuerySnippet.query((snippets) => {
window.ace.acequire(['ace/snippets'], (snippetsModule) => {
const snippetManager = snippetsModule.snippetManager;

View File

@@ -21,7 +21,8 @@
<div class="col-sm-9">
<h3>
<edit-in-place editable="canEdit" done="saveName" ignore-blanks="true" value="query.name"></edit-in-place>
<span class="label label-warning" ng-if="query.is_draft">Unpublished</span>
<span class="label label-default" ng-if="query.is_draft && !query.is_archived">Unpublished</span>
<span class="label label-warning" ng-if="query.is_archived" uib-popover="This query is archived and can't be used in dashboards, and won't appear in search results." popover-placement="right" popover-trigger="'mouseenter'">Archived</span>
</h3>
<p>
<em>
@@ -75,15 +76,16 @@
</span>
</h3>
</div>
<div class="col-lg-12 p-5 bg-orange c-white" ng-if="query.is_archived">
This query is archived and can't be used in dashboards, and won't appear in search results.
</div>
</div>
<!-- editor -->
<div class="container">
<div class="row bg-white p-b-5" ng-if="sourceMode" resizable r-directions="['bottom']" r-height="300" style="min-height:100px;">
<schema-browser schema="schema" class="col-md-3 hidden-sm hidden-xs schema-container" ng-show="hasSchema"></schema-browser>
<schema-browser class="col-md-3 hidden-sm hidden-xs schema-container"
schema="schema"
on-refresh="refreshSchema()"
ng-show="hasSchema">
</schema-browser>
<div ng-class="editorSize" style="height:100%;">
<div class="p-5">
@@ -128,8 +130,7 @@
<p style="height:calc(100% - 40px);">
<query-editor query="query"
schema="schema"
syntax="dataSource.syntax"
shortcuts="shortcuts"></query-editor>
syntax="dataSource.syntax"></query-editor>
</p>
</div>
</div>

View File

@@ -1,13 +1,21 @@
<div class="schema-container">
<div class="p-t-5 p-b-5">
<input type="text" placeholder="Search schema..." class="form-control" ng-model="schemaFilter">
<div class="schema-control">
<input type="text" placeholder="Search schema..." class="form-control" ng-model="$ctrl.schemaFilter">
<button class="btn btn-default"
title="Refresh Schema"
ng-click="$ctrl.onRefresh()">
<span class="zmdi zmdi-refresh"></span>
</button>
</div>
<div class="schema-browser" vs-repeat vs-size="getSize(table)">
<div ng-repeat="table in schema | filter:schemaFilter track by table.name">
<div class="table-name" ng-click="showTable(table)">
<i class="fa fa-table"></i> <strong><span title="{{table.name}}">{{table.name}}</span>
<span ng-if="table.size !== undefined"> ({{table.size}})</span></strong>
<div class="schema-browser" vs-repeat vs-size="$ctrl.getSize(table)">
<div ng-repeat="table in $ctrl.schema | filter:$ctrl.schemaFilter track by table.name">
<div class="table-name" ng-click="$ctrl.showTable(table)">
<i class="fa fa-table"></i>
<strong>
<span title="{{table.name}}">{{table.name}}</span>
<span ng-if="table.size !== undefined"> ({{table.size}})</span>
</strong>
</div>
<div uib-collapse="table.collapsed">
<div ng-repeat="column in table.columns track by column" style="padding-left:16px;">{{column}}</div>

View File

@@ -1,31 +1,33 @@
import template from './schema-browser.html';
function schemaBrowser() {
return {
restrict: 'E',
scope: {
schema: '=',
},
template,
link($scope) {
$scope.showTable = (table) => {
table.collapsed = !table.collapsed;
$scope.$broadcast('vsRepeatTrigger');
};
function SchemaBrowserCtrl($scope) {
'ngInject';
$scope.getSize = (table) => {
let size = 18;
this.showTable = (table) => {
table.collapsed = !table.collapsed;
$scope.$broadcast('vsRepeatTrigger');
};
if (!table.collapsed) {
size += 18 * table.columns.length;
}
this.getSize = (table) => {
let size = 18;
return size;
};
},
if (!table.collapsed) {
size += 18 * table.columns.length;
}
return size;
};
}
const SchemaBrowser = {
bindings: {
schema: '<',
onRefresh: '&',
},
controller: SchemaBrowserCtrl,
template,
};
export default function (ngModule) {
ngModule.directive('schemaBrowser', schemaBrowser);
ngModule.component('schemaBrowser', SchemaBrowser);
}

View File

@@ -29,24 +29,19 @@ function QuerySourceCtrl(Events, toastr, $controller, $scope, $location, $http,
},
});
$scope.shortcuts = {
'meta+s': function save() {
const shortcuts = {
'mod+s': function save() {
if ($scope.canEdit) {
$scope.saveQuery();
}
},
'ctrl+s': function save() {
if ($scope.canEdit) {
$scope.saveQuery();
}
},
// Cmd+Enter for Mac
'meta+enter': $scope.executeQuery,
// Ctrl+Enter for PC
'ctrl+enter': $scope.executeQuery,
};
KeyboardShortcuts.bind($scope.shortcuts);
KeyboardShortcuts.bind(shortcuts);
$scope.$on('$destroy', () => {
KeyboardShortcuts.unbind(shortcuts);
});
// @override
$scope.saveQuery = (options, data) => {
@@ -106,10 +101,6 @@ function QuerySourceCtrl(Events, toastr, $controller, $scope, $location, $http,
$scope.$watch('query.query', (newQueryText) => {
$scope.isDirty = (newQueryText !== queryText);
});
$scope.$on('$destroy', () => {
KeyboardShortcuts.unbind($scope.shortcuts);
});
}
export default function (ngModule) {

View File

@@ -1,9 +1,9 @@
import { pick, any, some, find } from 'underscore';
import template from './query.html';
function QueryViewCtrl($scope, Events, $route, $routeParams, $http, $location, $window, $q,
Title, AlertDialog, Notifications, clientConfig, toastr, $uibModal, currentUser,
Query, DataSource) {
function QueryViewCtrl($scope, Events, $route, $routeParams, $location, $window, $q,
KeyboardShortcuts, Title, AlertDialog, Notifications, clientConfig, toastr, $uibModal,
currentUser, Query, DataSource) {
const DEFAULT_TAB = 'table';
function getQueryResult(maxAge) {
@@ -43,26 +43,36 @@ function QueryViewCtrl($scope, Events, $route, $routeParams, $http, $location, $
return dataSourceId;
}
function updateSchema() {
$scope.hasSchema = false;
$scope.editorSize = 'col-md-12';
DataSource.getSchema({ id: $scope.query.data_source_id }, (data) => {
if (data && data.length > 0) {
function toggleSchemaBrowser(hasSchema) {
$scope.hasSchema = hasSchema;
$scope.editorSize = hasSchema ? 'col-md-9' : 'col-md-12';
}
function getSchema(refresh = undefined) {
DataSource.getSchema({ id: $scope.query.data_source_id, refresh }, (data) => {
const hasPrevSchema = refresh ? ($scope.schema && ($scope.schema.length > 0)) : false;
const hasSchema = data && (data.length > 0);
if (hasSchema) {
$scope.schema = data;
data.forEach((table) => {
table.collapsed = true;
});
$scope.editorSize = 'col-md-9';
$scope.hasSchema = true;
} else {
$scope.schema = undefined;
$scope.hasSchema = false;
$scope.editorSize = 'col-md-12';
} else if (hasPrevSchema) {
toastr.error('Schema refresh failed. Please try again later.');
}
toggleSchemaBrowser(hasSchema || hasPrevSchema);
});
}
function updateSchema() {
toggleSchemaBrowser(false);
getSchema();
}
$scope.refreshSchema = () => getSchema(true);
function updateDataSources(dataSources) {
// Filter out data sources the user can't query (or used by current query):
$scope.dataSources = dataSources.filter(dataSource =>
@@ -85,11 +95,38 @@ function QueryViewCtrl($scope, Events, $route, $routeParams, $http, $location, $
updateSchema();
}
$scope.executeQuery = () => {
if (!$scope.canExecuteQuery()) {
return;
}
if (!$scope.query.query) {
return;
}
getQueryResult(0);
$scope.lockButton(true);
$scope.cancelling = false;
Events.record('execute', 'query', $scope.query.id);
Notifications.getPermissions();
};
$scope.currentUser = currentUser;
$scope.dataSource = {};
$scope.query = $route.current.locals.query;
$scope.showPermissionsControl = clientConfig.showPermissionsControl;
const shortcuts = {
'mod+enter': $scope.executeQuery,
};
KeyboardShortcuts.bind(shortcuts);
$scope.$on('$destroy', () => {
KeyboardShortcuts.unbind(shortcuts);
});
Events.record('view', 'query', $scope.query.id);
if ($scope.query.hasResult() || $scope.query.paramsRequired()) {
@@ -172,23 +209,6 @@ function QueryViewCtrl($scope, Events, $route, $routeParams, $http, $location, $
$scope.saveQuery(undefined, { name: $scope.query.name });
};
$scope.executeQuery = () => {
if (!$scope.canExecuteQuery()) {
return;
}
if (!$scope.query.query) {
return;
}
getQueryResult(0);
$scope.lockButton(true);
$scope.cancelling = false;
Events.record('execute', 'query', $scope.query.id);
Notifications.getPermissions();
};
$scope.cancelExecution = () => {
$scope.cancelling = true;
$scope.queryResult.cancelExecution();

View File

@@ -2,7 +2,7 @@
<div class="t-heading p-10">
<h3 class="th-title">
<p>
<img src="{{$ctrl.logoUrl}}" style="height: 24px;"/>
<img ng-src="{{$ctrl.logoUrl}}" style="height: 24px;"/>
{{$ctrl.query.name}}
<small><visualization-name visualization="$ctrl.visualization"/></small>
</p>

View File

@@ -3,7 +3,7 @@ function DataSource($resource) {
get: { method: 'GET', cache: false, isArray: false },
query: { method: 'GET', cache: false, isArray: true },
test: { method: 'POST', cache: false, isArray: false, url: 'api/data_sources/:id/test' },
getSchema: { method: 'GET', cache: true, isArray: true, url: 'api/data_sources/:id/schema' },
getSchema: { method: 'GET', cache: false, isArray: true, url: 'api/data_sources/:id/schema' },
};
const DataSourceResource = $resource('api/data_sources/:id', { id: '@id' }, actions);

View File

@@ -1,10 +1,12 @@
import { each } from 'underscore';
import Mousetrap from 'mousetrap';
import 'mousetrap/plugins/global-bind/mousetrap-global-bind';
function KeyboardShortcuts() {
this.bind = function bind(keymap) {
each(keymap, (fn, key) => {
Mousetrap.bind(key, (e) => {
Mousetrap.bindGlobal(key, (e) => {
e.preventDefault();
fn();
});

View File

@@ -224,11 +224,12 @@ function QueryResultService($resource, $timeout, $q) {
const series = {};
this.getData().forEach((row) => {
const point = {};
let point = {};
let seriesName;
let xValue = 0;
const yValues = {};
let eValue = null;
let sizeValue = null;
each(row, (v, definition) => {
const name = definition.split('::')[0] || definition.split('__')[0];
@@ -262,6 +263,11 @@ function QueryResultService($resource, $timeout, $q) {
seriesName = String(value);
}
if (type === 'size') {
point[type] = value;
sizeValue = value;
}
if (type === 'multiFilter' || type === 'multi-filter') {
seriesName = String(value);
}
@@ -269,11 +275,15 @@ function QueryResultService($resource, $timeout, $q) {
if (seriesName === undefined) {
each(yValues, (yValue, ySeriesName) => {
point = { x: xValue, y: yValue };
if (eValue !== null) {
addPointToSeries({ x: xValue, y: yValue, yError: eValue }, series, ySeriesName);
} else {
addPointToSeries({ x: xValue, y: yValue }, series, ySeriesName);
point.yError = eValue;
}
if (sizeValue !== null) {
point.size = sizeValue;
}
addPointToSeries(point, series, ySeriesName);
});
} else {
addPointToSeries(point, series, seriesName);

View File

@@ -43,6 +43,43 @@ class QueryResultError {
}
class Parameter {
constructor(parameter) {
this.title = parameter.title;
this.name = parameter.name;
this.type = parameter.type;
this.value = parameter.value;
this.global = parameter.global;
}
get ngModel() {
if (this.type === 'date' || this.type === 'datetime-local' || this.type === 'datetime-with-seconds') {
this.$$value = this.$$value || moment(this.value).toDate();
return this.$$value;
} else if (this.type === 'number') {
this.$$value = this.$$value || parseInt(this.value, 10);
return this.$$value;
}
return this.value;
}
set ngModel(value) {
if (value && this.type === 'date') {
this.value = moment(value).format('YYYY-MM-DD');
this.$$value = moment(this.value).toDate();
} else if (value && this.type === 'datetime-local') {
this.value = moment(value).format('YYYY-MM-DD HH:mm');
this.$$value = moment(this.value).toDate();
} else if (value && this.type === 'datetime-with-seconds') {
this.value = moment(value).format('YYYY-MM-DD HH:mm:ss');
this.$$value = moment(this.value).toDate();
} else {
this.value = this.$$value = value;
}
}
}
class Parameters {
constructor(query, queryString) {
this.query = query;
@@ -84,7 +121,8 @@ class Parameters {
});
const parameterExists = p => contains(parameterNames, p.name);
this.query.options.parameters = this.query.options.parameters.filter(parameterExists);
this.query.options.parameters =
this.query.options.parameters.filter(parameterExists).map(p => new Parameter(p));
}
initFromQueryString(queryString) {

View File

@@ -72,6 +72,18 @@
</ui-select>
</div>
<div class="form-group" ng-if="showSizeColumnPicker()">
<label class="control-label">Bubble size column</label>
<ui-select name="sizeColumn" ng-model="form.sizeColumn">
<ui-select-match allow-clear="true" placeholder="Choose column...">{{$select.selected}}</ui-select-match>
<ui-select-choices repeat="column in columnNames | remove:form.yAxisColumns | remove:form.groupby">
<span ng-bind-html="column | highlight: $select.search"></span><span> </span>
<small class="text-muted" ng-bind="columns[column].type"></small>
</ui-select-choices>
</ui-select>
</div>
<div class="form-group" ng-if="options.globalSeriesType != 'custom'">
<label class="control-label">Errors column</label>

View File

@@ -1,4 +1,4 @@
import { extend, has, partial, intersection, without, contains, isUndefined, sortBy, each, pluck, keys, difference } from 'underscore';
import { some, extend, has, partial, intersection, without, contains, isUndefined, sortBy, each, pluck, keys, difference } from 'underscore';
import plotly from './plotly';
import template from './chart.html';
import editorTemplate from './chart-editor.html';
@@ -68,6 +68,7 @@ function ChartEditor(ColorPalette, clientConfig) {
area: { name: 'Area', icon: 'area-chart' },
pie: { name: 'Pie', icon: 'pie-chart' },
scatter: { name: 'Scatter', icon: 'circle-o' },
bubble: { name: 'Bubble', icon: 'circle-o' },
};
if (clientConfig.allowCustomJSVisualizations) {
@@ -83,6 +84,8 @@ function ChartEditor(ColorPalette, clientConfig) {
});
};
scope.showSizeColumnPicker = () => some(scope.options.seriesOptions, options => options.type === 'bubble');
scope.options.customCode = `// Available variables are x, ys, element, and Plotly
// Type console.log(x, ys); for more info about x and ys
// To plot your graph call Plotly.plot(element, ...)
@@ -191,6 +194,15 @@ function ChartEditor(ColorPalette, clientConfig) {
}
});
scope.$watch('form.sizeColumn', (value, old) => {
if (old !== undefined) {
unsetColumn(old);
}
if (value !== undefined) {
setColumnRole('size', value);
}
});
scope.$watch('form.groupby', (value, old) => {
if (old !== undefined) {
@@ -222,6 +234,8 @@ function ChartEditor(ColorPalette, clientConfig) {
scope.form.groupby = key;
} else if (value === 'yError') {
scope.form.errorColumn = key;
} else if (value === 'size') {
scope.form.sizeColumn = key;
}
});
}

View File

@@ -140,7 +140,7 @@ function percentBarStacking(seriesList) {
sum += seriesList[j].y[i];
}
for (let j = 0; j < seriesList.length; j += 1) {
const value = seriesList[j].y[i] / (sum * 100);
const value = seriesList[j].y[i] / sum * 100;
seriesList[j].text.push(`Value: ${seriesList[j].y[i]}<br>Relative: ${value.toFixed(2)}%`);
seriesList[j].y[i] = value;
}
@@ -211,6 +211,8 @@ const PlotlyChart = () => {
} else if (type === 'scatter') {
series.type = 'scatter';
series.mode = 'markers';
} else if (type === 'bubble') {
series.mode = 'markers';
}
}
@@ -333,6 +335,12 @@ const PlotlyChart = () => {
if (!plotlySeries.error_y.length) {
delete plotlySeries.error_y.length;
}
if (seriesOptions.type === 'bubble') {
plotlySeries.marker = {
size: pluck(data, 'size'),
};
}
scope.data.push(plotlySeries);
});

View File

@@ -2,13 +2,13 @@
<div class="form-group">
<label class="col-lg-6">Counter Value Column Name</label>
<div class="col-lg-6">
<select ng-options="name for name in queryResult.getColumnNames()" ng-model="visualization.options.counterColName" class="form-control"></select>
<select ng-options="name for name in queryResult.getColumnNames()" ng-model="visualization.options.counterColName" class="form-control" ng-disabled="visualization.options.countRow"></select>
</div>
</div>
<div class="form-group">
<label class="col-lg-6">Counter Value Row Number</label>
<div class="col-lg-6">
<input type="number" ng-model="visualization.options.rowNumber" min="1" class="form-control">
<input type="number" ng-model="visualization.options.rowNumber" min="1" class="form-control" ng-disabled="visualization.options.countRow">
</div>
</div>
<div class="form-group">
@@ -25,4 +25,10 @@
<input type="number" ng-model="visualization.options.targetRowNumber" min="1" class="form-control">
</div>
</div>
<div class="form-group">
<div class="col-lg-6">
<input type="checkbox" ng-model="visualization.options.countRow">
<i class="input-helper"></i> Count Rows
</div>
</div>
</div>

View File

@@ -14,10 +14,11 @@ function CounterRenderer() {
const counterColName = $scope.visualization.options.counterColName;
const targetColName = $scope.visualization.options.targetColName;
if (counterColName) {
if ($scope.visualization.options.countRow) {
$scope.counterValue = queryData.length;
} else if (counterColName) {
$scope.counterValue = queryData[rowNumber][counterColName];
}
if (targetColName) {
$scope.targetValue = queryData[targetRowNumber][targetColName];

View File

@@ -204,7 +204,7 @@ function mapRenderer() {
}
}
$scope.$watch('queryResult && queryResult.getData()', render, true);
$scope.$watch('queryResult && queryResult.getData()', render);
$scope.$watch('visualization.options', render, true);
angular.element(window).on('resize', resize);
$scope.$watch('visualization.options.height', resize);

View File

@@ -1,3 +1,4 @@
import angular from 'angular';
import $ from 'jquery';
import 'pivottable';
import 'pivottable/dist/pivot.css';
@@ -20,7 +21,7 @@ function pivotTableRenderer() {
if ($scope.queryResult.getData() !== null) {
// We need to give the pivot table its own copy of the data, because it changes
// it which interferes with other visualizations.
data = $.extend(true, [], $scope.queryResult.getRawData());
data = angular.copy($scope.queryResult.getData());
const options = {
renderers: $.pivotUtilities.renderers,
onRefresh(config) {

View File

@@ -2,7 +2,7 @@
<div class="form-group">
<label class="col-lg-6">Word Cloud Column Name</label>
<div class="col-lg-6">
<select ng-options="name for name in queryResult.columnNames" ng-model="visualization.options.column" class="form-control"></select>
<select ng-options="name for name in queryResult.getColumnNames()" ng-model="visualization.options.column" class="form-control"></select>
</div>
</div>
</div>

View File

@@ -7,7 +7,7 @@
version: '2'
services:
server:
build: .
image: redash/redash:latest
command: server
depends_on:
- postgres
@@ -21,7 +21,7 @@ services:
REDASH_DATABASE_URL: "postgresql://postgres@postgres/postgres"
REDASH_COOKIE_SECRET: veryverysecret
worker:
build: .
image: redash/redash:latest
command: scheduler
environment:
PYTHONUNBUFFERED: 0
@@ -31,9 +31,9 @@ services:
QUEUES: "queries,scheduled_queries,celery"
WORKERS_COUNT: 2
redis:
image: redis:2.8
image: redis:3.0-alpine
postgres:
image: postgres:9.3
image: postgres:9.5.6-alpine
# volumes:
# - /opt/postgres-data:/var/lib/postgresql/data
nginx:

View File

@@ -32,9 +32,9 @@ services:
QUEUES: "queries,scheduled_queries,celery"
WORKERS_COUNT: 2
redis:
image: redis:2.8
image: redis:3.0-alpine
postgres:
image: postgres:9.3
image: postgres:9.5.6-alpine
# The following turns the DB into less durable, but gains significant performance improvements for the tests run (x3
# improvement on my personal machine). We should consider moving this into a dedicated Docker Compose configuration for
# tests.

View File

@@ -0,0 +1,25 @@
"""add Query.schedule_failures
Revision ID: d1eae8b9893e
Revises: 65fc9ede4746
Create Date: 2017-02-03 01:45:02.954923
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd1eae8b9893e'
down_revision = '65fc9ede4746'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('queries', sa.Column('schedule_failures', sa.Integer(),
nullable=False, server_default='0'))
def downgrade():
op.drop_column('queries', 'schedule_failures')

View File

@@ -1,6 +1,6 @@
{
"name": "redash-client",
"version": "1.0.0",
"version": "1.0.2",
"description": "The frontend part of Redash.",
"main": "index.js",
"scripts": {
@@ -33,6 +33,7 @@
"angular-ui-bootstrap": "^2.2.0",
"angular-vs-repeat": "^1.1.7",
"brace": "^0.9.0",
"core-js": "https://registry.npmjs.org/core-js/-/core-js-2.4.1.tgz",
"cornelius": "git+https://github.com/restorando/cornelius.git",
"d3": "^3.5.17",
"d3-cloud": "^1.2.1",
@@ -59,6 +60,7 @@
"devDependencies": {
"babel-core": "^6.18.0",
"babel-loader": "^6.2.7",
"babel-plugin-transform-object-assign": "^6.22.0",
"babel-preset-es2015": "^6.18.0",
"babel-preset-stage-2": "^6.18.0",
"css-loader": "^0.25.0",

View File

@@ -16,7 +16,7 @@ from redash.query_runner import import_query_runners
from redash.destinations import import_destinations
__version__ = '1.0.0'
__version__ = '1.0.2'
def setup_logging():

View File

@@ -85,10 +85,10 @@ def org_login(org_slug):
@blueprint.route('/oauth/google', endpoint="authorize")
def login():
callback = url_for('.callback', _external=True)
next = request.args.get('next', url_for("redash.index", org_slug=session.get('org_slug')))
next_path = request.args.get('next', url_for("redash.index", org_slug=session.get('org_slug')))
logger.debug("Callback url: %s", callback)
logger.debug("Next is: %s", next)
return google_remote_app().authorize(callback=callback, state=next)
logger.debug("Next is: %s", next_path)
return google_remote_app().authorize(callback=callback, state=next_path)
@blueprint.route('/oauth/google_callback', endpoint="callback")
@@ -118,6 +118,6 @@ def authorized():
create_and_login_user(org, profile['name'], profile['email'])
next = request.args.get('state') or url_for("redash.index", org_slug=org.slug)
next_path = request.args.get('state') or url_for("redash.index", org_slug=org.slug)
return redirect(next)
return redirect(next_path)

View File

@@ -1,13 +1,10 @@
import csv
import json
import cStringIO
import time
import pystache
from flask import make_response, request
from flask_login import current_user
from flask_restful import abort
import xlsxwriter
from redash import models, settings, utils
from redash.tasks import QueryTask, record_event
from redash.permissions import require_permission, not_view_only, has_access, require_access, view_only
@@ -189,39 +186,13 @@ class QueryResultResource(BaseResource):
@staticmethod
def make_csv_response(query_result):
s = cStringIO.StringIO()
query_data = json.loads(query_result.data)
writer = csv.DictWriter(s, fieldnames=[col['name'] for col in query_data['columns']])
writer.writer = utils.UnicodeWriter(s)
writer.writeheader()
for row in query_data['rows']:
writer.writerow(row)
headers = {'Content-Type': "text/csv; charset=UTF-8"}
return make_response(s.getvalue(), 200, headers)
return make_response(query_result.make_csv_content(), 200, headers)
@staticmethod
def make_excel_response(query_result):
s = cStringIO.StringIO()
query_data = json.loads(query_result.data)
book = xlsxwriter.Workbook(s)
sheet = book.add_worksheet("result")
column_names = []
for (c, col) in enumerate(query_data['columns']):
sheet.write(0, c, col['name'])
column_names.append(col['name'])
for (r, row) in enumerate(query_data['rows']):
for (c, name) in enumerate(column_names):
sheet.write(r + 1, c, row.get(name))
book.close()
headers = {'Content-Type': "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"}
return make_response(s.getvalue(), 200, headers)
return make_response(query_result.make_excel_content(), 200, headers)
class JobResource(BaseResource):

View File

@@ -4,6 +4,9 @@ import hashlib
import itertools
import json
import logging
import cStringIO
import csv
import xlsxwriter
from funcy import project
from flask_sqlalchemy import SQLAlchemy
@@ -13,7 +16,7 @@ from sqlalchemy.event import listens_for
from sqlalchemy.inspection import inspect
from sqlalchemy.types import TypeDecorator
from sqlalchemy.ext.mutable import Mutable
from sqlalchemy.orm import object_session, backref
from sqlalchemy.orm import object_session, backref, joinedload, subqueryload
# noinspection PyUnresolvedReferences
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy import or_
@@ -28,7 +31,9 @@ from redash.utils import generate_token, json_dumps
from redash.utils.configuration import ConfigurationContainer
from redash.metrics import database
db = SQLAlchemy()
db = SQLAlchemy(session_options={
'expire_on_commit': False
})
Column = functools.partial(db.Column, nullable=False)
# AccessPermission and Change use a 'generic foreign key' approach to refer to
@@ -644,8 +649,40 @@ class QueryResult(db.Model, BelongsToOrgMixin):
def groups(self):
return self.data_source.groups
def make_csv_content(self):
s = cStringIO.StringIO()
def should_schedule_next(previous_iteration, now, schedule):
query_data = json.loads(self.data)
writer = csv.DictWriter(s, fieldnames=[col['name'] for col in query_data['columns']])
writer.writer = utils.UnicodeWriter(s)
writer.writeheader()
for row in query_data['rows']:
writer.writerow(row)
return s.getvalue()
def make_excel_content(self):
s = cStringIO.StringIO()
query_data = json.loads(self.data)
book = xlsxwriter.Workbook(s)
sheet = book.add_worksheet("result")
column_names = []
for (c, col) in enumerate(query_data['columns']):
sheet.write(0, c, col['name'])
column_names.append(col['name'])
for (r, row) in enumerate(query_data['rows']):
for (c, name) in enumerate(column_names):
sheet.write(r + 1, c, row.get(name))
book.close()
return s.getvalue()
def should_schedule_next(previous_iteration, now, schedule, failures):
if schedule.isdigit():
ttl = int(schedule)
next_iteration = previous_iteration + datetime.timedelta(seconds=ttl)
@@ -662,7 +699,8 @@ def should_schedule_next(previous_iteration, now, schedule):
previous_iteration = normalized_previous_iteration - datetime.timedelta(days=1)
next_iteration = (previous_iteration + datetime.timedelta(days=1)).replace(hour=hour, minute=minute)
if failures:
next_iteration += datetime.timedelta(minutes=2**failures)
return now > next_iteration
@@ -688,6 +726,7 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
is_archived = Column(db.Boolean, default=False, index=True)
is_draft = Column(db.Boolean, default=True, index=True)
schedule = Column(db.String(10), nullable=True)
schedule_failures = Column(db.Integer, default=0)
visualizations = db.relationship("Visualization", cascade="all, delete-orphan")
options = Column(MutableDict.as_mutable(PseudoJSON), default={})
@@ -767,12 +806,12 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
@classmethod
def all_queries(cls, group_ids, user_id=None, drafts=False):
q = (cls.query.join(User, Query.user_id == User.id)
.outerjoin(QueryResult)
q = (cls.query
.options(joinedload(Query.user),
joinedload(Query.latest_query_data).load_only('runtime', 'retrieved_at'))
.join(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id)
.filter(Query.is_archived == False)
.filter(DataSourceGroup.group_id.in_(group_ids))\
.group_by(Query.id, User.id, QueryResult.id, QueryResult.retrieved_at, QueryResult.runtime)
.order_by(Query.created_at.desc()))
if not drafts:
@@ -787,15 +826,20 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
@classmethod
def outdated_queries(cls):
queries = (db.session.query(Query)
.join(QueryResult)
.join(DataSource)
.filter(Query.schedule != None))
.options(joinedload(Query.latest_query_data).load_only('retrieved_at'))
.filter(Query.schedule != None)
.order_by(Query.id))
now = utils.utcnow()
outdated_queries = {}
for query in queries:
if should_schedule_next(query.latest_query_data.retrieved_at, now, query.schedule):
key = "{}:{}".format(query.query_hash, query.data_source.id)
if query.latest_query_data:
retrieved_at = query.latest_query_data.retrieved_at
else:
retrieved_at = now
if should_schedule_next(retrieved_at, now, query.schedule, query.schedule_failures):
key = "{}:{}".format(query.query_hash, query.data_source_id)
outdated_queries[key] = query
return outdated_queries.values()
@@ -821,12 +865,11 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
Query.data_source_id == DataSourceGroup.data_source_id)
.filter(where)).distinct()
return Query.query.join(User, Query.user_id == User.id).filter(
Query.id.in_(query_ids))
return Query.query.options(joinedload(Query.user)).filter(Query.id.in_(query_ids))
@classmethod
def recent(cls, group_ids, user_id=None, limit=20):
query = (cls.query.join(User, Query.user_id == User.id)
query = (cls.query.options(subqueryload(Query.user))
.filter(Event.created_at > (db.func.current_date() - 7))
.join(Event, Query.id == Event.object_id.cast(db.Integer))
.join(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id)
@@ -838,7 +881,7 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
DataSourceGroup.group_id.in_(group_ids),
or_(Query.is_draft == False, Query.user_id == user_id),
Query.is_archived == False)
.group_by(Event.object_id, Query.id, User.id)
.group_by(Event.object_id, Query.id)
.order_by(db.desc(db.func.count(0))))
if user_id:
@@ -892,6 +935,7 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
@listens_for(Query.query_text, 'set')
def gen_query_hash(target, val, oldval, initiator):
target.query_hash = utils.gen_query_hash(val)
target.schedule_failures = 0
@listens_for(Query.user_id, 'set')
@@ -1027,12 +1071,11 @@ class Alert(TimestampMixin, db.Model):
@classmethod
def all(cls, group_ids):
# TODO: there was a join with user here to prevent N+1 queries. need to revisit this.
return db.session.query(Alert)\
.options(joinedload(Alert.user), joinedload(Alert.query_rel))\
.join(Query)\
.join(DataSourceGroup, DataSourceGroup.data_source_id==Query.data_source_id)\
.filter(DataSourceGroup.group_id.in_(group_ids))\
.group_by(Alert)
.filter(DataSourceGroup.group_id.in_(group_ids))
@classmethod
def get_by_id_and_org(cls, id, org):
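A small self-contained sketch of the retry backoff this hunk introduces: when a scheduled query keeps failing, should_schedule_next() pushes the next run out by an extra 2**failures minutes. The next_run helper and the sample timestamps below are illustrative and cover only the simple interval (seconds-based) schedule branch.

import datetime

def next_run(previous_iteration, schedule_seconds, failures):
    # Mirrors the interval branch of should_schedule_next() plus the new backoff.
    next_iteration = previous_iteration + datetime.timedelta(seconds=schedule_seconds)
    if failures:
        next_iteration += datetime.timedelta(minutes=2 ** failures)
    return next_iteration

last_run = datetime.datetime(2017, 4, 1, 12, 0, 0)
for failures in range(5):
    # hourly schedule: 13:00, then +2, +4, +8, +16 minutes as failures accumulate
    print("%d failure(s) -> %s" % (failures, next_run(last_run, 3600, failures)))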

View File

@@ -0,0 +1,201 @@
from io import StringIO
import json
import logging
import sys
import uuid
import csv
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
try:
import atsd_client
from atsd_client.exceptions import SQLException
from atsd_client.services import SQLService, MetricsService
enabled = True
except ImportError:
enabled = False
types_map = {
'long': TYPE_INTEGER,
'bigint': TYPE_INTEGER,
'integer': TYPE_INTEGER,
'smallint': TYPE_INTEGER,
'float': TYPE_FLOAT,
'double': TYPE_FLOAT,
'decimal': TYPE_FLOAT,
'string': TYPE_STRING,
'date': TYPE_DATE,
'xsd:dateTimeStamp': TYPE_DATETIME
}
def resolve_redash_type(type_in_atsd):
"""
Retrieve corresponding redash type
:param type_in_atsd: `str`
:return: redash type constant
"""
if isinstance(type_in_atsd, dict):
type_in_redash = types_map.get(type_in_atsd['base'])
else:
type_in_redash = types_map.get(type_in_atsd)
return type_in_redash
def generate_rows_and_columns(csv_response):
"""
Prepare rows and columns in redash format from ATSD csv response
:param csv_response: `str`
:return: prepared rows and columns
"""
meta, data = csv_response.split('\n', 1)
meta = meta[1:]
meta_with_padding = meta + '=' * (4 - len(meta) % 4)
meta_decoded = meta_with_padding.decode('base64')
meta_json = json.loads(meta_decoded)
meta_columns = meta_json['tableSchema']['columns']
reader = csv.reader(data.splitlines())
next(reader)
columns = [{'friendly_name': i['titles'],
'type': resolve_redash_type(i['datatype']),
'name': i['name']}
for i in meta_columns]
column_names = [c['name'] for c in columns]
rows = [dict(zip(column_names, row)) for row in reader]
return columns, rows
class AxibaseTSD(BaseQueryRunner):
noop_query = "SELECT 1"
@classmethod
def enabled(cls):
return enabled
@classmethod
def name(cls):
return "Axibase Time Series Database"
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'protocol': {
'type': 'string',
'title': 'Protocol',
'default': 'http'
},
'hostname': {
'type': 'string',
'title': 'Host',
'default': 'axibase_tsd_hostname'
},
'port': {
'type': 'number',
'title': 'Port',
'default': 8088
},
'username': {
'type': 'string'
},
'password': {
'type': 'string',
'title': 'Password'
},
'timeout': {
'type': 'number',
'default': 600,
'title': 'Connection Timeout'
},
'min_insert_date': {
'type': 'string',
'title': 'Metric Minimum Insert Date'
},
'expression': {
'type': 'string',
'title': 'Metric Filter'
},
'limit': {
'type': 'number',
'default': 5000,
'title': 'Metric Limit'
},
'trust_certificate': {
'type': 'boolean',
'title': 'Trust SSL Certificate'
}
},
'required': ['username', 'password', 'hostname', 'protocol', 'port'],
'secret': ['password']
}
def __init__(self, configuration):
super(AxibaseTSD, self).__init__(configuration)
self.url = '{0}://{1}:{2}'.format(self.configuration.get('protocol', 'http'),
self.configuration.get('hostname', 'localhost'),
self.configuration.get('port', 8088))
def run_query(self, query, user):
connection = atsd_client.connect_url(self.url,
self.configuration.get('username'),
self.configuration.get('password'),
verify=self.configuration.get('trust_certificate', False),
timeout=self.configuration.get('timeout', 600))
sql = SQLService(connection)
query_id = str(uuid.uuid4())
try:
logger.debug("SQL running query: %s", query)
data = sql.query_with_params(query, {'outputFormat': 'csv', 'metadataFormat': 'EMBED',
'queryId': query_id})
columns, rows = generate_rows_and_columns(data)
data = {'columns': columns, 'rows': rows}
json_data = json.dumps(data, cls=JSONEncoder)
error = None
except SQLException as e:
json_data = None
error = e.content
except (KeyboardInterrupt, InterruptException):
sql.cancel_query(query_id)
error = "Query cancelled by user."
json_data = None
except Exception:
raise sys.exc_info()[1], None, sys.exc_info()[2]
return json_data, error
def get_schema(self, get_stats=False):
connection = atsd_client.connect_url(self.url,
self.configuration.get('username'),
self.configuration.get('password'),
verify=self.configuration.get('trust_certificate', False),
timeout=self.configuration.get('timeout', 600))
metrics = MetricsService(connection)
ml = metrics.list(expression=self.configuration.get('expression', None),
minInsertDate=self.configuration.get('min_insert_date', None),
limit=self.configuration.get('limit', 5000))
metrics_list = [i.name.encode('utf-8') for i in ml]
metrics_list.append('atsd_series')
schema = {}
default_columns = ['entity', 'datetime', 'time', 'metric', 'value', 'text',
'tags', 'entity.tags', 'metric.tags']
for table_name in metrics_list:
schema[table_name] = {'name': "'{}'".format(table_name),
'columns': default_columns}
values = schema.values()
return values
register(AxibaseTSD)
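To make the metadata handling above easier to follow, here is a synthetic round trip for the response format generate_rows_and_columns() expects: the first line is '#' plus base64-encoded JSON table metadata, followed by a CSV header row and data rows. The metric names and values are invented, and base64.b64decode is used instead of the Python 2-only str.decode('base64') idiom.

import base64
import csv
import json

metadata = {'tableSchema': {'columns': [
    {'name': 'entity', 'titles': 'Entity', 'datatype': 'string'},
    {'name': 'value', 'titles': 'Value', 'datatype': 'float'},
]}}
header = '#' + base64.b64encode(json.dumps(metadata).encode('utf-8')).decode('ascii')
csv_response = header + '\nentity,value\nserver-1,42.5\nserver-2,17.0\n'

meta, data = csv_response.split('\n', 1)
meta_b64 = meta[1:]                          # drop the leading '#'
meta_b64 += '=' * (-len(meta_b64) % 4)       # restore any stripped base64 padding
meta_json = json.loads(base64.b64decode(meta_b64).decode('utf-8'))
columns = [c['name'] for c in meta_json['tableSchema']['columns']]

reader = csv.reader(data.splitlines())
next(reader)                                 # skip the CSV header row, as the runner does
rows = [dict(zip(columns, row)) for row in reader]
print(columns)
print(rows)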

View File

@@ -3,6 +3,7 @@ import logging
from redash.query_runner import *
from redash.utils import JSONEncoder
import requests
import re
logger = logging.getLogger(__name__)
@@ -74,13 +75,16 @@ class ClickHouse(BaseSQLQueryRunner):
@staticmethod
def _define_column_type(column):
c = column.lower()
if 'int' in c:
f = re.search(r'^nullable\((.*)\)$', c)
if f is not None:
c = f.group(1)
if c.startswith('int') or c.startswith('uint'):
return TYPE_INTEGER
elif 'float' in c:
elif c.startswith('float'):
return TYPE_FLOAT
elif 'datetime' == c:
elif c == 'datetime':
return TYPE_DATETIME
elif 'date' == c:
elif c == 'date':
return TYPE_DATE
else:
return TYPE_STRING
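A standalone check of the revised type mapping, including the new Nullable(...) unwrapping and the startswith-based integer/float matching. The TYPE_* strings below are stand-ins for redash's query_runner constants so the sketch runs on its own; the sample column types are illustrative.

import re

TYPE_INTEGER, TYPE_FLOAT, TYPE_DATETIME, TYPE_DATE, TYPE_STRING = (
    'integer', 'float', 'datetime', 'date', 'string')

def define_column_type(column):
    c = column.lower()
    match = re.search(r'^nullable\((.*)\)$', c)
    if match is not None:
        c = match.group(1)                   # unwrap Nullable(...) before matching
    if c.startswith('int') or c.startswith('uint'):
        return TYPE_INTEGER
    elif c.startswith('float'):
        return TYPE_FLOAT
    elif c == 'datetime':
        return TYPE_DATETIME
    elif c == 'date':
        return TYPE_DATE
    return TYPE_STRING

for t in ['Nullable(UInt64)', 'Int32', 'Float64', 'DateTime', 'Date', 'FixedString(16)']:
    print("%s -> %s" % (t, define_column_type(t)))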

View File

@@ -82,11 +82,11 @@ class Impala(BaseSQLQueryRunner):
def _get_tables(self, schema_dict):
schemas_query = "show schemas;"
tables_query = "show tables in %s;"
columns_query = "show column stats %s;"
columns_query = "show column stats %s.%s;"
for schema_name in map(lambda a: a['name'], self._run_query_internal(schemas_query)):
for table_name in map(lambda a: a['name'], self._run_query_internal(tables_query % schema_name)):
columns = map(lambda a: a['Column'], self._run_query_internal(columns_query % table_name))
for schema_name in map(lambda a: unicode(a['name']), self._run_query_internal(schemas_query)):
for table_name in map(lambda a: unicode(a['name']), self._run_query_internal(tables_query % schema_name)):
columns = map(lambda a: unicode(a['Column']), self._run_query_internal(columns_query % (schema_name, table_name)))
if schema_name != 'default':
table_name = '{}.{}'.format(schema_name, table_name)

View File

@@ -121,6 +121,8 @@ class Presto(BaseQueryRunner):
except Exception, ex:
json_data = None
error = ex.message
if not isinstance(error, basestring):
error = unicode(error)
return json_data, error
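The two added lines guard against ex.message not being a plain string (for example, a structured error payload), since the returned error is expected to be text. A tiny illustration with an invented payload, using str() where the Python 2 code checks basestring and coerces with unicode():

error = {'message': 'line 1:8: Column "foo" cannot be resolved', 'errorCode': 47}
if not isinstance(error, str):   # the diff uses basestring/unicode on Python 2
    error = str(error)
print(error)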

View File

@@ -0,0 +1,181 @@
# -*- coding: utf-8 -*-
import re
import logging
from collections import OrderedDict
from redash.query_runner import BaseQueryRunner, register
from redash.query_runner import TYPE_STRING, TYPE_DATE, TYPE_DATETIME, TYPE_INTEGER, TYPE_FLOAT, TYPE_BOOLEAN
from redash.utils import json_dumps
logger = logging.getLogger(__name__)
try:
from simple_salesforce import Salesforce as SimpleSalesforce
from simple_salesforce.api import SalesforceError
enabled = True
except ImportError as e:
enabled = False
# See https://developer.salesforce.com/docs/atlas.en-us.api.meta/api/field_types.htm
TYPES_MAP = dict(
id=TYPE_STRING,
string=TYPE_STRING,
currency=TYPE_FLOAT,
reference=TYPE_STRING,
double=TYPE_FLOAT,
picklist=TYPE_STRING,
date=TYPE_DATE,
url=TYPE_STRING,
phone=TYPE_STRING,
textarea=TYPE_STRING,
int=TYPE_INTEGER,
datetime=TYPE_DATETIME,
boolean=TYPE_BOOLEAN,
percent=TYPE_FLOAT,
multipicklist=TYPE_STRING,
masterrecord=TYPE_STRING,
location=TYPE_STRING,
JunctionIdList=TYPE_STRING,
encryptedstring=TYPE_STRING,
email=TYPE_STRING,
DataCategoryGroupReference=TYPE_STRING,
combobox=TYPE_STRING,
calculated=TYPE_STRING,
anyType=TYPE_STRING,
address=TYPE_STRING
)
# Query Runner for Salesforce SOQL Queries
# For example queries, see:
# https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql_select_examples.htm
class Salesforce(BaseQueryRunner):
@classmethod
def enabled(cls):
return enabled
@classmethod
def annotate_query(cls):
return False
@classmethod
def configuration_schema(cls):
return {
"type": "object",
"properties": {
"username": {
"type": "string"
},
"password": {
"type": "string"
},
"token": {
"type": "string",
"title": "Security Token"
},
"sandbox": {
"type": "boolean"
}
},
"required": ["username", "password", "token"],
"secret": ["password", "token"]
}
def test_connection(self):
response = self._get_sf().describe()
if response is None:
raise Exception("Failed describing objects.")
pass
def _get_sf(self):
sf = SimpleSalesforce(username=self.configuration['username'],
password=self.configuration['password'],
security_token=self.configuration['token'],
sandbox=self.configuration['sandbox'],
client_id='Redash')
return sf
def _clean_value(self, value):
if isinstance(value, OrderedDict) and 'records' in value:
value = value['records']
for row in value:
row.pop('attributes', None)
return value
def _get_value(self, dct, dots):
for key in dots.split('.'):
dct = dct.get(key)
return dct
def _get_column_name(self, key, parents=[]):
return '.'.join(parents + [key])
def _build_columns(self, sf, child, parents=[]):
child_type = child['attributes']['type']
child_desc = sf.__getattr__(child_type).describe()
child_type_map = dict((f['name'], f['type'])for f in child_desc['fields'])
columns = []
for key in child.keys():
if key != 'attributes':
if isinstance(child[key], OrderedDict) and 'attributes' in child[key]:
columns.extend(self._build_columns(sf, child[key], parents + [key]))
else:
column_name = self._get_column_name(key, parents)
key_type = child_type_map.get(key, 'string')
column_type = TYPES_MAP.get(key_type, TYPE_STRING)
columns.append((column_name, column_type))
return columns
def _build_rows(self, columns, records):
rows = []
for record in records:
record.pop('attributes', None)
row = dict()
for column in columns:
key = column[0]
value = self._get_value(record, key)
row[key] = self._clean_value(value)
rows.append(row)
return rows
def run_query(self, query, user):
logger.debug("Salesforce is about to execute query: %s", query)
query = re.sub(r"/\*(.|\n)*?\*/", "", query).strip()
try:
columns = []
rows = []
sf = self._get_sf()
response = sf.query_all(query)
records = response['records']
if response['totalSize'] > 0 and len(records) == 0:
columns = self.fetch_columns([('Count', TYPE_INTEGER)])
rows = [{'Count': response['totalSize']}]
elif len(records) > 0:
cols = self._build_columns(sf, records[0])
rows = self._build_rows(cols, records)
columns = self.fetch_columns(cols)
error = None
data = {'columns': columns, 'rows': rows}
json_data = json_dumps(data)
except SalesforceError as err:
error = err.message
json_data = None
return json_data, error
def get_schema(self, get_stats=False):
sf = self._get_sf()
response = sf.describe()
if response is None:
raise Exception("Failed describing objects.")
schema = {}
for sobject in response['sobjects']:
table_name = sobject['name']
if sobject['queryable'] is True and table_name not in schema:
desc = sf.__getattr__(sobject['name']).describe()
fields = desc['fields']
schema[table_name] = {'name': table_name, 'columns': [f['name'] for f in fields]}
return schema.values()
register(Salesforce)
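A short sketch of how the runner resolves the dotted column names produced by _build_columns() against a nested SOQL record (for example Account.Name). The record below is invented, with 'attributes' entries shaped like those simple_salesforce returns; get_value mirrors the runner's _get_value helper.

from collections import OrderedDict

record = OrderedDict([
    ('attributes', {'type': 'Contact'}),
    ('Id', '003000000000001'),
    ('Account', OrderedDict([
        ('attributes', {'type': 'Account'}),
        ('Name', 'Acme Corp'),
    ])),
])

def get_value(dct, dots):
    # Walk the record one key at a time: 'Account.Name' -> record['Account']['Name']
    for key in dots.split('.'):
        dct = dct.get(key)
    return dct

for column in ['Id', 'Account.Name']:
    print("%s = %s" % (column, get_value(record, column)))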

View File

@@ -185,7 +185,9 @@ default_query_runners = [
'redash.query_runner.mssql',
'redash.query_runner.jql',
'redash.query_runner.google_analytics',
'redash.query_runner.snowflake'
'redash.query_runner.snowflake',
'redash.query_runner.axibase_tsd',
'redash.query_runner.salesforce'
]
enabled_query_runners = array_from_string(os.environ.get("REDASH_ENABLED_QUERY_RUNNERS", ",".join(default_query_runners)))

View File

@@ -1 +0,0 @@
../../../frontend/app/assets/images/favicon-16x16.png

Binary file not shown (favicon-16x16.png replaced: 53 B before, 1.3 KiB after).

View File

@@ -1 +0,0 @@
../../../frontend/app/assets/images/favicon-32x32.png

Binary file not shown (favicon-32x32.png replaced: 53 B before, 2.0 KiB after).

View File

@@ -1 +0,0 @@
../../../frontend/app/assets/images/favicon-96x96.png

Binary file not shown (favicon-96x96.png replaced: 53 B before, 3.8 KiB after).

View File

@@ -154,23 +154,25 @@ class QueryTask(object):
return self._async_result.id
def to_dict(self):
if self._async_result.status == 'STARTED':
updated_at = self._async_result.result.get('start_time', 0)
task_info = self._async_result._get_task_meta()
result, task_status = task_info['result'], task_info['status']
if task_status == 'STARTED':
updated_at = result.get('start_time', 0)
else:
updated_at = 0
status = self.STATUSES[self._async_result.status]
status = self.STATUSES[task_status]
if isinstance(self._async_result.result, Exception):
error = self._async_result.result.message
if isinstance(result, Exception):
error = result.message
status = 4
elif self._async_result.status == 'REVOKED':
elif task_status == 'REVOKED':
error = 'Query execution cancelled.'
else:
error = ''
if self._async_result.successful() and not error:
query_result_id = self._async_result.result
if task_status == 'SUCCESS' and not error:
query_result_id = result
else:
query_result_id = None
@@ -197,7 +199,7 @@ class QueryTask(object):
return self._async_result.revoke(terminate=True, signal='SIGINT')
def enqueue_query(query, data_source, user_id, scheduled=False, metadata={}):
def enqueue_query(query, data_source, user_id, scheduled_query=None, metadata={}):
query_hash = gen_query_hash(query)
logging.info("Inserting job for %s with metadata=%s", query_hash, metadata)
try_count = 0
@@ -223,14 +225,21 @@ def enqueue_query(query, data_source, user_id, scheduled=False, metadata={}):
if not job:
pipe.multi()
if scheduled:
if scheduled_query:
queue_name = data_source.scheduled_queue_name
scheduled_query_id = scheduled_query.id
else:
queue_name = data_source.queue_name
scheduled_query_id = None
result = execute_query.apply_async(args=(query, data_source.id, metadata, user_id), queue=queue_name)
result = execute_query.apply_async(args=(
query, data_source.id, metadata, user_id,
scheduled_query_id),
queue=queue_name)
job = QueryTask(async_result=result)
tracker = QueryTaskTracker.create(result.id, 'created', query_hash, data_source.id, scheduled, metadata)
tracker = QueryTaskTracker.create(
result.id, 'created', query_hash, data_source.id,
scheduled_query is not None, metadata)
tracker.save(connection=pipe)
logging.info("[%s] Created new job: %s", query_hash, job.id)
@@ -262,7 +271,7 @@ def refresh_queries():
logging.info("Skipping refresh of %s because datasource - %s is paused (%s).", query.id, query.data_source.name, query.data_source.pause_reason)
else:
enqueue_query(query.query_text, query.data_source, query.user_id,
scheduled=True,
scheduled_query=query,
metadata={'Query ID': query.id, 'Username': 'Scheduled'})
query_ids.append(query.id)
@@ -378,7 +387,8 @@ class QueryExecutionError(Exception):
# We could have created this as a celery.Task derived class, and act as the task itself. But this might result in weird
# issues as the task class created once per process, so decided to have a plain object instead.
class QueryExecutor(object):
def __init__(self, task, query, data_source_id, user_id, metadata):
def __init__(self, task, query, data_source_id, user_id, metadata,
scheduled_query):
self.task = task
self.query = query
self.data_source_id = data_source_id
@@ -389,6 +399,7 @@ class QueryExecutor(object):
else:
self.user = None
self.query_hash = gen_query_hash(self.query)
self.scheduled_query = scheduled_query
# Load existing tracker or create a new one if the job was created before code update:
self.tracker = QueryTaskTracker.get_by_task_id(task.request.id) or QueryTaskTracker.create(task.request.id,
'created',
@@ -423,7 +434,14 @@ class QueryExecutor(object):
if error:
self.tracker.update(state='failed')
result = QueryExecutionError(error)
if self.scheduled_query:
self.scheduled_query.schedule_failures += 1
models.db.session.add(self.scheduled_query)
else:
if (self.scheduled_query and
self.scheduled_query.schedule_failures > 0):
self.scheduled_query.schedule_failures = 0
models.db.session.add(self.scheduled_query)
query_result, updated_query_ids = models.QueryResult.store_result(
self.data_source.org, self.data_source,
self.query_hash, self.query, data,
@@ -450,10 +468,14 @@ class QueryExecutor(object):
return annotated_query
def _log_progress(self, state):
logger.info(u"task=execute_query state=%s query_hash=%s type=%s ds_id=%d task_id=%s queue=%s query_id=%s username=%s",
state,
self.query_hash, self.data_source.type, self.data_source.id, self.task.request.id, self.task.request.delivery_info['routing_key'],
self.metadata.get('Query ID', 'unknown'), self.metadata.get('Username', 'unknown'))
logger.info(
u"task=execute_query state=%s query_hash=%s type=%s ds_id=%d "
"task_id=%s queue=%s query_id=%s username=%s",
state, self.query_hash, self.data_source.type, self.data_source.id,
self.task.request.id,
self.task.request.delivery_info['routing_key'],
self.metadata.get('Query ID', 'unknown'),
self.metadata.get('Username', 'unknown'))
self.tracker.update(state=state)
def _load_data_source(self):
@@ -464,5 +486,11 @@ class QueryExecutor(object):
# user_id is added last as a keyword argument for backward compatability -- to support executing previously submitted
# jobs before the upgrade to this version.
@celery.task(name="redash.tasks.execute_query", bind=True, track_started=True)
def execute_query(self, query, data_source_id, metadata, user_id=None):
return QueryExecutor(self, query, data_source_id, user_id, metadata).run()
def execute_query(self, query, data_source_id, metadata, user_id=None,
scheduled_query_id=None):
if scheduled_query_id is not None:
scheduled_query = models.Query.query.get(scheduled_query_id)
else:
scheduled_query = None
return QueryExecutor(self, query, data_source_id, user_id, metadata,
scheduled_query).run()
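An isolated sketch of the schedule_failures bookkeeping this change adds around query execution: a failed run of a scheduled query increments the counter (which should_schedule_next() turns into backoff), and the next successful run resets it. ScheduledQuery and record_run are stand-ins for models.Query and the QueryExecutor error-handling block, used here only for illustration.

class ScheduledQuery(object):
    def __init__(self):
        self.schedule_failures = 0

def record_run(scheduled_query, error):
    if error:
        if scheduled_query:
            scheduled_query.schedule_failures += 1
    elif scheduled_query and scheduled_query.schedule_failures > 0:
        scheduled_query.schedule_failures = 0

q = ScheduledQuery()
for outcome in ['boom', 'boom', None]:   # two failing runs, then a success
    record_run(q, outcome)
    print(q.schedule_failures)           # prints 1, 2, 0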

View File

@@ -18,5 +18,7 @@ thrift>=0.8.0
thrift_sasl>=0.1.0
cassandra-driver==3.1.1
snowflake_connector_python==1.3.7
atsd_client==2.0.12
simple_salesforce==0.72.2
# certifi is needed to support MongoDB and SSL:
certifi

View File

@@ -1 +1,4 @@
# DEPRECATED
(left for reference purposes only)
Bootstrap script for Amazon Linux AMI. *Not supported*, we recommend using the Docker images instead.

View File

@@ -7,18 +7,32 @@
},
"builders": [
{
"name": "redash-eu-west-1",
"name": "redash-us-east-1",
"type": "amazon-ebs",
"access_key": "{{user `aws_access_key`}}",
"secret_key": "{{user `aws_secret_key`}}",
"region": "eu-west-1",
"source_ami": "ami-6177f712",
"region": "us-east-1",
"source_ami": "ami-4dd2575b",
"instance_type": "t2.micro",
"ssh_username": "ubuntu",
"ami_name": "redash-{{user `image_version`}}-eu-west-1"
"ami_name": "redash-{{user `image_version`}}-us-east-1"
},
{
"type": "googlecompute",
"account_file": "account.json",
"project_id": "redash-bird-123",
"source_image_family": "ubuntu-1604-lts",
"zone": "us-central1-a",
"ssh_username": "arik"
}
],
"provisioners": [
{
"type": "shell",
"inline": [
"sleep 30"
]
},
{
"type": "shell",
"script": "ubuntu/bootstrap.sh",
@@ -33,5 +47,15 @@
"type": "shell",
"inline": "sudo rm /home/ubuntu/.ssh/authorized_keys || true"
}
],
"post-processors": [
{
"type": "googlecompute-export",
"only": ["googlecompute"],
"paths": [
"gs://redash-images/redash.{{user `redash_version`}}.tar.gz"
],
"keep_input_artifact": true
}
]
}

View File

@@ -1 +1 @@
Bootstrap scripts for Ubuntu (tested on Ubuntu 14.04, although should work with 12.04).
Bootstrap scripts for Ubuntu 16.04.

View File

@@ -1,195 +1,110 @@
#!/bin/bash
#
# This script sets up Redash along with supervisor, nginx, PostgreSQL and Redis. It was written to be used on
# Ubuntu 16.04. Technically it can work with other Ubuntu versions, but you might get incompatible versions
# of PostgreSQL, Redis and maybe some other dependencies.
#
# This script is not idempotent, and if it stops in the middle you can't just run it again. You should either
# understand which parts of it to exclude or just start over on a new VM (assuming you're using a VM).
set -eu
REDASH_BASE_PATH=/opt/redash
# Default branch/version to master if not specified in REDASH_BRANCH env var
REDASH_BRANCH="${REDASH_BRANCH:-master}"
# Install latest version if not specified in REDASH_VERSION env var
REDASH_VERSION=${REDASH_VERSION-0.12.0.b2449}
LATEST_URL="https://github.com/getredash/redash/releases/download/v${REDASH_VERSION}/redash.${REDASH_VERSION}.tar.gz"
REDASH_BRANCH="${REDASH_BRANCH:-master}" # Default branch/version to master if not specified in REDASH_BRANCH env var
REDASH_VERSION=${REDASH_VERSION-1.0.1.b2833} # Install latest version if not specified in REDASH_VERSION env var
LATEST_URL="https://s3.amazonaws.com/redash-releases/redash.${REDASH_VERSION}.tar.gz"
VERSION_DIR="/opt/redash/redash.${REDASH_VERSION}"
REDASH_TARBALL=/tmp/redash.tar.gz
FILES_BASE_URL=https://raw.githubusercontent.com/getredash/redash/${REDASH_BRANCH}/setup/ubuntu/files
FILES_BASE_URL=https://raw.githubusercontent.com/getredash/redash/${REDASH_BRANCH}/setup/ubuntu/files/
cd /tmp/
# Verify running as root:
if [ "$(id -u)" != "0" ]; then
if [ $# -ne 0 ]; then
echo "Failed running with sudo. Exiting." 1>&2
exit 1
verify_root() {
# Verify running as root:
if [ "$(id -u)" != "0" ]; then
if [ $# -ne 0 ]; then
echo "Failed running with sudo. Exiting." 1>&2
exit 1
fi
echo "This script must be run as root. Trying to run with sudo."
sudo bash "$0" --with-sudo
exit 0
fi
echo "This script must be run as root. Trying to run with sudo."
sudo bash "$0" --with-sudo
exit 0
fi
# Base packages
apt-get -y update
DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" dist-upgrade
apt-get install -y python-pip python-dev nginx curl build-essential pwgen
# BigQuery dependencies:
apt-get install -y libffi-dev libssl-dev
# MySQL dependencies:
apt-get install -y libmysqlclient-dev
# Microsoft SQL Server dependencies:
apt-get install -y freetds-dev
# Hive dependencies:
apt-get install -y libsasl2-dev
#Saml dependency
apt-get install -y xmlsec1
# Upgrade pip if host is Ubuntu 16.04
if [[ $(lsb_release -d) = *Ubuntu* ]] && [[ $(lsb_release -rs) = *16.04* ]]; then
pip install --upgrade pip
fi
pip install -U setuptools==23.1.0
# redash user
# TODO: check user doesn't exist yet?
adduser --system --no-create-home --disabled-login --gecos "" redash
# PostgreSQL
pg_available=0
psql --version || pg_available=$?
if [ $pg_available -ne 0 ]; then
wget $FILES_BASE_URL"postgres_apt.sh" -O /tmp/postgres_apt.sh
bash /tmp/postgres_apt.sh
apt-get update
apt-get -y install postgresql-9.3 postgresql-server-dev-9.3
fi
add_service() {
service_name=$1
service_command="/etc/init.d/$service_name"
echo "Adding service: $service_name (/etc/init.d/$service_name)."
chmod +x "$service_command"
if command -v chkconfig >/dev/null 2>&1; then
# we're chkconfig, so lets add to chkconfig and put in runlevel 345
chkconfig --add "$service_name" && echo "Successfully added to chkconfig!"
chkconfig --level 345 "$service_name" on && echo "Successfully added to runlevels 345!"
elif command -v update-rc.d >/dev/null 2>&1; then
#if we're not a chkconfig box assume we're able to use update-rc.d
update-rc.d "$service_name" defaults && echo "Success!"
else
echo "No supported init tool found."
fi
$service_command start
}
# Redis
redis_available=0
redis-cli --version || redis_available=$?
if [ $redis_available -ne 0 ]; then
wget http://download.redis.io/releases/redis-2.8.17.tar.gz
tar xzf redis-2.8.17.tar.gz
rm redis-2.8.17.tar.gz
(cd redis-2.8.17
make
make install
create_redash_user() {
adduser --system --no-create-home --disabled-login --gecos "" redash
}
# Setup process init & configuration
install_system_packages() {
apt-get -y update
# Base packages
apt install -y python-pip python-dev nginx curl build-essential pwgen
# Data sources dependencies:
apt install -y libffi-dev libssl-dev libmysqlclient-dev libpq-dev freetds-dev libsasl2-dev
# SAML dependency
apt install -y xmlsec1
# Storage servers
apt install -y postgresql redis-server
apt install -y supervisor
}
REDIS_PORT=6379
REDIS_CONFIG_FILE="/etc/redis/$REDIS_PORT.conf"
REDIS_LOG_FILE="/var/log/redis_$REDIS_PORT.log"
REDIS_DATA_DIR="/var/lib/redis/$REDIS_PORT"
mkdir -p "$(dirname "$REDIS_CONFIG_FILE")" || die "Could not create redis config directory"
mkdir -p "$(dirname "$REDIS_LOG_FILE")" || die "Could not create redis log dir"
mkdir -p "$REDIS_DATA_DIR" || die "Could not create redis data directory"
create_directories() {
mkdir /opt/redash
chown redash /opt/redash
# Default config file
if [ ! -f "/opt/redash/.env" ]; then
sudo -u redash wget "$FILES_BASE_URL/env" -O /opt/redash/.env
fi
COOKIE_SECRET=$(pwgen -1s 32)
echo "export REDASH_COOKIE_SECRET=$COOKIE_SECRET" >> /opt/redash/.env
}
wget -O /etc/init.d/redis_6379 $FILES_BASE_URL"redis_init"
wget -O $REDIS_CONFIG_FILE $FILES_BASE_URL"redis.conf"
add_service "redis_$REDIS_PORT"
)
rm -rf redis-2.8.17
fi
# Directories
if [ ! -d "$REDASH_BASE_PATH" ]; then
sudo mkdir /opt/redash
sudo chown redash /opt/redash
sudo -u redash mkdir /opt/redash/logs
fi
# Default config file
if [ ! -f "/opt/redash/.env" ]; then
sudo -u redash wget $FILES_BASE_URL"env" -O /opt/redash/.env
echo 'export REDASH_STATIC_ASSETS_PATH="../rd_ui/dist/"' >> /opt/redash/.env
fi
if [ ! -d "$VERSION_DIR" ]; then
extract_redash_sources() {
sudo -u redash wget "$LATEST_URL" -O "$REDASH_TARBALL"
sudo -u redash mkdir "$VERSION_DIR"
sudo -u redash tar -C "$VERSION_DIR" -xvf "$REDASH_TARBALL"
ln -nfs "$VERSION_DIR" /opt/redash/current
ln -nfs /opt/redash/.env /opt/redash/current/.env
}
cd /opt/redash/current
install_python_packages() {
pip install --upgrade pip
# TODO: venv?
pip install -r requirements.txt
fi
pip install setproctitle # setproctitle is used by Celery for "pretty" process titles
pip install -r /opt/redash/current/requirements.txt
pip install -r /opt/redash/current/requirements_all_ds.txt
}
# Create database / tables
pg_user_exists=0
sudo -u postgres psql postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='redash'" | grep -q 1 || pg_user_exists=$?
if [ $pg_user_exists -ne 0 ]; then
echo "Creating redash postgres user & database."
create_database() {
# Create user and database
sudo -u postgres createuser redash --no-superuser --no-createdb --no-createrole
sudo -u postgres createdb redash --owner=redash
cd /opt/redash/current
sudo -u redash bin/run ./manage.py database create_tables
fi
}
# Create default admin user
cd /opt/redash/current
# TODO: make sure user created only once
# TODO: generate temp password and print to screen
sudo -u redash bin/run ./manage.py users create --admin --password admin "Admin" "admin"
setup_supervisor() {
wget -O /etc/supervisor/conf.d/redash.conf "$FILES_BASE_URL/supervisord.conf"
service supervisor restart
}
# Create Redash read only pg user & setup data source
pg_user_exists=0
sudo -u postgres psql postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='redash_reader'" | grep -q 1 || pg_user_exists=$?
if [ $pg_user_exists -ne 0 ]; then
echo "Creating redash reader postgres user."
REDASH_READER_PASSWORD=$(pwgen -1)
sudo -u postgres psql -c "CREATE ROLE redash_reader WITH PASSWORD '$REDASH_READER_PASSWORD' NOCREATEROLE NOCREATEDB NOSUPERUSER LOGIN"
sudo -u redash psql -c "grant select(id,name,type) ON data_sources to redash_reader;" redash
sudo -u redash psql -c "grant select(id,name) ON users to redash_reader;" redash
sudo -u redash psql -c "grant select on alerts, alert_subscriptions, groups, events, queries, dashboards, widgets, visualizations, query_results to redash_reader;" redash
setup_nginx() {
rm /etc/nginx/sites-enabled/default
wget -O /etc/nginx/sites-available/redash "$FILES_BASE_URL/nginx_redash_site"
ln -nfs /etc/nginx/sites-available/redash /etc/nginx/sites-enabled/redash
service nginx restart
}
cd /opt/redash/current
sudo -u redash bin/run ./manage.py ds new "Redash Metadata" --type "pg" --options "{\"user\": \"redash_reader\", \"password\": \"$REDASH_READER_PASSWORD\", \"host\": \"localhost\", \"dbname\": \"redash\"}"
fi
# Pip requirements for all data source types
cd /opt/redash/current
pip install -r requirements_all_ds.txt
# Setup supervisord + sysv init startup script
sudo -u redash mkdir -p /opt/redash/supervisord
pip install supervisor==3.1.2 # TODO: move to requirements.txt
# Get supervisord startup script
sudo -u redash wget -O /opt/redash/supervisord/supervisord.conf $FILES_BASE_URL"supervisord.conf"
wget -O /etc/init.d/redash_supervisord $FILES_BASE_URL"redash_supervisord_init"
add_service "redash_supervisord"
# Nginx setup
rm /etc/nginx/sites-enabled/default
wget -O /etc/nginx/sites-available/redash $FILES_BASE_URL"nginx_redash_site"
ln -nfs /etc/nginx/sites-available/redash /etc/nginx/sites-enabled/redash
service nginx restart
# Hotfix: missing query snippets table:
cd /opt/redash/current
sudo -u redash bin/run python -c "from redash import models; models.QuerySnippet.create_table()"
verify_root
install_system_packages
create_redash_user
create_directories
extract_redash_sources
install_python_packages
create_database
setup_supervisor
setup_nginx

View File

@@ -1,4 +1,3 @@
export REDASH_LOG_LEVEL="INFO"
export REDASH_REDIS_URL=redis://localhost:6379/0
export REDASH_DATABASE_URL="postgresql://redash"
export REDASH_COOKIE_SECRET=veryverysecret
export REDASH_DATABASE_URL="postgresql:///redash"

View File

@@ -1,162 +0,0 @@
#!/bin/sh
# script to add apt.postgresql.org to sources.list
# from command line
CODENAME="$1"
# lsb_release is the best interface, but not always available
if [ -z "$CODENAME" ]; then
CODENAME=$(lsb_release -cs 2>/dev/null)
fi
# parse os-release (unreliable, does not work on Ubuntu)
if [ -z "$CODENAME" -a -f /etc/os-release ]; then
. /etc/os-release
# Debian: VERSION="7.0 (wheezy)"
# Ubuntu: VERSION="13.04, Raring Ringtail"
CODENAME=$(echo $VERSION | sed -ne 's/.*(\(.*\)).*/\1/')
fi
# guess from sources.list
if [ -z "$CODENAME" ]; then
CODENAME=$(grep '^deb ' /etc/apt/sources.list | head -n1 | awk '{ print $3 }')
fi
# complain if no result yet
if [ -z "$CODENAME" ]; then
cat <<EOF
Could not determine the distribution codename. Please report this as a bug to
pgsql-pkg-debian@postgresql.org. As a workaround, you can call this script with
the proper codename as parameter, e.g. "$0 squeeze".
EOF
exit 1
fi
# errors are non-fatal above
set -e
cat <<EOF
This script will enable the PostgreSQL APT repository on apt.postgresql.org on
your system. The distribution codename used will be $CODENAME-pgdg.
EOF
case $CODENAME in
# known distributions
sid|wheezy|squeeze|lenny|etch) ;;
precise|lucid) ;;
*) # unknown distribution, verify on the web
DISTURL="http://apt.postgresql.org/pub/repos/apt/dists/"
if [ -x /usr/bin/curl ]; then
DISTHTML=$(curl -s $DISTURL)
elif [ -x /usr/bin/wget ]; then
DISTHTML=$(wget --quiet -O - $DISTURL)
fi
if [ "$DISTHTML" ]; then
if ! echo "$DISTHTML" | grep -q "$CODENAME-pgdg"; then
cat <<EOF
Your system is using the distribution codename $CODENAME, but $CODENAME-pgdg
does not seem to be a valid distribution on
$DISTURL
We abort the installation here. If you want to use a distribution different
from your system, you can call this script with an explicit codename, e.g.
"$0 precise".
Specifically, if you are using a non-LTS Ubuntu release, refer to
https://wiki.postgresql.org/wiki/Apt/FAQ#I_am_using_a_non-LTS_release_of_Ubuntu
For more information, refer to https://wiki.postgresql.org/wiki/Apt
or ask on the mailing list for assistance: pgsql-pkg-debian@postgresql.org
EOF
exit 1
fi
fi
;;
esac
echo "Writing /etc/apt/sources.list.d/pgdg.list ..."
cat > /etc/apt/sources.list.d/pgdg.list <<EOF
deb http://apt.postgresql.org/pub/repos/apt/ $CODENAME-pgdg main
#deb-src http://apt.postgresql.org/pub/repos/apt/ $CODENAME-pgdg main
EOF
echo "Importing repository signing key ..."
KEYRING="/etc/apt/trusted.gpg.d/apt.postgresql.org.gpg"
test -e $KEYRING || touch $KEYRING
apt-key --keyring $KEYRING add - <<EOF
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1
mQINBE6XR8IBEACVdDKT2HEH1IyHzXkb4nIWAY7echjRxo7MTcj4vbXAyBKOfjja
UrBEJWHN6fjKJXOYWXHLIYg0hOGeW9qcSiaa1/rYIbOzjfGfhE4x0Y+NJHS1db0V
G6GUj3qXaeyqIJGS2z7m0Thy4Lgr/LpZlZ78Nf1fliSzBlMo1sV7PpP/7zUO+aA4
bKa8Rio3weMXQOZgclzgeSdqtwKnyKTQdXY5MkH1QXyFIk1nTfWwyqpJjHlgtwMi
c2cxjqG5nnV9rIYlTTjYG6RBglq0SmzF/raBnF4Lwjxq4qRqvRllBXdFu5+2pMfC
IZ10HPRdqDCTN60DUix+BTzBUT30NzaLhZbOMT5RvQtvTVgWpeIn20i2NrPWNCUh
hj490dKDLpK/v+A5/i8zPvN4c6MkDHi1FZfaoz3863dylUBR3Ip26oM0hHXf4/2U
A/oA4pCl2W0hc4aNtozjKHkVjRx5Q8/hVYu+39csFWxo6YSB/KgIEw+0W8DiTII3
RQj/OlD68ZDmGLyQPiJvaEtY9fDrcSpI0Esm0i4sjkNbuuh0Cvwwwqo5EF1zfkVj
Tqz2REYQGMJGc5LUbIpk5sMHo1HWV038TWxlDRwtOdzw08zQA6BeWe9FOokRPeR2
AqhyaJJwOZJodKZ76S+LDwFkTLzEKnYPCzkoRwLrEdNt1M7wQBThnC5z6wARAQAB
tBxQb3N0Z3JlU1FMIERlYmlhbiBSZXBvc2l0b3J5iQI9BBMBCAAnAhsDBQsJCAcD
BRUKCQgLBRYCAwEAAh4BAheABQJS6RUZBQkOhCctAAoJEH/MfUaszEz4zmQP/2ad
HtuaXL5Xu3C3NGLha/aQb9iSJC8z5vN55HMCpsWlmslCBuEr+qR+oZvPkvwh0Io/
8hQl/qN54DMNifRwVL2n2eG52yNERie9BrAMK2kNFZZCH4OxlMN0876BmDuNq2U6
7vUtCv+pxT+g9R1LvlPgLCTjS3m+qMqUICJ310BMT2cpYlJx3YqXouFkdWBVurI0
pGU/+QtydcJALz5eZbzlbYSPWbOm2ZSS2cLrCsVNFDOAbYLtUn955yXB5s4rIscE
vTzBxPgID1iBknnPzdu2tCpk07yJleiupxI1yXstCtvhGCbiAbGFDaKzhgcAxSIX
0ZPahpaYLdCkcoLlfgD+ar4K8veSK2LazrhO99O0onRG0p7zuXszXphO4E/WdbTO
yDD35qCqYeAX6TaB+2l4kIdVqPgoXT/doWVLUK2NjZtd3JpMWI0OGYDFn2DAvgwP
xqKEoGTOYuoWKssnwLlA/ZMETegak27gFAKfoQlmHjeA/PLC2KRYd6Wg2DSifhn+
2MouoE4XFfeekVBQx98rOQ5NLwy/TYlsHXm1n0RW86ETN3chj/PPWjsi80t5oepx
82azRoVu95LJUkHpPLYyqwfueoVzp2+B2hJU2Rg7w+cJq64TfeJG8hrc93MnSKIb
zTvXfdPtvYdHhhA2LYu4+5mh5ASlAMJXD7zIOZt2iEYEEBEIAAYFAk6XSO4ACgkQ
xa93SlhRC1qmjwCg9U7U+XN7Gc/dhY/eymJqmzUGT/gAn0guvoX75Y+BsZlI6dWn
qaFU6N8HiQIcBBABCAAGBQJOl0kLAAoJEExaa6sS0qeuBfEP/3AnLrcKx+dFKERX
o4NBCGWr+i1CnowupKS3rm2xLbmiB969szG5TxnOIvnjECqPz6skK3HkV3jTZaju
v3sR6M2ItpnrncWuiLnYcCSDp9TEMpCWzTEgtrBlKdVuTNTeRGILeIcvqoZX5w+u
i0eBvvbeRbHEyUsvOEnYjrqoAjqUJj5FUZtR1+V9fnZp8zDgpOSxx0LomnFdKnhj
uyXAQlRCA6/roVNR9ruRjxTR5ubteZ9ubTsVYr2/eMYOjQ46LhAgR+3Alblu/WHB
MR/9F9//RuOa43R5Sjx9TiFCYol+Ozk8XRt3QGweEH51YkSYY3oRbHBb2Fkql6N6
YFqlLBL7/aiWnNmRDEs/cdpo9HpFsbjOv4RlsSXQfvvfOayHpT5nO1UQFzoyMVpJ
615zwmQDJT5Qy7uvr2eQYRV9AXt8t/H+xjQsRZCc5YVmeAo91qIzI/tA2gtXik49
6yeziZbfUvcZzuzjjxFExss4DSAwMgorvBeIbiz2k2qXukbqcTjB2XqAlZasd6Ll
nLXpQdqDV3McYkP/MvttWh3w+J/woiBcA7yEI5e3YJk97uS6+ssbqLEd0CcdT+qz
+Waw0z/ZIU99Lfh2Qm77OT6vr//Zulw5ovjZVO2boRIcve7S97gQ4KC+G/+QaRS+
VPZ67j5UMxqtT/Y4+NHcQGgwF/1iiQI9BBMBCAAnAhsDBQsJCAcDBRUKCQgLBRYC
AwEAAh4BAheABQJQeSssBQkDwxbfAAoJEH/MfUaszEz4bgkP/0AI0UgDgkNNqplA
IpE/pkwem2jgGpJGKurh2xDu6j2ZL+BPzPhzyCeMHZwTXkkI373TXGQQP8dIa+RD
HAZ3iijw4+ISdKWpziEUJjUk04UMPTlN+dYJt2EHLQDD0VLtX0yQC/wLmVEH/REp
oclbVjZR/+ehwX2IxOIlXmkZJDSycl975FnSUjMAvyzty8P9DN0fIrQ7Ju+BfMOM
TnUkOdp0kRUYez7pxbURJfkM0NxAP1geACI91aISBpFg3zxQs1d3MmUIhJ4wHvYB
uaR7Fx1FkLAxWddre/OCYJBsjucE9uqc04rgKVjN5P/VfqNxyUoB+YZ+8Lk4t03p
RBcD9XzcyOYlFLWXbcWxTn1jJ2QMqRIWi5lzZIOMw5B+OK9LLPX0dAwIFGr9WtuV
J2zp+D4CBEMtn4Byh8EaQsttHeqAkpZoMlrEeNBDz2L7RquPQNmiuom15nb7xU/k
7PGfqtkpBaaGBV9tJkdp7BdH27dZXx+uT+uHbpMXkRrXliHjWpAw+NGwADh/Pjmq
ExlQSdgAiXy1TTOdzxKH7WrwMFGDK0fddKr8GH3f+Oq4eOoNRa6/UhTCmBPbryCS
IA7EAd0Aae9YaLlOB+eTORg/F1EWLPm34kKSRtae3gfHuY2cdUmoDVnOF8C9hc0P
bL65G4NWPt+fW7lIj+0+kF19s2PviQI9BBMBCAAnAhsDBQsJCAcDBRUKCQgLBRYC
AwEAAh4BAheABQJRKm2VBQkINsBBAAoJEH/MfUaszEz4RTEP/1sQHyjHaUiAPaCA
v8jw/3SaWP/g8qLjpY6ROjLnDMvwKwRAoxUwcIv4/TWDOMpwJN+CJIbjXsXNYvf9
OX+UTOvq4iwi4ADrAAw2xw+Jomc6EsYla+hkN2FzGzhpXfZFfUsuphjY3FKL+4hX
H+R8ucNwIz3yrkfc17MMn8yFNWFzm4omU9/JeeaafwUoLxlULL2zY7H3+QmxCl0u
6t8VvlszdEFhemLHzVYRY0Ro/ISrR78CnANNsMIy3i11U5uvdeWVCoWV1BXNLzOD
4+BIDbMB/Do8PQCWiliSGZi8lvmj/sKbumMFQonMQWOfQswTtqTyQ3yhUM1LaxK5
PYq13rggi3rA8oq8SYb/KNCQL5pzACji4TRVK0kNpvtxJxe84X8+9IB1vhBvF/Ji
/xDd/3VDNPY+k1a47cON0S8Qc8DA3mq4hRfcgvuWy7ZxoMY7AfSJOhleb9+PzRBB
n9agYgMxZg1RUWZazQ5KuoJqbxpwOYVFja/stItNS4xsmi0lh2I4MNlBEDqnFLUx
SvTDc22c3uJlWhzBM/f2jH19uUeqm4jaggob3iJvJmK+Q7Ns3WcfhuWwCnc1+58d
iFAMRUCRBPeFS0qd56QGk1r97B6+3UfLUslCfaaA8IMOFvQSHJwDO87xWGyxeRTY
IIP9up4xwgje9LB7fMxsSkCDTHOk
=s3DI
-----END PGP PUBLIC KEY BLOCK-----
EOF
echo "Running apt-get update ..."
apt-get update
cat <<EOF
You can now start installing packages from apt.postgresql.org.
Have a look at https://wiki.postgresql.org/wiki/Apt for more information;
most notably the FAQ at https://wiki.postgresql.org/wiki/Apt/FAQ
EOF

View File

@@ -1,129 +0,0 @@
#!/bin/sh
# /etc/init.d/redash_supervisord
### BEGIN INIT INFO
# Provides: supervisord
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: process supervisor
### END INIT INFO
# Author: Ron DuPlain <ron.duplain@gmail.com>
# Do NOT "set -e"
# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin
NAME=supervisord
DESC="process supervisor"
DAEMON=/usr/local/bin/$NAME
DAEMON_ARGS="--configuration /opt/redash/supervisord/supervisord.conf "
PIDFILE=/opt/redash/supervisord/supervisord.pid
SCRIPTNAME=/etc/init.d/redash_supervisord
USER=redash
# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0
# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME
# Load the VERBOSE setting and other rcS variables
. /lib/init/vars.sh
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions
#
# Function that starts the daemon/service
#
do_start()
{
# Return
# 0 if daemon has been started
# 1 if daemon was already running
# 2 if daemon could not be started
start-stop-daemon --start --quiet --pidfile $PIDFILE --user $USER --chuid $USER --exec $DAEMON --test > /dev/null \
|| return 1
start-stop-daemon --start --quiet --pidfile $PIDFILE --user $USER --chuid $USER --exec $DAEMON -- \
$DAEMON_ARGS \
|| return 2
# Add code here, if necessary, that waits for the process to be ready
# to handle requests from services started subsequently which depend
# on this one. As a last resort, sleep for some time.
}
#
# Function that stops the daemon/service
#
do_stop()
{
# Return
# 0 if daemon has been stopped
# 1 if daemon was already stopped
# 2 if daemon could not be stopped
# other if a failure occurred
start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --user $USER --chuid $USER --name $NAME
RETVAL="$?"
[ "$RETVAL" = 2 ] && return 2
# Wait for children to finish too if this is a daemon that forks
# and if the daemon is only ever run from this initscript.
# If the above conditions are not satisfied then add some other code
# that waits for the process to drop all resources that could be
# needed by services started subsequently. A last resort is to
# sleep for some time.
start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --user $USER --chuid $USER --exec $DAEMON
[ "$?" = 2 ] && return 2
# Many daemons don't delete their pidfiles when they exit.
rm -f $PIDFILE
return "$RETVAL"
}
case "$1" in
start)
[ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
do_start
case "$?" in
0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
esac
;;
stop)
[ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
do_stop
case "$?" in
0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
esac
;;
status)
status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
;;
restart)
log_daemon_msg "Restarting $DESC" "$NAME"
do_stop
case "$?" in
0|1)
do_start
case "$?" in
0) log_end_msg 0 ;;
1) log_end_msg 1 ;; # Old process is still running
*) log_end_msg 1 ;; # Failed to start
esac
;;
*)
# Failed to stop
log_end_msg 1
;;
esac
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|status|restart}" >&2
exit 3
;;
esac
:

View File

@@ -1,785 +0,0 @@
## Generated by install_server.sh ##
# Redis configuration file example
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis server but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
# from admin or Redis Sentinel. Since Redis always uses the last processed
# line as value of a configuration directive, you'd better put includes
# at the beginning of this file to avoid overwriting config change at runtime.
#
# If instead you are interested in using includes to override configuration
# options, it is better to use include as the last line.
#
# include /path/to/local.conf
# include /path/to/other.conf
################################ GENERAL #####################################
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize yes
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
# default. You can specify a custom pid file location here.
pidfile /var/run/redis_6379.pid
# Accept connections on the specified port, default is 6379.
# If port 0 is specified Redis will not listen on a TCP socket.
port 6379
# TCP listen() backlog.
#
# In high requests-per-second environments you need an high backlog in order
# to avoid slow clients connections issues. Note that the Linux kernel
# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
# in order to get the desired effect.
tcp-backlog 511
# By default Redis listens for connections from all the network interfaces
# available on the server. It is possible to listen to just one or multiple
# interfaces using the "bind" configuration directive, followed by one or
# more IP addresses.
#
# Examples:
#
# bind 192.168.1.100 10.0.0.1
bind 127.0.0.1
# Specify the path for the Unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
# unixsocketperm 700
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0
# TCP keepalive.
#
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
# of communication. This is useful for two reasons:
#
# 1) Detect dead peers.
# 2) Take the connection alive from the point of view of network
# equipment in the middle.
#
# On Linux, the specified value (in seconds) is the period used to send ACKs.
# Note that to close the connection the double of the time is needed.
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 60 seconds.
tcp-keepalive 0
# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice
# Specify the log file name. Also the empty string can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile /var/log/redis_6379.log
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
# syslog-enabled no
# Specify the syslog identity.
# syslog-ident redis
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16
################################ SNAPSHOTTING ################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving at all commenting all the "save" lines.
#
# It is also possible to remove all the previously configured save
# points by adding a save directive with a single empty string argument
# like in the following example:
#
# save ""
save 900 1
save 300 10
save 60 10000
# By default Redis will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process will start working again Redis will
# automatically allow writes again.
#
# However if you have setup your proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# continue to work as usual even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes
# Compress string objects using LZF when dump .rdb databases?
# For default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
# This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
# for maximum performances.
#
# RDB files created with checksum disabled have a checksum of zero that will
# tell the loading code to skip the check.
rdbchecksum yes
# The filename where to dump the DB
dbfilename dump.rdb
# The working directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir /var/lib/redis/6379
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. A few things to understand ASAP about Redis replication.
#
# 1) Redis replication is asynchronous, but you can configure a master to
# stop accepting writes if it appears to be not connected with at least
# a given number of slaves.
# 2) Redis slaves are able to perform a partial resynchronization with the
# master if the replication link is lost for a relatively small amount of
# time. You may want to configure the replication backlog size (see the next
# sections of this file) with a sensible value depending on your needs.
# 3) Replication is automatic and does not need user intervention. After a
# network partition slaves automatically try to reconnect to masters
# and resynchronize with them.
#
# slaveof <masterip> <masterport>
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>
# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all the kind of commands
# but to INFO and SLAVEOF.
#
slave-serve-stale-data yes
# You can configure a slave instance to accept writes or not. Writing against
# a slave instance may be useful to store some ephemeral data (because data
# written on a slave will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
# Since Redis 2.6 by default slaves are read-only.
#
# Note: read only slaves are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
# Still a read only slave exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
# security of read only slaves using 'rename-command' to shadow all the
# administrative / dangerous commands.
slave-read-only yes
# Slaves send PINGs to server in a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10
# The following option sets the replication timeout for:
#
# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
# 2) Master timeout from the point of view of slaves (data, pings).
# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
# every time there is low traffic between the master and the slave.
#
# repl-timeout 60
# Disable TCP_NODELAY on the slave socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# less bandwidth to send data to slaves. But this can add a delay for
# the data to appear on the slave side, up to 40 milliseconds with
# Linux kernels using a default configuration.
#
# If you select "no" the delay for data to appear on the slave side will
# be reduced but more bandwidth will be used for replication.
#
# By default we optimize for low latency, but in very high traffic conditions
# or when the master and slaves are many hops away, turning this to "yes" may
# be a good idea.
repl-disable-tcp-nodelay no
# Set the replication backlog size. The backlog is a buffer that accumulates
# slave data when slaves are disconnected for some time, so that when a slave
# wants to reconnect again, often a full resync is not needed, but a partial
# resync is enough, just passing the portion of data the slave missed while
# disconnected.
#
# The biggest the replication backlog, the longer the time the slave can be
# disconnected and later be able to perform a partial resynchronization.
#
# The backlog is only allocated once there is at least a slave connected.
#
# repl-backlog-size 1mb
# After a master has no longer connected slaves for some time, the backlog
# will be freed. The following option configures the amount of seconds that
# need to elapse, starting from the time the last slave disconnected, for
# the backlog buffer to be freed.
#
# A value of 0 means to never release the backlog.
#
# repl-backlog-ttl 3600
# The slave priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100
# It is possible for a master to stop accepting writes if there are less than
# N slaves connected, having a lag less or equal than M seconds.
#
# The N slaves need to be in "online" state.
#
# The lag in seconds, that must be <= the specified value, is calculated from
# the last ping received from the slave, that is usually sent every second.
#
# This option does not GUARANTEES that N replicas will accept the write, but
# will limit the window of exposure for lost writes in case not enough slaves
# are available, to the specified number of seconds.
#
# For example to require at least 3 slaves with a lag <= 10 seconds use:
#
# min-slaves-to-write 3
# min-slaves-max-lag 10
#
# Setting one or the other to 0 disables the feature.
#
# By default min-slaves-to-write is set to 0 (feature disabled) and
# min-slaves-max-lag is set to 10.
################################## SECURITY ###################################
# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password, otherwise it will be very easy to break.
#
# requirepass foobared
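#
# As an illustrative sketch only (the value below is a made-up example, not a
# recommendation), a stronger password might look like:
#
# requirepass 9f2a6c31e8d4b07f5a1c3e6d8b2f4a7c
#
# Clients would then authenticate before issuing other commands, for example
# from redis-cli: AUTH 9f2a6c31e8d4b07f5a1c3e6d8b2f4a7c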
# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to slaves may cause problems.
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all new connections, sending
# the error 'max number of clients reached'.
#
# maxclients 10000
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU cache, or to set
# a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves is subtracted
# from the used memory count, so that network problems / resyncs do not
# trigger a loop where keys are evicted, the slave output buffers fill up
# with the DELs of the evicted keys, and this in turn triggers the eviction
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have slaves attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for slave
# output buffers (but this is not needed if the policy is 'noeviction').
#
# maxmemory <bytes>
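#
# For illustration only (the cap below is an arbitrary example value): the
# limit can be given in plain bytes or with a unit suffix, and can also be
# changed on a running server with CONFIG SET:
#
# maxmemory 2gb
# redis-cli CONFIG SET maxmemory 2gb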
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select among five behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key according to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't evict anything, just return an error on write operations
#
# Note: with any of the above policies, Redis will return an error on write
# operations when there are no suitable keys for eviction.
#
# At the date of writing these commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
# getset mset msetnx exec sort
#
# The default is:
#
# maxmemory-policy volatile-lru
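#
# A minimal sketch for the LRU cache use case mentioned above (both values are
# arbitrary examples): a pure cache instance would typically combine a memory
# cap with the allkeys-lru policy.
#
# maxmemory 2gb
# maxmemory-policy allkeys-lru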
# LRU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can also tune the sample size
# used for the check. For instance, by default Redis will check three keys and
# pick the one that was used least recently; you can change the sample size
# using the following configuration directive.
#
# maxmemory-samples 3
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the Redis process or
# a power outage may result in a few minutes of lost writes (depending on
# the configured save points).
#
# The Append Only File is an alternative persistence mode that provides
# much better durability. For instance using the default data fsync policy
# (see later in the config file) Redis can lose just one second of writes in a
# dramatic event like a server power outage, or a single write if something
# wrong with the Redis process itself happens, but the operating system is
# still running correctly.
#
# AOF and RDB persistence can be enabled at the same time without problems.
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# with the best durability guarantees.
#
# Please check http://redis.io/topics/persistence for more information.
appendonly no
# The name of the append only file (default: "appendonly.aof")
appendfilename "appendonly.aof"
# The fsync() call tells the Operating System to actually write data to disk
# instead of waiting for more data in the output buffer. Some OSes will really
# flush data to disk, while others will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, Safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec", as that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# For more details please check the following article:
# http://antirez.com/post/redis-persistence-demystified.html
#
# If unsure, use "everysec".
# appendfsync always
appendfsync everysec
# appendfsync no
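# As a side note (a sketch, not part of the stock configuration): the fsync
# policy can also be changed on a running server without a restart, e.g.:
#
# redis-cli CONFIG SET appendfsync no
# redis-cli CONFIG GET appendfsync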
# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size grows
# beyond the base size by more than the specified percentage, the rewrite is
# triggered. You also need to specify a minimal size for the AOF file to be
# rewritten; this is useful to avoid rewriting the AOF file even if the
# percentage increase is reached but the file is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
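# Worked example of the two directives above (numbers are illustrative): with
# auto-aof-rewrite-percentage 100 and a base size of 100mb recorded after the
# last rewrite, the next automatic BGREWRITEAOF triggers once the AOF reaches
# about 200mb (100% growth). With a base size of 20mb, growing to 40mb would
# match the percentage, but no rewrite happens until the file is at least
# auto-aof-rewrite-min-size (64mb here).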
# An AOF file may be found to be truncated at the end during the Redis
# startup process, when the AOF data gets loaded back into memory.
# This may happen when the system where Redis is running
# crashes, especially when an ext4 filesystem is mounted without the
# data=ordered option (however this can't happen when Redis itself
# crashes or aborts but the operating system still works correctly).
#
# Redis can either exit with an error when this happens, or load as much
# data as possible (the default now) and start if the AOF file is found
# to be truncated at the end. The following option controls this behavior.
#
# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
# the Redis server starts emitting a log to inform the user of the event.
# Otherwise if the option is set to no, the server aborts with an error
# and refuses to start. When the option is set to no, the user is required
# to fix the AOF file using the "redis-check-aof" utility before restarting
# the server.
#
# Note that if the AOF file is found to be corrupted in the middle, the
# server will still exit with an error. This option only applies when Redis
# tries to read more data from the AOF file but not enough bytes are found.
aof-load-truncated yes
################################ LUA SCRIPTING ###############################
# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceeds the maximum execution time, only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that has not yet called any write commands. The second
# is the only way to shut down the server in case a write command was already
# issued by the script but the user doesn't want to wait for the natural
# termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.
# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000
# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
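# Sketch of how the slow log is typically inspected at runtime (standard Redis
# commands, shown here only as an illustration):
#
# redis-cli SLOWLOG GET 10     # fetch the 10 most recent slow entries
# redis-cli SLOWLOG LEN        # number of entries currently stored
# redis-cli SLOWLOG RESET      # discard all entries and reclaim memory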
################################ LATENCY MONITOR ##############################
# The Redis latency monitoring subsystem samples different operations
# at runtime in order to collect data related to possible sources of
# latency of a Redis instance.
#
# Via the LATENCY command this information is available to the user, who can
# print graphs and obtain reports.
#
# The system only logs operations that were performed in a time equal to or
# greater than the amount of milliseconds specified via the
# latency-monitor-threshold configuration directive. When its value is set
# to zero, the latency monitor is turned off.
#
# By default latency monitoring is disabled since it is mostly not needed
# if you don't have latency issues, and collecting data has a performance
# impact that, while very small, can be measured under heavy load. Latency
# monitoring can easily be enabled at runtime using the command
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
latency-monitor-threshold 0
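# Sketch of enabling and querying the latency monitor at runtime (the 100 ms
# threshold is an arbitrary example):
#
# redis-cli CONFIG SET latency-monitor-threshold 100
# redis-cli LATENCY LATEST     # latest and maximum latency observed per event
# redis-cli LATENCY RESET      # clear the collected samples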
############################# Event notification ##############################
# Redis can notify Pub/Sub clients about events happening in the key space.
# This feature is documented at http://redis.io/topics/notifications
#
# For instance if keyspace event notifications are enabled, and a client
# performs a DEL operation on the key "foo" stored in database 0, two
# messages will be published via Pub/Sub:
#
# PUBLISH __keyspace@0__:foo del
# PUBLISH __keyevent@0__:del foo
#
# It is possible to select the events that Redis will notify among a set
# of classes. Every class is identified by a single character:
#
# K Keyspace events, published with __keyspace@<db>__ prefix.
# E Keyevent events, published with __keyevent@<db>__ prefix.
# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
# $ String commands
# l List commands
# s Set commands
# h Hash commands
# z Sorted set commands
# x Expired events (events generated every time a key expires)
# e Evicted events (events generated when a key is evicted for maxmemory)
# A Alias for g$lshzxe, so that the "AKE" string means all the events.
#
# The "notify-keyspace-events" takes as argument a string that is composed
# by zero or multiple characters. The empty string means that notifications
# are disabled at all.
#
# Example: to enable list and generic events, from the point of view of the
# event name, use:
#
# notify-keyspace-events Elg
#
# Example 2: to get the stream of the expired keys subscribing to channel
# name __keyevent@0__:expired use:
#
# notify-keyspace-events Ex
#
# By default all notifications are disabled because most users don't need
# this feature and the feature has some overhead. Note that if you don't
# specify at least one of K or E, no events will be delivered.
notify-keyspace-events ""
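#
# Sketch of the expired-keys example above, end to end (assumes database 0):
#
# notify-keyspace-events "Ex"
#
# and, in a separate client:
#
# redis-cli psubscribe '__keyevent@0__:expired'
#
# Every key that expires in database 0 is then delivered as a message on that
# channel, with the key name as the payload.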
############################### ADVANCED CONFIG ###############################
# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64
# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit on the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512
# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
# HyperLogLog sparse representation bytes limit. The limit includes the
# 16 byte header. When a HyperLogLog using the sparse representation crosses
# this limit, it is converted into the dense representation.
#
# A value greater than 16000 is totally useless, since at that point the
# dense representation is more memory efficient.
#
# The suggested value is ~ 3000 in order to have the benefits of
# the space efficient encoding without slowing down too much PFADD,
# which is O(N) with the sparse encoding. The value can be raised to
# ~ 10000 when CPU is not a concern, but space is, and the data set is
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run against a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never completed and some extra memory stays
# in use by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply form time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients including MONITOR clients
# slave -> slave clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reaches 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously overcomes
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can be read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard and the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
# Redis calls an internal function to perform many background tasks, like
# closing connections of clients that have timed out, purging expired keys
# that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10
# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes

View File

@@ -1,66 +0,0 @@
#!/bin/sh
EXEC=/usr/local/bin/redis-server
CLIEXEC=/usr/local/bin/redis-cli
PIDFILE=/var/run/redis_6379.pid
CONF="/etc/redis/6379.conf"
REDISPORT="6379"
###############
# SysV Init Information
# chkconfig: - 58 74
# description: redis_6379 is the redis daemon.
### BEGIN INIT INFO
# Provides: redis_6379
# Required-Start: $network $local_fs $remote_fs
# Required-Stop: $network $local_fs $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Should-Start: $syslog $named
# Should-Stop: $syslog $named
# Short-Description: start and stop redis_6379
# Description: Redis daemon
### END INIT INFO
case "$1" in
start)
if [ -f $PIDFILE ]
then
echo "$PIDFILE exists, process is already running or crashed"
else
echo "Starting Redis server..."
$EXEC $CONF
fi
;;
stop)
if [ ! -f $PIDFILE ]
then
echo "$PIDFILE does not exist, process is not running"
else
PID=$(cat $PIDFILE)
echo "Stopping ..."
$CLIEXEC -p $REDISPORT shutdown
while [ -x /proc/${PID} ]
do
echo "Waiting for Redis to shutdown ..."
sleep 1
done
echo "Redis stopped"
fi
;;
status)
if [ ! -f $PIDFILE ]
then
echo 'Redis is not running'
else
echo "Redis is running ($(<$PIDFILE))"
fi
;;
restart)
$0 stop
$0 start
;;
*)
echo "Please use start, stop, restart or status as first argument"
;;
esac

View File

@@ -1,24 +1,14 @@
[supervisord]
nodaemon=false
logfile=/opt/redash/logs/supervisord.log
pidfile=/opt/redash/supervisord/supervisord.pid
directory=/opt/redash/current
[inet_http_server]
port = 127.0.0.1:9001
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[program:redash_server]
command=/opt/redash/current/bin/run gunicorn -b 127.0.0.1:5000 --name redash -w 4 --max-requests 1000 redash.wsgi:app
directory=/opt/redash/current
process_name=redash_server
user=redash
numprocs=1
priority=999
autostart=true
autorestart=true
stdout_logfile=/opt/redash/logs/api.log
stderr_logfile=/opt/redash/logs/api_error.log
# There are two queue types here: one for ad-hoc queries, and one for the refresh of scheduled queries
# (note that "scheduled_queries" appears only in the queue list of "redash_celery_scheduled").
@@ -26,20 +16,18 @@ stderr_logfile=/opt/redash/logs/api_error.log
[program:redash_celery]
command=/opt/redash/current/bin/run celery worker --app=redash.worker --beat -c2 -Qqueries,celery --maxtasksperchild=10 -Ofair
directory=/opt/redash/current
process_name=redash_celery
user=redash
numprocs=1
priority=999
autostart=true
autorestart=true
stdout_logfile=/opt/redash/logs/celery.log
stderr_logfile=/opt/redash/logs/celery_error.log
[program:redash_celery_scheduled]
command=/opt/redash/current/bin/run celery worker --app=redash.worker -c2 -Qscheduled_queries --maxtasksperchild=10 -Ofair
directory=/opt/redash/current
process_name=redash_celery_scheduled
user=redash
numprocs=1
priority=999
autostart=true
autorestart=true
stdout_logfile=/opt/redash/logs/celery.log
stderr_logfile=/opt/redash/logs/celery_error.log

View File

@@ -69,7 +69,7 @@ api_key_factory = ModelFactory(redash.models.ApiKey,
query_factory = ModelFactory(redash.models.Query,
name='Query',
description='',
query_text='SELECT 1',
query_text=u'SELECT 1',
user=user_factory.create,
is_archived=False,
is_draft=False,

View File

@@ -1,11 +1,14 @@
from tests import BaseTestCase
from redash import redis_connection
from redash.tasks.queries import QueryTaskTracker, enqueue_query, execute_query
from unittest import TestCase
from mock import MagicMock
from collections import namedtuple
import uuid
import mock
from tests import BaseTestCase
from redash import redis_connection, models
from redash.query_runner.pg import PostgreSQL
from redash.tasks.queries import QueryTaskTracker, enqueue_query, execute_query
class TestPrune(TestCase):
def setUp(self):
@@ -45,11 +48,11 @@ def gen_hash(*args, **kwargs):
class TestEnqueueTask(BaseTestCase):
def test_multiple_enqueue_of_same_query(self):
query = self.factory.create_query()
execute_query.apply_async = MagicMock(side_effect=gen_hash)
execute_query.apply_async = mock.MagicMock(side_effect=gen_hash)
enqueue_query(query.query_text, query.data_source, True, {'Username': 'Arik', 'Query ID': query.id})
enqueue_query(query.query_text, query.data_source, True, {'Username': 'Arik', 'Query ID': query.id})
enqueue_query(query.query_text, query.data_source, True, {'Username': 'Arik', 'Query ID': query.id})
enqueue_query(query.query_text, query.data_source, query.user_id, query, {'Username': 'Arik', 'Query ID': query.id})
enqueue_query(query.query_text, query.data_source, query.user_id, query, {'Username': 'Arik', 'Query ID': query.id})
enqueue_query(query.query_text, query.data_source, query.user_id, query, {'Username': 'Arik', 'Query ID': query.id})
self.assertEqual(1, execute_query.apply_async.call_count)
self.assertEqual(1, redis_connection.zcard(QueryTaskTracker.WAITING_LIST))
@@ -58,13 +61,84 @@ class TestEnqueueTask(BaseTestCase):
def test_multiple_enqueue_of_different_query(self):
query = self.factory.create_query()
execute_query.apply_async = MagicMock(side_effect=gen_hash)
execute_query.apply_async = mock.MagicMock(side_effect=gen_hash)
enqueue_query(query.query_text, query.data_source, True, {'Username': 'Arik', 'Query ID': query.id})
enqueue_query(query.query_text + '2', query.data_source, True, {'Username': 'Arik', 'Query ID': query.id})
enqueue_query(query.query_text + '3', query.data_source, True, {'Username': 'Arik', 'Query ID': query.id})
enqueue_query(query.query_text, query.data_source, query.user_id, None, {'Username': 'Arik', 'Query ID': query.id})
enqueue_query(query.query_text + '2', query.data_source, query.user_id, None, {'Username': 'Arik', 'Query ID': query.id})
enqueue_query(query.query_text + '3', query.data_source, query.user_id, None, {'Username': 'Arik', 'Query ID': query.id})
self.assertEqual(3, execute_query.apply_async.call_count)
self.assertEqual(3, redis_connection.zcard(QueryTaskTracker.WAITING_LIST))
self.assertEqual(0, redis_connection.zcard(QueryTaskTracker.IN_PROGRESS_LIST))
self.assertEqual(0, redis_connection.zcard(QueryTaskTracker.DONE_LIST))
class QueryExecutorTests(BaseTestCase):
def test_success(self):
"""
``execute_query`` invokes the query runner and stores a query result.
"""
cm = mock.patch("celery.app.task.Context.delivery_info", {'routing_key': 'test'})
with cm, mock.patch.object(PostgreSQL, "run_query") as qr:
qr.return_value = ([1, 2], None)
result_id = execute_query("SELECT 1, 2", self.factory.data_source.id, {})
self.assertEqual(1, qr.call_count)
result = models.QueryResult.query.get(result_id)
self.assertEqual(result.data, '{1,2}')
def test_success_scheduled(self):
"""
Scheduled queries remember their latest results.
"""
cm = mock.patch("celery.app.task.Context.delivery_info",
{'routing_key': 'test'})
q = self.factory.create_query(query_text="SELECT 1, 2", schedule=300)
with cm, mock.patch.object(PostgreSQL, "run_query") as qr:
qr.return_value = ([1, 2], None)
result_id = execute_query(
"SELECT 1, 2",
self.factory.data_source.id, {},
scheduled_query_id=q.id)
q = models.Query.get_by_id(q.id)
self.assertEqual(q.schedule_failures, 0)
result = models.QueryResult.query.get(result_id)
self.assertEqual(q.latest_query_data, result)
def test_failure_scheduled(self):
"""
Scheduled queries that fail have their failure recorded.
"""
cm = mock.patch("celery.app.task.Context.delivery_info",
{'routing_key': 'test'})
q = self.factory.create_query(query_text="SELECT 1, 2", schedule=300)
with cm, mock.patch.object(PostgreSQL, "run_query") as qr:
qr.exception = ValueError("broken")
execute_query("SELECT 1, 2", self.factory.data_source.id, {}, scheduled_query_id=q.id)
self.assertEqual(q.schedule_failures, 1)
execute_query("SELECT 1, 2", self.factory.data_source.id, {}, scheduled_query_id=q.id)
q = models.Query.get_by_id(q.id)
self.assertEqual(q.schedule_failures, 2)
def test_success_after_failure(self):
"""
Query execution success resets the failure counter.
"""
cm = mock.patch("celery.app.task.Context.delivery_info",
{'routing_key': 'test'})
q = self.factory.create_query(query_text="SELECT 1, 2", schedule=300)
with cm, mock.patch.object(PostgreSQL, "run_query") as qr:
qr.exception = ValueError("broken")
execute_query("SELECT 1, 2",
self.factory.data_source.id, {},
scheduled_query_id=q.id)
q = models.Query.get_by_id(q.id)
self.assertEqual(q.schedule_failures, 1)
with cm, mock.patch.object(PostgreSQL, "run_query") as qr:
qr.return_value = ([1, 2], None)
execute_query("SELECT 1, 2",
self.factory.data_source.id, {},
scheduled_query_id=q.id)
q = models.Query.get_by_id(q.id)
self.assertEqual(q.schedule_failures, 0)

View File

@@ -1,109 +1,47 @@
import datetime
from mock import patch, call, ANY
from tests import BaseTestCase
from redash.utils import utcnow
from redash.tasks import refresh_queries
from redash.models import db
from redash.models import Query
# TODO: this test should be split into two:
# 1. tests for Query.outdated_queries method
# 2. test for the refresh_query task
class TestRefreshQueries(BaseTestCase):
class TestRefreshQuery(BaseTestCase):
def test_enqueues_outdated_queries(self):
query = self.factory.create_query(schedule="60")
retrieved_at = utcnow() - datetime.timedelta(minutes=10)
query_result = self.factory.create_query_result(retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
db.session.add(query)
with patch('redash.tasks.queries.enqueue_query') as add_job_mock:
"""
refresh_queries() launches an execution task for each query returned
from Query.outdated_queries().
"""
query1 = self.factory.create_query()
query2 = self.factory.create_query(
query_text="select 42;",
data_source=self.factory.create_data_source())
oq = staticmethod(lambda: [query1, query2])
with patch('redash.tasks.queries.enqueue_query') as add_job_mock, \
patch.object(Query, 'outdated_queries', oq):
refresh_queries()
add_job_mock.assert_called_with(query.query_text, query.data_source, query.user_id, scheduled=True, metadata=ANY)
self.assertEqual(add_job_mock.call_count, 2)
add_job_mock.assert_has_calls([
call(query1.query_text, query1.data_source, query1.user_id,
scheduled_query=query1, metadata=ANY),
call(query2.query_text, query2.data_source, query2.user_id,
scheduled_query=query2, metadata=ANY)], any_order=True)
def test_doesnt_enqueue_outdated_queries_for_paused_data_source(self):
query = self.factory.create_query(schedule="60")
retrieved_at = utcnow() - datetime.timedelta(minutes=10)
query_result = self.factory.create_query_result(retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
db.session.add(query)
db.session.commit()
"""
refresh_queries() does not launch execution tasks for queries whose
data source is paused.
"""
query = self.factory.create_query()
oq = staticmethod(lambda: [query])
query.data_source.pause()
with patch.object(Query, 'outdated_queries', oq):
with patch('redash.tasks.queries.enqueue_query') as add_job_mock:
refresh_queries()
add_job_mock.assert_not_called()
with patch('redash.tasks.queries.enqueue_query') as add_job_mock:
refresh_queries()
add_job_mock.assert_not_called()
query.data_source.resume()
query.data_source.resume()
with patch('redash.tasks.queries.enqueue_query') as add_job_mock:
refresh_queries()
add_job_mock.assert_called_with(query.query_text, query.data_source, query.user_id, scheduled=True, metadata=ANY)
def test_skips_fresh_queries(self):
query = self.factory.create_query(schedule="1200")
retrieved_at = utcnow() - datetime.timedelta(minutes=10)
query_result = self.factory.create_query_result(retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
with patch('redash.tasks.queries.enqueue_query') as add_job_mock:
refresh_queries()
self.assertFalse(add_job_mock.called)
def test_skips_queries_with_no_ttl(self):
query = self.factory.create_query(schedule=None)
retrieved_at = utcnow() - datetime.timedelta(minutes=10)
query_result = self.factory.create_query_result(retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
with patch('redash.tasks.queries.enqueue_query') as add_job_mock:
refresh_queries()
self.assertFalse(add_job_mock.called)
def test_enqueues_query_only_once(self):
query = self.factory.create_query(schedule="60")
query2 = self.factory.create_query(schedule="60", query_text=query.query_text, query_hash=query.query_hash)
retrieved_at = utcnow() - datetime.timedelta(minutes=10)
query_result = self.factory.create_query_result(retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
query2.latest_query_data = query_result
db.session.add_all([query, query2])
with patch('redash.tasks.queries.enqueue_query') as add_job_mock:
refresh_queries()
add_job_mock.assert_called_once_with(query.query_text, query.data_source, query.user_id, scheduled=True, metadata=ANY)#{'Query ID': query.id, 'Username': 'Scheduled'})
def test_enqueues_query_with_correct_data_source(self):
query = self.factory.create_query(schedule="60", data_source=self.factory.create_data_source())
query2 = self.factory.create_query(schedule="60", query_text=query.query_text, query_hash=query.query_hash)
retrieved_at = utcnow() - datetime.timedelta(minutes=10)
query_result = self.factory.create_query_result(retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
query2.latest_query_data = query_result
db.session.add_all([query, query2])
with patch('redash.tasks.queries.enqueue_query') as add_job_mock:
refresh_queries()
add_job_mock.assert_has_calls([call(query2.query_text, query2.data_source, query2.user_id, scheduled=True, metadata=ANY),
call(query.query_text, query.data_source, query.user_id, scheduled=True, metadata=ANY)],
any_order=True)
self.assertEquals(2, add_job_mock.call_count)
def test_enqueues_only_for_relevant_data_source(self):
query = self.factory.create_query(schedule="60")
query2 = self.factory.create_query(schedule="3600", query_text=query.query_text, query_hash=query.query_hash)
retrieved_at = utcnow() - datetime.timedelta(minutes=10)
query_result = self.factory.create_query_result(retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
query2.latest_query_data = query_result
db.session.add_all([query, query2])
with patch('redash.tasks.queries.enqueue_query') as add_job_mock:
refresh_queries()
add_job_mock.assert_called_once_with(query.query_text, query.data_source, query.user_id, scheduled=True, metadata=ANY)
with patch('redash.tasks.queries.enqueue_query') as add_job_mock:
refresh_queries()
add_job_mock.assert_called_with(
query.query_text, query.data_source, query.user_id,
scheduled_query=query, metadata=ANY)

View File

@@ -30,31 +30,45 @@ class ShouldScheduleNextTest(TestCase):
def test_interval_schedule_that_needs_reschedule(self):
now = utcnow()
two_hours_ago = now - datetime.timedelta(hours=2)
self.assertTrue(models.should_schedule_next(two_hours_ago, now, "3600"))
self.assertTrue(models.should_schedule_next(two_hours_ago, now, "3600",
0))
def test_interval_schedule_that_doesnt_need_reschedule(self):
now = utcnow()
half_an_hour_ago = now - datetime.timedelta(minutes=30)
self.assertFalse(models.should_schedule_next(half_an_hour_ago, now, "3600"))
self.assertFalse(models.should_schedule_next(half_an_hour_ago, now,
"3600", 0))
def test_exact_time_that_needs_reschedule(self):
now = utcnow()
yesterday = now - datetime.timedelta(days=1)
scheduled_datetime = now - datetime.timedelta(hours=3)
scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
self.assertTrue(models.should_schedule_next(yesterday, now, scheduled_time))
self.assertTrue(models.should_schedule_next(yesterday, now,
scheduled_time, 0))
def test_exact_time_that_doesnt_need_reschedule(self):
now = date_parse("2015-10-16 20:10")
yesterday = date_parse("2015-10-15 23:07")
schedule = "23:00"
self.assertFalse(models.should_schedule_next(yesterday, now, schedule))
self.assertFalse(models.should_schedule_next(yesterday, now, schedule,
0))
def test_exact_time_with_day_change(self):
now = utcnow().replace(hour=0, minute=1)
previous = (now - datetime.timedelta(days=2)).replace(hour=23, minute=59)
previous = (now - datetime.timedelta(days=2)).replace(hour=23,
minute=59)
schedule = "23:59".format(now.hour + 3)
self.assertTrue(models.should_schedule_next(previous, now, schedule))
self.assertTrue(models.should_schedule_next(previous, now, schedule,
0))
def test_backoff(self):
now = utcnow()
two_hours_ago = now - datetime.timedelta(hours=2)
self.assertTrue(models.should_schedule_next(two_hours_ago, now, "3600",
5))
self.assertFalse(models.should_schedule_next(two_hours_ago, now,
"3600", 10))
class QueryOutdatedQueriesTest(BaseTestCase):
@@ -65,6 +79,12 @@ class QueryOutdatedQueriesTest(BaseTestCase):
self.assertNotIn(query, queries)
def test_outdated_queries_skips_unscheduled_queries(self):
query = self.factory.create_query(schedule='60')
queries = models.Query.outdated_queries()
self.assertNotIn(query, queries)
def test_outdated_queries_works_with_ttl_based_schedule(self):
two_hours_ago = utcnow() - datetime.timedelta(hours=2)
query = self.factory.create_query(schedule="3600")
@@ -92,6 +112,79 @@ class QueryOutdatedQueriesTest(BaseTestCase):
queries = models.Query.outdated_queries()
self.assertIn(query, queries)
def test_enqueues_query_only_once(self):
"""
Only one query per data source with the same text will be reported by
Query.outdated_queries().
"""
query = self.factory.create_query(schedule="60")
query2 = self.factory.create_query(
schedule="60", query_text=query.query_text,
query_hash=query.query_hash)
retrieved_at = utcnow() - datetime.timedelta(minutes=10)
query_result = self.factory.create_query_result(
retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
query2.latest_query_data = query_result
self.assertEqual(list(models.Query.outdated_queries()), [query2])
def test_enqueues_query_with_correct_data_source(self):
"""
Queries from different data sources will be reported by
Query.outdated_queries() even if they have the same query text.
"""
query = self.factory.create_query(
schedule="60", data_source=self.factory.create_data_source())
query2 = self.factory.create_query(
schedule="60", query_text=query.query_text,
query_hash=query.query_hash)
retrieved_at = utcnow() - datetime.timedelta(minutes=10)
query_result = self.factory.create_query_result(
retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
query2.latest_query_data = query_result
self.assertEqual(list(models.Query.outdated_queries()),
[query2, query])
def test_enqueues_only_for_relevant_data_source(self):
"""
If multiple queries with the same text exist, only ones that are
scheduled to be refreshed are reported by Query.outdated_queries().
"""
query = self.factory.create_query(schedule="60")
query2 = self.factory.create_query(
schedule="3600", query_text=query.query_text,
query_hash=query.query_hash)
retrieved_at = utcnow() - datetime.timedelta(minutes=10)
query_result = self.factory.create_query_result(
retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
query2.latest_query_data = query_result
self.assertEqual(list(models.Query.outdated_queries()), [query])
def test_failure_extends_schedule(self):
"""
Execution failures recorded for a query result in exponential backoff
for scheduling future execution.
"""
query = self.factory.create_query(schedule="60", schedule_failures=4)
retrieved_at = utcnow() - datetime.timedelta(minutes=16)
query_result = self.factory.create_query_result(
retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
self.assertEqual(list(models.Query.outdated_queries()), [])
query_result.retrieved_at = utcnow() - datetime.timedelta(minutes=17)
self.assertEqual(list(models.Query.outdated_queries()), [query])
class QueryArchiveTest(BaseTestCase):
def setUp(self):