Compare commits


145 Commits

Author SHA1 Message Date
Arik Fraimovich
f8878d3006 Merge pull request #358 from EverythingMe/DAT-825
Feature: archive query
2015-01-25 16:24:32 +02:00
Arik Fraimovich
1c0d596f26 Bump version due to migration. 2015-01-25 16:23:58 +02:00
Arik Fraimovich
1afd2ab388 Refactoring of @christophervalles work on query delete feature:
- Change delete into archive.
- Safely remove widgets.
- Make sure archived queries don't get scheduled, or show up in search.
- If direct link to query used, show notification.
- Tests.
- Some more.
2015-01-25 16:17:52 +02:00
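
A minimal sketch of the resulting behavior, assuming the peewee models used elsewhere in this compare (the query shapes below are illustrative, not the actual implementation):

    from redash import models

    # Illustrative only: archiving keeps the row but flags it.
    query = models.Query.get(models.Query.id == 42)
    query.is_archived = True
    query.save()

    # Search and the scheduler then exclude archived queries:
    visible = models.Query.select().where(models.Query.is_archived == False)

The is_archived column itself is added by the playhouse.migrate script included further down in this compare.
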
Christopher Valles
4aa9500402 Working on delete query 2015-01-25 16:16:31 +02:00
Arik Fraimovich
83727ae931 Merge pull request #356 from alexanderlz/master
Bug #307 Fix: Notifications stopped working
2015-01-22 12:01:30 +02:00
Alexander Leibzon
0b0b88a255 remove unused line 2015-01-22 00:27:01 +02:00
Alexander Leibzon
f23d709f4e Bug #307 fix. Notifications stopped working 2015-01-22 00:16:31 +02:00
Arik Fraimovich
88abbc7ea6 Merge pull request #355 from EverythingMe/feature/personal_home
Several small fixes
2015-01-20 16:36:32 +02:00
Arik Fraimovich
16f0413af8 Fix: don't show dashboard filters where it's not enabled 2015-01-20 16:35:55 +02:00
Arik Fraimovich
f47020a64d Report personal page as different page 2015-01-20 16:32:31 +02:00
Arik Fraimovich
55e1ef81f7 Add activity_log to list of tables redash_reader can query 2015-01-20 16:32:31 +02:00
Arik Fraimovich
6bb43d0411 Merge pull request #354 from EverythingMe/feature/personal_home
Feature: personal home with recent queries & dashboards
2015-01-19 12:15:13 +02:00
Arik Fraimovich
f51c2328c9 Feature: personal home with recent queries & dashboards 2015-01-19 12:09:06 +02:00
Arik Fraimovich
fd37188ace Merge pull request #353 from EverythingMe/bug/dashboard_auto_refresh
Fix: nulls converted to strings in UI
2015-01-19 10:51:44 +02:00
Arik Fraimovich
758e27ce91 Fix: nulls converted to strings in UI 2015-01-19 10:49:39 +02:00
Arik Fraimovich
9a3b25eb50 Merge pull request #352 from EverythingMe/bug/dashboard_auto_refresh
Fix: dashboard auto refresh stopped working
2015-01-19 09:14:32 +02:00
Arik Fraimovich
6da890dfb8 FIX: dashboard auto refresh stopped working 2015-01-19 08:32:40 +02:00
Arik Fraimovich
0d35ec7139 Merge pull request #349 from erans/master
Minor bug fixes + support for limit and skip in simple query
2015-01-18 10:11:27 +02:00
Arik Fraimovich
dc0f9a63cb Merge pull request #351 from joeysim/search_improvements
Search improvements
2015-01-18 09:22:41 +02:00
Arik Fraimovich
21c042996e Merge pull request #350 from joeysim/ctrl_enter_exec
Added support for Cmd+Enter query execution for PCs
2015-01-18 09:21:32 +02:00
Joey Simhon
5f22adadf2 ordering all_queries by created_at desc for better relevancy with big lists 2015-01-17 21:19:22 +02:00
Joey Simhon
4e8888ce2f sort searched queries by creation time, assuming the newer queries are usually more relevant 2015-01-17 21:14:56 +02:00
Joey Simhon
0a69609d38 Added support for Cmd+Enter query execution for PCs 2015-01-17 00:32:21 +02:00
Eran Sandler
2dbcd88313 added support for skip and limit 2015-01-15 17:14:48 +02:00
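
The commit doesn't show the syntax; presumably the simple-query JSON format (documented in the MongoDB commit further down) gained top-level paging keys. A hypothetical example, with "skip" and "limit" as assumed key names:

    {
        "collection": "mycoolcollection",
        "query": { "fieldA": { "$gte": 5 } },
        "skip": 100,
        "limit": 50
    }
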
Eran Sandler
6b0775f7c7 fixed an issue where the 'query' element was missing, as well as a bad sort order in simple queries 2015-01-15 17:11:32 +02:00
Arik Fraimovich
e85d3c3c9f Merge pull request #348 from EverythingMe/feature/additional_manage_commands
Feature: new data source management commands in manage.py
2015-01-14 12:35:13 +02:00
Arik Fraimovich
e20f57bba8 Added edit & delete commands to data source cli 2015-01-14 12:23:53 +02:00
Arik Fraimovich
933ace2e38 Split CLI commands to several files for easier editing and naming. 2015-01-14 10:52:11 +02:00
Arik Fraimovich
4c1e5aed6b Remove import from settings command (obsolete). 2015-01-14 10:27:53 +02:00
Arik Fraimovich
77d982b4aa Merge pull request #347 from barnash/query-params-for-filters
Query params for filters
2015-01-13 22:35:32 +02:00
barnash
02c8163265 Changed the query param to something more URL-friendly 2015-01-12 18:56:44 +02:00
Arik Fraimovich
ef868dbb6e Merge pull request #346 from erans/master
Initial support for Mongo's aggregation framework.
2015-01-12 18:17:41 +02:00
Iftach Bar
b2bab33baa added support for deep links to dashboards with saved filters 2015-01-12 09:23:27 +02:00
Iftach Bar
149e0835f8 fixed jshint stuff - semicolon in different places 2015-01-12 09:22:53 +02:00
Eran Sandler
50bed1d8f2 Initial support for Mongo's aggregation framework. 2015-01-11 12:37:37 +02:00
Eran Sandler
d4b5d78743 Perform a JSON.stringify on values whose type is "object" 2015-01-11 12:28:21 +02:00
Arik Fraimovich
7fc82a2562 Merge pull request #345 from EverythingMe/vagrant_dev
Developer Vagrant box for easier contribution
2014-12-30 07:52:07 +02:00
Arik Fraimovich
92fb138c2c Vagrant file to use the redash/dev box 2014-12-30 07:45:30 +02:00
Arik Fraimovich
71b4b45a3c Merge pull request #344 from EverythingMe/feature/query_results_cleanup
Job to cleanup unused query results
2014-12-25 15:58:10 +02:00
Arik Fraimovich
07f4a1b227 Fix: wiredep failing after version upgrade 2014-12-25 15:52:52 +02:00
Arik Fraimovich
e116e88e98 Job to cleanup unused query results 2014-12-25 15:39:49 +02:00
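
The intent is that result sets no longer referenced by any query can be dropped to reclaim space. A rough sketch of such a job, assuming peewee models and a QueryResult table referenced through latest_query_data_id (the names come from elsewhere in this compare, but the query itself is illustrative):

    from redash import models

    # Illustrative sketch, not the actual cleanup job.
    used_ids = [q.latest_query_data_id for q in models.Query.select()
                if q.latest_query_data_id is not None]
    deleted = models.QueryResult.delete().where(
        ~(models.QueryResult.id << used_ids)).execute()
    print "Deleted %d unused query results." % deleted
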
Arik Fraimovich
2278a181ca Merge pull request #339 from EverythingMe/counter-vis
bugfix: Counter visualization font size issues
2014-11-11 18:21:29 +02:00
Amir Nissim
98dc75a404 bugfix: Counter visualization was not watching for filter changes 2014-11-11 13:04:45 +02:00
Amir Nissim
536918aab3 bugfix: Counter visualization font size issues 2014-11-10 15:21:03 +02:00
Arik Fraimovich
c75ac80c7a Merge pull request #333 from EverythingMe/fix/import
Fix: mixed number columns were wrongly detected as integer
2014-11-05 11:33:46 +02:00
Arik Fraimovich
522d8542e9 Fix: mixed number columns were wrongly detected as integer 2014-11-05 11:30:17 +02:00
Arik Fraimovich
562df44c22 Merge pull request #331 from EverythingMe/fix/import
Fixes and improvements to import dashboard command:
2014-11-04 07:34:59 +02:00
Arik Fraimovich
86e6798c96 manage.py: better output for list data sources command 2014-11-04 07:26:32 +02:00
Arik Fraimovich
db7a287e82 manage.py: list all users command 2014-11-04 07:26:16 +02:00
Arik Fraimovich
518206f208 Fixes and improvements to import dashboard:
- Update it to not expect query result.
- Add support for specifying data source.
- Create mapping file if it doesn't exist yet.
2014-11-04 07:24:51 +02:00
Arik Fraimovich
bcee1e12b4 Merge pull request #325 from EverythingMe/feature/search-by-id
Add support for searching for query by id
2014-10-30 08:23:41 +02:00
Arik Fraimovich
410f4f35e2 Add support for searching for query by id 2014-10-30 07:58:53 +02:00
Arik Fraimovich
84ea9fec43 Merge pull request #323 from EverythingMe/counter-vis
Counter visualization
2014-10-27 13:58:58 +02:00
Amir Nissim
cda82b7adc #27: use <select> for columns names 2014-10-27 11:47:38 +02:00
Amir Nissim
f2d8c2020b #27: counter and target as query params, change UI 2014-10-27 11:34:56 +02:00
Amir Nissim
1b82ecbc46 #27: Counter visualization draft 2014-10-26 15:42:57 +02:00
Arik Fraimovich
e381331c36 Merge pull request #319 from EverythingMe/bug_292
#292: Customizable series colors
2014-10-23 14:44:02 +03:00
Amir Nissim
ff58247987 #292: move color palette to ng_highcharts 2014-10-23 14:36:30 +03:00
Amir Nissim
dcf0d2cbe3 #292: Customizable series colors 2014-10-23 13:46:43 +03:00
Arik Fraimovich
eb99fa5671 Merge pull request #318 from EverythingMe/docs_setup
Packer: make re:dash version configurable
2014-10-22 12:01:32 +03:00
Arik Fraimovich
ce3e19f212 Make redash version configurable 2014-10-22 11:55:17 +03:00
Arik Fraimovich
44dca6da01 Spelling mistakes. 2014-10-21 19:02:17 +03:00
Arik Fraimovich
34c9fee540 Link to new setup instructions. 2014-10-21 19:01:40 +03:00
Arik Fraimovich
e0b13b2ffa Merge pull request #316 from EverythingMe/feature_users_cli
Add commands to change user's password and grant admin
2014-10-21 18:57:40 +03:00
Arik Fraimovich
df362c12b6 Add commands to change user password and grant admin 2014-10-21 18:51:23 +03:00
Arik Fraimovich
0d1f8c948a Merge pull request #309 from EverythingMe/docs_setup
Setup script for Ubuntu/Debian + packer configuration
2014-10-21 18:42:26 +03:00
Arik Fraimovich
f523378326 Setup script for Ubuntu/Debian + packer configuration
This script is intended to work on Ubuntu 12.04, Ubuntu 14.04 and Debian Wheezy (for GCE users).
To make sure we use the same version of Redis across all distributions, we install it from source,
and to make sure we use the same version of PostgreSQL, we install it from PostgreSQL's apt repository.

Also included Packer configuration to generate GCE & AWS images.
2014-10-21 18:28:39 +03:00
Arik Fraimovich
b0f9e49709 Merge pull request #313 from erans/master
Forced setting a script execution path
2014-10-21 14:32:03 +03:00
Eran Sandler
b6dbb4e3f8 forced setting a script execution path 2014-10-21 11:20:31 +03:00
Arik Fraimovich
3f6a0e8ffa Merge pull request #312 from erans/master
MongoDB ReplicaSet support and a new connection string format.
2014-10-21 10:21:49 +03:00
Eran Sandler
a7bcc6d31e Added support for MongoDB ReplicaSet and changed the connection string format to a JSON-based one (like BigQuery). Check the wiki for an example. 2014-10-21 10:16:48 +03:00
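
The commit defers to the wiki for the actual format; a hypothetical configuration of this shape (every key below is an assumption, not the documented schema):

    {
        "connectionString": "mongodb://host1:27017,host2:27017/mydb?replicaSet=rs0",
        "dbName": "mydb",
        "replicaSetName": "rs0"
    }
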
Arik Fraimovich
8aa2d8e70a landscape.io configuration file 2014-10-19 13:41:29 +03:00
Arik Fraimovich
4720e12be7 add angular-ui-select to list of dependencies 2014-10-15 17:56:32 +03:00
Arik Fraimovich
5463591f0d Merge branch 'feature/dashboard_add_query_by_name' 2014-10-15 17:45:57 +03:00
Arik Fraimovich
2a0198fba8 Make search expect at least 2 characters 2014-10-15 17:45:39 +03:00
Arik Fraimovich
652f214b25 Updated bower dependencies:
- Angular 1.2.7 -> 1.2.18 (to support angular-ui-select).
- angular-resource and angular-route to match Angular version.
- angular-growl to latest version that supports ~1.2.
- Change version of angular-ui-select to specific one.
2014-10-15 17:42:08 +03:00
Arik Fraimovich
aa49780134 Use unminified version of angular-ui-select 2014-10-15 17:41:55 +03:00
Raymond
f483b61cfb add global html sanitizer 2014-10-15 20:55:29 +08:00
Arik Fraimovich
38a189b671 Merge pull request #306 from raymoondtang/fix/clomun_type_ingeter
Client fix: column type supports integer
2014-10-15 15:46:15 +03:00
Raymond
c2331988db use selected_query for ng-show of visualisation form 2014-10-15 20:32:15 +08:00
Raymond
eff5bdb454 Merge branch 'master' of github-yalo:EverythingMe/redash into fix/clomun_type_ingeter 2014-10-15 19:29:01 +08:00
Raymond
bd1babec3a Add query to dashboard based on name not query id, issue #171 2014-10-15 14:46:55 +08:00
Raymond
d43c2bbf62 table column type handle both integer and float 2014-10-13 12:57:42 +08:00
Arik Fraimovich
87db8099d6 Fix: need to group by runtime and retrieved_at 2014-10-06 09:53:02 +03:00
Arik Fraimovich
ebea118c7d Merge pull request #300 from EverythingMe/feature_google_oauth
Remove query stats (runtime, last retrieved) from search as it was too slow
2014-10-06 09:45:03 +03:00
Arik Fraimovich
297ac5c9bd Fix markdown filter (failing for undefined) 2014-10-06 09:41:56 +03:00
Arik Fraimovich
9b23fb4235 Remove query stats from search, as it was too slow 2014-10-06 09:41:40 +03:00
Arik Fraimovich
0a71f5e22d Merge pull request #298 from erans/master
Initial support for MongoDB.
2014-10-06 08:26:03 +03:00
Arik Fraimovich
0a8aaceb85 Merge pull request #299 from EverythingMe/feature_google_oauth
Show last execution time & runtime in search results + event tracking
2014-10-06 08:25:17 +03:00
Arik Fraimovich
00979f3ad7 Event tracking for search 2014-10-06 08:00:56 +03:00
Arik Fraimovich
c7b48837f2 Show last execution time & runtime in search results 2014-10-06 07:55:17 +03:00
Eran Sandler
418c5322c1 added extra error handling for invalid query and invalid database name 2014-10-02 12:42:46 +03:00
Arik Fraimovich
dc5b4c26a3 Updated README: link to new demo instance. 2014-10-02 07:57:52 +03:00
Eran Sandler
9ed0a5ba85 removed a debug message and changed to a better error message when collection is not specified. 2014-09-30 18:43:40 +03:00
Eran Sandler
db0770fc17 Initial support for MongoDB.
Support simple queries using a JSON format:
{
	"collection" : THE NAME OF THE COLLECTION TO QUERY,
	"query" : {
		A DICTIONARY FOR QUERYING FIELDS (similar to what you would find in PyMongo)
	},
	"fields" : {
		LIST OF FIELDS TO RETURN IN THE SPECIFIED ORDER
	},
	"sort" : {
		LIST OF FIELDS TO SORT BY (1 - Ascending, -1 - descending)
	}
}

For example:
{
	"collection" : "mycoolcollection",
	"query" : {
		"fieldA" : { "$gte" : 5 },
		"created" : { "$lt" : "ISODate(\"2014-09-01 23:43\")" }
	},
	"fields" : {
		"fieldA" : 1,
		"created" : 2
	},
	"sort" : {
		"created" : -1
	}
}
2014-09-30 18:34:35 +03:00
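
A format like this maps almost directly onto PyMongo. A minimal sketch of how such a description could be executed, under the assumption that the runner simply forwards each key to the driver (the real query runner also handles the ISODate strings and the error cases mentioned in the commits above):

    import json
    import pymongo

    def run_simple_query(connection_string, db_name, query_text):
        # Parse the JSON description typed into the query editor.
        spec = json.loads(query_text)
        db = pymongo.MongoClient(connection_string)[db_name]
        cursor = db[spec["collection"]].find(spec.get("query"), spec.get("fields"))
        if "sort" in spec:
            # PyMongo expects a list of (field, direction) pairs; note that a
            # plain dict doesn't preserve the order written in the editor.
            cursor = cursor.sort(list(spec["sort"].items()))
        # Paging keys per the skip/limit commit above (key names assumed).
        if "skip" in spec:
            cursor = cursor.skip(spec["skip"])
        if "limit" in spec:
            cursor = cursor.limit(spec["limit"])
        return list(cursor)
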
Arik Fraimovich
9bb58e71d2 Merge pull request #296 from EverythingMe/feature_google_oauth
Feature: basic search page for queries
2014-09-30 08:43:16 +03:00
Arik Fraimovich
560598eaad Search UI. 2014-09-30 08:39:13 +03:00
Arik Fraimovich
f9144fc927 Naive search implementation. 2014-09-30 08:37:59 +03:00
Arik Fraimovich
883bf173c0 Merge pull request #295 from EverythingMe/feature_google_oauth
Feature: support markdown in query description (fixes #293)
2014-09-29 18:15:24 +03:00
Arik Fraimovich
3f2bb65b32 Show markdown in query view too 2014-09-29 18:10:17 +03:00
Arik Fraimovich
3917af019a Feature: support markdown in query description 2014-09-29 17:59:40 +03:00
Arik Fraimovich
e88837e835 Merge pull request #291 from EverythingMe/feature_google_oauth
Move event recording to Celery/database instead of log file
2014-09-27 17:45:55 +03:00
Arik Fraimovich
7abdc2543e update manage.py to use new Event.record method. 2014-09-27 17:45:04 +03:00
Arik Fraimovich
91ab90a6fe Move event recording to Celery/database instead of log file 2014-09-27 17:41:50 +03:00
Arik Fraimovich
7fd2bd3d24 Merge pull request #290 from EverythingMe/feature_google_oauth
Clearer google login button
2014-09-27 16:26:02 +03:00
Arik Fraimovich
3ed1ea1e33 Clearer google login button 2014-09-26 13:13:05 +03:00
Arik Fraimovich
a4486c56b9 Merge pull request #289 from EverythingMe/feature_google_oauth
Fix: add necessary scope to get user's name
2014-09-26 00:40:11 +03:00
Arik Fraimovich
3da0ecf36c Fix: add necessary scope to get user's name 2014-09-25 17:55:43 +03:00
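
The commit doesn't name the scope, but with Google's OAuth2 API the user's name is only returned with the profile scope; email alone isn't enough. Illustrative:

    # Scopes requested during Google login (the exact set used is not shown here):
    scope = ('https://www.googleapis.com/auth/userinfo.email '
             'https://www.googleapis.com/auth/userinfo.profile')
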
Arik Fraimovich
11a1095b18 Merge pull request #284 from EverythingMe/feature_google_oauth
Feature: Google OAuth support (instead of deprecated OpenID)
2014-09-24 18:13:45 +03:00
Arik Fraimovich
b43485f322 Update tests 2014-09-21 10:11:03 +03:00
Arik Fraimovich
d83675326b Only enable google oauth if client id & secret provided 2014-09-21 09:07:52 +03:00
Arik Fraimovich
8d7b9a552e Google OAuth support (fixes #223) 2014-09-21 08:53:41 +03:00
Arik Fraimovich
e1eb75b786 Add to requirements flask-oauth and remove flask-googleopenid 2014-09-21 08:48:15 +03:00
Arik Fraimovich
34a3c9e91c Link to wiki in readme 2014-09-17 16:14:49 +03:00
Arik Fraimovich
e007a2891d Fix build status image in readme 2014-09-17 16:06:15 +03:00
Arik Fraimovich
febe6e4aa7 Update readme 2014-09-17 16:04:30 +03:00
Arik Fraimovich
8099dafc68 Merge pull request #283 from EverythingMe/fix_stuck_jobs
Update psycopg2 to 2.5.2.
2014-09-15 09:28:47 +03:00
Arik Fraimovich
ce3d5e637f Update psycopg2 to 2.5.2.
In 2.5.1 there was an issue where the OperationalError exception caused a SEGFAULT
when being pickled. This crashed the Celery worker, causing jobs to be lost.
2014-09-15 07:25:35 +03:00
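
The pickling matters because Celery serializes task state, including raised exceptions, into its result backend; an exception that can't round-trip through pickle takes the worker down with it. Illustrative of the failing path:

    import pickle
    from psycopg2 import OperationalError

    # Celery effectively does this when storing a task failure.
    # Under psycopg2 2.5.1 this could SEGFAULT the worker process;
    # 2.5.2 fixed it.
    blob = pickle.dumps(OperationalError("could not connect to server"))
    exc = pickle.loads(blob)
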
Arik Fraimovich
4a52ccd4fa Gitter integration for CircleCI. 2014-09-14 18:23:02 +03:00
Arik Fraimovich
a0c81f8a31 Merge pull request #281 from EverythingMe/fix_stuck_jobs
Several fixes to reduce cases of stuck jobs
2014-09-11 07:50:35 +03:00
Arik Fraimovich
ce13b79bdc Use correct logging level 2014-09-11 07:47:30 +03:00
Arik Fraimovich
c580db277d Add cleanup_tasks job.
Enumerates all locks and removes those of non-existing jobs. Useful
for the case where the worker is cold restarted and jobs aren't finished
properly.
2014-09-11 07:42:36 +03:00
Arik Fraimovich
5e944e9a8f If a found lock is for a ready job, ignore it.
(Ready = revoked, finished or failed.)
2014-09-11 07:41:43 +03:00
Arik Fraimovich
4b94cf706a Set default locks expiry time to 12 hours 2014-09-11 07:41:23 +03:00
Arik Fraimovich
364c51456d Set expiry time on locks, just in case they get stuck for some reason. 2014-09-11 07:40:20 +02:00
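
An expiry makes a stuck lock self-healing: even if a worker dies without releasing it, the key disappears after the TTL. A minimal sketch with redis-py (the key name is illustrative; per the commit above, the default expiry chosen was 12 hours):

    import redis

    r = redis.StrictRedis()
    LOCK_EXPIRY = 12 * 60 * 60  # seconds

    # Acquire the lock only if it's free, with an expiry so it can't
    # outlive a crashed worker (key name is illustrative).
    acquired = r.set('query_hash_lock:abc123', 'job-id', nx=True, ex=LOCK_EXPIRY)
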
Arik Fraimovich
1274d36abc Merge pull request #280 from EverythingMe/fix_stuck_jobs
Fix #261: cancelling jobs sends them to limbo
2014-09-06 18:12:03 +03:00
Arik Fraimovich
f6bd562dd2 Remove cleanup_tasks, as it's not stable 2014-09-06 18:09:04 +03:00
Arik Fraimovich
065d2bc2c6 Schedule removal of dead tasks 2014-09-06 14:18:35 +03:00
Arik Fraimovich
653ed1c57a Add cleanup task to remove locks of dead jobs 2014-09-06 14:18:15 +03:00
Arik Fraimovich
7dc1176628 Fix #261: cancelling jobs sends them to limbo 2014-09-06 13:56:36 +03:00
Arik Fraimovich
365b8a8c93 Merge pull request #279 from EverythingMe/json-results
API - query results in JSON format. fixes #278
2014-09-03 12:07:36 +03:00
Arik Fraimovich
6e1e0a9967 Merge QueryResultAPI with CSVQueryResultAPI 2014-09-03 11:55:17 +03:00
Amir Nissim
170640a63f API - query results in JSON format. fixes #278 2014-09-02 17:52:04 +03:00
Arik Fraimovich
5e970b73d5 Merge pull request #270 from olgakogan/master
added handling for querying strings with non-standard characters
2014-08-25 12:00:02 +03:00
olgakogan
a4643472a5 added handling for querying strings with non-standard characters 2014-08-24 19:08:10 +03:00
Arik Fraimovich
7aa01f2bd2 Comment out filters url sync tests. 2014-08-20 09:07:08 +03:00
Arik Fraimovich
cb4b0e0296 Merge pull request #269 from EverythingMe/257-chart-editor
Disable filters url syncing
2014-08-20 08:59:22 +03:00
Arik Fraimovich
2c05e921c4 Disable filters url syncing 2014-08-20 08:58:56 +03:00
Arik Fraimovich
c4877f254e Merge pull request #268 from EverythingMe/257-chart-editor
[#257] chart editor: global series type
2014-08-19 19:51:57 +03:00
Arik Fraimovich
9fc59de35f remove throttling of redrawData 2014-08-19 18:37:32 +03:00
Amir Nissim
eb50f3fc94 [#257] chart editor: use globalSeriesType when creating new series 2014-08-19 14:44:53 +03:00
Arik Fraimovich
12fe59827f Merge pull request #267 from EverythingMe/257-chart-editor
[#257] chart editor: global series type
2014-08-19 14:04:44 +03:00
Arik Fraimovich
d32caff31d Merge pull request #266 from EverythingMe/265-db-reloads
disable reloadOnSearch for /dashboard. fixes #265
2014-08-19 13:17:17 +03:00
Amir Nissim
ba540ff380 [#257] chart editor: global series type 2014-08-19 13:14:24 +03:00
Amir Nissim
2112faab02 disable reloadOnSearch for /dashboard. fixes #265 2014-08-19 12:01:23 +03:00
76 changed files with 3035 additions and 520 deletions

.landscape.yaml (new file, 2 lines)

@@ -0,0 +1,2 @@
ignore-paths:
- migrations


@@ -1,2 +1,2 @@
web: ./manage.py runserver -p $PORT
web: ./manage.py runserver -p $PORT --host 0.0.0.0
worker: ./bin/run celery worker --app=redash.worker --beat -Qqueries,celery,scheduled_queries


@@ -1,72 +1,46 @@
# [_re:dash_](https://github.com/everythingme/redash)
![Build Status](https://circleci.com/gh/EverythingMe/redash.png?circle-token=8a695aa5ec2cbfa89b48c275aea298318016f040 "Build Status")
<p align="center">
<img title="re:dash" src='https://raw.githubusercontent.com/EverythingMe/redash/screenshots/redash_logo.png' />
</p>
<p align="center">
<img title="Build Status" src='https://circleci.com/gh/EverythingMe/redash.png?circle-token=8a695aa5ec2cbfa89b48c275aea298318016f040'/>
</p>
**_re:dash_** is our take on freeing the data within our company in a way that will better fit our culture and usage patterns.
Prior to **_re:dash_**, we tried to use tranditional BI suites and discovered a set of bloated, technically challenged and slow tools/flows. What we were looking for was a more hacker'ish way to look at data, so we built one.
Prior to **_re:dash_**, we tried to use traditional BI suites and discovered a set of bloated, technically challenged and slow tools/flows. What we were looking for was a more hacker'ish way to look at data, so we built one.
**_re:dash_** was built to allow fast and easy access to billions of records, that we process and collect using Amazon Redshift ("petabyte scale data warehouse" that "speaks" PostgreSQL).
Today **_re:dash_** has support for querying multiple databases, including: Redshift, Google BigQuery, PostgreSQL, MySQL, Graphite and custom scripts.
**_re:dash_** consists of two parts:
1. **Query Editor**: think of [JS Fiddle](http://jsfiddle.net) for SQL queries. It's your way to share data in the organization in an open way, by sharing both the dataset and the query that generated it. This way everyone can peer review not only the resulting dataset but also the process that generated it. Also it's possible to fork it and generate new datasets and reach new insights.
2. **Dashboards/Visualizations**: once you have a dataset, you can create different visualizations out of it, and then combine several visualizations into a single dashboard. Currently it supports bar charts, pivot table and cohorts.
1. **Query Editor**: think of [JS Fiddle](http://jsfiddle.net) for SQL queries. It's your way to share data in the organization in an open way, by sharing both the dataset and the query that generated it. This way everyone can peer review not only the resulting dataset but also the process that generated it. Also it's possible to fork it and generate new datasets and reach new insights.
2. **Dashboards/Visualizations**: once you have a dataset, you can create different visualizations out of it, and then combine several visualizations into a single dashboard. Currently it supports charts, pivot table and cohorts.
This is the first release, which is more than usable but still has its rough edges and way to go to fulfill its full potential. The Query Editor part is quite solid, but the visualizations need more work to enrich them and to make them more user friendly.
**_re:dash_** is a work in progress and has its rough edges and way to go to fulfill its full potential. The Query Editor part is quite solid, but the visualizations need more work to enrich them and to make them more user friendly.
## Demo
![Screenshots](https://raw.github.com/EverythingMe/redash/screenshots/screenshots.gif)
You can try out the demo instance: http://rd-demo.herokuapp.com/ (login with any Google account).
You can try out the demo instance: http://demo.redash.io/ (login with any Google account).
## Getting Started
* [Setting up re:dash instance](https://github.com/EverythingMe/redash/wiki/Setting-up-re:dash-instance) (includes links to ready made AWS/GCE images).
* Additional documentation in the [Wiki](https://github.com/everythingme/redash/wiki).
Due to Heroku dev plan limits, it has a small database of flights (see schema [here](http://rd-demo.herokuapp.com/dashboard/schema)). Also due to another Heroku limitation, it is running with the regular user, hence you can DELETE or INSERT data/tables. Please be nice and don't do this.
## Getting help
* [Google Group (mailing list)](https://groups.google.com/forum/#!forum/redash-users): the best place to get updates about new releases or ask general questions.
* #redash IRC channel on [Freenode](http://www.freenode.net/).
## Technology
* Python
* [AngularJS](http://angularjs.org/)
* [PostgreSQL](http://www.postgresql.org/) / [AWS Redshift](http://aws.amazon.com/redshift/)
* [Redis](http://redis.io)
PostgreSQL is used both as the operational database for the system and as the data store that is being queried. To be exact, we built this system to use on top of Amazon's Redshift, which supports the PG driver. But it's quite simple to add support for other datastores, and we do plan to do so.
This is our first large scale AngularJS project, and we learned a lot during the development of it. There are still things we need to iron out, and comments on the way we use AngularJS are more than welcome (and pull requests just as well).
### HighCharts
HighCharts is really great, but it's not free for commercial use. Please refer to their [licensing options](http://shop.highsoft.com/highcharts.html), to see what applies for your use.
It's very likely that in the future we will switch to [D3.js](http://d3js.org/) instead.
## Getting Started
* [Setting up re:dash on Heroku in 5 minutes](https://github.com/EverythingMe/redash/wiki/Setting-up-re:dash-on-Heroku-in-5-minutes)
* [Setting re:dash on your own server (Ubuntu)](https://github.com/EverythingMe/redash/wiki/Setting-re:dash-on-your-own-server-(Ubuntu))
**Need help setting re:dash or one of the dependencies up?** Ping @arikfr on the IRC #redash channel or send a message to the [mailing list](https://groups.google.com/forum/#!forum/redash-users), and he will gladly help.
* Find us [on gitter](https://gitter.im/EverythingMe/redash#) (chat).
* Contact Arik, the maintainer directly: arik@everything.me.
## Roadmap
Below you can see the "big" features of the next 3 releases (for full list, click on the link):
### [v0.3](https://github.com/EverythingMe/redash/issues?milestone=2&state=open)
- Dashboard filters: ability to filter/slice the data you see in a single dashboard using filters (date or selectors).
- Multiple databases support (including other database type than PostgreSQL).
- Scheduled reports by email.
- Comments on queries.
### [v0.4](https://github.com/EverythingMe/redash/issues?milestone=3&state=open)
- Query versioning.
- More "realtime" UI (using websockets).
- More visualizations.
TBD.
## Reporting Bugs and Contributing Code

Vagrantfile (new file, vendored, 11 lines)

@@ -0,0 +1,11 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = "redash/dev"
  config.vm.synced_folder "./", "/opt/redash/current"
  config.vm.network "forwarded_port", guest: 5000, host: 9001
end
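
With this file in the repository root, vagrant up fetches the redash/dev box, mounts the working tree at /opt/redash/current inside it, and forwards the app's port 5000 to port 9001 on the host, so the dev server is reachable at http://localhost:9001.
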


@@ -23,3 +23,6 @@ deployment:
branch: master
commands:
- make upload
notify:
webhooks:
- url: https://webhooks.gitter.im/e/895d09c3165a0913ac2f

manage.py (142 changed lines)

@@ -2,17 +2,19 @@
"""
CLI to manage redash.
"""
import datetime
from flask.ext.script import Manager, prompt_pass
from flask.ext.script import Manager
from redash import settings, models, __version__
from redash.wsgi import app
from redash.import_export import import_manager
from redash.cli import users, database, data_sources
manager = Manager(app)
database_manager = Manager(help="Manages the database (create/drop tables).")
users_manager = Manager(help="Users management commands.")
data_sources_manager = Manager(help="Data sources management commands.")
manager.add_command("database", database.manager)
manager.add_command("users", users.manager)
manager.add_command("import", import_manager)
manager.add_command("ds", data_sources.manager)
@manager.command
def version():
@@ -22,7 +24,7 @@ def version():
@manager.command
def runworkers():
"""Prints deprecation warning."""
"""Start workers (deprecated)."""
print "** This command is deprecated. Please use Celery's CLI to control the workers. **"
@@ -31,8 +33,10 @@ def make_shell_context():
from redash.models import db
return dict(app=app, db=db, models=models)
@manager.command
def check_settings():
"""Show the settings as re:dash sees them (useful for debugging)."""
from types import ModuleType
for name in dir(settings):
@@ -40,130 +44,6 @@ def check_settings():
if not callable(item) and not name.startswith("__") and not isinstance(item, ModuleType):
print "{} = {}".format(name, item)
@manager.command
def import_events(events_file):
import json
from collections import Counter
count = Counter()
with open(events_file) as f:
for line in f:
try:
event = json.loads(line)
user = event.pop('user_id')
action = event.pop('action')
object_type = event.pop('object_type')
object_id = event.pop('object_id', None)
if object_id == 'dashboard' and object_type == 'dashboard':
count['bad dashboard id'] += 1
continue
created_at = datetime.datetime.utcfromtimestamp(event.pop('timestamp'))
additional_properties = json.dumps(event)
models.Event.create(user=user, action=action, object_type=object_type, object_id=object_id,
additional_properties=additional_properties, created_at=created_at)
count['imported'] += 1
except Exception as ex:
print "Failed importing line:"
print line
print ex.message
count[ex.message] += 1
count['failed'] += 1
models.db.close_db(None)
for k, v in count.iteritems():
print k
print v
@database_manager.command
def create_tables():
"""Creates the database tables."""
from redash.models import create_db, init_db
create_db(True, False)
init_db()
@database_manager.command
def drop_tables():
"""Drop the database tables."""
from redash.models import create_db
create_db(False, True)
@users_manager.option('email', help="User's email")
@users_manager.option('name', help="User's full name")
@users_manager.option('--admin', dest='is_admin', action="store_true", default=False, help="set user as admin")
@users_manager.option('--google', dest='google_auth', action="store_true", default=False, help="user uses Google Auth to login")
@users_manager.option('--password', dest='password', default=None, help="Password for users who don't use Google Auth (leave blank for prompt).")
@users_manager.option('--groups', dest='groups', default=models.User.DEFAULT_GROUPS, help="Comma seperated list of groups (leave blank for default).")
def create(email, name, groups, is_admin=False, google_auth=False, password=None):
print "Creating user (%s, %s)..." % (email, name)
print "Admin: %r" % is_admin
print "Login with Google Auth: %r\n" % google_auth
if isinstance(groups, basestring):
groups= groups.split(',')
groups.remove('') # in case it was empty string
if is_admin:
groups += ['admin']
user = models.User(email=email, name=name, groups=groups)
if not google_auth:
password = password or prompt_pass("Password")
user.hash_password(password)
try:
user.save()
except Exception, e:
print "Failed creating user: %s" % e.message
@users_manager.option('email', help="email address of user to delete")
def delete(email):
deleted_count = models.User.delete().where(models.User.email == email).execute()
print "Deleted %d users." % deleted_count
@data_sources_manager.command
def import_from_settings(name=None):
"""Import data source from settings (env variables)."""
name = name or "Default"
data_source = models.DataSource.create(name=name,
type=settings.CONNECTION_ADAPTER,
options=settings.CONNECTION_STRING)
print "Imported data source from settings (id={}).".format(data_source.id)
@data_sources_manager.command
def list():
"""List currently configured data sources"""
for ds in models.DataSource.select():
print "Name: {}\nType: {}\nOptions: {}".format(ds.name, ds.type, ds.options)
@data_sources_manager.command
def new(name, type, options):
"""Create new data source"""
# TODO: validate it's a valid type and in the future, validate the options.
print "Creating {} data source ({}) with options:\n{}".format(type, name, options)
data_source = models.DataSource.create(name=name,
type=type,
options=options)
print "Id: {}".format(data_source.id)
manager.add_command("database", database_manager)
manager.add_command("users", users_manager)
manager.add_command("import", import_manager)
manager.add_command("ds", data_sources_manager)
if __name__ == '__main__':
manager.run()
manager.run()


@@ -0,0 +1,12 @@
from playhouse.migrate import Migrator
from redash.models import db
from redash import models

if __name__ == '__main__':
    db.connect_db()
    migrator = Migrator(db.database)

    with db.database.transaction():
        migrator.add_column(models.Query, models.Query.is_archived, 'is_archived')

    db.close_db(None)
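
playhouse.migrate is peewee's schema-migration helper; the script adds the new is_archived column inside a transaction and is meant to be run once when upgrading an existing installation (hence the "Bump version due to migration" commit at the top of this compare).
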


@@ -163,7 +163,6 @@ module.exports = function (grunt) {
// Automatically inject Bower components into the app
wiredep: {
options: {
cwd: '<%= yeoman.app %>'
},
app: {
src: ['<%= yeoman.app %>/index.html'],

rd_ui/app/google_login.png (new binary file, 18 KiB)


@@ -15,7 +15,9 @@
<link rel="stylesheet" href="/bower_components/pivottable/dist/pivot.css">
<link rel="stylesheet" href="/bower_components/cornelius/src/cornelius.css">
<link rel="stylesheet" href="/bower_components/select2/select2.css">
<link rel="stylesheet" href="/bower_components/angular-ui-select/dist/select.css">
<link rel="stylesheet" href="/bower_components/pace/themes/pace-theme-minimal.css">
<link rel="stylesheet" href="/bower_components/font-awesome/css/font-awesome.css">
<link rel="stylesheet" href="/styles/redash.css">
<!-- endbuild -->
</head>
@@ -65,6 +67,12 @@
</ul>
</li>
</ul>
<form class="navbar-form navbar-left" role="search" ng-submit="searchQueries()">
<div class="form-group">
<input type="text" ng-model="term" class="form-control" placeholder="Search queries...">
</div>
<button type="submit" class="btn btn-default"><span class="glyphicon glyphicon-search"></span></button>
</form>
<ul class="nav navbar-nav navbar-right">
<p class="navbar-text avatar" ng-show="currentUser.id" ng-cloak>
<img ng-src="{{currentUser.gravatar_url}}" class="img-circle" alt="{{currentUser.name}}"/>
@@ -110,6 +118,7 @@
<script src="/bower_components/mousetrap/plugins/global-bind/mousetrap-global-bind.js"></script>
<script src="/bower_components/select2/select2.js"></script>
<script src="/bower_components/angular-ui-select2/src/select2.js"></script>
<script src="/bower_components/angular-ui-select/dist/select.js"></script>
<script src="/bower_components/underscore.string/lib/underscore.string.js"></script>
<script src="/bower_components/marked/lib/marked.js"></script>
<script src="/scripts/ng_highchart.js"></script>
@@ -133,6 +142,7 @@
<script src="/scripts/visualizations/base.js"></script>
<script src="/scripts/visualizations/chart.js"></script>
<script src="/scripts/visualizations/cohort.js"></script>
<script src="/scripts/visualizations/counter.js"></script>
<script src="/scripts/visualizations/table.js"></script>
<script src="/scripts/visualizations/pivot.js"></script>
<script src="/scripts/directives/directives.js"></script>


@@ -35,6 +35,19 @@
<div class="row">
<div class="main">
{% if show_google_openid %}
<div class="row">
<a href="/oauth/google?next={{next}}"><img src="/google_login.png" class="login-button"/></a>
</div>
<div class="login-or">
<hr class="hr-or">
<span class="span-or">or</span>
</div>
{% endif %}
<form role="form" method="post" name="login">
<div class="form-group">
<label for="inputUsernameEmail">Username or email</label>
@@ -56,20 +69,7 @@
</button>
</form>
{% if show_google_openid %}
<div class="login-or">
<hr class="hr-or">
<span class="span-or">or</span>
</div>
<div class="row">
<div class="col-xs-6 col-sm-6 col-md-6">
<a href="/google_auth/login?next={{next}}" class="btn btn-lg btn-info btn-block">Google</a>
</div>
</div>
{% endif %}
</div>
</div>


@@ -14,7 +14,8 @@ angular.module('redash', [
'ui.bootstrap',
'smartTable.table',
'ngResource',
'ngRoute'
'ngRoute',
'ui.select'
]).config(['$routeProvider', '$locationProvider', '$compileProvider', 'growlProvider',
function ($routeProvider, $locationProvider, $compileProvider, growlProvider) {
if (featureFlags.clientSideMetrics) {
@@ -37,7 +38,8 @@ angular.module('redash', [
$routeProvider.when('/dashboard/:dashboardSlug', {
templateUrl: '/views/dashboard.html',
controller: 'DashboardCtrl'
controller: 'DashboardCtrl',
reloadOnSearch: false
});
$routeProvider.when('/queries', {
templateUrl: '/views/queries.html',
@@ -54,6 +56,11 @@ angular.module('redash', [
}]
}
});
$routeProvider.when('/queries/search', {
templateUrl: '/views/queries_search_results.html',
controller: 'QuerySearchCtrl',
reloadOnSearch: true,
});
$routeProvider.when('/queries/:queryId', {
templateUrl: '/views/query.html',
controller: 'QueryViewCtrl',
@@ -83,6 +90,10 @@ angular.module('redash', [
templateUrl: '/views/index.html',
controller: 'IndexCtrl'
});
$routeProvider.when('/personal', {
templateUrl: '/views/personal.html',
controller: 'PersonalIndexCtrl'
});
$routeProvider.otherwise({
redirectTo: '/'
});


@@ -1,12 +1,71 @@
(function () {
var QuerySearchCtrl = function($scope, $location, $filter, Events, Query) {
$scope.$parent.pageTitle = "Queries Search";
$scope.gridConfig = {
isPaginationEnabled: true,
itemsByPage: 50,
maxSize: 8,
};
var dateFormatter = function (value) {
if (!value) return "-";
return value.format("DD/MM/YY HH:mm");
}
$scope.gridColumns = [
{
"label": "Name",
"map": "name",
"cellTemplateUrl": "/views/queries_query_name_cell.html"
},
{
'label': 'Created By',
'map': 'user.name'
},
{
'label': 'Created At',
'map': 'created_at',
'formatFunction': dateFormatter
},
{
'label': 'Update Schedule',
'map': 'ttl',
'formatFunction': function (value) {
return $filter('refreshRateHumanize')(value);
}
}
];
$scope.queries = [];
$scope.$parent.term = $location.search().q;
Query.search({q: $scope.term }, function(results) {
$scope.queries = _.map(results, function(query) {
query.created_at = moment(query.created_at);
return query;
});
});
$scope.search = function() {
if (!angular.isString($scope.term) || $scope.term.trim() == "") {
$scope.queries = [];
return;
}
$location.search({q: $scope.term});
};
Events.record(currentUser, "search", "query", "", {"term": $scope.term});
};
var QueriesCtrl = function ($scope, $http, $location, $filter, Query) {
$scope.$parent.pageTitle = "All Queries";
$scope.gridConfig = {
isPaginationEnabled: true,
itemsByPage: 50,
maxSize: 8,
isGlobalSearchActivated: true
}
isGlobalSearchActivated: true};
$scope.allQueries = [];
$scope.queries = [];
@@ -35,7 +94,7 @@
Query.query(function (queries) {
$scope.allQueries = _.map(queries, function (query) {
query.created_at = moment(query.created_at);
query.last_retrieved_at = moment(query.last_retrieved_at);
query.retrieved_at = moment(query.retrieved_at);
return query;
});
@@ -58,35 +117,17 @@
'formatFunction': dateFormatter
},
{
'label': 'Runtime (avg)',
'map': 'avg_runtime',
'formatFunction': function (value) {
return $filter('durationHumanize')(value);
}
},
{
'label': 'Runtime (min)',
'map': 'min_runtime',
'formatFunction': function (value) {
return $filter('durationHumanize')(value);
}
},
{
'label': 'Runtime (max)',
'map': 'max_runtime',
'label': 'Runtime',
'map': 'runtime',
'formatFunction': function (value) {
return $filter('durationHumanize')(value);
}
},
{
'label': 'Last Executed At',
'map': 'last_retrieved_at',
'map': 'retrieved_at',
'formatFunction': dateFormatter
},
{
'label': 'Times Executed',
'map': 'times_retrieved'
},
{
'label': 'Update Schedule',
'map': 'ttl',
@@ -95,6 +136,7 @@
}
}
]
$scope.tabs = [
{"name": "My Queries", "key": "my"},
{"key": "all", "name": "All Queries"},
@@ -110,7 +152,7 @@
});
}
var MainCtrl = function ($scope, Dashboard, notifications) {
var MainCtrl = function ($scope, $location, Dashboard, notifications) {
if (featureFlags.clientSideMetrics) {
$scope.$on('$locationChangeSuccess', function(event, newLocation, oldLocation) {
// This will be called once per actual page load.
@@ -133,7 +175,11 @@
$scope.otherDashboards = $scope.allDashboards['Other'] || [];
$scope.groupedDashboards = _.omit($scope.allDashboards, 'Other');
});
}
};
$scope.searchQueries = function() {
$location.path('/queries/search').search({q: $scope.term});
};
$scope.reloadDashboards();
@@ -146,7 +192,7 @@
$(window).click(function () {
notifications.getPermissions();
});
}
};
var IndexCtrl = function ($scope, Events, Dashboard) {
Events.record(currentUser, "view", "page", "homepage");
@@ -160,10 +206,29 @@
});
}
}
}
};
var PersonalIndexCtrl = function ($scope, Events, Dashboard, Query) {
Events.record(currentUser, "view", "page", "personal_homepage");
$scope.$parent.pageTitle = "Home";
$scope.recentQueries = Query.recent();
$scope.recentDashboards = Dashboard.recent();
$scope.archiveDashboard = function (dashboard) {
if (confirm('Are you sure you want to delete "' + dashboard.name + '" dashboard?')) {
Events.record(currentUser, "archive", "dashboard", dashboard.id);
dashboard.$delete(function () {
$scope.$parent.reloadDashboards();
});
}
}
};
angular.module('redash.controllers', [])
.controller('QueriesCtrl', ['$scope', '$http', '$location', '$filter', 'Query', QueriesCtrl])
.controller('IndexCtrl', ['$scope', 'Events', 'Dashboard', IndexCtrl])
.controller('MainCtrl', ['$scope', 'Dashboard', 'notifications', MainCtrl]);
.controller('PersonalIndexCtrl', ['$scope', 'Events', 'Dashboard', 'Query', PersonalIndexCtrl])
.controller('MainCtrl', ['$scope', '$location', 'Dashboard', 'notifications', MainCtrl])
.controller('QuerySearchCtrl', ['$scope', '$location', '$filter', 'Events', 'Query', QuerySearchCtrl]);
})();


@@ -1,5 +1,5 @@
(function() {
var DashboardCtrl = function($scope, Events, Widget, $routeParams, $http, $timeout, $q, Dashboard) {
var DashboardCtrl = function($scope, Events, Widget, $routeParams, $location, $http, $timeout, $q, Dashboard) {
$scope.refreshEnabled = false;
$scope.refreshRate = 60;
@@ -15,7 +15,7 @@
return _.map(row, function (widget) {
var w = new Widget(widget);
if (w.visualization && dashboard.dashboard_filters_enabled) {
if (w.visualization) {
promises.push(w.getQuery().getQueryResultPromise());
}
@@ -32,22 +32,23 @@
// TODO: first object should be a copy, otherwise one of the chart filters behaves different than the others.
filters[filter.name] = filter;
filters[filter.name].originFilters = [];
if (_.has($location.search(), filter.name)) {
filter.current = $location.search()[filter.name];
}
$scope.$watch(function () { return filter.current }, function (value) {
_.each(filter.originFilters, function (originFilter) {
originFilter.current = value;
});
});
};
}
// TODO: merge values.
filters[filter.name].originFilters.push(filter);
});
});
if (dashboard.dashboard_filters_enabled) {
$scope.filters = _.values(filters);
}
$scope.filters = _.values(filters);
});
@@ -74,7 +75,7 @@
_.each(row, function(widget, i) {
var newWidget = newWidgets[widget.id];
if (newWidget && newWidget[0].visualization.query.latest_query_data_id != widget.visualization.query.latest_query_data_id) {
row[i] = newWidget[0];
row[i] = new Widget(newWidget[0]);
}
});
});
@@ -83,8 +84,8 @@
});
}, $scope.refreshRate);
};
}
}
};
$scope.triggerRefresh = function() {
$scope.refreshEnabled = !$scope.refreshEnabled;
@@ -137,7 +138,7 @@
};
angular.module('redash.controllers')
.controller('DashboardCtrl', ['$scope', 'Events', 'Widget', '$routeParams', '$http', '$timeout', '$q', 'Dashboard', DashboardCtrl])
.controller('DashboardCtrl', ['$scope', 'Events', 'Widget', '$routeParams', '$location', '$http', '$timeout', '$q', 'Dashboard', DashboardCtrl])
.controller('WidgetCtrl', ['$scope', 'Events', 'Query', WidgetCtrl])
})();
})();


@@ -21,8 +21,13 @@
$scope.saveQuery();
}
},
// Cmd+Enter for Mac
'meta+enter': function () {
$scope.executeQuery();
},
// Ctrl+Enter for PC
'ctrl+enter': function () {
$scope.executeQuery();
}
};


@@ -68,6 +68,31 @@
$scope.queryResult.cancelExecution();
Events.record(currentUser, 'cancel_execute', 'query', $scope.query.id);
};
$scope.archiveQuery = function(options, data) {
if (data) {
data.id = $scope.query.id;
} else {
data = $scope.query;
}
$scope.isDirty = false;
options = _.extend({}, {
successMessage: 'Query archived',
errorMessage: 'Query could not be archived'
}, options);
return Query.delete({id: data.id}, function() {
$scope.query.is_archived = true;
$scope.query.ttl = -1;
growl.addSuccessMessage(options.successMessage);
// This feels dirty.
$('#archive-confirmation-modal').modal('hide');
}, function(httpResponse) {
growl.addErrorMessage(options.errorMessage);
}).$promise;
}
$scope.updateDataSource = function() {
Events.record(currentUser, 'update_data_source', 'query', $scope.query.id);


@@ -147,22 +147,22 @@
var reset = function() {
$scope.saveInProgress = false;
$scope.widgetSize = 1;
$scope.queryId = null;
$scope.selectedVis = null;
$scope.query = null;
$scope.query = {};
$scope.selected_query = undefined;
$scope.text = "";
};
reset();
$scope.loadVisualizations = function () {
if (!$scope.queryId) {
if (!$scope.query.selected) {
return;
}
Query.get({ id: $scope.queryId }, function(query) {
Query.get({ id: $scope.query.selected.id }, function(query) {
if (query) {
$scope.query = query;
$scope.selected_query = query;
if (query.visualizations.length) {
$scope.selectedVis = query.visualizations[0];
}
@@ -170,6 +170,20 @@
});
};
$scope.searchQueries = function (term) {
if (!term || term.length < 3) {
return;
}
Query.search({q: term}, function(results) {
$scope.queries = results;
});
};
$scope.$watch('query', function () {
$scope.loadVisualizations();
}, true);
$scope.saveWidget = function() {
$scope.saveInProgress = true;


@@ -97,14 +97,24 @@
value: '=',
ignoreBlanks: '=',
editable: '=',
done: '='
done: '=',
},
template: function (tElement, tAttrs) {
var elType = tAttrs.editor || 'input';
var placeholder = tAttrs.placeholder || 'Click to edit';
return '<span ng-click="editable && edit()" ng-bind="value" ng-class="{editable: editable}"></span>' +
'<span ng-click="editable && edit()" ng-show="editable && !value" ng-class="{editable: editable}">' + placeholder + '</span>' +
'<{elType} ng-model="value" class="rd-form-control"></{elType}>'.replace('{elType}', elType);
var viewMode = '';
if (tAttrs.markdown == "true") {
viewMode = '<span ng-click="editable && edit()" ng-bind-html="value|markdown" ng-class="{editable: editable}"></span>';
} else {
viewMode = '<span ng-click="editable && edit()" ng-bind="value" ng-class="{editable: editable}"></span>';
}
var placeholderSpan = '<span ng-click="editable && edit()" ng-show="editable && !value" ng-class="{editable: editable}">' + placeholder + '</span>';
var editor = '<{elType} ng-model="value" class="rd-form-control"></{elType}>'.replace('{elType}', elType);
return viewMode + placeholderSpan + editor;
},
link: function ($scope, element, attrs) {
// Let's get a reference to the input element, as we'll want to reference it.
@@ -224,4 +234,17 @@
'</span>'
}
});
// Used instead of autofocus attribute, which doesn't work in Angular as there is no real page load.
directives.directive('autofocus',
['$timeout', function ($timeout) {
return {
link: function (scope, element) {
$timeout(function () {
element[0].focus();
});
}
};
}]
);
})();


@@ -70,6 +70,18 @@ angular.module('redash.filters', []).
.filter('markdown', ['$sce', function ($sce) {
return function (text) {
if (!text) {
return "";
}
return $sce.trustAsHtml(marked(text));
}
}]);
}])
.filter('trustAsHtml', ['$sce', function ($sce) {
return function (text) {
if (!text) {
return "";
}
return $sce.trustAsHtml(text);
}
}]);


@@ -1,9 +1,20 @@
(function () {
'use strict';
var ColorPalette = {
'Blue':'#4572A7',
'Red':'#AA4643',
'Green': '#89A54E',
'Purple': '#80699B',
'Cyan': '#3D96AE',
'Orange': '#DB843D',
'Light Blue': '#92A8CD',
'Lilac': '#A47D7C',
'Light Green': '#B5CA92',
};
Highcharts.setOptions({
colors: ["#4572A7", "#AA4643", "#89A54E", "#80699B", "#3D96AE",
"#DB843D", "#92A8CD", "#A47D7C", "#B5CA92"]
colors: _.values(ColorPalette)
});
var defaultOptions = {
@@ -204,6 +215,7 @@
};
angular.module('highchart', [])
.constant('ColorPalette', ColorPalette)
.directive('chart', ['$timeout', function ($timeout) {
return {
restrict: 'E',
@@ -338,4 +350,4 @@
};
}]);
})();
})();


@@ -1,6 +1,12 @@
(function () {
var Dashboard = function($resource) {
var resource = $resource('/api/dashboards/:slug', {slug: '@slug'});
var resource = $resource('/api/dashboards/:slug', {slug: '@slug'}, {
recent: {
method: 'get',
isArray: true,
url: "/api/dashboards/recent"
}});
resource.prototype.canEdit = function() {
return currentUser.hasPermission('admin') || currentUser.canEdit(this);
}


@@ -1,10 +1,9 @@
(function () {
var notifications = function (Events) {
var notificationService = {};
var lastNotification = null;
notificationService.isSupported = function () {
if (window.webkitNotifications) {
if ("Notification" in window) {
return true;
} else {
console.log("HTML5 notifications are not supported.");
@@ -17,8 +16,12 @@
return;
}
if (!window.webkitNotifications.checkPermission() == 0) { // 0 is PERMISSION_ALLOWED
window.webkitNotifications.requestPermission();
if (Notification.permission !== "granted") {
Notification.requestPermission(function (status) {
if (Notification.permission !== status) {
Notification.permission = status;
}
});
}
}
@@ -27,23 +30,13 @@
return;
}
if (document.webkitVisibilityState && document.webkitVisibilityState == 'visible') {
return;
}
if (lastNotification) {
lastNotification.cancel();
}
var notification = window.webkitNotifications.createNotification('', title, content);
lastNotification = notification;
//using the 'tag' to avoid showing duplicate notifications
var notification = new Notification(title, {'tag': title+content, 'body': content});
notification.onclick = function () {
window.focus();
this.cancel();
Events.record(currentUser, 'click', 'notification');
};
notification.show()
}
return notificationService;


@@ -22,6 +22,8 @@
} else if (_.isString(v) && v.match(/^\d{4}-\d{2}-\d{2}/)) {
row[k] = moment(v);
columnTypes[k] = 'date';
} else if (typeof(v) == 'object' && v !== null) {
row[k] = JSON.stringify(v);
}
}, this);
}, this);
@@ -375,7 +377,18 @@
};
var Query = function ($resource, QueryResult, DataSource) {
var Query = $resource('/api/queries/:id', {id: '@id'});
var Query = $resource('/api/queries/:id', {id: '@id'},
{
search: {
method: 'get',
isArray: true,
url: "/api/queries/search"
},
recent: {
method: 'get',
isArray: true,
url: "/api/queries/recent"
}});
Query.newQuery = function () {
return new Query({


@@ -68,7 +68,6 @@
template: '<filters></filters>\n' + Visualization.renderVisualizationsTemplate,
replace: false,
link: function (scope) {
scope.select2Options = {
width: '50%'
};
@@ -103,7 +102,7 @@
if (filters) {
scope.filters = filters;
if (filters.length) {
if (filters.length && false) {
readURL();
// start watching for changes and update URL


@@ -33,7 +33,7 @@
$scope.chartSeries = [];
$scope.chartOptions = {};
var reloadData = _.throttle(function(data) {
var reloadData = function(data) {
if (!data || ($scope.queryResult && $scope.queryResult.getData()) == null) {
$scope.chartSeries.splice(0, $scope.chartSeries.length);
} else {
@@ -49,8 +49,8 @@
}
$scope.chartSeries.push(_.extend(s, additional));
});
}
}, 500);
};
};
$scope.$watch('options', function (chartOptions) {
if (chartOptions) {
@@ -74,11 +74,13 @@
};
});
chartVisualization.directive('chartEditor', function () {
chartVisualization.directive('chartEditor', function (ColorPalette) {
return {
restrict: 'E',
templateUrl: '/views/visualizations/chart_editor.html',
link: function (scope, element, attrs) {
scope.palette = ColorPalette;
scope.seriesTypes = {
'Line': 'line',
'Column': 'column',
@@ -87,6 +89,8 @@
'Pie': 'pie'
};
scope.globalSeriesType = 'column';
scope.stackingOptions = {
"None": "none",
"Normal": "normal",
@@ -120,6 +124,13 @@
var chartOptionsUnwatch = null,
columnsWatch = null;
scope.$watch('globalSeriesType', function(type, old) {
if (type && old && type !== old && scope.visualization.options.seriesOptions) {
_.each(scope.visualization.options.seriesOptions, function(sOptions) {
sOptions.type = type;
});
}
});
scope.$watch('visualization.type', function (visualizationType) {
if (visualizationType == 'CHART') {
if (scope.visualization.options.series.stacking === null) {
@@ -135,7 +146,9 @@
// TODO: remove uneeded ones?
if (scope.visualization.options.seriesOptions == undefined) {
scope.visualization.options.seriesOptions = {};
scope.visualization.options.seriesOptions = {
type: scope.globalSeriesType
};
};
_.each(scope.series, function(s, i) {
@@ -230,4 +243,4 @@
}
}
});
}());
}());


@@ -0,0 +1,61 @@
'use strict';

(function() {
  var module = angular.module('redash.visualization');

  module.config(['VisualizationProvider', function(VisualizationProvider) {
    var renderTemplate =
      '<counter-renderer ' +
      'options="visualization.options" query-result="queryResult">' +
      '</counter-renderer>';

    var editTemplate = '<counter-editor></counter-editor>';
    var defaultOptions = {};

    VisualizationProvider.registerVisualization({
      type: 'COUNTER',
      name: 'Counter',
      renderTemplate: renderTemplate,
      editorTemplate: editTemplate,
      defaultOptions: defaultOptions
    });
  }]);

  module.directive('counterRenderer', function() {
    return {
      restrict: 'E',
      templateUrl: '/views/visualizations/counter.html',
      link: function($scope, elm, attrs) {
        $scope.visualization.options.rowNumber =
          $scope.visualization.options.rowNumber || 0;

        $scope.$watch('[queryResult && queryResult.getData(), visualization.options]',
          function() {
            var queryData = $scope.queryResult.getData();
            if (queryData) {
              var rowNumber = $scope.visualization.options.rowNumber || 0;
              var counterColName = $scope.visualization.options.counterColName || 'counter';
              var targetColName = $scope.visualization.options.targetColName || 'target';

              $scope.counterValue = queryData[rowNumber][counterColName];
              $scope.targetValue = queryData[rowNumber][targetColName];

              if ($scope.targetValue) {
                $scope.delta = $scope.counterValue - $scope.targetValue;
                $scope.trendPositive = $scope.delta >= 0;
              }
            }
          }, true);
      }
    };
  });

  module.directive('counterEditor', function() {
    return {
      restrict: 'E',
      templateUrl: '/views/visualizations/counter_editor.html'
    };
  });
})();
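
As the renderer above shows, the counter reads its value from the result row at options.rowNumber, falling back to columns named 'counter' and 'target'; when a target value is present it also derives a delta and a positive/negative trend flag for styling.
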


@@ -1,14 +1,15 @@
.main {
max-width: 320px;
margin: 0 auto;
margin-top:20px;
}
.login-or {
position: relative;
font-size: 18px;
color: #aaa;
margin-top: 10px;
margin-bottom: 10px;
margin-top: 20px;
margin-bottom: 20px;
padding-top: 10px;
padding-bottom: 10px;
}
@@ -31,7 +32,9 @@
margin-bottom: 0px !important;
}
/*h3 {*/
/*text-align: center;*/
/*line-height: 300%;*/
/*}*/
img.login-button {
width: 250px;
display: block;
margin-left: auto;
margin-right: auto;
}


@@ -270,6 +270,35 @@ to add those CSS styles here. */
pivot-table-renderer > table, grid-renderer > div, visualization-renderer > div {
overflow: auto;
}
counter-renderer {
display: block;
text-align: center;
}
counter-renderer counter {
margin: 0 auto;
background: #f9f9f9;
padding: 15px 50px;
display: block;;
}
counter-renderer value,
counter-renderer counter-target {
font-size: 80px;
display: block;
}
counter-renderer counter-target {
color: #ccc;
}
counter-renderer counter.positive value {
color: #5cb85c;
}
counter-renderer counter.negative value {
color: #d9534f;
margin-right: 15px;
}
counter-renderer counter-name {
font-size: 40px;
display: block;
}
.rd-widget-textbox p {
margin-bottom: 0;


@@ -14,7 +14,7 @@
</button>
</span>
</h2>
<filters></filters>
<filters ng-if="dashboard.dashboard_filters_enabled"></filters>
</div>
<div class="container" id="dashboard">
@@ -29,7 +29,7 @@
<span ng-hide="currentUser.hasPermission('view_query')">{{query.name}}</span>
<query-link query="query" visualization="widget.visualization" ng-show="currentUser.hasPermission('view_query')"></query-link>
</p>
<div class="text-muted" ng-bind="query.description"></div>
<div class="text-muted" ng-bind-html="query.description | markdown"></div>
</h3>
</div>


@@ -22,22 +22,22 @@
</div>
<div ng-show="isVisualization()">
<p>
<form class="form-inline" role="form" ng-submit="loadVisualizations()">
<div class="form-group">
<input class="form-control" placeholder="Query Id" ng-model="queryId">
</div>
<button type="submit" class="btn btn-primary" ng-disabled="!queryId">
Load visualizations
</button>
</form>
</p>
<div class="form-group">
<ui-select ng-model="query.selected" theme="bootstrap" reset-search-input="false">
<ui-select-match placeholder="Search a query by name">{{$select.selected.name}}</ui-select-match>
<ui-select-choices repeat="q in queries"
refresh="searchQueries($select.search)"
refresh-delay="0">
<div ng-bind-html="q.name | highlight: $select.search | trustAsHtml"></div>
</ui-select-choices>
</ui-select>
</div>
<div ng-show="query">
<div class="form-group">
<label for="">Choose Visualization</label>
<select ng-model="selectedVis" ng-options="vis as vis.name group by vis.type for vis in query.visualizations" class="form-control"></select>
</div>
<div ng-show="selected_query">
<div class="form-group">
<label for="">Choose Visualization</label>
<select ng-model="selectedVis" ng-options="vis as vis.name group by vis.type for vis in selected_query.visualizations" class="form-control"></select>
</div>
</div>
</div>


@@ -0,0 +1,28 @@
<div class="container">
  <div class="row">
    <div class="list-group col-md-6">
      <div class="list-group-item active">
        Recent Dashboards
        <button ng-show="currentUser.hasPermission('create_dashboard')" type="button" class="btn btn-sm btn-link" data-toggle="modal" href="#new_dashboard_dialog" tooltip="New Dashboard"><span class="glyphicon glyphicon-plus-sign"></span></button>
      </div>
      <div class="list-group-item" ng-repeat="dashboard in recentDashboards">
        <button type="button" class="close delete-button" aria-hidden="true" ng-show="dashboard.canEdit()" ng-click="archiveDashboard(dashboard)" tooltip="Delete Dashboard">&times;</button>
        <a ng-href="/dashboard/{{dashboard.slug}}">{{dashboard.name}}</a>
      </div>
    </div>
    <div class="list-group col-md-6">
      <div class="list-group-item active">
        Recent Queries
      </div>
      <a ng-href="/queries/{{query.id}}" class="list-group-item" ng-repeat="query in recentQueries">{{query.name}}</a>
    </div>
  </div>
  <div ng-show="currentUser.hasPermission('admin')" class="row">
    <div class="list-group">
      <div class="list-group-item active">Admin</div>
      <a href="/admin/status" class="list-group-item">Status</a>
    </div>
  </div>
</div>


@@ -0,0 +1,19 @@
<div class="container">
<div class="row">
<p>
<form class="form-inline" role="form" ng-submit="search()">
<div class="form-group">
<input class="form-control" placeholder="Search..." ng-model="term" autofocus>
</div>
<button type="submit" class="btn btn-primary">
<span class="glyphicon glyphicon-search"></span>
</button>
</form>
</p>
<smart-table rows="queries" columns="gridColumns"
config="gridConfig"
class="table table-condensed table-hover"></smart-table>
</div>
</div>


@@ -1,6 +1,7 @@
<div class="container">
<p class="alert alert-warning" ng-if="query.is_archived">This query is archived and can't be used in dashboards, and won't appear in search results.</p>
<alert-unsaved-changes ng-if="canEdit" is-dirty="isDirty"></alert-unsaved-changes>
<div class="row">
@@ -12,7 +13,14 @@
</h2>
<p>
<em>
<edit-in-place editable="isQueryOwner" done="saveDescription" editor="textarea" placeholder="No description" ignore-blanks='false' value="query.description"></edit-in-place>
<edit-in-place editable="isQueryOwner"
done="saveDescription"
editor="textarea"
placeholder="No description"
ignore-blanks='false'
value="query.description"
markdown="true">
</edit-in-place>
</em>
</p>
</div>
@@ -126,6 +134,29 @@
<span class="glyphicon glyphicon-cloud-download"></span>
<span class="rd-hidden-xs">Download Dataset</span>
</a>
<a class="btn btn-warning btn-sm" ng-disabled="queryExecuting" data-toggle="modal" data-target="#archive-confirmation-modal"
ng-show="!query.is_archived && query.id != undefined && (isQueryOwner || currentUser.hasPermission('admin'))">
<i class="fa fa-archive" title="Archive Query"></i>
</a>
<div class="modal fade" id="archive-confirmation-modal" tabindex="-1" role="dialog" aria-labelledby="archiveConfirmationModal" aria-hidden="true">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<h4 class="modal-title">Query Archive</h4>
</div>
<div class="modal-body">
Are you sure you want to archive this query? <br/>
All dashboard widgets created with its visualizations will be deleted.
</div>
<div class="modal-footer">
<button type="button" class="btn btn-default" data-dismiss="modal">No</button>
<button type="button" class="btn btn-primary" ng-click="archiveQuery()">Yes, archive.</button>
</div>
</div>
</div>
</div>
</p>
</div>


@@ -18,6 +18,15 @@
class="form-control"></select>
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-2">Series Type</label>
<div class="col-sm-10">
<select required ng-options="value as key for (key, value) in seriesTypes"
ng-model="globalSeriesType" class="form-control"></select>
</div>
</div>
</div>
</div>
@@ -82,10 +91,17 @@
placeholder="{{seriesName}}">
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-3">Color</label>
<div class="col-sm-9">
<select class="form-control" ng-model="visualization.options.seriesOptions[seriesName].color" ng-options="val as key for (key,val) in palette"></select>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>


@@ -0,0 +1,5 @@
<counter ng-class="{'positive': targetValue && trendPositive, 'negative': targetValue && !trendPositive}">
<value>{{counterValue|number}}</value>
<counter-target ng-if="targetValue">({{targetValue|number}})</counter-target>
<counter-name>{{visualization.name}}</counter-name>
</counter>


@@ -0,0 +1,20 @@
<div class="form-horizontal">
<div class="form-group">
<label class="col-lg-6">Row Number</label>
<div class="col-lg-6">
<input type="number" ng-model="visualization.options.rowNumber" class="form-control">
</div>
</div>
<div class="form-group">
<label class="col-lg-6">Counter Column Name</label>
<div class="col-lg-6">
<select ng-options="name for name in queryResult.columnNames" ng-model="visualization.options.counterColName" class="form-control"></select>
</div>
</div>
<div class="form-group">
<label class="col-lg-6">Target Column Name</label>
<div class="col-lg-6">
<select ng-options="name for name in queryResult.columnNames" ng-model="visualization.options.targetColName" class="form-control"></select>
</div>
</div>
</div>
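Taken together, the renderer and editor above imply a three-key options object for this visualization type; a minimal sketch (key names come from the templates, values are made up):

options = {
    "rowNumber": 1,              # result row the counter reads from
    "counterColName": "count",   # column rendered as the big value
    "targetColName": "target",   # optional column rendered as the (target)
}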


@@ -24,4 +24,4 @@
</div>
</form>
</div>
</div>


@@ -2,7 +2,10 @@
"name": "rdUi",
"version": "0.1.0",
"dependencies": {
"angular": "1.2.7",
"angular": "1.2.18",
"angular-resource": "1.2.18",
"angular-route": "1.2.18",
"angular-growl": "0.4.0",
"json3": "3.2.4",
"jquery": "1.9.1",
"bootstrap": "3.0.0",
@@ -13,9 +16,6 @@
"angular-ui-codemirror": "0.0.5",
"highcharts": "3.0.10",
"underscore": "1.5.1",
"angular-resource": "1.2.15",
"angular-growl": "0.3.1",
"angular-route": "1.2.7",
"pivottable": "~1.1.1",
"cornelius": "https://github.com/restorando/cornelius.git",
"gridster": "0.2.0",
@@ -25,13 +25,15 @@
"underscore.string": "~2.3.3",
"marked": "~0.3.2",
"bucky": "~0.2.6",
"pace": "~0.5.1"
"pace": "~0.5.1",
"angular-ui-select": "0.8.2",
"font-awesome": "~4.2.0"
},
"devDependencies": {
"angular-mocks": "~1.0.7",
"angular-scenario": "~1.0.7"
"angular-mocks": "1.2.18",
"angular-scenario": "1.2.18"
},
"resolutions": {
"angular": "1.2.7"
"angular": "1.2.18"
}
}


@@ -47,6 +47,7 @@ module.exports = function(config) {
'app/bower_components/mousetrap/plugins/global-bind/mousetrap-global-bind.js',
'app/bower_components/select2/select2.js',
'app/bower_components/angular-ui-select2/src/select2.js',
'app/bower_components/angular-ui-select/dist/select.js',
'app/bower_components/underscore.string/lib/underscore.string.js',
'app/bower_components/marked/lib/marked.js',
'app/scripts/ng_highchart.js',


@@ -54,7 +54,7 @@ describe('VisualizationRenderer', function() {
});
describe('URL binding', function() {
/*describe('URL binding', function() {
beforeEach(inject(function($rootScope, $compile, $location) {
spyOn($location, 'search').andCallThrough();
@@ -85,5 +85,5 @@ describe('VisualizationRenderer', function() {
var searchFilters = angular.fromJson($location.search().filters);
expect(searchFilters[filters[0].friendlyName]).toEqual('newValue');
}));
});
});*/
});


@@ -3,9 +3,9 @@ import urlparse
import redis
from statsd import StatsClient
from redash import settings, events
from redash import settings
__version__ = '0.4.0'
__version__ = '0.5.0'
def setup_logging():
@@ -14,8 +14,7 @@ def setup_logging():
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
logging.getLogger().setLevel(settings.LOG_LEVEL)
events.setup_logging(settings.EVENTS_LOG_PATH, settings.EVENTS_CONSOLE_OUTPUT)
logging.getLogger("passlib").setLevel("ERROR")
def create_redis_connection():


@@ -6,10 +6,8 @@ import logging
from flask import request, make_response, redirect, url_for
from flask.ext.login import LoginManager, login_user, current_user
from flask.ext.googleauth import GoogleAuth, login
from werkzeug.contrib.fixers import ProxyFix
from redash import models, settings
from redash import models, settings, google_oauth
login_manager = LoginManager()
logger = logging.getLogger('authentication')
@@ -57,48 +55,15 @@ class HMACAuthentication(object):
return decorated
def validate_email(email):
if not settings.GOOGLE_APPS_DOMAIN:
return True
return email in settings.ALLOWED_EXTERNAL_USERS or email.endswith("@%s" % settings.GOOGLE_APPS_DOMAIN)
def create_and_login_user(app, user):
if not validate_email(user.email):
return
try:
user_object = models.User.get(models.User.email == user.email)
if user_object.name != user.name:
logger.debug("Updating user name (%r -> %r)", user_object.name, user.name)
user_object.name = user.name
user_object.save()
except models.User.DoesNotExist:
logger.debug("Creating user object (%r)", user.name)
user_object = models.User.create(name=user.name, email=user.email, groups = models.User.DEFAULT_GROUPS)
login_user(user_object, remember=True)
login.connect(create_and_login_user)
@login_manager.user_loader
def load_user(user_id):
return models.User.select().where(models.User.id == user_id).first()
def setup_authentication(app):
if settings.GOOGLE_OPENID_ENABLED:
openid_auth = GoogleAuth(app, url_prefix="/google_auth")
# If we don't have a list of external users, we can use Google's federated login, which limits
# the domain with which you can sign in.
if not settings.ALLOWED_EXTERNAL_USERS and settings.GOOGLE_APPS_DOMAIN:
openid_auth._OPENID_ENDPOINT = "https://www.google.com/a/%s/o8/ud?be=o8" % settings.GOOGLE_APPS_DOMAIN
login_manager.init_app(app)
login_manager.anonymous_user = models.AnonymousUser
app.wsgi_app = ProxyFix(app.wsgi_app)
app.secret_key = settings.COOKIE_SECRET
app.register_blueprint(google_oauth.blueprint)
return HMACAuthentication()

redash/cli/__init__.py (new, empty file)


@@ -0,0 +1,60 @@
from flask.ext.script import Manager
from redash import models
manager = Manager(help="Data sources management commands.")
@manager.command
def list():
"""List currently configured data sources"""
for i, ds in enumerate(models.DataSource.select()):
if i > 0:
print "-"*20
print "Id: {}\nName: {}\nType: {}\nOptions: {}".format(ds.id, ds.name, ds.type, ds.options)
@manager.command
def new(name, type, options):
"""Create new data source"""
# TODO: validate it's a valid type and in the future, validate the options.
print "Creating {} data source ({}) with options:\n{}".format(type, name, options)
data_source = models.DataSource.create(name=name,
type=type,
options=options)
print "Id: {}".format(data_source.id)
@manager.command
def delete(name):
"""Deletes data source by name"""
try:
data_source = models.DataSource.get(models.DataSource.name==name)
print "Deleting data source: {} (id={})".format(name, data_source.id)
data_source.delete_instance()
except models.DataSource.DoesNotExist:
print "Couldn't find data source named: {}".format(name)
def update_attr(obj, attr, new_value):
if new_value is not None:
old_value = getattr(obj, attr)
print "Updating {}: {} -> {}".format(attr, old_value, new_value)
setattr(obj, attr, new_value)
@manager.option('name', default=None, help="name of data source to edit")
@manager.option('--name', dest='new_name', default=None, help="new name for the data source")
@manager.option('--options', dest='options', default=None, help="updated options for the data source")
@manager.option('--type', dest='type', default=None, help="new type for the data source")
def edit(name, new_name=None, options=None, type=None):
"""Edit data source settings (name, options, type)"""
try:
data_source = models.DataSource.get(models.DataSource.name==name)
update_attr(data_source, "name", new_name)
update_attr(data_source, "type", type)
update_attr(data_source, "options", options)
data_source.save()
except models.DataSource.DoesNotExist:
print "Couldn't find data source named: {}".format(name)

redash/cli/database.py (new file)

@@ -0,0 +1,19 @@
from flask.ext.script import Manager
manager = Manager(help="Manages the database (create/drop tables).")
@manager.command
def create_tables():
"""Creates the database tables."""
from redash.models import create_db, init_db
create_db(True, False)
init_db()
@manager.command
def drop_tables():
"""Drop the database tables."""
from redash.models import create_db
create_db(False, True)

redash/cli/users.py (new file)

@@ -0,0 +1,74 @@
from flask.ext.script import Manager, prompt_pass
from redash import models
manager = Manager(help="Users management commands.")
@manager.option('email', help="email address of the user to grant admin to")
def grant_admin(email):
try:
user = models.User.get_by_email(email)
user.groups.append('admin')
user.save()
print "User updated."
except models.User.DoesNotExist:
print "User [%s] not found." % email
@manager.option('email', help="User's email")
@manager.option('name', help="User's full name")
@manager.option('--admin', dest='is_admin', action="store_true", default=False, help="set user as admin")
@manager.option('--google', dest='google_auth', action="store_true", default=False, help="user uses Google Auth to login")
@manager.option('--password', dest='password', default=None, help="Password for users who don't use Google Auth (leave blank for prompt).")
@manager.option('--groups', dest='groups', default=models.User.DEFAULT_GROUPS, help="Comma separated list of groups (leave blank for default).")
def create(email, name, groups, is_admin=False, google_auth=False, password=None):
print "Creating user (%s, %s)..." % (email, name)
print "Admin: %r" % is_admin
print "Login with Google Auth: %r\n" % google_auth
if isinstance(groups, basestring):
groups = groups.split(',')
if '' in groups:
groups.remove('') # guard: remove('') raises ValueError when '' is absent
if is_admin:
groups += ['admin']
user = models.User(email=email, name=name, groups=groups)
if not google_auth:
password = password or prompt_pass("Password")
user.hash_password(password)
try:
user.save()
except Exception, e:
print "Failed creating user: %s" % e.message
@manager.option('email', help="email address of user to delete")
def delete(email):
deleted_count = models.User.delete().where(models.User.email == email).execute()
print "Deleted %d users." % deleted_count
@manager.option('password', help="new password for the user")
@manager.option('email', help="email address of the user to change password for")
def password(email, password):
try:
user = models.User.get_by_email(email)
user.hash_password(password)
user.save()
print "User updated."
except models.User.DoesNotExist:
print "User [%s] not found." % email
@manager.command
def list():
"""List all users"""
for i, user in enumerate(models.User.select()):
if i > 0:
print "-"*20
print "Id: {}\nName: {}\nEmail: {}".format(user.id, user.name.encode('utf-8'), user.email)


@@ -10,23 +10,20 @@ import json
import numbers
import cStringIO
import datetime
import logging
from flask import render_template, send_from_directory, make_response, request, jsonify, redirect, \
session, url_for
from flask.ext.restful import Resource, abort
from flask_login import current_user, login_user, logout_user
import sqlparse
import events
from permissions import require_permission
from redash import redis_connection, statsd_client, models, settings, utils, __version__
from redash.wsgi import app, auth, api
from redash.tasks import QueryTask, record_event
from redash.cache import headers as cache_headers
from redash.permissions import require_permission
import logging
from tasks import QueryTask
from cache import headers as cache_headers
@app.route('/ping', methods=['GET'])
def ping():
@@ -38,6 +35,7 @@ def ping():
@app.route('/queries')
@app.route('/queries/<query_id>')
@app.route('/queries/<query_id>/<anything>')
@app.route('/personal')
@app.route('/')
@auth.required
def index(**kwargs):
@@ -69,8 +67,7 @@ def login():
return redirect(request.args.get('next') or '/')
if not settings.PASSWORD_LOGIN_ENABLED:
blueprint = app.extensions['googleauth'].blueprint
return redirect(url_for("%s.login" % blueprint.name, next=request.args.get('next')))
return redirect(url_for("google_oauth.authorize", next=request.args.get('next')))
if request.method == 'POST':
user = models.User.select().where(models.User.email == request.form['username']).first()
@@ -84,7 +81,7 @@ def login():
analytics=settings.ANALYTICS,
next=request.args.get('next'),
username=request.form.get('username', ''),
show_google_openid=settings.GOOGLE_OPENID_ENABLED)
show_google_openid=settings.GOOGLE_OAUTH_ENABLED)
@app.route('/logout')
@@ -104,6 +101,7 @@ def status_api():
status['version'] = __version__
status['queries_count'] = models.Query.select().count()
status['query_results_count'] = models.QueryResult.select().count()
status['unused_query_results_count'] = models.QueryResult.unused().count()
status['dashboards_count'] = models.Dashboard.select().count()
status['widgets_count'] = models.Widget.select().count()
@@ -111,7 +109,6 @@ def status_api():
manager_status = redis_connection.hgetall('redash:status')
status['manager'] = manager_status
status['manager']['queue_size'] = redis_connection.llen('queries') + redis_connection.llen('scheduled_queries')
status['manager']['outdated_queries_count'] = models.Query.outdated_queries().count()
queues = {}
@@ -160,7 +157,7 @@ class EventAPI(BaseResource):
def post(self):
events_list = request.get_json(force=True)
for event in events_list:
events.record_event(event)
record_event.delay(event)
api.add_resource(EventAPI, '/api/events', endpoint='events')
@@ -185,6 +182,11 @@ class DataSourceListAPI(BaseResource):
api.add_resource(DataSourceListAPI, '/api/data_sources', endpoint='data_sources')
class DashboardRecentAPI(BaseResource):
def get(self):
return [d.to_dict() for d in models.Dashboard.recent(current_user.id).limit(20)]
class DashboardListAPI(BaseResource):
def get(self):
dashboards = [d.to_dict() for d in
@@ -229,6 +231,7 @@ class DashboardAPI(BaseResource):
dashboard.save()
api.add_resource(DashboardListAPI, '/api/dashboards', endpoint='dashboards')
api.add_resource(DashboardRecentAPI, '/api/dashboards/recent', endpoint='recent_dashboards')
api.add_resource(DashboardAPI, '/api/dashboards/<dashboard_slug>', endpoint='dashboard')
@@ -268,19 +271,26 @@ class WidgetAPI(BaseResource):
@require_permission('edit_dashboard')
def delete(self, widget_id):
widget = models.Widget.get(models.Widget.id == widget_id)
# TODO: reposition existing ones
layout = json.loads(widget.dashboard.layout)
layout = map(lambda row: filter(lambda w: w != widget_id, row), layout)
layout = filter(lambda row: len(row) > 0, layout)
widget.dashboard.layout = json.dumps(layout)
widget.dashboard.save()
widget.delete_instance()
api.add_resource(WidgetListAPI, '/api/widgets', endpoint='widgets')
api.add_resource(WidgetAPI, '/api/widgets/<int:widget_id>', endpoint='widget')
class QuerySearchAPI(BaseResource):
@require_permission('view_query')
def get(self):
term = request.args.get('q', '')
return [q.to_dict() for q in models.Query.search(term)]
class QueryRecentAPI(BaseResource):
@require_permission('view_query')
def get(self):
return [q.to_dict() for q in models.Query.recent(current_user.id).limit(20)]
class QueryListAPI(BaseResource):
@require_permission('create_query')
def post(self):
@@ -305,6 +315,8 @@ class QueryListAPI(BaseResource):
class QueryAPI(BaseResource):
@require_permission('edit_query')
def post(self, query_id):
query = models.Query.get_by_id(query_id)
query_def = request.get_json(force=True)
for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'user']:
query_def.pop(field, None)
@@ -329,6 +341,20 @@ class QueryAPI(BaseResource):
else:
abort(404, message="Query not found.")
# TODO: move to resource of its own? (POST /queries/{id}/archive)
def delete(self, query_id):
q = models.Query.get(models.Query.id == query_id)
if q:
if q.user.id == self.current_user.id or self.current_user.has_permission('admin'):
q.archive()
else:
self.delete_others_query(query_id)
else:
abort(404, message="Query not found.")
api.add_resource(QuerySearchAPI, '/api/queries/search', endpoint='queries_search')
api.add_resource(QueryRecentAPI, '/api/queries/recent', endpoint='recent_queries')
api.add_resource(QueryListAPI, '/api/queries', endpoint='queries')
api.add_resource(QueryAPI, '/api/queries/<query_id>', endpoint='query')
@@ -339,7 +365,7 @@ class VisualizationListAPI(BaseResource):
kwargs = request.get_json(force=True)
kwargs['options'] = json.dumps(kwargs['options'])
kwargs['query'] = kwargs.pop('query_id')
vis = models.Visualization(**kwargs)
vis.save()
@@ -414,48 +440,52 @@ class QueryResultListAPI(BaseResource):
class QueryResultAPI(BaseResource):
@require_permission('view_query')
def get(self, query_result_id):
query_result = models.QueryResult.get_by_id(query_result_id)
if query_result:
data = json.dumps({'query_result': query_result.to_dict()}, cls=utils.JSONEncoder)
return make_response(data, 200, cache_headers)
else:
abort(404)
@staticmethod
def csv_response(query_result):
s = cStringIO.StringIO()
query_data = json.loads(query_result.data)
writer = csv.DictWriter(s, fieldnames=[col['name'] for col in query_data['columns']])
writer.writer = utils.UnicodeWriter(s)
writer.writeheader()
for row in query_data['rows']:
for k, v in row.iteritems():
if isinstance(v, numbers.Number) and (v > 1000 * 1000 * 1000 * 100):
row[k] = datetime.datetime.fromtimestamp(v/1000.0)
writer.writerow(row)
headers = {'Content-Type': "text/csv; charset=UTF-8"}
headers.update(cache_headers)
return make_response(s.getvalue(), 200, headers)
class CsvQueryResultsAPI(BaseResource):
@require_permission('view_query')
def get(self, query_id, query_result_id=None):
if not query_result_id:
def get(self, query_id=None, query_result_id=None, filetype='json'):
if query_result_id is None and query_id is not None:
query = models.Query.get(models.Query.id == query_id)
if query:
query_result_id = query._data['latest_query_data']
query_result = query_result_id and models.QueryResult.get_by_id(query_result_id)
if query_result_id:
query_result = models.QueryResult.get_by_id(query_result_id)
if query_result:
s = cStringIO.StringIO()
if filetype == 'json':
data = json.dumps({'query_result': query_result.to_dict()}, cls=utils.JSONEncoder)
return make_response(data, 200, cache_headers)
else:
return self.csv_response(query_result)
query_data = json.loads(query_result.data)
writer = csv.DictWriter(s, fieldnames=[col['name'] for col in query_data['columns']])
writer.writer = utils.UnicodeWriter(s)
writer.writeheader()
for row in query_data['rows']:
for k, v in row.iteritems():
if isinstance(v, numbers.Number) and (v > 1000 * 1000 * 1000 * 100):
row[k] = datetime.datetime.fromtimestamp(v/1000.0)
writer.writerow(row)
return make_response(s.getvalue(), 200, {'Content-Type': "text/csv; charset=UTF-8"})
else:
abort(404)
api.add_resource(CsvQueryResultsAPI, '/api/queries/<query_id>/results/<query_result_id>.csv',
'/api/queries/<query_id>/results.csv',
endpoint='csv_query_results')
api.add_resource(QueryResultListAPI, '/api/query_results', endpoint='query_results')
api.add_resource(QueryResultAPI, '/api/query_results/<query_result_id>', endpoint='query_result')
api.add_resource(QueryResultAPI,
'/api/query_results/<query_result_id>',
'/api/queries/<query_id>/results.<filetype>',
'/api/queries/<query_id>/results/<query_result_id>.<filetype>',
endpoint='query_result')
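# With the routes consolidated into a single resource above, both formats are
# served by one handler. Illustrative client calls (host, ids and the use of
# the `requests` library are assumptions; authentication is omitted):
import requests

base = "http://localhost:5000"
requests.get(base + "/api/query_results/17")          # a specific result, as JSON
requests.get(base + "/api/queries/5/results.json")    # latest result of query 5
requests.get(base + "/api/queries/5/results.csv")     # same result, rendered as CSV
requests.get(base + "/api/queries/5/results/17.csv")  # a specific result, as CSV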
class JobAPI(BaseResource):


@@ -23,8 +23,12 @@ def get_query_runner(connection_type, connection_string):
elif connection_type == 'url':
from redash.data import query_runner_url
runner = query_runner_url.url(connection_string)
elif connection_type == "mongo":
from redash.data import query_runner_mongodb
connection_params = json.loads(connection_string)
runner = query_runner_mongodb.mongodb(connection_params)
else:
from redash.data import query_runner_pg
runner = query_runner_pg.pg(connection_string)
return runner


@@ -0,0 +1,242 @@
import datetime
import logging
import json
import sys
import re
import time
from redash.utils import JSONEncoder
try:
import pymongo
from bson.objectid import ObjectId
from bson.son import SON
except ImportError:
print "Missing dependencies. Please install pymongo."
print "You can use pip: pip install pymongo"
raise
TYPES_MAP = {
ObjectId : "string",
str : "string",
unicode : "string",
int : "integer",
long : "integer",
float : "float",
bool : "boolean",
datetime.datetime: "datetime",
}
date_regex = re.compile("ISODate\(\"(.*)\"\)", re.IGNORECASE)
# Simple query example:
#
# {
# "collection" : "my_collection",
# "query" : {
# "date" : {
# "$gt" : "ISODate(\"2015-01-15 11:41\")",
# },
# "type" : 1
# },
# "fields" : {
# "_id" : 1,
# "name" : 2
# },
# "sort" : [
# {
# "name" : "date",
# "direction" : -1
# }
# ]
#
# }
#
#
# Aggregation
# ===========
# Uses a syntax similar to PyMongo's; however, to preserve the sort order,
# the "$sort" operation takes a regular list, which is converted into a SON
# (sorted dictionary) object before execution.
#
# Aggregation query example:
#
# {
# "collection" : "things",
# "aggregate" : [
# {
# "$unwind" : "$tags"
# },
# {
# "$group" : {
# {
# "_id" : "$tags",
# "count" : { "$sum" : 1 }
# }
# }
# },
# {
# "$sort" : [
# {
# "name" : "count",
# "direction" : -1
# },
# {
# "name" : "_id",
# "direction" : -1
# }
# ]
# }
# ]
# }
#
#
def mongodb(connection_string):
def _get_column_by_name(columns, column_name):
for c in columns:
if "name" in c and c["name"] == column_name:
return c
return None
def _convert_date(q, field_name):
m = date_regex.findall(q[field_name])
if len(m) > 0:
if q[field_name].find(":") == -1:
q[field_name] = datetime.datetime.fromtimestamp(time.mktime(time.strptime(m[0], "%Y-%m-%d")))
else:
q[field_name] = datetime.datetime.fromtimestamp(time.mktime(time.strptime(m[0], "%Y-%m-%d %H:%M")))
def query_runner(query):
if not "dbName" in connection_string or not connection_string["dbName"]:
return None, "dbName is missing from connection string JSON or is empty"
db_name = connection_string["dbName"]
if not "connectionString" in connection_string or not connection_string["connectionString"]:
return None, "connectionString is missing from connection string JSON or is empty"
is_replica_set = bool(connection_string.get("replicaSetName"))
if is_replica_set:
db_connection = pymongo.MongoReplicaSetClient(connection_string["connectionString"], replicaSet=connection_string["replicaSetName"])
else:
db_connection = pymongo.MongoClient(connection_string["connectionString"])
if db_name not in db_connection.database_names():
return None, "Unknown database name '%s'" % db_name
db = db_connection[db_name]
logging.debug("mongodb connection string: %s", connection_string)
logging.debug("mongodb got query: %s", query)
try:
query_data = json.loads(query)
except:
return None, "Invalid query format. The query is not a valid JSON."
if "query" in query_data and "aggregate" in query_data:
return None, "'query' and 'aggregate' sections cannot be used at the same time"
if "collection" not in query_data:
return None, "'collection' must be set"
collection = query_data["collection"]
q = None
if "query" in query_data:
q = query_data["query"]
for k in q:
if q[k] and type(q[k]) in [str, unicode]:
logging.debug(q[k])
_convert_date(q, k)
elif q[k] and type(q[k]) is dict:
for k2 in q[k]:
if type(q[k][k2]) in [str, unicode]:
_convert_date(q[k], k2)
f = None
aggregate = None
if "aggregate" in query_data:
aggregate = query_data["aggregate"]
for step in aggregate:
if "$sort" in step:
sort_list = []
for sort_item in step["$sort"]:
sort_list.append((sort_item["name"], sort_item["direction"]))
step["$sort"] = SON(sort_list)
if not aggregate:
s = None
if "sort" in query_data and query_data["sort"]:
s = []
for field in query_data["sort"]:
s.append((field["name"], field["direction"]))
if "fields" in query_data:
f = query_data["fields"]
columns = []
rows = []
error = None
json_data = None
cursor = None
if q or not aggregate:
if s:
cursor = db[collection].find(q, f).sort(s)
else:
cursor = db[collection].find(q, f)
if "skip" in query_data:
cursor = cursor.skip(query_data["skip"])
if "limit" in query_data:
cursor = cursor.limit(query_data["limit"])
elif aggregate:
r = db[collection].aggregate(aggregate)
cursor = r["result"]
for r in cursor:
for k in r:
if _get_column_by_name(columns, k) is None:
columns.append({
"name": k,
"friendly_name": k,
"type": TYPES_MAP[type(r[k])] if type(r[k]) in TYPES_MAP else None
})
# Convert ObjectId to string
if type(r[k]) == ObjectId:
r[k] = str(r[k])
rows.append(r)
if f:
ordered_columns = []
for k in sorted(f, key=f.get):
ordered_columns.append(_get_column_by_name(columns, k))
columns = ordered_columns
data = {
"columns": columns,
"rows": rows
}
error = None
json_data = json.dumps(data, cls=JSONEncoder)
return json_data, error
query_runner.annotate_query = False
return query_runner
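A minimal sketch of driving the runner above; the connection details are placeholders (a reachable MongoDB is required), and the query follows the simple-query example from the comments at the top of the file:

import json

connection_params = {
    "connectionString": "mongodb://localhost:27017",   # placeholder
    "dbName": "analytics",                             # placeholder
}
run = mongodb(connection_params)

simple_query = json.dumps({
    "collection": "events",
    "query": {"type": 1},
    "fields": {"_id": 1, "name": 2},
    "sort": [{"name": "date", "direction": -1}],
    "limit": 10,
})
json_data, error = run(simple_query)   # returns (serialized rows/columns, error)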


@@ -18,7 +18,7 @@ def mysql(connection_string):
def query_runner(query):
connections_params = [entry.split('=')[1] for entry in connection_string.split(';')]
connection = MySQLdb.connect(*connections_params)
connection = MySQLdb.connect(*connections_params, charset="utf8", use_unicode=True)
cursor = connection.cursor()
logging.debug("mysql got query: %s", query)
@@ -61,4 +61,4 @@ def mysql(connection_string):
return json_data, error
return query_runner


@@ -17,6 +17,9 @@ def script(connection_string):
json_data = None
error = None
if connection_string is None:
return None, "script execution path is not set. Please reconfigure the data source"
# Poor man's protection against running scripts from outside the scripts directory
if connection_string.find("../") > -1:
return None, "Scripts can only be run from the configured scripts directory"


@@ -1,23 +0,0 @@
import logging
import json
logger = logging.getLogger("redash.events")
logger.propagate = False
def setup_logging(log_path, console_output=False):
if log_path:
fh = logging.FileHandler(log_path)
formatter = logging.Formatter('%(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
if console_output:
handler = logging.StreamHandler()
formatter = logging.Formatter('[%(name)s] %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
def record_event(event):
logger.info(json.dumps(event))

redash/google_oauth.py (new file)

@@ -0,0 +1,81 @@
import logging
from flask.ext.login import login_user
import requests
from flask import redirect, url_for, Blueprint
from flask_oauth import OAuth
from redash import models, settings
logger = logging.getLogger('google_oauth')
oauth = OAuth()
request_token_params = {'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile', 'response_type': 'code'}
if settings.GOOGLE_APPS_DOMAIN:
request_token_params['hd'] = settings.GOOGLE_APPS_DOMAIN
else:
logger.warning("No Google Apps domain defined, all Google accounts allowed.")
google = oauth.remote_app('google',
base_url='https://www.google.com/accounts/',
authorize_url='https://accounts.google.com/o/oauth2/auth',
request_token_url=None,
request_token_params=request_token_params,
access_token_url='https://accounts.google.com/o/oauth2/token',
access_token_method='POST',
access_token_params={'grant_type': 'authorization_code'},
consumer_key=settings.GOOGLE_CLIENT_ID,
consumer_secret=settings.GOOGLE_CLIENT_SECRET)
blueprint = Blueprint('google_oauth', __name__)
def get_user_profile(access_token):
headers = {'Authorization': 'OAuth '+access_token}
response = requests.get('https://www.googleapis.com/oauth2/v1/userinfo', headers=headers)
if response.status_code == 401:
logger.warning("Failed getting user profile (response code 401).")
return None
return response.json()
def create_and_login_user(name, email):
try:
user_object = models.User.get(models.User.email == email)
if user_object.name != name:
logger.debug("Updating user name (%r -> %r)", user_object.name, name)
user_object.name = name
user_object.save()
except models.User.DoesNotExist:
logger.debug("Creating user object (%r)", name)
user_object = models.User.create(name=name, email=email, groups=models.User.DEFAULT_GROUPS)
login_user(user_object, remember=True)
@blueprint.route('/oauth/google', endpoint="authorize")
def login():
# TODO: support the 'next' parameter
callback = url_for('.callback', _external=True)
logger.debug("Callback url: %s", callback)
return google.authorize(callback=callback)
@blueprint.route('/oauth/google_callback', endpoint="callback")
@google.authorized_handler
def authorized(resp):
access_token = resp['access_token']
if access_token is None:
logger.warning("Access token missing in call back request.")
return redirect(url_for('login'))
profile = get_user_profile(access_token)
if profile is None:
return redirect(url_for('login'))
create_and_login_user(profile['name'], profile['email'])
return redirect(url_for('index'))
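The flow above pulls its credentials from redash.settings; per the settings diff further down, those come from environment variables. Placeholder values only (these must be set before redash.settings is imported):

import os

os.environ["REDASH_GOOGLE_CLIENT_ID"] = "1234.apps.googleusercontent.com"   # placeholder
os.environ["REDASH_GOOGLE_CLIENT_SECRET"] = "not-a-real-secret"             # placeholder
os.environ["REDASH_GOOGLE_APPS_DOMAIN"] = "example.com"   # optional sign-in restriction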


@@ -1,8 +1,11 @@
import contextlib
import json
import logging
import os
from redash import models
from flask.ext.script import Manager
logger = logging.getLogger()
class Importer(object):
def __init__(self, object_mapping=None, data_source=None):
@@ -22,22 +25,17 @@ class Importer(object):
return query_result
def import_query(self, user, query):
query_result = self.import_query_result(query['latest_query_data'])
new_query = self._get_or_create(models.Query, query['id'], name=query['name'],
user=user,
ttl=-1,
query=query['query'],
query_hash=query['query_hash'],
description=query['description'],
latest_query_data=query_result,
data_source=self.data_source)
return new_query
def import_visualization(self, user, visualization):
query = self.import_query(user, visualization['query'])
@@ -50,9 +48,13 @@ class Importer(object):
return new_visualization
def import_widget(self, dashboard, widget):
visualization = self.import_visualization(dashboard.user, widget['visualization'])
if 'visualization' in widget:
visualization = self.import_visualization(dashboard.user, widget['visualization'])
else:
visualization = None
new_widget = self._get_or_create(models.Widget, widget['id'],
text=widget.get('text', None),
dashboard=dashboard,
width=widget['width'],
options=json.dumps(widget['options']),
@@ -91,6 +93,7 @@ class Importer(object):
def _get_or_create(self, object_type, external_id, **properties):
internal_id = self._get_mapping(object_type, external_id)
logger.info("Creating %s with external id: %s and internal id: %s", object_type, external_id, internal_id)
if internal_id:
update = object_type.update(**properties).where(object_type.id == internal_id)
update.execute()
@@ -114,11 +117,21 @@ export_manager = Manager(help="export utilities")
@contextlib.contextmanager
def importer_with_mapping_file(mapping_filename):
def importer_with_mapping_file(mapping_filename, data_source_id=None):
# Touch the file in case it doesn't exist
if not os.path.isfile(mapping_filename):
with open(mapping_filename, 'w') as f:
f.write("{}")
with open(mapping_filename) as f:
mapping = json.loads(f.read())
importer = Importer(object_mapping=mapping, data_source=get_data_source())
if data_source_id is not None:
data_source = models.DataSource.get_by_id(data_source_id)
else:
data_source = get_data_source()
importer = Importer(object_mapping=mapping, data_source=data_source)
yield importer
with open(mapping_filename, 'w') as f:
@@ -146,12 +159,13 @@ def query(mapping_filename, query_filename, user_id):
@import_manager.command
def dashboard(mapping_filename, dashboard_filename, user_id):
def dashboard(mapping_filename, dashboard_filename, user_id, data_source_id=None):
user = models.User.get_by_id(user_id)
with open(dashboard_filename) as f:
dashboard = json.loads(f.read())
with importer_with_mapping_file(mapping_filename) as importer:
with importer_with_mapping_file(mapping_filename, data_source_id) as importer:
importer.import_dashboard(user, dashboard)


@@ -60,13 +60,26 @@ class BaseModel(peewee.Model):
return cls.get(cls.id == model_id)
class AnonymousUser(AnonymousUserMixin):
class PermissionsCheckMixin(object):
def has_permission(self, permission):
return self.has_permissions((permission,))
def has_permissions(self, permissions):
has_permissions = reduce(lambda a, b: a and b,
map(lambda permission: permission in self.permissions,
permissions),
True)
return has_permissions
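# The reduce above is an AND-fold over membership tests (equivalent to all()).
# Illustration with a made-up class:
class _StubUser(PermissionsCheckMixin):        # illustrative only
    permissions = ['view_query', 'edit_query']

assert _StubUser().has_permission('view_query')
assert _StubUser().has_permissions(('view_query', 'edit_query'))
assert not _StubUser().has_permissions(('view_query', 'admin'))  # 'admin' missing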
class AnonymousUser(AnonymousUserMixin, PermissionsCheckMixin):
@property
def permissions(self):
return []
class ApiUser(UserMixin):
class ApiUser(UserMixin, PermissionsCheckMixin):
def __init__(self, api_key):
self.id = api_key
@@ -101,7 +114,7 @@ class Group(BaseModel):
return unicode(self.id)
class User(BaseModel, UserMixin):
class User(BaseModel, UserMixin, PermissionsCheckMixin):
DEFAULT_GROUPS = ['default']
id = peewee.PrimaryKeyField()
@@ -225,6 +238,15 @@ class QueryResult(BaseModel):
'retrieved_at': self.retrieved_at
}
@classmethod
def unused(cls):
week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
unused_results = cls.select().where(Query.id == None, cls.retrieved_at < week_ago)\
.join(Query, join_type=peewee.JOIN_LEFT_OUTER)
return unused_results
@classmethod
def get_latest(cls, data_source, query, ttl=0):
query_hash = utils.gen_query_hash(query)
@@ -274,6 +296,7 @@ class Query(BaseModel):
ttl = peewee.IntegerField()
user_email = peewee.CharField(max_length=360, null=True)
user = peewee.ForeignKeyField(User)
is_archived = peewee.BooleanField(default=False, index=True)
created_at = peewee.DateTimeField(default=datetime.datetime.now)
class Meta:
@@ -295,6 +318,7 @@ class Query(BaseModel):
'query_hash': self.query_hash,
'ttl': self.ttl,
'api_key': self.api_key,
'is_archived': self.is_archived,
'created_at': self.created_at,
'data_source_id': self._data.get('data_source', None)
}
@@ -305,11 +329,8 @@ class Query(BaseModel):
d['user_id'] = self._data['user']
if with_stats:
d['avg_runtime'] = self.avg_runtime
d['min_runtime'] = self.min_runtime
d['max_runtime'] = self.max_runtime
d['last_retrieved_at'] = self.last_retrieved_at
d['times_retrieved'] = self.times_retrieved
d['retrieved_at'] = self.retrieved_at
d['runtime'] = self.runtime
if with_visualizations:
d['visualizations'] = [vis.to_dict(with_query=False)
@@ -317,17 +338,24 @@ class Query(BaseModel):
return d
def archive(self):
self.is_archived = True
self.ttl = -1
for vis in self.visualizations:
for w in vis.widgets:
w.delete_instance()
self.save()
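# Sketch of archive()'s intended effect (id illustrative; assumes a configured DB):
query = Query.get_by_id(7)
query.archive()
assert query.is_archived       # hidden from search and dashboards
assert query.ttl == -1         # never considered outdated, so never re-run
# widgets built from its visualizations were deleted by the loop above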
@classmethod
def all_queries(cls):
q = Query.select(Query, User,
peewee.fn.Count(QueryResult.id).alias('times_retrieved'),
peewee.fn.Avg(QueryResult.runtime).alias('avg_runtime'),
peewee.fn.Min(QueryResult.runtime).alias('min_runtime'),
peewee.fn.Max(QueryResult.runtime).alias('max_runtime'),
peewee.fn.Max(QueryResult.retrieved_at).alias('last_retrieved_at'))\
q = Query.select(Query, User, QueryResult.retrieved_at, QueryResult.runtime)\
.join(QueryResult, join_type=peewee.JOIN_LEFT_OUTER)\
.switch(Query).join(User)\
.group_by(Query.id, User.id)
.where(Query.is_archived==False)\
.group_by(Query.id, User.id, QueryResult.id, QueryResult.retrieved_at, QueryResult.runtime)\
.order_by(cls.created_at.desc())
return q
@@ -339,6 +367,7 @@ class Query(BaseModel):
peewee.Func('first_value', cls.id).over(partition_by=[cls.query_hash, cls.data_source])) \
.join(QueryResult) \
.where(cls.ttl > 0,
cls.is_archived==False,
(QueryResult.retrieved_at +
(cls.ttl * peewee.SQL("interval '1 second'"))) <
peewee.SQL("(now() at time zone 'utc')"))
@@ -348,6 +377,31 @@ class Query(BaseModel):
return queries
@classmethod
def search(cls, term):
# This is very naive implementation of search, to be replaced with PostgreSQL full-text-search solution.
where = (cls.name**"%{}%".format(term)) | (cls.description**"%{}%".format(term))
if term.isdigit():
where |= cls.id == term
where &= cls.is_archived == False
return cls.select().where(where).order_by(cls.created_at.desc())
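# Illustrative calls (assuming peewee's ** operator maps to a case-insensitive
# LIKE on Postgres):
list(Query.search('revenue'))   # name or description matching %revenue%
list(Query.search('42'))        # same, plus an exact match on id = 42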
@classmethod
def recent(cls, user_id):
return cls.select().where(Event.created_at > peewee.SQL("current_date - 7")).\
join(Event, on=(Query.id == peewee.SQL("t2.object_id::integer"))).\
where(Event.action << ('edit', 'execute', 'edit_name', 'edit_description', 'view_source')).\
where(Event.user == user_id).\
where(~(Event.object_id >> None)).\
where(Event.object_type == 'query'). \
where(cls.is_archived == False).\
group_by(Event.object_id, Query.id).\
order_by(peewee.SQL("count(0) desc"))
@classmethod
def update_instance(cls, query_id, **kwargs):
if 'query' in kwargs:
@@ -366,6 +420,14 @@ class Query(BaseModel):
self.api_key = hashlib.sha1(
u''.join((str(time.time()), self.query, str(self._data['user']), self.name)).encode('utf-8')).hexdigest()
@property
def runtime(self):
return self.latest_query_data.runtime
@property
def retrieved_at(self):
return self.latest_query_data.retrieved_at
def __unicode__(self):
return unicode(self.id)
@@ -390,6 +452,7 @@ class Dashboard(BaseModel):
if with_widgets:
widgets = Widget.select(Widget, Visualization, Query, User)\
.where(Widget.dashboard == self.id)\
.where(Query.is_archived == False)\
.join(Visualization, join_type=peewee.JOIN_LEFT_OUTER)\
.join(Query, join_type=peewee.JOIN_LEFT_OUTER)\
.join(User, join_type=peewee.JOIN_LEFT_OUTER)
@@ -428,6 +491,17 @@ class Dashboard(BaseModel):
def get_by_slug(cls, slug):
return cls.get(cls.slug == slug)
@classmethod
def recent(cls, user_id):
return cls.select().where(Event.created_at > peewee.SQL("current_date - 7")). \
join(Event, on=(Dashboard.id == peewee.SQL("t2.object_id::integer"))). \
where(Event.action << ('edit', 'view')).\
where(Event.user == user_id). \
where(~(Event.object_id >> None)). \
where(Event.object_type == 'dashboard'). \
group_by(Event.object_id, Dashboard.id). \
order_by(peewee.SQL("count(0) desc"))
def save(self, *args, **kwargs):
if not self.slug:
self.slug = utils.slugify(self.name)
@@ -501,10 +575,17 @@ class Widget(BaseModel):
d['visualization'] = self.visualization.to_dict()
return d
def __unicode__(self):
return u"%s" % self.id
def delete_instance(self, *args, **kwargs):
layout = json.loads(self.dashboard.layout)
layout = map(lambda row: filter(lambda w: w != self.id, row), layout)
layout = filter(lambda row: len(row) > 0, layout)
self.dashboard.layout = json.dumps(layout)
self.dashboard.save()
super(Widget, self).delete_instance(*args, **kwargs)
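# Worked example of the layout cleanup in delete_instance above (Python 2):
import json
layout = json.loads('[[1, 2], [3], [4, 5]]')    # rows of widget ids
widget_id = 3
layout = map(lambda row: filter(lambda w: w != widget_id, row), layout)
layout = filter(lambda row: len(row) > 0, layout)
print json.dumps(layout)                        # -> [[1, 2], [4, 5]]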
class Event(BaseModel):
user = peewee.ForeignKeyField(User, related_name="events")
@@ -520,6 +601,21 @@ class Event(BaseModel):
def __unicode__(self):
return u"%s,%s,%s,%s" % (self._data['user'], self.action, self.object_type, self.object_id)
@classmethod
def record(cls, event):
user = event.pop('user_id')
action = event.pop('action')
object_type = event.pop('object_type')
object_id = event.pop('object_id', None)
created_at = datetime.datetime.utcfromtimestamp(event.pop('timestamp'))
additional_properties = json.dumps(event)
event = cls.create(user=user, action=action, object_type=object_type, object_id=object_id,
additional_properties=additional_properties, created_at=created_at)
return event
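# Shape of the dict Event.record consumes (field names from the method above,
# values made up; assumes a configured DB; extra keys land in additional_properties):
sample_event = {
    'user_id': 1,
    'action': 'execute',
    'object_type': 'query',
    'object_id': 42,
    'timestamp': 1421931600,    # unix seconds -> created_at via utcfromtimestamp
    'browser': 'Chrome',
}
Event.record(dict(sample_event))  # record() pops keys, so pass a copy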
all_models = (DataSource, User, QueryResult, Query, Dashboard, Visualization, Widget, ActivityLog, Group, Event)
@@ -536,7 +632,6 @@ def create_db(create_tables, drop_tables):
if drop_tables and model.table_exists():
# TODO: submit PR to peewee to allow passing cascade option to drop_table.
db.database.execute_sql('DROP TABLE %s CASCADE' % model._meta.db_table)
#model.drop_table()
if create_tables and not model.table_exists():
model.create_table()


@@ -10,10 +10,7 @@ class require_permissions(object):
def __call__(self, fn):
@functools.wraps(fn)
def decorated(*args, **kwargs):
has_permissions = reduce(lambda a, b: a and b,
map(lambda permission: permission in current_user.permissions,
self.permissions),
True)
has_permissions = current_user.has_permissions(self.permissions)
if has_permissions:
return fn(*args, **kwargs)


@@ -56,21 +56,26 @@ CELERY_BROKER = os.environ.get("REDASH_CELERY_BROKER", REDIS_URL)
CELERY_BACKEND = os.environ.get("REDASH_CELERY_BACKEND", REDIS_URL)
CELERY_FLOWER_URL = os.environ.get("REDASH_CELERY_FLOWER_URL", "/flower")
# The following enables a periodic job (every 5 minutes) that removes unused query results. Kept behind this
# "feature flag" until proven safe.
QUERY_RESULTS_CLEANUP_ENABLED = parse_boolean(os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_ENABLED", "false"))
# Google Apps domain to allow access from; any user with email in this Google Apps will be allowed
# access
GOOGLE_APPS_DOMAIN = os.environ.get("REDASH_GOOGLE_APPS_DOMAIN", "")
GOOGLE_OPENID_ENABLED = parse_boolean(os.environ.get("REDASH_GOOGLE_OPENID_ENABLED", "true"))
PASSWORD_LOGIN_ENABLED = parse_boolean(os.environ.get("REDASH_PASSWORD_LOGIN_ENABLED", "false"))
ALLOWED_EXTERNAL_USERS = array_from_string(os.environ.get("REDASH_ALLOWED_EXTERNAL_USERS", ''))
GOOGLE_CLIENT_ID = os.environ.get("REDASH_GOOGLE_CLIENT_ID", "")
GOOGLE_CLIENT_SECRET = os.environ.get("REDASH_GOOGLE_CLIENT_SECRET", "")
GOOGLE_OAUTH_ENABLED = GOOGLE_CLIENT_ID and GOOGLE_CLIENT_SECRET
PASSWORD_LOGIN_ENABLED = parse_boolean(os.environ.get("REDASH_PASSWORD_LOGIN_ENABLED", "true"))
STATIC_ASSETS_PATH = fix_assets_path(os.environ.get("REDASH_STATIC_ASSETS_PATH", "../rd_ui/app/"))
WORKERS_COUNT = int(os.environ.get("REDASH_WORKERS_COUNT", "2"))
JOB_EXPIRY_TIME = int(os.environ.get("REDASH_JOB_EXPIRY_TIME", 3600*24))
JOB_EXPIRY_TIME = int(os.environ.get("REDASH_JOB_EXPIRY_TIME", 3600*6))
COOKIE_SECRET = os.environ.get("REDASH_COOKIE_SECRET", "c292a0a3aa32397cdb050e233733900f")
LOG_LEVEL = os.environ.get("REDASH_LOG_LEVEL", "INFO")
EVENTS_LOG_PATH = os.environ.get("REDASH_EVENTS_LOG_PATH", "")
EVENTS_CONSOLE_OUTPUT = parse_boolean(os.environ.get("REDASH_EVENTS_CONSOLE_OUTPUT", "false"))
CLIENT_SIDE_METRICS = parse_boolean(os.environ.get("REDASH_CLIENT_SIDE_METRICS", "false"))
ANALYTICS = os.environ.get("REDASH_ANALYTICS", "")
# Features:
FEATURE_TABLES_PERMISSIONS = parse_boolean(os.environ.get("REDASH_FEATURE_TABLES_PERMISSIONS", "false"))


@@ -5,7 +5,7 @@ import redis
from celery import Task
from celery.result import AsyncResult
from celery.utils.log import get_task_logger
from redash import redis_connection, models, statsd_client
from redash import redis_connection, models, statsd_client, settings
from redash.utils import gen_query_hash
from redash.worker import celery
from redash.data.query_runner import get_query_runner
@@ -64,7 +64,12 @@ class QueryTask(object):
logging.info("[Manager][%s] Found existing job: %s", query_hash, job_id)
job = cls(job_id=job_id)
else:
if job.ready():
logging.info("[%s] job found is ready (%s), removing lock", query_hash, job.celery_status)
redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))
job = None
if not job:
pipe.multi()
if scheduled:
@@ -75,7 +80,7 @@ class QueryTask(object):
result = execute_query.apply_async(args=(query, data_source.id), queue=queue_name)
job = cls(async_result=result)
logging.info("[Manager][%s] Created new job: %s", query_hash, job.id)
pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id)
pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
pipe.execute()
break
@@ -113,6 +118,17 @@ class QueryTask(object):
'query_result_id': query_result_id,
}
@property
def is_cancelled(self):
return self._async_result.status == 'REVOKED'
@property
def celery_status(self):
return self._async_result.status
def ready(self):
return self._async_result.ready()
def cancel(self):
return self._async_result.revoke(terminate=True)
@@ -151,6 +167,57 @@ def refresh_queries():
statsd_client.gauge('manager.seconds_since_refresh', now - float(status.get('last_refresh_at', now)))
@celery.task(base=BaseTask)
def cleanup_tasks():
# In case of a cold restart of the workers there might be jobs that still hold their "lock" object but aren't
# actually going to run. This job removes them.
lock_keys = redis_connection.keys("query_hash_job:*") # TODO: use set instead of keys command
query_tasks = [QueryTask(job_id=j) for j in redis_connection.mget(lock_keys)]
logger.info("Found %d locks", len(query_tasks))
inspect = celery.control.inspect()
active_tasks = inspect.active()
if active_tasks is None:
active_tasks = []
else:
active_tasks = active_tasks.values()
all_tasks = set()
for task_list in active_tasks:
for task in task_list:
all_tasks.add(task['id'])
logger.info("Active jobs count: %d", len(all_tasks))
for i, t in enumerate(query_tasks):
if t.ready():
# if locked task is ready already (failed, finished, revoked), we don't need the lock anymore
logger.warning("%s is ready (%s), removing lock.", lock_keys[i], t.celery_status)
redis_connection.delete(lock_keys[i])
if t.celery_status == 'STARTED' and t.id not in all_tasks:
logger.warning("Couldn't find active job for: %s, removing lock.", lock_keys[i])
redis_connection.delete(lock_keys[i])
@celery.task(base=BaseTask)
def cleanup_query_results():
"""
Job to clean up unused query results: results that no query links to anymore, and that are older than a week
(so they are less likely to still be open in someone's browser).
Each run deletes only 100 query results, so it won't choke the database when there are many of them.
"""
unused_query_results = models.QueryResult.unused().limit(100)
total_unused_query_results = models.QueryResult.unused().count()
deleted_count = models.QueryResult.delete().where(models.QueryResult.id << unused_query_results).execute()
logger.info("Deleted %d unused query results out of total of %d." % (deleted_count, total_unused_query_results))
@celery.task(bind=True, base=BaseTask, track_started=True)
def execute_query(self, query, data_source_id):
# TODO: maybe this should be a class?
@@ -195,3 +262,7 @@ def execute_query(self, query, data_source_id):
return query_result.id
@celery.task(base=BaseTask)
def record_event(event):
models.Event.record(event)


@@ -7,15 +7,26 @@ celery = Celery('redash',
broker=settings.CELERY_BROKER,
include='redash.tasks')
celery.conf.update(CELERY_RESULT_BACKEND=settings.CELERY_BACKEND,
CELERYBEAT_SCHEDULE={
'refresh_queries': {
'task': 'redash.tasks.refresh_queries',
'schedule': timedelta(seconds=30)
},
},
CELERY_TIMEZONE='UTC')
celery_schedule = {
'refresh_queries': {
'task': 'redash.tasks.refresh_queries',
'schedule': timedelta(seconds=30)
},
'cleanup_tasks': {
'task': 'redash.tasks.cleanup_tasks',
'schedule': timedelta(minutes=5)
}
}
if settings.QUERY_RESULTS_CLEANUP_ENABLED:
celery_schedule['cleanup_query_results'] = {
'task': 'redash.tasks.cleanup_query_results',
'schedule': timedelta(minutes=5)
}
celery.conf.update(CELERY_RESULT_BACKEND=settings.CELERY_BACKEND,
CELERYBEAT_SCHEDULE=celery_schedule,
CELERY_TIMEZONE='UTC')
if __name__ == '__main__':
celery.start()


@@ -1,7 +1,7 @@
Flask==0.10.1
Flask-GoogleAuth==0.4
Flask-RESTful==0.2.10
Flask-Login==0.2.9
Flask-OAuth==0.12
passlib==1.6.2
Jinja2==2.7.2
MarkupSafe==0.18
@@ -10,7 +10,7 @@ aniso8601==0.82
blinker==1.3
itsdangerous==0.23
peewee==2.2.2
psycopg2==2.5.1
psycopg2==2.5.2
python-dateutil==2.1
pytz==2013.9
redis==2.7.5

setup/Vagrantfile_debian (new file)

@@ -0,0 +1,12 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Every Vagrant virtual environment requires a box to build off of.
config.vm.box = "box-cutter/debian76"
config.vm.provision "shell", path: "setup.sh"
config.vm.network "forwarded_port", guest: 80, host: 9001
end

setup/bootstrap.sh (new file)

@@ -0,0 +1,177 @@
#!/bin/bash
set -eu
REDASH_BASE_PATH=/opt/redash
FILES_BASE_URL=https://raw.githubusercontent.com/EverythingMe/redash/docs_setup/setup/files/
# Verify running as root:
if [ "$(id -u)" != "0" ]; then
if [ $# -ne 0 ]; then
echo "Failed running with sudo. Exiting." 1>&2
exit 1
fi
echo "This script must be run as root. Trying to run with sudo."
sudo bash $0 --with-sudo
exit 0
fi
# Base packages
apt-get update
apt-get install -y python-pip python-dev nginx curl build-essential pwgen
# redash user
# TODO: check user doesn't exist yet?
adduser --system --no-create-home --disabled-login --gecos "" redash
# PostgreSQL
pg_available=0
psql --version || pg_available=$?
if [ $pg_available -ne 0 ]; then
wget $FILES_BASE_URL"postgres_apt.sh" -O /tmp/postgres_apt.sh
bash /tmp/postgres_apt.sh
apt-get update
apt-get -y install postgresql-9.3 postgresql-server-dev-9.3
fi
add_service() {
service_name=$1
service_command="/etc/init.d/$service_name"
echo "Adding service: $service_name (/etc/init.d/$service_name)."
chmod +x $service_command
if command -v chkconfig >/dev/null 2>&1; then
# we have chkconfig, so let's add the service to chkconfig and enable it in runlevels 345
chkconfig --add $service_name && echo "Successfully added to chkconfig!"
chkconfig --level 345 $service_name on && echo "Successfully added to runlevels 345!"
elif command -v update-rc.d >/dev/null 2>&1; then
# if we're not a chkconfig box, assume we can use update-rc.d
update-rc.d $service_name defaults && echo "Success!"
else
echo "No supported init tool found."
fi
$service_command start
}
# Redis
redis_available=0
redis-cli --version || redis_available=$?
if [ $redis_available -ne 0 ]; then
wget http://download.redis.io/releases/redis-2.8.17.tar.gz
tar xzf redis-2.8.17.tar.gz
rm redis-2.8.17.tar.gz
cd redis-2.8.17
make
make install
# Setup process init & configuration
REDIS_PORT=6379
REDIS_CONFIG_FILE="/etc/redis/$REDIS_PORT.conf"
REDIS_LOG_FILE="/var/log/redis_$REDIS_PORT.log"
REDIS_DATA_DIR="/var/lib/redis/$REDIS_PORT"
mkdir -p `dirname "$REDIS_CONFIG_FILE"` || { echo "Could not create redis config directory"; exit 1; }
mkdir -p `dirname "$REDIS_LOG_FILE"` || { echo "Could not create redis log dir"; exit 1; }
mkdir -p "$REDIS_DATA_DIR" || { echo "Could not create redis data directory"; exit 1; }
wget -O /etc/init.d/redis_6379 $FILES_BASE_URL"redis_init"
wget -O $REDIS_CONFIG_FILE $FILES_BASE_URL"redis.conf"
add_service "redis_$REDIS_PORT"
cd ..
rm -rf redis-2.8.17
fi
# Directories
if [ ! -d "$REDASH_BASE_PATH" ]; then
sudo mkdir /opt/redash
sudo chown redash /opt/redash
sudo -u redash mkdir /opt/redash/logs
fi
# Default config file
if [ ! -f "/opt/redash/.env" ]; then
sudo -u redash wget $FILES_BASE_URL"env" -O /opt/redash/.env
fi
# Install latest version
REDASH_VERSION=${REDASH_VERSION-0.4.0.b589}
LATEST_URL="https://github.com/EverythingMe/redash/releases/download/v${REDASH_VERSION/.b/%2Bb}/redash.$REDASH_VERSION.tar.gz"
VERSION_DIR="/opt/redash/redash.$REDASH_VERSION"
REDASH_TARBALL=/tmp/redash.tar.gz
if [ ! -d "$VERSION_DIR" ]; then
sudo -u redash wget $LATEST_URL -O $REDASH_TARBALL
sudo -u redash mkdir $VERSION_DIR
sudo -u redash tar -C $VERSION_DIR -xvf $REDASH_TARBALL
ln -nfs $VERSION_DIR /opt/redash/current
ln -nfs /opt/redash/.env /opt/redash/current/.env
cd /opt/redash/current
# TODO: venv?
pip install -r requirements.txt
fi
# Create database / tables
pg_user_exists=0
sudo -u postgres psql postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='redash'" | grep -q 1 || pg_user_exists=$?
if [ $pg_user_exists -ne 0 ]; then
echo "Creating redash postgres user & database."
sudo -u postgres createuser redash --no-superuser --no-createdb --no-createrole
sudo -u postgres createdb redash --owner=redash
cd /opt/redash/current
sudo -u redash bin/run ./manage.py database create_tables
fi
# Create default admin user
cd /opt/redash/current
# TODO: make sure user created only once
# TODO: generate temp password and print to screen
sudo -u redash bin/run ./manage.py users create --admin --password admin "Admin" "admin"
# Create re:dash read only pg user & setup data source
pg_user_exists=0
sudo -u postgres psql postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='redash_reader'" | grep -q 1 || pg_user_exists=$?
if [ $pg_user_exists -ne 0 ]; then
echo "Creating redash reader postgres user."
REDASH_READER_PASSWORD=$(pwgen -1)
sudo -u postgres psql -c "CREATE ROLE redash_reader WITH PASSWORD '$REDASH_READER_PASSWORD' NOCREATEROLE NOCREATEDB NOSUPERUSER LOGIN"
sudo -u redash psql -c "grant select(id,name,type) ON data_sources to redash_reader;" redash
sudo -u redash psql -c "grant select on activity_log, events, queries, dashboards, widgets, visualizations, query_results to redash_reader;" redash
cd /opt/redash/current
sudo -u redash bin/run ./manage.py ds new "re:dash metadata" "pg" "user=redash_reader password=$REDASH_READER_PASSWORD host=localhost dbname=redash"
fi
# BigQuery dependencies:
apt-get install -y libffi-dev libssl-dev
pip install google-api-python-client==1.2 pyOpenSSL==0.14 oauth2client==1.2
# MySQL dependencies:
apt-get install -y libmysqlclient-dev
pip install MySQL-python==1.2.5
# Mongo dependencies:
pip install pymongo==2.7.2
# Setup supervisord + sysv init startup script
sudo -u redash mkdir -p /opt/redash/supervisord
pip install supervisor==3.1.2 # TODO: move to requirements.txt
# Get supervisord startup script
sudo -u redash wget -O /opt/redash/supervisord/supervisord.conf $FILES_BASE_URL"supervisord.conf"
wget -O /etc/init.d/redash_supervisord $FILES_BASE_URL"redash_supervisord_init"
add_service "redash_supervisord"
# Nginx setup
rm -f /etc/nginx/sites-enabled/default
wget -O /etc/nginx/sites-available/redash $FILES_BASE_URL"nginx_redash_site"
ln -nfs /etc/nginx/sites-available/redash /etc/nginx/sites-enabled/redash
service nginx restart
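A minimal smoke test for the finished installation (a sketch, not part of the original script; it assumes supervisord brought the gunicorn workers up on 127.0.0.1:5000, which nginx now proxies on port 80):
sleep 5                                                           # give supervisord a moment
curl -s -o /dev/null -w "%{http_code}\n" http://127.0.0.1/login   # expect 200 (the login form)
service redash_supervisord status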

setup/files/env Normal file

@@ -0,0 +1,9 @@
export REDASH_CONNECTION_ADAPTER=pg
export REDASH_CONNECTION_STRING="dbname=redash"
export REDASH_STATIC_ASSETS_PATH="../rd_ui/dist/"
export REDASH_LOG_LEVEL="INFO"
export REDASH_WORKERS_COUNT=6
export REDASH_REDIS_URL=redis://localhost:6379/1
export REDASH_DATABASE_URL="postgresql://redash"
export REDASH_COOKIE_SECRET=veryverysecret
export REDASH_GOOGLE_APPS_DOMAIN=
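These exports become the environment of every process started through bin/run, which presumably sources this file, so an edit here takes effect on the next restart. For example, to change the worker count (a sketch; the sed pattern assumes the line format above):
sed -i 's/^export REDASH_WORKERS_COUNT=.*/export REDASH_WORKERS_COUNT=4/' /opt/redash/.env
service redash_supervisord restart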

setup/files/nginx_redash_site Normal file

@@ -0,0 +1,20 @@
upstream rd_servers {
server 127.0.0.1:5000;
}
server {
listen 80 default;
access_log /var/log/nginx/rd.access.log;
gzip on;
gzip_types *;
gzip_proxied any;
location / {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass http://rd_servers;
}
}
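A quick way to verify the site definition before reloading it (a sketch; nginx -t only checks syntax, while the curl exercises the proxy_pass to the rd_servers upstream):
nginx -t && service nginx reload
curl -s -I http://127.0.0.1/ | head -n 1   # expect an HTTP status line from the backend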

setup/files/postgres_apt.sh Normal file

@@ -0,0 +1,162 @@
#!/bin/sh
# script to add apt.postgresql.org to sources.list
# from command line
CODENAME="$1"
# lsb_release is the best interface, but not always available
if [ -z "$CODENAME" ]; then
CODENAME=$(lsb_release -cs 2>/dev/null)
fi
# parse os-release (unreliable, does not work on Ubuntu)
if [ -z "$CODENAME" -a -f /etc/os-release ]; then
. /etc/os-release
# Debian: VERSION="7.0 (wheezy)"
# Ubuntu: VERSION="13.04, Raring Ringtail"
CODENAME=$(echo $VERSION | sed -ne 's/.*(\(.*\)).*/\1/')
fi
# guess from sources.list
if [ -z "$CODENAME" ]; then
CODENAME=$(grep '^deb ' /etc/apt/sources.list | head -n1 | awk '{ print $3 }')
fi
# complain if no result yet
if [ -z "$CODENAME" ]; then
cat <<EOF
Could not determine the distribution codename. Please report this as a bug to
pgsql-pkg-debian@postgresql.org. As a workaround, you can call this script with
the proper codename as parameter, e.g. "$0 squeeze".
EOF
exit 1
fi
# errors are non-fatal above
set -e
cat <<EOF
This script will enable the PostgreSQL APT repository on apt.postgresql.org on
your system. The distribution codename used will be $CODENAME-pgdg.
EOF
case $CODENAME in
# known distributions
sid|wheezy|squeeze|lenny|etch) ;;
precise|lucid) ;;
*) # unknown distribution, verify on the web
DISTURL="http://apt.postgresql.org/pub/repos/apt/dists/"
if [ -x /usr/bin/curl ]; then
DISTHTML=$(curl -s $DISTURL)
elif [ -x /usr/bin/wget ]; then
DISTHTML=$(wget --quiet -O - $DISTURL)
fi
if [ "$DISTHTML" ]; then
if ! echo "$DISTHTML" | grep -q "$CODENAME-pgdg"; then
cat <<EOF
Your system is using the distribution codename $CODENAME, but $CODENAME-pgdg
does not seem to be a valid distribution on
$DISTURL
We abort the installation here. If you want to use a distribution different
from your system, you can call this script with an explicit codename, e.g.
"$0 precise".
Specifically, if you are using a non-LTS Ubuntu release, refer to
https://wiki.postgresql.org/wiki/Apt/FAQ#I_am_using_a_non-LTS_release_of_Ubuntu
For more information, refer to https://wiki.postgresql.org/wiki/Apt
or ask on the mailing list for assistance: pgsql-pkg-debian@postgresql.org
EOF
exit 1
fi
fi
;;
esac
echo "Writing /etc/apt/sources.list.d/pgdg.list ..."
cat > /etc/apt/sources.list.d/pgdg.list <<EOF
deb http://apt.postgresql.org/pub/repos/apt/ $CODENAME-pgdg main
#deb-src http://apt.postgresql.org/pub/repos/apt/ $CODENAME-pgdg main
EOF
echo "Importing repository signing key ..."
KEYRING="/etc/apt/trusted.gpg.d/apt.postgresql.org.gpg"
test -e $KEYRING || touch $KEYRING
apt-key --keyring $KEYRING add - <<EOF
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1
mQINBE6XR8IBEACVdDKT2HEH1IyHzXkb4nIWAY7echjRxo7MTcj4vbXAyBKOfjja
UrBEJWHN6fjKJXOYWXHLIYg0hOGeW9qcSiaa1/rYIbOzjfGfhE4x0Y+NJHS1db0V
G6GUj3qXaeyqIJGS2z7m0Thy4Lgr/LpZlZ78Nf1fliSzBlMo1sV7PpP/7zUO+aA4
bKa8Rio3weMXQOZgclzgeSdqtwKnyKTQdXY5MkH1QXyFIk1nTfWwyqpJjHlgtwMi
c2cxjqG5nnV9rIYlTTjYG6RBglq0SmzF/raBnF4Lwjxq4qRqvRllBXdFu5+2pMfC
IZ10HPRdqDCTN60DUix+BTzBUT30NzaLhZbOMT5RvQtvTVgWpeIn20i2NrPWNCUh
hj490dKDLpK/v+A5/i8zPvN4c6MkDHi1FZfaoz3863dylUBR3Ip26oM0hHXf4/2U
A/oA4pCl2W0hc4aNtozjKHkVjRx5Q8/hVYu+39csFWxo6YSB/KgIEw+0W8DiTII3
RQj/OlD68ZDmGLyQPiJvaEtY9fDrcSpI0Esm0i4sjkNbuuh0Cvwwwqo5EF1zfkVj
Tqz2REYQGMJGc5LUbIpk5sMHo1HWV038TWxlDRwtOdzw08zQA6BeWe9FOokRPeR2
AqhyaJJwOZJodKZ76S+LDwFkTLzEKnYPCzkoRwLrEdNt1M7wQBThnC5z6wARAQAB
tBxQb3N0Z3JlU1FMIERlYmlhbiBSZXBvc2l0b3J5iQI9BBMBCAAnAhsDBQsJCAcD
BRUKCQgLBRYCAwEAAh4BAheABQJS6RUZBQkOhCctAAoJEH/MfUaszEz4zmQP/2ad
HtuaXL5Xu3C3NGLha/aQb9iSJC8z5vN55HMCpsWlmslCBuEr+qR+oZvPkvwh0Io/
8hQl/qN54DMNifRwVL2n2eG52yNERie9BrAMK2kNFZZCH4OxlMN0876BmDuNq2U6
7vUtCv+pxT+g9R1LvlPgLCTjS3m+qMqUICJ310BMT2cpYlJx3YqXouFkdWBVurI0
pGU/+QtydcJALz5eZbzlbYSPWbOm2ZSS2cLrCsVNFDOAbYLtUn955yXB5s4rIscE
vTzBxPgID1iBknnPzdu2tCpk07yJleiupxI1yXstCtvhGCbiAbGFDaKzhgcAxSIX
0ZPahpaYLdCkcoLlfgD+ar4K8veSK2LazrhO99O0onRG0p7zuXszXphO4E/WdbTO
yDD35qCqYeAX6TaB+2l4kIdVqPgoXT/doWVLUK2NjZtd3JpMWI0OGYDFn2DAvgwP
xqKEoGTOYuoWKssnwLlA/ZMETegak27gFAKfoQlmHjeA/PLC2KRYd6Wg2DSifhn+
2MouoE4XFfeekVBQx98rOQ5NLwy/TYlsHXm1n0RW86ETN3chj/PPWjsi80t5oepx
82azRoVu95LJUkHpPLYyqwfueoVzp2+B2hJU2Rg7w+cJq64TfeJG8hrc93MnSKIb
zTvXfdPtvYdHhhA2LYu4+5mh5ASlAMJXD7zIOZt2iEYEEBEIAAYFAk6XSO4ACgkQ
xa93SlhRC1qmjwCg9U7U+XN7Gc/dhY/eymJqmzUGT/gAn0guvoX75Y+BsZlI6dWn
qaFU6N8HiQIcBBABCAAGBQJOl0kLAAoJEExaa6sS0qeuBfEP/3AnLrcKx+dFKERX
o4NBCGWr+i1CnowupKS3rm2xLbmiB969szG5TxnOIvnjECqPz6skK3HkV3jTZaju
v3sR6M2ItpnrncWuiLnYcCSDp9TEMpCWzTEgtrBlKdVuTNTeRGILeIcvqoZX5w+u
i0eBvvbeRbHEyUsvOEnYjrqoAjqUJj5FUZtR1+V9fnZp8zDgpOSxx0LomnFdKnhj
uyXAQlRCA6/roVNR9ruRjxTR5ubteZ9ubTsVYr2/eMYOjQ46LhAgR+3Alblu/WHB
MR/9F9//RuOa43R5Sjx9TiFCYol+Ozk8XRt3QGweEH51YkSYY3oRbHBb2Fkql6N6
YFqlLBL7/aiWnNmRDEs/cdpo9HpFsbjOv4RlsSXQfvvfOayHpT5nO1UQFzoyMVpJ
615zwmQDJT5Qy7uvr2eQYRV9AXt8t/H+xjQsRZCc5YVmeAo91qIzI/tA2gtXik49
6yeziZbfUvcZzuzjjxFExss4DSAwMgorvBeIbiz2k2qXukbqcTjB2XqAlZasd6Ll
nLXpQdqDV3McYkP/MvttWh3w+J/woiBcA7yEI5e3YJk97uS6+ssbqLEd0CcdT+qz
+Waw0z/ZIU99Lfh2Qm77OT6vr//Zulw5ovjZVO2boRIcve7S97gQ4KC+G/+QaRS+
VPZ67j5UMxqtT/Y4+NHcQGgwF/1iiQI9BBMBCAAnAhsDBQsJCAcDBRUKCQgLBRYC
AwEAAh4BAheABQJQeSssBQkDwxbfAAoJEH/MfUaszEz4bgkP/0AI0UgDgkNNqplA
IpE/pkwem2jgGpJGKurh2xDu6j2ZL+BPzPhzyCeMHZwTXkkI373TXGQQP8dIa+RD
HAZ3iijw4+ISdKWpziEUJjUk04UMPTlN+dYJt2EHLQDD0VLtX0yQC/wLmVEH/REp
oclbVjZR/+ehwX2IxOIlXmkZJDSycl975FnSUjMAvyzty8P9DN0fIrQ7Ju+BfMOM
TnUkOdp0kRUYez7pxbURJfkM0NxAP1geACI91aISBpFg3zxQs1d3MmUIhJ4wHvYB
uaR7Fx1FkLAxWddre/OCYJBsjucE9uqc04rgKVjN5P/VfqNxyUoB+YZ+8Lk4t03p
RBcD9XzcyOYlFLWXbcWxTn1jJ2QMqRIWi5lzZIOMw5B+OK9LLPX0dAwIFGr9WtuV
J2zp+D4CBEMtn4Byh8EaQsttHeqAkpZoMlrEeNBDz2L7RquPQNmiuom15nb7xU/k
7PGfqtkpBaaGBV9tJkdp7BdH27dZXx+uT+uHbpMXkRrXliHjWpAw+NGwADh/Pjmq
ExlQSdgAiXy1TTOdzxKH7WrwMFGDK0fddKr8GH3f+Oq4eOoNRa6/UhTCmBPbryCS
IA7EAd0Aae9YaLlOB+eTORg/F1EWLPm34kKSRtae3gfHuY2cdUmoDVnOF8C9hc0P
bL65G4NWPt+fW7lIj+0+kF19s2PviQI9BBMBCAAnAhsDBQsJCAcDBRUKCQgLBRYC
AwEAAh4BAheABQJRKm2VBQkINsBBAAoJEH/MfUaszEz4RTEP/1sQHyjHaUiAPaCA
v8jw/3SaWP/g8qLjpY6ROjLnDMvwKwRAoxUwcIv4/TWDOMpwJN+CJIbjXsXNYvf9
OX+UTOvq4iwi4ADrAAw2xw+Jomc6EsYla+hkN2FzGzhpXfZFfUsuphjY3FKL+4hX
H+R8ucNwIz3yrkfc17MMn8yFNWFzm4omU9/JeeaafwUoLxlULL2zY7H3+QmxCl0u
6t8VvlszdEFhemLHzVYRY0Ro/ISrR78CnANNsMIy3i11U5uvdeWVCoWV1BXNLzOD
4+BIDbMB/Do8PQCWiliSGZi8lvmj/sKbumMFQonMQWOfQswTtqTyQ3yhUM1LaxK5
PYq13rggi3rA8oq8SYb/KNCQL5pzACji4TRVK0kNpvtxJxe84X8+9IB1vhBvF/Ji
/xDd/3VDNPY+k1a47cON0S8Qc8DA3mq4hRfcgvuWy7ZxoMY7AfSJOhleb9+PzRBB
n9agYgMxZg1RUWZazQ5KuoJqbxpwOYVFja/stItNS4xsmi0lh2I4MNlBEDqnFLUx
SvTDc22c3uJlWhzBM/f2jH19uUeqm4jaggob3iJvJmK+Q7Ns3WcfhuWwCnc1+58d
iFAMRUCRBPeFS0qd56QGk1r97B6+3UfLUslCfaaA8IMOFvQSHJwDO87xWGyxeRTY
IIP9up4xwgje9LB7fMxsSkCDTHOk
=s3DI
-----END PGP PUBLIC KEY BLOCK-----
EOF
echo "Running apt-get update ..."
apt-get update
cat <<EOF
You can now start installing packages from apt.postgresql.org.
Have a look at https://wiki.postgresql.org/wiki/Apt for more information;
most notably the FAQ at https://wiki.postgresql.org/wiki/Apt/FAQ
EOF
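Typical invocation once the script is in place (a sketch; the explicit codename argument and the postgresql-9.3 package name are assumptions, not something the script itself pins):
sudo sh setup/files/postgres_apt.sh             # or: sudo sh setup/files/postgres_apt.sh precise
sudo apt-get install -y postgresql-9.3          # package version is an assumption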

setup/files/redash_supervisord_init Normal file

@@ -0,0 +1,129 @@
#!/bin/sh
# /etc/init.d/redash_supervisord
### BEGIN INIT INFO
# Provides: supervisord
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: process supervisor
### END INIT INFO
# Author: Ron DuPlain <ron.duplain@gmail.com>
# Do NOT "set -e"
# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin
NAME=supervisord
DESC="process supervisor"
DAEMON=/usr/local/bin/$NAME
DAEMON_ARGS="--configuration /opt/redash/supervisord/supervisord.conf "
PIDFILE=/opt/redash/supervisord/supervisord.pid
SCRIPTNAME=/etc/init.d/redash_supervisord
USER=redash
# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0
# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME
# Load the VERBOSE setting and other rcS variables
. /lib/init/vars.sh
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions
#
# Function that starts the daemon/service
#
do_start()
{
# Return
# 0 if daemon has been started
# 1 if daemon was already running
# 2 if daemon could not be started
start-stop-daemon --start --quiet --pidfile $PIDFILE --user $USER --chuid $USER --exec $DAEMON --test > /dev/null \
|| return 1
start-stop-daemon --start --quiet --pidfile $PIDFILE --user $USER --chuid $USER --exec $DAEMON -- \
$DAEMON_ARGS \
|| return 2
# Add code here, if necessary, that waits for the process to be ready
# to handle requests from services started subsequently which depend
# on this one. As a last resort, sleep for some time.
}
#
# Function that stops the daemon/service
#
do_stop()
{
# Return
# 0 if daemon has been stopped
# 1 if daemon was already stopped
# 2 if daemon could not be stopped
# other if a failure occurred
start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --user $USER --chuid $USER --name $NAME
RETVAL="$?"
[ "$RETVAL" = 2 ] && return 2
# Wait for children to finish too if this is a daemon that forks
# and if the daemon is only ever run from this initscript.
# If the above conditions are not satisfied then add some other code
# that waits for the process to drop all resources that could be
# needed by services started subsequently. A last resort is to
# sleep for some time.
start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --user $USER --chuid $USER --exec $DAEMON
[ "$?" = 2 ] && return 2
# Many daemons don't delete their pidfiles when they exit.
rm -f $PIDFILE
return "$RETVAL"
}
case "$1" in
start)
[ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
do_start
case "$?" in
0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
esac
;;
stop)
[ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
do_stop
case "$?" in
0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
esac
;;
status)
status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
;;
restart)
log_daemon_msg "Restarting $DESC" "$NAME"
do_stop
case "$?" in
0|1)
do_start
case "$?" in
0) log_end_msg 0 ;;
1) log_end_msg 1 ;; # Old process is still running
*) log_end_msg 1 ;; # Failed to start
esac
;;
*)
# Failed to stop
log_end_msg 1
;;
esac
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|status|restart}" >&2
exit 3
;;
esac
:
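Exercising the init script once it is installed and registered (a sketch; start and stop are driven by start-stop-daemon against the pidfile at /opt/redash/supervisord/supervisord.pid):
sudo service redash_supervisord start
sudo service redash_supervisord status
sudo service redash_supervisord restart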

setup/files/redis.conf Normal file

@@ -0,0 +1,785 @@
## Generated by install_server.sh ##
# Redis configuration file example
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
# from admin or Redis Sentinel. Since Redis always uses the last processed
# line as value of a configuration directive, you'd better put includes
# at the beginning of this file to avoid overwriting config change at runtime.
#
# If instead you are interested in using includes to override configuration
# options, it is better to use include as the last line.
#
# include /path/to/local.conf
# include /path/to/other.conf
################################ GENERAL #####################################
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize yes
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
# default. You can specify a custom pid file location here.
pidfile /var/run/redis_6379.pid
# Accept connections on the specified port, default is 6379.
# If port 0 is specified Redis will not listen on a TCP socket.
port 6379
# TCP listen() backlog.
#
# In high requests-per-second environments you need a high backlog in order
# to avoid slow clients connections issues. Note that the Linux kernel
# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
# in order to get the desired effect.
tcp-backlog 511
# By default Redis listens for connections from all the network interfaces
# available on the server. It is possible to listen to just one or multiple
# interfaces using the "bind" configuration directive, followed by one or
# more IP addresses.
#
# Examples:
#
# bind 192.168.1.100 10.0.0.1
# bind 127.0.0.1
# Specify the path for the Unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
# unixsocketperm 700
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0
# TCP keepalive.
#
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
# of communication. This is useful for two reasons:
#
# 1) Detect dead peers.
# 2) Keep the connection alive from the point of view of network
# equipment in the middle.
#
# On Linux, the specified value (in seconds) is the period used to send ACKs.
# Note that to close the connection, double the time is needed.
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 60 seconds.
tcp-keepalive 0
# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice
# Specify the log file name. Also the empty string can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile /var/log/redis_6379.log
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
# syslog-enabled no
# Specify the syslog identity.
# syslog-ident redis
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16
################################ SNAPSHOTTING ################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving at all commenting all the "save" lines.
#
# It is also possible to remove all the previously configured save
# points by adding a save directive with a single empty string argument
# like in the following example:
#
# save ""
save 900 1
save 300 10
save 60 10000
# By default Redis will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process starts working again, Redis will
# automatically allow writes again.
#
# However if you have set up proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# continue to work as usual even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes
# Compress string objects using LZF when dumping .rdb databases?
# By default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
# This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
# for maximum performances.
#
# RDB files created with checksum disabled have a checksum of zero that will
# tell the loading code to skip the check.
rdbchecksum yes
# The filename where to dump the DB
dbfilename dump.rdb
# The working directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir /var/lib/redis/6379
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. A few things to understand ASAP about Redis replication.
#
# 1) Redis replication is asynchronous, but you can configure a master to
# stop accepting writes if it appears to be not connected with at least
# a given number of slaves.
# 2) Redis slaves are able to perform a partial resynchronization with the
# master if the replication link is lost for a relatively small amount of
# time. You may want to configure the replication backlog size (see the next
# sections of this file) with a sensible value depending on your needs.
# 3) Replication is automatic and does not need user intervention. After a
# network partition slaves automatically try to reconnect to masters
# and resynchronize with them.
#
# slaveof <masterip> <masterport>
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>
# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all the kind of commands
# but to INFO and SLAVEOF.
#
slave-serve-stale-data yes
# You can configure a slave instance to accept writes or not. Writing against
# a slave instance may be useful to store some ephemeral data (because data
# written on a slave will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
# Since Redis 2.6 by default slaves are read-only.
#
# Note: read only slaves are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
# Still a read only slave exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
# security of read only slaves using 'rename-command' to shadow all the
# administrative / dangerous commands.
slave-read-only yes
# Slaves send PINGs to server in a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10
# The following option sets the replication timeout for:
#
# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
# 2) Master timeout from the point of view of slaves (data, pings).
# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
# every time there is low traffic between the master and the slave.
#
# repl-timeout 60
# Disable TCP_NODELAY on the slave socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# less bandwidth to send data to slaves. But this can add a delay for
# the data to appear on the slave side, up to 40 milliseconds with
# Linux kernels using a default configuration.
#
# If you select "no" the delay for data to appear on the slave side will
# be reduced but more bandwidth will be used for replication.
#
# By default we optimize for low latency, but in very high traffic conditions
# or when the master and slaves are many hops away, turning this to "yes" may
# be a good idea.
repl-disable-tcp-nodelay no
# Set the replication backlog size. The backlog is a buffer that accumulates
# slave data when slaves are disconnected for some time, so that when a slave
# wants to reconnect again, often a full resync is not needed, but a partial
# resync is enough, just passing the portion of data the slave missed while
# disconnected.
#
# The bigger the replication backlog, the longer the time the slave can be
# disconnected and later be able to perform a partial resynchronization.
#
# The backlog is only allocated once there is at least a slave connected.
#
# repl-backlog-size 1mb
# After a master has no longer connected slaves for some time, the backlog
# will be freed. The following option configures the amount of seconds that
# need to elapse, starting from the time the last slave disconnected, for
# the backlog buffer to be freed.
#
# A value of 0 means to never release the backlog.
#
# repl-backlog-ttl 3600
# The slave priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100
# It is possible for a master to stop accepting writes if there are less than
# N slaves connected, having a lag less or equal than M seconds.
#
# The N slaves need to be in "online" state.
#
# The lag in seconds, that must be <= the specified value, is calculated from
# the last ping received from the slave, that is usually sent every second.
#
# This option does not GUARANTEE that N replicas will accept the write, but
# will limit the window of exposure for lost writes in case not enough slaves
# are available, to the specified number of seconds.
#
# For example to require at least 3 slaves with a lag <= 10 seconds use:
#
# min-slaves-to-write 3
# min-slaves-max-lag 10
#
# Setting one or the other to 0 disables the feature.
#
# By default min-slaves-to-write is set to 0 (feature disabled) and
# min-slaves-max-lag is set to 10.
################################## SECURITY ###################################
# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
#
# requirepass foobared
# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to slaves may cause problems.
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
# maxclients 10000
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU cache, or to set
# a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves are subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
# buffer of slaves is full with DELs of keys evicted triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have slaves attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for slave
# output buffers (but this is not needed if the policy is 'noeviction').
#
# maxmemory <bytes>
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select among five behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key according to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
#
# Note: with any of the above policies, Redis will return an error on write
# operations when there are no suitable keys for eviction.
#
# At the date of writing, these commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
# getset mset msetnx exec sort
#
# The default is:
#
# maxmemory-policy volatile-lru
# LRU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can select as well the sample
# size to check. For instance, by default Redis will check three keys and
# pick the one that was used least recently; you can change the sample size
# using the following configuration directive.
#
# maxmemory-samples 3
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the Redis process or
# a power outage may result into a few minutes of writes lost (depending on
# the configured save points).
#
# The Append Only File is an alternative persistence mode that provides
# much better durability. For instance using the default data fsync policy
# (see later in the config file) Redis can lose just one second of writes in a
# dramatic event like a server power outage, or a single write if something
# wrong with the Redis process itself happens, but the operating system is
# still running correctly.
#
# AOF and RDB persistence can be enabled at the same time without problems.
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# with the better durability guarantees.
#
# Please check http://redis.io/topics/persistence for more information.
appendonly no
# The name of the append only file (default: "appendonly.aof")
appendfilename "appendonly.aof"
# The fsync() call tells the Operating System to actually write data on disk
# instead of waiting for more data in the output buffer. Some OSes will really
# flush data to disk, while others will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, Safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec", as that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# For more details please check the following article:
# http://antirez.com/post/redis-persistence-demystified.html
#
# If unsure, use "everysec".
# appendfsync always
appendfsync everysec
# appendfsync no
# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
# bigger than the specified percentage, the rewrite is triggered. Also
# you need to specify a minimal size for the AOF file to be rewritten, this
# is useful to avoid rewriting the AOF file even if the percentage increase
# is reached but it is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
# An AOF file may be found to be truncated at the end during the Redis
# startup process, when the AOF data gets loaded back into memory.
# This may happen when the system where Redis is running
# crashes, especially when an ext4 filesystem is mounted without the
# data=ordered option (however this can't happen when Redis itself
# crashes or aborts but the operating system still works correctly).
#
# Redis can either exit with an error when this happens, or load as much
# data as possible (the default now) and start if the AOF file is found
# to be truncated at the end. The following option controls this behavior.
#
# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
# the Redis server starts emitting a log to inform the user of the event.
# Otherwise if the option is set to no, the server aborts with an error
# and refuses to start. When the option is set to no, the user is required
# to fix the AOF file using the "redis-check-aof" utility before restarting
# the server.
#
# Note that if the AOF file is found to be corrupted in the middle,
# the server will still exit with an error. This option only applies when
# Redis tries to read more data from the AOF file but not enough bytes
# are found.
aof-load-truncated yes
################################ LUA SCRIPTING ###############################
# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceeds the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that has not yet called any write commands. The second
# is the only way to shut down the server when a write command was already
# issued by the script but the user doesn't want to wait for the natural
# termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.
# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000
# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
################################ LATENCY MONITOR ##############################
# The Redis latency monitoring subsystem samples different operations
# at runtime in order to collect data related to possible sources of
# latency of a Redis instance.
#
# Via the LATENCY command this information is available to the user that can
# print graphs and obtain reports.
#
# The system only logs operations that were performed in a time equal or
# greater than the amount of milliseconds specified via the
# latency-monitor-threshold configuration directive. When its value is set
# to zero, the latency monitor is turned off.
#
# By default latency monitoring is disabled since it is mostly not needed
# if you don't have latency issues, and collecting data has a performance
# impact, that while very small, can be measured under big load. Latency
# monitoring can easily be enabled at runtime using the command
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
latency-monitor-threshold 0
############################# Event notification ##############################
# Redis can notify Pub/Sub clients about events happening in the key space.
# This feature is documented at http://redis.io/topics/notifications
#
# For instance if keyspace events notification is enabled, and a client
# performs a DEL operation on key "foo" stored in the Database 0, two
# messages will be published via Pub/Sub:
#
# PUBLISH __keyspace@0__:foo del
# PUBLISH __keyevent@0__:del foo
#
# It is possible to select the events that Redis will notify among a set
# of classes. Every class is identified by a single character:
#
# K Keyspace events, published with __keyspace@<db>__ prefix.
# E Keyevent events, published with __keyevent@<db>__ prefix.
# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
# $ String commands
# l List commands
# s Set commands
# h Hash commands
# z Sorted set commands
# x Expired events (events generated every time a key expires)
# e Evicted events (events generated when a key is evicted for maxmemory)
# A Alias for g$lshzxe, so that the "AKE" string means all the events.
#
# The "notify-keyspace-events" takes as argument a string that is composed
# by zero or multiple characters. The empty string means that notifications
# are disabled at all.
#
# Example: to enable list and generic events, from the point of view of the
# event name, use:
#
# notify-keyspace-events Elg
#
# Example 2: to get the stream of the expired keys subscribing to channel
# name __keyevent@0__:expired use:
#
# notify-keyspace-events Ex
#
# By default all notifications are disabled because most users don't need
# this feature and the feature has some overhead. Note that if you don't
# specify at least one of K or E, no events will be delivered.
notify-keyspace-events ""
############################### ADVANCED CONFIG ###############################
# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64
# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit in the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512
# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
# HyperLogLog sparse representation bytes limit. The limit includes the
# 16 byte header. When a HyperLogLog using the sparse representation crosses
# this limit, it is converted into the dense representation.
#
# A value greater than 16000 is totally useless, since at that point the
# dense representation is more memory efficient.
#
# The suggested value is ~ 3000 in order to have the benefits of
# the space efficient encoding without slowing down too much PFADD,
# which is O(N) with the sparse encoding. The value can be raised to
# ~ 10000 when CPU is not a concern, but space is, and the data set is
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run against a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing never completes and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients including MONITOR clients
# slave -> slave clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reach 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously overcomes
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can be read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard or the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform accordingly to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10
# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
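A quick check that a running server actually picked this file up (a sketch; CONFIG GET reads the live values):
redis-cli -p 6379 ping                   # expect PONG
redis-cli -p 6379 config get save        # expect the three save points configured above
redis-cli -p 6379 config get appendonly  # expect "no"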

setup/files/redis_init Normal file

@@ -0,0 +1,66 @@
#!/bin/sh
EXEC=/usr/local/bin/redis-server
CLIEXEC=/usr/local/bin/redis-cli
PIDFILE=/var/run/redis_6379.pid
CONF="/etc/redis/6379.conf"
REDISPORT="6379"
###############
# SysV Init Information
# chkconfig: - 58 74
# description: redis_6379 is the redis daemon.
### BEGIN INIT INFO
# Provides: redis_6379
# Required-Start: $network $local_fs $remote_fs
# Required-Stop: $network $local_fs $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Should-Start: $syslog $named
# Should-Stop: $syslog $named
# Short-Description: start and stop redis_6379
# Description: Redis daemon
### END INIT INFO
case "$1" in
start)
if [ -f $PIDFILE ]
then
echo "$PIDFILE exists, process is already running or crashed"
else
echo "Starting Redis server..."
$EXEC $CONF
fi
;;
stop)
if [ ! -f $PIDFILE ]
then
echo "$PIDFILE does not exist, process is not running"
else
PID=$(cat $PIDFILE)
echo "Stopping ..."
$CLIEXEC -p $REDISPORT shutdown
while [ -x /proc/${PID} ]
do
echo "Waiting for Redis to shutdown ..."
sleep 1
done
echo "Redis stopped"
fi
;;
status)
if [ ! -f $PIDFILE ]
then
echo 'Redis is not running'
else
echo "Redis is running ($(<$PIDFILE))"
fi
;;
restart)
$0 stop
$0 start
;;
*)
echo "Please use start, stop, restart or status as first argument"
;;
esac
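Once the script is copied to /etc/init.d/redis_6379 and registered (the bootstrap's add_service does that), the usual verbs apply (a sketch):
sudo service redis_6379 start
sudo service redis_6379 status
tail -n 20 /var/log/redis_6379.log   # logfile path configured in redis.conf above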

setup/files/supervisord.conf Normal file

@@ -0,0 +1,31 @@
[supervisord]
nodaemon=false
logfile=/opt/redash/logs/supervisord.log
pidfile=/opt/redash/supervisord/supervisord.pid
directory=/opt/redash/current
[inet_http_server]
port = 127.0.0.1:9001
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[program:redash_server]
command=/opt/redash/current/bin/run gunicorn -b 127.0.0.1:5000 --name redash -w 4 redash.wsgi:app
process_name=redash_server
numprocs=1
priority=999
autostart=true
autorestart=true
stdout_logfile=/opt/redash/logs/api.log
stderr_logfile=/opt/redash/logs/api_error.log
[program:redash_celery]
command=/opt/redash/current/bin/run celery worker --app=redash.worker --beat -Qqueries,celery,scheduled_queries
process_name=redash_celery
numprocs=1
priority=999
autostart=true
autorestart=true
stdout_logfile=/opt/redash/logs/celery.log
stderr_logfile=/opt/redash/logs/celery_error.log
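Because the config opens an inet_http_server on 127.0.0.1:9001, the two programs can be inspected and restarted individually with supervisorctl (a sketch):
supervisorctl -s http://127.0.0.1:9001 status
supervisorctl -s http://127.0.0.1:9001 restart redash_server
supervisorctl -s http://127.0.0.1:9001 tail redash_celery stderr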

setup/packer.json Normal file

@@ -0,0 +1,49 @@
{
"variables": {
"aws_access_key": "",
"aws_secret_key": "",
"redash_version": "0.4.0.b589",
"image_version": "040b589"
},
"builders": [
{
"name": "redash-us-east-1",
"type": "amazon-ebs",
"access_key": "{{user `aws_access_key`}}",
"secret_key": "{{user `aws_secret_key`}}",
"region": "us-east-1",
"source_ami": "ami-fe7cc796",
"instance_type": "t2.micro",
"ssh_username": "ubuntu",
"ami_name": "redash-{{user `image_version`}}-us-east-1"
},
{
"name": "redash-eu-west-1",
"type": "amazon-ebs",
"access_key": "{{user `aws_access_key`}}",
"secret_key": "{{user `aws_secret_key`}}",
"region": "eu-west-1",
"source_ami": "ami-d2ff50a5",
"instance_type": "t2.micro",
"ssh_username": "ubuntu",
"ami_name": "redash-{{user `image_version`}}-eu-west-1"
},
{
"type": "googlecompute",
"bucket_name": "redash-images",
"account_file": "account.json",
"client_secrets_file": "client_secret.json",
"project_id": "redash-bird-123",
"source_image": "debian-7-wheezy-v20141017",
"zone": "us-central1-a",
"image_name": "redash-{{user `image_version`}}"
}
],
"provisioners": [
{
"type": "shell",
"script": "bootstrap.sh",
"environment_vars": ["REDASH_VERSION={{user `redash_version`}}"]
}
]
}
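Building the two AMIs and the GCE image from this template (a sketch; validate and the -only filter are stock Packer flags, and the credential values are placeholders):
packer validate setup/packer.json
packer build -only=redash-us-east-1 \
    -var 'aws_access_key=AKIA...' \
    -var 'aws_secret_key=...' \
    setup/packer.json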

tests/factories.py

@@ -60,6 +60,7 @@ query_factory = ModelFactory(redash.models.Query,
query='SELECT 1',
ttl=-1,
user=user_factory.create,
is_archived=False,
data_source=data_source_factory.create)
query_result_factory = ModelFactory(redash.models.QueryResult,

File diff suppressed because one or more lines are too long


@@ -1,52 +1,25 @@
from unittest import TestCase
from mock import patch
from flask_googleauth import ObjectDict
from tests import BaseTestCase
from redash.authentication import validate_email, create_and_login_user
from redash import settings, models
from redash import models
from redash.google_oauth import create_and_login_user
from tests.factories import user_factory
class TestEmailValidation(TestCase):
def test_accepts_address_with_correct_domain(self):
with patch.object(settings, 'GOOGLE_APPS_DOMAIN', 'example.com'):
self.assertTrue(validate_email('example@example.com'))
def test_accepts_address_from_exception_list(self):
with patch.multiple(settings, GOOGLE_APPS_DOMAIN='example.com', ALLOWED_EXTERNAL_USERS=['whatever@whatever.com']):
self.assertTrue(validate_email('whatever@whatever.com'))
def test_accept_any_address_when_domain_empty(self):
with patch.object(settings, 'GOOGLE_APPS_DOMAIN', None):
self.assertTrue(validate_email('whatever@whatever.com'))
def test_rejects_address_with_incorrect_domain(self):
with patch.object(settings, 'GOOGLE_APPS_DOMAIN', 'example.com'):
self.assertFalse(validate_email('whatever@whatever.com'))
class TestCreateAndLoginUser(BaseTestCase):
def test_logins_valid_user(self):
user = user_factory.create(email='test@example.com')
with patch.object(settings, 'GOOGLE_APPS_DOMAIN', 'example.com'), patch('redash.authentication.login_user') as login_user_mock:
create_and_login_user(None, user)
with patch('redash.google_oauth.login_user') as login_user_mock:
create_and_login_user(user.name, user.email)
login_user_mock.assert_called_once_with(user, remember=True)
def test_creates_vaild_new_user(self):
openid_user = ObjectDict({'email': 'test@example.com', 'name': 'Test User'})
email = 'test@example.com'
name = 'Test User'
with patch.multiple(settings, GOOGLE_APPS_DOMAIN='example.com'), \
patch('redash.authentication.login_user') as login_user_mock:
with patch('redash.google_oauth.login_user') as login_user_mock:
create_and_login_user(None, openid_user)
create_and_login_user(name, email)
self.assertTrue(login_user_mock.called)
user = models.User.get(models.User.email == openid_user.email)
def test_ignores_invliad_user(self):
user = ObjectDict({'email': 'test@whatever.com'})
with patch.object(settings, 'GOOGLE_APPS_DOMAIN', 'example.com'), patch('redash.authentication.login_user') as login_user_mock:
create_and_login_user(None, user)
self.assertFalse(login_user_mock.called)
user = models.User.get(models.User.email == email)


@@ -380,7 +380,7 @@ class TestLogin(BaseTestCase):
with app.test_client() as c, patch.object(settings, 'PASSWORD_LOGIN_ENABLED', False):
rv = c.get('/login')
self.assertEquals(rv.status_code, 302)
self.assertTrue(rv.location.endswith(url_for('GoogleAuth.login')))
self.assertTrue(rv.location.endswith(url_for('google_oauth.authorize')))
def test_get_login_form(self):
with app.test_client() as c:


@@ -26,9 +26,8 @@ class ImportTest(BaseTestCase):
self.assertEqual(dashboard.widgets.count(),
reduce(lambda s, row: s + len(row), self.dashboard['widgets'], 0))
self.assertEqual(models.Visualization.select().count(), dashboard.widgets.count())
self.assertEqual(models.Query.select().count(), dashboard.widgets.count()-1)
self.assertEqual(models.QueryResult.select().count(), dashboard.widgets.count()-1)
self.assertEqual(models.Visualization.select().count(), dashboard.widgets.count()-1)
self.assertEqual(models.Query.select().count(), dashboard.widgets.count()-2)
def test_imports_updates_existing_models(self):
importer = import_export.Importer(data_source=data_source_factory.create())


@@ -1,7 +1,8 @@
import datetime
import json
from tests import BaseTestCase
from redash import models
from factories import dashboard_factory, query_factory, data_source_factory, query_result_factory
from factories import dashboard_factory, query_factory, data_source_factory, query_result_factory, user_factory, widget_factory
from redash.utils import gen_query_hash
@@ -29,6 +30,87 @@ class QueryTest(BaseTestCase):
self.assertNotEquals(old_hash, q.query_hash)
def test_search_finds_in_name(self):
q1 = query_factory.create(name="Testing search")
q2 = query_factory.create(name="Testing searching")
q3 = query_factory.create(name="Testing sea rch")
queries = models.Query.search("search")
self.assertIn(q1, queries)
self.assertIn(q2, queries)
self.assertNotIn(q3, queries)
def test_search_finds_in_description(self):
q1 = query_factory.create(description="Testing search")
q2 = query_factory.create(description="Testing searching")
q3 = query_factory.create(description="Testing sea rch")
queries = models.Query.search("search")
self.assertIn(q1, queries)
self.assertIn(q2, queries)
self.assertNotIn(q3, queries)
def test_search_by_id_returns_query(self):
q1 = query_factory.create(description="Testing search")
q2 = query_factory.create(description="Testing searching")
q3 = query_factory.create(description="Testing sea rch")
queries = models.Query.search(str(q3.id))
self.assertIn(q3, queries)
self.assertNotIn(q1, queries)
self.assertNotIn(q2, queries)
class QueryArchiveTest(BaseTestCase):
def setUp(self):
super(QueryArchiveTest, self).setUp()
def test_archive_query_sets_flag(self):
query = query_factory.create(ttl=1)
query.archive()
query = models.Query.get_by_id(query.id)
self.assertEquals(query.is_archived, True)
def test_archived_query_doesnt_return_in_all(self):
query = query_factory.create(ttl=1)
yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
query_result = models.QueryResult.store_result(query.data_source.id, query.query_hash, query.query, "1",
123, yesterday)
query.latest_query_data = query_result
query.save()
self.assertIn(query, models.Query.all_queries())
self.assertIn(query, models.Query.outdated_queries())
query.archive()
self.assertNotIn(query, models.Query.all_queries())
self.assertNotIn(query, models.Query.outdated_queries())
def test_removes_associated_widgets_from_dashboards(self):
widget = widget_factory.create()
query = widget.visualization.query
query.archive()
self.assertRaises(models.Widget.DoesNotExist, models.Widget.get_by_id, widget.id)
def test_removes_scheduling(self):
query = query_factory.create(ttl=1)
query.archive()
query = models.Query.get_by_id(query.id)
self.assertEqual(-1, query.ttl)
class QueryResultTest(BaseTestCase):
def setUp(self):
@@ -93,6 +175,26 @@ class QueryResultTest(BaseTestCase):
self.assertEqual(found_query_result.id, qr.id)
class TestUnusedQueryResults(BaseTestCase):
def test_returns_only_unused_query_results(self):
two_weeks_ago = datetime.datetime.now() - datetime.timedelta(days=14)
qr = query_result_factory.create()
query = query_factory.create(latest_query_data=qr)
unused_qr = query_result_factory.create(retrieved_at=two_weeks_ago)
self.assertIn(unused_qr, models.QueryResult.unused())
self.assertNotIn(qr, models.QueryResult.unused())
def test_returns_only_over_a_week_old_results(self):
two_weeks_ago = datetime.datetime.now() - datetime.timedelta(days=14)
unused_qr = query_result_factory.create(retrieved_at=two_weeks_ago)
new_unused_qr = query_result_factory.create()
self.assertIn(unused_qr, models.QueryResult.unused())
self.assertNotIn(new_unused_qr, models.QueryResult.unused())
class TestQueryResultStoreResult(BaseTestCase):
def setUp(self):
super(TestQueryResultStoreResult, self).setUp()
@@ -148,4 +250,59 @@ class TestQueryResultStoreResult(BaseTestCase):
self.assertEqual(models.Query.get_by_id(query1.id)._data['latest_query_data'], query_result.id)
self.assertEqual(models.Query.get_by_id(query2.id)._data['latest_query_data'], query_result.id)
self.assertNotEqual(models.Query.get_by_id(query3.id)._data['latest_query_data'], query_result.id)
class TestEvents(BaseTestCase):
def raw_event(self):
timestamp = 1411778709.791
user = user_factory.create()
created_at = datetime.datetime.utcfromtimestamp(timestamp)
raw_event = {"action": "view",
"timestamp": timestamp,
"object_type": "dashboard",
"user_id": user.id,
"object_id": 1}
return raw_event, user, created_at
def test_records_event(self):
raw_event, user, created_at = self.raw_event()
event = models.Event.record(raw_event)
self.assertEqual(event.user, user)
self.assertEqual(event.action, "view")
self.assertEqual(event.object_type, "dashboard")
self.assertEqual(event.object_id, 1)
self.assertEqual(event.created_at, created_at)
def test_records_additional_properties(self):
raw_event, _, _ = self.raw_event()
additional_properties = {'test': 1, 'test2': 2, 'whatever': "abc"}
raw_event.update(additional_properties)
event = models.Event.record(raw_event)
self.assertDictEqual(json.loads(event.additional_properties), additional_properties)
class TestWidgetDeleteInstance(BaseTestCase):
def test_delete_removes_from_layout(self):
widget = widget_factory.create()
widget2 = widget_factory.create(dashboard=widget.dashboard)
widget.dashboard.layout = json.dumps([[widget.id, widget2.id]])
widget.dashboard.save()
widget.delete_instance()
self.assertEquals(json.dumps([[widget2.id]]), widget.dashboard.layout)
def test_delete_removes_empty_rows(self):
widget = widget_factory.create()
widget2 = widget_factory.create(dashboard=widget.dashboard)
widget.dashboard.layout = json.dumps([[widget.id, widget2.id]])
widget.dashboard.save()
widget.delete_instance()
widget2.delete_instance()
self.assertEquals("[]", widget.dashboard.layout)