Compare commits


227 Commits

Author SHA1 Message Date
Arik Fraimovich
7db5449dad Merge pull request #387 from EverythingMe/feature/api_key_auth
Record event when accessing query result from API
2015-03-12 11:46:35 +02:00
Arik Fraimovich
7f6c7f0634 Record event when accessing query result from API 2015-03-12 11:43:21 +02:00
Arik Fraimovich
73955c74f7 Merge pull request #386 from EverythingMe/feature/api_key_auth
Code cleanup (remove "worker's status" dead link & unused settings)
2015-03-11 11:30:15 +02:00
Arik Fraimovich
7de85da8ef Remove unused settings 2015-03-11 07:50:49 +02:00
Arik Fraimovich
0aab35252a Remove broken "Worker's Status" page 2015-03-11 07:47:10 +02:00
Arik Fraimovich
141dbc9e70 Merge pull request #385 from EverythingMe/feature/api_key_auth
Feature: optional API Key authentication instead of HMAC
2015-03-10 18:29:01 +02:00
Arik Fraimovich
2e513c347c Cleanup 2015-03-10 18:21:51 +02:00
Arik Fraimovich
335c136ec2 Show API Key button in query view 2015-03-10 18:08:02 +02:00
Arik Fraimovich
df1170eb9b Feature: optional api key only authentication 2015-03-10 17:51:17 +02:00
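For illustration, a minimal sketch of what API-key-only access to a query result could look like; the endpoint shape and parameter name here are assumptions, not taken from the PR:

import requests

# Hypothetical URL and parameter name; the PR only states that an API key
# can replace HMAC-signed URLs when accessing query results.
response = requests.get(
    "https://redash.example.com/api/queries/42/results.json",
    params={"api_key": "YOUR_API_KEY"},
)
print(response.json())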
Arik Fraimovich
69bcaddbe0 Fix: migrations stopped working due to peewee upgrade 2015-03-09 16:55:55 +02:00
Arik Fraimovich
67958cc27b MySQL query runner: make configuration access safer 2015-03-09 10:16:06 +02:00
Arik Fraimovich
6c716f23d9 Fix migration & query runner for mysql 2015-03-09 08:58:03 +02:00
Arik Fraimovich
bea11b0ac2 Merge pull request #384 from EverythingMe/feature/python_query_runner
Experimental Python query runner
2015-03-08 15:03:59 +02:00
Arik Fraimovich
4927386299 Experimental Python query runner 2015-03-08 15:02:57 +02:00
Arik Fraimovich
30a8550f6b Merge pull request #383 from EverythingMe/fix/migration
Fix: make migration work with new peewee
2015-03-08 14:37:42 +02:00
Arik Fraimovich
0389a45be4 Fix: make migration work with new peewee 2015-03-08 13:28:18 +02:00
Arik Fraimovich
707c169867 Merge pull request #382 from EverythingMe/feature/datasources_v2
Fix: import should be global
2015-03-08 12:27:34 +02:00
Arik Fraimovich
fca034ac0d Fix: import should be global 2015-03-08 12:23:51 +02:00
Arik Fraimovich
97691ea5ee Merge pull request #380 from EverythingMe/feature/datasources_v2
Refactor datasources (query runners)
2015-03-08 11:50:09 +02:00
Arik Fraimovich
40335a0e21 Fix: add missing option flags 2015-03-08 11:00:56 +02:00
Arik Fraimovich
9344cbd078 Update bootstrap script to support new format 2015-03-08 10:38:50 +02:00
Arik Fraimovich
9442fd9465 Update logging messages 2015-03-02 09:49:17 +02:00
Arik Fraimovich
c816f1003d Bump version 2015-03-02 09:45:29 +02:00
Arik Fraimovich
2107b79a80 Use validation for data source editing 2015-03-02 09:44:55 +02:00
Arik Fraimovich
8fae6de8c7 Update datasource CLI to use new format 2015-03-02 09:40:15 +02:00
Arik Fraimovich
d798c77574 Support for already valid data source config 2015-03-02 07:34:06 +02:00
Arik Fraimovich
0abce27381 Set configuration in base ctor 2015-02-24 07:50:10 +02:00
Arik Fraimovich
8a171ba39a Use JSON Schema for data source configuration 2015-02-24 07:50:10 +02:00
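A minimal sketch of JSON-Schema-driven data source configuration, assuming the jsonschema library; the schema fields below are illustrative, not copied from the commit:

import jsonschema

# Illustrative schema for a PostgreSQL data source; "title" carries the
# friendly name mentioned in the configuration-spec commit below.
pg_configuration_schema = {
    "type": "object",
    "properties": {
        "host": {"type": "string"},
        "port": {"type": "number"},
        "dbname": {"type": "string", "title": "Database Name"},
    },
    "required": ["dbname"],
}

# Raises jsonschema.ValidationError if the options don't match the schema.
jsonschema.validate({"host": "localhost", "port": 5432, "dbname": "postgres"},
                    pg_configuration_schema)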
Arik Fraimovich
20af276772 Updated configuration spec to include friendly name and more 2015-02-24 07:50:10 +02:00
Arik Fraimovich
4058342763 WIP: configuration object 2015-02-24 07:50:10 +02:00
Arik Fraimovich
af64657260 Migration to update all data source options 2015-02-24 07:50:09 +02:00
Arik Fraimovich
b6bd46e59e New query runners implementation 2015-02-24 07:50:09 +02:00
Arik Fraimovich
31fe547e03 Merge pull request #378 from EverythingMe/feature/variables
Fix #263: timestamp fields should be with time zone
2015-02-23 11:10:20 +02:00
Arik Fraimovich
aff324071e Update peewee version 2015-02-23 09:19:39 +02:00
Arik Fraimovich
131266e408 Fix #263: timestamp fields should be with time zone 2015-02-23 09:02:16 +02:00
Arik Fraimovich
b1f97e8c8d Merge pull request #377 from olgakogan/master
'Download Dataset' fix - error in case of big numeric values
2015-02-21 15:21:18 +02:00
Arik Fraimovich
9783d6e839 Merge pull request #374 from akariv/master
Support unicode queries in search API
2015-02-21 14:48:36 +02:00
akariv
8eea2fb367 Support unicode queries in search API
Modify query test case to use unicode strings
2015-02-20 23:49:37 +02:00
olgakogan
b585480c81 removed redundant handling of large numbers when generating a csv file (causes ValueError: timestamp out of range) 2015-02-20 22:33:02 +02:00
Arik Fraimovich
89e307daba Merge pull request #373 from EverythingMe/feature/variables
UI Fixes
2015-02-08 18:18:37 +02:00
Arik Fraimovich
a5eb0e293c Fix: don't lock query editing while executing 2015-02-08 18:17:08 +02:00
Arik Fraimovich
48d1113225 Fix #371: show notification when query fails. 2015-02-08 18:08:24 +02:00
Arik Fraimovich
d82d5c3bdc Merge pull request #372 from EverythingMe/feature/variables
Several UI fixes
2015-02-08 18:05:05 +02:00
Arik Fraimovich
dfe58b3953 Give the user the option to disable sorting of chart data 2015-02-08 18:02:36 +02:00
Arik Fraimovich
44019b8357 Variables: allow nesting variables 2015-02-08 17:07:20 +02:00
Arik Fraimovich
3c15a44faf Fix: keyboard shortcuts were not unbound 2015-02-08 17:07:06 +02:00
Arik Fraimovich
8d113dadd2 Revert "Fix #242: handle the case there is no connection to the server"
This reverts commit 3960005002.

Conflicts:
	rd_ui/app/index.html
	rd_ui/bower.json
2015-02-02 18:02:42 +02:00
Arik Fraimovich
c1dd26aee7 Merge pull request #370 from alexanderlz/master
add ISO datetime to filename when saving chart as image
2015-02-02 10:52:00 +02:00
Alexander Leibzon
b2228c2a39 replace 'possibly dangerous for some OSs' characters 2015-02-01 15:29:46 +02:00
Alexander Leibzon
d9618cb09c add ISO datetime to filename when saving chart as image 2015-02-01 14:52:58 +02:00
Arik Fraimovich
c8ca683d3a Merge pull request #368 from alexanderlz/master
Issue #168. Visualization: save as image.
2015-02-01 13:22:40 +02:00
Alexander Leibzon
888963ffaa Merge branch 'master' of https://github.com/alexanderlz/redash
Conflicts:
	rd_ui/app/index.html
	rd_ui/bower.json
2015-02-01 13:20:26 +02:00
Alexander Leibzon
ae947a8310 removing unwanted commit 2015-02-01 13:18:57 +02:00
Alexander Leibzon
bee9cde347 removing unwanted commit 2015-02-01 12:00:23 +02:00
Arik Fraimovich
c131dab125 Merge pull request #369 from EverythingMe/fix/dashboard_filters
Fix: filters got linked when they shouldn't have.
2015-02-01 11:57:46 +02:00
Arik Fraimovich
e113642ae4 Fix: filters got linked when they shouldn't have.
- Make a copy of the first filter, to prevent it from controlling the other filters.
- If no query string value is given or dashboard filters are enabled, don't link filters.
2015-02-01 11:51:07 +02:00
Arik Fraimovich
b76906b168 Merge pull request #367 from EverythingMe/feature/offline
Fix #242: handle the case there is no connection to the server
2015-01-29 20:46:32 +02:00
Arik Fraimovich
3960005002 Fix #242: handle the case there is no connection to the server 2015-01-29 20:43:03 +02:00
Arik Fraimovich
3dde578b86 Only try to render params if they are required. 2015-01-29 15:33:08 +02:00
Arik Fraimovich
813f0e74ff Merge pull request #366 from EverythingMe/fix/mget_error
Several chart editor fixes and additions
2015-01-27 22:49:41 +02:00
Arik Fraimovich
1e4e37c2ce Ability to set y axis min/max (closes #257) 2015-01-27 22:45:16 +02:00
Arik Fraimovich
a00c80eab2 Don't change zIndex if it was already set 2015-01-27 22:30:26 +02:00
Arik Fraimovich
496e5ebe8c Fix: if new series was created in result set, it wasn't using the default chart type 2015-01-27 22:23:10 +02:00
Arik Fraimovich
18cc8434a0 Merge pull request #365 from EverythingMe/fix/mget_error
Fix: when no queries are being run, cleanup job fails with error
2015-01-27 18:39:26 +02:00
Arik Fraimovich
5eba318019 Fix: when no queries are being run, cleanup job fails with error 2015-01-27 18:23:21 +02:00
Arik Fraimovich
63274dbb17 Merge pull request #363 from EverythingMe/feature/query_parameters
Feature: support for query parameters
2015-01-27 18:22:29 +02:00
Arik Fraimovich
4c73e788ae Ability to set ttl (max age) from query string 2015-01-27 17:17:58 +02:00
Arik Fraimovich
b71a2b3651 Enable query params in dashboard 2015-01-27 17:00:21 +02:00
Arik Fraimovich
521a32dfff Merge pull request #364 from EverythingMe/fix/missing_text_widgets
Remove unneeded where clause which was preventing text widgets from showing.
2015-01-27 12:59:58 +02:00
Arik Fraimovich
fd6ebe6e12 Remove unneeded where clause which was preventing text widgets from showing. 2015-01-27 12:52:17 +02:00
Arik Fraimovich
6fb97675ad Add mustache to Karma conf 2015-01-27 11:28:51 +02:00
Arik Fraimovich
c0c102207d Initial work on support for query parameters 2015-01-27 10:28:11 +02:00
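The parameters feature is mustache-based (note "Add mustache to Karma conf" above, which suggests the templates are rendered client-side); a hedged sketch of the templating idea, using pystache purely for illustration:

import pystache

# A query with a {{start_date}} parameter; in re:dash the value would come
# from the query string or dashboard. Names here are illustrative.
template = "SELECT count(0) FROM events WHERE created_at > '{{start_date}}'"
print(pystache.render(template, {"start_date": "2015-01-01"}))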
Arik Fraimovich
3b9d9ac75d Merge pull request #362 from joeysim/ctrl-s-for-save
added support for saving query with ctrl+s
2015-01-26 07:31:53 +02:00
Joey Simhon
2536fd57ed added support for saving query with cmd+s 2015-01-25 22:52:31 +02:00
Arik Fraimovich
d941e5e5b1 Merge pull request #361 from EverythingMe/DAT-825
Fix: fail with 403 when user not allowed to archive query.
2015-01-25 17:44:58 +02:00
Arik Fraimovich
039b0a89bb Merge pull request #359 from alexanderlz/master
add 'autoclose' to notifications (i.e. close after 3 seconds)
2015-01-25 17:35:53 +02:00
Arik Fraimovich
febf9939c8 Fix: fail with 403 when user not allowed to archive query. 2015-01-25 17:30:10 +02:00
Arik Fraimovich
bb84c6dab8 Merge pull request #360 from EverythingMe/DAT-825
Fix Gruntfile.js settings to copy font files.
2015-01-25 17:29:57 +02:00
Arik Fraimovich
cddc00e2cc Fix Gruntfile.js settings to copy font files. 2015-01-25 17:28:41 +02:00
Alexander Leibzon
091e3d41e1 add 'autoclose' to notifications (i.e. close after 3 seconds) 2015-01-25 17:14:06 +02:00
Arik Fraimovich
9dc3a35c1a Merge pull request #357 from alexanderlz/master
Bug #303: 'Show Total' duplicates values on multiple runs
2015-01-25 16:27:24 +02:00
Arik Fraimovich
f8878d3006 Merge pull request #358 from EverythingMe/DAT-825
Feature: archive query
2015-01-25 16:24:32 +02:00
Arik Fraimovich
1c0d596f26 Bump version due to migration. 2015-01-25 16:23:58 +02:00
Arik Fraimovich
1afd2ab388 Refactoring of @christophervalles work on query delete feature:
- Change delete into archive.
- Safely remove widgets.
- Make sure archived queries don't get scheduled, or show up in search.
- If direct link to query used, show notification.
- Tests.
- Some more.
2015-01-25 16:17:52 +02:00
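With the is_archived column added by the migration shown in the diffs below, keeping archived queries out of scheduling and search is a simple filter; a peewee-style sketch, illustrative rather than the exact redash code:

from redash import models

# models.Query.is_archived is the column the migration below adds to the
# queries table; scheduled runs and search can filter on it.
active_queries = models.Query.select().where(models.Query.is_archived == False)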
Christopher Valles
4aa9500402 Working on delete query 2015-01-25 16:16:31 +02:00
Alexander Leibzon
4a8a4482fc add {} for readability 2015-01-25 15:29:45 +02:00
Alexander Leibzon
d83849a1b5 fix to apply with the original logic 2015-01-25 14:44:02 +02:00
Alexander Leibzon
44272f5d66 Bug #303: 'Show Total' duplicates values on multiple runs 2015-01-24 22:02:13 +02:00
Arik Fraimovich
83727ae931 Merge pull request #356 from alexanderlz/master
Bug #307 Fix: Notifications stopped working
2015-01-22 12:01:30 +02:00
Alexander Leibzon
0b0b88a255 remove unused line 2015-01-22 00:27:01 +02:00
Alexander Leibzon
f23d709f4e Bug #307 fix. Notifications stopped working 2015-01-22 00:16:31 +02:00
Arik Fraimovich
88abbc7ea6 Merge pull request #355 from EverythingMe/feature/personal_home
Several small fixes
2015-01-20 16:36:32 +02:00
Arik Fraimovich
16f0413af8 Fix: don't show dashboard filters where it's not enabled 2015-01-20 16:35:55 +02:00
Arik Fraimovich
f47020a64d Report personal page as different page 2015-01-20 16:32:31 +02:00
Arik Fraimovich
55e1ef81f7 Add activity_log to list of tables redash_reader can query 2015-01-20 16:32:31 +02:00
Arik Fraimovich
6bb43d0411 Merge pull request #354 from EverythingMe/feature/personal_home
Feature: personal home with recent queries & dashboards
2015-01-19 12:15:13 +02:00
Arik Fraimovich
f51c2328c9 Feature: personal home with recent queries & dashboards 2015-01-19 12:09:06 +02:00
Arik Fraimovich
fd37188ace Merge pull request #353 from EverythingMe/bug/dashboard_auto_refresh
Fix: nulls converted to strings in UI
2015-01-19 10:51:44 +02:00
Arik Fraimovich
758e27ce91 Fix: nulls converted to strings in UI 2015-01-19 10:49:39 +02:00
Arik Fraimovich
9a3b25eb50 Merge pull request #352 from EverythingMe/bug/dashboard_auto_refresh
Fix: dashboard auto refresh stopped working
2015-01-19 09:14:32 +02:00
Arik Fraimovich
6da890dfb8 FIX: dashboard auto refresh stopped working 2015-01-19 08:32:40 +02:00
Arik Fraimovich
0d35ec7139 Merge pull request #349 from erans/master
Minor bug fixes + support for limit and skip in simple query
2015-01-18 10:11:27 +02:00
Arik Fraimovich
dc0f9a63cb Merge pull request #351 from joeysim/search_improvements
Search improvements
2015-01-18 09:22:41 +02:00
Arik Fraimovich
21c042996e Merge pull request #350 from joeysim/ctrl_enter_exec
Added support for Cmd+Enter query execution for PCs
2015-01-18 09:21:32 +02:00
Joey Simhon
5f22adadf2 ordering all_queries by created_at desc for better relevancy with big lists 2015-01-17 21:19:22 +02:00
Joey Simhon
4e8888ce2f sort searched queries by creation time, assuming the newer queries are usually more relevant 2015-01-17 21:14:56 +02:00
Joey Simhon
0a69609d38 Added support for Cmd+Enter query execution for PCs 2015-01-17 00:32:21 +02:00
Eran Sandler
2dbcd88313 added support for skip and limit 2015-01-15 17:14:48 +02:00
Eran Sandler
6b0775f7c7 fixed an issue where the 'query' element was missing, as well as a bad sort order in simple queries 2015-01-15 17:11:32 +02:00
Arik Fraimovich
e85d3c3c9f Merge pull request #348 from EverythingMe/feature/additional_manage_commands
Feature: new data source management commands in manage.py
2015-01-14 12:35:13 +02:00
Arik Fraimovich
e20f57bba8 Added edit & delete commands to data source cli 2015-01-14 12:23:53 +02:00
Arik Fraimovich
933ace2e38 Split CLI commands to several files for easier editing and naming. 2015-01-14 10:52:11 +02:00
Arik Fraimovich
4c1e5aed6b Remove import from settings command (obsolete). 2015-01-14 10:27:53 +02:00
Arik Fraimovich
77d982b4aa Merge pull request #347 from barnash/query-params-for-filters
Query params for filters
2015-01-13 22:35:32 +02:00
barnash
02c8163265 Changed the query param to something more url friendly 2015-01-12 18:56:44 +02:00
Arik Fraimovich
ef868dbb6e Merge pull request #346 from erans/master
Initial support for Mongo's aggregation framework.
2015-01-12 18:17:41 +02:00
Iftach Bar
b2bab33baa added support for deep links to dashboards with saved filters 2015-01-12 09:23:27 +02:00
Iftach Bar
149e0835f8 fixed jshint stuff - semicolon in different places 2015-01-12 09:22:53 +02:00
Eran Sandler
50bed1d8f2 Initial support for Mongo's aggregation framework. 2015-01-11 12:37:37 +02:00
Eran Sandler
d4b5d78743 Perform a JSON.stringify on values whose type is "object" 2015-01-11 12:28:21 +02:00
Arik Fraimovich
7fc82a2562 Merge pull request #345 from EverythingMe/vagrant_dev
Developer Vagrant box for easier contribution
2014-12-30 07:52:07 +02:00
Arik Fraimovich
92fb138c2c Vagrant file to use the redash/dev box 2014-12-30 07:45:30 +02:00
Arik Fraimovich
71b4b45a3c Merge pull request #344 from EverythingMe/feature/query_results_cleanup
Job to cleanup unused query results
2014-12-25 15:58:10 +02:00
Arik Fraimovich
07f4a1b227 Fix: wiredep failing after version upgrade 2014-12-25 15:52:52 +02:00
Arik Fraimovich
e116e88e98 Job to cleanup unused query results 2014-12-25 15:39:49 +02:00
Arik Fraimovich
2278a181ca Merge pull request #339 from EverythingMe/counter-vis
bugfix: Counter visualization font size issues
2014-11-11 18:21:29 +02:00
Amir Nissim
98dc75a404 bugfix: Counter visualization was not watching for filter changes 2014-11-11 13:04:45 +02:00
Amir Nissim
536918aab3 bugfix: Counter visualization font size issues 2014-11-10 15:21:03 +02:00
Arik Fraimovich
c75ac80c7a Merge pull request #333 from EverythingMe/fix/import
Fix: mixed number columns were wrongly detected as integer
2014-11-05 11:33:46 +02:00
Arik Fraimovich
522d8542e9 Fix: mixed number columns were wrongly detected as integer 2014-11-05 11:30:17 +02:00
Arik Fraimovich
562df44c22 Merge pull request #331 from EverythingMe/fix/import
Fixes and improvements to import dashboard command:
2014-11-04 07:34:59 +02:00
Arik Fraimovich
86e6798c96 manage.py: better output for list data sources command 2014-11-04 07:26:32 +02:00
Arik Fraimovich
db7a287e82 manage.py: list all users command 2014-11-04 07:26:16 +02:00
Arik Fraimovich
518206f208 Fixes and improvements to import dashboard:
- Update it to not expect query result.
- Add support for specifying data source.
- Create mapping file if it doesn't exist yet.
2014-11-04 07:24:51 +02:00
Arik Fraimovich
bcee1e12b4 Merge pull request #325 from EverythingMe/feature/search-by-id
Add support for searching for query by id
2014-10-30 08:23:41 +02:00
Arik Fraimovich
410f4f35e2 Add support for searching for query by id 2014-10-30 07:58:53 +02:00
Arik Fraimovich
84ea9fec43 Merge pull request #323 from EverythingMe/counter-vis
Counter visualization
2014-10-27 13:58:58 +02:00
Amir Nissim
cda82b7adc #27: use <select> for columns names 2014-10-27 11:47:38 +02:00
Amir Nissim
f2d8c2020b #27: counter and target as query params, change UI 2014-10-27 11:34:56 +02:00
Amir Nissim
1b82ecbc46 #27: Counter visualization draft 2014-10-26 15:42:57 +02:00
Arik Fraimovich
e381331c36 Merge pull request #319 from EverythingMe/bug_292
#292: Customizable series colors
2014-10-23 14:44:02 +03:00
Amir Nissim
ff58247987 #292: move color palette to ng_highcharts 2014-10-23 14:36:30 +03:00
Amir Nissim
dcf0d2cbe3 #292: Customizable series colors 2014-10-23 13:46:43 +03:00
Arik Fraimovich
eb99fa5671 Merge pull request #318 from EverythingMe/docs_setup
Packer: make re:dash version configurable
2014-10-22 12:01:32 +03:00
Arik Fraimovich
ce3e19f212 Make redash version configurable 2014-10-22 11:55:17 +03:00
Arik Fraimovich
44dca6da01 Spelling mistakes. 2014-10-21 19:02:17 +03:00
Arik Fraimovich
34c9fee540 Link to new setup instructions. 2014-10-21 19:01:40 +03:00
Arik Fraimovich
e0b13b2ffa Merge pull request #316 from EverythingMe/feature_users_cli
Add commands to change user's password and grant admin
2014-10-21 18:57:40 +03:00
Arik Fraimovich
df362c12b6 Add commands to change user password and grant admin 2014-10-21 18:51:23 +03:00
Arik Fraimovich
0d1f8c948a Merge pull request #309 from EverythingMe/docs_setup
Setup script for Ubuntu/Debian + packer configuration
2014-10-21 18:42:26 +03:00
Arik Fraimovich
f523378326 Setup script for Ubuntu/Debian + packer configuration
This script is intended to work on Ubuntu 12.04, Ubuntu 14.04 and Debian Wheezy (for GCE users).
To make sure we use the same version of Redis across all distributions, we install it from source,
and to make sure we use the same version of PostgreSQL, we install it from PostgreSQL's apt repository.

Also included Packer configuration to generate GCE & AWS images.
2014-10-21 18:28:39 +03:00
Arik Fraimovich
b0f9e49709 Merge pull request #313 from erans/master
Forced setting a script execution path
2014-10-21 14:32:03 +03:00
Eran Sandler
b6dbb4e3f8 forced setting a script execution path 2014-10-21 11:20:31 +03:00
Arik Fraimovich
3f6a0e8ffa Merge pull request #312 from erans/master
MongoDB ReplicaSet support and a new connection string format.
2014-10-21 10:21:49 +03:00
Eran Sandler
a7bcc6d31e Added support for MongoDB ReplicaSet as well as changed the connection string format to a JSON based one (like BigQuery). Check the wiki for an example. 2014-10-21 10:16:48 +03:00
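A hedged example of what the JSON-based MongoDB connection configuration might look like; the field names are assumptions based on the commit message (the wiki it references holds the authoritative format):

import json

# Illustrative only: the commit says the format is JSON based "like BigQuery"
# and supports replica sets, but does not show the exact fields.
mongodb_options = json.dumps({
    "connectionString": "mongodb://host1:27017,host2:27017",
    "replicaSetName": "rs0",
    "dbName": "analytics",
})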
Arik Fraimovich
8aa2d8e70a landscape.io configuration file 2014-10-19 13:41:29 +03:00
Arik Fraimovich
4720e12be7 add angular-ui-select to list of dependencies 2014-10-15 17:56:32 +03:00
Arik Fraimovich
5463591f0d Merge branch 'feature/dashboard_add_query_by_name' 2014-10-15 17:45:57 +03:00
Arik Fraimovich
2a0198fba8 Make search expect at least 2 characters 2014-10-15 17:45:39 +03:00
Arik Fraimovich
652f214b25 Updated bower dependencies:
- Angular 1.2.7 -> 1.2.18 (to support angular-ui-select).
- angular-resource and angular-route to match Angular version.
- angular-growl to latest version that supports ~1.2.
- Change version of angular-ui-select to specific one.
2014-10-15 17:42:08 +03:00
Arik Fraimovich
aa49780134 Use unminified version of angular-ui-select 2014-10-15 17:41:55 +03:00
Raymond
f483b61cfb add global html sanitizer 2014-10-15 20:55:29 +08:00
Arik Fraimovich
38a189b671 Merge pull request #306 from raymoondtang/fix/clomun_type_ingeter
Client fix: column type supports integer
2014-10-15 15:46:15 +03:00
Raymond
c2331988db use selected_query for ng-show of visualisation form 2014-10-15 20:32:15 +08:00
Raymond
eff5bdb454 Merge branch 'master' of github-yalo:EverythingMe/redash into fix/clomun_type_ingeter 2014-10-15 19:29:01 +08:00
Raymond
bd1babec3a Add query to dashboard based on name not query id, issue #171 2014-10-15 14:46:55 +08:00
Raymond
d43c2bbf62 table column type handle both integer and float 2014-10-13 12:57:42 +08:00
Arik Fraimovich
87db8099d6 Fix: need to group by runtime and retrieved_at 2014-10-06 09:53:02 +03:00
Arik Fraimovich
ebea118c7d Merge pull request #300 from EverythingMe/feature_google_oauth
Remove query stats (runtime, last retrieve) from search as it was too slow
2014-10-06 09:45:03 +03:00
Arik Fraimovich
297ac5c9bd Fix markdown filter (failing for undefined) 2014-10-06 09:41:56 +03:00
Arik Fraimovich
9b23fb4235 Remove query stats from search, as it was too slow 2014-10-06 09:41:40 +03:00
Arik Fraimovich
0a71f5e22d Merge pull request #298 from erans/master
Initial support for MongoDB.
2014-10-06 08:26:03 +03:00
Arik Fraimovich
0a8aaceb85 Merge pull request #299 from EverythingMe/feature_google_oauth
Show last execution time & runtime in search results + event tracking
2014-10-06 08:25:17 +03:00
Arik Fraimovich
00979f3ad7 Event tracking for search 2014-10-06 08:00:56 +03:00
Arik Fraimovich
c7b48837f2 Show last execution time & runtime in search results 2014-10-06 07:55:17 +03:00
Eran Sandler
418c5322c1 added extra error handling for invalid query and invalid database name 2014-10-02 12:42:46 +03:00
Arik Fraimovich
dc5b4c26a3 Updated README: link to new demo instance. 2014-10-02 07:57:52 +03:00
Eran Sandler
9ed0a5ba85 removed a debug message and changed to a better error message when a collection is not specified. 2014-09-30 18:43:40 +03:00
Eran Sandler
db0770fc17 Initial support for MongoDB.
Support simple queries using a JSON format:
{
	"collection" : THE NAME OF THE COLLECTION TO QUERY,
	"query" : {
A DICTIONARY FOR QUERYING FIELDS (similar to what you would find in PyMongo)
	},
	"fields" : {
		LIST OF FIELDS TO RETURN IN THE SPECIFIED ORDER
	},
	"sort" : {
		LIST OF FIELDS TO SORT BY (1 - Ascending, -1 - descending)
	}
}

For example:
{
	"collection" : "mycoolcollection",
	"query" : {
		"fieldA" : { "$gte" : 5 },
		"created" : { "$lt" : "ISODate(\"2014-09-01 23:43\")" }
	},
	"fields" : {
		"fieldA" : 1,
		"created" : 2
	},
	"sort" : {
		"created" : -1
	}
}
2014-09-30 18:34:35 +03:00
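A minimal sketch of how such a JSON query could be translated into a pymongo call; this illustrates the format described above and is not the actual query runner code:

import json
import pymongo

def run_simple_query(connection_string, db_name, query_text):
    # Parse the JSON format described above and turn it into a find() call;
    # sort becomes a list of (field, direction) pairs, as pymongo expects.
    spec = json.loads(query_text)
    db = pymongo.MongoClient(connection_string)[db_name]
    cursor = db[spec["collection"]].find(spec.get("query", {}), spec.get("fields"))
    if "sort" in spec:
        cursor = cursor.sort(list(spec["sort"].items()))
    return list(cursor)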
Arik Fraimovich
9bb58e71d2 Merge pull request #296 from EverythingMe/feature_google_oauth
Feature: basic search page for queries
2014-09-30 08:43:16 +03:00
Arik Fraimovich
560598eaad Search UI. 2014-09-30 08:39:13 +03:00
Arik Fraimovich
f9144fc927 Naive search implementation. 2014-09-30 08:37:59 +03:00
Arik Fraimovich
883bf173c0 Merge pull request #295 from EverythingMe/feature_google_oauth
Feature: support markdown in query description (fixes #293)
2014-09-29 18:15:24 +03:00
Arik Fraimovich
3f2bb65b32 Show markdown in query view too 2014-09-29 18:10:17 +03:00
Arik Fraimovich
3917af019a Feature: support markdown in query description 2014-09-29 17:59:40 +03:00
Arik Fraimovich
e88837e835 Merge pull request #291 from EverythingMe/feature_google_oauth
Move event recording to Celery/database instead of log file
2014-09-27 17:45:55 +03:00
Arik Fraimovich
7abdc2543e update manage.py to use new Event.record method. 2014-09-27 17:45:04 +03:00
Arik Fraimovich
91ab90a6fe Move event recording to Celery/database instead of log file 2014-09-27 17:41:50 +03:00
Arik Fraimovich
7fd2bd3d24 Merge pull request #290 from EverythingMe/feature_google_oauth
Clearer google login button
2014-09-27 16:26:02 +03:00
Arik Fraimovich
3ed1ea1e33 Clearer google login button 2014-09-26 13:13:05 +03:00
Arik Fraimovich
a4486c56b9 Merge pull request #289 from EverythingMe/feature_google_oauth
Fix: add necessary scope to get user's name
2014-09-26 00:40:11 +03:00
Arik Fraimovich
3da0ecf36c Fix: add necessary scope to get user's name 2014-09-25 17:55:43 +03:00
Arik Fraimovich
11a1095b18 Merge pull request #284 from EverythingMe/feature_google_oauth
Feature: Google OAuth support (instead of deprecated OpenID)
2014-09-24 18:13:45 +03:00
Arik Fraimovich
b43485f322 Update tests 2014-09-21 10:11:03 +03:00
Arik Fraimovich
d83675326b Only enable google oauth if client id & secret provided 2014-09-21 09:07:52 +03:00
Arik Fraimovich
8d7b9a552e Google OAuth support (fixes #223) 2014-09-21 08:53:41 +03:00
Arik Fraimovich
e1eb75b786 Add to requirements flask-oauth and remove flask-googleopenid 2014-09-21 08:48:15 +03:00
Arik Fraimovich
34a3c9e91c Link to wiki in readme 2014-09-17 16:14:49 +03:00
Arik Fraimovich
e007a2891d Fix build status image in readme 2014-09-17 16:06:15 +03:00
Arik Fraimovich
febe6e4aa7 Update readme 2014-09-17 16:04:30 +03:00
Arik Fraimovich
8099dafc68 Merge pull request #283 from EverythingMe/fix_stuck_jobs
Update psycopg2 to 2.5.2.
2014-09-15 09:28:47 +03:00
Arik Fraimovich
ce3d5e637f Update psycopg2 to 2.5.2.
In 2.5.1 there was an issue where the OperationalError exception caused a SEGFAULT
when being pickled. This crashed the Celery worker, causing jobs to be lost.
2014-09-15 07:25:35 +03:00
Arik Fraimovich
4a52ccd4fa Gitter integration for CircleCI. 2014-09-14 18:23:02 +03:00
Arik Fraimovich
a0c81f8a31 Merge pull request #281 from EverythingMe/fix_stuck_jobs
Several fixes to reduce cases of stuck jobs
2014-09-11 07:50:35 +03:00
Arik Fraimovich
ce13b79bdc Use correct logging level 2014-09-11 07:47:30 +03:00
Arik Fraimovich
c580db277d Add cleanup_tasks job.
Enumerates all locks and removes those of non-existing jobs. Useful
for the case where the worker was cold restarted and jobs weren't finished
properly.
2014-09-11 07:42:36 +03:00
Arik Fraimovich
5e944e9a8f If a found lock is for a ready job, ignore it.
(ready = revoked, finished or failed)
2014-09-11 07:41:43 +03:00
Arik Fraimovich
4b94cf706a Set default locks expiry time to 12 hours 2014-09-11 07:41:23 +03:00
Arik Fraimovich
364c51456d Set expiry time to locks, just in case for some reason they get stuck. 2014-09-11 07:40:20 +03:00
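The stuck-jobs fixes above amount to giving every execution lock a TTL and treating locks of ready (revoked/finished/failed) jobs as stale; a hedged Redis sketch of the expiry part, with an illustrative key name rather than redash's exact one:

import redis

LOCK_EXPIRY = 12 * 60 * 60  # the 12-hour default set above

redis_connection = redis.StrictRedis()

def acquire_query_lock(query_hash, job_id):
    # NX: acquire only if no lock exists; EX: expire automatically so a
    # cold-restarted worker cannot leave the lock stuck forever.
    return redis_connection.set("query_hash_job:{}".format(query_hash), job_id,
                                nx=True, ex=LOCK_EXPIRY)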
Arik Fraimovich
1274d36abc Merge pull request #280 from EverythingMe/fix_stuck_jobs
Fix #261: cancelling jobs sends them to limbo
2014-09-06 18:12:03 +03:00
Arik Fraimovich
f6bd562dd2 Remove cleanup_tasks, as it's not stable 2014-09-06 18:09:04 +03:00
Arik Fraimovich
065d2bc2c6 Schedule removal of dead tasks 2014-09-06 14:18:35 +03:00
Arik Fraimovich
653ed1c57a Add cleanup task to remove locks of dead jobs 2014-09-06 14:18:15 +03:00
Arik Fraimovich
7dc1176628 Fix #261: cancelling jobs sends them to limbo 2014-09-06 13:56:36 +03:00
Arik Fraimovich
365b8a8c93 Merge pull request #279 from EverythingMe/json-results
API - query results in JSON format. fixes #278
2014-09-03 12:07:36 +03:00
Arik Fraimovich
6e1e0a9967 Merge QueryResultAPI with CSVQueryResultAPI 2014-09-03 11:55:17 +03:00
Amir Nissim
170640a63f API - query results in JSON format. fixes #278 2014-09-02 17:52:04 +03:00
Arik Fraimovich
5e970b73d5 Merge pull request #270 from olgakogan/master
added handling for querying strings with non standard characters
2014-08-25 12:00:02 +03:00
olgakogan
a4643472a5 added handling for querying strings with non standard characters 2014-08-24 19:08:10 +03:00
Arik Fraimovich
7aa01f2bd2 Comment out filters url sync tests. 2014-08-20 09:07:08 +03:00
Arik Fraimovich
cb4b0e0296 Merge pull request #269 from EverythingMe/257-chart-editor
Disable filters url syncing
2014-08-20 08:59:22 +03:00
Arik Fraimovich
2c05e921c4 Disable filters url syncing 2014-08-20 08:58:56 +03:00
Arik Fraimovich
c4877f254e Merge pull request #268 from EverythingMe/257-chart-editor
[#257] chart editor: global series type
2014-08-19 19:51:57 +03:00
Arik Fraimovich
9fc59de35f remove throttling of redrawData 2014-08-19 18:37:32 +03:00
Amir Nissim
eb50f3fc94 [#257] chart editor: use globalSeriesType when creating new series 2014-08-19 14:44:53 +03:00
Arik Fraimovich
12fe59827f Merge pull request #267 from EverythingMe/257-chart-editor
[#257] chart editor: global series type
2014-08-19 14:04:44 +03:00
Amir Nissim
ba540ff380 [#257] chart editor: global series type 2014-08-19 13:14:24 +03:00
97 changed files with 4247 additions and 1203 deletions

.landscape.yaml (new file, 2 lines)

@@ -0,0 +1,2 @@
+ignore-paths:
+  - migrations

Procfile

@@ -1,2 +1,2 @@
-web: ./manage.py runserver -p $PORT
+web: ./manage.py runserver -p $PORT --host 0.0.0.0
 worker: ./bin/run celery worker --app=redash.worker --beat -Qqueries,celery,scheduled_queries

README.md

@@ -1,72 +1,46 @@
-# [_re:dash_](https://github.com/everythingme/redash)
-![Build Status](https://circleci.com/gh/EverythingMe/redash.png?circle-token=8a695aa5ec2cbfa89b48c275aea298318016f040 "Build Status")
+<p align="center">
+<img title="re:dash" src='https://raw.githubusercontent.com/EverythingMe/redash/screenshots/redash_logo.png' />
+</p>
+<p align="center">
+<img title="Build Status" src='https://circleci.com/gh/EverythingMe/redash.png?circle-token=8a695aa5ec2cbfa89b48c275aea298318016f040'/>
+</p>
 **_re:dash_** is our take on freeing the data within our company in a way that will better fit our culture and usage patterns.
-Prior to **_re:dash_**, we tried to use tranditional BI suites and discovered a set of bloated, technically challenged and slow tools/flows. What we were looking for was a more hacker'ish way to look at data, so we built one.
+Prior to **_re:dash_**, we tried to use traditional BI suites and discovered a set of bloated, technically challenged and slow tools/flows. What we were looking for was a more hacker'ish way to look at data, so we built one.
 **_re:dash_** was built to allow fast and easy access to billions of records, that we process and collect using Amazon Redshift ("petabyte scale data warehouse" that "speaks" PostgreSQL).
+Today **_re:dash_** has support for querying multiple databases, including: Redshift, Google BigQuery, PostgreSQL, MySQL, Graphite and custom scripts.
 **_re:dash_** consists of two parts:
-1. **Query Editor**: think of [JS Fiddle](http://jsfiddle.net) for SQL queries. It's your way to share data in the organization in an open way, by sharing both the dataset and the query that generated it. This way everyone can peer review not only the resulting dataset but also the process that generated it. Also it's possible to fork it and generate new datasets and reach new insights.
-2. **Dashboards/Visualizations**: once you have a dataset, you can create different visualizations out of it, and then combine several visualizations into a single dashboard. Currently it supports bar charts, pivot table and cohorts.
+1. **Query Editor**: think of [JS Fiddle](http://jsfiddle.net) for SQL queries. It's your way to share data in the organization in an open way, by sharing both the dataset and the query that generated it. This way everyone can peer review not only the resulting dataset but also the process that generated it. Also it's possible to fork it and generate new datasets and reach new insights.
+2. **Dashboards/Visualizations**: once you have a dataset, you can create different visualizations out of it, and then combine several visualizations into a single dashboard. Currently it supports charts, pivot table and cohorts.
-This is the first release, which is more than usable but still has its rough edges and way to go to fulfill its full potential. The Query Editor part is quite solid, but the visualizations need more work to enrich them and to make them more user friendly.
+**_re:dash_** is a work in progress and has its rough edges and way to go to fulfill its full potential. The Query Editor part is quite solid, but the visualizations need more work to enrich them and to make them more user friendly.
 ## Demo
 ![Screenshots](https://raw.github.com/EverythingMe/redash/screenshots/screenshots.gif)
-You can try out the demo instance: http://rd-demo.herokuapp.com/ (login with any Google account).
+You can try out the demo instance: http://demo.redash.io/ (login with any Google account).
+## Getting Started
+* [Setting up re:dash instance](https://github.com/EverythingMe/redash/wiki/Setting-up-re:dash-instance) (includes links to ready made AWS/GCE images).
+* Additional documentation in the [Wiki](https://github.com/everythingme/redash/wiki).
-Due to Heroku dev plan limits, it has a small database of flights (see schema [here](http://rd-demo.herokuapp.com/dashboard/schema)). Also due to another Heroku limitation, it is running with the regular user, hence you can DELETE or INSERT data/tables. Please be nice and don't do this.
+## Getting help
+* [Google Group (mailing list)](https://groups.google.com/forum/#!forum/redash-users): the best place to get updates about new releases or ask general questions.
-* #redash IRC channel on [Freenode](http://www.freenode.net/).
 ## Technology
 * Python
 * [AngularJS](http://angularjs.org/)
 * [PostgreSQL](http://www.postgresql.org/) / [AWS Redshift](http://aws.amazon.com/redshift/)
 * [Redis](http://redis.io)
-PostgreSQL is used both as the operatinal database for the system, but also as the data store that is being queried. To be exact, we built this system to use on top of Amazon's Redshift, which supports the PG driver. But it's quite simple to add support for other datastores, and we do plan to do so.
-This is our first large scale AngularJS project, and we learned a lot during the development of it. There are still things we need to iron out, and comments on the way we use AngularJS are more than welcome (and pull requests just as well).
-### HighCharts
-HighCharts is really great, but it's not free for commercial use. Please refer to their [licensing options](http://shop.highsoft.com/highcharts.html), to see what applies for your use.
-It's very likely that in the future we will switch to [D3.js](http://d3js.org/) instead.
-## Getting Started
-* [Setting up re:dash on Heroku in 5 minutes](https://github.com/EverythingMe/redash/wiki/Setting-up-re:dash-on-Heroku-in-5-minutes)
-* [Setting re:dash on your own server (Ubuntu)](https://github.com/EverythingMe/redash/wiki/Setting-re:dash-on-your-own-server-(Ubuntu))
-**Need help setting re:dash or one of the dependencies up?** Ping @arikfr on the IRC #redash channel or send a message to the [mailing list](https://groups.google.com/forum/#!forum/redash-users), and he will gladly help.
+* Find us [on gitter](https://gitter.im/EverythingMe/redash#) (chat).
+* Contact Arik, the maintainer directly: arik@everything.me.
 ## Roadmap
-Below you can see the "big" features of the next 3 releases (for full list, click on the link):
-### [v0.3](https://github.com/EverythingMe/redash/issues?milestone=2&state=open)
-- Dashboard filters: ability to filter/slice the data you see in a single dashboard using filters (date or selectors).
-- Multiple databases support (including other database type than PostgreSQL).
-- Scheduled reports by email.
-- Comments on queries.
-### [v0.4](https://github.com/EverythingMe/redash/issues?milestone=3&state=open)
-- Query versioning.
-- More "realtime" UI (using websockets).
-- More visualizations.
+TBD.
 ## Reporting Bugs and Contributing Code

Vagrantfile (vendored, new file, 11 lines)

@@ -0,0 +1,11 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
+VAGRANTFILE_API_VERSION = "2"
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+  config.vm.box = "redash/dev"
+  config.vm.synced_folder "./", "/opt/redash/current"
+  config.vm.network "forwarded_port", guest: 5000, host: 9001
+end

circle.yml

@@ -23,3 +23,6 @@ deployment:
     branch: master
     commands:
       - make upload
+notify:
+  webhooks:
+    - url: https://webhooks.gitter.im/e/895d09c3165a0913ac2f

manage.py (142 changed lines)

@@ -2,17 +2,19 @@
 """
 CLI to manage redash.
 """
-import datetime
-from flask.ext.script import Manager, prompt_pass
+from flask.ext.script import Manager
 from redash import settings, models, __version__
 from redash.wsgi import app
 from redash.import_export import import_manager
+from redash.cli import users, database, data_sources
 manager = Manager(app)
-database_manager = Manager(help="Manages the database (create/drop tables).")
-users_manager = Manager(help="Users management commands.")
-data_sources_manager = Manager(help="Data sources management commands.")
+manager.add_command("database", database.manager)
+manager.add_command("users", users.manager)
+manager.add_command("import", import_manager)
+manager.add_command("ds", data_sources.manager)
 @manager.command
 def version():
@@ -22,7 +24,7 @@ def version():
 @manager.command
 def runworkers():
-    """Prints deprecation warning."""
+    """Start workers (deprecated)."""
     print "** This command is deprecated. Please use Celery's CLI to control the workers. **"
@@ -31,8 +33,10 @@ def make_shell_context():
     from redash.models import db
     return dict(app=app, db=db, models=models)
 @manager.command
 def check_settings():
+    """Show the settings as re:dash sees them (useful for debugging)."""
     from types import ModuleType
     for name in dir(settings):
@@ -40,130 +44,6 @@ def check_settings():
         if not callable(item) and not name.startswith("__") and not isinstance(item, ModuleType):
             print "{} = {}".format(name, item)
-@manager.command
-def import_events(events_file):
-    import json
-    from collections import Counter
-    count = Counter()
-    with open(events_file) as f:
-        for line in f:
-            try:
-                event = json.loads(line)
-                user = event.pop('user_id')
-                action = event.pop('action')
-                object_type = event.pop('object_type')
-                object_id = event.pop('object_id', None)
-                if object_id == 'dashboard' and object_type == 'dashboard':
-                    count['bad dashboard id'] += 1
-                    continue
-                created_at = datetime.datetime.utcfromtimestamp(event.pop('timestamp'))
-                additional_properties = json.dumps(event)
-                models.Event.create(user=user, action=action, object_type=object_type, object_id=object_id,
-                                    additional_properties=additional_properties, created_at=created_at)
-                count['imported'] += 1
-            except Exception as ex:
-                print "Failed importing line:"
-                print line
-                print ex.message
-                count[ex.message] += 1
-                count['failed'] += 1
-    models.db.close_db(None)
-    for k, v in count.iteritems():
-        print k
-        print v
-@database_manager.command
-def create_tables():
-    """Creates the database tables."""
-    from redash.models import create_db, init_db
-    create_db(True, False)
-    init_db()
-@database_manager.command
-def drop_tables():
-    """Drop the database tables."""
-    from redash.models import create_db
-    create_db(False, True)
-@users_manager.option('email', help="User's email")
-@users_manager.option('name', help="User's full name")
-@users_manager.option('--admin', dest='is_admin', action="store_true", default=False, help="set user as admin")
-@users_manager.option('--google', dest='google_auth', action="store_true", default=False, help="user uses Google Auth to login")
-@users_manager.option('--password', dest='password', default=None, help="Password for users who don't use Google Auth (leave blank for prompt).")
-@users_manager.option('--groups', dest='groups', default=models.User.DEFAULT_GROUPS, help="Comma seperated list of groups (leave blank for default).")
-def create(email, name, groups, is_admin=False, google_auth=False, password=None):
-    print "Creating user (%s, %s)..." % (email, name)
-    print "Admin: %r" % is_admin
-    print "Login with Google Auth: %r\n" % google_auth
-    if isinstance(groups, basestring):
-        groups = groups.split(',')
-        groups.remove('')  # in case it was empty string
-    if is_admin:
-        groups += ['admin']
-    user = models.User(email=email, name=name, groups=groups)
-    if not google_auth:
-        password = password or prompt_pass("Password")
-        user.hash_password(password)
-    try:
-        user.save()
-    except Exception, e:
-        print "Failed creating user: %s" % e.message
-@users_manager.option('email', help="email address of user to delete")
-def delete(email):
-    deleted_count = models.User.delete().where(models.User.email == email).execute()
-    print "Deleted %d users." % deleted_count
-@data_sources_manager.command
-def import_from_settings(name=None):
-    """Import data source from settings (env variables)."""
-    name = name or "Default"
-    data_source = models.DataSource.create(name=name,
-                                           type=settings.CONNECTION_ADAPTER,
-                                           options=settings.CONNECTION_STRING)
-    print "Imported data source from settings (id={}).".format(data_source.id)
-@data_sources_manager.command
-def list():
-    """List currently configured data sources"""
-    for ds in models.DataSource.select():
-        print "Name: {}\nType: {}\nOptions: {}".format(ds.name, ds.type, ds.options)
-@data_sources_manager.command
-def new(name, type, options):
-    """Create new data source"""
-    # TODO: validate it's a valid type and in the future, validate the options.
-    print "Creating {} data source ({}) with options:\n{}".format(type, name, options)
-    data_source = models.DataSource.create(name=name,
-                                           type=type,
-                                           options=options)
-    print "Id: {}".format(data_source.id)
-manager.add_command("database", database_manager)
-manager.add_command("users", users_manager)
-manager.add_command("import", import_manager)
-manager.add_command("ds", data_sources_manager)
 if __name__ == '__main__':
-    manager.run()
+    manager.run()

(new file: migration script)

@@ -0,0 +1,15 @@
+from playhouse.migrate import PostgresqlMigrator, migrate
+
+from redash.models import db
+from redash import models
+
+if __name__ == '__main__':
+    db.connect_db()
+    migrator = PostgresqlMigrator(db.database)
+
+    with db.database.transaction():
+        migrate(
+            migrator.add_column('queries', 'is_archived', models.Query.is_archived)
+        )
+
+    db.close_db(None)

(new file: migration script)

@@ -0,0 +1,21 @@
+from redash.models import db
+
+if __name__ == '__main__':
+    db.connect_db()
+
+    columns = (
+        ('activity_log', 'created_at'),
+        ('dashboards', 'created_at'),
+        ('data_sources', 'created_at'),
+        ('events', 'created_at'),
+        ('groups', 'created_at'),
+        ('queries', 'created_at'),
+        ('widgets', 'created_at'),
+        ('query_results', 'retrieved_at')
+    )
+
+    with db.database.transaction():
+        for column in columns:
+            db.database.execute_sql("ALTER TABLE {} ALTER COLUMN {} TYPE timestamp with time zone;".format(*column))
+
+    db.close_db(None)

(new file: migration script)

@@ -0,0 +1,73 @@
+import json
+
+from redash import query_runner
+from redash.models import DataSource
+
+
+def update(data_source):
+    print "[%s] Old options: %s" % (data_source.name, data_source.options)
+
+    if query_runner.validate_configuration(data_source.type, data_source.options):
+        print "[%s] configuration already valid. skipping." % data_source.name
+        return
+
+    if data_source.type == 'pg':
+        values = data_source.options.split(" ")
+        configuration = {}
+        for value in values:
+            k, v = value.split("=", 1)
+            configuration[k] = v
+            if k == 'port':
+                configuration[k] = int(v)
+        data_source.options = json.dumps(configuration)
+    elif data_source.type == 'mysql':
+        mapping = {
+            'Server': 'host',
+            'User': 'user',
+            'Pwd': 'passwd',
+            'Database': 'db'
+        }
+        values = data_source.options.split(";")
+        configuration = {}
+        for value in values:
+            k, v = value.split("=", 1)
+            configuration[mapping[k]] = v
+        data_source.options = json.dumps(configuration)
+    elif data_source.type == 'graphite':
+        old_config = json.loads(data_source.options)
+        configuration = {
+            "url": old_config["url"]
+        }
+        if "verify" in old_config:
+            configuration['verify'] = old_config['verify']
+        if "auth" in old_config:
+            configuration['username'], configuration['password'] = old_config["auth"]
+        data_source.options = json.dumps(configuration)
+    elif data_source.type == 'url':
+        data_source.options = json.dumps({"url": data_source.options})
+    elif data_source.type == 'script':
+        data_source.options = json.dumps({"path": data_source.options})
+    elif data_source.type == 'mongo':
+        data_source.type = 'mongodb'
+    else:
+        print "[%s] No need to convert type of: %s" % (data_source.name, data_source.type)
+
+    print "[%s] New options: %s" % (data_source.name, data_source.options)
+    data_source.save()
+
+
+if __name__ == '__main__':
+    for data_source in DataSource.all():
+        update(data_source)

(new file: migration script)

@@ -0,0 +1,12 @@
+from playhouse.migrate import PostgresqlMigrator, migrate
+
+from redash.models import db
+
+if __name__ == '__main__':
+    db.connect_db()
+    migrator = PostgresqlMigrator(db.database)
+
+    with db.database.transaction():
+        migrate(
+            migrator.drop_not_null('events', 'user_id')
+        )

Gruntfile.js

@@ -163,7 +163,6 @@ module.exports = function (grunt) {
     // Automatically inject Bower components into the app
     wiredep: {
       options: {
-        cwd: '<%= yeoman.app %>'
       },
       app: {
         src: ['<%= yeoman.app %>/index.html'],
@@ -321,7 +320,12 @@ module.exports = function (grunt) {
           src: ['generated/*']
         }, {
           expand: true,
-          cwd: 'bower_components/bootstrap/dist',
+          cwd: '<%= yeoman.app %>/bower_components/bootstrap/dist',
          src: 'fonts/*',
           dest: '<%= yeoman.dist %>'
+        }, {
+          expand: true,
+          cwd: '<%= yeoman.app %>/bower_components/font-awesome',
+          src: 'fonts/*',
+          dest: '<%= yeoman.dist %>'
         }]

(deleted file: the bundled Glyphicons Halflings SVG font)

@@ -1,228 +0,0 @@
(228 lines removed: XML prolog, <font-face> metadata and glyph path data for the "glyphicons_halflingsregular" font; omitted here)
<glyph unicode="&#xe047;" d="M1200 0v66q-34 1 -74 43q-18 19 -33 42t-21 37l-6 13l-385 998h-93l-399 -1006q-24 -48 -52 -75q-12 -12 -33 -25t-36 -20l-15 -7v-66h365v66q-41 0 -72 11t-49 38t1 71l92 234h391l82 -222q16 -45 -5.5 -88.5t-74.5 -43.5v-66h417zM416 521l178 457l46 -140l116 -317 h-340z" />
<glyph unicode="&#xe048;" d="M100 1199h471q120 0 213 -88t93 -228q0 -55 -11.5 -101.5t-28 -74t-33.5 -47.5t-28 -28l-12 -7q8 -3 21.5 -9t48 -31.5t60.5 -58t47.5 -91.5t21.5 -129q0 -84 -59 -156.5t-142 -111t-162 -38.5h-500v89q41 7 70.5 32.5t29.5 65.5v827q0 28 -1 39.5t-5.5 26t-15.5 21 t-29 14t-49 14.5v70zM400 1079v-379h139q76 0 130 61.5t54 138.5q0 82 -84 130.5t-239 48.5zM400 200h161q89 0 153 48.5t64 132.5q0 90 -62.5 154.5t-156.5 64.5h-159v-400z" />
<glyph unicode="&#xe049;" d="M877 1200l2 -57q-33 -8 -62 -25.5t-46 -37t-29.5 -38t-17.5 -30.5l-5 -12l-128 -825q-10 -52 14 -82t95 -36v-57h-500v57q77 7 134.5 40.5t65.5 80.5l173 849q10 56 -10 74t-91 37q-6 1 -10.5 2.5t-9.5 2.5v57h425z" />
<glyph unicode="&#xe050;" d="M1150 1200h150v-300h-50q0 29 -8 48.5t-18.5 30t-33.5 15t-39.5 5.5t-50.5 1h-200v-850l100 -50v-100h-400v100l100 50v850h-200q-34 0 -50.5 -1t-40 -5.5t-33.5 -15t-18.5 -30t-8.5 -48.5h-49v300h150h700zM100 1000v-800h75l-125 -167l-125 167h75v800h-75l125 167 l125 -167h-75z" />
<glyph unicode="&#xe051;" d="M950 1201h150v-300h-50q0 29 -8 48.5t-18 30t-33.5 15t-40 5.5t-50.5 1h-200v-650l100 -50v-100h-400v100l100 50v650h-200q-34 0 -50.5 -1t-39.5 -5.5t-33.5 -15t-18.5 -30t-8 -48.5h-50v300h150h700zM200 101h800v75l167 -125l-167 -125v75h-800v-75l-167 125l167 125 v-75z" />
<glyph unicode="&#xe052;" d="M700 950v100q0 21 -14.5 35.5t-35.5 14.5h-600q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15h600q21 0 35.5 15t14.5 35zM1100 650v100q0 21 -14.5 35.5t-35.5 14.5h-1000q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15h1000 q21 0 35.5 15t14.5 35zM900 350v100q0 21 -14.5 35.5t-35.5 14.5h-800q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15h800q21 0 35.5 15t14.5 35zM1200 50v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35 t35.5 -15h1100q21 0 35.5 15t14.5 35z" />
<glyph unicode="&#xe053;" d="M1000 950v100q0 21 -14.5 35.5t-35.5 14.5h-700q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15h700q21 0 35.5 15t14.5 35zM1200 650v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15h1100 q21 0 35.5 15t14.5 35zM1000 350v100q0 21 -14.5 35.5t-35.5 14.5h-700q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15h700q21 0 35.5 15t14.5 35zM1200 50v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35 t35.5 -15h1100q21 0 35.5 15t14.5 35z" />
<glyph unicode="&#xe054;" d="M500 950v100q0 21 14.5 35.5t35.5 14.5h600q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-600q-21 0 -35.5 15t-14.5 35zM100 650v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1000q-21 0 -35.5 15 t-14.5 35zM300 350v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800q-21 0 -35.5 15t-14.5 35zM0 50v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100 q-21 0 -35.5 15t-14.5 35z" />
<glyph unicode="&#xe055;" d="M0 950v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15t-14.5 35zM0 650v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15 t-14.5 35zM0 350v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15t-14.5 35zM0 50v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100 q-21 0 -35.5 15t-14.5 35z" />
<glyph unicode="&#xe056;" d="M0 950v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35zM300 950v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800q-21 0 -35.5 15 t-14.5 35zM0 650v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35zM300 650v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800 q-21 0 -35.5 15t-14.5 35zM0 350v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35zM300 350v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15 h-800q-21 0 -35.5 15t-14.5 35zM0 50v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35zM300 50v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15 h-800q-21 0 -35.5 15t-14.5 35z" />
<glyph unicode="&#xe057;" d="M400 1100h-100v-1100h100v1100zM700 950v100q0 21 -15 35.5t-35 14.5h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15h100q20 0 35 15t15 35zM1100 650v100q0 21 -15 35.5t-35 14.5h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15 h500q20 0 35 15t15 35zM100 425v75h-201v100h201v75l166 -125zM900 350v100q0 21 -15 35.5t-35 14.5h-300q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15h300q20 0 35 15t15 35zM1200 50v100q0 21 -15 35.5t-35 14.5h-600q-21 0 -35.5 -14.5t-14.5 -35.5 v-100q0 -20 14.5 -35t35.5 -15h600q20 0 35 15t15 35z" />
<glyph unicode="&#xe058;" d="M201 950v100q0 21 -15 35.5t-35 14.5h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15h100q20 0 35 15t15 35zM801 1100h100v-1100h-100v1100zM601 650v100q0 21 -15 35.5t-35 14.5h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15 h500q20 0 35 15t15 35zM1101 425v75h200v100h-200v75l-167 -125zM401 350v100q0 21 -15 35.5t-35 14.5h-300q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15h300q20 0 35 15t15 35zM701 50v100q0 21 -15 35.5t-35 14.5h-600q-21 0 -35.5 -14.5t-14.5 -35.5 v-100q0 -20 14.5 -35t35.5 -15h600q20 0 35 15t15 35z" />
<glyph unicode="&#xe059;" d="M900 925v-650q0 -31 -22 -53t-53 -22h-750q-31 0 -53 22t-22 53v650q0 31 22 53t53 22h750q31 0 53 -22t22 -53zM1200 300l-300 300l300 300v-600z" />
<glyph unicode="&#xe060;" d="M1200 1056v-1012q0 -18 -12.5 -31t-31.5 -13h-1112q-18 0 -31 13t-13 31v1012q0 18 13 31t31 13h1112q19 0 31.5 -13t12.5 -31zM1100 1000h-1000v-737l247 182l298 -131l-74 156l293 318l236 -288v500zM476 750q0 -56 -39 -95t-95 -39t-95 39t-39 95t39 95t95 39t95 -39 t39 -95z" />
<glyph unicode="&#xe062;" d="M600 1213q123 0 227 -63t164.5 -169.5t60.5 -229.5t-73 -272q-73 -114 -166.5 -237t-150.5 -189l-57 -66q-10 9 -27 26t-66.5 70.5t-96 109t-104 135.5t-100.5 155q-63 139 -63 262q0 124 60.5 231.5t165 172t226.5 64.5zM599 514q107 0 182.5 75.5t75.5 182.5t-75.5 182 t-182.5 75t-182 -75.5t-75 -181.5q0 -107 75.5 -182.5t181.5 -75.5z" />
<glyph unicode="&#xe063;" d="M600 1199q122 0 233 -47.5t191 -127.5t127.5 -191t47.5 -233t-47.5 -233t-127.5 -191t-191 -127.5t-233 -47.5t-233 47.5t-191 127.5t-127.5 191t-47.5 233t47.5 233t127.5 191t191 127.5t233 47.5zM600 173v854q-176 0 -301.5 -125t-125.5 -302t125.5 -302t301.5 -125z " />
<glyph unicode="&#xe064;" d="M554 1295q21 -71 57.5 -142.5t76 -130.5t83 -118.5t82 -117t70 -116t50 -125.5t18.5 -136q0 -89 -39 -165.5t-102 -126.5t-140 -79.5t-156 -33.5q-114 6 -211.5 53t-161.5 138.5t-64 210.5q0 94 34 186t88.5 172.5t112 159t115 177t87.5 194.5zM455 296q-7 6 -18 17 t-34 48t-33 77q-15 73 -14 143.5t10 122.5l9 51q-92 -110 -119.5 -185t-12.5 -156q14 -82 59.5 -136t136.5 -80z" />
<glyph unicode="&#xe065;" d="M1108 902l113 113l-21 85l-92 28l-113 -113zM1100 625v-225q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5v300q0 165 117.5 282.5t282.5 117.5q366 -6 397 -14l-186 -186h-311q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5 t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v125zM436 341l161 50l412 412l-114 113l-405 -405z" />
<glyph unicode="&#xe066;" d="M1100 453v-53q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5v300q0 165 117.5 282.5t282.5 117.5h261l2 -80q-133 -32 -218 -120h-145q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5z M813 431l360 324l-359 318v-216q-7 0 -19 -1t-48 -8t-69.5 -18.5t-76.5 -37t-76.5 -59t-62 -88t-39.5 -121.5q30 38 81.5 64t103 35.5t99 14t77.5 3.5l29 -1v-209z" />
<glyph unicode="&#xe067;" d="M1100 569v-169q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5v300q0 165 117.5 282.5t282.5 117.5h300q60 0 127 -23l-178 -177h-349q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v69z M625 348l566 567l-136 137l-430 -431l-147 147l-136 -136z" />
<glyph unicode="&#xe068;" d="M900 303v198h-200v-200h195l-295 -300l-300 300h200v200h-200v-198l-300 300l300 296v-198h200v200h-200l300 300l295 -300h-195v-200h200v198l300 -296z" />
<glyph unicode="&#xe069;" d="M900 0l-500 488v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v1000q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-437l500 487v-1100z" />
<glyph unicode="&#xe070;" d="M1200 0l-500 488v-488l-500 488v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v1000q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-437l500 487v-487l500 487v-1100z" />
<glyph unicode="&#xe071;" d="M1200 0l-500 488v-488l-564 550l564 550v-487l500 487v-1100z" />
<glyph unicode="&#xe072;" d="M1100 550l-900 550v-1100z" />
<glyph unicode="&#xe073;" d="M500 150v800q0 21 -14.5 35.5t-35.5 14.5h-200q-21 0 -35.5 -14.5t-14.5 -35.5v-800q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5zM900 150v800q0 21 -14.5 35.5t-35.5 14.5h-200q-21 0 -35.5 -14.5t-14.5 -35.5v-800q0 -21 14.5 -35.5t35.5 -14.5h200 q21 0 35.5 14.5t14.5 35.5z" />
<glyph unicode="&#xe074;" d="M1100 150v800q0 21 -14.5 35.5t-35.5 14.5h-800q-21 0 -35.5 -14.5t-14.5 -35.5v-800q0 -20 14.5 -35t35.5 -15h800q21 0 35.5 15t14.5 35z" />
<glyph unicode="&#xe075;" d="M500 0v488l-500 -488v1100l500 -487v487l564 -550z" />
<glyph unicode="&#xe076;" d="M1050 1100h100q21 0 35.5 -14.5t14.5 -35.5v-1000q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v438l-500 -488v488l-500 -488v1100l500 -487v487l500 -487v437q0 21 14.5 35.5t35.5 14.5z" />
<glyph unicode="&#xe077;" d="M850 1100h100q21 0 35.5 -14.5t14.5 -35.5v-1000q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v438l-500 -488v1100l500 -487v437q0 21 14.5 35.5t35.5 14.5z" />
<glyph unicode="&#xe078;" d="M650 1064l-550 -564h1100zM1200 350v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5z" />
<glyph unicode="&#xe079;" d="M777 7l240 240l-353 353l353 353l-240 240l-592 -594z" />
<glyph unicode="&#xe080;" d="M513 -46l-241 240l353 353l-353 353l241 240l572 -571l21 -22l-1 -1v-1z" />
<glyph unicode="&#xe081;" d="M600 1197q162 0 299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5t80 299.5t217.5 217.5t299.5 80zM500 900v-200h-200v-200h200v-200h200v200h200v200h-200v200h-200z" />
<glyph unicode="&#xe082;" d="M600 1197q162 0 299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5t80 299.5t217.5 217.5t299.5 80zM300 700v-200h600v200h-600z" />
<glyph unicode="&#xe083;" d="M600 1197q162 0 299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5t80 299.5t217.5 217.5t299.5 80zM247 741l141 -141l-142 -141l213 -213l141 142l141 -142l213 213l-142 141l142 141l-213 212l-141 -141 l-141 142z" />
<glyph unicode="&#xe084;" d="M600 1197q162 0 299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5t80 299.5t217.5 217.5t299.5 80zM546 623l-102 102l-174 -174l276 -277l411 411l-175 174z" />
<glyph unicode="&#xe085;" d="M600 1197q162 0 299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5t80 299.5t217.5 217.5t299.5 80zM500 500h200q5 3 14 8t31.5 25.5t39.5 45.5t31 69t14 94q0 51 -17.5 89t-42 58t-58.5 32t-58.5 15t-51.5 3 q-105 0 -172 -56t-67 -183h144q4 0 11.5 -1t11 -1t6.5 3t3 9t1 11t3.5 8.5t3.5 6t5.5 4t6.5 2.5t9 1.5t9 0.5h11.5h12.5q19 0 30 -10t11 -26q0 -22 -4 -28t-27 -22q-5 -1 -12.5 -3t-27 -13.5t-34 -27t-26.5 -46t-11 -68.5zM500 400v-100h200v100h-200z" />
<glyph unicode="&#xe086;" d="M600 1197q162 0 299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5t80 299.5t217.5 217.5t299.5 80zM500 900v-100h200v100h-200zM400 700v-100h100v-200h-100v-100h400v100h-100v300h-300z" />
<glyph unicode="&#xe087;" d="M1200 700v-200h-203q-25 -102 -116.5 -186t-180.5 -117v-197h-200v197q-140 27 -208 102.5t-98 200.5h-194v200h194q15 60 36 104.5t55.5 86t88 69t126.5 40.5v200h200v-200q54 -20 113 -60t112.5 -105.5t71.5 -134.5h203zM700 500v-206q149 48 201 206h-201v200h200 q-25 74 -76 127.5t-124 76.5v-204h-200v203q-75 -24 -130 -77.5t-79 -125.5h209v-200h-210q24 -73 79.5 -127.5t130.5 -78.5v206h200z" />
<glyph unicode="&#xe088;" d="M600 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM600 1014q-171 0 -292.5 -121.5t-121.5 -292.5t121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5zM844 735 l-135 -135l135 -135l-109 -109l-135 135l-135 -135l-109 109l135 135l-135 135l109 109l135 -135l135 135z" />
<glyph unicode="&#xe089;" d="M600 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM600 1014q-171 0 -292.5 -121.5t-121.5 -292.5t121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5zM896 654 l-346 -345l-228 228l141 141l87 -87l204 205z" />
<glyph unicode="&#xe090;" d="M600 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM248 385l568 567q-100 62 -216 62q-171 0 -292.5 -121.5t-121.5 -292.5q0 -115 62 -215zM955 809l-564 -564q97 -59 209 -59q171 0 292.5 121.5 t121.5 292.5q0 112 -59 209z" />
<glyph unicode="&#xe091;" d="M1200 400h-600v-301l-600 448l600 453v-300h600v-300z" />
<glyph unicode="&#xe092;" d="M600 400h-600v300h600v300l600 -453l-600 -448v301z" />
<glyph unicode="&#xe093;" d="M1098 600h-298v-600h-300v600h-296l450 600z" />
<glyph unicode="&#xe094;" d="M998 600l-449 -600l-445 600h296v600h300v-600h298z" />
<glyph unicode="&#xe095;" d="M600 199v301q-95 -2 -183 -20t-170 -52t-147 -92.5t-100 -135.5q6 132 41 238.5t103.5 193t184 138t271.5 59.5v271l600 -453z" />
<glyph unicode="&#xe096;" d="M1200 1200h-400l129 -129l-294 -294l142 -142l294 294l129 -129v400zM565 423l-294 -294l129 -129h-400v400l129 -129l294 294z" />
<glyph unicode="&#xe097;" d="M871 730l129 -130h-400v400l129 -129l295 295l142 -141zM200 600h400v-400l-129 130l-295 -295l-142 141l295 295z" />
<glyph unicode="&#xe101;" d="M600 1177q118 0 224.5 -45.5t184 -123t123 -184t45.5 -224.5t-45.5 -224.5t-123 -184t-184 -123t-224.5 -45.5t-224.5 45.5t-184 123t-123 184t-45.5 224.5t45.5 224.5t123 184t184 123t224.5 45.5zM686 549l58 302q4 20 -8 34.5t-33 14.5h-207q-20 0 -32 -14.5t-8 -34.5 l58 -302q4 -20 21.5 -34.5t37.5 -14.5h54q20 0 37.5 14.5t21.5 34.5zM700 400h-200v-100h200v100z" />
<glyph unicode="&#xe102;" d="M1200 900h-111v6t-1 15t-3 18l-34 172q-11 39 -41.5 63t-69.5 24q-32 0 -61 -17l-239 -144q-22 -13 -40 -35q-19 24 -40 36l-238 144q-33 18 -62 18q-39 0 -69.5 -23t-40.5 -61l-35 -177q-2 -8 -3 -18t-1 -15v-6h-111v-100h100v-200h400v300h200v-300h400v200h100v100z M731 900l202 197q5 -12 12 -32.5t23 -64t25 -72t7 -28.5h-269zM481 900h-281q-3 0 14 48t35 96l18 47zM100 0h400v400h-400v-400zM700 400h400v-400h-400v400z" />
<glyph unicode="&#xe103;" d="M0 121l216 193q-9 53 -13 83t-5.5 94t9 113t38.5 114t74 124q47 60 99.5 102.5t103 68t127.5 48t145.5 37.5t184.5 43.5t220 58.5q0 -189 -22 -343t-59 -258t-89 -181.5t-108.5 -120t-122 -68t-125.5 -30t-121.5 -1.5t-107.5 12.5t-87.5 17t-56.5 7.5l-99 -55l-201 -202 v143zM692 611q70 38 118.5 69.5t102 79t99 111.5t86.5 148q22 50 24 60t-6 19q-7 5 -17 5t-26.5 -14.5t-33.5 -39.5q-35 -51 -113.5 -108.5t-139.5 -89.5l-61 -32q-369 -197 -458 -401q-48 -111 -28.5 -117.5t86.5 76.5q55 66 367 234z" />
<glyph unicode="&#xe105;" d="M1261 600l-26 -40q-6 -10 -20 -30t-49 -63.5t-74.5 -85.5t-97 -90t-116.5 -83.5t-132.5 -59t-145.5 -23.5t-145.5 23.5t-132.5 59t-116.5 83.5t-97 90t-74.5 85.5t-49 63.5t-20 30l-26 40l26 40q6 10 20 30t49 63.5t74.5 85.5t97 90t116.5 83.5t132.5 59t145.5 23.5 t145.5 -23.5t132.5 -59t116.5 -83.5t97 -90t74.5 -85.5t49 -63.5t20 -30zM600 240q64 0 123.5 20t100.5 45.5t85.5 71.5t66.5 75.5t58 81.5t47 66q-1 1 -28.5 37.5t-42 55t-43.5 53t-57.5 63.5t-58.5 54q49 -74 49 -163q0 -124 -88 -212t-212 -88t-212 88t-88 212 q0 85 46 158q-102 -87 -226 -258q7 -10 40.5 -58t56 -78.5t68 -77.5t87.5 -75t103 -49.5t125 -21.5zM484 762l-107 -106q49 -124 154 -191l105 105q-37 24 -75 72t-57 84z" />
<glyph unicode="&#xe106;" d="M906 1200l-314 -1200h-148l37 143q-82 21 -165 71.5t-140 102t-109.5 112t-72 88.5t-29.5 43l-26 40l26 40q6 10 20 30t49 63.5t74.5 85.5t97 90t116.5 83.5t132.5 59t145.5 23.5q61 0 121 -17l37 142h148zM1261 600l-26 -40q-7 -12 -25.5 -38t-63.5 -79.5t-95.5 -102.5 t-124 -100t-146.5 -79l38 145q22 15 44.5 34t46 44t40.5 44t41 50.5t33.5 43.5t33 44t24.5 34q-97 127 -140 175l39 146q67 -54 131.5 -125.5t87.5 -103.5t36 -52zM513 264l37 141q-107 18 -178.5 101.5t-71.5 193.5q0 85 46 158q-102 -87 -226 -258q210 -282 393 -336z M484 762l-107 -106q49 -124 154 -191l47 47l23 87q-30 28 -59 69t-44 68z" />
<glyph unicode="&#xe107;" d="M-47 0h1294q37 0 50.5 35.5t-7.5 67.5l-642 1056q-20 33 -48 36t-48 -29l-642 -1066q-21 -32 -7.5 -66t50.5 -34zM700 200v100h-200v-100h-345l445 723l445 -723h-345zM700 700h-200v-100l100 -300l100 300v100z" />
<glyph unicode="&#xe108;" d="M800 711l363 -325q15 -14 26 -38.5t11 -44.5v-41q0 -20 -12 -26.5t-29 5.5l-359 249v-263q100 -91 100 -113v-64q0 -21 -13 -29t-32 1l-94 78h-222l-94 -78q-19 -9 -32 -1t-13 29v64q0 22 100 113v263l-359 -249q-17 -12 -29 -5.5t-12 26.5v41q0 20 11 44.5t26 38.5 l363 325v339q0 62 44 106t106 44t106 -44t44 -106v-339z" />
<glyph unicode="&#xe110;" d="M941 800l-600 -600h-341v200h259l600 600h241v198l300 -295l-300 -300v197h-159zM381 678l141 142l-181 180h-341v-200h259zM1100 598l300 -295l-300 -300v197h-241l-181 181l141 142l122 -123h159v198z" />
<glyph unicode="&#xe111;" d="M100 1100h1000q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-596l-304 -300v300h-100q-41 0 -70.5 29.5t-29.5 70.5v600q0 41 29.5 70.5t70.5 29.5z" />
<glyph unicode="&#xe112;" d="M400 900h-300v300h300v-300zM1100 900h-300v300h300v-300zM1100 800v-200q0 -42 -3 -83t-15 -104t-31.5 -116t-58 -109.5t-89 -96.5t-129 -65.5t-174.5 -25.5t-174.5 25.5t-129 65.5t-89 96.5t-58 109.5t-31.5 116t-15 104t-3 83v200h300v-250q0 -113 6 -145 q17 -92 102 -117q39 -11 92 -11q37 0 66.5 5.5t50 15.5t36 24t24 31.5t14 37.5t7 42t2.5 45t0 47v25v250h300z" />
<glyph unicode="&#xe113;" d="M902 184l226 227l-578 579l-580 -579l227 -227l352 353z" />
<glyph unicode="&#xe114;" d="M650 218l578 579l-226 227l-353 -353l-352 353l-227 -227z" />
<glyph unicode="&#xe115;" d="M1198 400v600h-796l215 -200h381v-400h-198l299 -283l299 283h-200zM-198 700l299 283l300 -283h-203v-400h385l215 -200h-800v600h-196z" />
<glyph unicode="&#xe116;" d="M1050 1200h94q20 0 35 -14.5t15 -35.5t-15 -35.5t-35 -14.5h-54l-201 -961q-2 -4 -6 -10.5t-19 -17.5t-33 -11h-31v-50q0 -20 -14.5 -35t-35.5 -15t-35.5 15t-14.5 35v50h-300v-50q0 -20 -14.5 -35t-35.5 -15t-35.5 15t-14.5 35v50h-50q-21 0 -35.5 15t-14.5 35 q0 21 14.5 35.5t35.5 14.5h535l48 200h-633q-32 0 -54.5 21t-27.5 43l-100 475q-5 24 10 42q14 19 39 19h896l38 162q5 17 18.5 27.5t30.5 10.5z" />
<glyph unicode="&#xe117;" d="M1200 1000v-100h-1200v100h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500zM0 800h1200v-800h-1200v800z" />
<glyph unicode="&#xe118;" d="M201 800l-200 -400v600h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500v-200h-1000zM1501 700l-300 -700h-1200l300 700h1200z" />
<glyph unicode="&#xe119;" d="M302 300h198v600h-198l298 300l298 -300h-198v-600h198l-298 -300z" />
<glyph unicode="&#xe120;" d="M900 303v197h-600v-197l-300 297l300 298v-198h600v198l300 -298z" />
<glyph unicode="&#xe121;" d="M31 400l172 739q5 22 23 41.5t38 19.5h672q19 0 37.5 -22.5t23.5 -45.5l172 -732h-1138zM100 300h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM900 200h-100v-100h100v100z M1100 200h-100v-100h100v100z" />
<glyph unicode="&#xe122;" d="M1100 200v850q0 21 14.5 35.5t35.5 14.5q20 0 35 -14.5t15 -35.5v-850q0 -20 -15 -35t-35 -15q-21 0 -35.5 15t-14.5 35zM325 800l675 250v-850l-675 200h-38l47 -276q2 -12 -3 -17.5t-11 -6t-21 -0.5h-8h-83q-20 0 -34.5 14t-18.5 35q-56 337 -56 351v250v5 q0 13 0.5 18.5t2.5 13t8 10.5t15 3h200zM-101 600v50q0 24 25 49t50 38l25 13v-250l-11 5.5t-24 14t-30 21.5t-24 27.5t-11 31.5z" />
<glyph unicode="&#xe124;" d="M445 1180l-45 -233l-224 78l78 -225l-233 -44l179 -156l-179 -155l233 -45l-78 -224l224 78l45 -233l155 179l155 -179l45 233l224 -78l-78 224l234 45l-180 155l180 156l-234 44l78 225l-224 -78l-45 233l-155 -180z" />
<glyph unicode="&#xe125;" d="M700 1200h-50q-27 0 -51 -20t-38 -48l-96 -198l-145 -196q-20 -26 -20 -63v-400q0 -75 100 -75h61q123 -100 139 -100h250q46 0 83 57l238 344q29 31 29 74v100q0 44 -30.5 84.5t-69.5 40.5h-328q28 118 28 125v150q0 44 -30.5 84.5t-69.5 40.5zM700 925l-50 -225h450 v-125l-250 -375h-214l-136 100h-100v375l150 212l100 213h50v-175zM0 800v-600h200v600h-200z" />
<glyph unicode="&#xe126;" d="M700 0h-50q-27 0 -51 20t-38 48l-96 198l-145 196q-20 26 -20 63v400q0 75 100 75h61q123 100 139 100h250q46 0 83 -57l238 -344q29 -31 29 -74v-100q0 -44 -30.5 -84.5t-69.5 -40.5h-328q28 -118 28 -125v-150q0 -44 -30.5 -84.5t-69.5 -40.5zM200 400h-200v600h200 v-600zM700 275l-50 225h450v125l-250 375h-214l-136 -100h-100v-375l150 -212l100 -213h50v175z" />
<glyph unicode="&#xe127;" d="M364 873l362 230q14 6 25 6q17 0 29 -12l109 -112q14 -14 14 -34q0 -18 -11 -32l-85 -121h302q85 0 138.5 -38t53.5 -110t-54.5 -111t-138.5 -39h-107l-130 -339q-7 -22 -20.5 -41.5t-28.5 -19.5h-341q-7 0 -90 81t-83 94v525q0 17 14 35.5t28 28.5zM408 792v-503 l100 -89h293l131 339q6 21 19.5 41t28.5 20h203q16 0 25 15t9 36q0 20 -9 34.5t-25 14.5h-457h-6.5h-7.5t-6.5 0.5t-6 1t-5 1.5t-5.5 2.5t-4 4t-4 5.5q-5 12 -5 20q0 14 10 27l147 183l-86 83zM208 200h-200v600h200v-600z" />
<glyph unicode="&#xe128;" d="M475 1104l365 -230q7 -4 16.5 -10.5t26 -26t16.5 -36.5v-526q0 -13 -85.5 -93.5t-93.5 -80.5h-342q-15 0 -28.5 20t-19.5 41l-131 339h-106q-84 0 -139 39t-55 111t54 110t139 37h302l-85 121q-11 16 -11 32q0 21 14 34l109 113q13 12 29 12q11 0 25 -6zM370 946 l145 -184q10 -11 10 -26q0 -11 -5 -20q-1 -3 -3.5 -5.5l-4 -4t-5 -2.5t-5.5 -1.5t-6.5 -1t-6.5 -0.5h-7.5h-6.5h-476v-100h222q15 0 28.5 -20.5t19.5 -40.5l131 -339h293l106 89v502l-342 237zM1199 201h-200v600h200v-600z" />
<glyph unicode="&#xe129;" d="M1100 473v342q0 15 -20 28.5t-41 19.5l-339 131v106q0 84 -39 139t-111 55t-110 -53.5t-38 -138.5v-302l-121 84q-15 12 -33.5 11.5t-32.5 -13.5l-112 -110q-22 -22 -6 -53l230 -363q4 -6 10.5 -15.5t26 -25t36.5 -15.5h525q13 0 94 83t81 90zM911 400h-503l-236 339 l83 86l183 -146q22 -18 47 -5q3 1 5.5 3.5l4 4t2.5 5t1.5 5.5t1 6.5t0.5 6v7.5v7v456q0 22 25 31t50 -0.5t25 -30.5v-202q0 -16 20 -29.5t41 -19.5l339 -130v-294zM1000 200v-200h-600v200h600z" />
<glyph unicode="&#xe130;" d="M305 1104v200h600v-200h-600zM605 310l339 131q20 6 40.5 19.5t20.5 28.5v342q0 7 -81 90t-94 83h-525q-17 0 -35.5 -14t-28.5 -28l-10 -15l-230 -362q-15 -31 7 -53l112 -110q13 -13 32 -13.5t34 10.5l121 85l-1 -302q0 -84 38.5 -138t110.5 -54t111 55t39 139v106z M905 804v-294l-340 -130q-20 -6 -40 -20t-20 -29v-202q0 -22 -25 -31t-50 0t-25 31v456v14.5t-1.5 11.5t-5 12t-9.5 7q-24 13 -46 -5l-184 -146l-83 86l237 339h503z" />
<glyph unicode="&#xe131;" d="M603 1195q162 0 299.5 -80t217.5 -218t80 -300t-80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5zM598 701h-298v-201h300l-2 -194l402 294l-402 298v-197z" />
<glyph unicode="&#xe132;" d="M597 1195q122 0 232.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-218 -217.5t-300 -80t-299.5 80t-217.5 217.5t-80 299.5q0 122 47.5 232.5t127.5 190.5t190.5 127.5t231.5 47.5zM200 600l400 -294v194h302v201h-300v197z" />
<glyph unicode="&#xe133;" d="M603 1195q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5zM300 600h200v-300h200v300h200l-300 400z" />
<glyph unicode="&#xe134;" d="M603 1195q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5zM500 900v-300h-200l300 -400l300 400h-200v300h-200z" />
<glyph unicode="&#xe135;" d="M603 1195q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5zM627 1101q-15 -12 -36.5 -21t-34.5 -12t-44 -8t-39 -6 q-15 -3 -45.5 0.5t-45.5 -2.5q-21 -7 -52 -26.5t-34 -34.5q-3 -11 6.5 -22.5t8.5 -18.5q-3 -34 -27.5 -90.5t-29.5 -79.5q-8 -33 5.5 -92.5t7.5 -87.5q0 -9 17 -44t16 -60q12 0 23 -5.5t23 -15t20 -13.5q24 -12 108 -42q22 -8 53 -31.5t59.5 -38.5t57.5 -11q8 -18 -15 -55 t-20 -57q42 -71 87 -80q0 -6 -3 -15.5t-3.5 -14.5t4.5 -17q102 -2 221 112q30 29 47 47t34.5 49t20.5 62q-14 9 -37 9.5t-36 7.5q-14 7 -49 15t-52 19q-9 0 -39.5 -0.5t-46.5 -1.5t-39 -6.5t-39 -16.5q-50 -35 -66 -12q-4 2 -3.5 25.5t0.5 25.5q-6 13 -26.5 17t-24.5 7 q2 22 -2 41t-16.5 28t-38.5 -20q-23 -25 -42 4q-19 28 -8 58q6 16 22 22q6 -1 26 -1.5t33.5 -4t19.5 -13.5q12 -19 32 -37.5t34 -27.5l14 -8q0 3 9.5 39.5t5.5 57.5q-4 23 14.5 44.5t22.5 31.5q5 14 10 35t8.5 31t15.5 22.5t34 21.5q-6 18 10 37q8 0 23.5 -1.5t24.5 -1.5 t20.5 4.5t20.5 15.5q-10 23 -30.5 42.5t-38 30t-49 26.5t-43.5 23q11 41 1 44q31 -13 58.5 -14.5t39.5 3.5l11 4q6 36 -17 53.5t-64 28.5t-56 23q-19 -3 -37 0zM613 994q0 -18 8 -42.5t16.5 -44t9.5 -23.5q-9 2 -31 5t-36 5t-32 8t-30 14q3 12 16 30t16 25q10 -10 18.5 -10 t14 6t14.5 14.5t16 12.5z" />
<glyph unicode="&#xe137;" horiz-adv-x="1220" d="M100 1196h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 1096h-200v-100h200v100zM100 796h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 696h-500v-100h500v100zM100 396h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 296h-300v-100h300v100z " />
<glyph unicode="&#xe138;" d="M1100 1200v-100h-1000v100h1000zM150 1000h900l-350 -500v-300l-200 -200v500z" />
<glyph unicode="&#xe140;" d="M329 729l142 142l-200 200l129 129h-400v-400l129 129zM1200 1200v-400l-129 129l-200 -200l-142 142l200 200l-129 129h400zM271 129l129 -129h-400v400l129 -129l200 200l142 -142zM1071 271l129 129v-400h-400l129 129l-200 200l142 142z" />
<glyph unicode="&#xe141;" d="M596 1192q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM596 1010q-171 0 -292.5 -121.5t-121.5 -292.5q0 -172 121.5 -293t292.5 -121t292.5 121t121.5 293q0 171 -121.5 292.5t-292.5 121.5zM455 905 q22 0 38 -16t16 -39t-16 -39t-38 -16q-23 0 -39 16.5t-16 38.5t16 38.5t39 16.5zM708 821l1 1q-9 14 -9 28q0 22 16 38.5t39 16.5q22 0 38 -16t16 -39t-16 -39t-38 -16q-14 0 -29 10l-55 -145q17 -22 17 -51q0 -36 -25.5 -61.5t-61.5 -25.5t-61.5 25.5t-25.5 61.5 q0 32 20.5 56.5t51.5 29.5zM855 709q23 0 38.5 -15.5t15.5 -38.5t-16 -39t-38 -16q-23 0 -39 16t-16 39q0 22 16 38t39 16zM345 709q23 0 39 -16t16 -38q0 -23 -16 -39t-39 -16q-22 0 -38 16t-16 39t15.5 38.5t38.5 15.5z" />
<glyph unicode="&#xe143;" d="M649 54l-16 22q-90 125 -293 323q-71 70 -104.5 105.5t-77 89.5t-61 99t-17.5 91q0 131 98.5 229.5t230.5 98.5q143 0 241 -129q103 129 246 129q129 0 226 -98.5t97 -229.5q0 -46 -17.5 -91t-61 -99t-77 -89.5t-104.5 -105.5q-203 -198 -293 -323zM844 524l12 12 q64 62 97.5 97t64.5 79t31 72q0 71 -48 119t-105 48q-74 0 -132 -82l-118 -171l-114 174q-51 79 -123 79q-60 0 -109.5 -49t-49.5 -118q0 -27 30.5 -70t61.5 -75.5t95 -94.5l22 -22q93 -90 190 -201q82 92 195 203z" />
<glyph unicode="&#xe144;" d="M476 406l19 -17l105 105l-212 212l389 389l247 -247l-95 -96l18 -18q46 -46 77 -99l29 29q35 35 62.5 88t27.5 96q0 93 -66 159l-141 141q-66 66 -159 66q-95 0 -159 -66l-283 -283q-66 -64 -66 -159q0 -93 66 -159zM123 193l141 -141q66 -66 159 -66q95 0 159 66 l283 283q66 66 66 159t-66 159l-141 141q-12 12 -19 17l-105 -105l212 -212l-389 -389l-247 248l95 95l-18 18q-46 45 -75 101l-55 -55q-66 -66 -66 -159q0 -94 66 -160z" />
<glyph unicode="&#xe145;" d="M200 100v953q0 21 30 46t81 48t129 38t163 15t162 -15t127 -38t79 -48t29 -46v-953q0 -41 -29.5 -70.5t-70.5 -29.5h-600q-41 0 -70.5 29.5t-29.5 70.5zM900 1000h-600v-700h600v700zM600 46q43 0 73.5 30.5t30.5 73.5t-30.5 73.5t-73.5 30.5t-73.5 -30.5t-30.5 -73.5 t30.5 -73.5t73.5 -30.5z" />
<glyph unicode="&#xe148;" d="M700 1029v-307l64 -14q34 -7 64 -16.5t70 -31.5t67.5 -52t47.5 -80.5t20 -112.5q0 -139 -89 -224t-244 -96v-77h-100v78q-152 17 -237 104q-40 40 -52.5 93.5t-15.5 139.5h139q5 -77 48.5 -126.5t117.5 -64.5v335l-27 7q-46 14 -79 26.5t-72 36t-62.5 52t-40 72.5 t-16.5 99q0 92 44 159.5t109 101t144 40.5v78h100v-79q38 -4 72.5 -13.5t75.5 -31.5t71 -53.5t51.5 -84t24.5 -118.5h-159q-8 72 -35 109.5t-101 50.5zM600 755v274q-61 -8 -97.5 -37.5t-36.5 -102.5q0 -29 8 -51t16.5 -34t29.5 -22.5t31 -13.5t38 -10q7 -2 11 -3zM700 548 v-311q170 18 170 151q0 64 -44 99.5t-126 60.5z" />
<glyph unicode="&#xe149;" d="M866 300l50 -147q-41 -25 -80.5 -36.5t-59 -13t-61.5 -1.5q-23 0 -128 33t-155 29q-39 -4 -82 -17t-66 -25l-24 -11l-55 145l16.5 11t15.5 10t13.5 9.5t14.5 12t14.5 14t17.5 18.5q48 55 54 126.5t-30 142.5h-221v100h166q-24 49 -44 104q-10 26 -14.5 55.5t-3 72.5 t25 90t68.5 87q97 88 263 88q129 0 230 -89t101 -208h-153q0 52 -34 89.5t-74 51.5t-76 14q-37 0 -79 -14.5t-62 -35.5q-41 -44 -41 -101q0 -11 2.5 -24.5t5.5 -24t9.5 -26.5t10.5 -25t14 -27.5t14 -25.5t15.5 -27t13.5 -24h242v-100h-197q8 -50 -2.5 -115t-31.5 -94 q-41 -59 -99 -113q35 11 84 18t70 7q32 1 102 -16t104 -17q76 0 136 30z" />
<glyph unicode="&#xe150;" d="M300 0l298 300h-198v900h-200v-900h-198zM900 1200l298 -300h-198v-900h-200v900h-198z" />
<glyph unicode="&#xe151;" d="M400 300h198l-298 -300l-298 300h198v900h200v-900zM1000 1200v-500h-100v100h-100v-100h-100v500h300zM901 1100h-100v-200h100v200zM700 500h300v-200h-99v-100h-100v100h99v100h-200v100zM800 100h200v-100h-300v200h100v-100z" />
<glyph unicode="&#xe152;" d="M400 300h198l-298 -300l-298 300h198v900h200v-900zM1000 1200v-200h-99v-100h-100v100h99v100h-200v100h300zM800 800h200v-100h-300v200h100v-100zM700 500h300v-500h-100v100h-100v-100h-100v500zM801 200h100v200h-100v-200z" />
<glyph unicode="&#xe153;" d="M300 0l298 300h-198v900h-200v-900h-198zM900 1100h-100v100h200v-500h-100v400zM1100 500v-500h-100v100h-200v400h300zM1001 400h-100v-200h100v200z" />
<glyph unicode="&#xe154;" d="M300 0l298 300h-198v900h-200v-900h-198zM1100 1200v-500h-100v100h-200v400h300zM1001 1100h-100v-200h100v200zM900 400h-100v100h200v-500h-100v400z" />
<glyph unicode="&#xe155;" d="M300 0l298 300h-198v900h-200v-900h-198zM900 1000h-200v200h200v-200zM1000 700h-300v200h300v-200zM1100 400h-400v200h400v-200zM1200 100h-500v200h500v-200z" />
<glyph unicode="&#xe156;" d="M300 0l298 300h-198v900h-200v-900h-198zM1200 1000h-500v200h500v-200zM1100 700h-400v200h400v-200zM1000 400h-300v200h300v-200zM900 100h-200v200h200v-200z" />
<glyph unicode="&#xe157;" d="M400 1100h300q162 0 281 -118.5t119 -281.5v-300q0 -165 -118.5 -282.5t-281.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5v300q0 165 117.5 282.5t282.5 117.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5 t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5z" />
<glyph unicode="&#xe158;" d="M700 0h-300q-163 0 -281.5 117.5t-118.5 282.5v300q0 163 119 281.5t281 118.5h300q165 0 282.5 -117.5t117.5 -282.5v-300q0 -165 -117.5 -282.5t-282.5 -117.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5 t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5zM400 800v-500l333 250z" />
<glyph unicode="&#xe159;" d="M0 400v300q0 163 117.5 281.5t282.5 118.5h300q163 0 281.5 -119t118.5 -281v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5zM900 300v500q0 41 -29.5 70.5t-70.5 29.5h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5 t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5zM800 700h-500l250 -333z" />
<glyph unicode="&#xe160;" d="M1100 700v-300q0 -162 -118.5 -281t-281.5 -119h-300q-165 0 -282.5 118.5t-117.5 281.5v300q0 165 117.5 282.5t282.5 117.5h300q165 0 282.5 -117.5t117.5 -282.5zM900 300v500q0 41 -29.5 70.5t-70.5 29.5h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5 t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5zM550 733l-250 -333h500z" />
<glyph unicode="&#xe161;" d="M500 1100h400q165 0 282.5 -117.5t117.5 -282.5v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-400v200h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5h-500v200zM700 550l-400 -350v200h-300v300h300v200z" />
<glyph unicode="&#xe162;" d="M403 2l9 -1q13 0 26 16l538 630q15 19 6 36q-8 18 -32 16h-300q1 4 78 219.5t79 227.5q2 17 -6 27l-8 8h-9q-16 0 -25 -15q-4 -5 -98.5 -111.5t-228 -257t-209.5 -238.5q-17 -19 -7 -40q10 -19 32 -19h302q-155 -438 -160 -458q-5 -21 4 -32z" />
<glyph unicode="&#xe163;" d="M800 200h-500q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5h500v185q-14 4 -114 7.5t-193 5.5l-93 2q-165 0 -282.5 -117.5t-117.5 -282.5v-300q0 -165 117.5 -282.5t282.5 -117.5h300q47 0 100 15v185zM900 200v200h-300v300h300v200l400 -350z" />
<glyph unicode="&#xe164;" d="M1200 700l-149 149l-342 -353l-213 213l353 342l-149 149h500v-500zM1022 571l-122 -123v-148q0 -41 -29.5 -70.5t-70.5 -29.5h-500q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5h156l118 122l-74 78h-100q-165 0 -282.5 -117.5t-117.5 -282.5v-300 q0 -165 117.5 -282.5t282.5 -117.5h300q163 0 281.5 117.5t118.5 282.5v98z" />
<glyph unicode="&#xe165;" d="M600 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM600 1014q-171 0 -292.5 -121.5t-121.5 -292.5t121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5zM600 794 q80 0 137 -57t57 -137t-57 -137t-137 -57t-137 57t-57 137t57 137t137 57z" />
<glyph unicode="&#xe166;" d="M700 800v400h-300v-400h-300l445 -500l450 500h-295zM25 300h1048q11 0 19 -7.5t8 -17.5v-275h-1100v275q0 11 7 18t18 7zM1000 200h-100v-50h100v50z" />
<glyph unicode="&#xe167;" d="M400 700v-300h300v300h295l-445 500l-450 -500h300zM25 300h1048q11 0 19 -7.5t8 -17.5v-275h-1100v275q0 11 7 18t18 7zM1000 200h-100v-50h100v50z" />
<glyph unicode="&#xe168;" d="M405 400l596 596l-154 155l-442 -442l-150 151l-155 -155zM25 300h1048q11 0 19 -7.5t8 -17.5v-275h-1100v275q0 11 7 18t18 7zM1000 200h-100v-50h100v50z" />
<glyph unicode="&#xe169;" d="M409 1103l-97 97l-212 -212l97 -98zM650 861l-149 149l-212 -212l149 -149l-238 -248h700v699zM25 300h1048q11 0 19 -7.5t8 -17.5v-275h-1100v275q0 11 7 18t18 7zM1000 200h-100v-50h100v50z" />
<glyph unicode="&#xe170;" d="M539 950l-149 -149l212 -212l149 148l248 -237v700h-699zM297 709l-97 -97l212 -212l98 97zM25 300h1048q11 0 19 -7.5t8 -17.5v-275h-1100v275q0 11 7 18t18 7zM1000 200h-100v-50h100v50z" />
<glyph unicode="&#xe171;" d="M1200 1199v-1079l-475 272l-310 -393v416h-392zM1166 1148l-672 -712v-226z" />
<glyph unicode="&#xe172;" d="M1100 1000v-850q0 -21 -15 -35.5t-35 -14.5h-150v400h-700v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1200h-100v-200h100v200z" />
<glyph unicode="&#xe173;" d="M578 500h-378v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-218l-276 -275l-120 120zM700 1200h-100v-200h100v200zM1300 538l-475 -476l-244 244l123 123l120 -120l353 352z" />
<glyph unicode="&#xe174;" d="M529 500h-329v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-269l-103 -103l-170 170zM700 1200h-100v-200h100v200zM1167 6l-170 170l-170 -170l-127 127l170 170l-170 170l127 127l170 -170l170 170l127 -128 l-170 -169l170 -170z" />
<glyph unicode="&#xe175;" d="M700 500h-500v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-300h-400v-200zM700 1000h-100v200h100v-200zM1000 600h-200v-300h-200l300 -300l300 300h-200v300z" />
<glyph unicode="&#xe176;" d="M602 500h-402v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-402l-200 200zM700 1000h-100v200h100v-200zM1000 300h200l-300 300l-300 -300h200v-300h200v300z" />
<glyph unicode="&#xe177;" d="M1200 900v150q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-150h1200zM0 800v-550q0 -21 14.5 -35.5t35.5 -14.5h1100q21 0 35.5 14.5t14.5 35.5v550h-1200zM100 500h400v-200h-400v200z" />
<glyph unicode="&#xe178;" d="M500 1000h400v198l300 -298l-300 -298v198h-400v200zM100 800v200h100v-200h-100zM400 800h-100v200h100v-200zM700 300h-400v-198l-300 298l300 298v-198h400v-200zM800 500h100v-200h-100v200zM1000 500v-200h100v200h-100z" />
<glyph unicode="&#xe179;" d="M1200 50v1106q0 31 -18 40.5t-44 -7.5l-276 -117q-25 -16 -43.5 -50.5t-18.5 -65.5v-359q0 -29 10.5 -55.5t25 -43t29 -28.5t25.5 -18l10 -5v-397q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5zM550 1200l50 -100v-400l-100 -203v-447q0 -21 -14.5 -35.5 t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v447l-100 203v400l50 100l50 -100v-300h100v300l50 100l50 -100v-300h100v300z" />
<glyph unicode="&#xe180;" d="M1100 106v888q0 22 25 34.5t50 13.5l25 2v56h-400v-56q75 0 87.5 -6t12.5 -44v-394h-500v394q0 38 12.5 44t87.5 6v56h-400v-56q4 0 11 -0.5t24 -3t30 -7t24 -15t11 -24.5v-888q0 -22 -25 -34.5t-50 -13.5l-25 -2v-56h400v56q-75 0 -87.5 6t-12.5 44v394h500v-394 q0 -38 -12.5 -44t-87.5 -6v-56h400v56q-4 0 -11 0.5t-24 3t-30 7t-24 15t-11 24.5z" />
<glyph unicode="&#xe181;" d="M675 1000l-100 100h-375l-100 -100h400l200 -200v-98l295 98h105v200h-425zM500 300v500q0 41 -29.5 70.5t-70.5 29.5h-300q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h300q41 0 70.5 29.5t29.5 70.5zM100 800h300v-200h-300v200zM700 565l400 133 v-163l-400 -133v163zM100 500h300v-200h-300v200zM805 300l295 98v-298h-425l-100 -100h-375l-100 100h400l200 200h105z" />
<glyph unicode="&#xe182;" d="M179 1169l-162 -162q-1 -11 -0.5 -32.5t16 -90t46.5 -140t104 -177.5t175 -208q103 -103 207.5 -176t180 -103.5t137 -47t92.5 -16.5l31 1l163 162q16 17 13 40.5t-22 37.5l-192 136q-19 14 -45 12t-42 -19l-119 -118q-143 103 -267 227q-126 126 -227 268l118 118 q17 17 20 41.5t-11 44.5l-139 194q-14 19 -36.5 22t-40.5 -14z" />
<glyph unicode="&#xe183;" d="M1200 712v200q-6 8 -19 20.5t-63 45t-112 57t-171 45t-235 20.5q-92 0 -175 -10.5t-141.5 -27t-108.5 -36.5t-81.5 -40t-53.5 -36.5t-31 -27.5l-9 -10v-200q0 -21 14.5 -33.5t34.5 -8.5l202 33q20 4 34.5 21t14.5 38v146q141 24 300 24t300 -24v-146q0 -21 14.5 -38 t34.5 -21l202 -33q20 -4 34.5 8.5t14.5 33.5zM800 650l365 -303q14 -14 24.5 -39.5t10.5 -45.5v-212q0 -21 -15 -35.5t-35 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v212q0 20 10.5 45.5t24.5 39.5l365 303v50q0 4 1 10.5t12 22.5t30 28.5t60 23t97 10.5t97 -10t60 -23.5 t30 -27.5t12 -24l1 -10v-50z" />
<glyph unicode="&#xe184;" d="M175 200h950l-125 150v250l100 100v400h-100v-200h-100v200h-200v-200h-100v200h-200v-200h-100v200h-100v-400l100 -100v-250zM1200 100v-100h-1100v100h1100z" />
<glyph unicode="&#xe185;" d="M600 1100h100q41 0 70.5 -29.5t29.5 -70.5v-1000h-300v1000q0 41 29.5 70.5t70.5 29.5zM1000 800h100q41 0 70.5 -29.5t29.5 -70.5v-700h-300v700q0 41 29.5 70.5t70.5 29.5zM400 0v400q0 41 -29.5 70.5t-70.5 29.5h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-400h300z" />
<glyph unicode="&#xe186;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM200 800v-300h200v-100h-200v-100h300v300h-200v100h200v100h-300zM800 800h-200v-500h200v100h100v300h-100 v100zM800 700v-300h-100v300h100z" />
<glyph unicode="&#xe187;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM400 600h-100v200h-100v-500h100v200h100v-200h100v500h-100v-200zM800 800h-200v-500h200v100h100v300h-100 v100zM800 700v-300h-100v300h100z" />
<glyph unicode="&#xe188;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM200 800v-500h300v100h-200v300h200v100h-300zM600 800v-500h300v100h-200v300h200v100h-300z" />
<glyph unicode="&#xe189;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM500 700l-300 -150l300 -150v300zM600 400l300 150l-300 150v-300z" />
<glyph unicode="&#xe190;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM900 800v-500h-700v500h700zM300 400h130q41 0 68 42t27 107t-28.5 108t-66.5 43h-130v-300zM800 700h-130 q-38 0 -66.5 -43t-28.5 -108t27 -107t68 -42h130v300z" />
<glyph unicode="&#xe191;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM200 800v-300h200v-100h-200v-100h300v300h-200v100h200v100h-300zM800 300h100v500h-200v-100h100v-400z M601 300h100v100h-100v-100z" />
<glyph unicode="&#xe192;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM300 700v100h-100v-500h300v400h-200zM800 300h100v500h-200v-100h100v-400zM401 400h-100v200h100v-200z M601 300h100v100h-100v-100z" />
<glyph unicode="&#xe193;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM1000 900h-900v-700h900v700zM400 700h-200v100h300v-300h-99v-100h-100v100h99v200zM800 700h-100v100h200v-500h-100v400zM201 400h100v-100 h-100v100zM701 300h-100v100h100v-100z" />
<glyph unicode="&#xe194;" d="M600 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM600 1014q-171 0 -292.5 -121.5t-121.5 -292.5t121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5zM800 700h-300 v-200h300v-100h-300l-100 100v200l100 100h300v-100z" />
<glyph unicode="&#xe195;" d="M596 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM596 1014q-171 0 -292.5 -121.5t-121.5 -292.5t121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5zM800 700v-100 h-100v100h-200v-100h200v-100h-200v-100h-100v400h300zM800 400h-100v100h100v-100z" />
<glyph unicode="&#xe197;" d="M800 300h128q120 0 205 86t85 208q0 120 -85 206.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5q0 -80 56.5 -137t135.5 -57h222v300h400v-300zM700 200h200l-300 -300 l-300 300h200v300h200v-300z" />
<glyph unicode="&#xe198;" d="M600 714l403 -403q94 26 154.5 104t60.5 178q0 121 -85 207.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5q0 -80 56.5 -137t135.5 -57h8zM700 -100h-200v300h-200l300 300 l300 -300h-200v-300z" />
<glyph unicode="&#xe199;" d="M700 200h400l-270 300h170l-270 300h170l-300 333l-300 -333h170l-270 -300h170l-270 -300h400v-155l-75 -45h350l-75 45v155z" />
<glyph unicode="&#xe200;" d="M700 45v306q46 -30 100 -30q74 0 126.5 52.5t52.5 126.5q0 24 -9 55q50 32 79.5 83t29.5 112q0 90 -61.5 155.5t-150.5 71.5q-26 89 -99.5 145.5t-167.5 56.5q-116 0 -197.5 -81.5t-81.5 -197.5q0 -4 1 -12t1 -11q-14 2 -23 2q-74 0 -126.5 -52.5t-52.5 -126.5 q0 -53 28.5 -97t75.5 -65q-4 -16 -4 -38q0 -74 52.5 -126.5t126.5 -52.5q56 0 100 30v-306l-75 -45h350z" />
<glyph unicode="&#x1f4bc;" d="M800 1000h300q41 0 70.5 -29.5t29.5 -70.5v-400h-500v100h-200v-100h-500v400q0 41 29.5 70.5t70.5 29.5h300v100q0 41 29.5 70.5t70.5 29.5h200q41 0 70.5 -29.5t29.5 -70.5v-100zM500 1000h200v100h-200v-100zM1200 400v-200q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5v200h1200z" />
<glyph unicode="&#x1f4c5;" d="M1100 900v150q0 21 -14.5 35.5t-35.5 14.5h-150v100h-100v-100h-500v100h-100v-100h-150q-21 0 -35.5 -14.5t-14.5 -35.5v-150h1100zM0 800v-750q0 -20 14.5 -35t35.5 -15h1000q21 0 35.5 15t14.5 35v750h-1100zM100 600h100v-100h-100v100zM300 600h100v-100h-100v100z M500 600h100v-100h-100v100zM700 600h100v-100h-100v100zM900 600h100v-100h-100v100zM100 400h100v-100h-100v100zM300 400h100v-100h-100v100zM500 400h100v-100h-100v100zM700 400h100v-100h-100v100zM900 400h100v-100h-100v100zM100 200h100v-100h-100v100zM300 200 h100v-100h-100v100zM500 200h100v-100h-100v100zM700 200h100v-100h-100v100zM900 200h100v-100h-100v100z" />
<glyph unicode="&#x1f4cc;" d="M902 1185l283 -282q15 -15 15 -36t-15 -35q-14 -15 -35 -15t-35 15l-36 35l-279 -267v-300l-212 210l-208 -207l-380 -303l303 380l207 208l-210 212h300l267 279l-35 36q-15 14 -15 35t15 35q14 15 35 15t35 -15z" />
<glyph unicode="&#x1f4ce;" d="M518 119l69 -60l517 511q67 67 95 157t11 183q-16 87 -67 154t-130 103q-69 33 -152 33q-107 0 -197 -55q-40 -24 -111 -95l-512 -512q-68 -68 -81 -163t35 -173q35 -57 94 -89t129 -32q63 0 119 28q33 16 65 40.5t52.5 45.5t59.5 64q40 44 57 61l394 394q35 35 47 84 t-3 96q-27 87 -117 104q-20 2 -29 2q-46 0 -79.5 -17t-67.5 -51l-388 -396l-7 -7l69 -67l377 373q20 22 39 38q23 23 50 23q38 0 53 -36q16 -39 -20 -75l-547 -547q-52 -52 -125 -52q-55 0 -100 33t-54 96q-5 35 2.5 66t31.5 63t42 50t56 54q24 21 44 41l348 348 q52 52 82.5 79.5t84 54t107.5 26.5q25 0 48 -4q95 -17 154 -94.5t51 -175.5q-7 -101 -98 -192l-252 -249l-253 -256z" />
<glyph unicode="&#x1f4f7;" d="M1200 200v600q0 41 -29.5 70.5t-70.5 29.5h-150q-4 8 -11.5 21.5t-33 48t-53 61t-69 48t-83.5 21.5h-200q-41 0 -82 -20.5t-70 -50t-52 -59t-34 -50.5l-12 -20h-150q-41 0 -70.5 -29.5t-29.5 -70.5v-600q0 -41 29.5 -70.5t70.5 -29.5h1000q41 0 70.5 29.5t29.5 70.5z M1000 700h-100v100h100v-100zM844 500q0 -100 -72 -172t-172 -72t-172 72t-72 172t72 172t172 72t172 -72t72 -172zM706 500q0 44 -31 75t-75 31t-75 -31t-31 -75t31 -75t75 -31t75 31t31 75z" />
<glyph unicode="&#x1f512;" d="M900 800h100q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-900q-41 0 -70.5 29.5t-29.5 70.5v600q0 41 29.5 70.5t70.5 29.5h100v200q0 82 59 141t141 59h300q82 0 141 -59t59 -141v-200zM400 800h300v150q0 21 -14.5 35.5t-35.5 14.5h-200 q-21 0 -35.5 -14.5t-14.5 -35.5v-150z" />
<glyph unicode="&#x1f514;" d="M1062 400h17q20 0 33.5 -14.5t13.5 -35.5q0 -20 -13 -40t-31 -27q-22 -9 -63 -23t-167.5 -37t-251.5 -23t-245.5 20.5t-178.5 41.5l-58 20q-18 7 -31 27.5t-13 40.5q0 21 13.5 35.5t33.5 14.5h17l118 173l63 327q15 77 76 140t144 83l-18 32q-6 19 3 32t29 13h94 q20 0 29 -10.5t3 -29.5l-18 -37q83 -19 144 -82.5t76 -140.5l63 -327zM600 104q-54 0 -103 6q12 -49 40 -79.5t63 -30.5t63 30.5t39 79.5q-48 -6 -102 -6z" />
<glyph unicode="&#x1f516;" d="M200 0l450 444l450 -443v1150q0 20 -14.5 35t-35.5 15h-800q-21 0 -35.5 -15t-14.5 -35v-1151z" />
<glyph unicode="&#x1f525;" d="M400 755q2 -12 8 -41.5t8 -43t6 -39.5t3.5 -39.5t-1 -33.5t-6 -31.5t-13.5 -24t-21 -20.5t-31 -12q-38 -10 -67 13t-40.5 61.5t-15 81.5t10.5 75q-52 -46 -83.5 -101t-39 -107t-7.5 -85t5 -63q9 -56 44 -119.5t105 -108.5q31 -21 64 -16t62 23.5t57 49.5t48 61.5t35 60.5 q32 66 39 184.5t-13 157.5q79 -80 122 -164t26 -184q-5 -33 -20.5 -69.5t-37.5 -80.5q-10 -19 -14.5 -29t-12 -26t-9 -23.5t-3 -19t2.5 -15.5t11 -9.5t19.5 -5t30.5 2.5t42 8q57 20 91 34t87.5 44.5t87 64t65.5 88.5t47 122q38 172 -44.5 341.5t-246.5 278.5q22 -44 43 -129 q39 -159 -32 -154q-15 2 -33 9q-79 33 -120.5 100t-44 175.5t48.5 257.5q-13 -8 -34 -23.5t-72.5 -66.5t-88.5 -105.5t-60 -138t-8 -166.5z" />
<glyph unicode="&#x1f527;" d="M948 778l251 126q13 -175 -151 -267q-123 -70 -253 -23l-596 -596q-15 -16 -36.5 -16t-36.5 16l-111 110q-15 15 -15 36.5t15 37.5l600 599q-33 101 6 201.5t135 154.5q164 92 306 -9l-259 -138z" />
</font>
</defs></svg>

BIN rd_ui/app/google_login.png (new binary file, not shown)

View File

@@ -15,7 +15,9 @@
<link rel="stylesheet" href="/bower_components/pivottable/dist/pivot.css">
<link rel="stylesheet" href="/bower_components/cornelius/src/cornelius.css">
<link rel="stylesheet" href="/bower_components/select2/select2.css">
<link rel="stylesheet" href="/bower_components/angular-ui-select/dist/select.css">
<link rel="stylesheet" href="/bower_components/pace/themes/pace-theme-minimal.css">
<link rel="stylesheet" href="/bower_components/font-awesome/css/font-awesome.css">
<link rel="stylesheet" href="/styles/redash.css">
<!-- endbuild -->
</head>
@@ -65,6 +67,12 @@
</ul>
</li>
</ul>
<form class="navbar-form navbar-left" role="search" ng-submit="searchQueries()">
<div class="form-group">
<input type="text" ng-model="term" class="form-control" placeholder="Search queries...">
</div>
<button type="submit" class="btn btn-default"><span class="glyphicon glyphicon-search"></span></button>
</form>
<ul class="nav navbar-nav navbar-right">
<p class="navbar-text avatar" ng-show="currentUser.id" ng-cloak>
<img ng-src="{{currentUser.gravatar_url}}" class="img-circle" alt="{{currentUser.name}}"/>
@@ -110,6 +118,7 @@
<script src="/bower_components/mousetrap/plugins/global-bind/mousetrap-global-bind.js"></script>
<script src="/bower_components/select2/select2.js"></script>
<script src="/bower_components/angular-ui-select2/src/select2.js"></script>
<script src="/bower_components/angular-ui-select/dist/select.js"></script>
<script src="/bower_components/underscore.string/lib/underscore.string.js"></script>
<script src="/bower_components/marked/lib/marked.js"></script>
<script src="/scripts/ng_highchart.js"></script>
@@ -117,6 +126,10 @@
<script src="/scripts/ui-bootstrap-tpls-0.5.0.min.js"></script>
<script src="/bower_components/bucky/bucky.js"></script>
<script src="/bower_components/pace/pace.js"></script>
<script src="/bower_components/mustache/mustache.js"></script>
<script src="/bower_components/canvg/rgbcolor.js"></script>
<script src="/bower_components/canvg/StackBlur.js"></script>
<script src="/bower_components/canvg/canvg.js"></script>
<!-- endbuild -->
<!-- build:js({.tmp,app}) /scripts/scripts.js -->
@@ -133,6 +146,7 @@
<script src="/scripts/visualizations/base.js"></script>
<script src="/scripts/visualizations/chart.js"></script>
<script src="/scripts/visualizations/cohort.js"></script>
<script src="/scripts/visualizations/counter.js"></script>
<script src="/scripts/visualizations/table.js"></script>
<script src="/scripts/visualizations/pivot.js"></script>
<script src="/scripts/directives/directives.js"></script>

View File

@@ -35,6 +35,19 @@
<div class="row">
<div class="main">
{% if show_google_openid %}
<div class="row">
<a href="/oauth/google?next={{next}}"><img src="/google_login.png" class="login-button"/></a>
</div>
<div class="login-or">
<hr class="hr-or">
<span class="span-or">or</span>
</div>
{% endif %}
<form role="form" method="post" name="login">
<div class="form-group">
<label for="inputUsernameEmail">Username or email</label>
@@ -56,20 +69,7 @@
</button>
</form>
{% if show_google_openid %}
<div class="login-or">
<hr class="hr-or">
<span class="span-or">or</span>
</div>
<div class="row">
<div class="col-xs-6 col-sm-6 col-md-6">
<a href="/google_auth/login?next={{next}}" class="btn btn-lg btn-info btn-block">Google</a>
</div>
</div>
{% endif %}
</div>
</div>

View File

@@ -14,7 +14,8 @@ angular.module('redash', [
'ui.bootstrap',
'smartTable.table',
'ngResource',
'ngRoute'
'ngRoute',
'ui.select'
]).config(['$routeProvider', '$locationProvider', '$compileProvider', 'growlProvider',
function ($routeProvider, $locationProvider, $compileProvider, growlProvider) {
if (featureFlags.clientSideMetrics) {
@@ -55,6 +56,11 @@ angular.module('redash', [
}]
}
});
$routeProvider.when('/queries/search', {
templateUrl: '/views/queries_search_results.html',
controller: 'QuerySearchCtrl',
reloadOnSearch: true,
});
$routeProvider.when('/queries/:queryId', {
templateUrl: '/views/query.html',
controller: 'QueryViewCtrl',
@@ -84,6 +90,10 @@ angular.module('redash', [
templateUrl: '/views/index.html',
controller: 'IndexCtrl'
});
$routeProvider.when('/personal', {
templateUrl: '/views/personal.html',
controller: 'PersonalIndexCtrl'
});
$routeProvider.otherwise({
redirectTo: '/'
});
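The two new routes above hand off to QuerySearchCtrl and PersonalIndexCtrl. Those controllers (shown later in this compare) call Query.search() and Query.recent(), which suggests custom $resource actions roughly like the hypothetical sketch below; the module name and the endpoint URLs are assumptions, not part of this diff:

// Hypothetical sketch of the Query resource the new routes depend on.
// "search" and "recent" appear in the controller diffs below; the module
// name and '/api/queries/...' URLs are guesses for illustration only.
angular.module('redash.services')
  .factory('Query', ['$resource', function ($resource) {
    return $resource('/api/queries/:id', {id: '@id'}, {
      search: {method: 'GET', isArray: true, url: '/api/queries/search'},
      recent: {method: 'GET', isArray: true, url: '/api/queries/recent'}
    });
  }]);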

View File

@@ -16,16 +16,9 @@
$timeout(refresh, 59 * 1000);
};
$scope.flowerUrl = featureFlags.flowerUrl;
refresh();
}
var AdminWorkersCtrl = function ($scope, $sce) {
$scope.flowerUrl = $sce.trustAsResourceUrl(featureFlags.flowerUrl);
};
angular.module('redash.admin_controllers', [])
.controller('AdminStatusCtrl', ['$scope', 'Events', '$http', '$timeout', AdminStatusCtrl])
.controller('AdminWorkersCtrl', ['$scope', '$sce', AdminWorkersCtrl])
})();

View File

@@ -1,12 +1,71 @@
(function () {
var QuerySearchCtrl = function($scope, $location, $filter, Events, Query) {
$scope.$parent.pageTitle = "Queries Search";
$scope.gridConfig = {
isPaginationEnabled: true,
itemsByPage: 50,
maxSize: 8,
};
var dateFormatter = function (value) {
if (!value) return "-";
return value.format("DD/MM/YY HH:mm");
}
$scope.gridColumns = [
{
"label": "Name",
"map": "name",
"cellTemplateUrl": "/views/queries_query_name_cell.html"
},
{
'label': 'Created By',
'map': 'user.name'
},
{
'label': 'Created At',
'map': 'created_at',
'formatFunction': dateFormatter
},
{
'label': 'Update Schedule',
'map': 'ttl',
'formatFunction': function (value) {
return $filter('refreshRateHumanize')(value);
}
}
];
$scope.queries = [];
$scope.$parent.term = $location.search().q;
Query.search({q: $scope.term }, function(results) {
$scope.queries = _.map(results, function(query) {
query.created_at = moment(query.created_at);
return query;
});
});
$scope.search = function() {
if (!angular.isString($scope.term) || $scope.term.trim() == "") {
$scope.queries = [];
return;
}
$location.search({q: $scope.term});
};
Events.record(currentUser, "search", "query", "", {"term": $scope.term});
};
var QueriesCtrl = function ($scope, $http, $location, $filter, Query) {
$scope.$parent.pageTitle = "All Queries";
$scope.gridConfig = {
isPaginationEnabled: true,
itemsByPage: 50,
maxSize: 8,
isGlobalSearchActivated: true
}
isGlobalSearchActivated: true};
$scope.allQueries = [];
$scope.queries = [];
@@ -35,7 +94,7 @@
Query.query(function (queries) {
$scope.allQueries = _.map(queries, function (query) {
query.created_at = moment(query.created_at);
query.last_retrieved_at = moment(query.last_retrieved_at);
query.retrieved_at = moment(query.retrieved_at);
return query;
});
@@ -58,35 +117,17 @@
'formatFunction': dateFormatter
},
{
'label': 'Runtime (avg)',
'map': 'avg_runtime',
'formatFunction': function (value) {
return $filter('durationHumanize')(value);
}
},
{
'label': 'Runtime (min)',
'map': 'min_runtime',
'formatFunction': function (value) {
return $filter('durationHumanize')(value);
}
},
{
'label': 'Runtime (max)',
'map': 'max_runtime',
'label': 'Runtime',
'map': 'runtime',
'formatFunction': function (value) {
return $filter('durationHumanize')(value);
}
},
{
'label': 'Last Executed At',
'map': 'last_retrieved_at',
'map': 'retrieved_at',
'formatFunction': dateFormatter
},
{
'label': 'Times Executed',
'map': 'times_retrieved'
},
{
'label': 'Update Schedule',
'map': 'ttl',
@@ -95,6 +136,7 @@
}
}
]
$scope.tabs = [
{"name": "My Queries", "key": "my"},
{"key": "all", "name": "All Queries"},
@@ -110,7 +152,7 @@
});
}
var MainCtrl = function ($scope, Dashboard, notifications) {
var MainCtrl = function ($scope, $location, Dashboard, notifications) {
if (featureFlags.clientSideMetrics) {
$scope.$on('$locationChangeSuccess', function(event, newLocation, oldLocation) {
// This will be called once per actual page load.
@@ -133,7 +175,11 @@
$scope.otherDashboards = $scope.allDashboards['Other'] || [];
$scope.groupedDashboards = _.omit($scope.allDashboards, 'Other');
});
}
};
$scope.searchQueries = function() {
$location.path('/queries/search').search({q: $scope.term});
};
$scope.reloadDashboards();
@@ -146,7 +192,7 @@
$(window).click(function () {
notifications.getPermissions();
});
}
};
var IndexCtrl = function ($scope, Events, Dashboard) {
Events.record(currentUser, "view", "page", "homepage");
@@ -160,10 +206,29 @@
});
}
}
}
};
var PersonalIndexCtrl = function ($scope, Events, Dashboard, Query) {
Events.record(currentUser, "view", "page", "personal_homepage");
$scope.$parent.pageTitle = "Home";
$scope.recentQueries = Query.recent();
$scope.recentDashboards = Dashboard.recent();
$scope.archiveDashboard = function (dashboard) {
if (confirm('Are you sure you want to delete the "' + dashboard.name + '" dashboard?')) {
Events.record(currentUser, "archive", "dashboard", dashboard.id);
dashboard.$delete(function () {
$scope.$parent.reloadDashboards();
});
}
}
};
angular.module('redash.controllers', [])
.controller('QueriesCtrl', ['$scope', '$http', '$location', '$filter', 'Query', QueriesCtrl])
.controller('IndexCtrl', ['$scope', 'Events', 'Dashboard', IndexCtrl])
.controller('MainCtrl', ['$scope', 'Dashboard', 'notifications', MainCtrl]);
.controller('PersonalIndexCtrl', ['$scope', 'Events', 'Dashboard', 'Query', PersonalIndexCtrl])
.controller('MainCtrl', ['$scope', '$location', 'Dashboard', 'notifications', MainCtrl])
.controller('QuerySearchCtrl', ['$scope', '$location', '$filter', 'Events', 'Query', QuerySearchCtrl]);
})();

View File

@@ -1,5 +1,5 @@
(function() {
var DashboardCtrl = function($scope, Events, Widget, $routeParams, $http, $timeout, $q, Dashboard) {
var DashboardCtrl = function($scope, Events, Widget, $routeParams, $location, $http, $timeout, $q, Dashboard) {
$scope.refreshEnabled = false;
$scope.refreshRate = 60;
@@ -15,8 +15,8 @@
return _.map(row, function (widget) {
var w = new Widget(widget);
if (w.visualization && dashboard.dashboard_filters_enabled) {
promises.push(w.getQuery().getQueryResultPromise());
if (w.visualization) {
promises.push(w.getQuery().getQueryResult().toPromise());
}
return w;
@@ -27,27 +27,35 @@
var filters = {};
_.each(queryResults, function(queryResult) {
var queryFilters = queryResult.getFilters();
_.each(queryFilters, function (filter) {
if (!_.has(filters, filter.name)) {
// TODO: first object should be a copy, otherwise one of the chart filters behaves different than the others.
_.each(queryFilters, function (queryFilter) {
var hasQueryStringValue = _.has($location.search(), queryFilter.name);
if (!(hasQueryStringValue || dashboard.dashboard_filters_enabled)) {
// If dashboard filters not enabled, or no query string value given, skip filters linking.
return;
}
if (!_.has(filters, queryFilter.name)) {
var filter = _.extend({}, queryFilter);
filters[filter.name] = filter;
filters[filter.name].originFilters = [];
if (hasQueryStringValue) {
filter.current = $location.search()[filter.name];
}
$scope.$watch(function () { return filter.current }, function (value) {
_.each(filter.originFilters, function (originFilter) {
originFilter.current = value;
});
});
};
}
// TODO: merge values.
filters[filter.name].originFilters.push(filter);
filters[queryFilter.name].originFilters.push(queryFilter);
});
});
if (dashboard.dashboard_filters_enabled) {
$scope.filters = _.values(filters);
}
$scope.filters = _.values(filters);
});
@@ -74,7 +82,7 @@
_.each(row, function(widget, i) {
var newWidget = newWidgets[widget.id];
if (newWidget && newWidget[0].visualization.query.latest_query_data_id != widget.visualization.query.latest_query_data_id) {
row[i] = newWidget[0];
row[i] = new Widget(newWidget[0]);
}
});
});
@@ -83,8 +91,8 @@
});
}, $scope.refreshRate);
};
}
}
};
$scope.triggerRefresh = function() {
$scope.refreshEnabled = !$scope.refreshEnabled;
@@ -103,7 +111,7 @@
};
};
var WidgetCtrl = function($scope, Events, Query) {
var WidgetCtrl = function($scope, $location, Events, Query) {
$scope.deleteWidget = function() {
if (!confirm('Are you sure you want to remove "' + $scope.widget.getName() + '" from the dashboard?')) {
return;
@@ -127,7 +135,9 @@
Events.record(currentUser, "view", "visualization", $scope.widget.visualization.id);
$scope.query = $scope.widget.getQuery();
$scope.queryResult = $scope.query.getQueryResult();
var parameters = Query.collectParamsFromQueryString($location, $scope.query);
var maxAge = $location.search()['maxAge'];
$scope.queryResult = $scope.query.getQueryResult(maxAge, parameters);
$scope.nextUpdateTime = moment(new Date(($scope.query.updated_at + $scope.query.ttl + $scope.query.runtime + 300) * 1000)).fromNow();
$scope.type = 'visualization';
@@ -137,7 +147,7 @@
};
angular.module('redash.controllers')
.controller('DashboardCtrl', ['$scope', 'Events', 'Widget', '$routeParams', '$http', '$timeout', '$q', 'Dashboard', DashboardCtrl])
.controller('WidgetCtrl', ['$scope', 'Events', 'Query', WidgetCtrl])
.controller('DashboardCtrl', ['$scope', 'Events', 'Widget', '$routeParams', '$location', '$http', '$timeout', '$q', 'Dashboard', DashboardCtrl])
.controller('WidgetCtrl', ['$scope', '$location', 'Events', 'Query', WidgetCtrl])
})();
})();

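Note: taken together, the dashboard changes above let a filter be pre-set straight from the URL, even when dashboard filters are disabled. A minimal sketch of the effect, with a hypothetical dashboard slug and filter name:

// Opening /dashboard/sales?region=EMEA makes hasQueryStringValue true for
// the filter named "region", so the merged filter starts out as:
var filter = {name: 'region', current: 'EMEA', originFilters: []};
// The $watch on filter.current then copies 'EMEA' into every widget's
// origin filter, keeping all charts on the dashboard in sync.
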
View File

@@ -14,17 +14,7 @@
var isNewQuery = !$scope.query.id,
queryText = $scope.query.query,
// ref to QueryViewCtrl.saveQuery
saveQuery = $scope.saveQuery,
shortcuts = {
'meta+s': function () {
if ($scope.canEdit) {
$scope.saveQuery();
}
},
'meta+enter': function () {
$scope.executeQuery();
}
};
saveQuery = $scope.saveQuery;
$scope.sourceMode = true;
$scope.canEdit = currentUser.canEdit($scope.query);
@@ -39,6 +29,22 @@
}
});
var shortcuts = {
'meta+s': function () {
if ($scope.canEdit) {
$scope.saveQuery();
}
},
'ctrl+s': function () {
if ($scope.canEdit) {
$scope.saveQuery();
}
},
// Cmd+Enter for Mac
'meta+enter': $scope.executeQuery,
// Ctrl+Enter for PC
'ctrl+enter': $scope.executeQuery
};
KeyboardShortcuts.bind(shortcuts);

View File

@@ -4,9 +4,18 @@
function QueryViewCtrl($scope, Events, $route, $location, notifications, growl, Query, DataSource) {
var DEFAULT_TAB = 'table';
var getQueryResult = function(ttl) {
// Collect parameters from the query string and pass them to getQueryResult, which merges them into the query.
var parameters = Query.collectParamsFromQueryString($location, $scope.query);
if (ttl == undefined) {
ttl = $location.search()['maxAge'];
}
$scope.queryResult = $scope.query.getQueryResult(ttl, parameters);
}
$scope.query = $route.current.locals.query;
Events.record(currentUser, 'view', 'query', $scope.query.id);
$scope.queryResult = $scope.query.getQueryResult();
getQueryResult();
$scope.queryExecuting = false;
$scope.isQueryOwner = currentUser.id === $scope.query.user.id;
@@ -24,6 +33,10 @@
$scope.queryExecuting = lock;
};
$scope.showApiKey = function() {
alert("API Key for this query:\n" + $scope.query.api_key);
};
$scope.saveQuery = function(options, data) {
if (data) {
data.id = $scope.query.id;
@@ -57,7 +70,7 @@
};
$scope.executeQuery = function() {
$scope.queryResult = $scope.query.getQueryResult(0);
getQueryResult(0);
$scope.lockButton(true);
$scope.cancelling = false;
Events.record(currentUser, 'execute', 'query', $scope.query.id);
@@ -68,6 +81,31 @@
$scope.queryResult.cancelExecution();
Events.record(currentUser, 'cancel_execute', 'query', $scope.query.id);
};
$scope.archiveQuery = function(options, data) {
if (data) {
data.id = $scope.query.id;
} else {
data = $scope.query;
}
$scope.isDirty = false;
options = _.extend({}, {
successMessage: 'Query archived',
errorMessage: 'Query could not be archived'
}, options);
return Query.delete({id: data.id}, function() {
$scope.query.is_archived = true;
$scope.query.ttl = -1;
growl.addSuccessMessage(options.successMessage);
// This feels dirty.
$('#archive-confirmation-modal').modal('hide');
}, function(httpResponse) {
growl.addErrorMessage(options.errorMessage);
}).$promise;
}
$scope.updateDataSource = function() {
Events.record(currentUser, 'update_data_source', 'query', $scope.query.id);
@@ -121,6 +159,8 @@
$scope.query.queryResult = $scope.queryResult;
notifications.showNotification("re:dash", $scope.query.name + " updated.");
} else if (status == 'failed') {
notifications.showNotification("re:dash", $scope.query.name + " failed to run: " + $scope.queryResult.getError());
}
if (status === 'done' || status === 'failed') {

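Note: the upshot for the query page is that the maxAge and p_<name> query-string parameters now control execution. A short sketch of the mapping (URLs hypothetical; a maxAge of 0 forces re-execution per getQueryResult above):

// For a query whose text contains {{start_date}}:
// /queries/42                  -> getQueryResult(): fall back to the query's own ttl
// /queries/42?maxAge=0         -> getQueryResult(0): always execute afresh
// /queries/42?p_start_date=2015-01-01
//                              -> parameters = {start_date: '2015-01-01'}
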
View File

@@ -147,22 +147,22 @@
var reset = function() {
$scope.saveInProgress = false;
$scope.widgetSize = 1;
$scope.queryId = null;
$scope.selectedVis = null;
$scope.query = null;
$scope.query = {};
$scope.selected_query = undefined;
$scope.text = "";
};
reset();
$scope.loadVisualizations = function () {
if (!$scope.queryId) {
if (!$scope.query.selected) {
return;
}
Query.get({ id: $scope.queryId }, function(query) {
Query.get({ id: $scope.query.selected.id }, function(query) {
if (query) {
$scope.query = query;
$scope.selected_query = query;
if (query.visualizations.length) {
$scope.selectedVis = query.visualizations[0];
}
@@ -170,6 +170,20 @@
});
};
$scope.searchQueries = function (term) {
if (!term || term.length < 3) {
return;
}
Query.search({q: term}, function(results) {
$scope.queries = results;
});
};
$scope.$watch('query', function () {
$scope.loadVisualizations();
}, true);
$scope.saveWidget = function() {
$scope.saveInProgress = true;

View File

@@ -97,14 +97,24 @@
value: '=',
ignoreBlanks: '=',
editable: '=',
done: '='
done: '=',
},
template: function (tElement, tAttrs) {
var elType = tAttrs.editor || 'input';
var placeholder = tAttrs.placeholder || 'Click to edit';
return '<span ng-click="editable && edit()" ng-bind="value" ng-class="{editable: editable}"></span>' +
'<span ng-click="editable && edit()" ng-show="editable && !value" ng-class="{editable: editable}">' + placeholder + '</span>' +
'<{elType} ng-model="value" class="rd-form-control"></{elType}>'.replace('{elType}', elType);
var viewMode = '';
if (tAttrs.markdown == "true") {
viewMode = '<span ng-click="editable && edit()" ng-bind-html="value|markdown" ng-class="{editable: editable}"></span>';
} else {
viewMode = '<span ng-click="editable && edit()" ng-bind="value" ng-class="{editable: editable}"></span>';
}
var placeholderSpan = '<span ng-click="editable && edit()" ng-show="editable && !value" ng-class="{editable: editable}">' + placeholder + '</span>';
var editor = '<{elType} ng-model="value" class="rd-form-control"></{elType}>'.replace('{elType}', elType);
return viewMode + placeholderSpan + editor;
},
link: function ($scope, element, attrs) {
// Let's get a reference to the input element, as we'll want to reference it.
@@ -224,4 +234,17 @@
'</span>'
}
});
// Used instead of autofocus attribute, which doesn't work in Angular as there is no real page load.
directives.directive('autofocus',
['$timeout', function ($timeout) {
return {
link: function (scope, element) {
$timeout(function () {
element[0].focus();
});
}
};
}]
);
})();

View File

@@ -98,13 +98,13 @@
</button>',
link: function($scope) {
$scope.formatQuery = function formatQuery() {
$scope.queryExecuting = true;
$scope.queryFormatting = true;
$http.post('/api/queries/format', {
'query': $scope.query.query
}).success(function (response) {
$scope.query.query = response;
}).finally(function () {
$scope.queryExecuting = false;
$scope.queryFormatting = false;
});
};
}

View File

@@ -70,6 +70,18 @@ angular.module('redash.filters', []).
.filter('markdown', ['$sce', function ($sce) {
return function (text) {
if (!text) {
return "";
}
return $sce.trustAsHtml(marked(text));
}
}]);
}])
.filter('trustAsHtml', ['$sce', function ($sce) {
return function (text) {
if (!text) {
return "";
}
return $sce.trustAsHtml(text);
}
}]);

View File

@@ -1,9 +1,20 @@
(function () {
'use strict';
var ColorPalette = {
'Blue':'#4572A7',
'Red':'#AA4643',
'Green': '#89A54E',
'Purple': '#80699B',
'Cyan': '#3D96AE',
'Orange': '#DB843D',
'Light Blue': '#92A8CD',
'Lilac': '#A47D7C',
'Light Green': '#B5CA92',
};
Highcharts.setOptions({
colors: ["#4572A7", "#AA4643", "#89A54E", "#80699B", "#3D96AE",
"#DB843D", "#92A8CD", "#A47D7C", "#B5CA92"]
colors: _.values(ColorPalette)
});
var defaultOptions = {
@@ -115,23 +126,51 @@
{
text: 'Show Total',
onclick: function () {
var hasTotalsAlready = _.some(this.series, function (s) {
var res = (s.name == 'Total');
//if 'Total' already exists - just make it visible
if (res) s.setVisible(true, false);
return res;
})
var data = {};
_.each(this.series, function (s) {
s.setVisible(false, false);
_.each(s.data, function (p) {
data[p.x] = data[p.x] || {'x': p.x, 'y': 0};
data[p.x].y = data[p.x].y + p.y;
});
if (s.name != 'Total') s.setVisible(false, false);
if (!hasTotalsAlready) {
_.each(s.data, function (p) {
data[p.x] = data[p.x] || {'x': p.x, 'y': 0};
data[p.x].y = data[p.x].y + p.y;
});
}
});
this.addSeries({
data: _.values(data),
type: 'line',
name: 'Total'
}, false)
if (!hasTotalsAlready) {
this.addSeries({
data: _.values(data),
type: 'line',
name: 'Total'
}, false)
}
this.redraw();
}
},
{
text: 'Save Image',
onclick: function () {
var canvas = document.createElement('canvas');
window.canvg(canvas, this.getSVG());
var href = canvas.toDataURL('image/png');
var a = document.createElement('a');
a.href = href;
var filenameSuffix = new Date().toISOString().replace(/:/g,'_').replace('Z', '');
if (this.title) {
filenameSuffix = this.title.text;
}
a.download = 'redash_charts_'+filenameSuffix+'.png';
document.body.appendChild(a);
a.click();
a.remove();
}
}
]
}
@@ -204,6 +243,7 @@
};
angular.module('highchart', [])
.constant('ColorPalette', ColorPalette)
.directive('chart', ['$timeout', function ($timeout) {
return {
restrict: 'E',
@@ -265,8 +305,28 @@
scope.chart.series[0].remove(false);
};
// We check either for true or undefined for backward compatibility.
var series = scope.series;
if (chartOptions['sortX'] === true || chartOptions['sortX'] === undefined) {
var seriesCopy = [];
_.each(series, function (s) {
// make a copy of the series data, so we don't overwrite the original.
var fieldName = 'x';
if (s.data.length > 0 && _.has(s.data[0], 'name')) {
fieldName = 'name';
};
var sorted = _.extend({}, s, {data: _.sortBy(s.data, fieldName)});
seriesCopy.push(sorted);
});
series = seriesCopy;
}
if (!('xAxis' in chartOptions && 'type' in chartOptions['xAxis'])) {
if (scope.series.length > 0 && _.some(scope.series[0].data, function (p) {
if (series.length > 0 && _.some(series[0].data, function (p) {
return (angular.isString(p.x) || angular.isDefined(p.name));
})) {
chartOptions['xAxis'] = chartOptions['xAxis'] || {};
@@ -278,13 +338,13 @@
}
if (chartOptions['xAxis']['type'] == 'category' || chartOptions['series']['type']=='pie') {
if (!angular.isDefined(scope.series[0].data[0].name)) {
if (!angular.isDefined(series[0].data[0].name)) {
// We need to make sure that for each category, each series has a value.
var categories = _.union.apply(this, _.map(scope.series, function (s) {
var categories = _.union.apply(this, _.map(series, function (s) {
return _.pluck(s.data, 'x')
}));
_.each(scope.series, function (s) {
_.each(series, function (s) {
// TODO: move this logic to Query#getChartData
var yValues = _.groupBy(s.data, 'x');
@@ -295,11 +355,6 @@
}
});
if (categories.length == 1) {
newData = _.sortBy(newData, 'y').reverse();
}
;
s.data = newData;
});
}
@@ -307,7 +362,7 @@
scope.chart.counters.color = 0;
_.each(scope.series, function (s) {
_.each(series, function (s) {
// here we override the series with the visualization config
s = _.extend(s, chartOptions['series']);
@@ -338,4 +393,4 @@
};
}]);
})();
})();

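Note: the 'Save Image' menu item above rasterizes the chart's SVG to a PNG entirely client-side. A condensed sketch of the same technique, assuming canvg is loaded globally as in the code (the function name and filename are placeholders):

function svgToPngDownload(svgString, filename) {
  var canvas = document.createElement('canvas');
  window.canvg(canvas, svgString);           // rasterize the SVG onto a canvas
  var a = document.createElement('a');
  a.href = canvas.toDataURL('image/png');    // encode the canvas as a PNG data URI
  a.download = filename;                     // download instead of navigating
  document.body.appendChild(a);
  a.click();
  a.remove();
}
// e.g. svgToPngDownload(chart.getSVG(), 'redash_chart.png');
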
View File

@@ -1,6 +1,12 @@
(function () {
var Dashboard = function($resource) {
var resource = $resource('/api/dashboards/:slug', {slug: '@slug'});
var resource = $resource('/api/dashboards/:slug', {slug: '@slug'}, {
recent: {
method: 'get',
isArray: true,
url: "/api/dashboards/recent"
}});
resource.prototype.canEdit = function() {
return currentUser.hasPermission('admin') || currentUser.canEdit(this);
}

View File

@@ -1,10 +1,9 @@
(function () {
var notifications = function (Events) {
var notificationService = {};
var lastNotification = null;
notificationService.isSupported = function () {
if (window.webkitNotifications) {
if ("Notification" in window) {
return true;
} else {
console.log("HTML5 notifications are not supported.");
@@ -17,8 +16,12 @@
return;
}
if (!window.webkitNotifications.checkPermission() == 0) { // 0 is PERMISSION_ALLOWED
window.webkitNotifications.requestPermission();
if (Notification.permission !== "granted") {
Notification.requestPermission(function (status) {
if (Notification.permission !== status) {
Notification.permission = status;
}
});
}
}
@@ -27,23 +30,16 @@
return;
}
if (document.webkitVisibilityState && document.webkitVisibilityState == 'visible') {
return;
}
if (lastNotification) {
lastNotification.cancel();
}
var notification = window.webkitNotifications.createNotification('', title, content);
lastNotification = notification;
// using the 'tag' to avoid showing duplicate notifications
var notification = new Notification(title, {'tag': title+content, 'body': content});
setTimeout(function(){
notification.close();
},3000);
notification.onclick = function () {
window.focus();
this.cancel();
Events.record(currentUser, 'click', 'notification');
};
notification.show()
}
return notificationService;

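Note: the rewritten service drops the deprecated window.webkitNotifications API in favor of the standard HTML5 Notification API. For reference, a minimal standalone sketch of that API (title, tag and body are placeholders):

if ("Notification" in window) {
  Notification.requestPermission(function (status) {
    if (status === "granted") {
      // Reusing the same tag replaces an earlier notification instead of stacking.
      var n = new Notification("re:dash", {tag: "demo", body: "Query updated."});
      setTimeout(function () { n.close(); }, 3000);
      n.onclick = function () { window.focus(); };
    }
  });
}
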
View File

@@ -22,6 +22,8 @@
} else if (_.isString(v) && v.match(/^\d{4}-\d{2}-\d{2}/)) {
row[k] = moment(v);
columnTypes[k] = 'date';
} else if (typeof(v) == 'object' && v !== null) {
row[k] = JSON.stringify(v);
}
}, this);
}, this);
@@ -212,10 +214,6 @@
}
});
_.each(series, function (series) {
series.data = _.sortBy(series.data, 'x');
});
return _.values(series);
};
@@ -375,7 +373,18 @@
};
var Query = function ($resource, QueryResult, DataSource) {
var Query = $resource('/api/queries/:id', {id: '@id'});
var Query = $resource('/api/queries/:id', {id: '@id'},
{
search: {
method: 'get',
isArray: true,
url: "/api/queries/search"
},
recent: {
method: 'get',
isArray: true,
url: "/api/queries/recent"
}});
Query.newQuery = function () {
return new Query({
@@ -386,15 +395,54 @@
});
};
Query.collectParamsFromQueryString = function($location, query) {
var parameterNames = query.getParameters();
var parameters = {};
var queryString = $location.search();
_.each(parameterNames, function(param, i) {
var qsName = "p_" + param;
if (qsName in queryString) {
parameters[param] = queryString[qsName];
}
});
return parameters;
};
Query.prototype.getSourceLink = function () {
return '/queries/' + this.id + '/source';
};
Query.prototype.getQueryResult = function (ttl) {
Query.prototype.getQueryResult = function (ttl, parameters) {
if (ttl == undefined) {
ttl = this.ttl;
}
var queryText = this.query;
var queryParameters = this.getParameters();
var paramsRequired = !_.isEmpty(queryParameters);
var missingParams = parameters === undefined ? queryParameters : _.difference(queryParameters, _.keys(parameters));
if (paramsRequired && missingParams.length > 0) {
var paramsWord = "parameter";
if (missingParams.length > 1) {
paramsWord = "parameters";
}
return new QueryResult({job: {error: "Missing values for " + missingParams.join(', ') + " "+paramsWord+".", status: 4}});
}
if (paramsRequired) {
queryText = Mustache.render(queryText, parameters);
// Need to clear the latest results, to make sure we don't reuse results computed for different params.
this.latest_query_data = null;
this.latest_query_data_id = null;
}
if (this.latest_query_data && ttl != 0) {
if (!this.queryResult) {
this.queryResult = new QueryResult({'query_result': this.latest_query_data});
@@ -404,7 +452,7 @@
this.queryResult = QueryResult.getById(this.latest_query_data_id);
}
} else if (this.data_source_id) {
this.queryResult = QueryResult.get(this.data_source_id, this.query, ttl);
this.queryResult = QueryResult.get(this.data_source_id, queryText, ttl);
}
return this.queryResult;
@@ -412,6 +460,26 @@
Query.prototype.getQueryResultPromise = function() {
return this.getQueryResult().toPromise();
};
Query.prototype.getParameters = function() {
var parts = Mustache.parse(this.query);
var parameters = [];
var collectParams = function(parts) {
parameters = [];
_.each(parts, function(part) {
if (part[0] == 'name' || part[0] == '&') {
parameters.push(part[1]);
} else if (part[0] == '#') {
parameters = _.union(parameters, collectParams(part[4]));
}
});
return parameters;
};
parameters = collectParams(parts);
return parameters;
}
return Query;

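Note: these Query service additions are what make parameterized queries work end to end: getParameters() extracts placeholder names from the Mustache parse tree, collectParamsFromQueryString() reads p_-prefixed values from the URL, and getQueryResult() renders the text before execution. A minimal sketch with a hypothetical query:

var queryText = "SELECT count(*) FROM events WHERE created_at > '{{start_date}}'";

// getParameters() walks Mustache.parse(queryText) and returns ['start_date'].
// On /queries/42?p_start_date=2015-01-01, collectParamsFromQueryString()
// returns {start_date: '2015-01-01'} by looking up each name under a "p_" prefix.

var rendered = Mustache.render(queryText, {start_date: '2015-01-01'});
// => "SELECT count(*) FROM events WHERE created_at > '2015-01-01'"
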
View File

@@ -68,7 +68,6 @@
template: '<filters></filters>\n' + Visualization.renderVisualizationsTemplate,
replace: false,
link: function (scope) {
scope.select2Options = {
width: '50%'
};
@@ -103,7 +102,7 @@
if (filters) {
scope.filters = filters;
if (filters.length) {
if (filters.length && false) {
readURL();
// start watching for changes and update URL

View File

@@ -33,7 +33,7 @@
$scope.chartSeries = [];
$scope.chartOptions = {};
var reloadData = _.throttle(function(data) {
var reloadData = function(data) {
if (!data || ($scope.queryResult && $scope.queryResult.getData()) == null) {
$scope.chartSeries.splice(0, $scope.chartSeries.length);
} else {
@@ -41,6 +41,9 @@
_.each($scope.queryResult.getChartData($scope.options.columnMapping), function (s) {
var additional = {'stacking': 'normal'};
if ('globalSeriesType' in $scope.options) {
additional['type'] = $scope.options.globalSeriesType;
}
if ($scope.options.seriesOptions && $scope.options.seriesOptions[s.name]) {
additional = $scope.options.seriesOptions[s.name];
if (!additional.name || additional.name == "") {
@@ -49,8 +52,8 @@
}
$scope.chartSeries.push(_.extend(s, additional));
});
}
}, 500);
};
};
$scope.$watch('options', function (chartOptions) {
if (chartOptions) {
@@ -74,11 +77,13 @@
};
});
chartVisualization.directive('chartEditor', function () {
chartVisualization.directive('chartEditor', function (ColorPalette) {
return {
restrict: 'E',
templateUrl: '/views/visualizations/chart_editor.html',
link: function (scope, element, attrs) {
scope.palette = ColorPalette;
scope.seriesTypes = {
'Line': 'line',
'Column': 'column',
@@ -87,6 +92,8 @@
'Pie': 'pie'
};
scope.globalSeriesType = scope.visualization.options.globalSeriesType || 'column';
scope.stackingOptions = {
"None": "none",
"Normal": "normal",
@@ -120,6 +127,16 @@
var chartOptionsUnwatch = null,
columnsWatch = null;
scope.$watch('globalSeriesType', function(type, old) {
scope.visualization.options.globalSeriesType = type;
if (type && old && type !== old && scope.visualization.options.seriesOptions) {
_.each(scope.visualization.options.seriesOptions, function(sOptions) {
sOptions.type = type;
});
}
});
scope.$watch('visualization.type', function (visualizationType) {
if (visualizationType == 'CHART') {
if (scope.visualization.options.series.stacking === null) {
@@ -130,19 +147,25 @@
scope.stacking = scope.visualization.options.series.stacking;
}
if (scope.visualization.options.sortX === undefined) {
scope.visualization.options.sortX = true;
}
var refreshSeries = function() {
scope.series = _.map(scope.queryResult.getChartData(scope.visualization.options.columnMapping), function (s) { return s.name; });
// TODO: remove unneeded ones?
if (scope.visualization.options.seriesOptions == undefined) {
scope.visualization.options.seriesOptions = {};
scope.visualization.options.seriesOptions = {
type: scope.globalSeriesType
};
};
_.each(scope.series, function(s, i) {
if (scope.visualization.options.seriesOptions[s] == undefined) {
scope.visualization.options.seriesOptions[s] = {'type': 'column', 'yAxis': 0};
scope.visualization.options.seriesOptions[s] = {'type': scope.visualization.options.globalSeriesType, 'yAxis': 0};
}
scope.visualization.options.seriesOptions[s].zIndex = i;
scope.visualization.options.seriesOptions[s].zIndex = scope.visualization.options.seriesOptions[s].zIndex === undefined ? i : scope.visualization.options.seriesOptions[s].zIndex;
});
scope.zIndexes = _.range(scope.series.length);
@@ -230,4 +253,4 @@
}
}
});
}());
}());

View File

@@ -0,0 +1,61 @@
'use strict';
(function() {
var module = angular.module('redash.visualization');
module.config(['VisualizationProvider', function(VisualizationProvider) {
var renderTemplate =
'<counter-renderer ' +
'options="visualization.options" query-result="queryResult">' +
'</counter-renderer>';
var editTemplate = '<counter-editor></counter-editor>';
var defaultOptions = {};
VisualizationProvider.registerVisualization({
type: 'COUNTER',
name: 'Counter',
renderTemplate: renderTemplate,
editorTemplate: editTemplate,
defaultOptions: defaultOptions
});
}
]);
module.directive('counterRenderer', function() {
return {
restrict: 'E',
templateUrl: '/views/visualizations/counter.html',
link: function($scope, elm, attrs) {
$scope.visualization.options.rowNumber =
$scope.visualization.options.rowNumber || 0;
$scope.$watch('[queryResult && queryResult.getData(), visualization.options]',
function() {
var queryData = $scope.queryResult.getData();
if (queryData) {
var rowNumber = $scope.visualization.options.rowNumber || 0;
var counterColName = $scope.visualization.options.counterColName || 'counter';
var targetColName = $scope.visualization.options.targetColName || 'target';
$scope.counterValue = queryData[rowNumber][counterColName];
$scope.targetValue = queryData[rowNumber][targetColName];
if ($scope.targetValue) {
$scope.delta = $scope.counterValue - $scope.targetValue;
$scope.trendPositive = $scope.delta >= 0;
}
}
}, true);
}
}
});
module.directive('counterEditor', function() {
return {
restrict: 'E',
templateUrl: '/views/visualizations/counter_editor.html'
}
});
})();

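Note: the counter simply picks one cell (and optionally a target cell) out of the result set. A sketch with hypothetical data and options:

// Query result rows:
var rows = [{value: 1234, goal: 1000}];
// With options {rowNumber: 0, counterColName: 'value', targetColName: 'goal'}:
//   counterValue = 1234, targetValue = 1000,
//   delta = 234, trendPositive = true -> rendered with the 'positive' class.
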
View File

@@ -1,14 +1,15 @@
.main {
max-width: 320px;
margin: 0 auto;
margin-top:20px;
}
.login-or {
position: relative;
font-size: 18px;
color: #aaa;
margin-top: 10px;
margin-bottom: 10px;
margin-top: 20px;
margin-bottom: 20px;
padding-top: 10px;
padding-bottom: 10px;
}
@@ -31,7 +32,9 @@
margin-bottom: 0px !important;
}
/*h3 {*/
/*text-align: center;*/
/*line-height: 300%;*/
/*}*/
img.login-button {
width: 250px;
display: block;
margin-left: auto;
margin-right: auto;
}

View File

@@ -270,6 +270,35 @@ to add those CSS styles here. */
pivot-table-renderer > table, grid-renderer > div, visualization-renderer > div {
overflow: auto;
}
counter-renderer {
display: block;
text-align: center;
}
counter-renderer counter {
margin: 0 auto;
background: #f9f9f9;
padding: 15px 50px;
display: block;
}
counter-renderer value,
counter-renderer counter-target {
font-size: 80px;
display: block;
}
counter-renderer counter-target {
color: #ccc;
}
counter-renderer counter.positive value {
color: #5cb85c;
}
counter-renderer counter.negative value {
color: #d9534f;
margin-right: 15px;
}
counter-renderer counter-name {
font-size: 40px;
display: block;
}
.rd-widget-textbox p {
margin-bottom: 0;

View File

@@ -24,10 +24,6 @@
<span class="badge">{{manager.outdated_queries_count}}</span>
Outdated Queries Count
</li>
<li class="list-group-item" ng-if="flowerUrl">
<a href="/admin/workers">Workers' Status</a>
</li>
</ul>
<ul class="list-group col-lg-4">
<li class="list-group-item active">Queues</li>

View File

@@ -1,3 +0,0 @@
<div class="container-fluid iframe-container">
<iframe src="{{flowerUrl}}" style="width:100%; height:100%; background-color:transparent;"></iframe>
</div>

View File

@@ -14,7 +14,7 @@
</button>
</span>
</h2>
<filters></filters>
<filters ng-if="dashboard.dashboard_filters_enabled"></filters>
</div>
<div class="container" id="dashboard">
@@ -29,7 +29,7 @@
<span ng-hide="currentUser.hasPermission('view_query')">{{query.name}}</span>
<query-link query="query" visualization="widget.visualization" ng-show="currentUser.hasPermission('view_query')"></query-link>
</p>
<div class="text-muted" ng-bind="query.description"></div>
<div class="text-muted" ng-bind-html="query.description | markdown"></div>
</h3>
</div>

View File

@@ -22,22 +22,22 @@
</div>
<div ng-show="isVisualization()">
<p>
<form class="form-inline" role="form" ng-submit="loadVisualizations()">
<div class="form-group">
<input class="form-control" placeholder="Query Id" ng-model="queryId">
</div>
<button type="submit" class="btn btn-primary" ng-disabled="!queryId">
Load visualizations
</button>
</form>
</p>
<div class="form-group">
<ui-select ng-model="query.selected" theme="bootstrap" reset-search-input="false">
<ui-select-match placeholder="Search a query by name">{{$select.selected.name}}</ui-select-match>
<ui-select-choices repeat="q in queries"
refresh="searchQueries($select.search)"
refresh-delay="0">
<div ng-bind-html="q.name | highlight: $select.search | trustAsHtml"></div>
</ui-select-choices>
</ui-select>
</div>
<div ng-show="query">
<div class="form-group">
<label for="">Choose Visualization</label>
<select ng-model="selectedVis" ng-options="vis as vis.name group by vis.type for vis in query.visualizations" class="form-control"></select>
</div>
<div ng-show="selected_query">
<div class="form-group">
<label for="">Choose Visualization</label>
<select ng-model="selectedVis" ng-options="vis as vis.name group by vis.type for vis in selected_query.visualizations" class="form-control"></select>
</div>
</div>
</div>

View File

@@ -0,0 +1,28 @@
<div class="container">
<div class="row">
<div class="list-group col-md-6">
<div class="list-group-item active">
Recent Dashboards
<button ng-show="currentUser.hasPermission('create_dashboard')" type="button" class="btn btn-sm btn-link" data-toggle="modal" href="#new_dashboard_dialog" tooltip="New Dashboard"><span class="glyphicon glyphicon-plus-sign"></span></button>
</div>
<div class="list-group-item" ng-repeat="dashboard in recentDashboards" >
<button type="button" class="close delete-button" aria-hidden="true" ng-show="dashboard.canEdit()" ng-click="archiveDashboard(dashboard)" tooltip="Delete Dashboard">&times;</button>
<a ng-href="/dashboard/{{dashboard.slug}}">{{dashboard.name}}</a>
</div>
</div>
<div class="list-group col-md-6">
<div class="list-group-item active">
Recent Queries
</div>
<a ng-href="/queries/{{query.id}}" class="list-group-item" ng-repeat="query in recentQueries">{{query.name}}</a>
</div>
</div>
<div ng-show="currentUser.hasPermission('admin')" class="row">
<div class="list-group">
<div class="list-group-item active">Admin</div>
<a href="/admin/status" class="list-group-item">Status</a>
</div>
</div>
</div>

View File

@@ -0,0 +1,19 @@
<div class="container">
<div class="row">
<p>
<form class="form-inline" role="form" ng-submit="search()">
<div class="form-group">
<input class="form-control" placeholder="Search..." ng-model="term" autofocus>
</div>
<button type="submit" class="btn btn-primary">
<span class="glyphicon glyphicon-search"></span>
</button>
</form>
</p>
<smart-table rows="queries" columns="gridColumns"
config="gridConfig"
class="table table-condensed table-hover"></smart-table>
</div>
</div>

View File

@@ -1,6 +1,7 @@
<div class="container">
<p class="alert alert-warning" ng-if="query.is_archived">This query is archived and can't be used in dashboards, and won't appear in search results.</p>
<alert-unsaved-changes ng-if="canEdit" is-dirty="isDirty"></alert-unsaved-changes>
<div class="row">
@@ -12,7 +13,14 @@
</h2>
<p>
<em>
<edit-in-place editable="isQueryOwner" done="saveDescription" editor="textarea" placeholder="No description" ignore-blanks='false' value="query.description"></edit-in-place>
<edit-in-place editable="isQueryOwner"
done="saveDescription"
editor="textarea"
placeholder="No description"
ignore-blanks='false'
value="query.description"
markdown="true">
</edit-in-place>
</em>
</p>
</div>
@@ -74,7 +82,7 @@
<!-- code editor -->
<div ng-show="sourceMode">
<p>
<query-editor query="query" lock="queryExecuting"></query-editor>
<query-editor query="query" lock="queryFormatting"></query-editor>
</p>
<hr>
</div>
@@ -126,6 +134,33 @@
<span class="glyphicon glyphicon-cloud-download"></span>
<span class="rd-hidden-xs">Download Dataset</span>
</a>
<a class="btn btn-warning btn-sm" ng-disabled="queryExecuting" data-toggle="modal" data-target="#archive-confirmation-modal"
ng-show="!query.is_archived && query.id != undefined && (isQueryOwner || currentUser.hasPermission('admin'))">
<i class="fa fa-archive" title="Archive Query"></i>
</a>
<button class="btn btn-default btn-sm" ng-show="query.id != undefined" ng-click="showApiKey()">
<i class="fa fa-key" title="Show API Key"></i>
</button>
<div class="modal fade" id="archive-confirmation-modal" tabindex="-1" role="dialog" aria-labelledby="archiveConfirmationModal" aria-hidden="true">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<h4 class="modal-title">Query Archive</h4>
</div>
<div class="modal-body">
Are you sure you want to archive this query? <br/>
All dashboard widgets created with its visualizations will be deleted.
</div>
<div class="modal-footer">
<button type="button" class="btn btn-default" data-dismiss="modal">No</button>
<button type="button" class="btn btn-primary" ng-click="archiveQuery()">Yes, archive.</button>
</div>
</div>
</div>
</div>
</p>
</div>

View File

@@ -18,6 +18,42 @@
class="form-control"></select>
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-2">Series Type</label>
<div class="col-sm-10">
<select required ng-options="value as key for (key, value) in seriesTypes"
ng-model="globalSeriesType" class="form-control"></select>
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-2">y Axis min</label>
<div class="col-sm-10">
<input name="yAxisMin" type="number" class="form-control"
ng-model="visualization.options.yAxis.min"
placeholder="Auto">
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-2">y Axis max</label>
<div class="col-sm-10">
<input name="yAxisMin" type="number" class="form-control"
ng-model="visualization.options.yAxis.max"
placeholder="Auto">
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-2">Sort X Values</label>
<div class="col-sm-10">
<input name="sortX" type="checkbox" class="form-control"
ng-model="visualization.options.sortX">
</div>
</div>
</div>
</div>
@@ -82,10 +118,17 @@
placeholder="{{seriesName}}">
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-3">Color</label>
<div class="col-sm-9">
<select class="form-control" ng-model="visualization.options.seriesOptions[seriesName].color" ng-options="val as key for (key,val) in palette"></select>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>

View File

@@ -0,0 +1,5 @@
<counter ng-class="{'positive': targetValue && trendPositive, 'negative': targetValue && !trendPositive}">
<value>{{counterValue|number}}</value>
<counter-target ng-if="targetValue">({{targetValue|number}})</counter-target>
<counter-name>{{visualization.name}}</counter-name>
</counter>

View File

@@ -0,0 +1,20 @@
<div class="form-horizontal">
<div class="form-group">
<label class="col-lg-6">Row Number</label>
<div class="col-lg-6">
<input type="number" ng-model="visualization.options.rowNumber" class="form-control">
</div>
</div>
<div class="form-group">
<label class="col-lg-6">Counter Column Name</label>
<div class="col-lg-6">
<select ng-options="name for name in queryResult.columnNames" ng-model="visualization.options.counterColName" class="form-control"></select>
</div>
</div>
<div class="form-group">
<label class="col-lg-6">Target Column Name</label>
<div class="col-lg-6">
<select ng-options="name for name in queryResult.columnNames" ng-model="visualization.options.targetColName" class="form-control"></select>
</div>
</div>
</div>

View File

@@ -24,4 +24,4 @@
</div>
</form>
</div>
</div>

View File

@@ -2,7 +2,10 @@
"name": "rdUi",
"version": "0.1.0",
"dependencies": {
"angular": "1.2.7",
"angular": "1.2.18",
"angular-resource": "1.2.18",
"angular-route": "1.2.18",
"angular-growl": "0.4.0",
"json3": "3.2.4",
"jquery": "1.9.1",
"bootstrap": "3.0.0",
@@ -13,9 +16,6 @@
"angular-ui-codemirror": "0.0.5",
"highcharts": "3.0.10",
"underscore": "1.5.1",
"angular-resource": "1.2.15",
"angular-growl": "0.3.1",
"angular-route": "1.2.7",
"pivottable": "~1.1.1",
"cornelius": "https://github.com/restorando/cornelius.git",
"gridster": "0.2.0",
@@ -25,13 +25,17 @@
"underscore.string": "~2.3.3",
"marked": "~0.3.2",
"bucky": "~0.2.6",
"pace": "~0.5.1"
"pace": "~0.5.1",
"angular-ui-select": "0.8.2",
"font-awesome": "~4.2.0",
"mustache": "~1.0.0",
"canvg": "gabelerner/canvg"
},
"devDependencies": {
"angular-mocks": "~1.0.7",
"angular-scenario": "~1.0.7"
"angular-mocks": "1.2.18",
"angular-scenario": "1.2.18"
},
"resolutions": {
"angular": "1.2.7"
"angular": "1.2.18"
}
}

View File

@@ -47,6 +47,7 @@ module.exports = function(config) {
'app/bower_components/mousetrap/plugins/global-bind/mousetrap-global-bind.js',
'app/bower_components/select2/select2.js',
'app/bower_components/angular-ui-select2/src/select2.js',
'app/bower_components/angular-ui-select/dist/select.js',
'app/bower_components/underscore.string/lib/underscore.string.js',
'app/bower_components/marked/lib/marked.js',
'app/scripts/ng_highchart.js',
@@ -54,6 +55,7 @@ module.exports = function(config) {
'app/scripts/ui-bootstrap-tpls-0.5.0.min.js',
'app/bower_components/bucky/bucky.js',
'app/bower_components/pace/pace.js',
'app/bower_components/mustache/mustache.js',
'app/scripts/app.js',
'app/scripts/services/services.js',

View File

@@ -54,7 +54,7 @@ describe('VisualizationRenderer', function() {
});
describe('URL binding', function() {
/*describe('URL binding', function() {
beforeEach(inject(function($rootScope, $compile, $location) {
spyOn($location, 'search').andCallThrough();
@@ -85,5 +85,5 @@ describe('VisualizationRenderer', function() {
var searchFilters = angular.fromJson($location.search().filters);
expect(searchFilters[filters[0].friendlyName]).toEqual('newValue');
}));
});
});*/
});

View File

@@ -3,9 +3,10 @@ import urlparse
import redis
from statsd import StatsClient
from redash import settings, events
from redash import settings
from redash.query_runner import import_query_runners
__version__ = '0.4.0'
__version__ = '0.6.0'
def setup_logging():
@@ -14,8 +15,7 @@ def setup_logging():
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
logging.getLogger().setLevel(settings.LOG_LEVEL)
events.setup_logging(settings.EVENTS_LOG_PATH, settings.EVENTS_CONSOLE_OUTPUT)
logging.getLogger("passlib").setLevel("ERROR")
def create_redis_connection():
@@ -32,4 +32,6 @@ def create_redis_connection():
setup_logging()
redis_connection = create_redis_connection()
statsd_client = StatsClient(host=settings.STATSD_HOST, port=settings.STATSD_PORT, prefix=settings.STATSD_PREFIX)
statsd_client = StatsClient(host=settings.STATSD_HOST, port=settings.STATSD_PORT, prefix=settings.STATSD_PREFIX)
import_query_runners(settings.QUERY_RUNNERS)

View File

@@ -5,11 +5,9 @@ import time
import logging
from flask import request, make_response, redirect, url_for
from flask.ext.login import LoginManager, login_user, current_user
from flask.ext.googleauth import GoogleAuth, login
from werkzeug.contrib.fixers import ProxyFix
from flask.ext.login import LoginManager, login_user, current_user, logout_user
from redash import models, settings
from redash import models, settings, google_oauth
login_manager = LoginManager()
logger = logging.getLogger('authentication')
@@ -25,9 +23,38 @@ def sign(key, path, expires):
return h.hexdigest()
class HMACAuthentication(object):
@staticmethod
def api_key_authentication():
class Authentication(object):
def verify_authentication(self):
return False
def required(self, fn):
@functools.wraps(fn)
def decorated(*args, **kwargs):
if current_user.is_authenticated() or self.verify_authentication():
return fn(*args, **kwargs)
return make_response(redirect(url_for("login", next=request.url)))
return decorated
class ApiKeyAuthentication(Authentication):
def verify_authentication(self):
api_key = request.args.get('api_key')
query_id = request.view_args.get('query_id', None)
if query_id and api_key:
query = models.Query.get(models.Query.id == query_id)
if query.api_key and api_key == query.api_key:
login_user(models.ApiUser(query.api_key), remember=False)
return True
return False
class HMACAuthentication(Authentication):
def verify_authentication(self):
signature = request.args.get('signature')
expires = float(request.args.get('expires') or 0)
query_id = request.view_args.get('query_id', None)
@@ -43,62 +70,30 @@ class HMACAuthentication(object):
return False
def required(self, fn):
@functools.wraps(fn)
def decorated(*args, **kwargs):
if current_user.is_authenticated():
return fn(*args, **kwargs)
if self.api_key_authentication():
return fn(*args, **kwargs)
return make_response(redirect(url_for("login", next=request.url)))
return decorated
def validate_email(email):
if not settings.GOOGLE_APPS_DOMAIN:
return True
return email in settings.ALLOWED_EXTERNAL_USERS or email.endswith("@%s" % settings.GOOGLE_APPS_DOMAIN)
def create_and_login_user(app, user):
if not validate_email(user.email):
return
try:
user_object = models.User.get(models.User.email == user.email)
if user_object.name != user.name:
logger.debug("Updating user name (%r -> %r)", user_object.name, user.name)
user_object.name = user.name
user_object.save()
except models.User.DoesNotExist:
logger.debug("Creating user object (%r)", user.name)
user_object = models.User.create(name=user.name, email=user.email, groups = models.User.DEFAULT_GROUPS)
login_user(user_object, remember=True)
login.connect(create_and_login_user)
@login_manager.user_loader
def load_user(user_id):
# If the user previously logged in as an API user, user_id will be the API key, which would raise an
# exception when cast to int.
if isinstance(user_id, basestring) and not user_id.isdigit():
return None
return models.User.select().where(models.User.id == user_id).first()
def setup_authentication(app):
if settings.GOOGLE_OPENID_ENABLED:
openid_auth = GoogleAuth(app, url_prefix="/google_auth")
# If we don't have a list of external users, we can use Google's federated login, which limits
# the domain with which you can sign in.
if not settings.ALLOWED_EXTERNAL_USERS and settings.GOOGLE_APPS_DOMAIN:
openid_auth._OPENID_ENDPOINT = "https://www.google.com/a/%s/o8/ud?be=o8" % settings.GOOGLE_APPS_DOMAIN
login_manager.init_app(app)
login_manager.anonymous_user = models.AnonymousUser
app.wsgi_app = ProxyFix(app.wsgi_app)
app.secret_key = settings.COOKIE_SECRET
app.register_blueprint(google_oauth.blueprint)
if settings.AUTH_TYPE == 'hmac':
auth = HMACAuthentication()
elif settings.AUTH_TYPE == 'api_key':
auth = ApiKeyAuthentication()
else:
logger.warning("Unknown authentication type ({}). Using default (HMAC).".format(settings.AUTH_TYPE))
auth = HMACAuthentication()
return auth
return HMACAuthentication()

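Note: with AUTH_TYPE set to 'api_key', a query's results become fetchable by appending the query's API key to the results URL, with no session cookie. A hypothetical example (the exact results route isn't shown in this diff; key and query id are placeholders):

// jQuery is already a dependency of the frontend.
$.getJSON('/api/queries/42/results.json?api_key=' + apiKey, function (data) {
  console.log(data.query_result);
});
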
redash/cli/data_sources.py (new file, 129 lines)
View File

@@ -0,0 +1,129 @@
import json
import click
from flask.ext.script import Manager
from redash import models
from redash.query_runner import query_runners, validate_configuration
manager = Manager(help="Data sources management commands.")
@manager.command
def list():
"""List currently configured data sources"""
for i, ds in enumerate(models.DataSource.select()):
if i > 0:
print "-"*20
print "Id: {}\nName: {}\nType: {}\nOptions: {}".format(ds.id, ds.name, ds.type, ds.options)
def validate_data_source_type(type):
if type not in query_runners.keys():
print "Error: the type \"{}\" is not supported (supported types: {}).".format(type, ", ".join(query_runners.keys()))
exit()
def validate_data_source_options(type, options):
if not validate_configuration(type, options):
print "Error: invalid configuration."
exit()
@manager.command
def new(name=None, type=None, options=None):
"""Create new data source"""
if name is None:
name = click.prompt("Name")
if type is None:
print "Select type:"
for i, query_runner_name in enumerate(query_runners.keys()):
print "{}. {}".format(i+1, query_runner_name)
idx = 0
while idx < 1 or idx > len(query_runners.keys()):
idx = click.prompt("[{}-{}]".format(1, len(query_runners.keys())), type=int)
type = query_runners.keys()[idx-1]
else:
validate_data_source_type(type)
if options is None:
query_runner = query_runners[type]
schema = query_runner.configuration_schema()
types = {
'string': unicode,
'number': int,
'boolean': bool
}
options_obj = {}
for k, prop in schema['properties'].iteritems():
required = k in schema.get('required', [])
default_value = "<<DEFAULT_VALUE>>"
if required:
default_value = None
prompt = prop.get('title', k.capitalize())
if required:
prompt = "{} (required)".format(prompt)
else:
prompt = "{} (optional)".format(prompt)
value = click.prompt(prompt, default=default_value, type=types[prop['type']], show_default=False)
if value != default_value:
options_obj[k] = value
options = json.dumps(options_obj)
validate_data_source_options(type, options)
print "Creating {} data source ({}) with options:\n{}".format(type, name, options)
data_source = models.DataSource.create(name=name,
type=type,
options=options)
print "Id: {}".format(data_source.id)
@manager.command
def delete(name):
"""Deletes data source by name"""
try:
data_source = models.DataSource.get(models.DataSource.name==name)
print "Deleting data source: {} (id={})".format(name, data_source.id)
data_source.delete_instance()
except models.DataSource.DoesNotExist:
print "Couldn't find data source named: {}".format(name)
def update_attr(obj, attr, new_value):
if new_value is not None:
old_value = getattr(obj, attr)
print "Updating {}: {} -> {}".format(attr, old_value, new_value)
setattr(obj, attr, new_value)
@manager.option('name', default=None, help="name of data source to edit")
@manager.option('--name', dest='new_name', default=None, help="new name for the data source")
@manager.option('--options', dest='options', default=None, help="updated options for the data source")
@manager.option('--type', dest='type', default=None, help="new type for the data source")
def edit(name, new_name=None, options=None, type=None):
"""Edit data source settings (name, options, type)"""
try:
if type is not None:
validate_data_source_type(type)
data_source = models.DataSource.get(models.DataSource.name==name)
if options is not None:
validate_data_source_options(data_source.type, options)
update_attr(data_source, "name", new_name)
update_attr(data_source, "type", type)
update_attr(data_source, "options", options)
data_source.save()
except models.DataSource.DoesNotExist:
print "Couldn't find data source named: {}".format(name)

redash/cli/database.py (new file, 19 lines)
View File

@@ -0,0 +1,19 @@
from flask.ext.script import Manager
manager = Manager(help="Manages the database (create/drop tables).")
@manager.command
def create_tables():
"""Creates the database tables."""
from redash.models import create_db, init_db
create_db(True, False)
init_db()
@manager.command
def drop_tables():
"""Drop the database tables."""
from redash.models import create_db
create_db(False, True)

redash/cli/users.py (new file, 74 lines)
View File

@@ -0,0 +1,74 @@
from flask.ext.script import Manager, prompt_pass
from redash import models
manager = Manager(help="Users management commands.")
@manager.option('email', help="email address of the user to grant admin to")
def grant_admin(email):
try:
user = models.User.get_by_email(email)
user.groups.append('admin')
user.save()
print "User updated."
except models.User.DoesNotExist:
print "User [%s] not found." % email
@manager.option('email', help="User's email")
@manager.option('name', help="User's full name")
@manager.option('--admin', dest='is_admin', action="store_true", default=False, help="set user as admin")
@manager.option('--google', dest='google_auth', action="store_true", default=False, help="user uses Google Auth to login")
@manager.option('--password', dest='password', default=None, help="Password for users who don't use Google Auth (leave blank for prompt).")
@manager.option('--groups', dest='groups', default=models.User.DEFAULT_GROUPS, help="Comma-separated list of groups (leave blank for default).")
def create(email, name, groups, is_admin=False, google_auth=False, password=None):
print "Creating user (%s, %s)..." % (email, name)
print "Admin: %r" % is_admin
print "Login with Google Auth: %r\n" % google_auth
if isinstance(groups, basestring):
groups = groups.split(',')
if '' in groups:
groups.remove('')  # in case groups was an empty string
if is_admin:
groups += ['admin']
user = models.User(email=email, name=name, groups=groups)
if not google_auth:
password = password or prompt_pass("Password")
user.hash_password(password)
try:
user.save()
except Exception, e:
print "Failed creating user: %s" % e.message
@manager.option('email', help="email address of user to delete")
def delete(email):
deleted_count = models.User.delete().where(models.User.email == email).execute()
print "Deleted %d users." % deleted_count
@manager.option('password', help="new password for the user")
@manager.option('email', help="email address of the user to change password for")
def password(email, password):
try:
user = models.User.get_by_email(email)
user.hash_password(password)
user.save()
print "User updated."
except models.User.DoesNotExist:
print "User [%s] not found." % email
@manager.command
def list():
"""List all users"""
for i, user in enumerate(models.User.select()):
if i > 0:
print "-"*20
print "Id: {}\nName: {}\nEmail: {}".format(user.id, user.name.encode('utf-8'), user.email)

View File

@@ -7,26 +7,23 @@ but this is only due to configuration issues and temporary.
import csv
import hashlib
import json
import numbers
import cStringIO
import datetime
import time
import logging
from flask import render_template, send_from_directory, make_response, request, jsonify, redirect, \
session, url_for
from flask.ext.restful import Resource, abort
from flask_login import current_user, login_user, logout_user
import sqlparse
import events
from permissions import require_permission
from redash import redis_connection, statsd_client, models, settings, utils, __version__
from redash.wsgi import app, auth, api
from redash.tasks import QueryTask, record_event
from redash.cache import headers as cache_headers
from redash.permissions import require_permission
from redash.query_runner import query_runners, validate_configuration
import logging
from tasks import QueryTask
from cache import headers as cache_headers
@app.route('/ping', methods=['GET'])
def ping():
@@ -38,6 +35,7 @@ def ping():
@app.route('/queries')
@app.route('/queries/<query_id>')
@app.route('/queries/<query_id>/<anything>')
@app.route('/personal')
@app.route('/')
@auth.required
def index(**kwargs):
@@ -54,8 +52,7 @@ def index(**kwargs):
}
features = {
'clientSideMetrics': settings.CLIENT_SIDE_METRICS,
'flowerUrl': settings.CELERY_FLOWER_URL
'clientSideMetrics': settings.CLIENT_SIDE_METRICS
}
return render_template("index.html", user=json.dumps(user), name=settings.NAME,
@@ -69,8 +66,7 @@ def login():
return redirect(request.args.get('next') or '/')
if not settings.PASSWORD_LOGIN_ENABLED:
blueprint = app.extensions['googleauth'].blueprint
return redirect(url_for("%s.login" % blueprint.name, next=request.args.get('next')))
return redirect(url_for("google_oauth.authorize", next=request.args.get('next')))
if request.method == 'POST':
user = models.User.select().where(models.User.email == request.form['username']).first()
@@ -84,7 +80,7 @@ def login():
analytics=settings.ANALYTICS,
next=request.args.get('next'),
username=request.form.get('username', ''),
show_google_openid=settings.GOOGLE_OPENID_ENABLED)
show_google_openid=settings.GOOGLE_OAUTH_ENABLED)
@app.route('/logout')
@@ -104,6 +100,7 @@ def status_api():
status['version'] = __version__
status['queries_count'] = models.Query.select().count()
status['query_results_count'] = models.QueryResult.select().count()
status['unused_query_results_count'] = models.QueryResult.unused().count()
status['dashboards_count'] = models.Dashboard.select().count()
status['widgets_count'] = models.Widget.select().count()
@@ -111,7 +108,6 @@ def status_api():
manager_status = redis_connection.hgetall('redash:status')
status['manager'] = manager_status
status['manager']['queue_size'] = redis_connection.llen('queries') + redis_connection.llen('scheduled_queries')
status['manager']['outdated_queries_count'] = models.Query.outdated_queries().count()
queues = {}
@@ -160,7 +156,7 @@ class EventAPI(BaseResource):
def post(self):
events_list = request.get_json(force=True)
for event in events_list:
events.record_event(event)
record_event.delay(event)
api.add_resource(EventAPI, '/api/events', endpoint='events')
@@ -177,14 +173,43 @@ class MetricsAPI(BaseResource):
api.add_resource(MetricsAPI, '/api/metrics/v1/send', endpoint='metrics')
class DataSourceTypeListAPI(BaseResource):
@require_permission("admin")
def get(self):
return [q.to_dict() for q in query_runners.values()]
api.add_resource(DataSourceTypeListAPI, '/api/data_sources/types', endpoint='data_source_types')
class DataSourceListAPI(BaseResource):
def get(self):
data_sources = [ds.to_dict() for ds in models.DataSource.all()]
return data_sources
@require_permission("admin")
def post(self):
req = request.get_json(True)
required_fields = ('options', 'name', 'type')
for f in required_fields:
if f not in req:
abort(400)
if not validate_configuration(req['type'], req['options']):
abort(400)
datasource = models.DataSource.create(name=req['name'], type=req['type'], options=req['options'])
return datasource.to_dict()
api.add_resource(DataSourceListAPI, '/api/data_sources', endpoint='data_sources')
class DashboardRecentAPI(BaseResource):
def get(self):
return [d.to_dict() for d in models.Dashboard.recent(current_user.id).limit(20)]
class DashboardListAPI(BaseResource):
def get(self):
dashboards = [d.to_dict() for d in
@@ -229,6 +254,7 @@ class DashboardAPI(BaseResource):
dashboard.save()
api.add_resource(DashboardListAPI, '/api/dashboards', endpoint='dashboards')
api.add_resource(DashboardRecentAPI, '/api/dashboards/recent', endpoint='recent_dashboards')
api.add_resource(DashboardAPI, '/api/dashboards/<dashboard_slug>', endpoint='dashboard')
@@ -268,19 +294,26 @@ class WidgetAPI(BaseResource):
@require_permission('edit_dashboard')
def delete(self, widget_id):
widget = models.Widget.get(models.Widget.id == widget_id)
# TODO: reposition existing ones
layout = json.loads(widget.dashboard.layout)
layout = map(lambda row: filter(lambda w: w != widget_id, row), layout)
layout = filter(lambda row: len(row) > 0, layout)
widget.dashboard.layout = json.dumps(layout)
widget.dashboard.save()
widget.delete_instance()
api.add_resource(WidgetListAPI, '/api/widgets', endpoint='widgets')
api.add_resource(WidgetAPI, '/api/widgets/<int:widget_id>', endpoint='widget')
class QuerySearchAPI(BaseResource):
@require_permission('view_query')
def get(self):
term = request.args.get('q', '')
return [q.to_dict() for q in models.Query.search(term)]
class QueryRecentAPI(BaseResource):
@require_permission('view_query')
def get(self):
return [q.to_dict() for q in models.Query.recent(current_user.id).limit(20)]
class QueryListAPI(BaseResource):
@require_permission('create_query')
def post(self):
@@ -305,6 +338,8 @@ class QueryListAPI(BaseResource):
class QueryAPI(BaseResource):
@require_permission('edit_query')
def post(self, query_id):
query = models.Query.get_by_id(query_id)
query_def = request.get_json(force=True)
for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'user']:
query_def.pop(field, None)
@@ -329,6 +364,20 @@ class QueryAPI(BaseResource):
else:
abort(404, message="Query not found.")
# TODO: move to resource of its own? (POST /queries/{id}/archive)
def delete(self, query_id):
q = models.Query.get(models.Query.id == query_id)
if q:
if q.user.id == self.current_user.id or self.current_user.has_permission('admin'):
q.archive()
else:
abort(403)
else:
abort(404, message="Query not found.")
api.add_resource(QuerySearchAPI, '/api/queries/search', endpoint='queries_search')
api.add_resource(QueryRecentAPI, '/api/queries/recent', endpoint='recent_queries')
api.add_resource(QueryListAPI, '/api/queries', endpoint='queries')
api.add_resource(QueryAPI, '/api/queries/<query_id>', endpoint='query')
@@ -339,7 +388,7 @@ class VisualizationListAPI(BaseResource):
kwargs = request.get_json(force=True)
kwargs['options'] = json.dumps(kwargs['options'])
kwargs['query'] = kwargs.pop('query_id')
vis = models.Visualization(**kwargs)
vis.save()
@@ -414,48 +463,66 @@ class QueryResultListAPI(BaseResource):
class QueryResultAPI(BaseResource):
@require_permission('view_query')
def get(self, query_result_id):
query_result = models.QueryResult.get_by_id(query_result_id)
if query_result:
data = json.dumps({'query_result': query_result.to_dict()}, cls=utils.JSONEncoder)
return make_response(data, 200, cache_headers)
else:
abort(404)
@staticmethod
def csv_response(query_result):
s = cStringIO.StringIO()
query_data = json.loads(query_result.data)
writer = csv.DictWriter(s, fieldnames=[col['name'] for col in query_data['columns']])
writer.writer = utils.UnicodeWriter(s)
writer.writeheader()
for row in query_data['rows']:
writer.writerow(row)
headers = {'Content-Type': "text/csv; charset=UTF-8"}
headers.update(cache_headers)
return make_response(s.getvalue(), 200, headers)
class CsvQueryResultsAPI(BaseResource):
@require_permission('view_query')
def get(self, query_id, query_result_id=None):
if not query_result_id:
def get(self, query_id=None, query_result_id=None, filetype='json'):
if query_result_id is None and query_id is not None:
query = models.Query.get(models.Query.id == query_id)
if query:
query_result_id = query._data['latest_query_data']
query_result = query_result_id and models.QueryResult.get_by_id(query_result_id)
if query_result_id:
query_result = models.QueryResult.get_by_id(query_result_id)
if query_result:
s = cStringIO.StringIO()
if isinstance(self.current_user, models.ApiUser):
event = {
'user_id': None,
'action': 'api_get',
'timestamp': int(time.time()),
'api_key': self.current_user.id,
'file_type': filetype
}
query_data = json.loads(query_result.data)
writer = csv.DictWriter(s, fieldnames=[col['name'] for col in query_data['columns']])
writer.writer = utils.UnicodeWriter(s)
writer.writeheader()
for row in query_data['rows']:
for k, v in row.iteritems():
if isinstance(v, numbers.Number) and (v > 1000 * 1000 * 1000 * 100):
row[k] = datetime.datetime.fromtimestamp(v/1000.0)
if query_id:
event['object_type'] = 'query'
event['object_id'] = query_id
else:
event['object_type'] = 'query_result'
event['object_id'] = query_result_id
writer.writerow(row)
record_event.delay(event)
if filetype == 'json':
data = json.dumps({'query_result': query_result.to_dict()}, cls=utils.JSONEncoder)
return make_response(data, 200, cache_headers)
else:
return self.csv_response(query_result)
return make_response(s.getvalue(), 200, {'Content-Type': "text/csv; charset=UTF-8"})
else:
abort(404)
api.add_resource(CsvQueryResultsAPI, '/api/queries/<query_id>/results/<query_result_id>.csv',
'/api/queries/<query_id>/results.csv',
endpoint='csv_query_results')
api.add_resource(QueryResultListAPI, '/api/query_results', endpoint='query_results')
api.add_resource(QueryResultAPI, '/api/query_results/<query_result_id>', endpoint='query_result')
api.add_resource(QueryResultAPI,
'/api/query_results/<query_result_id>',
'/api/queries/<query_id>/results.<filetype>',
'/api/queries/<query_id>/results/<query_result_id>.<filetype>',
endpoint='query_result')
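A sketch of how these routes are meant to be hit (the query id and key are placeholders, and passing the key as an api_key query parameter is an assumption about the authentication layer, which isn't shown in this hunk):
import requests

base = 'http://localhost:5000/api/queries/42'  # 42 is a placeholder id
params = {'api_key': '<query api key>'}        # assumed auth mechanism

csv_text = requests.get(base + '/results.csv', params=params).text    # CsvQueryResultsAPI
as_json = requests.get(base + '/results.json', params=params).json()  # QueryResultAPI, filetype='json'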
class JobAPI(BaseResource):


@@ -1,30 +0,0 @@
import json
def get_query_runner(connection_type, connection_string):
if connection_type == 'mysql':
from redash.data import query_runner_mysql
runner = query_runner_mysql.mysql(connection_string)
elif connection_type == 'graphite':
from redash.data import query_runner_graphite
connection_params = json.loads(connection_string)
if connection_params['auth']:
connection_params['auth'] = tuple(connection_params['auth'])
else:
connection_params['auth'] = None
runner = query_runner_graphite.graphite(connection_params)
elif connection_type == 'bigquery':
from redash.data import query_runner_bigquery
connection_params = json.loads(connection_string)
runner = query_runner_bigquery.bigquery(connection_params)
elif connection_type == 'script':
from redash.data import query_runner_script
runner = query_runner_script.script(connection_string)
elif connection_type == 'url':
from redash.data import query_runner_url
runner = query_runner_url.url(connection_string)
else:
from redash.data import query_runner_pg
runner = query_runner_pg.pg(connection_string)
return runner


@@ -1,46 +0,0 @@
"""
QueryRunner for Graphite.
"""
import json
import datetime
import requests
from redash.utils import JSONEncoder
def graphite(connection_params):
def transform_result(response):
columns = [{'name': 'Time::x'}, {'name': 'value::y'}, {'name': 'name::series'}]
rows = []
for series in response.json():
for values in series['datapoints']:
timestamp = datetime.datetime.fromtimestamp(int(values[1]))
rows.append({'Time::x': timestamp, 'name::series': series['target'], 'value::y': values[0]})
data = {'columns': columns, 'rows': rows}
return json.dumps(data, cls=JSONEncoder)
def query_runner(query):
base_url = "%s/render?format=json&" % connection_params['url']
url = "%s%s" % (base_url, "&".join(query.split("\n")))
error = None
data = None
try:
response = requests.get(url, auth=connection_params['auth'],
verify=connection_params['verify'])
if response.status_code == 200:
data = transform_result(response)
else:
error = "Failed getting results (%d)" % response.status_code
except Exception, ex:
data = None
error = ex.message
return data, error
query_runner.annotate_query = False
return query_runner


@@ -1,64 +0,0 @@
"""
QueryRunner is the function the workers use to execute queries. This is the MySQL
version, but we can easily write another to support additional databases.
Because the worker just passes the query through, this can be used with any data store that has
some sort of query language (for example: HiveQL).
"""
import logging
import json
import MySQLdb
import sys
from redash.utils import JSONEncoder
def mysql(connection_string):
if connection_string.endswith(';'):
connection_string = connection_string[0:-1]
def query_runner(query):
connections_params = [entry.split('=')[1] for entry in connection_string.split(';')]
connection = MySQLdb.connect(*connections_params)
cursor = connection.cursor()
logging.debug("mysql got query: %s", query)
try:
cursor.execute(query)
data = cursor.fetchall()
cursor_desc = cursor.description
if (cursor_desc != None):
num_fields = len(cursor_desc)
column_names = [i[0] for i in cursor.description]
rows = [dict(zip(column_names, row)) for row in data]
columns = [{'name': col_name,
'friendly_name': col_name,
'type': None} for col_name in column_names]
data = {'columns': columns, 'rows': rows}
json_data = json.dumps(data, cls=JSONEncoder)
error = None
else:
json_data = None
error = "No data was returned."
cursor.close()
except MySQLdb.Error, e:
json_data = None
error = e.args[1]
except KeyboardInterrupt:
error = "Query cancelled by user."
json_data = None
except Exception as e:
raise sys.exc_info()[1], None, sys.exc_info()[2]
finally:
connection.close()
return json_data, error
return query_runner


@@ -1,110 +0,0 @@
"""
QueryRunner is the function the workers use to execute queries. This is the PostgreSQL
version, but we can easily write another to support additional databases (MySQL and others).
Because the worker just passes the query through, this can be used with any data store that has
some sort of query language (for example: HiveQL).
"""
import json
import sys
import select
import logging
import psycopg2
from redash.utils import JSONEncoder
types_map = {
20: 'integer',
21: 'integer',
23: 'integer',
700: 'float',
1700: 'float',
701: 'float',
16: 'boolean',
1082: 'date',
1114: 'datetime',
1184: 'datetime',
1014: 'string',
1015: 'string',
1008: 'string',
1009: 'string',
2951: 'string'
}
def pg(connection_string):
def column_friendly_name(column_name):
return column_name
def wait(conn):
while 1:
try:
state = conn.poll()
if state == psycopg2.extensions.POLL_OK:
break
elif state == psycopg2.extensions.POLL_WRITE:
select.select([], [conn.fileno()], [])
elif state == psycopg2.extensions.POLL_READ:
select.select([conn.fileno()], [], [])
else:
raise psycopg2.OperationalError("poll() returned %s" % state)
except select.error:
raise psycopg2.OperationalError("select.error received")
def query_runner(query):
connection = psycopg2.connect(connection_string, async=True)
wait(connection)
cursor = connection.cursor()
try:
cursor.execute(query)
wait(connection)
# A set would be more efficient for the duplicate check, but it doesn't preserve column order;
# the data is small enough that scanning a list is fine.
column_names = []
columns = []
duplicates_counter = 1
for column in cursor.description:
# TODO: this deduplication needs to be generalized and reused in all query runners.
column_name = column.name
if column_name in column_names:
column_name = column_name + str(duplicates_counter)
duplicates_counter += 1
column_names.append(column_name)
columns.append({
'name': column_name,
'friendly_name': column_friendly_name(column_name),
'type': types_map.get(column.type_code, None)
})
rows = [dict(zip(column_names, row)) for row in cursor]
data = {'columns': columns, 'rows': rows}
json_data = json.dumps(data, cls=JSONEncoder)
error = None
cursor.close()
except (select.error, OSError) as e:
logging.exception(e)
error = "Query interrupted. Please retry."
json_data = None
except psycopg2.DatabaseError as e:
logging.exception(e)
json_data = None
error = e.message
except KeyboardInterrupt:
connection.cancel()
error = "Query cancelled by user."
json_data = None
except Exception as e:
raise sys.exc_info()[1], None, sys.exc_info()[2]
finally:
connection.close()
return json_data, error
return query_runner


@@ -1,48 +0,0 @@
import json
import logging
import sys
import os
import subprocess
# We use subprocess.check_output for simplicity.
# If someone really wants to run this on Python < 2.7, they can update the code to use
# Popen, check the return codes, and read the standard output into a variable.
if "check_output" not in subprocess.__dict__:
print "ERROR: This runner uses the subprocess.check_output function, which was added in Python 2.7"
def script(connection_string):
def query_runner(query):
try:
json_data = None
error = None
# Poor man's protection against running scripts from outside the scripts directory
if connection_string.find("../") > -1:
return None, "Scripts can only be run from the configured scripts directory"
query = query.strip()
script = os.path.join(connection_string, query)
if not os.path.exists(script):
return None, "Script '%s' not found in script directory" % query
output = subprocess.check_output(script, shell=False)
if output != None:
output = output.strip()
if output != "":
return output, None
error = "Error reading output"
except subprocess.CalledProcessError as e:
return None, str(e)
except KeyboardInterrupt:
error = "Query cancelled by user."
json_data = None
except Exception as e:
raise sys.exc_info()[1], None, sys.exc_info()[2]
return json_data, error
query_runner.annotate_query = False
return query_runner


@@ -1,23 +0,0 @@
import logging
import json
logger = logging.getLogger("redash.events")
logger.propagate = False
def setup_logging(log_path, console_output=False):
if log_path:
fh = logging.FileHandler(log_path)
formatter = logging.Formatter('%(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
if console_output:
handler = logging.StreamHandler()
formatter = logging.Formatter('[%(name)s] %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
def record_event(event):
logger.info(json.dumps(event))

redash/google_oauth.py Normal file

@@ -0,0 +1,81 @@
import logging
from flask.ext.login import login_user
import requests
from flask import redirect, url_for, Blueprint
from flask_oauth import OAuth
from redash import models, settings
logger = logging.getLogger('google_oauth')
oauth = OAuth()
request_token_params = {'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile', 'response_type': 'code'}
if settings.GOOGLE_APPS_DOMAIN:
request_token_params['hd'] = settings.GOOGLE_APPS_DOMAIN
else:
logger.warning("No Google Apps domain defined, all Google accounts allowed.")
google = oauth.remote_app('google',
base_url='https://www.google.com/accounts/',
authorize_url='https://accounts.google.com/o/oauth2/auth',
request_token_url=None,
request_token_params=request_token_params,
access_token_url='https://accounts.google.com/o/oauth2/token',
access_token_method='POST',
access_token_params={'grant_type': 'authorization_code'},
consumer_key=settings.GOOGLE_CLIENT_ID,
consumer_secret=settings.GOOGLE_CLIENT_SECRET)
blueprint = Blueprint('google_oauth', __name__)
def get_user_profile(access_token):
headers = {'Authorization': 'OAuth '+access_token}
response = requests.get('https://www.googleapis.com/oauth2/v1/userinfo', headers=headers)
if response.status_code == 401:
logger.warning("Failed getting user profile (response code 401).")
return None
return response.json()
def create_and_login_user(name, email):
try:
user_object = models.User.get(models.User.email == email)
if user_object.name != name:
logger.debug("Updating user name (%r -> %r)", user_object.name, name)
user_object.name = name
user_object.save()
except models.User.DoesNotExist:
logger.debug("Creating user object (%r)", name)
user_object = models.User.create(name=name, email=email, groups=models.User.DEFAULT_GROUPS)
login_user(user_object, remember=True)
@blueprint.route('/oauth/google', endpoint="authorize")
def login():
# TODO: support next
callback=url_for('.callback', _external=True)
logger.debug("Callback url: %s", callback)
return google.authorize(callback=callback)
@blueprint.route('/oauth/google_callback', endpoint="callback")
@google.authorized_handler
def authorized(resp):
access_token = resp['access_token']
if access_token is None:
logger.warning("Access token missing in call back request.")
return redirect(url_for('login'))
profile = get_user_profile(access_token)
if profile is None:
return redirect(url_for('login'))
create_and_login_user(profile['name'], profile['email'])
return redirect(url_for('index'))


@@ -1,8 +1,11 @@
import contextlib
import json
import logging
import os
from redash import models
from flask.ext.script import Manager
logger = logging.getLogger()
class Importer(object):
def __init__(self, object_mapping=None, data_source=None):
@@ -22,22 +25,17 @@ class Importer(object):
return query_result
def import_query(self, user, query):
query_result = self.import_query_result(query['latest_query_data'])
new_query = self._get_or_create(models.Query, query['id'], name=query['name'],
user=user,
ttl=-1,
query=query['query'],
query_hash=query['query_hash'],
description=query['description'],
latest_query_data=query_result,
data_source=self.data_source)
return new_query
def import_visualization(self, user, visualization):
query = self.import_query(user, visualization['query'])
@@ -50,9 +48,13 @@ class Importer(object):
return new_visualization
def import_widget(self, dashboard, widget):
visualization = self.import_visualization(dashboard.user, widget['visualization'])
if 'visualization' in widget:
visualization = self.import_visualization(dashboard.user, widget['visualization'])
else:
visualization = None
new_widget = self._get_or_create(models.Widget, widget['id'],
text=widget.get('text', None),
dashboard=dashboard,
width=widget['width'],
options=json.dumps(widget['options']),
@@ -91,6 +93,7 @@ class Importer(object):
def _get_or_create(self, object_type, external_id, **properties):
internal_id = self._get_mapping(object_type, external_id)
logger.info("Creating %s with external id: %s and internal id: %s", object_type, external_id, internal_id)
if internal_id:
update = object_type.update(**properties).where(object_type.id == internal_id)
update.execute()
@@ -114,11 +117,21 @@ export_manager = Manager(help="export utilities")
@contextlib.contextmanager
def importer_with_mapping_file(mapping_filename):
def importer_with_mapping_file(mapping_filename, data_source_id=None):
# Touch the file in case it doesn't exist
if not os.path.isfile(mapping_filename):
with open(mapping_filename, 'w') as f:
f.write("{}")
with open(mapping_filename) as f:
mapping = json.loads(f.read())
importer = Importer(object_mapping=mapping, data_source=get_data_source())
if data_source_id is not None:
data_source = models.DataSource.get_by_id(data_source_id)
else:
data_source = get_data_source()
importer = Importer(object_mapping=mapping, data_source=data_source)
yield importer
with open(mapping_filename, 'w') as f:
@@ -146,12 +159,13 @@ def query(mapping_filename, query_filename, user_id):
@import_manager.command
def dashboard(mapping_filename, dashboard_filename, user_id):
def dashboard(mapping_filename, dashboard_filename, user_id, data_source_id=None):
user = models.User.get_by_id(user_id)
with open(dashboard_filename) as f:
dashboard = json.loads(f.read())
with importer_with_mapping_file(mapping_filename) as importer:
with importer_with_mapping_file(mapping_filename, data_source_id) as importer:
importer.import_dashboard(user, dashboard)


@@ -9,7 +9,7 @@ import itertools
import peewee
from passlib.apps import custom_app_context as pwd_context
from playhouse.postgres_ext import ArrayField
from playhouse.postgres_ext import ArrayField, DateTimeTZField, PostgresqlExtDatabase
from flask.ext.login import UserMixin, AnonymousUserMixin
from redash import utils, settings
@@ -18,8 +18,9 @@ from redash import utils, settings
class Database(object):
def __init__(self):
self.database_config = dict(settings.DATABASE_CONFIG)
self.database_config['register_hstore'] = False
self.database_name = self.database_config.pop('name')
self.database = peewee.PostgresqlDatabase(self.database_name, **self.database_config)
self.database = PostgresqlExtDatabase(self.database_name, **self.database_config)
self.app = None
self.pid = os.getpid()
@@ -60,16 +61,32 @@ class BaseModel(peewee.Model):
return cls.get(cls.id == model_id)
class AnonymousUser(AnonymousUserMixin):
class PermissionsCheckMixin(object):
def has_permission(self, permission):
return self.has_permissions((permission,))
def has_permissions(self, permissions):
return all(permission in self.permissions for permission in permissions)
class AnonymousUser(AnonymousUserMixin, PermissionsCheckMixin):
@property
def permissions(self):
return []
class ApiUser(UserMixin):
class ApiUser(UserMixin, PermissionsCheckMixin):
def __init__(self, api_key):
self.id = api_key
def __repr__(self):
return u"<ApiUser: {}>".format(self.id)
@property
def permissions(self):
return ['view_query']
@@ -83,7 +100,7 @@ class Group(BaseModel):
name = peewee.CharField(max_length=100)
permissions = ArrayField(peewee.CharField, default=DEFAULT_PERMISSIONS)
tables = ArrayField(peewee.CharField)
created_at = peewee.DateTimeField(default=datetime.datetime.now)
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'groups'
@@ -101,7 +118,7 @@ class Group(BaseModel):
return unicode(self.id)
class User(BaseModel, UserMixin):
class User(BaseModel, UserMixin, PermissionsCheckMixin):
DEFAULT_GROUPS = ['default']
id = peewee.PrimaryKeyField()
@@ -160,7 +177,7 @@ class ActivityLog(BaseModel):
user = peewee.ForeignKeyField(User)
type = peewee.IntegerField()
activity = peewee.TextField()
created_at = peewee.DateTimeField(default=datetime.datetime.now)
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'activity_log'
@@ -185,7 +202,7 @@ class DataSource(BaseModel):
options = peewee.TextField()
queue_name = peewee.CharField(default="queries")
scheduled_queue_name = peewee.CharField(default="queries")
created_at = peewee.DateTimeField(default=datetime.datetime.now)
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'data_sources'
@@ -209,7 +226,7 @@ class QueryResult(BaseModel):
query = peewee.TextField()
data = peewee.TextField()
runtime = peewee.FloatField()
retrieved_at = peewee.DateTimeField()
retrieved_at = DateTimeTZField()
class Meta:
db_table = 'query_results'
@@ -225,6 +242,15 @@ class QueryResult(BaseModel):
'retrieved_at': self.retrieved_at
}
@classmethod
def unused(cls):
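# Query results older than a week that no query references as its latest result
# (hence the LEFT OUTER JOIN and the Query.id == None filter).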
week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
unused_results = cls.select().where(Query.id == None, cls.retrieved_at < week_ago)\
.join(Query, join_type=peewee.JOIN_LEFT_OUTER)
return unused_results
@classmethod
def get_latest(cls, data_source, query, ttl=0):
query_hash = utils.gen_query_hash(query)
@@ -274,7 +300,8 @@ class Query(BaseModel):
ttl = peewee.IntegerField()
user_email = peewee.CharField(max_length=360, null=True)
user = peewee.ForeignKeyField(User)
created_at = peewee.DateTimeField(default=datetime.datetime.now)
is_archived = peewee.BooleanField(default=False, index=True)
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'queries'
@@ -295,6 +322,7 @@ class Query(BaseModel):
'query_hash': self.query_hash,
'ttl': self.ttl,
'api_key': self.api_key,
'is_archived': self.is_archived,
'created_at': self.created_at,
'data_source_id': self._data.get('data_source', None)
}
@@ -305,11 +333,8 @@ class Query(BaseModel):
d['user_id'] = self._data['user']
if with_stats:
d['avg_runtime'] = self.avg_runtime
d['min_runtime'] = self.min_runtime
d['max_runtime'] = self.max_runtime
d['last_retrieved_at'] = self.last_retrieved_at
d['times_retrieved'] = self.times_retrieved
d['retrieved_at'] = self.retrieved_at
d['runtime'] = self.runtime
if with_visualizations:
d['visualizations'] = [vis.to_dict(with_query=False)
@@ -317,17 +342,24 @@ class Query(BaseModel):
return d
def archive(self):
self.is_archived = True
self.ttl = -1
for vis in self.visualizations:
for w in vis.widgets:
w.delete_instance()
self.save()
@classmethod
def all_queries(cls):
q = Query.select(Query, User,
peewee.fn.Count(QueryResult.id).alias('times_retrieved'),
peewee.fn.Avg(QueryResult.runtime).alias('avg_runtime'),
peewee.fn.Min(QueryResult.runtime).alias('min_runtime'),
peewee.fn.Max(QueryResult.runtime).alias('max_runtime'),
peewee.fn.Max(QueryResult.retrieved_at).alias('last_retrieved_at'))\
q = Query.select(Query, User, QueryResult.retrieved_at, QueryResult.runtime)\
.join(QueryResult, join_type=peewee.JOIN_LEFT_OUTER)\
.switch(Query).join(User)\
.group_by(Query.id, User.id)
.where(Query.is_archived==False)\
.group_by(Query.id, User.id, QueryResult.id, QueryResult.retrieved_at, QueryResult.runtime)\
.order_by(cls.created_at.desc())
return q
@@ -339,6 +371,7 @@ class Query(BaseModel):
peewee.Func('first_value', cls.id).over(partition_by=[cls.query_hash, cls.data_source])) \
.join(QueryResult) \
.where(cls.ttl > 0,
cls.is_archived==False,
(QueryResult.retrieved_at +
(cls.ttl * peewee.SQL("interval '1 second'"))) <
peewee.SQL("(now() at time zone 'utc')"))
@@ -348,6 +381,31 @@ class Query(BaseModel):
return queries
@classmethod
def search(cls, term):
# This is a very naive implementation of search, to be replaced with a PostgreSQL full-text search solution.
where = (cls.name**u"%{}%".format(term)) | (cls.description**u"%{}%".format(term))
if term.isdigit():
where |= cls.id == term
where &= cls.is_archived == False
return cls.select().where(where).order_by(cls.created_at.desc())
@classmethod
def recent(cls, user_id):
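# Queries this user generated events for ('edit', 'execute', etc.) in the last 7 days, most active first.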
return cls.select().where(Event.created_at > peewee.SQL("current_date - 7")).\
join(Event, on=(Query.id == peewee.SQL("t2.object_id::integer"))).\
where(Event.action << ('edit', 'execute', 'edit_name', 'edit_description', 'view_source')).\
where(Event.user == user_id).\
where(~(Event.object_id >> None)).\
where(Event.object_type == 'query'). \
where(cls.is_archived == False).\
group_by(Event.object_id, Query.id).\
order_by(peewee.SQL("count(0) desc"))
@classmethod
def update_instance(cls, query_id, **kwargs):
if 'query' in kwargs:
@@ -366,6 +424,14 @@ class Query(BaseModel):
self.api_key = hashlib.sha1(
u''.join((str(time.time()), self.query, str(self._data['user']), self.name)).encode('utf-8')).hexdigest()
@property
def runtime(self):
return self.latest_query_data.runtime
@property
def retrieved_at(self):
return self.latest_query_data.retrieved_at
def __unicode__(self):
return unicode(self.id)
@@ -379,7 +445,7 @@ class Dashboard(BaseModel):
layout = peewee.TextField()
dashboard_filters_enabled = peewee.BooleanField(default=False)
is_archived = peewee.BooleanField(default=False, index=True)
created_at = peewee.DateTimeField(default=datetime.datetime.now)
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'dashboards'
@@ -428,6 +494,17 @@ class Dashboard(BaseModel):
def get_by_slug(cls, slug):
return cls.get(cls.slug == slug)
@classmethod
def recent(cls, user_id):
return cls.select().where(Event.created_at > peewee.SQL("current_date - 7")). \
join(Event, on=(Dashboard.id == peewee.SQL("t2.object_id::integer"))). \
where(Event.action << ('edit', 'view')).\
where(Event.user == user_id). \
where(~(Event.object_id >> None)). \
where(Event.object_type == 'dashboard'). \
group_by(Event.object_id, Dashboard.id). \
order_by(peewee.SQL("count(0) desc"))
def save(self, *args, **kwargs):
if not self.slug:
self.slug = utils.slugify(self.name)
@@ -479,7 +556,7 @@ class Widget(BaseModel):
width = peewee.IntegerField()
options = peewee.TextField()
dashboard = peewee.ForeignKeyField(Dashboard, related_name='widgets', index=True)
created_at = peewee.DateTimeField(default=datetime.datetime.now)
created_at = DateTimeTZField(default=datetime.datetime.now)
# unused; kept for backward compatibility:
type = peewee.CharField(max_length=100, null=True)
@@ -501,18 +578,26 @@ class Widget(BaseModel):
d['visualization'] = self.visualization.to_dict()
return d
def __unicode__(self):
return u"%s" % self.id
def delete_instance(self, *args, **kwargs):
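# The dashboard layout is a JSON-encoded list of rows, each a list of widget ids:
# drop this widget from every row, then drop any rows left empty.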
layout = json.loads(self.dashboard.layout)
layout = map(lambda row: filter(lambda w: w != self.id, row), layout)
layout = filter(lambda row: len(row) > 0, layout)
self.dashboard.layout = json.dumps(layout)
self.dashboard.save()
super(Widget, self).delete_instance(*args, **kwargs)
class Event(BaseModel):
user = peewee.ForeignKeyField(User, related_name="events")
user = peewee.ForeignKeyField(User, related_name="events", null=True)
action = peewee.CharField()
object_type = peewee.CharField()
object_id = peewee.CharField(null=True)
additional_properties = peewee.TextField(null=True)
created_at = peewee.DateTimeField(default=datetime.datetime.now)
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'events'
@@ -520,6 +605,21 @@ class Event(BaseModel):
def __unicode__(self):
return u"%s,%s,%s,%s" % (self._data['user'], self.action, self.object_type, self.object_id)
@classmethod
def record(cls, event):
user = event.pop('user_id')
action = event.pop('action')
object_type = event.pop('object_type')
object_id = event.pop('object_id', None)
created_at = datetime.datetime.utcfromtimestamp(event.pop('timestamp'))
additional_properties = json.dumps(event)
event = cls.create(user=user, action=action, object_type=object_type, object_id=object_id,
additional_properties=additional_properties, created_at=created_at)
return event
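For illustration, an event dict as record() expects it; the values here are made up:
event = {
    'user_id': 1,
    'action': 'execute',
    'object_type': 'query',
    'object_id': '42',
    'timestamp': 1425900000,           # seconds since epoch; becomes created_at
    'referer': 'http://example.com/',  # any leftover keys end up in additional_properties
}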
all_models = (DataSource, User, QueryResult, Query, Dashboard, Visualization, Widget, ActivityLog, Group, Event)
@@ -536,7 +636,6 @@ def create_db(create_tables, drop_tables):
if drop_tables and model.table_exists():
# TODO: submit PR to peewee to allow passing cascade option to drop_table.
db.database.execute_sql('DROP TABLE %s CASCADE' % model._meta.db_table)
#model.drop_table()
if create_tables and not model.table_exists():
model.create_table()


@@ -10,10 +10,7 @@ class require_permissions(object):
def __call__(self, fn):
@functools.wraps(fn)
def decorated(*args, **kwargs):
has_permissions = reduce(lambda a, b: a and b,
map(lambda permission: permission in current_user.permissions,
self.permissions),
True)
has_permissions = current_user.has_permissions(self.permissions)
if has_permissions:
return fn(*args, **kwargs)


@@ -0,0 +1,104 @@
import logging
import json
import jsonschema
from jsonschema import ValidationError
logger = logging.getLogger(__name__)
__all__ = [
'ValidationError',
'BaseQueryRunner',
'TYPE_DATETIME',
'TYPE_BOOLEAN',
'TYPE_INTEGER',
'TYPE_STRING',
'TYPE_DATE',
'TYPE_FLOAT',
'register',
'get_query_runner',
'import_query_runners'
]
# Valid types of columns returned in results:
TYPE_INTEGER = 'integer'
TYPE_FLOAT = 'float'
TYPE_BOOLEAN = 'boolean'
TYPE_STRING = 'string'
TYPE_DATETIME = 'datetime'
TYPE_DATE = 'date'
class BaseQueryRunner(object):
def __init__(self, configuration):
jsonschema.validate(configuration, self.configuration_schema())
self.configuration = configuration
@classmethod
def name(cls):
return cls.__name__
@classmethod
def type(cls):
return cls.__name__.lower()
@classmethod
def enabled(cls):
return True
@classmethod
def annotate_query(cls):
return True
@classmethod
def configuration_schema(cls):
return {}
def run_query(self, query):
raise NotImplementedError()
@classmethod
def to_dict(cls):
return {
'name': cls.name(),
'type': cls.type(),
'configuration_schema': cls.configuration_schema()
}
query_runners = {}
def register(query_runner_class):
global query_runners
if query_runner_class.enabled():
logger.debug("Registering %s (%s) query runner.", query_runner_class.name(), query_runner_class.type())
query_runners[query_runner_class.type()] = query_runner_class
else:
logger.warning("%s query runner enabled but not supported, not registering. Either disable or install missing dependencies.", query_runner_class.name())
def get_query_runner(query_runner_type, configuration_json):
query_runner_class = query_runners.get(query_runner_type, None)
if query_runner_class is None:
return None
return query_runner_class(json.loads(configuration_json))
def validate_configuration(query_runner_type, configuration_json):
query_runner_class = query_runners.get(query_runner_type, None)
if query_runner_class is None:
return False
try:
jsonschema.validate(json.loads(configuration_json), query_runner_class.configuration_schema())
except (ValidationError, ValueError):
return False
return True
def import_query_runners(query_runner_imports):
for runner_import in query_runner_imports:
__import__(runner_import)
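To make the contract concrete, here is a minimal hypothetical runner against the interface above (the Echo class is an illustration, not part of re:dash):
import json
from redash.query_runner import BaseQueryRunner, register, TYPE_STRING

class Echo(BaseQueryRunner):
    @classmethod
    def configuration_schema(cls):
        return {
            'type': 'object',
            'properties': {
                'prefix': {'type': 'string'}
            }
        }

    def run_query(self, query):
        # Runners return a (json_data, error) tuple.
        data = {'columns': [{'name': 'echo', 'friendly_name': 'echo', 'type': TYPE_STRING}],
                'rows': [{'echo': self.configuration.get('prefix', '') + query}]}
        return json.dumps(data), None

register(Echo)
Registered under the type 'echo' (the lowercased class name), it becomes usable once its module is listed in the QUERY_RUNNERS setting further down.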


@@ -1,29 +1,37 @@
import datetime
import httplib2
import json
import httplib2
import logging
import sys
import time
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
try:
import apiclient.errors
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.client import SignedJwtAssertionCredentials
except ImportError:
print "Missing dependencies. Please install google-api-python-client and oauth2client."
print "You can use pip: pip install google-api-python-client oauth2client"
from redash.utils import JSONEncoder
enabled = True
except ImportError:
logger.warning("Missing dependencies. Please install google-api-python-client and oauth2client.")
logger.warning("You can use pip: pip install google-api-python-client oauth2client")
enabled = False
types_map = {
'INTEGER': 'integer',
'FLOAT': 'float',
'BOOLEAN': 'boolean',
'STRING': 'string',
'TIMESTAMP': 'datetime',
'INTEGER': TYPE_INTEGER,
'FLOAT': TYPE_FLOAT,
'BOOLEAN': TYPE_BOOLEAN,
'STRING': TYPE_STRING,
'TIMESTAMP': TYPE_DATETIME,
}
def transform_row(row, fields):
column_index = 0
row_data = {}
@@ -49,37 +57,71 @@ def transform_row(row, fields):
return row_data
def bigquery(connection_string):
def load_key(filename):
f = file(filename, "rb")
try:
return f.read()
finally:
f.close()
def get_bigquery_service():
scope = [
"https://www.googleapis.com/auth/bigquery",
]
def _load_key(filename):
with open(filename, "rb") as f:
return f.read()
credentials = SignedJwtAssertionCredentials(connection_string["serviceAccount"],
load_key(connection_string["privateKey"]), scope=scope)
http = httplib2.Http()
http = credentials.authorize(http)
return build("bigquery", "v2", http=http)
def _get_bigquery_service(service_account, private_key):
scope = [
"https://www.googleapis.com/auth/bigquery",
]
def get_query_results(jobs, project_id, job_id, start_index):
query_reply = jobs.getQueryResults(projectId=project_id, jobId=job_id, startIndex=start_index).execute()
logging.debug('query_reply %s', query_reply)
if not query_reply['jobComplete']:
time.sleep(10)
return get_query_results(jobs, project_id, job_id, start_index)
credentials = SignedJwtAssertionCredentials(service_account, private_key, scope=scope)
http = httplib2.Http()
http = credentials.authorize(http)
return query_reply
return build("bigquery", "v2", http=http)
def query_runner(query):
bigquery_service = get_bigquery_service()
def _get_query_results(jobs, project_id, job_id, start_index):
query_reply = jobs.getQueryResults(projectId=project_id, jobId=job_id, startIndex=start_index).execute()
logger.debug('query_reply %s', query_reply)
if not query_reply['jobComplete']:
time.sleep(10)
return _get_query_results(jobs, project_id, job_id, start_index)
return query_reply
class BigQuery(BaseQueryRunner):
@classmethod
def enabled(cls):
return enabled
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'serviceAccount': {
'type': 'string',
'title': 'Service Account'
},
'projectId': {
'type': 'string',
'title': 'Project ID'
},
'privateKey': {
'type': 'string',
'title': 'Private Key Path'
}
},
'required': ['serviceAccount', 'projectId', 'privateKey']
}
def __init__(self, configuration_json):
super(BigQuery, self).__init__(configuration_json)
self.private_key = _load_key(self.configuration["privateKey"])
def run_query(self, query):
bigquery_service = _get_bigquery_service(self.configuration["serviceAccount"],
self.private_key)
jobs = bigquery_service.jobs()
job_data = {
@@ -90,17 +132,17 @@ def bigquery(connection_string):
}
}
logging.debug("bigquery got query: %s", query)
logger.debug("BigQuery got query: %s", query)
project_id = connection_string["projectId"]
project_id = self.configuration["projectId"]
try:
insert_response = jobs.insert(projectId=project_id, body=job_data).execute()
current_row = 0
query_reply = get_query_results(jobs, project_id=project_id,
query_reply = _get_query_results(jobs, project_id=project_id,
job_id=insert_response['jobReference']['jobId'], start_index=current_row)
logging.debug("bigquery replied: %s", query_reply)
logger.debug("bigquery replied: %s", query_reply)
rows = []
@@ -134,5 +176,4 @@ def bigquery(connection_string):
return json_data, error
return query_runner
register(BigQuery)
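For reference, an options blob with placeholder values that would satisfy the schema above:
options = json.dumps({
    "serviceAccount": "12345-abcdef@developer.gserviceaccount.com",  # placeholder
    "projectId": "my-project",                                       # placeholder
    "privateKey": "/opt/redash/bigquery.p12"                         # key file read by _load_key()
})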


@@ -0,0 +1,83 @@
import json
import datetime
import requests
import logging
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
def _transform_result(response):
columns = ({'name': 'Time::x', 'type': TYPE_DATETIME},
{'name': 'value::y', 'type': TYPE_FLOAT},
{'name': 'name::series', 'type': TYPE_STRING})
rows = []
for series in response.json():
for values in series['datapoints']:
timestamp = datetime.datetime.fromtimestamp(int(values[1]))
rows.append({'Time::x': timestamp, 'name::series': series['target'], 'value::y': values[0]})
data = {'columns': columns, 'rows': rows}
return json.dumps(data, cls=JSONEncoder)
class Graphite(BaseQueryRunner):
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'url': {
'type': 'string'
},
'username': {
'type': 'string'
},
'password': {
'type': 'string'
},
'verify': {
'type': 'boolean',
'title': 'Verify SSL certificate'
}
},
'required': ['url']
}
@classmethod
def annotate_query(cls):
return False
def __init__(self, configuration_json):
super(Graphite, self).__init__(configuration_json)
if "username" in self.configuration and self.configuration["username"]:
self.auth = (self.configuration["username"], self.configuration["password"])
else:
self.auth = None
self.verify = self.configuration["verify"]
self.base_url = "%s/render?format=json&" % self.configuration['url']
def run_query(self, query):
url = "%s%s" % (self.base_url, "&".join(query.split("\n")))
error = None
data = None
try:
response = requests.get(url, auth=self.auth, verify=self.verify)
if response.status_code == 200:
data = _transform_result(response)
else:
error = "Failed getting results (%d)" % response.status_code
except Exception, ex:
data = None
error = ex.message
return data, error
register(Graphite)
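Since run_query() turns each line of the query into one render parameter, a hypothetical Graphite "query" looks like:
query = 'target=summarize(stats.web.hits, "1hour", "sum")\nfrom=-24h'
# joined with '&' into <url>/render?format=json&target=...&from=-24h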


@@ -0,0 +1,178 @@
import json
import datetime
import logging
import re
import time
from redash.utils import JSONEncoder
from redash.query_runner import *
logger = logging.getLogger(__name__)
try:
import pymongo
from bson.objectid import ObjectId
enabled = True
except ImportError:
logger.warning("Missing dependencies. Please install pymongo.")
logger.warning("You can use pip: pip install pymongo")
enabled = False
TYPES_MAP = {
str: TYPE_STRING,
unicode: TYPE_STRING,
int: TYPE_INTEGER,
long: TYPE_INTEGER,
float: TYPE_FLOAT,
bool: TYPE_BOOLEAN,
datetime.datetime: TYPE_DATETIME,
}
date_regex = re.compile("ISODate\(\"(.*)\"\)", re.IGNORECASE)
def _get_column_by_name(columns, column_name):
for c in columns:
if "name" in c and c["name"] == column_name:
return c
return None
def _convert_date(q, field_name):
m = date_regex.findall(q[field_name])
if len(m) > 0:
if q[field_name].find(":") == -1:
q[field_name] = datetime.datetime.fromtimestamp(time.mktime(time.strptime(m[0], "%Y-%m-%d")))
else:
q[field_name] = datetime.datetime.fromtimestamp(time.mktime(time.strptime(m[0], "%Y-%m-%d %H:%M")))
class MongoDB(BaseQueryRunner):
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'connectionString': {
'type': 'string',
'title': 'Connection String'
},
'dbName': {
'type': 'string',
'title': "Database Name"
},
'replicaSetName': {
'type': 'string',
'title': 'Replica Set Name'
}
},
'required': ['connectionString']
}
@classmethod
def enabled(cls):
return enabled
@classmethod
def annotate_query(cls):
return False
def __init__(self, configuration_json):
super(MongoDB, self).__init__(configuration_json)
self.db_name = self.configuration["dbName"]
self.is_replica_set = bool(self.configuration.get("replicaSetName"))
def run_query(self, query):
if self.is_replica_set:
db_connection = pymongo.MongoReplicaSetClient(self.configuration["connectionString"], replicaSet=self.configuration["replicaSetName"])
else:
db_connection = pymongo.MongoClient(self.configuration["connectionString"])
if self.db_name not in db_connection.database_names():
return None, "Unknown database name '%s'" % self.db_name
db = db_connection[self.db_name]
logger.debug("mongodb connection string: %s", self.configuration['connectionString'])
logger.debug("mongodb got query: %s", query)
try:
query_data = json.loads(query)
except ValueError:
return None, "Invalid query format. The query is not a valid JSON."
if "collection" not in query_data:
return None, "'collection' must have a value to run a query"
else:
collection = query_data["collection"]
q = None
if "query" in query_data:
q = query_data["query"]
for k in q:
if q[k] and type(q[k]) in [str, unicode]:
logging.debug(q[k])
_convert_date(q, k)
elif q[k] and type(q[k]) is dict:
for k2 in q[k]:
if type(q[k][k2]) in [str, unicode]:
_convert_date(q[k], k2)
f = None
if "fields" in query_data:
f = query_data["fields"]
s = None
if "sort" in query_data and query_data["sort"]:
s = []
for field_name in query_data["sort"]:
s.append((field_name, query_data["sort"][field_name]))
columns = []
rows = []
error = None
json_data = None
if s:
cursor = db[collection].find(q, f).sort(s)
else:
cursor = db[collection].find(q, f)
for r in cursor:
for k in r:
if _get_column_by_name(columns, k) is None:
columns.append({
"name": k,
"friendly_name": k,
"type": TYPES_MAP.get(type(r[k]), TYPE_STRING)
})
# Convert ObjectId to string
if type(r[k]) == ObjectId:
r[k] = str(r[k])
rows.append(r)
if f:
ordered_columns = []
for k in sorted(f, key=f.get):
ordered_columns.append(_get_column_by_name(columns, k))
columns = ordered_columns
data = {
"columns": columns,
"rows": rows
}
error = None
json_data = json.dumps(data, cls=JSONEncoder)
return json_data, error
register(MongoDB)
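A hypothetical query document for this runner; only "collection" is mandatory, and the other keys mirror pymongo's find() arguments:
query = '''
{
    "collection": "events",
    "query": {"created_at": {"$gt": "ISODate(\\"2015-01-01\\")"}},
    "fields": {"_id": 1, "action": 2},
    "sort": {"created_at": -1}
}
'''
# Note: the projection values in "fields" double as the column order (sorted(f, key=f.get) above).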


@@ -0,0 +1,97 @@
import sys
import json
import logging
from redash.utils import JSONEncoder
from redash.query_runner import *
logger = logging.getLogger(__name__)
class Mysql(BaseQueryRunner):
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'host': {
'type': 'string'
},
'user': {
'type': 'string'
},
'passwd': {
'type': 'string',
'title': 'Password'
},
'db': {
'type': 'string',
'title': 'Database name'
}
},
'required': ['db']
}
@classmethod
def enabled(cls):
try:
import MySQLdb
except ImportError:
return False
return True
def __init__(self, configuration_json):
super(Mysql, self).__init__(configuration_json)
def run_query(self, query):
import MySQLdb
connection = MySQLdb.connect(self.configuration.get('host', ''),
self.configuration.get('user', ''),
self.configuration.get('passwd', ''),
self.configuration['db'],
charset='utf8', use_unicode=True)
cursor = connection.cursor()
logger.debug("MySQL running query: %s", query)
try:
cursor.execute(query)
data = cursor.fetchall()
cursor_desc = cursor.description
if cursor_desc is not None:
num_fields = len(cursor_desc)
column_names = [i[0] for i in cursor.description]
rows = [dict(zip(column_names, row)) for row in data]
# TODO: add types support
columns = [{'name': col_name,
'friendly_name': col_name,
'type': None} for col_name in column_names]
data = {'columns': columns, 'rows': rows}
json_data = json.dumps(data, cls=JSONEncoder)
error = None
else:
json_data = None
error = "No data was returned."
cursor.close()
except MySQLdb.Error, e:
json_data = None
error = e.args[1]
except KeyboardInterrupt:
error = "Query cancelled by user."
json_data = None
except Exception as e:
raise sys.exc_info()[1], None, sys.exc_info()[2]
finally:
connection.close()
return json_data, error
register(Mysql)

redash/query_runner/pg.py Normal file

@@ -0,0 +1,142 @@
import json
import logging
import psycopg2
import select
import sys
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
types_map = {
20: TYPE_INTEGER,
21: TYPE_INTEGER,
23: TYPE_INTEGER,
700: TYPE_FLOAT,
1700: TYPE_FLOAT,
701: TYPE_FLOAT,
16: TYPE_BOOLEAN,
1082: TYPE_DATE,
1114: TYPE_DATETIME,
1184: TYPE_DATETIME,
1014: TYPE_STRING,
1015: TYPE_STRING,
1008: TYPE_STRING,
1009: TYPE_STRING,
2951: TYPE_STRING
}
def _wait(conn):
while True:
try:
state = conn.poll()
if state == psycopg2.extensions.POLL_OK:
break
elif state == psycopg2.extensions.POLL_WRITE:
select.select([], [conn.fileno()], [])
elif state == psycopg2.extensions.POLL_READ:
select.select([conn.fileno()], [], [])
else:
raise psycopg2.OperationalError("poll() returned %s" % state)
except select.error:
raise psycopg2.OperationalError("select.error received")
class PostgreSQL(BaseQueryRunner):
@classmethod
def configuration_schema(cls):
return {
"type": "object",
"properties": {
"user": {
"type": "string"
},
"password": {
"type": "string"
},
"host": {
"type": "string"
},
"port": {
"type": "number"
},
"dbname": {
"type": "string",
"title": "Database Name"
}
},
"required": ["dbname"]
}
@classmethod
def type(cls):
return "pg"
def __init__(self, configuration_json):
super(PostgreSQL, self).__init__(configuration_json)
values = []
for k, v in self.configuration.iteritems():
values.append("{}={}".format(k, v))
self.connection_string = " ".join(values)
def run_query(self, query):
connection = psycopg2.connect(self.connection_string, async=True)
_wait(connection)
cursor = connection.cursor()
try:
cursor.execute(query)
_wait(connection)
# A set would be more efficient for the duplicate check, but it doesn't preserve column order;
# the data is small enough that scanning a list is fine.
column_names = []
columns = []
duplicates_counter = 1
for column in cursor.description:
# TODO: this deduplication needs to be generalized and reused in all query runners.
column_name = column.name
if column_name in column_names:
column_name += str(duplicates_counter)
duplicates_counter += 1
column_names.append(column_name)
columns.append({
'name': column_name,
'friendly_name': column_name,
'type': types_map.get(column.type_code, None)
})
rows = [dict(zip(column_names, row)) for row in cursor]
data = {'columns': columns, 'rows': rows}
json_data = json.dumps(data, cls=JSONEncoder)
error = None
cursor.close()
except (select.error, OSError) as e:
logging.exception(e)
error = "Query interrupted. Please retry."
json_data = None
except psycopg2.DatabaseError as e:
logging.exception(e)
json_data = None
error = e.message
except KeyboardInterrupt:
connection.cancel()
error = "Query cancelled by user."
json_data = None
except Exception as e:
raise sys.exc_info()[1], None, sys.exc_info()[2]
finally:
connection.close()
return json_data, error
register(PostgreSQL)
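The constructor above just flattens the configuration into a libpq-style connection string; with placeholder values:
configuration = {'user': 'redash', 'host': '127.0.0.1', 'port': 5439, 'dbname': 'warehouse'}
connection_string = " ".join("{}={}".format(k, v) for k, v in configuration.iteritems())
# -> "user=redash host=127.0.0.1 port=5439 dbname=warehouse" (dict iteration order is arbitrary)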


@@ -0,0 +1,79 @@
import sys
import json
from redash.query_runner import *
from redash import models
def get_query_result(query_id):
try:
query = models.Query.get_by_id(query_id)
except models.Query.DoesNotExist:
raise Exception("Query id %s does not exist." % query_id)
if query.latest_query_data is None:
raise Exception("Query does not have results yet.")
if query.latest_query_data.data is None:
raise Exception("Query does not have results yet.")
return json.loads(query.latest_query_data.data)
def execute_query(data_source_name, query):
try:
data_source = models.DataSource.get(models.DataSource.name==data_source_name)
except models.DataSource.DoesNotExist:
raise Exception("Wrong data source name: %s." % data_source_name)
query_runner = get_query_runner(data_source.type, data_source.options)
data, error = query_runner.run_query(query)
if error is not None:
raise Exception(error)
# TODO: allow skipping the json.dumps/loads round-trip when running in the same process
return json.loads(data)
class Python(BaseQueryRunner):
"""
This is very, very unsafe. Use at your own risk, and only with people you really trust.
"""
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
}
}
@classmethod
def annotate_query(cls):
return False
def __init__(self, configuration_json):
super(Python, self).__init__(configuration_json)
def run_query(self, query):
try:
error = None
script_globals = {'get_query_result': get_query_result, 'execute_query': execute_query}
script_locals = {'result': None}
# TODO: timeout, sandboxing
exec query in script_globals, script_locals
if script_locals['result'] is None:
raise Exception("result wasn't set to value.")
json_data = json.dumps(script_locals['result'])
except KeyboardInterrupt:
error = "Query cancelled by user."
json_data = None
except Exception as e:
raise sys.exc_info()[1], None, sys.exc_info()[2]
return json_data, error
register(Python)
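A hypothetical "query" for this runner (the query id is a placeholder); the script must assign to result, which run_query() then serializes:
rows = get_query_result(42)['rows']  # 42 is a placeholder query id
result = {
    'columns': [{'name': 'count', 'friendly_name': 'count', 'type': 'integer'}],
    'rows': [{'count': len(rows)}]
}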


@@ -0,0 +1,65 @@
import os
import sys
import subprocess
from redash.query_runner import *
class Script(BaseQueryRunner):
@classmethod
def enabled(cls):
return "check_output" in subprocess.__dict__
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'path': {
'type': 'string',
'title': 'Scripts path'
}
},
'required': ['path']
}
@classmethod
def annotate_query(cls):
return False
def __init__(self, configuration_json):
super(Script, self).__init__(configuration_json)
# Poor man's protection against running scripts from outside the scripts directory
if self.configuration["path"].find("../") > -1:
raise ValidationError("Scripts can only be run from the configured scripts directory")
def run_query(self, query):
try:
json_data = None
error = None
query = query.strip()
script = os.path.join(self.configuration["path"], query)
if not os.path.exists(script):
return None, "Script '%s' not found in script directory" % query
output = subprocess.check_output(script, shell=False)
if output is not None:
output = output.strip()
if output != "":
return output, None
error = "Error reading output"
except subprocess.CalledProcessError as e:
return None, str(e)
except KeyboardInterrupt:
error = "Query cancelled by user."
json_data = None
except Exception as e:
raise sys.exc_info()[1], None, sys.exc_info()[2]
return json_data, error
register(Script)


@@ -1,16 +1,30 @@
import json
import logging
import sys
import os
import urllib2
def url(connection_string):
from redash.query_runner import *
def query_runner(query):
base_url = connection_string
class Url(BaseQueryRunner):
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'url': {
'type': 'string',
'title': 'URL base path'
}
}
}
@classmethod
def annotate_query(cls):
return False
def run_query(self, query):
base_url = self.configuration["url"]
try:
json_data = None
error = None
query = query.strip()
@@ -41,5 +55,4 @@ def url(connection_string):
return json_data, error
query_runner.annotate_query = False
return query_runner
register(Url)


@@ -44,33 +44,45 @@ STATSD_HOST = os.environ.get('REDASH_STATSD_HOST', "127.0.0.1")
STATSD_PORT = int(os.environ.get('REDASH_STATSD_PORT', "8125"))
STATSD_PREFIX = os.environ.get('REDASH_STATSD_PREFIX', "redash")
# The following is kept for backward compatibility and shouldn't be used anymore.
CONNECTION_ADAPTER = os.environ.get("REDASH_CONNECTION_ADAPTER", "pg")
CONNECTION_STRING = os.environ.get("REDASH_CONNECTION_STRING", "user= password= host= port=5439 dbname=")
# Connection settings for re:dash's own database (where we store the queries, results, etc)
DATABASE_CONFIG = parse_db_url(os.environ.get("REDASH_DATABASE_URL", "postgresql://postgres"))
# Celery related settings
CELERY_BROKER = os.environ.get("REDASH_CELERY_BROKER", REDIS_URL)
CELERY_BACKEND = os.environ.get("REDASH_CELERY_BACKEND", REDIS_URL)
CELERY_FLOWER_URL = os.environ.get("REDASH_CELERY_FLOWER_URL", "/flower")
# The following enables a periodic job (every 5 minutes) that removes unused query results. It stays behind this
# "feature flag" until proven safe.
QUERY_RESULTS_CLEANUP_ENABLED = parse_boolean(os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_ENABLED", "false"))
AUTH_TYPE = os.environ.get("REDASH_AUTH_TYPE", "hmac")
PASSWORD_LOGIN_ENABLED = parse_boolean(os.environ.get("REDASH_PASSWORD_LOGIN_ENABLED", "true"))
# Google Apps domain to allow access from; any user with an email address in this Google Apps domain
# will be allowed access
GOOGLE_APPS_DOMAIN = os.environ.get("REDASH_GOOGLE_APPS_DOMAIN", "")
GOOGLE_OPENID_ENABLED = parse_boolean(os.environ.get("REDASH_GOOGLE_OPENID_ENABLED", "true"))
PASSWORD_LOGIN_ENABLED = parse_boolean(os.environ.get("REDASH_PASSWORD_LOGIN_ENABLED", "false"))
ALLOWED_EXTERNAL_USERS = array_from_string(os.environ.get("REDASH_ALLOWED_EXTERNAL_USERS", ''))
GOOGLE_CLIENT_ID = os.environ.get("REDASH_GOOGLE_CLIENT_ID", "")
GOOGLE_CLIENT_SECRET = os.environ.get("REDASH_GOOGLE_CLIENT_SECRET", "")
GOOGLE_OAUTH_ENABLED = GOOGLE_CLIENT_ID and GOOGLE_CLIENT_SECRET
STATIC_ASSETS_PATH = fix_assets_path(os.environ.get("REDASH_STATIC_ASSETS_PATH", "../rd_ui/app/"))
WORKERS_COUNT = int(os.environ.get("REDASH_WORKERS_COUNT", "2"))
JOB_EXPIRY_TIME = int(os.environ.get("REDASH_JOB_EXPIRY_TIME", 3600*24))
JOB_EXPIRY_TIME = int(os.environ.get("REDASH_JOB_EXPIRY_TIME", 3600*6))
COOKIE_SECRET = os.environ.get("REDASH_COOKIE_SECRET", "c292a0a3aa32397cdb050e233733900f")
LOG_LEVEL = os.environ.get("REDASH_LOG_LEVEL", "INFO")
EVENTS_LOG_PATH = os.environ.get("REDASH_EVENTS_LOG_PATH", "")
EVENTS_CONSOLE_OUTPUT = parse_boolean(os.environ.get("REDASH_EVENTS_CONSOLE_OUTPUT", "false"))
CLIENT_SIDE_METRICS = parse_boolean(os.environ.get("REDASH_CLIENT_SIDE_METRICS", "false"))
ANALYTICS = os.environ.get("REDASH_ANALYTICS", "")
# Query Runners
QUERY_RUNNERS = [
'redash.query_runner.big_query',
'redash.query_runner.graphite',
'redash.query_runner.mongodb',
'redash.query_runner.mysql',
'redash.query_runner.pg',
'redash.query_runner.script',
'redash.query_runner.url',
]
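# Each module above registers its runner(s) on import; presumably import_query_runners()
# (from redash.query_runner) is called with this list at startup.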
# Features:
FEATURE_TABLES_PERMISSIONS = parse_boolean(os.environ.get("REDASH_FEATURE_TABLES_PERMISSIONS", "false"))


@@ -5,10 +5,10 @@ import redis
from celery import Task
from celery.result import AsyncResult
from celery.utils.log import get_task_logger
from redash import redis_connection, models, statsd_client
from redash import redis_connection, models, statsd_client, settings
from redash.utils import gen_query_hash
from redash.worker import celery
from redash.data.query_runner import get_query_runner
from redash.query_runner import get_query_runner
logger = get_task_logger(__name__)
@@ -64,7 +64,12 @@ class QueryTask(object):
logging.info("[Manager][%s] Found existing job: %s", query_hash, job_id)
job = cls(job_id=job_id)
else:
if job.ready():
logging.info("[%s] job found is ready (%s), removing lock", query_hash, job.celery_status)
redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))
job = None
if not job:
pipe.multi()
if scheduled:
@@ -75,7 +80,7 @@ class QueryTask(object):
result = execute_query.apply_async(args=(query, data_source.id), queue=queue_name)
job = cls(async_result=result)
logging.info("[Manager][%s] Created new job: %s", query_hash, job.id)
pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id)
pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
pipe.execute()
break
@@ -113,6 +118,17 @@ class QueryTask(object):
'query_result_id': query_result_id,
}
@property
def is_cancelled(self):
return self._async_result.status == 'REVOKED'
@property
def celery_status(self):
return self._async_result.status
def ready(self):
return self._async_result.ready()
def cancel(self):
return self._async_result.revoke(terminate=True)
@@ -135,8 +151,6 @@ def refresh_queries():
outdated_queries_count += 1
statsd_client.gauge('manager.outdated_queries', outdated_queries_count)
# TODO: decide if we still need this
# statsd_client.gauge('manager.queue_size', self.redis_connection.zcard('jobs'))
logger.info("Done refreshing queries. Found %d outdated queries." % outdated_queries_count)
@@ -151,6 +165,59 @@ def refresh_queries():
statsd_client.gauge('manager.seconds_since_refresh', now - float(status.get('last_refresh_at', now)))
@celery.task(base=BaseTask)
def cleanup_tasks():
# In case of a cold restart of the workers, there might be jobs that still hold their "lock" object but aren't
# really going to run. This task removes those stale locks.
lock_keys = redis_connection.keys("query_hash_job:*") # TODO: use set instead of keys command
if not lock_keys:
return
query_tasks = [QueryTask(job_id=j) for j in redis_connection.mget(lock_keys)]
logger.info("Found %d locks", len(query_tasks))
inspect = celery.control.inspect()
active_tasks = inspect.active()
if active_tasks is None:
active_tasks = []
else:
active_tasks = active_tasks.values()
all_tasks = set()
for task_list in active_tasks:
for task in task_list:
all_tasks.add(task['id'])
logger.info("Active jobs count: %d", len(all_tasks))
for i, t in enumerate(query_tasks):
if t.ready():
# if locked task is ready already (failed, finished, revoked), we don't need the lock anymore
logger.warning("%s is ready (%s), removing lock.", lock_keys[i], t.celery_status)
redis_connection.delete(lock_keys[i])
if t.celery_status == 'STARTED' and t.id not in all_tasks:
logger.warning("Couldn't find active job for: %s, removing lock.", lock_keys[i])
redis_connection.delete(lock_keys[i])
@celery.task(base=BaseTask)
def cleanup_query_results():
"""
Job to cleanup unused query results -- such that no query links to them anymore, and older than a week (so it's less
likely to be open in someone's browser and be used).
Each time the job deletes only 100 query results so it won't choke the database in case of many such results.
"""
unused_query_results = models.QueryResult.unused().limit(100)
total_unused_query_results = models.QueryResult.unused().count()
deleted_count = models.QueryResult.delete().where(models.QueryResult.id << unused_query_results).execute()
logger.info("Deleted %d unused query results out of total of %d." % (deleted_count, total_unused_query_results))
@celery.task(bind=True, base=BaseTask, track_started=True)
def execute_query(self, query, data_source_id):
# TODO: maybe this should be a class?
@@ -168,15 +235,15 @@ def execute_query(self, query, data_source_id):
query_hash = gen_query_hash(query)
query_runner = get_query_runner(data_source.type, data_source.options)
-if getattr(query_runner, 'annotate_query', True):
-# TODO: anotate with queu ename
+if query_runner.annotate_query():
+# TODO: annotate with queue name
annotated_query = "/* Task Id: %s, Query hash: %s */ %s" % \
(self.request.id, query_hash, query)
else:
annotated_query = query
with statsd_client.timer('query_runner.{}.{}.run_time'.format(data_source.type, data_source.name)):
-data, error = query_runner(annotated_query)
+data, error = query_runner.run_query(annotated_query)
run_time = time.time() - start_time
logger.info("Query finished... data length=%s, error=%s", data and len(data), error)
@@ -186,8 +253,6 @@ def execute_query(self, query, data_source_id):
# Delete query_hash
redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))
# TODO: it is possible that storing the data will fail, and we will need to retry
# while we already marked the job as done
if not error:
query_result = models.QueryResult.store_result(data_source.id, query_hash, query, data, run_time, datetime.datetime.utcnow())
else:
@@ -195,3 +260,7 @@ def execute_query(self, query, data_source_id):
return query_result.id
@celery.task(base=BaseTask)
def record_event(event):
models.Event.record(event)

redash/worker.py

@@ -7,15 +7,26 @@ celery = Celery('redash',
broker=settings.CELERY_BROKER,
include='redash.tasks')
-celery.conf.update(CELERY_RESULT_BACKEND=settings.CELERY_BACKEND,
-CELERYBEAT_SCHEDULE={
-'refresh_queries': {
-'task': 'redash.tasks.refresh_queries',
-'schedule': timedelta(seconds=30)
-},
-},
-CELERY_TIMEZONE='UTC')
+celery_schedule = {
+'refresh_queries': {
+'task': 'redash.tasks.refresh_queries',
+'schedule': timedelta(seconds=30)
+},
+'cleanup_tasks': {
+'task': 'redash.tasks.cleanup_tasks',
+'schedule': timedelta(minutes=5)
+}
+}
+if settings.QUERY_RESULTS_CLEANUP_ENABLED:
+celery_schedule['cleanup_query_results'] = {
+'task': 'redash.tasks.cleanup_query_results',
+'schedule': timedelta(minutes=5)
+}
+celery.conf.update(CELERY_RESULT_BACKEND=settings.CELERY_BACKEND,
+CELERYBEAT_SCHEDULE=celery_schedule,
+CELERY_TIMEZONE='UTC')
if __name__ == '__main__':
celery.start()
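Note that the CELERYBEAT_SCHEDULE entries above only fire when a beat process runs; the supervisord config later in this diff embeds beat into the worker, roughly:

celery worker --app=redash.worker --beat -Qqueries,celery,scheduled_queries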

requirements.txt

@@ -1,7 +1,7 @@
Flask==0.10.1
Flask-GoogleAuth==0.4
Flask-RESTful==0.2.10
Flask-Login==0.2.9
Flask-OAuth==0.12
passlib==1.6.2
Jinja2==2.7.2
MarkupSafe==0.18
@@ -9,8 +9,8 @@ Werkzeug==0.9.4
aniso8601==0.82
blinker==1.3
itsdangerous==0.23
-peewee==2.2.2
-psycopg2==2.5.1
+peewee==2.4.7
+psycopg2==2.5.2
python-dateutil==2.1
pytz==2013.9
redis==2.7.5
@@ -23,3 +23,5 @@ honcho==0.5.0
statsd==2.1.2
gunicorn==18.0
celery==3.1.11
jsonschema==2.4.0
click==3.3

setup/Vagrantfile_debian Normal file

@@ -0,0 +1,12 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Every Vagrant virtual environment requires a box to build off of.
config.vm.box = "box-cutter/debian76"
config.vm.provision "shell", path: "setup.sh"
config.vm.network "forwarded_port", guest: 80, host: 9001
end
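Assuming Vagrant with a VirtualBox provider is installed, a likely workflow for this box is:

cd setup
VAGRANT_VAGRANTFILE=Vagrantfile_debian vagrant up # fetches box-cutter/debian76 and runs setup.sh inside the VM
curl -I http://localhost:9001/ # hits re:dash through the forwarded port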

setup/bootstrap.sh Normal file

@@ -0,0 +1,177 @@
#!/bin/bash
set -eu
REDASH_BASE_PATH=/opt/redash
FILES_BASE_URL=https://raw.githubusercontent.com/EverythingMe/redash/docs_setup/setup/files/
# Verify running as root:
if [ "$(id -u)" != "0" ]; then
if [ $# -ne 0 ]; then
echo "Failed running with sudo. Exiting." 1>&2
exit 1
fi
echo "This script must be run as root. Trying to run with sudo."
sudo bash $0 --with-sudo
exit 0
fi
# Base packages
apt-get update
apt-get install -y python-pip python-dev nginx curl build-essential pwgen
# redash user
# TODO: check user doesn't exist yet?
adduser --system --no-create-home --disabled-login --gecos "" redash
# PostgreSQL
pg_available=0
psql --version || pg_available=$?
if [ $pg_available -ne 0 ]; then
wget $FILES_BASE_URL"postgres_apt.sh" -O /tmp/postgres_apt.sh
bash /tmp/postgres_apt.sh
apt-get update
apt-get -y install postgresql-9.3 postgresql-server-dev-9.3
fi
add_service() {
service_name=$1
service_command="/etc/init.d/$service_name"
echo "Adding service: $service_name (/etc/init.d/$service_name)."
chmod +x $service_command
if command -v chkconfig >/dev/null 2>&1; then
# we're chkconfig, so lets add to chkconfig and put in runlevel 345
chkconfig --add $service_name && echo "Successfully added to chkconfig!"
chkconfig --level 345 $service_name on && echo "Successfully added to runlevels 345!"
elif command -v update-rc.d >/dev/null 2>&1; then
#if we're not a chkconfig box assume we're able to use update-rc.d
update-rc.d $service_name defaults && echo "Success!"
else
echo "No supported init tool found."
fi
$service_command start
}
# Redis
redis_available=0
redis-cli --version || redis_available=$?
if [ $redis_available -ne 0 ]; then
wget http://download.redis.io/releases/redis-2.8.17.tar.gz
tar xzf redis-2.8.17.tar.gz
rm redis-2.8.17.tar.gz
cd redis-2.8.17
make
make install
# Setup process init & configuration
REDIS_PORT=6379
REDIS_CONFIG_FILE="/etc/redis/$REDIS_PORT.conf"
REDIS_LOG_FILE="/var/log/redis_$REDIS_PORT.log"
REDIS_DATA_DIR="/var/lib/redis/$REDIS_PORT"
mkdir -p `dirname "$REDIS_CONFIG_FILE"` || die "Could not create redis config directory"
mkdir -p `dirname "$REDIS_LOG_FILE"` || die "Could not create redis log dir"
mkdir -p "$REDIS_DATA_DIR" || die "Could not create redis data directory"
wget -O /etc/init.d/redis_6379 $FILES_BASE_URL"redis_init"
wget -O $REDIS_CONFIG_FILE $FILES_BASE_URL"redis.conf"
add_service "redis_$REDIS_PORT"
cd ..
rm -rf redis-2.8.17
fi
# Directories
if [ ! -d "$REDASH_BASE_PATH" ]; then
sudo mkdir /opt/redash
sudo chown redash /opt/redash
sudo -u redash mkdir /opt/redash/logs
fi
# Default config file
if [ ! -f "/opt/redash/.env" ]; then
sudo -u redash wget $FILES_BASE_URL"env" -O /opt/redash/.env
fi
# Install latest version
REDASH_VERSION=${REDASH_VERSION-0.4.0.b589}
LATEST_URL="https://github.com/EverythingMe/redash/releases/download/v${REDASH_VERSION/.b/%2Bb}/redash.$REDASH_VERSION.tar.gz"
VERSION_DIR="/opt/redash/redash.$REDASH_VERSION"
REDASH_TARBALL=/tmp/redash.tar.gz
if [ ! -d "$VERSION_DIR" ]; then
sudo -u redash wget $LATEST_URL -O $REDASH_TARBALL
sudo -u redash mkdir $VERSION_DIR
sudo -u redash tar -C $VERSION_DIR -xvf $REDASH_TARBALL
ln -nfs $VERSION_DIR /opt/redash/current
ln -nfs /opt/redash/.env /opt/redash/current/.env
cd /opt/redash/current
# TODO: venv?
pip install -r requirements.txt
fi
# Create database / tables
pg_user_exists=0
sudo -u postgres psql postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='redash'" | grep -q 1 || pg_user_exists=$?
if [ $pg_user_exists -ne 0 ]; then
echo "Creating redash postgres user & database."
sudo -u postgres createuser redash --no-superuser --no-createdb --no-createrole
sudo -u postgres createdb redash --owner=redash
cd /opt/redash/current
sudo -u redash bin/run ./manage.py database create_tables
fi
# Create default admin user
cd /opt/redash/current
# TODO: make sure user created only once
# TODO: generate temp password and print to screen
sudo -u redash bin/run ./manage.py users create --admin --password admin "Admin" "admin"
# Create re:dash read only pg user & setup data source
pg_user_exists=0
sudo -u postgres psql postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='redash_reader'" | grep -q 1 || pg_user_exists=$?
if [ $pg_user_exists -ne 0 ]; then
echo "Creating redash reader postgres user."
REDASH_READER_PASSWORD=$(pwgen -1)
sudo -u postgres psql -c "CREATE ROLE redash_reader WITH PASSWORD '$REDASH_READER_PASSWORD' NOCREATEROLE NOCREATEDB NOSUPERUSER LOGIN"
sudo -u redash psql -c "grant select(id,name,type) ON data_sources to redash_reader;" redash
sudo -u redash psql -c "grant select on activity_log, events, queries, dashboards, widgets, visualizations, query_results to redash_reader;" redash
cd /opt/redash/current
sudo -u redash bin/run ./manage.py ds new -n "re:dash metadata" -t "pg" -o "{\"user\": \"redash_reader\", \"password\": \"$REDASH_READER_PASSWORD\", \"host\": \"localhost\", \"dbname\": \"redash\"}"
fi
# BigQuery dependencies:
apt-get install -y libffi-dev libssl-dev
pip install google-api-python-client==1.2 pyOpenSSL==0.14 oauth2client==1.2
# MySQL dependencies:
apt-get install -y libmysqlclient-dev
pip install MySQL-python==1.2.5
# Mongo dependencies:
pip install pymongo==2.7.2
# Setup supervisord + sysv init startup script
sudo -u redash mkdir -p /opt/redash/supervisord
pip install supervisor==3.1.2 # TODO: move to requirements.txt
# Get supervisord startup script
sudo -u redash wget -O /opt/redash/supervisord/supervisord.conf $FILES_BASE_URL"supervisord.conf"
wget -O /etc/init.d/redash_supervisord $FILES_BASE_URL"redash_supervisord_init"
add_service "redash_supervisord"
# Nginx setup
rm /etc/nginx/sites-enabled/default
wget -O /etc/nginx/sites-available/redash $FILES_BASE_URL"nginx_redash_site"
ln -nfs /etc/nginx/sites-available/redash /etc/nginx/sites-enabled/redash
service nginx restart
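Because the script re-execs itself under sudo and defaults REDASH_VERSION as shown above, a one-off install on a fresh Debian/Ubuntu host is roughly:

sudo bash setup/bootstrap.sh # installs the pinned default release
sudo REDASH_VERSION=0.4.0.b589 bash setup/bootstrap.sh # or pin a release explicitly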

setup/files/env Normal file

@@ -0,0 +1,9 @@
export REDASH_CONNECTION_ADAPTER=pg
export REDASH_CONNECTION_STRING="dbname=redash"
export REDASH_STATIC_ASSETS_PATH="../rd_ui/dist/"
export REDASH_LOG_LEVEL="INFO"
export REDASH_WORKERS_COUNT=6
export REDASH_REDIS_URL=redis://localhost:6379/1
export REDASH_DATABASE_URL="postgresql://redash"
export REDASH_COOKIE_SECRET=veryverysecret
export REDASH_GOOGLE_APPS_DOMAIN=

setup/files/nginx_redash_site Normal file

@@ -0,0 +1,20 @@
upstream rd_servers {
server 127.0.0.1:5000;
}
server {
listen 80 default;
access_log /var/log/nginx/rd.access.log;
gzip on;
gzip_types *;
gzip_proxied any;
location / {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass http://rd_servers;
}
}
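With the gunicorn backend from the supervisord config listening on 127.0.0.1:5000, the site can be smoke-tested through nginx on port 80:

curl -sI http://127.0.0.1/ | head -n 1 # expect an HTTP status line from the proxied app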

setup/files/postgres_apt.sh Normal file

@@ -0,0 +1,162 @@
#!/bin/sh
# script to add apt.postgresql.org to sources.list
# from command line
CODENAME="$1"
# lsb_release is the best interface, but not always available
if [ -z "$CODENAME" ]; then
CODENAME=$(lsb_release -cs 2>/dev/null)
fi
# parse os-release (unreliable, does not work on Ubuntu)
if [ -z "$CODENAME" -a -f /etc/os-release ]; then
. /etc/os-release
# Debian: VERSION="7.0 (wheezy)"
# Ubuntu: VERSION="13.04, Raring Ringtail"
CODENAME=$(echo $VERSION | sed -ne 's/.*(\(.*\)).*/\1/')
fi
# guess from sources.list
if [ -z "$CODENAME" ]; then
CODENAME=$(grep '^deb ' /etc/apt/sources.list | head -n1 | awk '{ print $3 }')
fi
# complain if no result yet
if [ -z "$CODENAME" ]; then
cat <<EOF
Could not determine the distribution codename. Please report this as a bug to
pgsql-pkg-debian@postgresql.org. As a workaround, you can call this script with
the proper codename as parameter, e.g. "$0 squeeze".
EOF
exit 1
fi
# errors are non-fatal above
set -e
cat <<EOF
This script will enable the PostgreSQL APT repository on apt.postgresql.org on
your system. The distribution codename used will be $CODENAME-pgdg.
EOF
case $CODENAME in
# known distributions
sid|wheezy|squeeze|lenny|etch) ;;
precise|lucid) ;;
*) # unknown distribution, verify on the web
DISTURL="http://apt.postgresql.org/pub/repos/apt/dists/"
if [ -x /usr/bin/curl ]; then
DISTHTML=$(curl -s $DISTURL)
elif [ -x /usr/bin/wget ]; then
DISTHTML=$(wget --quiet -O - $DISTURL)
fi
if [ "$DISTHTML" ]; then
if ! echo "$DISTHTML" | grep -q "$CODENAME-pgdg"; then
cat <<EOF
Your system is using the distribution codename $CODENAME, but $CODENAME-pgdg
does not seem to be a valid distribution on
$DISTURL
We abort the installation here. If you want to use a distribution different
from your system, you can call this script with an explicit codename, e.g.
"$0 precise".
Specifically, if you are using a non-LTS Ubuntu release, refer to
https://wiki.postgresql.org/wiki/Apt/FAQ#I_am_using_a_non-LTS_release_of_Ubuntu
For more information, refer to https://wiki.postgresql.org/wiki/Apt
or ask on the mailing list for assistance: pgsql-pkg-debian@postgresql.org
EOF
exit 1
fi
fi
;;
esac
echo "Writing /etc/apt/sources.list.d/pgdg.list ..."
cat > /etc/apt/sources.list.d/pgdg.list <<EOF
deb http://apt.postgresql.org/pub/repos/apt/ $CODENAME-pgdg main
#deb-src http://apt.postgresql.org/pub/repos/apt/ $CODENAME-pgdg main
EOF
echo "Importing repository signing key ..."
KEYRING="/etc/apt/trusted.gpg.d/apt.postgresql.org.gpg"
test -e $KEYRING || touch $KEYRING
apt-key --keyring $KEYRING add - <<EOF
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1
mQINBE6XR8IBEACVdDKT2HEH1IyHzXkb4nIWAY7echjRxo7MTcj4vbXAyBKOfjja
UrBEJWHN6fjKJXOYWXHLIYg0hOGeW9qcSiaa1/rYIbOzjfGfhE4x0Y+NJHS1db0V
G6GUj3qXaeyqIJGS2z7m0Thy4Lgr/LpZlZ78Nf1fliSzBlMo1sV7PpP/7zUO+aA4
bKa8Rio3weMXQOZgclzgeSdqtwKnyKTQdXY5MkH1QXyFIk1nTfWwyqpJjHlgtwMi
c2cxjqG5nnV9rIYlTTjYG6RBglq0SmzF/raBnF4Lwjxq4qRqvRllBXdFu5+2pMfC
IZ10HPRdqDCTN60DUix+BTzBUT30NzaLhZbOMT5RvQtvTVgWpeIn20i2NrPWNCUh
hj490dKDLpK/v+A5/i8zPvN4c6MkDHi1FZfaoz3863dylUBR3Ip26oM0hHXf4/2U
A/oA4pCl2W0hc4aNtozjKHkVjRx5Q8/hVYu+39csFWxo6YSB/KgIEw+0W8DiTII3
RQj/OlD68ZDmGLyQPiJvaEtY9fDrcSpI0Esm0i4sjkNbuuh0Cvwwwqo5EF1zfkVj
Tqz2REYQGMJGc5LUbIpk5sMHo1HWV038TWxlDRwtOdzw08zQA6BeWe9FOokRPeR2
AqhyaJJwOZJodKZ76S+LDwFkTLzEKnYPCzkoRwLrEdNt1M7wQBThnC5z6wARAQAB
tBxQb3N0Z3JlU1FMIERlYmlhbiBSZXBvc2l0b3J5iQI9BBMBCAAnAhsDBQsJCAcD
BRUKCQgLBRYCAwEAAh4BAheABQJS6RUZBQkOhCctAAoJEH/MfUaszEz4zmQP/2ad
HtuaXL5Xu3C3NGLha/aQb9iSJC8z5vN55HMCpsWlmslCBuEr+qR+oZvPkvwh0Io/
8hQl/qN54DMNifRwVL2n2eG52yNERie9BrAMK2kNFZZCH4OxlMN0876BmDuNq2U6
7vUtCv+pxT+g9R1LvlPgLCTjS3m+qMqUICJ310BMT2cpYlJx3YqXouFkdWBVurI0
pGU/+QtydcJALz5eZbzlbYSPWbOm2ZSS2cLrCsVNFDOAbYLtUn955yXB5s4rIscE
vTzBxPgID1iBknnPzdu2tCpk07yJleiupxI1yXstCtvhGCbiAbGFDaKzhgcAxSIX
0ZPahpaYLdCkcoLlfgD+ar4K8veSK2LazrhO99O0onRG0p7zuXszXphO4E/WdbTO
yDD35qCqYeAX6TaB+2l4kIdVqPgoXT/doWVLUK2NjZtd3JpMWI0OGYDFn2DAvgwP
xqKEoGTOYuoWKssnwLlA/ZMETegak27gFAKfoQlmHjeA/PLC2KRYd6Wg2DSifhn+
2MouoE4XFfeekVBQx98rOQ5NLwy/TYlsHXm1n0RW86ETN3chj/PPWjsi80t5oepx
82azRoVu95LJUkHpPLYyqwfueoVzp2+B2hJU2Rg7w+cJq64TfeJG8hrc93MnSKIb
zTvXfdPtvYdHhhA2LYu4+5mh5ASlAMJXD7zIOZt2iEYEEBEIAAYFAk6XSO4ACgkQ
xa93SlhRC1qmjwCg9U7U+XN7Gc/dhY/eymJqmzUGT/gAn0guvoX75Y+BsZlI6dWn
qaFU6N8HiQIcBBABCAAGBQJOl0kLAAoJEExaa6sS0qeuBfEP/3AnLrcKx+dFKERX
o4NBCGWr+i1CnowupKS3rm2xLbmiB969szG5TxnOIvnjECqPz6skK3HkV3jTZaju
v3sR6M2ItpnrncWuiLnYcCSDp9TEMpCWzTEgtrBlKdVuTNTeRGILeIcvqoZX5w+u
i0eBvvbeRbHEyUsvOEnYjrqoAjqUJj5FUZtR1+V9fnZp8zDgpOSxx0LomnFdKnhj
uyXAQlRCA6/roVNR9ruRjxTR5ubteZ9ubTsVYr2/eMYOjQ46LhAgR+3Alblu/WHB
MR/9F9//RuOa43R5Sjx9TiFCYol+Ozk8XRt3QGweEH51YkSYY3oRbHBb2Fkql6N6
YFqlLBL7/aiWnNmRDEs/cdpo9HpFsbjOv4RlsSXQfvvfOayHpT5nO1UQFzoyMVpJ
615zwmQDJT5Qy7uvr2eQYRV9AXt8t/H+xjQsRZCc5YVmeAo91qIzI/tA2gtXik49
6yeziZbfUvcZzuzjjxFExss4DSAwMgorvBeIbiz2k2qXukbqcTjB2XqAlZasd6Ll
nLXpQdqDV3McYkP/MvttWh3w+J/woiBcA7yEI5e3YJk97uS6+ssbqLEd0CcdT+qz
+Waw0z/ZIU99Lfh2Qm77OT6vr//Zulw5ovjZVO2boRIcve7S97gQ4KC+G/+QaRS+
VPZ67j5UMxqtT/Y4+NHcQGgwF/1iiQI9BBMBCAAnAhsDBQsJCAcDBRUKCQgLBRYC
AwEAAh4BAheABQJQeSssBQkDwxbfAAoJEH/MfUaszEz4bgkP/0AI0UgDgkNNqplA
IpE/pkwem2jgGpJGKurh2xDu6j2ZL+BPzPhzyCeMHZwTXkkI373TXGQQP8dIa+RD
HAZ3iijw4+ISdKWpziEUJjUk04UMPTlN+dYJt2EHLQDD0VLtX0yQC/wLmVEH/REp
oclbVjZR/+ehwX2IxOIlXmkZJDSycl975FnSUjMAvyzty8P9DN0fIrQ7Ju+BfMOM
TnUkOdp0kRUYez7pxbURJfkM0NxAP1geACI91aISBpFg3zxQs1d3MmUIhJ4wHvYB
uaR7Fx1FkLAxWddre/OCYJBsjucE9uqc04rgKVjN5P/VfqNxyUoB+YZ+8Lk4t03p
RBcD9XzcyOYlFLWXbcWxTn1jJ2QMqRIWi5lzZIOMw5B+OK9LLPX0dAwIFGr9WtuV
J2zp+D4CBEMtn4Byh8EaQsttHeqAkpZoMlrEeNBDz2L7RquPQNmiuom15nb7xU/k
7PGfqtkpBaaGBV9tJkdp7BdH27dZXx+uT+uHbpMXkRrXliHjWpAw+NGwADh/Pjmq
ExlQSdgAiXy1TTOdzxKH7WrwMFGDK0fddKr8GH3f+Oq4eOoNRa6/UhTCmBPbryCS
IA7EAd0Aae9YaLlOB+eTORg/F1EWLPm34kKSRtae3gfHuY2cdUmoDVnOF8C9hc0P
bL65G4NWPt+fW7lIj+0+kF19s2PviQI9BBMBCAAnAhsDBQsJCAcDBRUKCQgLBRYC
AwEAAh4BAheABQJRKm2VBQkINsBBAAoJEH/MfUaszEz4RTEP/1sQHyjHaUiAPaCA
v8jw/3SaWP/g8qLjpY6ROjLnDMvwKwRAoxUwcIv4/TWDOMpwJN+CJIbjXsXNYvf9
OX+UTOvq4iwi4ADrAAw2xw+Jomc6EsYla+hkN2FzGzhpXfZFfUsuphjY3FKL+4hX
H+R8ucNwIz3yrkfc17MMn8yFNWFzm4omU9/JeeaafwUoLxlULL2zY7H3+QmxCl0u
6t8VvlszdEFhemLHzVYRY0Ro/ISrR78CnANNsMIy3i11U5uvdeWVCoWV1BXNLzOD
4+BIDbMB/Do8PQCWiliSGZi8lvmj/sKbumMFQonMQWOfQswTtqTyQ3yhUM1LaxK5
PYq13rggi3rA8oq8SYb/KNCQL5pzACji4TRVK0kNpvtxJxe84X8+9IB1vhBvF/Ji
/xDd/3VDNPY+k1a47cON0S8Qc8DA3mq4hRfcgvuWy7ZxoMY7AfSJOhleb9+PzRBB
n9agYgMxZg1RUWZazQ5KuoJqbxpwOYVFja/stItNS4xsmi0lh2I4MNlBEDqnFLUx
SvTDc22c3uJlWhzBM/f2jH19uUeqm4jaggob3iJvJmK+Q7Ns3WcfhuWwCnc1+58d
iFAMRUCRBPeFS0qd56QGk1r97B6+3UfLUslCfaaA8IMOFvQSHJwDO87xWGyxeRTY
IIP9up4xwgje9LB7fMxsSkCDTHOk
=s3DI
-----END PGP PUBLIC KEY BLOCK-----
EOF
echo "Running apt-get update ..."
apt-get update
cat <<EOF
You can now start installing packages from apt.postgresql.org.
Have a look at https://wiki.postgresql.org/wiki/Apt for more information;
most notably the FAQ at https://wiki.postgresql.org/wiki/Apt/FAQ
EOF
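bootstrap.sh runs this helper without arguments, but as its own error text notes, a codename can be forced when detection fails:

sudo sh /tmp/postgres_apt.sh wheezy # pin the wheezy-pgdg repository explicitly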

setup/files/redash_supervisord_init Normal file

@@ -0,0 +1,129 @@
#!/bin/sh
# /etc/init.d/redash_supervisord
### BEGIN INIT INFO
# Provides: supervisord
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: process supervisor
### END INIT INFO
# Author: Ron DuPlain <ron.duplain@gmail.com>
# Do NOT "set -e"
# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin
NAME=supervisord
DESC="process supervisor"
DAEMON=/usr/local/bin/$NAME
DAEMON_ARGS="--configuration /opt/redash/supervisord/supervisord.conf "
PIDFILE=/opt/redash/supervisord/supervisord.pid
SCRIPTNAME=/etc/init.d/redash_supervisord
USER=redash
# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0
# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME
# Load the VERBOSE setting and other rcS variables
. /lib/init/vars.sh
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions
#
# Function that starts the daemon/service
#
do_start()
{
# Return
# 0 if daemon has been started
# 1 if daemon was already running
# 2 if daemon could not be started
start-stop-daemon --start --quiet --pidfile $PIDFILE --user $USER --chuid $USER --exec $DAEMON --test > /dev/null \
|| return 1
start-stop-daemon --start --quiet --pidfile $PIDFILE --user $USER --chuid $USER --exec $DAEMON -- \
$DAEMON_ARGS \
|| return 2
# Add code here, if necessary, that waits for the process to be ready
# to handle requests from services started subsequently which depend
# on this one. As a last resort, sleep for some time.
}
#
# Function that stops the daemon/service
#
do_stop()
{
# Return
# 0 if daemon has been stopped
# 1 if daemon was already stopped
# 2 if daemon could not be stopped
# other if a failure occurred
start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --user $USER --chuid $USER --name $NAME
RETVAL="$?"
[ "$RETVAL" = 2 ] && return 2
# Wait for children to finish too if this is a daemon that forks
# and if the daemon is only ever run from this initscript.
# If the above conditions are not satisfied then add some other code
# that waits for the process to drop all resources that could be
# needed by services started subsequently. A last resort is to
# sleep for some time.
start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --user $USER --chuid $USER --exec $DAEMON
[ "$?" = 2 ] && return 2
# Many daemons don't delete their pidfiles when they exit.
rm -f $PIDFILE
return "$RETVAL"
}
case "$1" in
start)
[ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
do_start
case "$?" in
0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
esac
;;
stop)
[ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
do_stop
case "$?" in
0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
esac
;;
status)
status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
;;
restart)
log_daemon_msg "Restarting $DESC" "$NAME"
do_stop
case "$?" in
0|1)
do_start
case "$?" in
0) log_end_msg 0 ;;
1) log_end_msg 1 ;; # Old process is still running
*) log_end_msg 1 ;; # Failed to start
esac
;;
*)
# Failed to stop
log_end_msg 1
;;
esac
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|status|restart}" >&2
exit 3
;;
esac
:
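Once add_service has registered the script, the usual sysv verbs from the case statement above apply:

sudo service redash_supervisord start
sudo service redash_supervisord status # wraps status_of_proc on the supervisord pidfile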

setup/files/redis.conf Normal file

@@ -0,0 +1,785 @@
## Generated by install_server.sh ##
# Redis configuration file example
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis server but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
# from admin or Redis Sentinel. Since Redis always uses the last processed
# line as value of a configuration directive, you'd better put includes
# at the beginning of this file to avoid overwriting config change at runtime.
#
# If instead you are interested in using includes to override configuration
# options, it is better to use include as the last line.
#
# include /path/to/local.conf
# include /path/to/other.conf
################################ GENERAL #####################################
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize yes
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
# default. You can specify a custom pid file location here.
pidfile /var/run/redis_6379.pid
# Accept connections on the specified port, default is 6379.
# If port 0 is specified Redis will not listen on a TCP socket.
port 6379
# TCP listen() backlog.
#
# In high requests-per-second environments you need a high backlog in order
# to avoid slow client connection issues. Note that the Linux kernel
# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
# in order to get the desired effect.
tcp-backlog 511
# By default Redis listens for connections from all the network interfaces
# available on the server. It is possible to listen to just one or multiple
# interfaces using the "bind" configuration directive, followed by one or
# more IP addresses.
#
# Examples:
#
# bind 192.168.1.100 10.0.0.1
# bind 127.0.0.1
# Specify the path for the Unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
# unixsocketperm 700
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0
# TCP keepalive.
#
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
# of communication. This is useful for two reasons:
#
# 1) Detect dead peers.
# 2) Take the connection alive from the point of view of network
# equipment in the middle.
#
# On Linux, the specified value (in seconds) is the period used to send ACKs.
# Note that to close the connection the double of the time is needed.
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 60 seconds.
tcp-keepalive 0
# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice
# Specify the log file name. Also the empty string can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile /var/log/redis_6379.log
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
# syslog-enabled no
# Specify the syslog identity.
# syslog-ident redis
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16
################################ SNAPSHOTTING ################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving at all commenting all the "save" lines.
#
# It is also possible to remove all the previously configured save
# points by adding a save directive with a single empty string argument
# like in the following example:
#
# save ""
save 900 1
save 300 10
save 60 10000
# By default Redis will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process will start working again Redis will
# automatically allow writes again.
#
# However if you have setup your proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# continue to work as usual even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes
# Compress string objects using LZF when dumping .rdb databases?
# By default that's set to 'yes', as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
# This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
# for maximum performances.
#
# RDB files created with checksum disabled have a checksum of zero that will
# tell the loading code to skip the check.
rdbchecksum yes
# The filename where to dump the DB
dbfilename dump.rdb
# The working directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir /var/lib/redis/6379
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. A few things to understand ASAP about Redis replication.
#
# 1) Redis replication is asynchronous, but you can configure a master to
# stop accepting writes if it appears to be not connected with at least
# a given number of slaves.
# 2) Redis slaves are able to perform a partial resynchronization with the
# master if the replication link is lost for a relatively small amount of
# time. You may want to configure the replication backlog size (see the next
# sections of this file) with a sensible value depending on your needs.
# 3) Replication is automatic and does not need user intervention. After a
# network partition slaves automatically try to reconnect to masters
# and resynchronize with them.
#
# slaveof <masterip> <masterport>
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>
# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all the kind of commands
# but to INFO and SLAVEOF.
#
slave-serve-stale-data yes
# You can configure a slave instance to accept writes or not. Writing against
# a slave instance may be useful to store some ephemeral data (because data
# written on a slave will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
# Since Redis 2.6 by default slaves are read-only.
#
# Note: read only slaves are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
# Still a read only slave exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
# security of read only slaves using 'rename-command' to shadow all the
# administrative / dangerous commands.
slave-read-only yes
# Slaves send PINGs to server in a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10
# The following option sets the replication timeout for:
#
# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
# 2) Master timeout from the point of view of slaves (data, pings).
# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
# every time there is low traffic between the master and the slave.
#
# repl-timeout 60
# Disable TCP_NODELAY on the slave socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# less bandwidth to send data to slaves. But this can add a delay for
# the data to appear on the slave side, up to 40 milliseconds with
# Linux kernels using a default configuration.
#
# If you select "no" the delay for data to appear on the slave side will
# be reduced but more bandwidth will be used for replication.
#
# By default we optimize for low latency, but in very high traffic conditions
# or when the master and slaves are many hops away, turning this to "yes" may
# be a good idea.
repl-disable-tcp-nodelay no
# Set the replication backlog size. The backlog is a buffer that accumulates
# slave data when slaves are disconnected for some time, so that when a slave
# wants to reconnect again, often a full resync is not needed, but a partial
# resync is enough, just passing the portion of data the slave missed while
# disconnected.
#
# The bigger the replication backlog, the longer the slave can be
# disconnected and still be able to perform a partial resynchronization later.
#
# The backlog is only allocated once there is at least a slave connected.
#
# repl-backlog-size 1mb
# After a master has no longer connected slaves for some time, the backlog
# will be freed. The following option configures the amount of seconds that
# need to elapse, starting from the time the last slave disconnected, for
# the backlog buffer to be freed.
#
# A value of 0 means to never release the backlog.
#
# repl-backlog-ttl 3600
# The slave priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100
# It is possible for a master to stop accepting writes if there are less than
# N slaves connected, having a lag less or equal than M seconds.
#
# The N slaves need to be in "online" state.
#
# The lag in seconds, that must be <= the specified value, is calculated from
# the last ping received from the slave, that is usually sent every second.
#
# This option does not GUARANTEE that N replicas will accept the write, but
# it limits the window of exposure for lost writes, in case not enough slaves
# are available, to the specified number of seconds.
#
# For example to require at least 3 slaves with a lag <= 10 seconds use:
#
# min-slaves-to-write 3
# min-slaves-max-lag 10
#
# Setting one or the other to 0 disables the feature.
#
# By default min-slaves-to-write is set to 0 (feature disabled) and
# min-slaves-max-lag is set to 10.
################################## SECURITY ###################################
# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
#
# requirepass foobared
# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to slaves may cause problems.
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
# maxclients 10000
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU cache, or to set
# a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves are subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
# buffer of slaves is full with DELs of keys evicted triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have slaves attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for slave
# output buffers (but this is not needed if the policy is 'noeviction').
#
# maxmemory <bytes>
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select among five behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key accordingly to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
#
# Note: with any of the above policies, Redis will return an error on write
# operations, when there are not suitable keys for eviction.
#
# At the date of writing, these commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
# getset mset msetnx exec sort
#
# The default is:
#
# maxmemory-policy volatile-lru
# LRU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can select as well the sample
# size to check. For instance for default Redis will check three keys and
# pick the one that was used less recently, you can change the sample size
# using the following configuration directive.
#
# maxmemory-samples 3
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the Redis process or
# a power outage may result into a few minutes of writes lost (depending on
# the configured save points).
#
# The Append Only File is an alternative persistence mode that provides
# much better durability. For instance using the default data fsync policy
# (see later in the config file) Redis can lose just one second of writes in a
# dramatic event like a server power outage, or a single write if something
# wrong with the Redis process itself happens, but the operating system is
# still running correctly.
#
# AOF and RDB persistence can be enabled at the same time without problems.
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# with the better durability guarantees.
#
# Please check http://redis.io/topics/persistence for more information.
appendonly no
# The name of the append only file (default: "appendonly.aof")
appendfilename "appendonly.aof"
# The fsync() call tells the Operating System to actually write data on disk
# instead to wait for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log . Slow, Safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec", as that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# More details please check the following article:
# http://antirez.com/post/redis-persistence-demystified.html
#
# If unsure, use "everysec".
# appendfsync always
appendfsync everysec
# appendfsync no
# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
# bigger than the specified percentage, the rewrite is triggered. Also
# you need to specify a minimal size for the AOF file to be rewritten, this
# is useful to avoid rewriting the AOF file even if the percentage increase
# is reached but it is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
# An AOF file may be found to be truncated at the end during the Redis
# startup process, when the AOF data gets loaded back into memory.
# This may happen when the system where Redis is running
# crashes, especially when an ext4 filesystem is mounted without the
# data=ordered option (however this can't happen when Redis itself
# crashes or aborts but the operating system still works correctly).
#
# Redis can either exit with an error when this happens, or load as much
# data as possible (the default now) and start if the AOF file is found
# to be truncated at the end. The following option controls this behavior.
#
# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
# the Redis server starts emitting a log to inform the user of the event.
# Otherwise if the option is set to no, the server aborts with an error
# and refuses to start. When the option is set to no, the user requires
# to fix the AOF file using the "redis-check-aof" utility before to restart
# the server.
#
# Note that if the AOF file will be found to be corrupted in the middle
# the server will still exit with an error. This option only applies when
# Redis will try to read more data from the AOF file but not enough bytes
# will be found.
aof-load-truncated yes
################################ LUA SCRIPTING ###############################
# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceed the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that did not yet called write commands. The second
# is the only way to shut down the server in the case a write commands was
# already issue by the script but the user don't want to wait for the natural
# termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.
# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000
# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
################################ LATENCY MONITOR ##############################
# The Redis latency monitoring subsystem samples different operations
# at runtime in order to collect data related to possible sources of
# latency of a Redis instance.
#
# Via the LATENCY command this information is available to the user that can
# print graphs and obtain reports.
#
# The system only logs operations that were performed in a time equal or
# greater than the amount of milliseconds specified via the
# latency-monitor-threshold configuration directive. When its value is set
# to zero, the latency monitor is turned off.
#
# By default latency monitoring is disabled since it is mostly not needed
# if you don't have latency issues, and collecting data has a performance
# impact, that while very small, can be measured under big load. Latency
# monitoring can easily be enalbed at runtime using the command
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
latency-monitor-threshold 0
############################# Event notification ##############################
# Redis can notify Pub/Sub clients about events happening in the key space.
# This feature is documented at http://redis.io/topics/notifications
#
# For instance if keyspace events notification is enabled, and a client
# performs a DEL operation on key "foo" stored in the Database 0, two
# messages will be published via Pub/Sub:
#
# PUBLISH __keyspace@0__:foo del
# PUBLISH __keyevent@0__:del foo
#
# It is possible to select the events that Redis will notify among a set
# of classes. Every class is identified by a single character:
#
# K Keyspace events, published with __keyspace@<db>__ prefix.
# E Keyevent events, published with __keyevent@<db>__ prefix.
# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
# $ String commands
# l List commands
# s Set commands
# h Hash commands
# z Sorted set commands
# x Expired events (events generated every time a key expires)
# e Evicted events (events generated when a key is evicted for maxmemory)
# A Alias for g$lshzxe, so that the "AKE" string means all the events.
#
# The "notify-keyspace-events" takes as argument a string that is composed
# by zero or multiple characters. The empty string means that notifications
# are disabled at all.
#
# Example: to enable list and generic events, from the point of view of the
# event name, use:
#
# notify-keyspace-events Elg
#
# Example 2: to get the stream of the expired keys subscribing to channel
# name __keyevent@0__:expired use:
#
# notify-keyspace-events Ex
#
# By default all notifications are disabled because most users don't need
# this feature and the feature has some overhead. Note that if you don't
# specify at least one of K or E, no events will be delivered.
notify-keyspace-events ""
############################### ADVANCED CONFIG ###############################
# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64
# Sets have a special encoding in just one case: when a set is composed
# of just strings that happens to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit in the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512
# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
# HyperLogLog sparse representation bytes limit. The limit includes the
# 16 bytes header. When an HyperLogLog using the sparse representation crosses
# this limit, it is converted into the dense representation.
#
# A value greater than 16000 is totally useless, since at that point the
# dense representation is more memory efficient.
#
# The suggested value is ~ 3000 in order to have the benefits of
# the space efficient encoding without slowing down too much PFADD,
# which is O(N) with the sparse encoding. The value can be raised to
# ~ 10000 when CPU is not a concern, but space is, and the data set is
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run against a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# active rehashing the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with a 2 millisecond delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients including MONITOR clients
# slave -> slave clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reach 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously overcomes
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard or the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform accordingly to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10
# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
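A quick way to confirm the daemonized instance picked this file up (assuming redis-cli from the source build is on the PATH):

redis-cli -p 6379 ping # expect PONG
redis-cli -p 6379 config get logfile # expect /var/log/redis_6379.log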

setup/files/redis_init Normal file

@@ -0,0 +1,66 @@
#!/bin/sh
EXEC=/usr/local/bin/redis-server
CLIEXEC=/usr/local/bin/redis-cli
PIDFILE=/var/run/redis_6379.pid
CONF="/etc/redis/6379.conf"
REDISPORT="6379"
###############
# SysV Init Information
# chkconfig: - 58 74
# description: redis_6379 is the redis daemon.
### BEGIN INIT INFO
# Provides: redis_6379
# Required-Start: $network $local_fs $remote_fs
# Required-Stop: $network $local_fs $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Should-Start: $syslog $named
# Should-Stop: $syslog $named
# Short-Description: start and stop redis_6379
# Description: Redis daemon
### END INIT INFO
case "$1" in
start)
if [ -f $PIDFILE ]
then
echo "$PIDFILE exists, process is already running or crashed"
else
echo "Starting Redis server..."
$EXEC $CONF
fi
;;
stop)
if [ ! -f $PIDFILE ]
then
echo "$PIDFILE does not exist, process is not running"
else
PID=$(cat $PIDFILE)
echo "Stopping ..."
$CLIEXEC -p $REDISPORT shutdown
while [ -x /proc/${PID} ]
do
echo "Waiting for Redis to shutdown ..."
sleep 1
done
echo "Redis stopped"
fi
;;
status)
if [ ! -f $PIDFILE ]
then
echo 'Redis is not running'
else
echo "Redis is running ($(<$PIDFILE))"
fi
;;
restart)
$0 stop
$0 start
;;
*)
echo "Please use start, stop, restart or status as first argument"
;;
esac

setup/files/supervisord.conf Normal file

@@ -0,0 +1,31 @@
[supervisord]
nodaemon=false
logfile=/opt/redash/logs/supervisord.log
pidfile=/opt/redash/supervisord/supervisord.pid
directory=/opt/redash/current
[inet_http_server]
port = 127.0.0.1:9001
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[program:redash_server]
command=/opt/redash/current/bin/run gunicorn -b 127.0.0.1:5000 --name redash -w 4 redash.wsgi:app
process_name=redash_server
numprocs=1
priority=999
autostart=true
autorestart=true
stdout_logfile=/opt/redash/logs/api.log
stderr_logfile=/opt/redash/logs/api_error.log
[program:redash_celery]
command=/opt/redash/current/bin/run celery worker --app=redash.worker --beat -Qqueries,celery,scheduled_queries
process_name=redash_celery
numprocs=1
priority=999
autostart=true
autorestart=true
stdout_logfile=/opt/redash/logs/celery.log
stderr_logfile=/opt/redash/logs/celery_error.log
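Since the config exposes an inet_http_server on 127.0.0.1:9001, supervisorctl can talk to it directly; a plausible post-boot check:

supervisorctl -s http://127.0.0.1:9001 status # both redash_server and redash_celery should be RUNNING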

49
setup/packer.json Normal file

@@ -0,0 +1,49 @@
{
"variables": {
"aws_access_key": "",
"aws_secret_key": "",
"redash_version": "0.4.0.b589",
"image_version": "040b589"
},
"builders": [
{
"name": "redash-us-east-1",
"type": "amazon-ebs",
"access_key": "{{user `aws_access_key`}}",
"secret_key": "{{user `aws_secret_key`}}",
"region": "us-east-1",
"source_ami": "ami-fe7cc796",
"instance_type": "t2.micro",
"ssh_username": "ubuntu",
"ami_name": "redash-{{user `image_version`}}-us-east-1"
},
{
"name": "redash-eu-west-1",
"type": "amazon-ebs",
"access_key": "{{user `aws_access_key`}}",
"secret_key": "{{user `aws_secret_key`}}",
"region": "eu-west-1",
"source_ami": "ami-d2ff50a5",
"instance_type": "t2.micro",
"ssh_username": "ubuntu",
"ami_name": "redash-{{user `image_version`}}-eu-west-1"
},
{
"type": "googlecompute",
"bucket_name": "redash-images",
"account_file": "account.json",
"client_secrets_file": "client_secret.json",
"project_id": "redash-bird-123",
"source_image": "debian-7-wheezy-v20141017",
"zone": "us-central1-a",
"image_name": "redash-{{user `image_version`}}"
}
],
"provisioners": [
{
"type": "shell",
"script": "bootstrap.sh",
"environment_vars": ["REDASH_VERSION={{user `redash_version`}}"]
}
]
}
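
The template builds three images (two AWS regions plus Google Compute Engine) from the same bootstrap script. A small sketch showing what it declares, assuming the file is saved as setup/packer.json:

    import json

    with open('setup/packer.json') as f:
        template = json.load(f)

    print(template['variables']['redash_version'])  # 0.4.0.b589
    for builder in template['builders']:
        # AWS builders name an AMI, the GCE builder names an image
        print(builder.get('ami_name') or builder.get('image_name'))

An actual build would be kicked off with something like `packer build setup/packer.json`, overriding aws_access_key / aws_secret_key via -var as needed.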


@@ -60,6 +60,7 @@ query_factory = ModelFactory(redash.models.Query,
                               query='SELECT 1',
                               ttl=-1,
                               user=user_factory.create,
+                              is_archived=False,
                               data_source=data_source_factory.create)

 query_result_factory = ModelFactory(redash.models.QueryResult,
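
The added default matters because the archive tests further down assume a fresh query starts unarchived. An illustrative sketch of the factory in use:

    from tests.factories import query_factory

    query = query_factory.create()   # is_archived defaults to False
    query.archive()                  # flips the flag; see the models tests below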

File diff suppressed because one or more lines are too long


@@ -1,52 +1,62 @@
-from unittest import TestCase
+from flask.ext.login import current_user
 from mock import patch
-from flask_googleauth import ObjectDict
 from tests import BaseTestCase
-from redash.authentication import validate_email, create_and_login_user
-from redash import settings, models
-from tests.factories import user_factory
+from redash import models
+from redash.google_oauth import create_and_login_user
+from redash.authentication import ApiKeyAuthentication
+from tests.factories import user_factory, query_factory
+from redash.wsgi import app

-class TestEmailValidation(TestCase):
-    def test_accepts_address_with_correct_domain(self):
-        with patch.object(settings, 'GOOGLE_APPS_DOMAIN', 'example.com'):
-            self.assertTrue(validate_email('example@example.com'))
+class TestApiKeyAuthentication(BaseTestCase):
+    #
+    # This is a bad way to write these tests, but the way Flask works doesn't make it easy to write them properly...
+    #
+    def setUp(self):
+        super(TestApiKeyAuthentication, self).setUp()
+        self.api_key = 10
+        self.query = query_factory.create(api_key=self.api_key)

-    def test_accepts_address_from_exception_list(self):
-        with patch.multiple(settings, GOOGLE_APPS_DOMAIN='example.com', ALLOWED_EXTERNAL_USERS=['whatever@whatever.com']):
-            self.assertTrue(validate_email('whatever@whatever.com'))
+    def test_no_api_key(self):
+        auth = ApiKeyAuthentication()
+        with app.test_client() as c:
+            rv = c.get('/api/queries/{0}'.format(self.query.id))
+            self.assertFalse(auth.verify_authentication())

-    def test_accept_any_address_when_domain_empty(self):
-        with patch.object(settings, 'GOOGLE_APPS_DOMAIN', None):
-            self.assertTrue(validate_email('whatever@whatever.com'))
+    def test_wrong_api_key(self):
+        auth = ApiKeyAuthentication()
+        with app.test_client() as c:
+            rv = c.get('/api/queries/{0}'.format(self.query.id), query_string={'api_key': 'whatever'})
+            self.assertFalse(auth.verify_authentication())

-    def test_rejects_address_with_incorrect_domain(self):
-        with patch.object(settings, 'GOOGLE_APPS_DOMAIN', 'example.com'):
-            self.assertFalse(validate_email('whatever@whatever.com'))
+    def test_correct_api_key(self):
+        auth = ApiKeyAuthentication()
+        with app.test_client() as c:
+            rv = c.get('/api/queries/{0}'.format(self.query.id), query_string={'api_key': self.api_key})
+            self.assertTrue(auth.verify_authentication())
+
+    def test_no_query_id(self):
+        auth = ApiKeyAuthentication()
+        with app.test_client() as c:
+            rv = c.get('/api/queries', query_string={'api_key': self.api_key})
+            self.assertFalse(auth.verify_authentication())

 class TestCreateAndLoginUser(BaseTestCase):
     def test_logins_valid_user(self):
         user = user_factory.create(email='test@example.com')

-        with patch.object(settings, 'GOOGLE_APPS_DOMAIN', 'example.com'), patch('redash.authentication.login_user') as login_user_mock:
-            create_and_login_user(None, user)
+        with patch('redash.google_oauth.login_user') as login_user_mock:
+            create_and_login_user(user.name, user.email)
             login_user_mock.assert_called_once_with(user, remember=True)

     def test_creates_vaild_new_user(self):
-        openid_user = ObjectDict({'email': 'test@example.com', 'name': 'Test User'})
+        email = 'test@example.com'
+        name = 'Test User'

-        with patch.multiple(settings, GOOGLE_APPS_DOMAIN='example.com'), \
-                patch('redash.authentication.login_user') as login_user_mock:
-            create_and_login_user(None, openid_user)
+        with patch('redash.google_oauth.login_user') as login_user_mock:
+            create_and_login_user(name, email)
             self.assertTrue(login_user_mock.called)
-            user = models.User.get(models.User.email == openid_user.email)
-
-    def test_ignores_invliad_user(self):
-        user = ObjectDict({'email': 'test@whatever.com'})
-        with patch.object(settings, 'GOOGLE_APPS_DOMAIN', 'example.com'), patch('redash.authentication.login_user') as login_user_mock:
-            create_and_login_user(None, user)
-            self.assertFalse(login_user_mock.called)
+            user = models.User.get(models.User.email == email)
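
In practice, the feature under test lets a query be fetched with its per-query API key instead of a session cookie, passed as a query-string parameter just as the tests do. A hedged usage sketch (host, query id and key below are placeholders, not values from this diff):

    import requests

    resp = requests.get('https://redash.example.com/api/queries/1',
                        params={'api_key': 'secret-api-key'})
    print(resp.status_code)  # 200 when the key matches that query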


@@ -380,7 +380,7 @@ class TestLogin(BaseTestCase):
         with app.test_client() as c, patch.object(settings, 'PASSWORD_LOGIN_ENABLED', False):
             rv = c.get('/login')
             self.assertEquals(rv.status_code, 302)
-            self.assertTrue(rv.location.endswith(url_for('GoogleAuth.login')))
+            self.assertTrue(rv.location.endswith(url_for('google_oauth.authorize')))

     def test_get_login_form(self):
         with app.test_client() as c:

@@ -472,4 +472,45 @@
             self.assertTrue(current_user.is_authenticated())
             rv = c.get('/logout')
             self.assertEquals(rv.status_code, 302)
             self.assertFalse(current_user.is_authenticated())
+
+
+class DataSourceTypesTest(BaseTestCase):
+    def test_returns_data_for_admin(self):
+        admin = user_factory.create(groups=['admin', 'default'])
+        with app.test_client() as c, authenticated_user(c, user=admin):
+            rv = c.get("/api/data_sources/types")
+            self.assertEqual(rv.status_code, 200)
+
+    def test_returns_403_for_non_admin(self):
+        with app.test_client() as c, authenticated_user(c):
+            rv = c.get("/api/data_sources/types")
+            self.assertEqual(rv.status_code, 403)
+
+
+class DataSourceTest(BaseTestCase):
+    def test_returns_400_when_missing_fields(self):
+        admin = user_factory.create(groups=['admin', 'default'])
+        with app.test_client() as c, authenticated_user(c, user=admin):
+            rv = c.post("/api/data_sources")
+            self.assertEqual(rv.status_code, 400)
+
+            rv = json_request(c.post, '/api/data_sources', data={'name': 'DS 1'})
+            self.assertEqual(rv.status_code, 400)
+
+    def test_returns_400_when_configuration_invalid(self):
+        admin = user_factory.create(groups=['admin', 'default'])
+        with app.test_client() as c, authenticated_user(c, user=admin):
+            rv = json_request(c.post, '/api/data_sources',
+                              data={'name': 'DS 1', 'type': 'pg', 'options': '{}'})
+            self.assertEqual(rv.status_code, 400)
+
+    def test_creates_data_source(self):
+        admin = user_factory.create(groups=['admin', 'default'])
+        with app.test_client() as c, authenticated_user(c, user=admin):
+            rv = json_request(c.post, '/api/data_sources',
+                              data={'name': 'DS 1', 'type': 'pg', 'options': '{"dbname": "redash"}'})
+            self.assertEqual(rv.status_code, 200)
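
The shape of the request the passing test sends, as a standalone sketch (host and session handling omitted; json_request is the test suite's helper): "options" is a JSON string that must satisfy the pg runner's configuration schema.

    import json

    payload = {
        'name': 'DS 1',
        'type': 'pg',
        'options': json.dumps({'dbname': 'redash'}),  # validated against the pg schema
    }
    # POSTed by an admin to /api/data_sources this returns 200; with
    # options='{}' the JSON Schema validation fails and the API returns 400.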


@@ -26,9 +26,8 @@ class ImportTest(BaseTestCase):
         self.assertEqual(dashboard.widgets.count(),
                          reduce(lambda s, row: s + len(row), self.dashboard['widgets'], 0))
-        self.assertEqual(models.Visualization.select().count(), dashboard.widgets.count())
-        self.assertEqual(models.Query.select().count(), dashboard.widgets.count()-1)
-        self.assertEqual(models.QueryResult.select().count(), dashboard.widgets.count()-1)
+        self.assertEqual(models.Visualization.select().count(), dashboard.widgets.count()-1)
+        self.assertEqual(models.Query.select().count(), dashboard.widgets.count()-2)

     def test_imports_updates_existing_models(self):
         importer = import_export.Importer(data_source=data_source_factory.create())


@@ -1,7 +1,9 @@
 #encoding: utf8
+import datetime
+import json
 from tests import BaseTestCase
 from redash import models
-from factories import dashboard_factory, query_factory, data_source_factory, query_result_factory
+from factories import dashboard_factory, query_factory, data_source_factory, query_result_factory, user_factory, widget_factory
 from redash.utils import gen_query_hash
@@ -29,6 +31,87 @@ class QueryTest(BaseTestCase):
         self.assertNotEquals(old_hash, q.query_hash)

+    def test_search_finds_in_name(self):
+        q1 = query_factory.create(name=u"Testing seåřċħ")
+        q2 = query_factory.create(name=u"Testing seåřċħing")
+        q3 = query_factory.create(name=u"Testing seå řċħ")
+
+        queries = models.Query.search(u"seåřċħ")
+
+        self.assertIn(q1, queries)
+        self.assertIn(q2, queries)
+        self.assertNotIn(q3, queries)
+
+    def test_search_finds_in_description(self):
+        q1 = query_factory.create(description=u"Testing seåřċħ")
+        q2 = query_factory.create(description=u"Testing seåřċħing")
+        q3 = query_factory.create(description=u"Testing seå řċħ")
+
+        queries = models.Query.search(u"seåřċħ")
+
+        self.assertIn(q1, queries)
+        self.assertIn(q2, queries)
+        self.assertNotIn(q3, queries)
+
+    def test_search_by_id_returns_query(self):
+        q1 = query_factory.create(description="Testing search")
+        q2 = query_factory.create(description="Testing searching")
+        q3 = query_factory.create(description="Testing sea rch")
+
+        queries = models.Query.search(str(q3.id))
+
+        self.assertIn(q3, queries)
+        self.assertNotIn(q1, queries)
+        self.assertNotIn(q2, queries)
+
+
+class QueryArchiveTest(BaseTestCase):
+    def setUp(self):
+        super(QueryArchiveTest, self).setUp()
+
+    def test_archive_query_sets_flag(self):
+        query = query_factory.create(ttl=1)
+        query.archive()
+
+        query = models.Query.get_by_id(query.id)
+        self.assertEquals(query.is_archived, True)
+
+    def test_archived_query_doesnt_return_in_all(self):
+        query = query_factory.create(ttl=1)
+        yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
+        query_result = models.QueryResult.store_result(query.data_source.id, query.query_hash, query.query, "1",
+                                                       123, yesterday)
+        query.latest_query_data = query_result
+        query.save()
+
+        self.assertIn(query, models.Query.all_queries())
+        self.assertIn(query, models.Query.outdated_queries())
+
+        query.archive()
+
+        self.assertNotIn(query, models.Query.all_queries())
+        self.assertNotIn(query, models.Query.outdated_queries())
+
+    def test_removes_associated_widgets_from_dashboards(self):
+        widget = widget_factory.create()
+        query = widget.visualization.query
+
+        query.archive()
+
+        self.assertRaises(models.Widget.DoesNotExist, models.Widget.get_by_id, widget.id)
+
+    def test_removes_scheduling(self):
+        query = query_factory.create(ttl=1)
+
+        query.archive()
+
+        query = models.Query.get_by_id(query.id)
+        self.assertEqual(-1, query.ttl)


 class QueryResultTest(BaseTestCase):
     def setUp(self):
@@ -93,6 +176,26 @@ class QueryResultTest(BaseTestCase):
         self.assertEqual(found_query_result.id, qr.id)

+
+class TestUnusedQueryResults(BaseTestCase):
+    def test_returns_only_unused_query_results(self):
+        two_weeks_ago = datetime.datetime.now() - datetime.timedelta(days=14)
+        qr = query_result_factory.create()
+        query = query_factory.create(latest_query_data=qr)
+        unused_qr = query_result_factory.create(retrieved_at=two_weeks_ago)
+
+        self.assertIn(unused_qr, models.QueryResult.unused())
+        self.assertNotIn(qr, models.QueryResult.unused())
+
+    def test_returns_only_over_a_week_old_results(self):
+        two_weeks_ago = datetime.datetime.now() - datetime.timedelta(days=14)
+        unused_qr = query_result_factory.create(retrieved_at=two_weeks_ago)
+        new_unused_qr = query_result_factory.create()
+
+        self.assertIn(unused_qr, models.QueryResult.unused())
+        self.assertNotIn(new_unused_qr, models.QueryResult.unused())
+

 class TestQueryResultStoreResult(BaseTestCase):
     def setUp(self):
         super(TestQueryResultStoreResult, self).setUp()
@@ -148,4 +251,59 @@ class TestQueryResultStoreResult(BaseTestCase):
         self.assertEqual(models.Query.get_by_id(query1.id)._data['latest_query_data'], query_result.id)
         self.assertEqual(models.Query.get_by_id(query2.id)._data['latest_query_data'], query_result.id)
-        self.assertNotEqual(models.Query.get_by_id(query3.id)._data['latest_query_data'], query_result.id)
+        self.assertNotEqual(models.Query.get_by_id(query3.id)._data['latest_query_data'], query_result.id)
+
+
+class TestEvents(BaseTestCase):
+    def raw_event(self):
+        timestamp = 1411778709.791
+        user = user_factory.create()
+        created_at = datetime.datetime.utcfromtimestamp(timestamp)
+        raw_event = {"action": "view",
+                     "timestamp": timestamp,
+                     "object_type": "dashboard",
+                     "user_id": user.id,
+                     "object_id": 1}
+
+        return raw_event, user, created_at
+
+    def test_records_event(self):
+        raw_event, user, created_at = self.raw_event()
+
+        event = models.Event.record(raw_event)
+
+        self.assertEqual(event.user, user)
+        self.assertEqual(event.action, "view")
+        self.assertEqual(event.object_type, "dashboard")
+        self.assertEqual(event.object_id, 1)
+        self.assertEqual(event.created_at, created_at)
+
+    def test_records_additional_properties(self):
+        raw_event, _, _ = self.raw_event()
+        additional_properties = {'test': 1, 'test2': 2, 'whatever': "abc"}
+        raw_event.update(additional_properties)
+
+        event = models.Event.record(raw_event)
+
+        self.assertDictEqual(json.loads(event.additional_properties), additional_properties)
+
+
+class TestWidgetDeleteInstance(BaseTestCase):
+    def test_delete_removes_from_layout(self):
+        widget = widget_factory.create()
+        widget2 = widget_factory.create(dashboard=widget.dashboard)
+        widget.dashboard.layout = json.dumps([[widget.id, widget2.id]])
+        widget.dashboard.save()
+
+        widget.delete_instance()
+
+        self.assertEquals(json.dumps([[widget2.id]]), widget.dashboard.layout)
+
+    def test_delete_removes_empty_rows(self):
+        widget = widget_factory.create()
+        widget2 = widget_factory.create(dashboard=widget.dashboard)
+        widget.dashboard.layout = json.dumps([[widget.id, widget2.id]])
+        widget.dashboard.save()
+
+        widget.delete_instance()
+        widget2.delete_instance()
+
+        self.assertEquals("[]", widget.dashboard.layout)