Mirror of https://github.com/getredash/redash.git (synced 2025-12-25 01:03:20 -05:00)

Compare commits: v0.4.0+b56 ... v0.6.1-rc (267 commits)
.landscape.yaml (new file, 2 lines)

```yaml
ignore-paths:
  - migrations
```
Makefile (5 changed lines, 2 hunks)

```diff
@@ -1,6 +1,7 @@
 NAME=redash
 VERSION=`python ./manage.py version`
 FULL_VERSION=$(VERSION)+b$(CIRCLE_BUILD_NUM)
 BASE_VERSION=$(shell python ./manage.py version | cut -d + -f 1)
 # VERSION gets evaluated every time it's referenced, therefore we need to use VERSION here instead of FULL_VERSION.
 FILENAME=$(CIRCLE_ARTIFACTS)/$(NAME).$(VERSION).tar.gz
@@ -15,8 +16,8 @@ pack:
 	tar -zcv -f $(FILENAME) --exclude=".git*" --exclude="*.pyc" --exclude="*.pyo" --exclude="venv" --exclude="rd_ui/node_modules" --exclude="rd_ui/dist/bower_components" --exclude="rd_ui/app" *

 upload:
-	python bin/upload_version.py $(VERSION) $(FILENAME)
+	python bin/release_manager.py $(CIRCLE_SHA1) $(BASE_VERSION) $(FILENAME)

 test:
 	nosetests --with-coverage --cover-package=redash tests/*.py
-	cd rd_ui && grunt test
+	#cd rd_ui && grunt test
```
Procfile (1 hunk)

```diff
@@ -1,2 +1,2 @@
-web: ./manage.py runserver -p $PORT
+web: ./manage.py runserver -p $PORT --host 0.0.0.0
 worker: ./bin/run celery worker --app=redash.worker --beat -Qqueries,celery,scheduled_queries
```
README.md (2 hunks)

```diff
@@ -1,6 +1,5 @@
 <p align="center">
-  <img title="re:dash" src='https://raw.githubusercontent.com/EverythingMe/redash/screenshots/redash_logo.png' />
-
+  <img title="re:dash" src='http://redash.io/static/img/redash_logo.png' width="200px"/>
 </p>
 <p align="center">
   <img title="Build Status" src='https://circleci.com/gh/EverythingMe/redash.png?circle-token=8a695aa5ec2cbfa89b48c275aea298318016f040'/>
@@ -28,7 +27,7 @@ You can try out the demo instance: http://demo.redash.io/ (login with any Google

 ## Getting Started

-* [Setting re:dash on your own server (Ubuntu)](https://github.com/EverythingMe/redash/wiki/Setting-re:dash-on-your-own-server-(for-version-0.4))
+* [Setting up re:dash instance](https://github.com/EverythingMe/redash/wiki/Setting-up-re:dash-instance) (includes links to ready made AWS/GCE images).
 * Additional documentation in the [Wiki](https://github.com/everythingme/redash/wiki).
```
Vagrantfile (new file, 11 lines, vendored)

```ruby
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = "redash/dev"
  config.vm.synced_folder "./", "/opt/redash/current"
  config.vm.network "forwarded_port", guest: 5000, host: 9001
end
```
Deleted file (30 lines):

```python
#!/usr/bin/env python
import sys
import requests


if __name__ == '__main__':
    response = requests.get('https://api.github.com/repos/EverythingMe/redash/releases')

    if response.status_code != 200:
        exit("Failed getting releases (status code: %s)." % response.status_code)

    sorted_releases = sorted(response.json(), key=lambda release: release['id'], reverse=True)

    latest_release = sorted_releases[0]
    asset_url = latest_release['assets'][0]['url']
    filename = latest_release['assets'][0]['name']

    wget_command = 'wget --header="Accept: application/octet-stream" %s -O %s' % (asset_url, filename)

    if '--url-only' in sys.argv:
        print asset_url
    elif '--wget' in sys.argv:
        print wget_command
    else:
        print "Latest release: %s" % latest_release['tag_name']
        print latest_release['body']

        print "\nTarball URL: %s" % asset_url
        print 'wget: %s' % (wget_command)
```
bin/release_manager.py (new file, 130 lines)

```python
import os
import sys
import json
import re
import subprocess
import requests

github_token = os.environ['GITHUB_TOKEN']
auth = (github_token, 'x-oauth-basic')
repo = 'EverythingMe/redash'


def _github_request(method, path, params=None, headers={}):
    if not path.startswith('https://api.github.com'):
        url = "https://api.github.com/{}".format(path)
    else:
        url = path

    if params is not None:
        params = json.dumps(params)

    response = requests.request(method, url, data=params, auth=auth)
    return response


def exception_from_error(message, response):
    return Exception("({}) {}: {}".format(response.status_code, message, response.json().get('message', '?')))


def rc_tag_name(version):
    return "v{}-rc".format(version)


def get_rc_release(version):
    tag = rc_tag_name(version)
    response = _github_request('get', 'repos/{}/releases/tags/{}'.format(repo, tag))

    if response.status_code == 404:
        return None
    elif response.status_code == 200:
        return response.json()

    raise exception_from_error("Unknown error while looking RC release: ", response)


def create_release(version, commit_sha):
    tag = rc_tag_name(version)

    params = {
        'tag_name': tag,
        'name': "{} - RC".format(version),
        'target_commitish': commit_sha,
        'prerelease': True
    }

    response = _github_request('post', 'repos/{}/releases'.format(repo), params)

    if response.status_code != 201:
        raise exception_from_error("Failed creating new release", response)

    return response.json()


def upload_asset(release, filepath):
    upload_url = release['upload_url'].replace('{?name}', '')
    filename = filepath.split('/')[-1]

    with open(filepath) as file_content:
        headers = {'Content-Type': 'application/gzip'}
        response = requests.post(upload_url, file_content, params={'name': filename}, headers=headers, auth=auth, verify=False)

    if response.status_code != 201:  # not 200/201/...
        raise exception_from_error('Failed uploading asset', response)

    return response


def remove_previous_builds(release):
    for asset in release['assets']:
        response = _github_request('delete', asset['url'])
        if response.status_code != 204:
            raise exception_from_error("Failed deleting asset", response)


def get_changelog(commit_sha):
    latest_release = _github_request('get', 'repos/{}/releases/latest'.format(repo))
    if latest_release.status_code != 200:
        raise exception_from_error('Failed getting latest release', latest_release)

    latest_release = latest_release.json()
    previous_sha = latest_release['target_commitish']

    args = ['git', '--no-pager', 'log', '--merges', '--grep', 'Merge pull request', '--pretty=format:"%h|%s|%b|%p"', '{}...{}'.format(previous_sha, commit_sha)]
    log = subprocess.check_output(args)
    changes = ["Changes since {}:".format(latest_release['name'])]

    for line in log.split('\n'):
        try:
            sha, subject, body, parents = line[1:-1].split('|')
        except ValueError:
            continue

        try:
            pull_request = re.match("Merge pull request #(\d+)", subject).groups()[0]
            pull_request = " #{}".format(pull_request)
        except Exception, ex:
            pull_request = ""

        author = subprocess.check_output(['git', 'log', '-1', '--pretty=format:"%an"', parents.split(' ')[-1]])[1:-1]

        changes.append("{}{}: {} ({})".format(sha, pull_request, body.strip(), author))

    return "\n".join(changes)


def update_release(version, build_filepath, commit_sha):
    try:
        release = get_rc_release(version) or create_release(version, commit_sha)
        print "Using release id: {}".format(release['id'])

        remove_previous_builds(release)
        response = upload_asset(release, build_filepath)

        changelog = get_changelog(commit_sha)

        response = _github_request('patch', release['url'], {'body': changelog})
        if response.status_code != 200:
            raise exception_from_error("Failed updating release description", response)

    except Exception, ex:
        print ex


if __name__ == '__main__':
    commit_sha = sys.argv[1]
    version = sys.argv[2]
    filepath = sys.argv[3]

    # TODO: make sure running from git directory & remote = repo
    update_release(version, filepath, commit_sha)
```
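For context, the script is driven by CI through the Makefile's `upload` target shown above, which passes the CircleCI commit SHA, the base version, and the tarball path as positional arguments. A minimal sketch of an equivalent invocation; the argument values here are hypothetical examples, not real build artifacts:

```python
# Sketch of the CI invocation wired up in the Makefile "upload" target.
# The three positional values below are made-up examples.
import subprocess

subprocess.check_call([
    "python", "bin/release_manager.py",
    "abc1234",                              # $(CIRCLE_SHA1): commit being built
    "0.6.1",                                # $(BASE_VERSION): version without the +bN build suffix
    "/tmp/artifacts/redash.0.6.1.tar.gz",   # $(FILENAME): the packed tarball
])
```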
bin/upload_version.py (deleted, 46 lines; superseded by bin/release_manager.py, see the Makefile upload target above)

```python
#!python
import os
import sys
import json
import requests
import subprocess


def capture_output(command):
    proc = subprocess.Popen(command, stdout=subprocess.PIPE)
    return proc.stdout.read()


if __name__ == '__main__':
    version = sys.argv[1]
    filepath = sys.argv[2]
    filename = filepath.split('/')[-1]
    github_token = os.environ['GITHUB_TOKEN']
    auth = (github_token, 'x-oauth-basic')
    commit_sha = os.environ['CIRCLE_SHA1']

    commit_body = capture_output(["git", "log", "--format=%b", "-n", "1", commit_sha])
    file_md5_checksum = capture_output(["md5sum", filepath]).split()[0]
    file_sha256_checksum = capture_output(["sha256sum", filepath]).split()[0]
    version_body = "%s\n\nMD5: %s\nSHA256: %s" % (commit_body, file_md5_checksum, file_sha256_checksum)

    params = json.dumps({
        'tag_name': 'v{0}'.format(version),
        'name': 're:dash v{0}'.format(version),
        'body': version_body,
        'target_commitish': commit_sha,
        'prerelease': True
    })

    response = requests.post('https://api.github.com/repos/everythingme/redash/releases',
                             data=params,
                             auth=auth)

    upload_url = response.json()['upload_url']
    upload_url = upload_url.replace('{?name}', '')

    with open(filepath) as file_content:
        headers = {'Content-Type': 'application/gzip'}
        response = requests.post(upload_url, file_content, params={'name': filename}, auth=auth,
                                 headers=headers, verify=False)
```
circle.yml (2 hunks)

```diff
@@ -7,6 +7,9 @@ machine:
       2.7.3
 dependencies:
   pre:
+    - wget http://downloads.sourceforge.net/project/optipng/OptiPNG/optipng-0.7.5/optipng-0.7.5.tar.gz
+    - tar xvf optipng-0.7.5.tar.gz
+    - cd optipng-0.7.5; ./configure; make; sudo checkinstall -y;
     - make deps
     - pip install -r dev_requirements.txt
     - pip install -r requirements.txt
@@ -26,3 +29,7 @@ deployment:
 notify:
   webhooks:
     - url: https://webhooks.gitter.im/e/895d09c3165a0913ac2f
+general:
+  branches:
+    ignore:
+      - gh-pages
```
manage.py (137 changed lines, 4 hunks)

```diff
@@ -2,17 +2,19 @@
 """
 CLI to manage redash.
 """
 import datetime
-from flask.ext.script import Manager, prompt_pass
+from flask.ext.script import Manager

 from redash import settings, models, __version__
 from redash.wsgi import app
 from redash.import_export import import_manager
+from redash.cli import users, database, data_sources

 manager = Manager(app)
-database_manager = Manager(help="Manages the database (create/drop tables).")
-users_manager = Manager(help="Users management commands.")
-data_sources_manager = Manager(help="Data sources management commands.")
+manager.add_command("database", database.manager)
+manager.add_command("users", users.manager)
+manager.add_command("import", import_manager)
+manager.add_command("ds", data_sources.manager)


 @manager.command
 def version():
@@ -22,7 +24,7 @@ def version():

 @manager.command
 def runworkers():
-    """Prints deprecation warning."""
+    """Start workers (deprecated)."""
     print "** This command is deprecated. Please use Celery's CLI to control the workers. **"

@@ -31,8 +33,10 @@ def make_shell_context():
     from redash.models import db
     return dict(app=app, db=db, models=models)


 @manager.command
 def check_settings():
     """Show the settings as re:dash sees them (useful for debugging)."""
     from types import ModuleType

     for name in dir(settings):
@@ -40,125 +44,6 @@ def check_settings():
         if not callable(item) and not name.startswith("__") and not isinstance(item, ModuleType):
             print "{} = {}".format(name, item)

-@manager.command
-def import_events(events_file):
-    # TODO: remove this code past 1/11/2014.
-    import json
-    from collections import Counter
-
-    count = Counter()
-
-    with open(events_file) as f:
-        for line in f:
-            try:
-                event = json.loads(line)
-
-                object_type = event.get('object_type', None)
-                object_id = event.get('object_id', None)
-
-                if object_id == 'dashboard' and object_type == 'dashboard':
-                    count['bad dashboard id'] += 1
-                    continue
-
-                models.Event.record(event)
-
-                count['imported'] += 1
-
-            except Exception as ex:
-                print "Failed importing line:"
-                print line
-                print ex.message
-                count[ex.message] += 1
-                count['failed'] += 1
-
-    models.db.close_db(None)
-
-    for k, v in count.iteritems():
-        print k
-        print v
-
-
-@database_manager.command
-def create_tables():
-    """Creates the database tables."""
-    from redash.models import create_db, init_db
-
-    create_db(True, False)
-    init_db()
-
-
-@database_manager.command
-def drop_tables():
-    """Drop the database tables."""
-    from redash.models import create_db
-
-    create_db(False, True)
-
-
-@users_manager.option('email', help="User's email")
-@users_manager.option('name', help="User's full name")
-@users_manager.option('--admin', dest='is_admin', action="store_true", default=False, help="set user as admin")
-@users_manager.option('--google', dest='google_auth', action="store_true", default=False, help="user uses Google Auth to login")
-@users_manager.option('--password', dest='password', default=None, help="Password for users who don't use Google Auth (leave blank for prompt).")
-@users_manager.option('--groups', dest='groups', default=models.User.DEFAULT_GROUPS, help="Comma separated list of groups (leave blank for default).")
-def create(email, name, groups, is_admin=False, google_auth=False, password=None):
-    print "Creating user (%s, %s)..." % (email, name)
-    print "Admin: %r" % is_admin
-    print "Login with Google Auth: %r\n" % google_auth
-    if isinstance(groups, basestring):
-        groups = groups.split(',')
-        groups.remove('')  # in case it was empty string
-
-    if is_admin:
-        groups += ['admin']
-
-    user = models.User(email=email, name=name, groups=groups)
-    if not google_auth:
-        password = password or prompt_pass("Password")
-        user.hash_password(password)
-
-    try:
-        user.save()
-    except Exception, e:
-        print "Failed creating user: %s" % e.message
-
-
-@users_manager.option('email', help="email address of user to delete")
-def delete(email):
-    deleted_count = models.User.delete().where(models.User.email == email).execute()
-    print "Deleted %d users." % deleted_count
-
-
-@data_sources_manager.command
-def import_from_settings(name=None):
-    """Import data source from settings (env variables)."""
-    name = name or "Default"
-    data_source = models.DataSource.create(name=name,
-                                           type=settings.CONNECTION_ADAPTER,
-                                           options=settings.CONNECTION_STRING)
-
-    print "Imported data source from settings (id={}).".format(data_source.id)
-
-
-@data_sources_manager.command
-def list():
-    """List currently configured data sources"""
-    for ds in models.DataSource.select():
-        print "Name: {}\nType: {}\nOptions: {}".format(ds.name, ds.type, ds.options)
-
-
-@data_sources_manager.command
-def new(name, type, options):
-    """Create new data source"""
-    # TODO: validate it's a valid type and in the future, validate the options.
-    print "Creating {} data source ({}) with options:\n{}".format(type, name, options)
-    data_source = models.DataSource.create(name=name,
-                                           type=type,
-                                           options=options)
-    print "Id: {}".format(data_source.id)
-
-
-manager.add_command("database", database_manager)
-manager.add_command("users", users_manager)
-manager.add_command("import", import_manager)
-manager.add_command("ds", data_sources_manager)

 if __name__ == '__main__':
     manager.run()
```
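The restructuring above moves each command group out of manage.py and into redash.cli modules, each exposing its own Flask-Script sub-manager that manage.py registers with add_command. A minimal self-contained sketch of that registration pattern, assuming the era-appropriate Flask-Script package; the "example" group and hello() command are hypothetical, not part of redash:

```python
# Minimal Flask-Script sub-manager sketch mirroring the manage.py pattern above.
# "example" and hello() are hypothetical names for illustration only.
from flask import Flask
from flask.ext.script import Manager

app = Flask(__name__)
manager = Manager(app)

sub_manager = Manager(help="Example sub-commands.")

@sub_manager.command
def hello():
    """Prints a greeting (this docstring shows up in --help output)."""
    print "hello from a sub-command"

manager.add_command("example", sub_manager)

if __name__ == '__main__':
    # Invoked as: python this_script.py example hello
    manager.run()
```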
migrations/0001_allow_delete_query.py (new file, 15 lines)

```python
from playhouse.migrate import PostgresqlMigrator, migrate

from redash.models import db
from redash import models

if __name__ == '__main__':
    db.connect_db()
    migrator = PostgresqlMigrator(db.database)

    with db.database.transaction():
        migrate(
            migrator.add_column('queries', 'is_archived', models.Query.is_archived)
        )

    db.close_db(None)
```
migrations/0002_fix_timestamp_fields.py (new file, 21 lines)

```python
from redash.models import db

if __name__ == '__main__':
    db.connect_db()
    columns = (
        ('activity_log', 'created_at'),
        ('dashboards', 'created_at'),
        ('data_sources', 'created_at'),
        ('events', 'created_at'),
        ('groups', 'created_at'),
        ('queries', 'created_at'),
        ('widgets', 'created_at'),
        ('query_results', 'retrieved_at')
    )

    with db.database.transaction():
        for column in columns:
            db.database.execute_sql("ALTER TABLE {} ALTER COLUMN {} TYPE timestamp with time zone;".format(*column))

    db.close_db(None)
```
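Each `(table, column)` tuple is splatted into the SQL template via `format(*column)`. For the first entry, the executed statement expands as follows:

```python
# What format(*column) produces for the ('activity_log', 'created_at') entry.
template = "ALTER TABLE {} ALTER COLUMN {} TYPE timestamp with time zone;"
print template.format('activity_log', 'created_at')
# ALTER TABLE activity_log ALTER COLUMN created_at TYPE timestamp with time zone;
```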
migrations/0003_update_data_source_config.py (new file, 73 lines)

```python
import json

from redash import query_runner
from redash.models import DataSource


def update(data_source):
    print "[%s] Old options: %s" % (data_source.name, data_source.options)

    if query_runner.validate_configuration(data_source.type, data_source.options):
        print "[%s] configuration already valid. skipping." % data_source.name
        return

    if data_source.type == 'pg':
        values = data_source.options.split(" ")
        configuration = {}
        for value in values:
            k, v = value.split("=", 1)
            configuration[k] = v
            if k == 'port':
                configuration[k] = int(v)

        data_source.options = json.dumps(configuration)

    elif data_source.type == 'mysql':
        mapping = {
            'Server': 'host',
            'User': 'user',
            'Pwd': 'passwd',
            'Database': 'db'
        }

        values = data_source.options.split(";")
        configuration = {}
        for value in values:
            k, v = value.split("=", 1)
            configuration[mapping[k]] = v
        data_source.options = json.dumps(configuration)

    elif data_source.type == 'graphite':
        old_config = json.loads(data_source.options)

        configuration = {
            "url": old_config["url"]
        }

        if "verify" in old_config:
            configuration['verify'] = old_config['verify']

        if "auth" in old_config:
            configuration['username'], configuration['password'] = old_config["auth"]

        data_source.options = json.dumps(configuration)

    elif data_source.type == 'url':
        data_source.options = json.dumps({"url": data_source.options})

    elif data_source.type == 'script':
        data_source.options = json.dumps({"path": data_source.options})

    elif data_source.type == 'mongo':
        data_source.type = 'mongodb'

    else:
        print "[%s] No need to convert type of: %s" % (data_source.name, data_source.type)

    print "[%s] New options: %s" % (data_source.name, data_source.options)
    data_source.save()


if __name__ == '__main__':
    for data_source in DataSource.all():
        update(data_source)
```
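To make the 'pg' branch concrete, here is the conversion it performs on a legacy space-separated options string; the connection values below are hypothetical:

```python
# Worked example of the "pg" conversion in migration 0003; values are made up.
import json

options = "host=localhost port=5432 dbname=redash"
configuration = {}
for value in options.split(" "):
    k, v = value.split("=", 1)
    configuration[k] = int(v) if k == 'port' else v

print json.dumps(configuration, sort_keys=True)
# {"dbname": "redash", "host": "localhost", "port": 5432}
```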
migrations/0004_allow_null_in_event_user.py (new file, 12 lines)

```python
from playhouse.migrate import PostgresqlMigrator, migrate

from redash.models import db

if __name__ == '__main__':
    db.connect_db()
    migrator = PostgresqlMigrator(db.database)

    with db.database.transaction():
        migrate(
            migrator.drop_not_null('events', 'user_id')
        )
```
migrations/0005_add_updated_at.py (new file, 26 lines)

```python
from playhouse.migrate import PostgresqlMigrator, migrate

from redash.models import db
from redash import models

if __name__ == '__main__':
    db.connect_db()
    migrator = PostgresqlMigrator(db.database)

    with db.database.transaction():
        migrate(
            migrator.add_column('queries', 'updated_at', models.Query.updated_at),
            migrator.add_column('dashboards', 'updated_at', models.Dashboard.updated_at),
            migrator.add_column('widgets', 'updated_at', models.Widget.updated_at),
            migrator.add_column('users', 'created_at', models.User.created_at),
            migrator.add_column('users', 'updated_at', models.User.updated_at),
            migrator.add_column('visualizations', 'created_at', models.Visualization.created_at),
            migrator.add_column('visualizations', 'updated_at', models.Visualization.updated_at)
        )

        db.database.execute_sql("UPDATE queries SET updated_at = created_at;")
        db.database.execute_sql("UPDATE dashboards SET updated_at = created_at;")
        db.database.execute_sql("UPDATE widgets SET updated_at = created_at;")

    db.close_db(None)
```
migrations/0006_queries_last_edit_by.py (new file, 19 lines)

```python
from playhouse.migrate import PostgresqlMigrator, migrate

from redash.models import db
from redash import models

if __name__ == '__main__':
    db.connect_db()
    migrator = PostgresqlMigrator(db.database)

    with db.database.transaction():
        migrate(
            migrator.add_column('queries', 'last_modified_by_id', models.Query.last_modified_by)
        )

        db.database.execute_sql("UPDATE queries SET last_modified_by_id = user_id;")

    db.close_db(None)
```
migrations/0007_add_schedule_to_queries.py (new file, 23 lines)

```python
from playhouse.migrate import PostgresqlMigrator, migrate

from redash.models import db
from redash import models

if __name__ == '__main__':
    db.connect_db()
    migrator = PostgresqlMigrator(db.database)

    with db.database.transaction():
        migrate(
            migrator.add_column('queries', 'schedule', models.Query.schedule),
        )

        db.database.execute_sql("UPDATE queries SET schedule = ttl WHERE ttl > 0;")

        migrate(
            migrator.drop_column('queries', 'ttl')
        )

    db.close_db(None)
```
migrations/0008_make_ds_name_unique.py (new file, 20 lines)

```python
from redash.models import db

if __name__ == '__main__':
    db.connect_db()

    with db.database.transaction():
        # Make sure all data sources names are unique.
        db.database.execute_sql("""
        UPDATE data_sources
        SET name = new_names.name
        FROM (
            SELECT id, name || ' ' || id as name
            FROM (SELECT id, name, rank() OVER (PARTITION BY name ORDER BY created_at ASC) FROM data_sources) ds WHERE rank > 1
        ) AS new_names
        WHERE data_sources.id = new_names.id;
        """)
        # Add unique constraint on data_sources.name.
        db.database.execute_sql("ALTER TABLE data_sources ADD CONSTRAINT unique_name UNIQUE (name);")

    db.close_db(None)
```
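The window-function UPDATE above renames only the second and later data sources that share a name (rank > 1 when ordered by created_at within each name), appending the row's id. A small Python illustration of the same semantics; the ids and names below are hypothetical:

```python
# Illustration of the rename rule in migration 0008; the data is made up.
# Within each name partition, rank > 1 (by created_at) gets " <id>" appended.
sources = [(1, "MySQL"), (2, "MySQL"), (3, "Postgres"), (4, "MySQL")]  # (id, name), ordered by created_at

seen = {}
for ds_id, name in sources:
    seen[name] = seen.get(name, 0) + 1
    new_name = name if seen[name] == 1 else "{} {}".format(name, ds_id)
    print "{} -> {}".format(name, new_name)
# MySQL -> MySQL
# MySQL -> MySQL 2
# Postgres -> Postgres
# MySQL -> MySQL 4
```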
rd_ui/Gruntfile.js (2 hunks)

```diff
@@ -163,7 +163,6 @@ module.exports = function (grunt) {
     // Automatically inject Bower components into the app
     wiredep: {
       options: {
-        cwd: '<%= yeoman.app %>'
       },
       app: {
         src: ['<%= yeoman.app %>/index.html'],
@@ -321,7 +320,12 @@ module.exports = function (grunt) {
           src: ['generated/*']
         }, {
           expand: true,
-          cwd: 'bower_components/bootstrap/dist',
+          cwd: '<%= yeoman.app %>/bower_components/bootstrap/dist',
           src: 'fonts/*',
           dest: '<%= yeoman.dist %>'
+        }, {
+          expand: true,
+          cwd: '<%= yeoman.app %>/bower_components/font-awesome',
+          src: 'fonts/*',
+          dest: '<%= yeoman.dist %>'
         }]
```
Binary file not shown.
@@ -1,228 +0,0 @@ (deleted vendored SVG font "glyphicons_halflingsregular": 228 lines of glyph path data)
|
||||
<glyph unicode="" d="M600 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM248 385l568 567q-100 62 -216 62q-171 0 -292.5 -121.5t-121.5 -292.5q0 -115 62 -215zM955 809l-564 -564q97 -59 209 -59q171 0 292.5 121.5 t121.5 292.5q0 112 -59 209z" />
|
||||
<glyph unicode="" d="M1200 400h-600v-301l-600 448l600 453v-300h600v-300z" />
|
||||
<glyph unicode="" d="M600 400h-600v300h600v300l600 -453l-600 -448v301z" />
|
||||
<glyph unicode="" d="M1098 600h-298v-600h-300v600h-296l450 600z" />
|
||||
<glyph unicode="" d="M998 600l-449 -600l-445 600h296v600h300v-600h298z" />
|
||||
<glyph unicode="" d="M600 199v301q-95 -2 -183 -20t-170 -52t-147 -92.5t-100 -135.5q6 132 41 238.5t103.5 193t184 138t271.5 59.5v271l600 -453z" />
|
||||
<glyph unicode="" d="M1200 1200h-400l129 -129l-294 -294l142 -142l294 294l129 -129v400zM565 423l-294 -294l129 -129h-400v400l129 -129l294 294z" />
|
||||
<glyph unicode="" d="M871 730l129 -130h-400v400l129 -129l295 295l142 -141zM200 600h400v-400l-129 130l-295 -295l-142 141l295 295z" />
|
||||
<glyph unicode="" d="M600 1177q118 0 224.5 -45.5t184 -123t123 -184t45.5 -224.5t-45.5 -224.5t-123 -184t-184 -123t-224.5 -45.5t-224.5 45.5t-184 123t-123 184t-45.5 224.5t45.5 224.5t123 184t184 123t224.5 45.5zM686 549l58 302q4 20 -8 34.5t-33 14.5h-207q-20 0 -32 -14.5t-8 -34.5 l58 -302q4 -20 21.5 -34.5t37.5 -14.5h54q20 0 37.5 14.5t21.5 34.5zM700 400h-200v-100h200v100z" />
|
||||
<glyph unicode="" d="M1200 900h-111v6t-1 15t-3 18l-34 172q-11 39 -41.5 63t-69.5 24q-32 0 -61 -17l-239 -144q-22 -13 -40 -35q-19 24 -40 36l-238 144q-33 18 -62 18q-39 0 -69.5 -23t-40.5 -61l-35 -177q-2 -8 -3 -18t-1 -15v-6h-111v-100h100v-200h400v300h200v-300h400v200h100v100z M731 900l202 197q5 -12 12 -32.5t23 -64t25 -72t7 -28.5h-269zM481 900h-281q-3 0 14 48t35 96l18 47zM100 0h400v400h-400v-400zM700 400h400v-400h-400v400z" />
|
||||
<glyph unicode="" d="M0 121l216 193q-9 53 -13 83t-5.5 94t9 113t38.5 114t74 124q47 60 99.5 102.5t103 68t127.5 48t145.5 37.5t184.5 43.5t220 58.5q0 -189 -22 -343t-59 -258t-89 -181.5t-108.5 -120t-122 -68t-125.5 -30t-121.5 -1.5t-107.5 12.5t-87.5 17t-56.5 7.5l-99 -55l-201 -202 v143zM692 611q70 38 118.5 69.5t102 79t99 111.5t86.5 148q22 50 24 60t-6 19q-7 5 -17 5t-26.5 -14.5t-33.5 -39.5q-35 -51 -113.5 -108.5t-139.5 -89.5l-61 -32q-369 -197 -458 -401q-48 -111 -28.5 -117.5t86.5 76.5q55 66 367 234z" />
|
||||
<glyph unicode="" d="M1261 600l-26 -40q-6 -10 -20 -30t-49 -63.5t-74.5 -85.5t-97 -90t-116.5 -83.5t-132.5 -59t-145.5 -23.5t-145.5 23.5t-132.5 59t-116.5 83.5t-97 90t-74.5 85.5t-49 63.5t-20 30l-26 40l26 40q6 10 20 30t49 63.5t74.5 85.5t97 90t116.5 83.5t132.5 59t145.5 23.5 t145.5 -23.5t132.5 -59t116.5 -83.5t97 -90t74.5 -85.5t49 -63.5t20 -30zM600 240q64 0 123.5 20t100.5 45.5t85.5 71.5t66.5 75.5t58 81.5t47 66q-1 1 -28.5 37.5t-42 55t-43.5 53t-57.5 63.5t-58.5 54q49 -74 49 -163q0 -124 -88 -212t-212 -88t-212 88t-88 212 q0 85 46 158q-102 -87 -226 -258q7 -10 40.5 -58t56 -78.5t68 -77.5t87.5 -75t103 -49.5t125 -21.5zM484 762l-107 -106q49 -124 154 -191l105 105q-37 24 -75 72t-57 84z" />
|
||||
<glyph unicode="" d="M906 1200l-314 -1200h-148l37 143q-82 21 -165 71.5t-140 102t-109.5 112t-72 88.5t-29.5 43l-26 40l26 40q6 10 20 30t49 63.5t74.5 85.5t97 90t116.5 83.5t132.5 59t145.5 23.5q61 0 121 -17l37 142h148zM1261 600l-26 -40q-7 -12 -25.5 -38t-63.5 -79.5t-95.5 -102.5 t-124 -100t-146.5 -79l38 145q22 15 44.5 34t46 44t40.5 44t41 50.5t33.5 43.5t33 44t24.5 34q-97 127 -140 175l39 146q67 -54 131.5 -125.5t87.5 -103.5t36 -52zM513 264l37 141q-107 18 -178.5 101.5t-71.5 193.5q0 85 46 158q-102 -87 -226 -258q210 -282 393 -336z M484 762l-107 -106q49 -124 154 -191l47 47l23 87q-30 28 -59 69t-44 68z" />
|
||||
<glyph unicode="" d="M-47 0h1294q37 0 50.5 35.5t-7.5 67.5l-642 1056q-20 33 -48 36t-48 -29l-642 -1066q-21 -32 -7.5 -66t50.5 -34zM700 200v100h-200v-100h-345l445 723l445 -723h-345zM700 700h-200v-100l100 -300l100 300v100z" />
|
||||
<glyph unicode="" d="M800 711l363 -325q15 -14 26 -38.5t11 -44.5v-41q0 -20 -12 -26.5t-29 5.5l-359 249v-263q100 -91 100 -113v-64q0 -21 -13 -29t-32 1l-94 78h-222l-94 -78q-19 -9 -32 -1t-13 29v64q0 22 100 113v263l-359 -249q-17 -12 -29 -5.5t-12 26.5v41q0 20 11 44.5t26 38.5 l363 325v339q0 62 44 106t106 44t106 -44t44 -106v-339z" />
|
||||
<glyph unicode="" d="M941 800l-600 -600h-341v200h259l600 600h241v198l300 -295l-300 -300v197h-159zM381 678l141 142l-181 180h-341v-200h259zM1100 598l300 -295l-300 -300v197h-241l-181 181l141 142l122 -123h159v198z" />
|
||||
<glyph unicode="" d="M100 1100h1000q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-596l-304 -300v300h-100q-41 0 -70.5 29.5t-29.5 70.5v600q0 41 29.5 70.5t70.5 29.5z" />
|
||||
<glyph unicode="" d="M400 900h-300v300h300v-300zM1100 900h-300v300h300v-300zM1100 800v-200q0 -42 -3 -83t-15 -104t-31.5 -116t-58 -109.5t-89 -96.5t-129 -65.5t-174.5 -25.5t-174.5 25.5t-129 65.5t-89 96.5t-58 109.5t-31.5 116t-15 104t-3 83v200h300v-250q0 -113 6 -145 q17 -92 102 -117q39 -11 92 -11q37 0 66.5 5.5t50 15.5t36 24t24 31.5t14 37.5t7 42t2.5 45t0 47v25v250h300z" />
|
||||
<glyph unicode="" d="M902 184l226 227l-578 579l-580 -579l227 -227l352 353z" />
|
||||
<glyph unicode="" d="M650 218l578 579l-226 227l-353 -353l-352 353l-227 -227z" />
|
||||
<glyph unicode="" d="M1198 400v600h-796l215 -200h381v-400h-198l299 -283l299 283h-200zM-198 700l299 283l300 -283h-203v-400h385l215 -200h-800v600h-196z" />
|
||||
<glyph unicode="" d="M1050 1200h94q20 0 35 -14.5t15 -35.5t-15 -35.5t-35 -14.5h-54l-201 -961q-2 -4 -6 -10.5t-19 -17.5t-33 -11h-31v-50q0 -20 -14.5 -35t-35.5 -15t-35.5 15t-14.5 35v50h-300v-50q0 -20 -14.5 -35t-35.5 -15t-35.5 15t-14.5 35v50h-50q-21 0 -35.5 15t-14.5 35 q0 21 14.5 35.5t35.5 14.5h535l48 200h-633q-32 0 -54.5 21t-27.5 43l-100 475q-5 24 10 42q14 19 39 19h896l38 162q5 17 18.5 27.5t30.5 10.5z" />
|
||||
<glyph unicode="" d="M1200 1000v-100h-1200v100h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500zM0 800h1200v-800h-1200v800z" />
|
||||
<glyph unicode="" d="M201 800l-200 -400v600h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500v-200h-1000zM1501 700l-300 -700h-1200l300 700h1200z" />
|
||||
<glyph unicode="" d="M302 300h198v600h-198l298 300l298 -300h-198v-600h198l-298 -300z" />
|
||||
<glyph unicode="" d="M900 303v197h-600v-197l-300 297l300 298v-198h600v198l300 -298z" />
|
||||
<glyph unicode="" d="M31 400l172 739q5 22 23 41.5t38 19.5h672q19 0 37.5 -22.5t23.5 -45.5l172 -732h-1138zM100 300h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM900 200h-100v-100h100v100z M1100 200h-100v-100h100v100z" />
|
||||
<glyph unicode="" d="M1100 200v850q0 21 14.5 35.5t35.5 14.5q20 0 35 -14.5t15 -35.5v-850q0 -20 -15 -35t-35 -15q-21 0 -35.5 15t-14.5 35zM325 800l675 250v-850l-675 200h-38l47 -276q2 -12 -3 -17.5t-11 -6t-21 -0.5h-8h-83q-20 0 -34.5 14t-18.5 35q-56 337 -56 351v250v5 q0 13 0.5 18.5t2.5 13t8 10.5t15 3h200zM-101 600v50q0 24 25 49t50 38l25 13v-250l-11 5.5t-24 14t-30 21.5t-24 27.5t-11 31.5z" />
|
||||
<glyph unicode="" d="M445 1180l-45 -233l-224 78l78 -225l-233 -44l179 -156l-179 -155l233 -45l-78 -224l224 78l45 -233l155 179l155 -179l45 233l224 -78l-78 224l234 45l-180 155l180 156l-234 44l78 225l-224 -78l-45 233l-155 -180z" />
|
||||
<glyph unicode="" d="M700 1200h-50q-27 0 -51 -20t-38 -48l-96 -198l-145 -196q-20 -26 -20 -63v-400q0 -75 100 -75h61q123 -100 139 -100h250q46 0 83 57l238 344q29 31 29 74v100q0 44 -30.5 84.5t-69.5 40.5h-328q28 118 28 125v150q0 44 -30.5 84.5t-69.5 40.5zM700 925l-50 -225h450 v-125l-250 -375h-214l-136 100h-100v375l150 212l100 213h50v-175zM0 800v-600h200v600h-200z" />
|
||||
<glyph unicode="" d="M700 0h-50q-27 0 -51 20t-38 48l-96 198l-145 196q-20 26 -20 63v400q0 75 100 75h61q123 100 139 100h250q46 0 83 -57l238 -344q29 -31 29 -74v-100q0 -44 -30.5 -84.5t-69.5 -40.5h-328q28 -118 28 -125v-150q0 -44 -30.5 -84.5t-69.5 -40.5zM200 400h-200v600h200 v-600zM700 275l-50 225h450v125l-250 375h-214l-136 -100h-100v-375l150 -212l100 -213h50v175z" />
|
||||
<glyph unicode="" d="M364 873l362 230q14 6 25 6q17 0 29 -12l109 -112q14 -14 14 -34q0 -18 -11 -32l-85 -121h302q85 0 138.5 -38t53.5 -110t-54.5 -111t-138.5 -39h-107l-130 -339q-7 -22 -20.5 -41.5t-28.5 -19.5h-341q-7 0 -90 81t-83 94v525q0 17 14 35.5t28 28.5zM408 792v-503 l100 -89h293l131 339q6 21 19.5 41t28.5 20h203q16 0 25 15t9 36q0 20 -9 34.5t-25 14.5h-457h-6.5h-7.5t-6.5 0.5t-6 1t-5 1.5t-5.5 2.5t-4 4t-4 5.5q-5 12 -5 20q0 14 10 27l147 183l-86 83zM208 200h-200v600h200v-600z" />
|
||||
<glyph unicode="" d="M475 1104l365 -230q7 -4 16.5 -10.5t26 -26t16.5 -36.5v-526q0 -13 -85.5 -93.5t-93.5 -80.5h-342q-15 0 -28.5 20t-19.5 41l-131 339h-106q-84 0 -139 39t-55 111t54 110t139 37h302l-85 121q-11 16 -11 32q0 21 14 34l109 113q13 12 29 12q11 0 25 -6zM370 946 l145 -184q10 -11 10 -26q0 -11 -5 -20q-1 -3 -3.5 -5.5l-4 -4t-5 -2.5t-5.5 -1.5t-6.5 -1t-6.5 -0.5h-7.5h-6.5h-476v-100h222q15 0 28.5 -20.5t19.5 -40.5l131 -339h293l106 89v502l-342 237zM1199 201h-200v600h200v-600z" />
|
||||
<glyph unicode="" d="M1100 473v342q0 15 -20 28.5t-41 19.5l-339 131v106q0 84 -39 139t-111 55t-110 -53.5t-38 -138.5v-302l-121 84q-15 12 -33.5 11.5t-32.5 -13.5l-112 -110q-22 -22 -6 -53l230 -363q4 -6 10.5 -15.5t26 -25t36.5 -15.5h525q13 0 94 83t81 90zM911 400h-503l-236 339 l83 86l183 -146q22 -18 47 -5q3 1 5.5 3.5l4 4t2.5 5t1.5 5.5t1 6.5t0.5 6v7.5v7v456q0 22 25 31t50 -0.5t25 -30.5v-202q0 -16 20 -29.5t41 -19.5l339 -130v-294zM1000 200v-200h-600v200h600z" />
|
||||
<glyph unicode="" d="M305 1104v200h600v-200h-600zM605 310l339 131q20 6 40.5 19.5t20.5 28.5v342q0 7 -81 90t-94 83h-525q-17 0 -35.5 -14t-28.5 -28l-10 -15l-230 -362q-15 -31 7 -53l112 -110q13 -13 32 -13.5t34 10.5l121 85l-1 -302q0 -84 38.5 -138t110.5 -54t111 55t39 139v106z M905 804v-294l-340 -130q-20 -6 -40 -20t-20 -29v-202q0 -22 -25 -31t-50 0t-25 31v456v14.5t-1.5 11.5t-5 12t-9.5 7q-24 13 -46 -5l-184 -146l-83 86l237 339h503z" />
|
||||
<glyph unicode="" d="M603 1195q162 0 299.5 -80t217.5 -218t80 -300t-80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5zM598 701h-298v-201h300l-2 -194l402 294l-402 298v-197z" />
|
||||
<glyph unicode="" d="M597 1195q122 0 232.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-218 -217.5t-300 -80t-299.5 80t-217.5 217.5t-80 299.5q0 122 47.5 232.5t127.5 190.5t190.5 127.5t231.5 47.5zM200 600l400 -294v194h302v201h-300v197z" />
|
||||
<glyph unicode="" d="M603 1195q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5zM300 600h200v-300h200v300h200l-300 400z" />
|
||||
<glyph unicode="" d="M603 1195q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5zM500 900v-300h-200l300 -400l300 400h-200v300h-200z" />
|
||||
<glyph unicode="" d="M603 1195q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5zM627 1101q-15 -12 -36.5 -21t-34.5 -12t-44 -8t-39 -6 q-15 -3 -45.5 0.5t-45.5 -2.5q-21 -7 -52 -26.5t-34 -34.5q-3 -11 6.5 -22.5t8.5 -18.5q-3 -34 -27.5 -90.5t-29.5 -79.5q-8 -33 5.5 -92.5t7.5 -87.5q0 -9 17 -44t16 -60q12 0 23 -5.5t23 -15t20 -13.5q24 -12 108 -42q22 -8 53 -31.5t59.5 -38.5t57.5 -11q8 -18 -15 -55 t-20 -57q42 -71 87 -80q0 -6 -3 -15.5t-3.5 -14.5t4.5 -17q102 -2 221 112q30 29 47 47t34.5 49t20.5 62q-14 9 -37 9.5t-36 7.5q-14 7 -49 15t-52 19q-9 0 -39.5 -0.5t-46.5 -1.5t-39 -6.5t-39 -16.5q-50 -35 -66 -12q-4 2 -3.5 25.5t0.5 25.5q-6 13 -26.5 17t-24.5 7 q2 22 -2 41t-16.5 28t-38.5 -20q-23 -25 -42 4q-19 28 -8 58q6 16 22 22q6 -1 26 -1.5t33.5 -4t19.5 -13.5q12 -19 32 -37.5t34 -27.5l14 -8q0 3 9.5 39.5t5.5 57.5q-4 23 14.5 44.5t22.5 31.5q5 14 10 35t8.5 31t15.5 22.5t34 21.5q-6 18 10 37q8 0 23.5 -1.5t24.5 -1.5 t20.5 4.5t20.5 15.5q-10 23 -30.5 42.5t-38 30t-49 26.5t-43.5 23q11 41 1 44q31 -13 58.5 -14.5t39.5 3.5l11 4q6 36 -17 53.5t-64 28.5t-56 23q-19 -3 -37 0zM613 994q0 -18 8 -42.5t16.5 -44t9.5 -23.5q-9 2 -31 5t-36 5t-32 8t-30 14q3 12 16 30t16 25q10 -10 18.5 -10 t14 6t14.5 14.5t16 12.5z" />
|
||||
<glyph unicode="" horiz-adv-x="1220" d="M100 1196h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 1096h-200v-100h200v100zM100 796h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 696h-500v-100h500v100zM100 396h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 296h-300v-100h300v100z " />
|
||||
<glyph unicode="" d="M1100 1200v-100h-1000v100h1000zM150 1000h900l-350 -500v-300l-200 -200v500z" />
|
||||
<glyph unicode="" d="M329 729l142 142l-200 200l129 129h-400v-400l129 129zM1200 1200v-400l-129 129l-200 -200l-142 142l200 200l-129 129h400zM271 129l129 -129h-400v400l129 -129l200 200l142 -142zM1071 271l129 129v-400h-400l129 129l-200 200l142 142z" />
|
||||
<glyph unicode="" d="M596 1192q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM596 1010q-171 0 -292.5 -121.5t-121.5 -292.5q0 -172 121.5 -293t292.5 -121t292.5 121t121.5 293q0 171 -121.5 292.5t-292.5 121.5zM455 905 q22 0 38 -16t16 -39t-16 -39t-38 -16q-23 0 -39 16.5t-16 38.5t16 38.5t39 16.5zM708 821l1 1q-9 14 -9 28q0 22 16 38.5t39 16.5q22 0 38 -16t16 -39t-16 -39t-38 -16q-14 0 -29 10l-55 -145q17 -22 17 -51q0 -36 -25.5 -61.5t-61.5 -25.5t-61.5 25.5t-25.5 61.5 q0 32 20.5 56.5t51.5 29.5zM855 709q23 0 38.5 -15.5t15.5 -38.5t-16 -39t-38 -16q-23 0 -39 16t-16 39q0 22 16 38t39 16zM345 709q23 0 39 -16t16 -38q0 -23 -16 -39t-39 -16q-22 0 -38 16t-16 39t15.5 38.5t38.5 15.5z" />
|
||||
<glyph unicode="" d="M649 54l-16 22q-90 125 -293 323q-71 70 -104.5 105.5t-77 89.5t-61 99t-17.5 91q0 131 98.5 229.5t230.5 98.5q143 0 241 -129q103 129 246 129q129 0 226 -98.5t97 -229.5q0 -46 -17.5 -91t-61 -99t-77 -89.5t-104.5 -105.5q-203 -198 -293 -323zM844 524l12 12 q64 62 97.5 97t64.5 79t31 72q0 71 -48 119t-105 48q-74 0 -132 -82l-118 -171l-114 174q-51 79 -123 79q-60 0 -109.5 -49t-49.5 -118q0 -27 30.5 -70t61.5 -75.5t95 -94.5l22 -22q93 -90 190 -201q82 92 195 203z" />
|
||||
<glyph unicode="" d="M476 406l19 -17l105 105l-212 212l389 389l247 -247l-95 -96l18 -18q46 -46 77 -99l29 29q35 35 62.5 88t27.5 96q0 93 -66 159l-141 141q-66 66 -159 66q-95 0 -159 -66l-283 -283q-66 -64 -66 -159q0 -93 66 -159zM123 193l141 -141q66 -66 159 -66q95 0 159 66 l283 283q66 66 66 159t-66 159l-141 141q-12 12 -19 17l-105 -105l212 -212l-389 -389l-247 248l95 95l-18 18q-46 45 -75 101l-55 -55q-66 -66 -66 -159q0 -94 66 -160z" />
|
||||
<glyph unicode="" d="M200 100v953q0 21 30 46t81 48t129 38t163 15t162 -15t127 -38t79 -48t29 -46v-953q0 -41 -29.5 -70.5t-70.5 -29.5h-600q-41 0 -70.5 29.5t-29.5 70.5zM900 1000h-600v-700h600v700zM600 46q43 0 73.5 30.5t30.5 73.5t-30.5 73.5t-73.5 30.5t-73.5 -30.5t-30.5 -73.5 t30.5 -73.5t73.5 -30.5z" />
|
||||
<glyph unicode="" d="M700 1029v-307l64 -14q34 -7 64 -16.5t70 -31.5t67.5 -52t47.5 -80.5t20 -112.5q0 -139 -89 -224t-244 -96v-77h-100v78q-152 17 -237 104q-40 40 -52.5 93.5t-15.5 139.5h139q5 -77 48.5 -126.5t117.5 -64.5v335l-27 7q-46 14 -79 26.5t-72 36t-62.5 52t-40 72.5 t-16.5 99q0 92 44 159.5t109 101t144 40.5v78h100v-79q38 -4 72.5 -13.5t75.5 -31.5t71 -53.5t51.5 -84t24.5 -118.5h-159q-8 72 -35 109.5t-101 50.5zM600 755v274q-61 -8 -97.5 -37.5t-36.5 -102.5q0 -29 8 -51t16.5 -34t29.5 -22.5t31 -13.5t38 -10q7 -2 11 -3zM700 548 v-311q170 18 170 151q0 64 -44 99.5t-126 60.5z" />
|
||||
<glyph unicode="" d="M866 300l50 -147q-41 -25 -80.5 -36.5t-59 -13t-61.5 -1.5q-23 0 -128 33t-155 29q-39 -4 -82 -17t-66 -25l-24 -11l-55 145l16.5 11t15.5 10t13.5 9.5t14.5 12t14.5 14t17.5 18.5q48 55 54 126.5t-30 142.5h-221v100h166q-24 49 -44 104q-10 26 -14.5 55.5t-3 72.5 t25 90t68.5 87q97 88 263 88q129 0 230 -89t101 -208h-153q0 52 -34 89.5t-74 51.5t-76 14q-37 0 -79 -14.5t-62 -35.5q-41 -44 -41 -101q0 -11 2.5 -24.5t5.5 -24t9.5 -26.5t10.5 -25t14 -27.5t14 -25.5t15.5 -27t13.5 -24h242v-100h-197q8 -50 -2.5 -115t-31.5 -94 q-41 -59 -99 -113q35 11 84 18t70 7q32 1 102 -16t104 -17q76 0 136 30z" />
|
||||
<glyph unicode="" d="M300 0l298 300h-198v900h-200v-900h-198zM900 1200l298 -300h-198v-900h-200v900h-198z" />
|
||||
<glyph unicode="" d="M400 300h198l-298 -300l-298 300h198v900h200v-900zM1000 1200v-500h-100v100h-100v-100h-100v500h300zM901 1100h-100v-200h100v200zM700 500h300v-200h-99v-100h-100v100h99v100h-200v100zM800 100h200v-100h-300v200h100v-100z" />
|
||||
<glyph unicode="" d="M400 300h198l-298 -300l-298 300h198v900h200v-900zM1000 1200v-200h-99v-100h-100v100h99v100h-200v100h300zM800 800h200v-100h-300v200h100v-100zM700 500h300v-500h-100v100h-100v-100h-100v500zM801 200h100v200h-100v-200z" />
|
||||
<glyph unicode="" d="M300 0l298 300h-198v900h-200v-900h-198zM900 1100h-100v100h200v-500h-100v400zM1100 500v-500h-100v100h-200v400h300zM1001 400h-100v-200h100v200z" />
|
||||
<glyph unicode="" d="M300 0l298 300h-198v900h-200v-900h-198zM1100 1200v-500h-100v100h-200v400h300zM1001 1100h-100v-200h100v200zM900 400h-100v100h200v-500h-100v400z" />
|
||||
<glyph unicode="" d="M300 0l298 300h-198v900h-200v-900h-198zM900 1000h-200v200h200v-200zM1000 700h-300v200h300v-200zM1100 400h-400v200h400v-200zM1200 100h-500v200h500v-200z" />
|
||||
<glyph unicode="" d="M300 0l298 300h-198v900h-200v-900h-198zM1200 1000h-500v200h500v-200zM1100 700h-400v200h400v-200zM1000 400h-300v200h300v-200zM900 100h-200v200h200v-200z" />
|
||||
<glyph unicode="" d="M400 1100h300q162 0 281 -118.5t119 -281.5v-300q0 -165 -118.5 -282.5t-281.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5v300q0 165 117.5 282.5t282.5 117.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5 t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5z" />
|
||||
<glyph unicode="" d="M700 0h-300q-163 0 -281.5 117.5t-118.5 282.5v300q0 163 119 281.5t281 118.5h300q165 0 282.5 -117.5t117.5 -282.5v-300q0 -165 -117.5 -282.5t-282.5 -117.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5 t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5zM400 800v-500l333 250z" />
|
||||
<glyph unicode="" d="M0 400v300q0 163 117.5 281.5t282.5 118.5h300q163 0 281.5 -119t118.5 -281v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5zM900 300v500q0 41 -29.5 70.5t-70.5 29.5h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5 t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5zM800 700h-500l250 -333z" />
|
||||
<glyph unicode="" d="M1100 700v-300q0 -162 -118.5 -281t-281.5 -119h-300q-165 0 -282.5 118.5t-117.5 281.5v300q0 165 117.5 282.5t282.5 117.5h300q165 0 282.5 -117.5t117.5 -282.5zM900 300v500q0 41 -29.5 70.5t-70.5 29.5h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5 t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5zM550 733l-250 -333h500z" />
|
||||
<glyph unicode="" d="M500 1100h400q165 0 282.5 -117.5t117.5 -282.5v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-400v200h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5h-500v200zM700 550l-400 -350v200h-300v300h300v200z" />
|
||||
<glyph unicode="" d="M403 2l9 -1q13 0 26 16l538 630q15 19 6 36q-8 18 -32 16h-300q1 4 78 219.5t79 227.5q2 17 -6 27l-8 8h-9q-16 0 -25 -15q-4 -5 -98.5 -111.5t-228 -257t-209.5 -238.5q-17 -19 -7 -40q10 -19 32 -19h302q-155 -438 -160 -458q-5 -21 4 -32z" />
|
||||
<glyph unicode="" d="M800 200h-500q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5h500v185q-14 4 -114 7.5t-193 5.5l-93 2q-165 0 -282.5 -117.5t-117.5 -282.5v-300q0 -165 117.5 -282.5t282.5 -117.5h300q47 0 100 15v185zM900 200v200h-300v300h300v200l400 -350z" />
|
||||
<glyph unicode="" d="M1200 700l-149 149l-342 -353l-213 213l353 342l-149 149h500v-500zM1022 571l-122 -123v-148q0 -41 -29.5 -70.5t-70.5 -29.5h-500q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5h156l118 122l-74 78h-100q-165 0 -282.5 -117.5t-117.5 -282.5v-300 q0 -165 117.5 -282.5t282.5 -117.5h300q163 0 281.5 117.5t118.5 282.5v98z" />
|
||||
<glyph unicode="" d="M600 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM600 1014q-171 0 -292.5 -121.5t-121.5 -292.5t121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5zM600 794 q80 0 137 -57t57 -137t-57 -137t-137 -57t-137 57t-57 137t57 137t137 57z" />
|
||||
<glyph unicode="" d="M700 800v400h-300v-400h-300l445 -500l450 500h-295zM25 300h1048q11 0 19 -7.5t8 -17.5v-275h-1100v275q0 11 7 18t18 7zM1000 200h-100v-50h100v50z" />
|
||||
<glyph unicode="" d="M400 700v-300h300v300h295l-445 500l-450 -500h300zM25 300h1048q11 0 19 -7.5t8 -17.5v-275h-1100v275q0 11 7 18t18 7zM1000 200h-100v-50h100v50z" />
|
||||
<glyph unicode="" d="M405 400l596 596l-154 155l-442 -442l-150 151l-155 -155zM25 300h1048q11 0 19 -7.5t8 -17.5v-275h-1100v275q0 11 7 18t18 7zM1000 200h-100v-50h100v50z" />
|
||||
<glyph unicode="" d="M409 1103l-97 97l-212 -212l97 -98zM650 861l-149 149l-212 -212l149 -149l-238 -248h700v699zM25 300h1048q11 0 19 -7.5t8 -17.5v-275h-1100v275q0 11 7 18t18 7zM1000 200h-100v-50h100v50z" />
|
||||
<glyph unicode="" d="M539 950l-149 -149l212 -212l149 148l248 -237v700h-699zM297 709l-97 -97l212 -212l98 97zM25 300h1048q11 0 19 -7.5t8 -17.5v-275h-1100v275q0 11 7 18t18 7zM1000 200h-100v-50h100v50z" />
|
||||
<glyph unicode="" d="M1200 1199v-1079l-475 272l-310 -393v416h-392zM1166 1148l-672 -712v-226z" />
|
||||
<glyph unicode="" d="M1100 1000v-850q0 -21 -15 -35.5t-35 -14.5h-150v400h-700v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1200h-100v-200h100v200z" />
|
||||
<glyph unicode="" d="M578 500h-378v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-218l-276 -275l-120 120zM700 1200h-100v-200h100v200zM1300 538l-475 -476l-244 244l123 123l120 -120l353 352z" />
|
||||
<glyph unicode="" d="M529 500h-329v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-269l-103 -103l-170 170zM700 1200h-100v-200h100v200zM1167 6l-170 170l-170 -170l-127 127l170 170l-170 170l127 127l170 -170l170 170l127 -128 l-170 -169l170 -170z" />
|
||||
<glyph unicode="" d="M700 500h-500v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-300h-400v-200zM700 1000h-100v200h100v-200zM1000 600h-200v-300h-200l300 -300l300 300h-200v300z" />
|
||||
<glyph unicode="" d="M602 500h-402v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-402l-200 200zM700 1000h-100v200h100v-200zM1000 300h200l-300 300l-300 -300h200v-300h200v300z" />
|
||||
<glyph unicode="" d="M1200 900v150q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-150h1200zM0 800v-550q0 -21 14.5 -35.5t35.5 -14.5h1100q21 0 35.5 14.5t14.5 35.5v550h-1200zM100 500h400v-200h-400v200z" />
|
||||
<glyph unicode="" d="M500 1000h400v198l300 -298l-300 -298v198h-400v200zM100 800v200h100v-200h-100zM400 800h-100v200h100v-200zM700 300h-400v-198l-300 298l300 298v-198h400v-200zM800 500h100v-200h-100v200zM1000 500v-200h100v200h-100z" />
|
||||
<glyph unicode="" d="M1200 50v1106q0 31 -18 40.5t-44 -7.5l-276 -117q-25 -16 -43.5 -50.5t-18.5 -65.5v-359q0 -29 10.5 -55.5t25 -43t29 -28.5t25.5 -18l10 -5v-397q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5zM550 1200l50 -100v-400l-100 -203v-447q0 -21 -14.5 -35.5 t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v447l-100 203v400l50 100l50 -100v-300h100v300l50 100l50 -100v-300h100v300z" />
|
||||
<glyph unicode="" d="M1100 106v888q0 22 25 34.5t50 13.5l25 2v56h-400v-56q75 0 87.5 -6t12.5 -44v-394h-500v394q0 38 12.5 44t87.5 6v56h-400v-56q4 0 11 -0.5t24 -3t30 -7t24 -15t11 -24.5v-888q0 -22 -25 -34.5t-50 -13.5l-25 -2v-56h400v56q-75 0 -87.5 6t-12.5 44v394h500v-394 q0 -38 -12.5 -44t-87.5 -6v-56h400v56q-4 0 -11 0.5t-24 3t-30 7t-24 15t-11 24.5z" />
|
||||
<glyph unicode="" d="M675 1000l-100 100h-375l-100 -100h400l200 -200v-98l295 98h105v200h-425zM500 300v500q0 41 -29.5 70.5t-70.5 29.5h-300q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h300q41 0 70.5 29.5t29.5 70.5zM100 800h300v-200h-300v200zM700 565l400 133 v-163l-400 -133v163zM100 500h300v-200h-300v200zM805 300l295 98v-298h-425l-100 -100h-375l-100 100h400l200 200h105z" />
|
||||
<glyph unicode="" d="M179 1169l-162 -162q-1 -11 -0.5 -32.5t16 -90t46.5 -140t104 -177.5t175 -208q103 -103 207.5 -176t180 -103.5t137 -47t92.5 -16.5l31 1l163 162q16 17 13 40.5t-22 37.5l-192 136q-19 14 -45 12t-42 -19l-119 -118q-143 103 -267 227q-126 126 -227 268l118 118 q17 17 20 41.5t-11 44.5l-139 194q-14 19 -36.5 22t-40.5 -14z" />
|
||||
<glyph unicode="" d="M1200 712v200q-6 8 -19 20.5t-63 45t-112 57t-171 45t-235 20.5q-92 0 -175 -10.5t-141.5 -27t-108.5 -36.5t-81.5 -40t-53.5 -36.5t-31 -27.5l-9 -10v-200q0 -21 14.5 -33.5t34.5 -8.5l202 33q20 4 34.5 21t14.5 38v146q141 24 300 24t300 -24v-146q0 -21 14.5 -38 t34.5 -21l202 -33q20 -4 34.5 8.5t14.5 33.5zM800 650l365 -303q14 -14 24.5 -39.5t10.5 -45.5v-212q0 -21 -15 -35.5t-35 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v212q0 20 10.5 45.5t24.5 39.5l365 303v50q0 4 1 10.5t12 22.5t30 28.5t60 23t97 10.5t97 -10t60 -23.5 t30 -27.5t12 -24l1 -10v-50z" />
|
||||
<glyph unicode="" d="M175 200h950l-125 150v250l100 100v400h-100v-200h-100v200h-200v-200h-100v200h-200v-200h-100v200h-100v-400l100 -100v-250zM1200 100v-100h-1100v100h1100z" />
|
||||
<glyph unicode="" d="M600 1100h100q41 0 70.5 -29.5t29.5 -70.5v-1000h-300v1000q0 41 29.5 70.5t70.5 29.5zM1000 800h100q41 0 70.5 -29.5t29.5 -70.5v-700h-300v700q0 41 29.5 70.5t70.5 29.5zM400 0v400q0 41 -29.5 70.5t-70.5 29.5h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-400h300z" />
|
||||
<glyph unicode="" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM200 800v-300h200v-100h-200v-100h300v300h-200v100h200v100h-300zM800 800h-200v-500h200v100h100v300h-100 v100zM800 700v-300h-100v300h100z" />
|
||||
<glyph unicode="" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM400 600h-100v200h-100v-500h100v200h100v-200h100v500h-100v-200zM800 800h-200v-500h200v100h100v300h-100 v100zM800 700v-300h-100v300h100z" />
|
||||
<glyph unicode="" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM200 800v-500h300v100h-200v300h200v100h-300zM600 800v-500h300v100h-200v300h200v100h-300z" />
|
||||
<glyph unicode="" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM500 700l-300 -150l300 -150v300zM600 400l300 150l-300 150v-300z" />
|
||||
<glyph unicode="" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM900 800v-500h-700v500h700zM300 400h130q41 0 68 42t27 107t-28.5 108t-66.5 43h-130v-300zM800 700h-130 q-38 0 -66.5 -43t-28.5 -108t27 -107t68 -42h130v300z" />
|
||||
<glyph unicode="" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM200 800v-300h200v-100h-200v-100h300v300h-200v100h200v100h-300zM800 300h100v500h-200v-100h100v-400z M601 300h100v100h-100v-100z" />
|
||||
<glyph unicode="" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM300 700v100h-100v-500h300v400h-200zM800 300h100v500h-200v-100h100v-400zM401 400h-100v200h100v-200z M601 300h100v100h-100v-100z" />
|
||||
<glyph unicode="" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM1000 900h-900v-700h900v700zM400 700h-200v100h300v-300h-99v-100h-100v100h99v200zM800 700h-100v100h200v-500h-100v400zM201 400h100v-100 h-100v100zM701 300h-100v100h100v-100z" />
|
||||
<glyph unicode="" d="M600 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM600 1014q-171 0 -292.5 -121.5t-121.5 -292.5t121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5zM800 700h-300 v-200h300v-100h-300l-100 100v200l100 100h300v-100z" />
|
||||
<glyph unicode="" d="M596 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM596 1014q-171 0 -292.5 -121.5t-121.5 -292.5t121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5zM800 700v-100 h-100v100h-200v-100h200v-100h-200v-100h-100v400h300zM800 400h-100v100h100v-100z" />
|
||||
<glyph unicode="" d="M800 300h128q120 0 205 86t85 208q0 120 -85 206.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5q0 -80 56.5 -137t135.5 -57h222v300h400v-300zM700 200h200l-300 -300 l-300 300h200v300h200v-300z" />
|
||||
<glyph unicode="" d="M600 714l403 -403q94 26 154.5 104t60.5 178q0 121 -85 207.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5q0 -80 56.5 -137t135.5 -57h8zM700 -100h-200v300h-200l300 300 l300 -300h-200v-300z" />
|
||||
<glyph unicode="" d="M700 200h400l-270 300h170l-270 300h170l-300 333l-300 -333h170l-270 -300h170l-270 -300h400v-155l-75 -45h350l-75 45v155z" />
|
||||
<glyph unicode="" d="M700 45v306q46 -30 100 -30q74 0 126.5 52.5t52.5 126.5q0 24 -9 55q50 32 79.5 83t29.5 112q0 90 -61.5 155.5t-150.5 71.5q-26 89 -99.5 145.5t-167.5 56.5q-116 0 -197.5 -81.5t-81.5 -197.5q0 -4 1 -12t1 -11q-14 2 -23 2q-74 0 -126.5 -52.5t-52.5 -126.5 q0 -53 28.5 -97t75.5 -65q-4 -16 -4 -38q0 -74 52.5 -126.5t126.5 -52.5q56 0 100 30v-306l-75 -45h350z" />
|
||||
<glyph unicode="💼" d="M800 1000h300q41 0 70.5 -29.5t29.5 -70.5v-400h-500v100h-200v-100h-500v400q0 41 29.5 70.5t70.5 29.5h300v100q0 41 29.5 70.5t70.5 29.5h200q41 0 70.5 -29.5t29.5 -70.5v-100zM500 1000h200v100h-200v-100zM1200 400v-200q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5v200h1200z" />
|
||||
<glyph unicode="📅" d="M1100 900v150q0 21 -14.5 35.5t-35.5 14.5h-150v100h-100v-100h-500v100h-100v-100h-150q-21 0 -35.5 -14.5t-14.5 -35.5v-150h1100zM0 800v-750q0 -20 14.5 -35t35.5 -15h1000q21 0 35.5 15t14.5 35v750h-1100zM100 600h100v-100h-100v100zM300 600h100v-100h-100v100z M500 600h100v-100h-100v100zM700 600h100v-100h-100v100zM900 600h100v-100h-100v100zM100 400h100v-100h-100v100zM300 400h100v-100h-100v100zM500 400h100v-100h-100v100zM700 400h100v-100h-100v100zM900 400h100v-100h-100v100zM100 200h100v-100h-100v100zM300 200 h100v-100h-100v100zM500 200h100v-100h-100v100zM700 200h100v-100h-100v100zM900 200h100v-100h-100v100z" />
|
||||
<glyph unicode="📌" d="M902 1185l283 -282q15 -15 15 -36t-15 -35q-14 -15 -35 -15t-35 15l-36 35l-279 -267v-300l-212 210l-208 -207l-380 -303l303 380l207 208l-210 212h300l267 279l-35 36q-15 14 -15 35t15 35q14 15 35 15t35 -15z" />
|
||||
<glyph unicode="📎" d="M518 119l69 -60l517 511q67 67 95 157t11 183q-16 87 -67 154t-130 103q-69 33 -152 33q-107 0 -197 -55q-40 -24 -111 -95l-512 -512q-68 -68 -81 -163t35 -173q35 -57 94 -89t129 -32q63 0 119 28q33 16 65 40.5t52.5 45.5t59.5 64q40 44 57 61l394 394q35 35 47 84 t-3 96q-27 87 -117 104q-20 2 -29 2q-46 0 -79.5 -17t-67.5 -51l-388 -396l-7 -7l69 -67l377 373q20 22 39 38q23 23 50 23q38 0 53 -36q16 -39 -20 -75l-547 -547q-52 -52 -125 -52q-55 0 -100 33t-54 96q-5 35 2.5 66t31.5 63t42 50t56 54q24 21 44 41l348 348 q52 52 82.5 79.5t84 54t107.5 26.5q25 0 48 -4q95 -17 154 -94.5t51 -175.5q-7 -101 -98 -192l-252 -249l-253 -256z" />
|
||||
<glyph unicode="📷" d="M1200 200v600q0 41 -29.5 70.5t-70.5 29.5h-150q-4 8 -11.5 21.5t-33 48t-53 61t-69 48t-83.5 21.5h-200q-41 0 -82 -20.5t-70 -50t-52 -59t-34 -50.5l-12 -20h-150q-41 0 -70.5 -29.5t-29.5 -70.5v-600q0 -41 29.5 -70.5t70.5 -29.5h1000q41 0 70.5 29.5t29.5 70.5z M1000 700h-100v100h100v-100zM844 500q0 -100 -72 -172t-172 -72t-172 72t-72 172t72 172t172 72t172 -72t72 -172zM706 500q0 44 -31 75t-75 31t-75 -31t-31 -75t31 -75t75 -31t75 31t31 75z" />
|
||||
<glyph unicode="🔒" d="M900 800h100q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-900q-41 0 -70.5 29.5t-29.5 70.5v600q0 41 29.5 70.5t70.5 29.5h100v200q0 82 59 141t141 59h300q82 0 141 -59t59 -141v-200zM400 800h300v150q0 21 -14.5 35.5t-35.5 14.5h-200 q-21 0 -35.5 -14.5t-14.5 -35.5v-150z" />
|
||||
<glyph unicode="🔔" d="M1062 400h17q20 0 33.5 -14.5t13.5 -35.5q0 -20 -13 -40t-31 -27q-22 -9 -63 -23t-167.5 -37t-251.5 -23t-245.5 20.5t-178.5 41.5l-58 20q-18 7 -31 27.5t-13 40.5q0 21 13.5 35.5t33.5 14.5h17l118 173l63 327q15 77 76 140t144 83l-18 32q-6 19 3 32t29 13h94 q20 0 29 -10.5t3 -29.5l-18 -37q83 -19 144 -82.5t76 -140.5l63 -327zM600 104q-54 0 -103 6q12 -49 40 -79.5t63 -30.5t63 30.5t39 79.5q-48 -6 -102 -6z" />
|
||||
<glyph unicode="🔖" d="M200 0l450 444l450 -443v1150q0 20 -14.5 35t-35.5 15h-800q-21 0 -35.5 -15t-14.5 -35v-1151z" />
|
||||
<glyph unicode="🔥" d="M400 755q2 -12 8 -41.5t8 -43t6 -39.5t3.5 -39.5t-1 -33.5t-6 -31.5t-13.5 -24t-21 -20.5t-31 -12q-38 -10 -67 13t-40.5 61.5t-15 81.5t10.5 75q-52 -46 -83.5 -101t-39 -107t-7.5 -85t5 -63q9 -56 44 -119.5t105 -108.5q31 -21 64 -16t62 23.5t57 49.5t48 61.5t35 60.5 q32 66 39 184.5t-13 157.5q79 -80 122 -164t26 -184q-5 -33 -20.5 -69.5t-37.5 -80.5q-10 -19 -14.5 -29t-12 -26t-9 -23.5t-3 -19t2.5 -15.5t11 -9.5t19.5 -5t30.5 2.5t42 8q57 20 91 34t87.5 44.5t87 64t65.5 88.5t47 122q38 172 -44.5 341.5t-246.5 278.5q22 -44 43 -129 q39 -159 -32 -154q-15 2 -33 9q-79 33 -120.5 100t-44 175.5t48.5 257.5q-13 -8 -34 -23.5t-72.5 -66.5t-88.5 -105.5t-60 -138t-8 -166.5z" />
|
||||
<glyph unicode="🔧" d="M948 778l251 126q13 -175 -151 -267q-123 -70 -253 -23l-596 -596q-15 -16 -36.5 -16t-36.5 16l-111 110q-15 15 -15 36.5t15 37.5l600 599q-33 101 6 201.5t135 154.5q164 92 306 -9l-259 -138z" />
|
||||
</font>
|
||||
</defs></svg>
Before Width: | Height: | Size: 62 KiB
Binary file not shown.
BIN
rd_ui/app/images/favicon-16x16.png
Executable file
Binary file not shown.
After Width: | Height: | Size: 1.3 KiB
BIN
rd_ui/app/images/favicon-32x32.png
Executable file
Binary file not shown.
After Width: | Height: | Size: 2.0 KiB
BIN
rd_ui/app/images/favicon-96x96.png
Executable file
Binary file not shown.
After Width: | Height: | Size: 3.8 KiB
BIN
rd_ui/app/images/redash_icon_small.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 6.0 KiB
@@ -15,9 +15,17 @@
<link rel="stylesheet" href="/bower_components/pivottable/dist/pivot.css">
<link rel="stylesheet" href="/bower_components/cornelius/src/cornelius.css">
<link rel="stylesheet" href="/bower_components/select2/select2.css">
<link rel="stylesheet" href="/bower_components/angular-ui-select/dist/select.css">
<link rel="stylesheet" href="/bower_components/pace/themes/pace-theme-minimal.css">
<link rel="stylesheet" href="/bower_components/font-awesome/css/font-awesome.css">
<link rel="stylesheet" href="/bower_components/codemirror/addon/hint/show-hint.css">
<link rel="stylesheet" href="/styles/redash.css">
<!-- endbuild -->

<link rel="icon" type="image/png" sizes="32x32" href="/images/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="96x96" href="/images/favicon-96x96.png">
<link rel="icon" type="image/png" sizes="16x16" href="/images/favicon-16x16.png">

</head>
<body>
<div growl></div>
@@ -31,15 +39,15 @@
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href="/"><strong>{{name}}</strong></a>
<a class="navbar-brand" href="/"><img src="/images/redash_icon_small.png"/></a>
</div>
{% raw %}
<div class="collapse navbar-collapse navbar-ex1-collapse">
<ul class="nav navbar-nav">
<li class="active" ng-show="pageTitle"><a class="page-title" ng-bind="pageTitle"></a></li>
<li class="dropdown" ng-show="groupedDashboards.length > 0 || otherDashboards.length > 0 || currentUser.hasPermission('create_dashboard')">
<a href="#" class="dropdown-toggle" data-toggle="dropdown"><span class="glyphicon glyphicon-th-large"></span> <b class="caret"></b></a>
<ul class="dropdown-menu">
<li class="dropdown" ng-show="groupedDashboards.length > 0 || otherDashboards.length > 0 || currentUser.hasPermission('create_dashboard')" dropdown>
<a href="#" class="dropdown-toggle" dropdown-toggle><span class="glyphicon glyphicon-th-large"></span> <b class="caret"></b></a>
<ul class="dropdown-menu" dropdown-menu>
<span ng-repeat="(name, group) in groupedDashboards">
<li class="dropdown-submenu">
<a href="#" ng-bind="name"></a>
@@ -57,9 +65,9 @@
<li><a data-toggle="modal" href="#new_dashboard_dialog" ng-show="currentUser.hasPermission('create_dashboard')">New Dashboard</a></li>
</ul>
</li>
<li class="dropdown" ng-show="currentUser.hasPermission('view_query')">
<a href="#" class="dropdown-toggle" data-toggle="dropdown">Queries <b class="caret"></b></a>
<ul class="dropdown-menu">
<li class="dropdown" ng-show="currentUser.hasPermission('view_query')" dropdown>
<a href="#" class="dropdown-toggle" dropdown-toggle>Queries <b class="caret"></b></a>
<ul class="dropdown-menu" dropdown-menu>
<li ng-show="currentUser.hasPermission('create_query')"><a href="/queries/new">New Query</a></li>
<li><a href="/queries">Queries</a></li>
</ul>
@@ -103,9 +111,11 @@
<script src="/bower_components/codemirror/lib/codemirror.js"></script>
<script src="/bower_components/codemirror/addon/edit/matchbrackets.js"></script>
<script src="/bower_components/codemirror/addon/edit/closebrackets.js"></script>
<script src="/bower_components/codemirror/addon/hint/show-hint.js"></script>
<script src="/bower_components/codemirror/addon/hint/anyword-hint.js"></script>
<script src="/bower_components/codemirror/mode/sql/sql.js"></script>
<script src="/bower_components/codemirror/mode/python/python.js"></script>
<script src="/bower_components/codemirror/mode/javascript/javascript.js"></script>
<script src="/bower_components/angular-ui-codemirror/ui-codemirror.js"></script>
<script src="/bower_components/highcharts/highcharts.js"></script>
<script src="/bower_components/highcharts/modules/exporting.js"></script>
<script src="/bower_components/gridster/dist/jquery.gridster.js"></script>
@@ -116,13 +126,18 @@
<script src="/bower_components/mousetrap/plugins/global-bind/mousetrap-global-bind.js"></script>
<script src="/bower_components/select2/select2.js"></script>
<script src="/bower_components/angular-ui-select2/src/select2.js"></script>
<script src="/bower_components/angular-ui-select/dist/select.js"></script>
<script src="/bower_components/underscore.string/lib/underscore.string.js"></script>
<script src="/bower_components/marked/lib/marked.js"></script>
<script src="/scripts/ng_highchart.js"></script>
<script src="/scripts/ng_smart_table.js"></script>
<script src="/scripts/ui-bootstrap-tpls-0.5.0.min.js"></script>
<script src="/bower_components/angular-ui-bootstrap-bower/ui-bootstrap-tpls.js"></script>
<script src="/bower_components/bucky/bucky.js"></script>
<script src="/bower_components/pace/pace.js"></script>
<script src="/bower_components/mustache/mustache.js"></script>
<script src="/bower_components/canvg/rgbcolor.js"></script>
<script src="/bower_components/canvg/StackBlur.js"></script>
<script src="/bower_components/canvg/canvg.js"></script>
<!-- endbuild -->

<!-- build:js({.tmp,app}) /scripts/scripts.js -->
@@ -139,6 +154,7 @@
<script src="/scripts/visualizations/base.js"></script>
<script src="/scripts/visualizations/chart.js"></script>
<script src="/scripts/visualizations/cohort.js"></script>
<script src="/scripts/visualizations/counter.js"></script>
<script src="/scripts/visualizations/table.js"></script>
<script src="/scripts/visualizations/pivot.js"></script>
<script src="/scripts/directives/directives.js"></script>

@@ -6,7 +6,6 @@ angular.module('redash', [
'redash.services',
'redash.renderers',
'redash.visualization',
'ui.codemirror',
'highchart',
'ui.select2',
'angular-growl',
@@ -14,7 +13,8 @@ angular.module('redash', [
'ui.bootstrap',
'smartTable.table',
'ngResource',
'ngRoute'
'ngRoute',
'ui.select'
]).config(['$routeProvider', '$locationProvider', '$compileProvider', 'growlProvider',
function ($routeProvider, $locationProvider, $compileProvider, growlProvider) {
if (featureFlags.clientSideMetrics) {
@@ -89,6 +89,10 @@ angular.module('redash', [
templateUrl: '/views/index.html',
controller: 'IndexCtrl'
});
$routeProvider.when('/personal', {
templateUrl: '/views/personal.html',
controller: 'PersonalIndexCtrl'
});
$routeProvider.otherwise({
redirectTo: '/'
});

@@ -16,16 +16,9 @@
$timeout(refresh, 59 * 1000);
};

$scope.flowerUrl = featureFlags.flowerUrl;

refresh();
}

var AdminWorkersCtrl = function ($scope, $sce) {
$scope.flowerUrl = $sce.trustAsResourceUrl(featureFlags.flowerUrl);
};

angular.module('redash.admin_controllers', [])
.controller('AdminStatusCtrl', ['$scope', 'Events', '$http', '$timeout', AdminStatusCtrl])
.controller('AdminWorkersCtrl', ['$scope', '$sce', AdminWorkersCtrl])
})();

@@ -1,4 +1,11 @@
(function () {
var dateFormatter = function (value) {
if (!value) {
return "-";
}
return value.toDate().toLocaleString();
};

var QuerySearchCtrl = function($scope, $location, $filter, Events, Query) {
$scope.$parent.pageTitle = "Queries Search";

@@ -8,11 +15,6 @@
maxSize: 8,
};

var dateFormatter = function (value) {
if (!value) return "-";
return value.format("DD/MM/YY HH:mm");
}

$scope.gridColumns = [
{
"label": "Name",
@@ -21,7 +23,7 @@
},
{
'label': 'Created By',
'map': 'user.name'
'map': 'user_name'
},
{
'label': 'Created At',
@@ -30,9 +32,9 @@
},
{
'label': 'Update Schedule',
'map': 'ttl',
'map': 'schedule',
'formatFunction': function (value) {
return $filter('refreshRateHumanize')(value);
return $filter('scheduleHumanize')(value);
}
}
];
@@ -43,6 +45,7 @@
Query.search({q: $scope.term }, function(results) {
$scope.queries = _.map(results, function(query) {
query.created_at = moment(query.created_at);
query.user_name = query.user.name;
return query;
});
});
@@ -70,11 +73,6 @@
$scope.allQueries = [];
$scope.queries = [];

var dateFormatter = function (value) {
if (!value) return "-";
return value.format("DD/MM/YY HH:mm");
}

var filterQueries = function () {
$scope.queries = _.filter($scope.allQueries, function (query) {
if (!$scope.selectedTab) {
@@ -95,6 +93,7 @@
$scope.allQueries = _.map(queries, function (query) {
query.created_at = moment(query.created_at);
query.retrieved_at = moment(query.retrieved_at);
query.user_name = query.user.name;
return query;
});

@@ -109,7 +108,7 @@
},
{
'label': 'Created By',
'map': 'user.name'
'map': 'user_name'
},
{
'label': 'Created At',
@@ -130,9 +129,9 @@
},
{
'label': 'Update Schedule',
'map': 'ttl',
'map': 'schedule',
'formatFunction': function (value) {
return $filter('refreshRateHumanize')(value);
return $filter('scheduleHumanize')(value);
}
}
]
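A note on the two grid hunks above: both column definitions move from the old ttl field to the new schedule field, and swap the refreshRateHumanize filter for scheduleHumanize. The filter itself is not part of this diff; the following is a minimal, hypothetical sketch of what it could look like, assuming schedule is null, an interval in seconds, or a daily "HH:MM" time (the same convention the dashboard hunk further down matches with /\d\d:\d\d/):

// Hypothetical sketch of a scheduleHumanize filter (not shown in this diff).
// Assumes `schedule` is null, a number of seconds, or an "HH:MM" string,
// and that moment.js is loaded (it is used throughout these controllers).
angular.module('redash.filters', [])
  .filter('scheduleHumanize', function () {
    return function (schedule) {
      if (!schedule) {
        return 'Never';                              // unscheduled query
      }
      if (String(schedule).match(/\d\d:\d\d/) !== null) {
        return 'Every day at ' + schedule;           // daily "HH:MM" schedule
      }
      // interval schedule, stored as seconds
      return 'Every ' + moment.duration(parseInt(schedule, 10), 'seconds').humanize();
    };
  });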
@@ -192,7 +191,7 @@
$(window).click(function () {
notifications.getPermissions();
});
}
};

var IndexCtrl = function ($scope, Events, Dashboard) {
Events.record(currentUser, "view", "page", "homepage");
@@ -206,11 +205,29 @@
});
}
}
}
};

var PersonalIndexCtrl = function ($scope, Events, Dashboard, Query) {
Events.record(currentUser, "view", "page", "personal_homepage");
$scope.$parent.pageTitle = "Home";

$scope.recentQueries = Query.recent();
$scope.recentDashboards = Dashboard.recent();

$scope.archiveDashboard = function (dashboard) {
if (confirm('Are you sure you want to delete "' + dashboard.name + '" dashboard?')) {
Events.record(currentUser, "archive", "dashboard", dashboard.id);
dashboard.$delete(function () {
$scope.$parent.reloadDashboards();
});
}
}
};

angular.module('redash.controllers', [])
.controller('QueriesCtrl', ['$scope', '$http', '$location', '$filter', 'Query', QueriesCtrl])
.controller('IndexCtrl', ['$scope', 'Events', 'Dashboard', IndexCtrl])
.controller('PersonalIndexCtrl', ['$scope', 'Events', 'Dashboard', 'Query', PersonalIndexCtrl])
.controller('MainCtrl', ['$scope', '$location', 'Dashboard', 'notifications', MainCtrl])
.controller('QuerySearchCtrl', ['$scope', '$location', '$filter', 'Events', 'Query', QuerySearchCtrl]);
})();

@@ -1,5 +1,5 @@
(function() {
var DashboardCtrl = function($scope, Events, Widget, $routeParams, $http, $timeout, $q, Dashboard) {
var DashboardCtrl = function($scope, Events, Widget, $routeParams, $location, $http, $timeout, $q, Dashboard) {
$scope.refreshEnabled = false;
$scope.refreshRate = 60;

@@ -15,8 +15,8 @@
return _.map(row, function (widget) {
var w = new Widget(widget);

if (w.visualization && dashboard.dashboard_filters_enabled) {
promises.push(w.getQuery().getQueryResultPromise());
if (w.visualization) {
promises.push(w.getQuery().getQueryResult().toPromise());
}

return w;
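The hunk above now queues a promise for every visualization widget, not only when dashboard filters are enabled, and switches from the old getQueryResultPromise() to getQueryResult().toPromise(). Presumably the collected promises are resolved together before the filter extraction in the next hunk; a minimal sketch of that assumed pattern, using the injected $q:

// Sketch (assumed surrounding context, not part of this hunk):
// wait for every widget's query result, then derive dashboard filters.
$q.all(promises).then(function (queryResults) {
  // queryResults[i] is the result of the i-th visualization widget;
  // the next hunk iterates them to build the `filters` map.
});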
@@ -27,27 +27,35 @@
var filters = {};
_.each(queryResults, function(queryResult) {
var queryFilters = queryResult.getFilters();
_.each(queryFilters, function (filter) {
if (!_.has(filters, filter.name)) {
// TODO: first object should be a copy, otherwise one of the chart filters behaves different than the others.
_.each(queryFilters, function (queryFilter) {
var hasQueryStringValue = _.has($location.search(), queryFilter.name);

if (!(hasQueryStringValue || dashboard.dashboard_filters_enabled)) {
// If dashboard filters not enabled, or no query string value given, skip filters linking.
return;
}

if (!_.has(filters, queryFilter.name)) {
var filter = _.extend({}, queryFilter);
filters[filter.name] = filter;
filters[filter.name].originFilters = [];
if (hasQueryStringValue) {
filter.current = $location.search()[filter.name];
}

$scope.$watch(function () { return filter.current }, function (value) {
_.each(filter.originFilters, function (originFilter) {
originFilter.current = value;
});
});
};
}

// TODO: merge values.
filters[filter.name].originFilters.push(filter);
filters[queryFilter.name].originFilters.push(queryFilter);
});
});

if (dashboard.dashboard_filters_enabled) {
$scope.filters = _.values(filters);
}
$scope.filters = _.values(filters);
});

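Worth spelling out what the rewritten block does: the first widget filter seen under a given name becomes the dashboard-level filter (now copied with _.extend, which addresses the old "first object should be a copy" TODO), every later same-named filter is appended to its originFilters, and a $watch on the dashboard-level current value fans the selection out to all origins. A minimal usage sketch under those assumptions:

// Sketch: two widgets both expose a "country" filter.
// After linking, changing the dashboard-level value updates both origins.
var dashboardFilter = filters['country'];   // the _.extend copy
dashboardFilter.current = 'DE';             // e.g. seeded from ?country=DE
$scope.$digest();                           // run watchers (test context)
// dashboardFilter.originFilters[0].current === 'DE'
// dashboardFilter.originFilters[1].current === 'DE'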
@@ -74,7 +82,7 @@
_.each(row, function(widget, i) {
var newWidget = newWidgets[widget.id];
if (newWidget && newWidget[0].visualization.query.latest_query_data_id != widget.visualization.query.latest_query_data_id) {
row[i] = newWidget[0];
row[i] = new Widget(newWidget[0]);
}
});
});
@@ -83,8 +91,8 @@
});

}, $scope.refreshRate);
};
}
}
};

$scope.triggerRefresh = function() {
$scope.refreshEnabled = !$scope.refreshEnabled;
@@ -92,9 +100,13 @@
Events.record(currentUser, "autorefresh", "dashboard", dashboard.id, {'enable': $scope.refreshEnabled});

if ($scope.refreshEnabled) {
var refreshRate = _.min(_.flatten($scope.dashboard.widgets), function(widget) {
return widget.visualization.query.ttl;
}).visualization.query.ttl;
var refreshRate = _.min(_.map(_.flatten($scope.dashboard.widgets), function(widget) {
var schedule = widget.visualization.query.schedule;
if (schedule === null || schedule.match(/\d\d:\d\d/) !== null) {
return 60;
}
return widget.visualization.query.schedule;
}));

$scope.refreshRate = _.max([120, refreshRate * 2]) * 1000;

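The new computation treats unscheduled queries (null) and daily "HH:MM" schedules as a 60-second floor, takes the minimum over all widgets, doubles it, and clamps to at least 120 seconds before converting to milliseconds. A worked example for a dashboard with three widgets:

// widget schedules: [30, "16:00", null]  ->  mapped to [30, 60, 60]
// _.min([30, 60, 60])  -> 30
// refreshRate * 2      -> 60
// _.max([120, 60])     -> 120
// * 1000               -> 120000 ms between dashboard auto-refreshes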
@@ -103,7 +115,7 @@
};
};

var WidgetCtrl = function($scope, Events, Query) {
var WidgetCtrl = function($scope, $location, Events, Query) {
$scope.deleteWidget = function() {
if (!confirm('Are you sure you want to remove "' + $scope.widget.getName() + '" from the dashboard?')) {
return;
@@ -127,8 +139,9 @@
Events.record(currentUser, "view", "visualization", $scope.widget.visualization.id);

$scope.query = $scope.widget.getQuery();
$scope.queryResult = $scope.query.getQueryResult();
$scope.nextUpdateTime = moment(new Date(($scope.query.updated_at + $scope.query.ttl + $scope.query.runtime + 300) * 1000)).fromNow();
var parameters = Query.collectParamsFromQueryString($location, $scope.query);
var maxAge = $location.search()['maxAge'];
$scope.queryResult = $scope.query.getQueryResult(maxAge, parameters);

$scope.type = 'visualization';
} else {
@@ -137,7 +150,7 @@
};

angular.module('redash.controllers')
.controller('DashboardCtrl', ['$scope', 'Events', 'Widget', '$routeParams', '$http', '$timeout', '$q', 'Dashboard', DashboardCtrl])
.controller('WidgetCtrl', ['$scope', 'Events', 'Query', WidgetCtrl])
.controller('DashboardCtrl', ['$scope', 'Events', 'Widget', '$routeParams', '$location', '$http', '$timeout', '$q', 'Dashboard', DashboardCtrl])
.controller('WidgetCtrl', ['$scope', '$location', 'Events', 'Query', WidgetCtrl])

})();
})();

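One detail this hunk illustrates: with array-style dependency injection, $location has to be added in two places — the annotation array at the bottom of the file and the controller function signature at the top — and the positions must line up, or minification-safe injection hands the wrong services to the wrong parameters. The annotation order defines what each argument receives:

// annotation:
// .controller('WidgetCtrl', ['$scope', '$location', 'Events', 'Query', WidgetCtrl])
// must match the declaration:
// var WidgetCtrl = function($scope, $location, Events, Query) { ... };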
@@ -14,20 +14,10 @@
var isNewQuery = !$scope.query.id,
queryText = $scope.query.query,
// ref to QueryViewCtrl.saveQuery
saveQuery = $scope.saveQuery,
shortcuts = {
'meta+s': function () {
if ($scope.canEdit) {
$scope.saveQuery();
}
},
'meta+enter': function () {
$scope.executeQuery();
}
};
saveQuery = $scope.saveQuery;

$scope.sourceMode = true;
$scope.canEdit = currentUser.canEdit($scope.query);
$scope.canEdit = true;
$scope.isDirty = false;

$scope.newVisualization = undefined;
@@ -39,6 +29,22 @@
}
});

var shortcuts = {
'meta+s': function () {
if ($scope.canEdit) {
$scope.saveQuery();
}
},
'ctrl+s': function () {
if ($scope.canEdit) {
$scope.saveQuery();
}
},
// Cmd+Enter for Mac
'meta+enter': $scope.executeQuery,
// Ctrl+Enter for PC
'ctrl+enter': $scope.executeQuery
};

KeyboardShortcuts.bind(shortcuts);

@@ -62,7 +68,7 @@
$scope.duplicateQuery = function() {
Events.record(currentUser, 'fork', 'query', $scope.query.id);
$scope.query.id = null;
$scope.query.ttl = -1;
$scope.query.schedule = null;

$scope.saveQuery({
successMessage: 'Query forked',

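The shortcut map is now built after saveQuery/executeQuery exist on the scope and covers both Mac (meta) and PC (ctrl) chords. The KeyboardShortcuts service itself is not part of this diff; a minimal, hypothetical sketch of it, assuming it wraps the Mousetrap global-bind plugin that index.html already loads:

// Hypothetical sketch of the KeyboardShortcuts service used above.
// Assumes mousetrap + its global-bind plugin are loaded (see index.html).
angular.module('redash.services')
  .service('KeyboardShortcuts', function () {
    this.bind = function (shortcuts) {
      _.each(shortcuts, function (fn, key) {
        Mousetrap.bindGlobal(key, function (e) {
          e.preventDefault();   // e.g. keep meta+s from opening the save dialog
          fn();
        });
      });
    };
    this.unbind = function (shortcuts) {
      _.each(shortcuts, function (fn, key) {
        Mousetrap.unbind(key);  // called when leaving the editor
      });
    };
  });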
@@ -1,19 +1,57 @@
(function() {
'use strict';

function QueryViewCtrl($scope, Events, $route, $location, notifications, growl, Query, DataSource) {
function QueryViewCtrl($scope, Events, $route, $location, notifications, growl, $modal, Query, DataSource) {
var DEFAULT_TAB = 'table';

var getQueryResult = function(maxAge) {
// Collect params, and getQueryResult with params; getQueryResult merges it into the query
var parameters = Query.collectParamsFromQueryString($location, $scope.query);
if (maxAge == undefined) {
maxAge = $location.search()['maxAge'];
}

if (maxAge == undefined) {
maxAge = -1;
}

$scope.queryResult = $scope.query.getQueryResult(maxAge, parameters);
}

$scope.dataSource = {};
$scope.query = $route.current.locals.query;

var updateSchema = function() {
$scope.hasSchema = false;
$scope.editorSize = "col-md-12";
var dataSourceId = $scope.query.data_source_id || $scope.dataSources[0].id;
DataSource.getSchema({id: dataSourceId}, function(data) {
if (data && data.length > 0) {
$scope.schema = data;
_.each(data, function(table) {
table.collapsed = true;
});

$scope.editorSize = "col-md-9";
$scope.hasSchema = true;
} else {
$scope.hasSchema = false;
$scope.editorSize = "col-md-12";
}
});
}

Events.record(currentUser, 'view', 'query', $scope.query.id);
$scope.queryResult = $scope.query.getQueryResult();
getQueryResult();
$scope.queryExecuting = false;

$scope.isQueryOwner = currentUser.id === $scope.query.user.id;
$scope.isQueryOwner = (currentUser.id === $scope.query.user.id) || currentUser.hasPermission('admin');
$scope.canViewSource = currentUser.hasPermission('view_source');

$scope.dataSources = DataSource.get(function(dataSources) {
updateSchema();
$scope.query.data_source_id = $scope.query.data_source_id || dataSources[0].id;
$scope.dataSource = _.find(dataSources, function(ds) { return ds.id == $scope.query.data_source_id; });
});

// in view mode, latest dataset is always visible
@@ -24,6 +62,10 @@
|
||||
$scope.queryExecuting = lock;
|
||||
};
|
||||
|
||||
$scope.showApiKey = function() {
|
||||
alert("API Key for this query:\n" + $scope.query.api_key);
|
||||
};
|
||||
|
||||
$scope.saveQuery = function(options, data) {
|
||||
if (data) {
|
||||
data.id = $scope.query.id;
|
||||
@@ -57,7 +99,7 @@
|
||||
};
|
||||
|
||||
$scope.executeQuery = function() {
|
||||
$scope.queryResult = $scope.query.getQueryResult(0);
|
||||
getQueryResult(0);
|
||||
$scope.lockButton(true);
|
||||
$scope.cancelling = false;
|
||||
Events.record(currentUser, 'execute', 'query', $scope.query.id);
|
||||
@@ -68,6 +110,31 @@
|
||||
$scope.queryResult.cancelExecution();
|
||||
Events.record(currentUser, 'cancel_execute', 'query', $scope.query.id);
|
||||
};
|
||||
|
||||
$scope.archiveQuery = function(options, data) {
|
||||
if (data) {
|
||||
data.id = $scope.query.id;
|
||||
} else {
|
||||
data = $scope.query;
|
||||
}
|
||||
|
||||
$scope.isDirty = false;
|
||||
|
||||
options = _.extend({}, {
|
||||
successMessage: 'Query archived',
|
||||
errorMessage: 'Query could not be archived'
|
||||
}, options);
|
||||
|
||||
return Query.delete({id: data.id}, function() {
|
||||
$scope.query.is_archived = true;
|
||||
$scope.query.schedule = null;
|
||||
growl.addSuccessMessage(options.successMessage);
|
||||
// This feels dirty.
|
||||
$('#archive-confirmation-modal').modal('hide');
|
||||
}, function(httpResponse) {
|
||||
growl.addErrorMessage(options.errorMessage);
|
||||
}).$promise;
|
||||
}
|
||||
|
||||
$scope.updateDataSource = function() {
|
||||
Events.record(currentUser, 'update_data_source', 'query', $scope.query.id);
|
||||
@@ -83,6 +150,8 @@
|
||||
});
|
||||
}
|
||||
|
||||
updateSchema();
|
||||
$scope.dataSource = _.find($scope.dataSources, function(ds) { return ds.id == $scope.query.data_source_id; });
|
||||
$scope.executeQuery();
|
||||
};
|
||||
|
||||
@@ -121,6 +190,8 @@
|
||||
$scope.query.queryResult = $scope.queryResult;
|
||||
|
||||
notifications.showNotification("re:dash", $scope.query.name + " updated.");
|
||||
} else if (status == 'failed') {
|
||||
notifications.showNotification("re:dash", $scope.query.name + " failed to run: " + $scope.queryResult.getError());
|
||||
}
|
||||
|
||||
if (status === 'done' || status === 'failed') {
|
||||
@@ -128,6 +199,28 @@
|
||||
}
|
||||
});
|
||||
|
||||
$scope.openScheduleForm = function() {
|
||||
if (!$scope.isQueryOwner) {
|
||||
return;
|
||||
};
|
||||
|
||||
$modal.open({
|
||||
templateUrl: '/views/schedule_form.html',
|
||||
size: 'sm',
|
||||
scope: $scope,
|
||||
controller: ['$scope', '$modalInstance', function($scope, $modalInstance) {
|
||||
$scope.close = function() {
|
||||
$modalInstance.close();
|
||||
}
|
||||
if ($scope.query.hasDailySchedule()) {
|
||||
$scope.refreshType = 'daily';
|
||||
} else {
|
||||
$scope.refreshType = 'periodic';
|
||||
}
|
||||
}]
|
||||
});
|
||||
};
|
||||
|
||||
$scope.$watch(function() {
|
||||
return $location.hash()
|
||||
}, function(hash) {
|
||||
@@ -140,5 +233,5 @@
|
||||
|
||||
angular.module('redash.controllers')
|
||||
.controller('QueryViewCtrl',
|
||||
['$scope', 'Events', '$route', '$location', 'notifications', 'growl', 'Query', 'DataSource', QueryViewCtrl]);
|
||||
['$scope', 'Events', '$route', '$location', 'notifications', 'growl', '$modal', 'Query', 'DataSource', QueryViewCtrl]);
|
||||
})();
|
||||
|
||||
@@ -147,22 +147,22 @@
var reset = function() {
$scope.saveInProgress = false;
$scope.widgetSize = 1;
$scope.queryId = null;
$scope.selectedVis = null;
$scope.query = null;
$scope.query = {};
$scope.selected_query = undefined;
$scope.text = "";
};

reset();

$scope.loadVisualizations = function () {
if (!$scope.queryId) {
if (!$scope.query.selected) {
return;
}

Query.get({ id: $scope.queryId }, function(query) {
Query.get({ id: $scope.query.selected.id }, function(query) {
if (query) {
$scope.query = query;
$scope.selected_query = query;
if (query.visualizations.length) {
$scope.selectedVis = query.visualizations[0];
}
@@ -170,6 +170,20 @@
});
};

$scope.searchQueries = function (term) {
if (!term || term.length < 3) {
return;
}

Query.search({q: term}, function(results) {
$scope.queries = results;
});
};

$scope.$watch('query', function () {
$scope.loadVisualizations();
}, true);

$scope.saveWidget = function() {
$scope.saveInProgress = true;

@@ -29,7 +29,7 @@
restrict: 'E',
template: '<span ng-show="query.id && canViewSource">\
<a ng-show="!sourceMode"\
ng-href="{{query.id}}/source#{{selectedTab}}">Show Source\
ng-href="/queries/{{query.id}}/source#{{selectedTab}}">Show Source\
</a>\
<a ng-show="sourceMode"\
ng-href="/queries/{{query.id}}#{{selectedTab}}">Hide Source\
@@ -63,26 +63,95 @@
restrict: 'E',
scope: {
'query': '=',
'lock': '='
'lock': '=',
'schema': '=',
'syntax': '='
},
template: '<textarea\
ui-codemirror="editorOptions"\
ng-model="query.query">',
link: function($scope) {
$scope.editorOptions = {
mode: 'text/x-sql',
template: '<textarea></textarea>',
link: {
pre: function ($scope, element) {
$scope.syntax = $scope.syntax || 'sql';

var modes = {
'sql': 'text/x-sql',
'python': 'text/x-python',
'json': 'application/json'
};

var textarea = element.children()[0];
var editorOptions = {
mode: modes[$scope.syntax],
lineWrapping: true,
lineNumbers: true,
readOnly: false,
matchBrackets: true,
autoCloseBrackets: true
};
autoCloseBrackets: true,
extraKeys: {"Ctrl-Space": "autocomplete"}
};

$scope.$watch('lock', function(locked) {
$scope.editorOptions.readOnly = locked ? 'nocursor' : false;
});
var additionalHints = [];

CodeMirror.commands.autocomplete = function(cm) {
var hinter = function(editor, options) {
var hints = CodeMirror.hint.anyword(editor, options);
var cur = editor.getCursor(), token = editor.getTokenAt(cur).string;

hints.list = _.union(hints.list, _.filter(additionalHints, function (h) {
return h.search(token) === 0;
}));

return hints;
};

// CodeMirror.showHint(cm, CodeMirror.hint.anyword);
CodeMirror.showHint(cm, hinter);
};

var codemirror = CodeMirror.fromTextArea(textarea, editorOptions);

codemirror.on('change', function(instance) {
var newValue = instance.getValue();

if (newValue !== $scope.query.query) {
$scope.$evalAsync(function() {
$scope.query.query = newValue;
});
}

$('.schema-container').css('height', $('.CodeMirror').css('height'));
});

$scope.$watch('query.query', function () {
if ($scope.query.query !== codemirror.getValue()) {
codemirror.setValue($scope.query.query);
}
});

$scope.$watch('schema', function (schema) {
if (schema) {
var keywords = [];
_.each(schema, function (table) {
keywords.push(table.name);
_.each(table.columns, function (c) {
keywords.push(c);
});
});

additionalHints = _.unique(keywords);
}
});

$scope.$watch('syntax', function(syntax) {
codemirror.setOption('mode', modes[syntax]);
});

$scope.$watch('lock', function (locked) {
var readOnly = locked ? 'nocursor' : false;
codemirror.setOption('readOnly', readOnly);
});
}
}
}
};
}

function queryFormatter($http) {
@@ -98,55 +167,104 @@
</button>',
link: function($scope) {
$scope.formatQuery = function formatQuery() {
$scope.queryExecuting = true;
$scope.queryFormatting = true;
$http.post('/api/queries/format', {
'query': $scope.query.query
}).success(function (response) {
$scope.query.query = response;
}).finally(function () {
$scope.queryExecuting = false;
$scope.queryFormatting = false;
});
};
}
}
}

function queryTimePicker() {
return {
restrict: 'E',
template: '<select ng-disabled="refreshType != \'daily\'" ng-model="hour" ng-change="updateSchedule()" ng-options="c as c for c in hourOptions"></select> :\
<select ng-disabled="refreshType != \'daily\'" ng-model="minute" ng-change="updateSchedule()" ng-options="c as c for c in minuteOptions"></select>',
link: function($scope) {
var padWithZeros = function(size, v) {
v = String(v);
if (v.length < size) {
v = "0" + v;
}
return v;
};

$scope.hourOptions = _.map(_.range(0, 24), _.partial(padWithZeros, 2));
$scope.minuteOptions = _.map(_.range(0, 60, 5), _.partial(padWithZeros, 2));

if ($scope.query.hasDailySchedule()) {
var parts = $scope.query.scheduleInLocalTime().split(':');
$scope.minute = parts[1];
$scope.hour = parts[0];
} else {
$scope.minute = "15";
$scope.hour = "00";
}

$scope.updateSchedule = function() {
var newSchedule = moment().hour($scope.hour).minute($scope.minute).utc().format('HH:mm');
if (newSchedule != $scope.query.schedule) {
$scope.query.schedule = newSchedule;
$scope.saveQuery();
}
};

$scope.$watch('refreshType', function() {
if ($scope.refreshType == 'daily') {
$scope.updateSchedule();
}
});
}
}
}

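As an illustration of the UTC round-trip the time picker performs (assumes moment.js; the timezone is hypothetical):

    // A user in UTC+2 who picks 09:30 stores the schedule in UTC:
    moment().hour(9).minute(30).utc().format('HH:mm');        // => "07:30"
    // scheduleInLocalTime() reverses the conversion for display:
    moment.utc().hour(7).minute(30).local().format('HH:mm');  // => "09:30"
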
function queryRefreshSelect() {
return {
restrict: 'E',
template: '<select\
ng-disabled="!isQueryOwner"\
ng-model="query.ttl"\
ng-disabled="refreshType != \'periodic\'"\
ng-model="query.schedule"\
ng-change="saveQuery()"\
ng-options="c.value as c.name for c in refreshOptions">\
<option value="">No Refresh</option>\
</select>',
link: function($scope) {
$scope.refreshOptions = [
{
value: -1,
name: 'No Refresh'
},
{
value: 60,
value: "60",
name: 'Every minute'
},
}
]

_.each(_.range(1, 13), function (i) {
$scope.refreshOptions.push({
value: i * 3600,
value: String(i * 3600),
name: 'Every ' + i + 'h'
});
})

$scope.refreshOptions.push({
value: 24 * 3600,
value: String(24 * 3600),
name: 'Every 24h'
});
$scope.refreshOptions.push({
value: 7 * 24 * 3600,
value: String(7 * 24 * 3600),
name: 'Once a week'
});

$scope.$watch('refreshType', function() {
if ($scope.refreshType == 'periodic') {
if ($scope.query.hasDailySchedule()) {
$scope.query.schedule = null;
$scope.saveQuery();
}
}
});
}

}
@@ -158,5 +276,6 @@
.directive('queryResultLink', queryResultCSVLink)
.directive('queryEditor', queryEditor)
.directive('queryRefreshSelect', queryRefreshSelect)
.directive('queryTimePicker', queryTimePicker)
.directive('queryFormatter', ['$http', queryFormatter]);
})();
@@ -24,13 +24,17 @@ angular.module('redash.filters', []).
return durationHumanize;
})

.filter('refreshRateHumanize', function () {
return function (ttl) {
if (ttl == -1) {
.filter('scheduleHumanize', function() {
return function (schedule) {
if (schedule === null) {
return "Never";
} else {
return "Every " + durationHumanize(ttl);
} else if (schedule.match(/\d\d:\d\d/) !== null) {
var parts = schedule.split(':');
var localTime = moment.utc().hour(parts[0]).minute(parts[1]).local().format('HH:mm');
return "Every day at " + localTime;
}

return "Every " + durationHumanize(parseInt(schedule));
}
})

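Illustrative input/output pairs for the new scheduleHumanize filter (times shown for a viewer in UTC+2; the exact wording for periodic schedules depends on durationHumanize):

    // null     -> "Never"
    // "07:30"  -> "Every day at 09:30"
    // "3600"   -> "Every " + durationHumanize(3600), i.e. roughly "Every hour"
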
@@ -75,4 +79,13 @@ angular.module('redash.filters', []).
}
return $sce.trustAsHtml(marked(text));
}
}]);
}])

.filter('trustAsHtml', ['$sce', function ($sce) {
return function (text) {
if (!text) {
return "";
}
return $sce.trustAsHtml(text);
}
}]);

@@ -1,9 +1,20 @@
(function () {
'use strict';

var ColorPalette = {
'Blue':'#4572A7',
'Red':'#AA4643',
'Green': '#89A54E',
'Purple': '#80699B',
'Cyan': '#3D96AE',
'Orange': '#DB843D',
'Light Blue': '#92A8CD',
'Lilac': '#A47D7C',
'Light Green': '#B5CA92',
};

Highcharts.setOptions({
colors: ["#4572A7", "#AA4643", "#89A54E", "#80699B", "#3D96AE",
"#DB843D", "#92A8CD", "#A47D7C", "#B5CA92"]
colors: _.values(ColorPalette)
});

var defaultOptions = {
@@ -39,7 +50,7 @@
;

if (moment.isMoment(this.x)) {
var s = '<b>' + moment(this.x).format("DD/MM/YY HH:mm") + '</b>',
var s = '<b>' + this.x.toDate().toLocaleString() + '</b>',
pointsCount = this.points.length;

$.each(this.points, function (i, point) {
@@ -115,23 +126,51 @@
{
text: 'Show Total',
onclick: function () {
var hasTotalsAlready = _.some(this.series, function (s) {
var res = (s.name == 'Total');
//if 'Total' already exists - just make it visible
if (res) s.setVisible(true, false);
return res;
})
var data = {};
_.each(this.series, function (s) {
s.setVisible(false, false);
_.each(s.data, function (p) {
data[p.x] = data[p.x] || {'x': p.x, 'y': 0};
data[p.x].y = data[p.x].y + p.y;
});
if (s.name != 'Total') s.setVisible(false, false);
if (!hasTotalsAlready) {
_.each(s.data, function (p) {
data[p.x] = data[p.x] || {'x': p.x, 'y': 0};
data[p.x].y = data[p.x].y + p.y;
});
}
});

this.addSeries({
data: _.values(data),
type: 'line',
name: 'Total'
}, false)
if (!hasTotalsAlready) {
this.addSeries({
data: _.values(data),
type: 'line',
name: 'Total'
}, false)
}

this.redraw();
}
},
{
text: 'Save Image',
onclick: function () {
var canvas = document.createElement('canvas');
window.canvg(canvas, this.getSVG());
var href = canvas.toDataURL('image/png');
var a = document.createElement('a');
a.href = href;
var filenameSuffix = new Date().toISOString().replace(/:/g,'_').replace('Z', '');
if (this.title) {
filenameSuffix = this.title.text;
}
a.download = 'redash_charts_'+filenameSuffix+'.png';
document.body.appendChild(a);
a.click();
a.remove();
}
}
]
}

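A minimal standalone sketch of the per-x totaling 'Show Total' performs above (assumes underscore.js; seriesList is a hypothetical array of {name, data: [{x, y}, ...]} series):

    var data = {};
    _.each(seriesList, function (s) {
      if (s.name != 'Total') {
        _.each(s.data, function (p) {
          data[p.x] = data[p.x] || {'x': p.x, 'y': 0};   // one bucket per x
          data[p.x].y = data[p.x].y + p.y;               // sum across series
        });
      }
    });
    var totalSeries = {name: 'Total', type: 'line', data: _.values(data)};
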
@@ -204,6 +243,7 @@
};

angular.module('highchart', [])
.constant('ColorPalette', ColorPalette)
.directive('chart', ['$timeout', function ($timeout) {
return {
restrict: 'E',
@@ -265,8 +305,45 @@
scope.chart.series[0].remove(false);
};

// We check either for true or undefined for backward compatibility.
var series = scope.series;

if (chartOptions['sortX'] === true || chartOptions['sortX'] === undefined) {
var seriesCopy = [];

_.each(series, function (s) {
// make a copy of series data, so we don't override original.
var fieldName = 'x';
if (s.data.length > 0 && _.has(s.data[0], 'name')) {
fieldName = 'name';
};

var sorted = _.extend({}, s, {data: _.sortBy(s.data, fieldName)});
seriesCopy.push(sorted);
});

series = seriesCopy;
}

// If this is a chart that has just one row for multiple columns, sort
// by the Y values. For example:
//
// A | B | C
// 20 | 30 | 15
//
// Will be sorted:
// C | A | B
// 15 | 20 | 30
var sortable = _.every(series, function(s) { return s.data.length == 1 });

if (sortable) {
series = _.sortBy(series, function (s) {
return s.data[0].y
});
}

if (!('xAxis' in chartOptions && 'type' in chartOptions['xAxis'])) {
if (scope.series.length > 0 && _.some(scope.series[0].data, function (p) {
if (series.length > 0 && _.some(series[0].data, function (p) {
return (angular.isString(p.x) || angular.isDefined(p.name));
})) {
chartOptions['xAxis'] = chartOptions['xAxis'] || {};
@@ -278,13 +355,13 @@
}

if (chartOptions['xAxis']['type'] == 'category' || chartOptions['series']['type']=='pie') {
if (!angular.isDefined(scope.series[0].data[0].name)) {
if (!angular.isDefined(series[0].data[0].name)) {
// We need to make sure that for each category, each series has a value.
var categories = _.union.apply(this, _.map(scope.series, function (s) {
var categories = _.union.apply(this, _.map(series, function (s) {
return _.pluck(s.data, 'x')
}));

_.each(scope.series, function (s) {
_.each(series, function (s) {
// TODO: move this logic to Query#getChartData
var yValues = _.groupBy(s.data, 'x');

@@ -295,11 +372,6 @@
}
});

if (categories.length == 1) {
newData = _.sortBy(newData, 'y').reverse();
}
;

s.data = newData;
});
}
@@ -307,7 +379,7 @@

scope.chart.counters.color = 0;

_.each(scope.series, function (s) {
_.each(series, function (s) {
// here we override the series with the visualization config
s = _.extend(s, chartOptions['series']);

@@ -338,4 +410,4 @@
};

}]);
})();
})();

@@ -91,7 +91,7 @@

//insert columns from column config
//TODO add a way to clean all columns
scope.$watch('columnCollection', function (oldValue, newValue) {
scope.$watchCollection('columnCollection', function (oldValue, newValue) {
if (scope.columnCollection) {
scope.columns.length = 0;
for (var i = 0, l = scope.columnCollection.length; i < l; i++) {
@@ -205,11 +205,10 @@
column = scope.column,
row = scope.dataRow,
format = filter('format'),
getter = parse(column.map),
childScope;

//can be useful for child directives
scope.formatedValue = format(getter(row), column.formatFunction, column.formatParameter);
scope.formatedValue = format(row[column.map], column.formatFunction, column.formatParameter);

function defaultContent() {
//clear content
@@ -267,12 +266,11 @@
replace: true,
link: function (scope, element, attrs, ctrl) {
var form = angular.element(element.children()[1]),
input = angular.element(form.children()[0]),
getter = parse(scope.column.map);
input = angular.element(form.children()[0]);

//init values
scope.isEditMode = false;
scope.value = getter(scope.row);
scope.value = scope.row[scope.column.map];

scope.submit = function () {
@@ -285,7 +283,7 @@
};

scope.toggleEditMode = function () {
scope.value = getter(scope.row);
scope.value = scope.row[scope.column.map];
scope.isEditMode = scope.isEditMode !== true;
};

@@ -595,13 +593,11 @@
*/
this.updateDataRow = function (dataRow, propertyName, newValue) {
var index = scope.displayedCollection.indexOf(dataRow),
getter = parse(propertyName),
setter = getter.assign,
oldValue;
if (index !== -1) {
oldValue = getter(scope.displayedCollection[index]);
oldValue = scope.displayedCollection[index][propertyName];
if (oldValue !== newValue) {
setter(scope.displayedCollection[index], newValue);
scope.displayedCollection[index][propertyName] = newValue;
scope.$emit('updateDataRow', {item: scope.displayedCollection[index]});
}
}

@@ -1,6 +1,12 @@
(function () {
var Dashboard = function($resource) {
var resource = $resource('/api/dashboards/:slug', {slug: '@slug'});
var resource = $resource('/api/dashboards/:slug', {slug: '@slug'}, {
recent: {
method: 'get',
isArray: true,
url: "/api/dashboards/recent"
}});

resource.prototype.canEdit = function() {
return currentUser.hasPermission('admin') || currentUser.canEdit(this);
}

@@ -1,10 +1,9 @@
(function () {
var notifications = function (Events) {
var notificationService = {};
var lastNotification = null;

notificationService.isSupported = function () {
if (window.webkitNotifications) {
if ("Notification" in window) {
return true;
} else {
console.log("HTML5 notifications are not supported.");
@@ -17,8 +16,12 @@
return;
}

if (!window.webkitNotifications.checkPermission() == 0) { // 0 is PERMISSION_ALLOWED
window.webkitNotifications.requestPermission();
if (Notification.permission !== "granted") {
Notification.requestPermission(function (status) {
if (Notification.permission !== status) {
Notification.permission = status;
}
});
}
}

@@ -27,23 +30,16 @@
return;
}

if (document.webkitVisibilityState && document.webkitVisibilityState == 'visible') {
return;
}

if (lastNotification) {
lastNotification.cancel();
}

var notification = window.webkitNotifications.createNotification('', title, content);
lastNotification = notification;
//using the 'tag' to avoid showing duplicate notifications
var notification = new Notification(title, {'tag': title+content, 'body': content});
setTimeout(function(){
notification.close();
},3000);
notification.onclick = function () {
window.focus();
this.cancel();
Events.record(currentUser, 'click', 'notification');
};

notification.show()
}

return notificationService;

@@ -22,13 +22,17 @@
} else if (_.isString(v) && v.match(/^\d{4}-\d{2}-\d{2}/)) {
row[k] = moment(v);
columnTypes[k] = 'date';
} else if (typeof(v) == 'object' && v !== null) {
row[k] = JSON.stringify(v);
}
}, this);
}, this);

_.each(this.query_result.data.columns, function(column) {
if (columnTypes[column.name]) {
column.type = columnTypes[column.name];
if (column.type == null) {
column.type = columnTypes[column.name];
}
}
});

@@ -212,10 +216,6 @@
}
});

_.each(series, function (series) {
series.data = _.sortBy(series.data, 'x');
});

return _.values(series);
};

@@ -245,26 +245,9 @@
return parts[0];
};

var charConversionMap = {
'__pct': /%/g,
'_': / /g,
'__qm': /\?/g,
'__brkt': /[\(\)\[\]]/g,
'__dash': /-/g,
'__amp': /&/g,
'__sl': /\//g,
'__fsl': /\\/g,
};

QueryResult.prototype.getColumnCleanName = function (column) {
var name = this.getColumnNameWithoutType(column);

if (name != '') {
_.each(charConversionMap, function(regex, replacement) {
name = name.replace(regex, replacement);
});
}

return name;
}

@@ -327,7 +310,7 @@
this.filters = filters;
}

var refreshStatus = function (queryResult, query, ttl) {
var refreshStatus = function (queryResult, query) {
Job.get({'id': queryResult.job.id}, function (response) {
queryResult.update(response);

@@ -337,7 +320,7 @@
});
} else if (queryResult.getStatus() != "failed") {
$timeout(function () {
refreshStatus(queryResult, query, ttl);
refreshStatus(queryResult, query);
}, 3000);
}
})
@@ -357,14 +340,19 @@
return this.deferred.promise;
}

QueryResult.get = function (data_source_id, query, ttl) {
QueryResult.get = function (data_source_id, query, maxAge, queryId) {
var queryResult = new QueryResult();

QueryResultResource.post({'data_source_id': data_source_id, 'query': query, 'ttl': ttl}, function (response) {
var params = {'data_source_id': data_source_id, 'query': query, 'max_age': maxAge};
if (queryId !== undefined) {
params['query_id'] = queryId;
};

QueryResultResource.post(params, function (response) {
queryResult.update(response);

if ('job' in response) {
refreshStatus(queryResult, query, ttl);
refreshStatus(queryResult, query);
}
});

@@ -375,36 +363,95 @@
};

var Query = function ($resource, QueryResult, DataSource) {
var Query = $resource('/api/queries/:id', {id: '@id'}, {search: {method: 'get', isArray: true, url: "/api/queries/search"}});
var Query = $resource('/api/queries/:id', {id: '@id'},
{
search: {
method: 'get',
isArray: true,
url: "/api/queries/search"
},
recent: {
method: 'get',
isArray: true,
url: "/api/queries/recent"
}});

Query.newQuery = function () {
return new Query({
query: "",
name: "New Query",
ttl: -1,
schedule: null,
user: currentUser
});
};

Query.collectParamsFromQueryString = function($location, query) {
var parameterNames = query.getParameters();
var parameters = {};

var queryString = $location.search();
_.each(parameterNames, function(param, i) {
var qsName = "p_" + param;
if (qsName in queryString) {
parameters[param] = queryString[qsName];
}
});

return parameters;
};

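For illustration (the query text is hypothetical, not part of the diff), the "p_" query-string convention works like this:

    // Query: SELECT * FROM events WHERE country = '{{country}}'
    // URL:   /queries/123?p_country=IL
    //
    //   query.getParameters()                               // => ["country"]
    //   Query.collectParamsFromQueryString($location, query) // => {country: "IL"}
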
Query.prototype.getSourceLink = function () {
return '/queries/' + this.id + '/source';
};

Query.prototype.getQueryResult = function (ttl) {
if (ttl == undefined) {
ttl = this.ttl;
Query.prototype.hasDailySchedule = function() {
return (this.schedule && this.schedule.match(/\d\d:\d\d/) !== null);
}

Query.prototype.scheduleInLocalTime = function() {
var parts = this.schedule.split(':');
return moment.utc().hour(parts[0]).minute(parts[1]).local().format('HH:mm');
}

Query.prototype.getQueryResult = function (maxAge, parameters) {
// if (ttl == undefined) {
// ttl = this.ttl;
// }

var queryText = this.query;

var queryParameters = this.getParameters();
var paramsRequired = !_.isEmpty(queryParameters);

var missingParams = parameters === undefined ? queryParameters : _.difference(queryParameters, _.keys(parameters));

if (paramsRequired && missingParams.length > 0) {
var paramsWord = "parameter";
if (missingParams.length > 1) {
paramsWord = "parameters";
}

return new QueryResult({job: {error: "Missing values for " + missingParams.join(', ') + " "+paramsWord+".", status: 4}});
}

if (this.latest_query_data && ttl != 0) {
if (paramsRequired) {
queryText = Mustache.render(queryText, parameters);

// Need to clear latest results, to make sure we don't use results for different params.
this.latest_query_data = null;
this.latest_query_data_id = null;
}

if (this.latest_query_data && maxAge != 0) {
if (!this.queryResult) {
this.queryResult = new QueryResult({'query_result': this.latest_query_data});
}
} else if (this.latest_query_data_id && ttl != 0) {
} else if (this.latest_query_data_id && maxAge != 0) {
if (!this.queryResult) {
this.queryResult = QueryResult.getById(this.latest_query_data_id);
}
} else if (this.data_source_id) {
this.queryResult = QueryResult.get(this.data_source_id, this.query, ttl);
this.queryResult = QueryResult.get(this.data_source_id, queryText, maxAge, this.id);
}

return this.queryResult;
@@ -412,6 +459,26 @@

Query.prototype.getQueryResultPromise = function() {
return this.getQueryResult().toPromise();
};

Query.prototype.getParameters = function() {
var parts = Mustache.parse(this.query);
var parameters = [];
var collectParams = function(parts) {
parameters = [];
_.each(parts, function(part) {
if (part[0] == 'name' || part[0] == '&') {
parameters.push(part[1]);
} else if (part[0] == '#') {
parameters = _.union(parameters, collectParams(part[4]));
}
});
return parameters;
};

parameters = collectParams(parts);

return parameters;
}

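A sketch of the Mustache.parse token shapes that getParameters relies on (the query text is hypothetical; token layout per mustache.js, where section tokens carry their children at index 4):

    var parts = Mustache.parse("SELECT {{value}} {{#flag}}AND 1{{/flag}}");
    // 'name' and '&' tokens look like ['name', 'value', start, end] and
    // contribute a parameter; '#' tokens are sections whose child tokens
    // (part[4]) are walked recursively.
    // Here getParameters() returns ["value"].
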
return Query;
@@ -420,7 +487,12 @@


var DataSource = function ($resource) {
var DataSourceResource = $resource('/api/data_sources/:id', {id: '@id'}, {'get': {'method': 'GET', 'cache': true, 'isArray': true}});
var actions = {
'get': {'method': 'GET', 'cache': true, 'isArray': true},
'getSchema': {'method': 'GET', 'cache': true, 'isArray': true, 'url': '/api/data_sources/:id/schema'}
};

var DataSourceResource = $resource('/api/data_sources/:id', {id: '@id'}, actions);

return DataSourceResource;
}

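Illustrative usage of the added getSchema action (mirrors the updateSchema call in QueryViewCtrl; the id and table data are hypothetical):

    DataSource.getSchema({id: 1}, function (schema) {
      // schema: [{name: "events", columns: ["id", "user_id", "created_at"]}, ...]
    });
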
@@ -55,6 +55,22 @@
}];
};

var VisualizationName = function(Visualization) {
return {
restrict: 'E',
scope: {
visualization: '='
},
template: '<small>{{name}}</small>',
replace: false,
link: function (scope) {
if (Visualization.visualizations[scope.visualization.type].name != scope.visualization.name) {
scope.name = scope.visualization.name;
}
}
}
}

var VisualizationRenderer = function ($location, Visualization) {
return {
restrict: 'E',
@@ -72,42 +88,9 @@
width: '50%'
};

function readURL() {
var searchFilters = angular.fromJson($location.search().filters);
if (searchFilters) {
_.forEach(scope.filters, function(filter) {
var value = searchFilters[filter.friendlyName];
if (value) {
filter.current = value;
}
});
}
}

function updateURL(filters) {
var current = {};
_.each(filters, function(filter) {
if (filter.current) {
current[filter.friendlyName] = filter.current;
}
});

var newSearch = angular.extend($location.search(), {
filters: angular.toJson(current)
});
$location.search(newSearch);
}

scope.$watch('queryResult && queryResult.getFilters()', function (filters) {
if (filters) {
scope.filters = filters;

if (filters.length && false) {
readURL();

// start watching for changes and update URL
scope.$watch('filters', updateURL, true);
}
}
});
}
@@ -208,6 +191,7 @@
.provider('Visualization', VisualizationProvider)
.directive('visualizationRenderer', ['$location', 'Visualization', VisualizationRenderer])
.directive('visualizationOptionsEditor', ['Visualization', VisualizationOptionsEditor])
.directive('visualizationName', ['Visualization', VisualizationName])
.directive('filters', Filters)
.directive('editVisulatizationForm', ['Events', 'Visualization', 'growl', EditVisualizationForm])
})();

@@ -41,6 +41,9 @@

_.each($scope.queryResult.getChartData($scope.options.columnMapping), function (s) {
var additional = {'stacking': 'normal'};
if ('globalSeriesType' in $scope.options) {
additional['type'] = $scope.options.globalSeriesType;
}
if ($scope.options.seriesOptions && $scope.options.seriesOptions[s.name]) {
additional = $scope.options.seriesOptions[s.name];
if (!additional.name || additional.name == "") {
@@ -74,11 +77,13 @@
};
});

chartVisualization.directive('chartEditor', function () {
chartVisualization.directive('chartEditor', function (ColorPalette) {
return {
restrict: 'E',
templateUrl: '/views/visualizations/chart_editor.html',
link: function (scope, element, attrs) {
scope.palette = ColorPalette;

scope.seriesTypes = {
'Line': 'line',
'Column': 'column',
@@ -87,7 +92,7 @@
'Pie': 'pie'
};

scope.globalSeriesType = 'column';
scope.globalSeriesType = scope.visualization.options.globalSeriesType || 'column';

scope.stackingOptions = {
"None": "none",
@@ -123,12 +128,15 @@
columnsWatch = null;

scope.$watch('globalSeriesType', function(type, old) {
scope.visualization.options.globalSeriesType = type;

if (type && old && type !== old && scope.visualization.options.seriesOptions) {
_.each(scope.visualization.options.seriesOptions, function(sOptions) {
sOptions.type = type;
});
}
});

scope.$watch('visualization.type', function (visualizationType) {
if (visualizationType == 'CHART') {
if (scope.visualization.options.series.stacking === null) {
@@ -139,6 +147,10 @@
scope.stacking = scope.visualization.options.series.stacking;
}

if (scope.visualization.options.sortX === undefined) {
scope.visualization.options.sortX = true;
}

var refreshSeries = function() {
scope.series = _.map(scope.queryResult.getChartData(scope.visualization.options.columnMapping), function (s) { return s.name; });

@@ -151,9 +163,9 @@

_.each(scope.series, function(s, i) {
if (scope.visualization.options.seriesOptions[s] == undefined) {
scope.visualization.options.seriesOptions[s] = {'type': 'column', 'yAxis': 0};
scope.visualization.options.seriesOptions[s] = {'type': scope.visualization.options.globalSeriesType, 'yAxis': 0};
}
scope.visualization.options.seriesOptions[s].zIndex = i;
scope.visualization.options.seriesOptions[s].zIndex = scope.visualization.options.seriesOptions[s].zIndex === undefined ? i : scope.visualization.options.seriesOptions[s].zIndex;

});
scope.zIndexes = _.range(scope.series.length);

61 rd_ui/app/scripts/visualizations/counter.js Normal file
@@ -0,0 +1,61 @@
'use strict';

(function() {
var module = angular.module('redash.visualization');

module.config(['VisualizationProvider', function(VisualizationProvider) {
var renderTemplate =
'<counter-renderer ' +
'options="visualization.options" query-result="queryResult">' +
'</counter-renderer>';

var editTemplate = '<counter-editor></counter-editor>';
var defaultOptions = {};

VisualizationProvider.registerVisualization({
type: 'COUNTER',
name: 'Counter',
renderTemplate: renderTemplate,
editorTemplate: editTemplate,
defaultOptions: defaultOptions
});
}
]);

module.directive('counterRenderer', function() {
return {
restrict: 'E',
templateUrl: '/views/visualizations/counter.html',
link: function($scope, elm, attrs) {
$scope.visualization.options.rowNumber =
$scope.visualization.options.rowNumber || 0;

$scope.$watch('[queryResult && queryResult.getData(), visualization.options]',
function() {
var queryData = $scope.queryResult.getData();
if (queryData) {
var rowNumber = $scope.visualization.options.rowNumber || 0;
var counterColName = $scope.visualization.options.counterColName || 'counter';
var targetColName = $scope.visualization.options.targetColName || 'target';

$scope.counterValue = queryData[rowNumber][counterColName];
$scope.targetValue = queryData[rowNumber][targetColName];

if ($scope.targetValue) {
$scope.delta = $scope.counterValue - $scope.targetValue;
$scope.trendPositive = $scope.delta >= 0;
}
}
}, true);
}
}
});

module.directive('counterEditor', function() {
return {
restrict: 'E',
templateUrl: '/views/visualizations/counter_editor.html'
}
});

})();
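For illustration (hypothetical data; "counter" and "target" are the directive's default column names), the counter visualization expects query rows shaped like:

    // queryResult.getData() => [{counter: 1024, target: 900}, ...]
    // With rowNumber 0: counterValue = 1024, targetValue = 900,
    // delta = 124, trendPositive = true, so the value renders in green.
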
@@ -78,15 +78,15 @@
};
} else if (columnType === 'date') {
columnDefinition.formatFunction = function (value) {
if (value) {
return value.format("DD/MM/YY");
if (value && moment.isMoment(value)) {
return value.toDate().toLocaleDateString();
}
return value;
};
} else if (columnType === 'datetime') {
columnDefinition.formatFunction = function (value) {
if (value) {
return value.format("DD/MM/YY HH:mm");
if (value && moment.isMoment(value)) {
return value.toDate().toLocaleString();
}
return value;
};

@@ -14,7 +14,12 @@ a.page-title {
}

a.navbar-brand {
font-style: italic;
padding: 5px 5px 0px 0px;
margin-left: 0px !important;
}

a.navbar-brand img {
height: 40px;
}

.graph {
@@ -270,6 +275,35 @@ to add those CSS styles here. */
pivot-table-renderer > table, grid-renderer > div, visualization-renderer > div {
overflow: auto;
}
counter-renderer {
display: block;
text-align: center;
}
counter-renderer counter {
margin: 0 auto;
background: #f9f9f9;
padding: 15px 50px;
display: block;
}
counter-renderer value,
counter-renderer counter-target {
font-size: 80px;
display: block;
}
counter-renderer counter-target {
color: #ccc;
}
counter-renderer counter.positive value {
color: #5cb85c;
}
counter-renderer counter.negative value {
color: #d9534f;
margin-right: 15px;
}
counter-renderer counter-name {
font-size: 40px;
display: block;
}

.rd-widget-textbox p {
margin-bottom: 0;
@@ -279,6 +313,23 @@ pivot-table-renderer > table, grid-renderer > div, visualization-renderer > div
height: 100%;
}

.schema-container {
height: 300px;
}

.schema-browser {
height: 100%;
overflow-y: auto;
overflow-x: hidden;
}

div.table-name {
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
cursor: pointer;
}

/*
bootstrap's hidden-xs class adds display:block when not hidden
use this class when you need to keep the original display value

@@ -24,10 +24,6 @@
<span class="badge">{{manager.outdated_queries_count}}</span>
Outdated Queries Count
</li>

<li class="list-group-item" ng-if="flowerUrl">
<a href="/admin/workers">Workers' Status</a>
</li>
</ul>
<ul class="list-group col-lg-4">
<li class="list-group-item active">Queues</li>

@@ -1,3 +0,0 @@
<div class="container-fluid iframe-container">
<iframe src="{{flowerUrl}}" style="width:100%; height:100%; background-color:transparent;"></iframe>
</div>
@@ -14,7 +14,7 @@
</button>
</span>
</h2>
<filters></filters>
<filters ng-if="dashboard.dashboard_filters_enabled"></filters>
</div>

<div class="container" id="dashboard">
@@ -28,6 +28,7 @@
<p>
<span ng-hide="currentUser.hasPermission('view_query')">{{query.name}}</span>
<query-link query="query" visualization="widget.visualization" ng-show="currentUser.hasPermission('view_query')"></query-link>
<visualization-name visualization="widget.visualization"/>
</p>
<div class="text-muted" ng-bind-html="query.description | markdown"></div>
</h3>
@@ -37,7 +38,7 @@

<div class="panel-footer">
<span class="label label-default"
tooltip="next update {{nextUpdateTime}} (query runtime: {{queryResult.getRuntime() | durationHumanize}})"
tooltip="(query runtime: {{queryResult.getRuntime() | durationHumanize}})"
tooltip-placement="bottom">Updated: <span am-time-ago="queryResult.getUpdatedAt()"></span></span>

<span class="pull-right">

@@ -22,22 +22,22 @@
</div>

<div ng-show="isVisualization()">
<p>
<form class="form-inline" role="form" ng-submit="loadVisualizations()">
<div class="form-group">
<input class="form-control" placeholder="Query Id" ng-model="queryId">
</div>
<button type="submit" class="btn btn-primary" ng-disabled="!queryId">
Load visualizations
</button>
</form>
</p>
<div class="form-group">
<ui-select ng-model="query.selected" theme="bootstrap" reset-search-input="false">
<ui-select-match placeholder="Search a query by name">{{$select.selected.name}}</ui-select-match>
<ui-select-choices repeat="q in queries"
refresh="searchQueries($select.search)"
refresh-delay="0">
<div ng-bind-html="q.name | highlight: $select.search | trustAsHtml"></div>
</ui-select-choices>
</ui-select>
</div>

<div ng-show="query">
<div class="form-group">
<label for="">Choose Visualization</label>
<select ng-model="selectedVis" ng-options="vis as vis.name group by vis.type for vis in query.visualizations" class="form-control"></select>
</div>
<div ng-show="selected_query">
<div class="form-group">
<label for="">Choose Visualization</label>
<select ng-model="selectedVis" ng-options="vis as vis.name group by vis.type for vis in selected_query.visualizations" class="form-control"></select>
</div>
</div>
</div>

28 rd_ui/app/views/personal.html Normal file
@@ -0,0 +1,28 @@
<div class="container">
<div class="row">
<div class="list-group col-md-6">
<div class="list-group-item active">
Recent Dashboards
<button ng-show="currentUser.hasPermission('create_dashboard')" type="button" class="btn btn-sm btn-link" data-toggle="modal" href="#new_dashboard_dialog" tooltip="New Dashboard"><span class="glyphicon glyphicon-plus-sign"></span></button>
</div>
<div class="list-group-item" ng-repeat="dashboard in recentDashboards" >
<button type="button" class="close delete-button" aria-hidden="true" ng-show="dashboard.canEdit()" ng-click="archiveDashboard(dashboard)" tooltip="Delete Dashboard">×</button>
<a ng-href="/dashboard/{{dashboard.slug}}">{{dashboard.name}}</a>
</div>
</div>

<div class="list-group col-md-6">
<div class="list-group-item active">
Recent Queries
</div>
<a ng-href="/queries/{{query.id}}" class="list-group-item" ng-repeat="query in recentQueries">{{query.name}}</a>
</div>
</div>

<div ng-show="currentUser.hasPermission('admin')" class="row">
<div class="list-group">
<div class="list-group-item active">Admin</div>
<a href="/admin/status" class="list-group-item">Status</a>
</div>
</div>
</div>
@@ -1,6 +1,7 @@

<div class="container">

<p class="alert alert-warning" ng-if="query.is_archived">This query is archived and can't be used in dashboards, and won't appear in search results.</p>
<alert-unsaved-changes ng-if="canEdit" is-dirty="isDirty"></alert-unsaved-changes>

<div class="row">
@@ -58,9 +59,9 @@

<hr>

<div class="row">
<div class="col-lg-12">
<div ng-show="sourceMode">
<div class="row" ng-if="sourceMode">
<div ng-class="editorSize">
<div>
<p>
<button type="button" class="btn btn-primary btn-xs" ng-disabled="queryExecuting" ng-click="executeQuery()">
<span class="glyphicon glyphicon-play"></span> Execute
@@ -76,21 +77,43 @@
</button>
</span>
</p>
</div>

<!-- code editor -->
<div ng-show="sourceMode">
<p>
<query-editor query="query" lock="queryExecuting"></query-editor>
<query-editor query="query" schema="schema" syntax="dataSource.syntax" lock="queryFormatting"></query-editor>
</p>
<hr>
</div>
</div>
<div class="col-md-3 schema-container" ng-show="hasSchema">
<div>
<input type="text" placeholder="Search schema..." class="form-control" ng-model="schemaFilter">
</div>
<div class="schema-browser">
<div ng-repeat="table in schema | filter:schemaFilter">
<div class="table-name" ng-click="table.collapsed = !table.collapsed">
<i class="fa fa-table"></i> <strong><span title="{{table.name}}">{{table.name}}</span></strong>
</div>
<div collapse="table.collapsed">
<div ng-repeat="column in table.columns | filter:schemaFilter" style="padding-left:16px;">{{column}}</div>
</div>
</div>
</div>
</div>

</div>

</div>
<hr ng-if="sourceMode">
<div class="row">
<div class="col-lg-3 rd-hidden-xs">
<p>
<span class="glyphicon glyphicon-user"></span>
<span class="text-muted">Created By </span>
<strong>{{query.user.name}}</strong>
</p>
<p ng-if="query.last_modified_by && query.user.id != query.last_modified_by.id">
<span class="glyphicon glyphicon-user"></span>
<span class="text-muted">Last Modified By </span>
<strong>{{query.last_modified_by.name}}</strong>
</p>
<p>
<span class="glyphicon glyphicon-time"></span>
<span class="text-muted">Last update </span>
@@ -98,12 +121,6 @@
<rd-time-ago value="queryResult.query_result.retrieved_at"></rd-time-ago>
</strong>
</p>
<p>
<span class="glyphicon glyphicon-user"></span>
<span class="text-muted">Created By </span>
<strong ng-hide="isQueryOwner">{{query.user.name}}</strong>
<strong ng-show="isQueryOwner">You</strong>
</p>
<p>
<span class="glyphicon glyphicon-play"></span>
<span class="text-muted">Runtime </span>
@@ -116,8 +133,8 @@
</p>
<p>
<span class="glyphicon glyphicon-refresh"></span>
<span class="text-muted">Refresh Interval</span>
<query-refresh-select></query-refresh-select>
<span class="text-muted">Refresh Schedule</span>
<a href="" ng-click="openScheduleForm()">{{query.schedule | scheduleHumanize}}</a>
</p>

<p>
@@ -133,6 +150,33 @@
<span class="glyphicon glyphicon-cloud-download"></span>
<span class="rd-hidden-xs">Download Dataset</span>
</a>

<a class="btn btn-warning btn-sm" ng-disabled="queryExecuting" data-toggle="modal" data-target="#archive-confirmation-modal"
ng-show="!query.is_archived && query.id != undefined && (isQueryOwner || currentUser.hasPermission('admin'))">
<i class="fa fa-archive" title="Archive Query"></i>
</a>

<button class="btn btn-default btn-sm" ng-show="query.id != undefined" ng-click="showApiKey()">
<i class="fa fa-key" title="Show API Key"></i>
</button>

<div class="modal fade" id="archive-confirmation-modal" tabindex="-1" role="dialog" aria-labelledby="archiveConfirmationModal" aria-hidden="true">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<h4 class="modal-title">Query Archive</h4>
</div>
<div class="modal-body">
Are you sure you want to archive this query? <br/>
All dashboard widgets created with its visualizations will be deleted.
</div>
<div class="modal-footer">
<button type="button" class="btn btn-default" data-dismiss="modal">No</button>
<button type="button" class="btn btn-primary" ng-click="archiveQuery()">Yes, archive.</button>
</div>
</div>
</div>
</div>
</p>
</div>

@@ -158,7 +202,7 @@
<rd-tab tab-id="{{vis.id}}" name="{{vis.name}}" ng-if="vis.type!='TABLE'" ng-repeat="vis in query.visualizations">
<span class="remove" ng-click="deleteVisualization($event, vis)" ng-show="canEdit"> ×</span>
</rd-tab>
<rd-tab tab-id="add" name="+ New" removeable="true" ng-show="canEdit"></rd-tab>
<rd-tab tab-id="add" name="+ New Visualization" removeable="true" ng-show="canEdit"></rd-tab>
<li ng-if="!sourceMode" class="rd-tab-btn"><button class="btn btn-sm btn-default" ng-click="executeQuery()" ng-disabled="queryExecuting" title="Refresh Dataset"><span class="glyphicon glyphicon-refresh"></span></button></li>
</ul>
</div>

18 rd_ui/app/views/schedule_form.html Normal file
@@ -0,0 +1,18 @@
<div class="modal-header">
<button type="button" class="close" aria-label="Close" ng-click="close()"><span aria-hidden="true">×</span></button>
<h4 class="modal-title">Refresh Schedule</h4>
</div>
<div class="modal-body">
<div class="radio">
<label>
<input type="radio" value="periodic" ng-model="refreshType">
<query-refresh-select ng-disabled="refreshType != 'periodic'"></query-refresh-select>
</label>
</div>
<div class="radio">
<label>
<input type="radio" value="daily" ng-model="refreshType">
<query-time-picker ng-disabled="refreshType != 'daily'"></query-time-picker>
</label>
</div>
</div>
@@ -27,6 +27,33 @@
ng-model="globalSeriesType" class="form-control"></select>
</div>
</div>

<div class="form-group">
<label class="control-label col-sm-2">y Axis min</label>

<div class="col-sm-10">
<input name="yAxisMin" type="number" class="form-control"
ng-model="visualization.options.yAxis.min"
placeholder="Auto">
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-2">y Axis max</label>

<div class="col-sm-10">
<input name="yAxisMax" type="number" class="form-control"
ng-model="visualization.options.yAxis.max"
placeholder="Auto">
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-2">Sort X Values</label>

<div class="col-sm-10">
<input name="sortX" type="checkbox" class="form-control"
ng-model="visualization.options.sortX">
</div>
</div>
</div>
</div>

@@ -91,6 +118,13 @@
placeholder="{{seriesName}}">
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-3">Color</label>

<div class="col-sm-9">
<select class="form-control" ng-model="visualization.options.seriesOptions[seriesName].color" ng-options="val as key for (key,val) in palette"></select>
</div>
</div>
</div>
</div>

5 rd_ui/app/views/visualizations/counter.html Normal file
@@ -0,0 +1,5 @@
<counter ng-class="{'positive': targetValue && trendPositive, 'negative': targetValue && !trendPositive}">
<value>{{counterValue|number}}</value>
<counter-target ng-if="targetValue">({{targetValue|number}})</counter-target>
<counter-name>{{visualization.name}}</counter-name>
</counter>
20 rd_ui/app/views/visualizations/counter_editor.html Normal file
@@ -0,0 +1,20 @@
|
||||
<div class="form-horizontal">
|
||||
<div class="form-group">
|
||||
<label class="col-lg-6">Row Number</label>
|
||||
<div class="col-lg-6">
|
||||
<input type="number" ng-model="visualization.options.rowNumber" class="form-control">
|
||||
</div>
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label class="col-lg-6">Counter Column Name</label>
|
||||
<div class="col-lg-6">
|
||||
<select ng-options="name for name in queryResult.columnNames" ng-model="visualization.options.counterColName" class="form-control"></select>
|
||||
</div>
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label class="col-lg-6">Target Column Name</label>
|
||||
<div class="col-lg-6">
|
||||
<select ng-options="name for name in queryResult.columnNames" ng-model="visualization.options.targetColName" class="form-control"></select>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -2,20 +2,19 @@
  "name": "rdUi",
  "version": "0.1.0",
  "dependencies": {
    "angular": "1.2.7",
    "angular": "1.2.18",
    "angular-resource": "1.2.18",
    "angular-route": "1.2.18",
    "angular-growl": "0.4.0",
    "json3": "3.2.4",
    "jquery": "1.9.1",
    "bootstrap": "3.0.0",
    "es5-shim": "2.0.8",
    "angular-moment": "0.2.0",
    "moment": "2.1.0",
    "angular-ui-bootstrap": "0.5.0",
    "angular-ui-codemirror": "0.0.5",
    "codemirror": "4.8.0",
    "highcharts": "3.0.10",
    "underscore": "1.5.1",
    "angular-resource": "1.2.15",
    "angular-growl": "0.3.1",
    "angular-route": "1.2.7",
    "pivottable": "~1.1.1",
    "cornelius": "https://github.com/restorando/cornelius.git",
    "gridster": "0.2.0",
@@ -25,13 +24,18 @@
    "underscore.string": "~2.3.3",
    "marked": "~0.3.2",
    "bucky": "~0.2.6",
    "pace": "~0.5.1"
    "pace": "~0.5.1",
    "angular-ui-select": "0.8.2",
    "font-awesome": "~4.2.0",
    "mustache": "~1.0.0",
    "canvg": "gabelerner/canvg",
    "angular-ui-bootstrap-bower": "~0.12.1"
  },
  "devDependencies": {
    "angular-mocks": "~1.0.7",
    "angular-scenario": "~1.0.7"
    "angular-mocks": "1.2.18",
    "angular-scenario": "1.2.18"
  },
  "resolutions": {
    "angular": "1.2.7"
    "angular": "1.2.18"
  }
}
BIN rd_ui/favicon.ico (executable file)
Binary file not shown (after: 1.1 KiB).
@@ -47,6 +47,7 @@ module.exports = function(config) {
      'app/bower_components/mousetrap/plugins/global-bind/mousetrap-global-bind.js',
      'app/bower_components/select2/select2.js',
      'app/bower_components/angular-ui-select2/src/select2.js',
      'app/bower_components/angular-ui-select/dist/select.js',
      'app/bower_components/underscore.string/lib/underscore.string.js',
      'app/bower_components/marked/lib/marked.js',
      'app/scripts/ng_highchart.js',
@@ -54,6 +55,7 @@ module.exports = function(config) {
      'app/scripts/ui-bootstrap-tpls-0.5.0.min.js',
      'app/bower_components/bucky/bucky.js',
      'app/bower_components/pace/pace.js',
      'app/bower_components/mustache/mustache.js',

      'app/scripts/app.js',
      'app/scripts/services/services.js',
@@ -4,8 +4,9 @@ import redis
from statsd import StatsClient

from redash import settings
from redash.query_runner import import_query_runners

__version__ = '0.4.0'
__version__ = '0.6.1'


def setup_logging():
@@ -14,6 +15,7 @@ def setup_logging():
    handler.setFormatter(formatter)
    logging.getLogger().addHandler(handler)
    logging.getLogger().setLevel(settings.LOG_LEVEL)
    logging.getLogger("passlib").setLevel("ERROR")


def create_redis_connection():
@@ -30,4 +32,6 @@ def create_redis_connection():

setup_logging()
redis_connection = create_redis_connection()
statsd_client = StatsClient(host=settings.STATSD_HOST, port=settings.STATSD_PORT, prefix=settings.STATSD_PREFIX)
statsd_client = StatsClient(host=settings.STATSD_HOST, port=settings.STATSD_PORT, prefix=settings.STATSD_PREFIX)

import_query_runners(settings.QUERY_RUNNERS)
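A minimal sketch of what a dynamic loader along these lines can look like (assuming QUERY_RUNNERS is an iterable of dotted module paths; hypothetical, not the project's actual implementation):

import importlib

def import_query_runners(query_runner_imports):
    # Import each query runner module for its registration side effects.
    for runner_import in query_runner_imports:
        importlib.import_module(runner_import)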
redash/admin.py (new file, 116 lines)
@@ -0,0 +1,116 @@
import json
from flask_admin.contrib.peewee import ModelView
from flask.ext.admin import Admin
from flask_admin.contrib.peewee.form import CustomModelConverter
from flask_admin.form.widgets import DateTimePickerWidget
from playhouse.postgres_ext import ArrayField, DateTimeTZField
from wtforms import fields
from wtforms.widgets import TextInput

from redash import models
from redash import query_runner
from redash.permissions import require_permission


class ArrayListField(fields.Field):
    widget = TextInput()

    def _value(self):
        if self.data:
            return u', '.join(self.data)
        else:
            return u''

    def process_formdata(self, valuelist):
        if valuelist:
            self.data = [x.strip() for x in valuelist[0].split(',')]
        else:
            self.data = []


class JSONTextAreaField(fields.TextAreaField):
    def process_formdata(self, valuelist):
        if valuelist:
            try:
                json.loads(valuelist[0])
            except ValueError:
                raise ValueError(self.gettext(u'Invalid JSON'))
            self.data = valuelist[0]
        else:
            self.data = ''

class PasswordHashField(fields.PasswordField):
    def _value(self):
        return u''

    def process_formdata(self, valuelist):
        if valuelist:
            self.data = models.pwd_context.encrypt(valuelist[0])
        else:
            self.data = u''


class PgModelConverter(CustomModelConverter):
    def __init__(self, view, additional=None):
        additional = {ArrayField: self.handle_array_field,
                      DateTimeTZField: self.handle_datetime_tz_field}
        super(PgModelConverter, self).__init__(view, additional)
        self.view = view

    def handle_array_field(self, model, field, **kwargs):
        return field.name, ArrayListField(**kwargs)

    def handle_datetime_tz_field(self, model, field, **kwargs):
        kwargs['widget'] = DateTimePickerWidget()
        return field.name, fields.DateTimeField(**kwargs)


class BaseModelView(ModelView):
    model_form_converter = PgModelConverter

    @require_permission('admin')
    def is_accessible(self):
        return True


class UserModelView(BaseModelView):
    column_searchable_list = ('name', 'email')
    form_excluded_columns = ('created_at', 'updated_at')
    column_exclude_list = ('password_hash',)

    form_overrides = dict(password_hash=PasswordHashField)
    form_args = {
        'password_hash': {'label': 'Password'}
    }


def query_runner_type_formatter(view, context, model, name):
    qr = query_runner.query_runners.get(model.type, None)
    if qr:
        return qr.name()

    return model.type


class DataSourceModelView(BaseModelView):
    form_overrides = dict(type=fields.SelectField, options=JSONTextAreaField)
    form_args = dict(type={
        'choices': [(k, r.name()) for k, r in query_runner.query_runners.iteritems()]
    })
    column_formatters = dict(type=query_runner_type_formatter)
    column_filters = ('type',)


def init_admin(app):
    admin = Admin(app, name='re:dash admin')

    views = {
        models.User: UserModelView(models.User),
        models.DataSource: DataSourceModelView(models.DataSource)
    }

    for m in models.all_models:
        if m in views:
            admin.add_view(views[m])
        else:
            admin.add_view(BaseModelView(m))
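Wiring the admin panel into an application could look like this (a minimal sketch; the standalone Flask app object is an assumption, and Flask-Admin serves its views under /admin by default):

from flask import Flask
from redash.admin import init_admin

app = Flask(__name__)
init_admin(app)  # mounts the re:dash admin model views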
@@ -5,7 +5,7 @@ import time
import logging

from flask import request, make_response, redirect, url_for
from flask.ext.login import LoginManager, login_user, current_user
from flask.ext.login import LoginManager, login_user, current_user, logout_user

from redash import models, settings, google_oauth

@@ -23,9 +23,38 @@ def sign(key, path, expires):
    return h.hexdigest()


class HMACAuthentication(object):
    @staticmethod
    def api_key_authentication():
class Authentication(object):
    def verify_authentication(self):
        return False

    def required(self, fn):
        @functools.wraps(fn)
        def decorated(*args, **kwargs):
            if current_user.is_authenticated() or self.verify_authentication():
                return fn(*args, **kwargs)

            return make_response(redirect(url_for("login", next=request.url)))

        return decorated


class ApiKeyAuthentication(Authentication):
    def verify_authentication(self):
        api_key = request.args.get('api_key')
        query_id = request.view_args.get('query_id', None)

        if query_id and api_key:
            query = models.Query.get(models.Query.id == query_id)

            if query.api_key and api_key == query.api_key:
                login_user(models.ApiUser(query.api_key), remember=False)
                return True

        return False


class HMACAuthentication(Authentication):
    def verify_authentication(self):
        signature = request.args.get('signature')
        expires = float(request.args.get('expires') or 0)
        query_id = request.view_args.get('query_id', None)
@@ -41,22 +70,14 @@ class HMACAuthentication(object):

        return False

    def required(self, fn):
        @functools.wraps(fn)
        def decorated(*args, **kwargs):
            if current_user.is_authenticated():
                return fn(*args, **kwargs)

            if self.api_key_authentication():
                return fn(*args, **kwargs)

            return make_response(redirect(url_for("login", next=request.url)))

        return decorated


@login_manager.user_loader
def load_user(user_id):
    # If the user was previously logged in as an API user, user_id will be the API key and
    # would raise an exception, as it can't be cast to int.
    if isinstance(user_id, basestring) and not user_id.isdigit():
        return None

    return models.User.select().where(models.User.id == user_id).first()


@@ -66,4 +87,13 @@ def setup_authentication(app):
    app.secret_key = settings.COOKIE_SECRET
    app.register_blueprint(google_oauth.blueprint)

    return HMACAuthentication()
    if settings.AUTH_TYPE == 'hmac':
        auth = HMACAuthentication()
    elif settings.AUTH_TYPE == 'api_key':
        auth = ApiKeyAuthentication()
    else:
        logger.warning("Unknown authentication type ({}). Using default (HMAC).".format(settings.AUTH_TYPE))
        auth = HMACAuthentication()

    return auth
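Protecting a view with the returned object is then a one-liner (a sketch; the app object and view function are assumptions):

auth = setup_authentication(app)

@app.route('/queries')
@auth.required  # redirects to the login page unless the session, HMAC signature, or api_key checks out
def queries():
    ...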
@@ -1,6 +1,3 @@
from flask import make_response
from functools import update_wrapper

ONE_YEAR = 60 * 60 * 24 * 365.25

headers = {
redash/cli/data_sources.py (new file, 129 lines)
@@ -0,0 +1,129 @@
import json
import click
from flask.ext.script import Manager
from redash import models
from redash.query_runner import query_runners, validate_configuration

manager = Manager(help="Data sources management commands.")

@manager.command
def list():
    """List currently configured data sources"""
    for i, ds in enumerate(models.DataSource.select()):
        if i > 0:
            print "-"*20

        print "Id: {}\nName: {}\nType: {}\nOptions: {}".format(ds.id, ds.name, ds.type, ds.options)


def validate_data_source_type(type):
    if type not in query_runners.keys():
        print "Error: the type \"{}\" is not supported (supported types: {}).".format(type, ", ".join(query_runners.keys()))
        exit()


def validate_data_source_options(type, options):
    if not validate_configuration(type, options):
        print "Error: invalid configuration."
        exit()

@manager.command
def new(name=None, type=None, options=None):
    """Create a new data source"""
    if name is None:
        name = click.prompt("Name")

    if type is None:
        print "Select type:"
        for i, query_runner_name in enumerate(query_runners.keys()):
            print "{}. {}".format(i+1, query_runner_name)

        idx = 0
        while idx < 1 or idx > len(query_runners.keys()):
            idx = click.prompt("[{}-{}]".format(1, len(query_runners.keys())), type=int)

        type = query_runners.keys()[idx-1]
    else:
        validate_data_source_type(type)

    if options is None:
        query_runner = query_runners[type]
        schema = query_runner.configuration_schema()

        types = {
            'string': unicode,
            'number': int,
            'boolean': bool
        }

        options_obj = {}

        for k, prop in schema['properties'].iteritems():
            required = k in schema.get('required', [])
            default_value = "<<DEFAULT_VALUE>>"
            if required:
                default_value = None

            prompt = prop.get('title', k.capitalize())
            if required:
                prompt = "{} (required)".format(prompt)
            else:
                prompt = "{} (optional)".format(prompt)

            value = click.prompt(prompt, default=default_value, type=types[prop['type']], show_default=False)
            if value != default_value:
                options_obj[k] = value

        options = json.dumps(options_obj)

    validate_data_source_options(type, options)

    print "Creating {} data source ({}) with options:\n{}".format(type, name, options)

    data_source = models.DataSource.create(name=name,
                                           type=type,
                                           options=options)
    print "Id: {}".format(data_source.id)


@manager.command
def delete(name):
    """Deletes a data source by name"""
    try:
        data_source = models.DataSource.get(models.DataSource.name==name)
        print "Deleting data source: {} (id={})".format(name, data_source.id)
        data_source.delete_instance()
    except models.DataSource.DoesNotExist:
        print "Couldn't find data source named: {}".format(name)


def update_attr(obj, attr, new_value):
    if new_value is not None:
        old_value = getattr(obj, attr)
        print "Updating {}: {} -> {}".format(attr, old_value, new_value)
        setattr(obj, attr, new_value)


@manager.option('name', default=None, help="name of data source to edit")
@manager.option('--name', dest='new_name', default=None, help="new name for the data source")
@manager.option('--options', dest='options', default=None, help="updated options for the data source")
@manager.option('--type', dest='type', default=None, help="new type for the data source")
def edit(name, new_name=None, options=None, type=None):
    """Edit data source settings (name, options, type)"""
    try:
        if type is not None:
            validate_data_source_type(type)

        data_source = models.DataSource.get(models.DataSource.name==name)

        if options is not None:
            validate_data_source_options(data_source.type, options)

        update_attr(data_source, "name", new_name)
        update_attr(data_source, "type", type)
        update_attr(data_source, "options", options)
        data_source.save()

    except models.DataSource.DoesNotExist:
        print "Couldn't find data source named: {}".format(name)
redash/cli/database.py (new file, 19 lines)
@@ -0,0 +1,19 @@
from flask.ext.script import Manager

manager = Manager(help="Manages the database (create/drop tables).")

@manager.command
def create_tables():
    """Creates the database tables."""
    from redash.models import create_db, init_db

    create_db(True, False)
    init_db()

@manager.command
def drop_tables():
    """Drops the database tables."""
    from redash.models import create_db

    create_db(False, True)
redash/cli/users.py (new file, 74 lines)
@@ -0,0 +1,74 @@
from flask.ext.script import Manager, prompt_pass
from redash import models

manager = Manager(help="Users management commands.")

@manager.option('email', help="email address of the user to grant admin to")
def grant_admin(email):
    try:
        user = models.User.get_by_email(email)

        user.groups.append('admin')
        user.save()

        print "User updated."
    except models.User.DoesNotExist:
        print "User [%s] not found." % email


@manager.option('email', help="User's email")
@manager.option('name', help="User's full name")
@manager.option('--admin', dest='is_admin', action="store_true", default=False, help="set user as admin")
@manager.option('--google', dest='google_auth', action="store_true", default=False, help="user uses Google Auth to login")
@manager.option('--password', dest='password', default=None, help="Password for users who don't use Google Auth (leave blank for prompt).")
@manager.option('--groups', dest='groups', default=models.User.DEFAULT_GROUPS, help="Comma separated list of groups (leave blank for default).")
def create(email, name, groups, is_admin=False, google_auth=False, password=None):
    print "Creating user (%s, %s)..." % (email, name)
    print "Admin: %r" % is_admin
    print "Login with Google Auth: %r\n" % google_auth
    if isinstance(groups, basestring):
        groups = groups.split(',')
        if '' in groups:
            groups.remove('')  # in case it was an empty string

    if is_admin:
        groups += ['admin']

    user = models.User(email=email, name=name, groups=groups)
    if not google_auth:
        password = password or prompt_pass("Password")
        user.hash_password(password)

    try:
        user.save()
    except Exception, e:
        print "Failed creating user: %s" % e.message


@manager.option('email', help="email address of user to delete")
def delete(email):
    deleted_count = models.User.delete().where(models.User.email == email).execute()
    print "Deleted %d users." % deleted_count


@manager.option('password', help="new password for the user")
@manager.option('email', help="email address of the user to change password for")
def password(email, password):
    try:
        user = models.User.get_by_email(email)

        user.hash_password(password)
        user.save()

        print "User updated."
    except models.User.DoesNotExist:
        print "User [%s] not found." % email


@manager.command
def list():
    """List all users"""
    for i, user in enumerate(models.User.select()):
        if i > 0:
            print "-"*20

        print "Id: {}\nName: {}\nEmail: {}".format(user.id, user.name.encode('utf-8'), user.email)
@@ -7,9 +7,8 @@ but this is only due to configuration issues and temporary.
import csv
import hashlib
import json
import numbers
import cStringIO
import datetime
import time
import logging

from flask import render_template, send_from_directory, make_response, request, jsonify, redirect, \
@@ -23,6 +22,7 @@ from redash.wsgi import app, auth, api
from redash.tasks import QueryTask, record_event
from redash.cache import headers as cache_headers
from redash.permissions import require_permission
from redash.query_runner import query_runners, validate_configuration


@app.route('/ping', methods=['GET'])
@@ -35,6 +35,7 @@ def ping():
@app.route('/queries')
@app.route('/queries/<query_id>')
@app.route('/queries/<query_id>/<anything>')
@app.route('/personal')
@app.route('/')
@auth.required
def index(**kwargs):
@@ -51,8 +52,7 @@ def index(**kwargs):
    }

    features = {
        'clientSideMetrics': settings.CLIENT_SIDE_METRICS,
        'flowerUrl': settings.CELERY_FLOWER_URL
        'clientSideMetrics': settings.CLIENT_SIDE_METRICS
    }

    return render_template("index.html", user=json.dumps(user), name=settings.NAME,
@@ -100,6 +100,7 @@ def status_api():
    status['version'] = __version__
    status['queries_count'] = models.Query.select().count()
    status['query_results_count'] = models.QueryResult.select().count()
    status['unused_query_results_count'] = models.QueryResult.unused().count()
    status['dashboards_count'] = models.Dashboard.select().count()
    status['widgets_count'] = models.Widget.select().count()

@@ -107,7 +108,7 @@ def status_api():

    manager_status = redis_connection.hgetall('redash:status')
    status['manager'] = manager_status
    status['manager']['outdated_queries_count'] = models.Query.outdated_queries().count()
    status['manager']['outdated_queries_count'] = len(models.Query.outdated_queries())

    queues = {}
    for ds in models.DataSource.select():
@@ -134,6 +135,24 @@ def format_sql_query():
    return sqlparse.format(query, reindent=True, keyword_case='upper')


@app.route('/queries/new', methods=['POST'])
@auth.required
def create_query_route():
    query = request.form.get('query', None)
    data_source_id = request.form.get('data_source_id', None)

    if query is None or data_source_id is None:
        abort(400)

    query = models.Query.create(name="New Query",
                                query=query,
                                data_source=data_source_id,
                                user=current_user._get_current_object(),
                                schedule=None)

    return redirect('/queries/{}'.format(query.id), 303)


class BaseResource(Resource):
    decorators = [auth.required]

@@ -172,14 +191,51 @@ class MetricsAPI(BaseResource):
api.add_resource(MetricsAPI, '/api/metrics/v1/send', endpoint='metrics')


class DataSourceTypeListAPI(BaseResource):
    @require_permission("admin")
    def get(self):
        return [q.to_dict() for q in query_runners.values()]

api.add_resource(DataSourceTypeListAPI, '/api/data_sources/types', endpoint='data_source_types')


class DataSourceListAPI(BaseResource):
    def get(self):
        data_sources = [ds.to_dict() for ds in models.DataSource.all()]
        return data_sources

    @require_permission("admin")
    def post(self):
        req = request.get_json(True)
        required_fields = ('options', 'name', 'type')
        for f in required_fields:
            if f not in req:
                abort(400)

        if not validate_configuration(req['type'], req['options']):
            abort(400)

        datasource = models.DataSource.create(name=req['name'], type=req['type'], options=req['options'])

        return datasource.to_dict()

api.add_resource(DataSourceListAPI, '/api/data_sources', endpoint='data_sources')


class DataSourceSchemaAPI(BaseResource):
    def get(self, data_source_id):
        data_source = models.DataSource.get_by_id(data_source_id)
        schema = data_source.get_schema()

        return schema

api.add_resource(DataSourceSchemaAPI, '/api/data_sources/<data_source_id>/schema')

class DashboardRecentAPI(BaseResource):
    def get(self):
        return [d.to_dict() for d in models.Dashboard.recent(current_user.id).limit(20)]


class DashboardListAPI(BaseResource):
    def get(self):
        dashboards = [d.to_dict() for d in
@@ -224,6 +280,7 @@ class DashboardAPI(BaseResource):
        dashboard.save()

api.add_resource(DashboardListAPI, '/api/dashboards', endpoint='dashboards')
api.add_resource(DashboardRecentAPI, '/api/dashboards/recent', endpoint='recent_dashboards')
api.add_resource(DashboardAPI, '/api/dashboards/<dashboard_slug>', endpoint='dashboard')


@@ -263,13 +320,6 @@ class WidgetAPI(BaseResource):
    @require_permission('edit_dashboard')
    def delete(self, widget_id):
        widget = models.Widget.get(models.Widget.id == widget_id)
        # TODO: reposition existing ones
        layout = json.loads(widget.dashboard.layout)
        layout = map(lambda row: filter(lambda w: w != widget_id, row), layout)
        layout = filter(lambda row: len(row) > 0, layout)
        widget.dashboard.layout = json.dumps(layout)
        widget.dashboard.save()

        widget.delete_instance()

api.add_resource(WidgetListAPI, '/api/widgets', endpoint='widgets')
@@ -284,11 +334,17 @@ class QuerySearchAPI(BaseResource):
        return [q.to_dict() for q in models.Query.search(term)]


class QueryRecentAPI(BaseResource):
    @require_permission('view_query')
    def get(self):
        return [q.to_dict() for q in models.Query.recent(current_user.id).limit(20)]


class QueryListAPI(BaseResource):
    @require_permission('create_query')
    def post(self):
        query_def = request.get_json(force=True)
        for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data']:
        for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'last_modified_by']:
            query_def.pop(field, None)

        query_def['user'] = self.current_user
@@ -296,8 +352,6 @@ class QueryListAPI(BaseResource):
        query = models.Query(**query_def)
        query.save()

        query.create_default_visualizations()

        return query.to_dict()

    @require_permission('view_query')
@@ -308,8 +362,10 @@ class QueryListAPI(BaseResource):
class QueryAPI(BaseResource):
    @require_permission('edit_query')
    def post(self, query_id):
        query = models.Query.get_by_id(query_id)

        query_def = request.get_json(force=True)
        for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'user']:
        for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'user', 'last_modified_by']:
            query_def.pop(field, None)

        if 'latest_query_data_id' in query_def:
@@ -318,6 +374,9 @@ class QueryAPI(BaseResource):
        if 'data_source_id' in query_def:
            query_def['data_source'] = query_def.pop('data_source_id')

        query_def['last_modified_by'] = self.current_user

        # TODO: use #save() with #dirty_fields.
        models.Query.update_instance(query_id, **query_def)

        query = models.Query.get_by_id(query_id)
@@ -332,7 +391,20 @@ class QueryAPI(BaseResource):
        else:
            abort(404, message="Query not found.")

    # TODO: move to a resource of its own? (POST /queries/{id}/archive)
    def delete(self, query_id):
        q = models.Query.get(models.Query.id == query_id)

        if q:
            if q.user.id == self.current_user.id or self.current_user.has_permission('admin'):
                q.archive()
            else:
                abort(403)
        else:
            abort(404, message="Query not found.")

api.add_resource(QuerySearchAPI, '/api/queries/search', endpoint='queries_search')
api.add_resource(QueryRecentAPI, '/api/queries/recent', endpoint='recent_queries')
api.add_resource(QueryListAPI, '/api/queries', endpoint='queries')
api.add_resource(QueryAPI, '/api/queries/<query_id>', endpoint='query')


@@ -343,7 +415,7 @@ class VisualizationListAPI(BaseResource):
        kwargs = request.get_json(force=True)
        kwargs['options'] = json.dumps(kwargs['options'])
        kwargs['query'] = kwargs.pop('query_id')


        vis = models.Visualization(**kwargs)
        vis.save()

@@ -404,16 +476,19 @@ class QueryResultListAPI(BaseResource):
            activity=params['query']
        ).save()

        if params['ttl'] == 0:
        max_age = int(params['max_age'])

        if max_age == 0:
            query_result = None
        else:
            query_result = models.QueryResult.get_latest(params['data_source_id'], params['query'], int(params['ttl']))
            query_result = models.QueryResult.get_latest(params['data_source_id'], params['query'], max_age)

        if query_result:
            return {'query_result': query_result.to_dict()}
        else:
            data_source = models.DataSource.get_by_id(params['data_source_id'])
            job = QueryTask.add_task(params['query'], data_source)
            query_id = params.get('query_id', 'adhoc')
            job = QueryTask.add_task(params['query'], data_source, metadata={"Username": self.current_user.name, "Query ID": query_id})
            return {'job': job.to_dict()}
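The max_age parameter that replaces ttl above has three cases worth spelling out (a summary of the logic shown, not new behavior):

# max_age == 0   -> never reuse a cached result; always enqueue a new execution
# max_age == -1  -> reuse the latest cached result, however old it is
# max_age == N   -> reuse a result only if it was retrieved within the last N seconds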
@@ -427,10 +502,6 @@ class QueryResultAPI(BaseResource):
        writer.writer = utils.UnicodeWriter(s)
        writer.writeheader()
        for row in query_data['rows']:
            for k, v in row.iteritems():
                if isinstance(v, numbers.Number) and (v > 1000 * 1000 * 1000 * 100):
                    row[k] = datetime.datetime.fromtimestamp(v/1000.0)

            writer.writerow(row)

        headers = {'Content-Type': "text/csv; charset=UTF-8"}
@@ -448,6 +519,24 @@ class QueryResultAPI(BaseResource):
        query_result = models.QueryResult.get_by_id(query_result_id)

        if query_result:
            if isinstance(self.current_user, models.ApiUser):
                event = {
                    'user_id': None,
                    'action': 'api_get',
                    'timestamp': int(time.time()),
                    'api_key': self.current_user.id,
                    'file_type': filetype
                }

                if query_id:
                    event['object_type'] = 'query'
                    event['object_id'] = query_id
                else:
                    event['object_type'] = 'query_result'
                    event['object_id'] = query_result_id

                record_event.delay(event)

            if filetype == 'json':
                data = json.dumps({'query_result': query_result.to_dict()}, cls=utils.JSONEncoder)
                return make_response(data, 200, cache_headers)
@@ -1,33 +0,0 @@
import json


def get_query_runner(connection_type, connection_string):
    if connection_type == 'mysql':
        from redash.data import query_runner_mysql
        runner = query_runner_mysql.mysql(connection_string)
    elif connection_type == 'graphite':
        from redash.data import query_runner_graphite
        connection_params = json.loads(connection_string)
        if connection_params['auth']:
            connection_params['auth'] = tuple(connection_params['auth'])
        else:
            connection_params['auth'] = None
        runner = query_runner_graphite.graphite(connection_params)
    elif connection_type == 'bigquery':
        from redash.data import query_runner_bigquery
        connection_params = json.loads(connection_string)
        runner = query_runner_bigquery.bigquery(connection_params)
    elif connection_type == 'script':
        from redash.data import query_runner_script
        runner = query_runner_script.script(connection_string)
    elif connection_type == 'url':
        from redash.data import query_runner_url
        runner = query_runner_url.url(connection_string)
    elif connection_type == "mongo":
        from redash.data import query_runner_mongodb
        runner = query_runner_mongodb.mongodb(connection_string)
    else:
        from redash.data import query_runner_pg
        runner = query_runner_pg.pg(connection_string)

    return runner
@@ -1,138 +0,0 @@
import datetime
import httplib2
import json
import logging
import sys
import time

try:
    import apiclient.errors
    from apiclient.discovery import build
    from apiclient.errors import HttpError
    from oauth2client.client import SignedJwtAssertionCredentials
except ImportError:
    print "Missing dependencies. Please install google-api-python-client and oauth2client."
    print "You can use pip: pip install google-api-python-client oauth2client"

from redash.utils import JSONEncoder

types_map = {
    'INTEGER': 'integer',
    'FLOAT': 'float',
    'BOOLEAN': 'boolean',
    'STRING': 'string',
    'TIMESTAMP': 'datetime',
}

def transform_row(row, fields):
    column_index = 0
    row_data = {}

    for cell in row["f"]:
        field = fields[column_index]
        cell_value = cell['v']

        if cell_value is None:
            pass
        # Otherwise just cast the value
        elif field['type'] == 'INTEGER':
            cell_value = int(cell_value)
        elif field['type'] == 'FLOAT':
            cell_value = float(cell_value)
        elif field['type'] == 'BOOLEAN':
            cell_value = cell_value.lower() == "true"
        elif field['type'] == 'TIMESTAMP':
            cell_value = datetime.datetime.fromtimestamp(float(cell_value))

        row_data[field["name"]] = cell_value
        column_index += 1

    return row_data

def bigquery(connection_string):
    def load_key(filename):
        f = file(filename, "rb")
        try:
            return f.read()
        finally:
            f.close()

    def get_bigquery_service():
        scope = [
            "https://www.googleapis.com/auth/bigquery",
        ]

        credentials = SignedJwtAssertionCredentials(connection_string["serviceAccount"],
                                                    load_key(connection_string["privateKey"]), scope=scope)
        http = httplib2.Http()
        http = credentials.authorize(http)

        return build("bigquery", "v2", http=http)

    def get_query_results(jobs, project_id, job_id, start_index):
        query_reply = jobs.getQueryResults(projectId=project_id, jobId=job_id, startIndex=start_index).execute()
        logging.debug('query_reply %s', query_reply)
        if not query_reply['jobComplete']:
            time.sleep(10)
            return get_query_results(jobs, project_id, job_id, start_index)

        return query_reply

    def query_runner(query):
        bigquery_service = get_bigquery_service()

        jobs = bigquery_service.jobs()
        job_data = {
            "configuration": {
                "query": {
                    "query": query,
                }
            }
        }

        logging.debug("bigquery got query: %s", query)

        project_id = connection_string["projectId"]

        try:
            insert_response = jobs.insert(projectId=project_id, body=job_data).execute()
            current_row = 0
            query_reply = get_query_results(jobs, project_id=project_id,
                                            job_id=insert_response['jobReference']['jobId'], start_index=current_row)

            logging.debug("bigquery replied: %s", query_reply)

            rows = []

            while ("rows" in query_reply) and current_row < query_reply['totalRows']:
                for row in query_reply["rows"]:
                    rows.append(transform_row(row, query_reply["schema"]["fields"]))

                current_row += len(query_reply['rows'])
                query_reply = jobs.getQueryResults(projectId=project_id, jobId=query_reply['jobReference']['jobId'],
                                                   startIndex=current_row).execute()

            columns = [{'name': f["name"],
                        'friendly_name': f["name"],
                        'type': types_map.get(f['type'], "string")} for f in query_reply["schema"]["fields"]]

            data = {
                "columns": columns,
                "rows": rows
            }
            error = None

            json_data = json.dumps(data, cls=JSONEncoder)
        except apiclient.errors.HttpError, e:
            json_data = None
            error = e.content
        except KeyboardInterrupt:
            error = "Query cancelled by user."
            json_data = None
        except Exception:
            raise sys.exc_info()[1], None, sys.exc_info()[2]

        return json_data, error


    return query_runner
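transform_row converts one BigQuery API row into a plain dict keyed by column name; an illustrative call (values invented for the example):

fields = [{'name': 'hits', 'type': 'INTEGER'}, {'name': 'ts', 'type': 'TIMESTAMP'}]
row = {'f': [{'v': '42'}, {'v': '1418091932.0'}]}
transform_row(row, fields)
# -> {'hits': 42, 'ts': datetime.datetime(2014, 12, 9, ...)}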
@@ -1,46 +0,0 @@
"""
QueryRunner for Graphite.
"""
import json
import datetime
import requests
from redash.utils import JSONEncoder


def graphite(connection_params):
    def transform_result(response):
        columns = [{'name': 'Time::x'}, {'name': 'value::y'}, {'name': 'name::series'}]
        rows = []

        for series in response.json():
            for values in series['datapoints']:
                timestamp = datetime.datetime.fromtimestamp(int(values[1]))
                rows.append({'Time::x': timestamp, 'name::series': series['target'], 'value::y': values[0]})

        data = {'columns': columns, 'rows': rows}
        return json.dumps(data, cls=JSONEncoder)

    def query_runner(query):
        base_url = "%s/render?format=json&" % connection_params['url']
        url = "%s%s" % (base_url, "&".join(query.split("\n")))
        error = None
        data = None

        try:
            response = requests.get(url, auth=connection_params['auth'],
                                    verify=connection_params['verify'])

            if response.status_code == 200:
                data = transform_result(response)
            else:
                error = "Failed getting results (%d)" % response.status_code

        except Exception, ex:
            data = None
            error = ex.message

        return data, error

    query_runner.annotate_query = False

    return query_runner
@@ -1,135 +0,0 @@
import datetime
import logging
import json
import sys
import re
import time
from redash.utils import JSONEncoder

try:
    import pymongo
    from bson.objectid import ObjectId
except ImportError:
    print "Missing dependencies. Please install pymongo."
    print "You can use pip: pip install pymongo"

TYPES_MAP = {
    ObjectId: "string",
    str: "string",
    unicode: "string",
    int: "integer",
    long: "integer",
    float: "float",
    bool: "boolean",
    datetime.datetime: "datetime",
}

date_regex = re.compile("ISODate\(\"(.*)\"\)", re.IGNORECASE)

def mongodb(connection_string):
    def _get_column_by_name(columns, column_name):
        for c in columns:
            if "name" in c and c["name"] == column_name:
                return c

        return None

    def _convert_date(q, field_name):
        m = date_regex.findall(q[field_name])
        if len(m) > 0:
            if q[field_name].find(":") == -1:
                q[field_name] = datetime.datetime.fromtimestamp(time.mktime(time.strptime(m[0], "%Y-%m-%d")))
            else:
                q[field_name] = datetime.datetime.fromtimestamp(time.mktime(time.strptime(m[0], "%Y-%m-%d %H:%M")))

    def query_runner(query):
        db_name = connection_string.split("/")[-1]

        db_connection = pymongo.MongoClient(connection_string)
        if db_name not in db_connection.database_names():
            return None, "Unknown database name '%s'" % db_name

        db = db_connection[db_name]

        logging.debug("mongodb connection string: %s", connection_string)
        logging.debug("mongodb got query: %s", query)

        try:
            query_data = json.loads(query)
        except:
            return None, "Invalid query format. The query is not valid JSON."

        collection = None
        if not "collection" in query_data:
            return None, "'collection' must have a value to run a query"
        else:
            collection = query_data["collection"]

        q = None
        if "query" in query_data:
            q = query_data["query"]
            for k in q:
                if q[k] and type(q[k]) in [str, unicode]:
                    logging.debug(q[k])
                    _convert_date(q, k)
                elif q[k] and type(q[k]) is dict:
                    for k2 in q[k]:
                        if type(q[k][k2]) in [str, unicode]:
                            _convert_date(q[k], k2)

        f = None
        if "fields" in query_data:
            f = query_data["fields"]

        s = None
        if "sort" in query_data and query_data["sort"]:
            s = []
            for field_name in query_data["sort"]:
                s.append((field_name, query_data["sort"][field_name]))

        columns = []
        rows = []

        error = None
        json_data = None

        cursor = None
        if s:
            cursor = db[collection].find(q, f).sort(s)
        else:
            cursor = db[collection].find(q, f)

        for r in cursor:
            for k in r:
                if _get_column_by_name(columns, k) is None:
                    columns.append({
                        "name": k,
                        "friendly_name": k,
                        "type": TYPES_MAP[type(r[k])] if type(r[k]) in TYPES_MAP else None
                    })

                # Convert ObjectId to string
                if type(r[k]) == ObjectId:
                    r[k] = str(r[k])

            rows.append(r)

        if f:
            ordered_columns = []
            for k in sorted(f, key=f.get):
                ordered_columns.append(_get_column_by_name(columns, k))

            columns = ordered_columns

        data = {
            "columns": columns,
            "rows": rows
        }
        error = None
        json_data = json.dumps(data, cls=JSONEncoder)

        return json_data, error

    query_runner.annotate_query = False
    return query_runner
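The JSON document this runner parses has four recognized keys; a hypothetical query illustrating all of them (field and collection names invented):

query = json.dumps({
    "collection": "events",                                     # required
    "query": {"created_at": {"$gt": 'ISODate("2015-01-01")'}},  # ISODate strings are converted by _convert_date
    "fields": {"created_at": 1, "name": 2},                     # projection; the values also set column order
    "sort": {"created_at": -1},
})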
@@ -1,64 +0,0 @@
"""
QueryRunner is the function the workers use to execute queries. This is the Redshift
(PostgreSQL, in fact) version, but we can easily write another to support additional
databases (MySQL and others).

Because the worker just passes the query through, this can be used with any data store
that has some sort of query language (for example: HiveQL).
"""
import logging
import json
import MySQLdb
import sys
from redash.utils import JSONEncoder

def mysql(connection_string):
    if connection_string.endswith(';'):
        connection_string = connection_string[0:-1]

    def query_runner(query):
        connections_params = [entry.split('=')[1] for entry in connection_string.split(';')]
        connection = MySQLdb.connect(*connections_params, charset="utf8", use_unicode=True)
        cursor = connection.cursor()

        logging.debug("mysql got query: %s", query)

        try:
            cursor.execute(query)

            data = cursor.fetchall()

            cursor_desc = cursor.description
            if cursor_desc is not None:
                num_fields = len(cursor_desc)
                column_names = [i[0] for i in cursor.description]

                rows = [dict(zip(column_names, row)) for row in data]

                columns = [{'name': col_name,
                            'friendly_name': col_name,
                            'type': None} for col_name in column_names]

                data = {'columns': columns, 'rows': rows}
                json_data = json.dumps(data, cls=JSONEncoder)
                error = None
            else:
                json_data = None
                error = "No data was returned."

            cursor.close()
        except MySQLdb.Error, e:
            json_data = None
            error = e.args[1]
        except KeyboardInterrupt:
            error = "Query cancelled by user."
            json_data = None
        except Exception as e:
            raise sys.exc_info()[1], None, sys.exc_info()[2]
        finally:
            connection.close()

        return json_data, error


    return query_runner
@@ -1,110 +0,0 @@
"""
QueryRunner is the function the workers use to execute queries. This is the PostgreSQL
version, but we can easily write another to support additional databases (MySQL and others).

Because the worker just passes the query through, this can be used with any data store
that has some sort of query language (for example: HiveQL).
"""
import json
import sys
import select
import logging
import psycopg2

from redash.utils import JSONEncoder

types_map = {
    20: 'integer',
    21: 'integer',
    23: 'integer',
    700: 'float',
    1700: 'float',
    701: 'float',
    16: 'boolean',
    1082: 'date',
    1114: 'datetime',
    1184: 'datetime',
    1014: 'string',
    1015: 'string',
    1008: 'string',
    1009: 'string',
    2951: 'string'
}


def pg(connection_string):
    def column_friendly_name(column_name):
        return column_name

    def wait(conn):
        while 1:
            try:
                state = conn.poll()
                if state == psycopg2.extensions.POLL_OK:
                    break
                elif state == psycopg2.extensions.POLL_WRITE:
                    select.select([], [conn.fileno()], [])
                elif state == psycopg2.extensions.POLL_READ:
                    select.select([conn.fileno()], [], [])
                else:
                    raise psycopg2.OperationalError("poll() returned %s" % state)
            except select.error:
                raise psycopg2.OperationalError("select.error received")

    def query_runner(query):
        connection = psycopg2.connect(connection_string, async=True)
        wait(connection)

        cursor = connection.cursor()

        try:
            cursor.execute(query)
            wait(connection)

            # While a set would be more efficient here, it sorts the data, which is not what
            # we want; given the small size of the data this is acceptable.
            column_names = []
            columns = []
            duplicates_counter = 1

            for column in cursor.description:
                # TODO: this deduplication needs to be generalized and reused in all query runners.
                column_name = column.name
                if column_name in column_names:
                    column_name = column_name + str(duplicates_counter)
                    duplicates_counter += 1

                column_names.append(column_name)

                columns.append({
                    'name': column_name,
                    'friendly_name': column_friendly_name(column_name),
                    'type': types_map.get(column.type_code, None)
                })

            rows = [dict(zip(column_names, row)) for row in cursor]

            data = {'columns': columns, 'rows': rows}
            json_data = json.dumps(data, cls=JSONEncoder)
            error = None
            cursor.close()
        except (select.error, OSError) as e:
            logging.exception(e)
            error = "Query interrupted. Please retry."
            json_data = None
        except psycopg2.DatabaseError as e:
            logging.exception(e)
            json_data = None
            error = e.message
        except KeyboardInterrupt:
            connection.cancel()
            error = "Query cancelled by user."
            json_data = None
        except Exception as e:
            raise sys.exc_info()[1], None, sys.exc_info()[2]
        finally:
            connection.close()

        return json_data, error

    return query_runner
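The column-name deduplication above means repeated result names get a numeric suffix; a small illustration of the behavior (hypothetical query):

# SELECT 1 AS x, 2 AS x  ->  columns named 'x' and 'x1'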
@@ -1,48 +0,0 @@
import json
import logging
import sys
import os
import subprocess

# We use subprocess.check_output because we are lazy.
# If someone really wants to run this on Python < 2.7 they can easily update the code to use
# Popen, check the return codes, and read the standard output into a variable.
if not "check_output" in subprocess.__dict__:
    print "ERROR: This runner uses the subprocess.check_output function, which exists in Python 2.7"

def script(connection_string):

    def query_runner(query):
        try:
            json_data = None
            error = None

            # Poor man's protection against running scripts from outside the scripts directory
            if connection_string.find("../") > -1:
                return None, "Scripts can only be run from the configured scripts directory"

            query = query.strip()

            script = os.path.join(connection_string, query)
            if not os.path.exists(script):
                return None, "Script '%s' not found in script directory" % query

            output = subprocess.check_output(script, shell=False)
            if output is not None:
                output = output.strip()
                if output != "":
                    return output, None

            error = "Error reading output"
        except subprocess.CalledProcessError as e:
            return None, str(e)
        except KeyboardInterrupt:
            error = "Query cancelled by user."
            json_data = None
        except Exception as e:
            raise sys.exc_info()[1], None, sys.exc_info()[2]

        return json_data, error

    query_runner.annotate_query = False
    return query_runner
@@ -1,8 +1,11 @@
import contextlib
import json
import logging
import os
from redash import models
from flask.ext.script import Manager

logger = logging.getLogger()

class Importer(object):
    def __init__(self, object_mapping=None, data_source=None):
@@ -22,22 +25,17 @@ class Importer(object):

        return query_result


    def import_query(self, user, query):
        query_result = self.import_query_result(query['latest_query_data'])

        new_query = self._get_or_create(models.Query, query['id'], name=query['name'],
                                        user=user,
                                        ttl=-1,
                                        schedule=None,
                                        query=query['query'],
                                        query_hash=query['query_hash'],
                                        description=query['description'],
                                        latest_query_data=query_result,
                                        data_source=self.data_source)

        return new_query


    def import_visualization(self, user, visualization):
        query = self.import_query(user, visualization['query'])

@@ -50,9 +48,13 @@ class Importer(object):
        return new_visualization

    def import_widget(self, dashboard, widget):
        visualization = self.import_visualization(dashboard.user, widget['visualization'])
        if 'visualization' in widget:
            visualization = self.import_visualization(dashboard.user, widget['visualization'])
        else:
            visualization = None

        new_widget = self._get_or_create(models.Widget, widget['id'],
                                         text=widget.get('text', None),
                                         dashboard=dashboard,
                                         width=widget['width'],
                                         options=json.dumps(widget['options']),
@@ -91,6 +93,7 @@ class Importer(object):

    def _get_or_create(self, object_type, external_id, **properties):
        internal_id = self._get_mapping(object_type, external_id)
        logger.info("Creating %s with external id: %s and internal id: %s", object_type, external_id, internal_id)
        if internal_id:
            update = object_type.update(**properties).where(object_type.id == internal_id)
            update.execute()
@@ -114,11 +117,21 @@ export_manager = Manager(help="export utilities")


@contextlib.contextmanager
def importer_with_mapping_file(mapping_filename):
def importer_with_mapping_file(mapping_filename, data_source_id=None):
    # Touch the file in case it doesn't exist
    if not os.path.isfile(mapping_filename):
        with open(mapping_filename, 'w') as f:
            f.write("{}")

    with open(mapping_filename) as f:
        mapping = json.loads(f.read())

    importer = Importer(object_mapping=mapping, data_source=get_data_source())
    if data_source_id is not None:
        data_source = models.DataSource.get_by_id(data_source_id)
    else:
        data_source = get_data_source()

    importer = Importer(object_mapping=mapping, data_source=data_source)
    yield importer

    with open(mapping_filename, 'w') as f:
@@ -146,12 +159,13 @@ def query(mapping_filename, query_filename, user_id):


@import_manager.command
def dashboard(mapping_filename, dashboard_filename, user_id):
def dashboard(mapping_filename, dashboard_filename, user_id, data_source_id=None):
    user = models.User.get_by_id(user_id)

    with open(dashboard_filename) as f:
        dashboard = json.loads(f.read())

    with importer_with_mapping_file(mapping_filename) as importer:
    with importer_with_mapping_file(mapping_filename, data_source_id) as importer:
        importer.import_dashboard(user, dashboard)
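Using the context manager directly follows the same shape as the CLI command (a sketch; user and dashboard_dict are assumed to be loaded elsewhere):

with importer_with_mapping_file("mapping.json", data_source_id=1) as importer:
    importer.import_dashboard(user, dashboard_dict)
# On exit, the (possibly updated) object mapping is written back to mapping.json.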
redash/models.py (268 lines changed)
@@ -9,17 +9,20 @@ import itertools
import peewee
from passlib.apps import custom_app_context as pwd_context
from playhouse.postgres_ext import ArrayField
from playhouse.postgres_ext import ArrayField, DateTimeTZField, PostgresqlExtDatabase
from flask.ext.login import UserMixin, AnonymousUserMixin
import psycopg2

from redash import utils, settings
from redash import utils, settings, redis_connection
from redash.query_runner import get_query_runner


class Database(object):
    def __init__(self):
        self.database_config = dict(settings.DATABASE_CONFIG)
        self.database_config['register_hstore'] = False
        self.database_name = self.database_config.pop('name')
        self.database = peewee.PostgresqlDatabase(self.database_name, **self.database_config)
        self.database = PostgresqlExtDatabase(self.database_name, **self.database_config)
        self.app = None
        self.pid = os.getpid()

@@ -59,17 +62,57 @@ class BaseModel(peewee.Model):
    def get_by_id(cls, model_id):
        return cls.get(cls.id == model_id)

    def pre_save(self, created):
        pass

class AnonymousUser(AnonymousUserMixin):
    def post_save(self, created):
        # Handler for post_save operations. Override if needed.
        pass

    def save(self, *args, **kwargs):
        pk_value = self._get_pk_value()
        created = kwargs.get('force_insert', False) or not bool(pk_value)
        self.pre_save(created)
        super(BaseModel, self).save(*args, **kwargs)
        self.post_save(created)


class ModelTimestampsMixin(BaseModel):
    updated_at = DateTimeTZField(default=datetime.datetime.now)
    created_at = DateTimeTZField(default=datetime.datetime.now)

    def pre_save(self, created):
        super(ModelTimestampsMixin, self).pre_save(created)

        self.updated_at = datetime.datetime.now()


class PermissionsCheckMixin(object):
    def has_permission(self, permission):
        return self.has_permissions((permission,))

    def has_permissions(self, permissions):
        has_permissions = reduce(lambda a, b: a and b,
                                 map(lambda permission: permission in self.permissions,
                                     permissions),
                                 True)

        return has_permissions
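The reduce/map combination above is equivalent to a simple all() check; an idiomatic restatement (a sketch, not the committed code):

def has_permissions(self, permissions):
    return all(permission in self.permissions for permission in permissions)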
|
||||
|
||||
|
||||
class AnonymousUser(AnonymousUserMixin, PermissionsCheckMixin):
|
||||
@property
|
||||
def permissions(self):
|
||||
return []
|
||||
|
||||
|
||||
class ApiUser(UserMixin):
|
||||
class ApiUser(UserMixin, PermissionsCheckMixin):
|
||||
def __init__(self, api_key):
|
||||
self.id = api_key
|
||||
|
||||
def __repr__(self):
|
||||
return u"<ApiUser: {}>".format(self.id)
|
||||
|
||||
@property
|
||||
def permissions(self):
|
||||
return ['view_query']
|
||||
@@ -83,7 +126,7 @@ class Group(BaseModel):
|
||||
name = peewee.CharField(max_length=100)
|
||||
permissions = ArrayField(peewee.CharField, default=DEFAULT_PERMISSIONS)
|
||||
tables = ArrayField(peewee.CharField)
|
||||
created_at = peewee.DateTimeField(default=datetime.datetime.now)
|
||||
created_at = DateTimeTZField(default=datetime.datetime.now)
|
||||
|
||||
class Meta:
|
||||
db_table = 'groups'
|
||||
@@ -101,7 +144,7 @@ class Group(BaseModel):
|
||||
return unicode(self.id)
|
||||
|
||||
|
||||
class User(BaseModel, UserMixin):
|
||||
class User(ModelTimestampsMixin, BaseModel, UserMixin, PermissionsCheckMixin):
|
||||
DEFAULT_GROUPS = ['default']
|
||||
|
||||
id = peewee.PrimaryKeyField()
|
||||
@@ -117,7 +160,9 @@ class User(BaseModel, UserMixin):
|
||||
return {
|
||||
'id': self.id,
|
||||
'name': self.name,
|
||||
'email': self.email
|
||||
'email': self.email,
|
||||
'updated_at': self.updated_at,
|
||||
'created_at': self.created_at
|
||||
}
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
@@ -160,7 +205,7 @@ class ActivityLog(BaseModel):
|
||||
user = peewee.ForeignKeyField(User)
|
||||
type = peewee.IntegerField()
|
||||
activity = peewee.TextField()
|
||||
created_at = peewee.DateTimeField(default=datetime.datetime.now)
|
||||
created_at = DateTimeTZField(default=datetime.datetime.now)
|
||||
|
||||
class Meta:
|
||||
db_table = 'activity_log'
|
||||
@@ -180,12 +225,12 @@ class ActivityLog(BaseModel):

class DataSource(BaseModel):
    id = peewee.PrimaryKeyField()
-   name = peewee.CharField()
+   name = peewee.CharField(unique=True)
    type = peewee.CharField()
    options = peewee.TextField()
    queue_name = peewee.CharField(default="queries")
    scheduled_queue_name = peewee.CharField(default="queries")
-   created_at = peewee.DateTimeField(default=datetime.datetime.now)
+   created_at = DateTimeTZField(default=datetime.datetime.now)

    class Meta:
        db_table = 'data_sources'
@@ -194,9 +239,31 @@ class DataSource(BaseModel):
        return {
            'id': self.id,
            'name': self.name,
-           'type': self.type
+           'type': self.type,
+           'syntax': self.query_runner.syntax
        }

+   def get_schema(self, refresh=False):
+       key = "data_source:schema:{}".format(self.id)
+
+       cache = None
+       if not refresh:
+           cache = redis_connection.get(key)
+
+       if cache is None:
+           query_runner = self.query_runner
+           schema = sorted(query_runner.get_schema(), key=lambda t: t['name'])
+
+           redis_connection.set(key, json.dumps(schema))
+       else:
+           schema = json.loads(cache)
+
+       return schema
+
+   @property
+   def query_runner(self):
+       return get_query_runner(self.type, self.options)
+
+   @classmethod
+   def all(cls):
+       return cls.select().order_by(cls.id.asc())
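The get_schema() method above is a cache-aside read: hit Redis unless a refresh is forced, recompute and store on a miss. A generic sketch of the same pattern (illustrative; cached() and its arguments are not part of the diff):

import json

def cached(redis_conn, key, compute, refresh=False):
    value = None if refresh else redis_conn.get(key)
    if value is None:
        result = compute()
        redis_conn.set(key, json.dumps(result))
        return result
    return json.loads(value)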
@@ -209,7 +276,7 @@ class QueryResult(BaseModel):
    query = peewee.TextField()
    data = peewee.TextField()
    runtime = peewee.FloatField()
-   retrieved_at = peewee.DateTimeField()
+   retrieved_at = DateTimeTZField()

    class Meta:
        db_table = 'query_results'
@@ -226,16 +293,25 @@ class QueryResult(BaseModel):
        }

+   @classmethod
+   def unused(cls):
+       week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
+
+       unused_results = cls.select().where(Query.id == None, cls.retrieved_at < week_ago)\
+           .join(Query, join_type=peewee.JOIN_LEFT_OUTER)
+
+       return unused_results
+
    @classmethod
-   def get_latest(cls, data_source, query, ttl=0):
+   def get_latest(cls, data_source, query, max_age=0):
        query_hash = utils.gen_query_hash(query)

-       if ttl == -1:
+       if max_age == -1:
            query = cls.select().where(cls.query_hash == query_hash,
                                       cls.data_source == data_source).order_by(cls.retrieved_at.desc())
        else:
            query = cls.select().where(cls.query_hash == query_hash, cls.data_source == data_source,
                                       peewee.SQL("retrieved_at + interval '%s second' >= now() at time zone 'utc'",
-                                                 ttl)).order_by(cls.retrieved_at.desc())
+                                                 max_age)).order_by(cls.retrieved_at.desc())

        return query.first()
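Usage note (the example values are hypothetical): max_age=-1 accepts any cached result, a positive value accepts results retrieved within that many seconds, and the default of 0 matches nothing, which forces a fresh execution.

latest = QueryResult.get_latest(data_source, "SELECT count(*) FROM events", max_age=300)
if latest is None:
    pass  # nothing newer than 5 minutes; the caller would enqueue an execution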
@@ -262,7 +338,28 @@ class QueryResult(BaseModel):
        return u"%d | %s | %s" % (self.id, self.query_hash, self.retrieved_at)


+def should_schedule_next(previous_iteration, now, schedule):
+    if schedule.isdigit():
+        ttl = int(schedule)
+        next_iteration = previous_iteration + datetime.timedelta(seconds=ttl)
+    else:
+        hour, minute = schedule.split(':')
+        hour, minute = int(hour), int(minute)
+
+        # The following logic is needed for cases like the following:
+        # - The query is scheduled to run at 23:59.
+        # - The scheduler wakes up at 00:01.
+        # - With a naive comparison of timestamps, it would skip the execution.
+        normalized_previous_iteration = previous_iteration.replace(hour=hour, minute=minute)
+        if normalized_previous_iteration > previous_iteration:
+            previous_iteration = normalized_previous_iteration - datetime.timedelta(days=1)
+
+        next_iteration = (previous_iteration + datetime.timedelta(days=1)).replace(hour=hour, minute=minute)
+
+    return now > next_iteration
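A quick aside (not part of the diff) showing the two schedule formats handled above, an interval in seconds and a daily "HH:MM" time:

import datetime

last_run = datetime.datetime(2015, 1, 1, 23, 50)
now = datetime.datetime(2015, 1, 2, 0, 1)

print should_schedule_next(last_run, now, "3600")   # False: an hour hasn't passed yet
print should_schedule_next(last_run, now, "23:59")  # True: the missed 23:59 slot is caught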

-class Query(BaseModel):
+class Query(ModelTimestampsMixin, BaseModel):
    id = peewee.PrimaryKeyField()
    data_source = peewee.ForeignKeyField(DataSource)
    latest_query_data = peewee.ForeignKeyField(QueryResult, null=True)

@@ -271,20 +368,15 @@ class Query(BaseModel):
    query = peewee.TextField()
    query_hash = peewee.CharField(max_length=32)
    api_key = peewee.CharField(max_length=40)
-   ttl = peewee.IntegerField()
    user_email = peewee.CharField(max_length=360, null=True)
    user = peewee.ForeignKeyField(User)
-   created_at = peewee.DateTimeField(default=datetime.datetime.now)
+   last_modified_by = peewee.ForeignKeyField(User, null=True, related_name="modified_queries")
+   is_archived = peewee.BooleanField(default=False, index=True)
+   schedule = peewee.CharField(max_length=10, null=True)

    class Meta:
        db_table = 'queries'

-   def create_default_visualizations(self):
-       table_visualization = Visualization(query=self, name="Table",
-                                           description='',
-                                           type="TABLE", options="{}")
-       table_visualization.save()

    def to_dict(self, with_stats=False, with_visualizations=False, with_user=True):
        d = {
            'id': self.id,
@@ -293,14 +385,17 @@ class Query(BaseModel):
            'description': self.description,
            'query': self.query,
            'query_hash': self.query_hash,
-           'ttl': self.ttl,
+           'schedule': self.schedule,
            'api_key': self.api_key,
+           'is_archived': self.is_archived,
+           'updated_at': self.updated_at,
+           'created_at': self.created_at,
            'data_source_id': self._data.get('data_source', None)
        }

        if with_user:
            d['user'] = self.user.to_dict()
+           d['last_modified_by'] = self.last_modified_by.to_dict()
        else:
            d['user_id'] = self._data['user']
@@ -314,36 +409,68 @@ class Query(BaseModel):

        return d

+   def archive(self):
+       self.is_archived = True
+       self.schedule = None
+
+       for vis in self.visualizations:
+           for w in vis.widgets:
+               w.delete_instance()
+
+       self.save()
+
    @classmethod
    def all_queries(cls):
        q = Query.select(Query, User, QueryResult.retrieved_at, QueryResult.runtime)\
            .join(QueryResult, join_type=peewee.JOIN_LEFT_OUTER)\
            .switch(Query).join(User)\
-           .group_by(Query.id, User.id, QueryResult.id, QueryResult.retrieved_at, QueryResult.runtime)
+           .where(Query.is_archived==False)\
+           .group_by(Query.id, User.id, QueryResult.id, QueryResult.retrieved_at, QueryResult.runtime)\
+           .order_by(cls.created_at.desc())

        return q

    @classmethod
    def outdated_queries(cls):
        # TODO: this will only find scheduled queries that were executed before. I think this is
        # a reasonable assumption, but worth revisiting.
-       outdated_queries_ids = cls.select(
-           peewee.Func('first_value', cls.id).over(partition_by=[cls.query_hash, cls.data_source])) \
-           .join(QueryResult) \
-           .where(cls.ttl > 0,
-                  (QueryResult.retrieved_at +
-                   (cls.ttl * peewee.SQL("interval '1 second'"))) <
-                  peewee.SQL("(now() at time zone 'utc')"))
-
-       queries = cls.select(cls, DataSource).join(DataSource) \
-           .where(cls.id << outdated_queries_ids)
-
-       return queries
+       queries = cls.select(cls, QueryResult.retrieved_at, DataSource)\
+           .join(QueryResult)\
+           .switch(Query).join(DataSource)\
+           .where(cls.schedule != None)
+
+       now = datetime.datetime.utcnow().replace(tzinfo=psycopg2.tz.FixedOffsetTimezone(offset=0, name=None))
+       outdated_queries = {}
+       for query in queries:
+           if should_schedule_next(query.latest_query_data.retrieved_at, now, query.schedule):
+               key = "{}:{}".format(query.query_hash, query.data_source.id)
+               outdated_queries[key] = query
+
+       return outdated_queries.values()
    @classmethod
    def search(cls, term):
        # This is a very naive implementation of search, to be replaced with a PostgreSQL full-text search solution.
-       return cls.select().where((cls.name**"%{}%".format(term)) | (cls.description**"%{}%".format(term)))
+       where = (cls.name**u"%{}%".format(term)) | (cls.description**u"%{}%".format(term))
+
+       if term.isdigit():
+           where |= cls.id == term
+
+       where &= cls.is_archived == False
+
+       return cls.select().where(where).order_by(cls.created_at.desc())
    @classmethod
    def recent(cls, user_id):
        # TODO: instead of t2 here, we should define table_alias for Query table
        return cls.select().where(Event.created_at > peewee.SQL("current_date - 7")).\
            join(Event, on=(Query.id == peewee.SQL("t2.object_id::integer"))).\
            where(Event.action << ('edit', 'execute', 'edit_name', 'edit_description', 'view_source')).\
            where(Event.user == user_id).\
            where(~(Event.object_id >> None)).\
            where(Event.object_type == 'query').\
            where(cls.is_archived == False).\
            group_by(Event.object_id, Query.id).\
            order_by(peewee.SQL("count(0) desc"))
    @classmethod
    def update_instance(cls, query_id, **kwargs):
@@ -353,10 +480,23 @@ class Query(BaseModel):
        update = cls.update(**kwargs).where(cls.id == query_id)
        return update.execute()

-   def save(self, *args, **kwargs):
+   def pre_save(self, created):
+       super(Query, self).pre_save(created)
        self.query_hash = utils.gen_query_hash(self.query)
        self._set_api_key()
-       super(Query, self).save(*args, **kwargs)
+
+       if self.last_modified_by is None:
+           self.last_modified_by = self.user
+
+   def post_save(self, created):
+       if created:
+           self._create_default_visualizations()
+
+   def _create_default_visualizations(self):
+       table_visualization = Visualization(query=self, name="Table",
+                                           description='',
+                                           type="TABLE", options="{}")
+       table_visualization.save()

    def _set_api_key(self):
        if not self.api_key:
@@ -375,7 +515,7 @@ class Query(BaseModel):
        return unicode(self.id)


-class Dashboard(BaseModel):
+class Dashboard(ModelTimestampsMixin, BaseModel):
    id = peewee.PrimaryKeyField()
    slug = peewee.CharField(max_length=140, index=True)
    name = peewee.CharField(max_length=100)

@@ -384,7 +524,6 @@ class Dashboard(BaseModel):
    layout = peewee.TextField()
    dashboard_filters_enabled = peewee.BooleanField(default=False)
    is_archived = peewee.BooleanField(default=False, index=True)
-   created_at = peewee.DateTimeField(default=datetime.datetime.now)

    class Meta:
        db_table = 'dashboards'
@@ -426,13 +565,26 @@ class Dashboard(BaseModel):
            'user_id': self._data['user'],
            'layout': layout,
            'dashboard_filters_enabled': self.dashboard_filters_enabled,
-           'widgets': widgets_layout
+           'widgets': widgets_layout,
+           'updated_at': self.updated_at,
+           'created_at': self.created_at
        }

    @classmethod
    def get_by_slug(cls, slug):
        return cls.get(cls.slug == slug)

+   @classmethod
+   def recent(cls, user_id):
+       return cls.select().where(Event.created_at > peewee.SQL("current_date - 7")).\
+           join(Event, on=(Dashboard.id == peewee.SQL("t2.object_id::integer"))).\
+           where(Event.action << ('edit', 'view')).\
+           where(Event.user == user_id).\
+           where(~(Event.object_id >> None)).\
+           where(Event.object_type == 'dashboard').\
+           group_by(Event.object_id, Dashboard.id).\
+           order_by(peewee.SQL("count(0) desc"))
+
    def save(self, *args, **kwargs):
        if not self.slug:
            self.slug = utils.slugify(self.name)
@@ -448,7 +600,7 @@ class Dashboard(BaseModel):
        return u"%s=%s" % (self.id, self.name)


-class Visualization(BaseModel):
+class Visualization(ModelTimestampsMixin, BaseModel):
    id = peewee.PrimaryKeyField()
    type = peewee.CharField(max_length=100)
    query = peewee.ForeignKeyField(Query, related_name='visualizations')

@@ -466,6 +618,8 @@ class Visualization(BaseModel):
            'name': self.name,
            'description': self.description,
            'options': json.loads(self.options),
+           'updated_at': self.updated_at,
+           'created_at': self.created_at
        }

        if with_query:
@@ -477,14 +631,13 @@ class Visualization(BaseModel):
        return u"%s %s" % (self.id, self.type)


-class Widget(BaseModel):
+class Widget(ModelTimestampsMixin, BaseModel):
    id = peewee.PrimaryKeyField()
    visualization = peewee.ForeignKeyField(Visualization, related_name='widgets', null=True)
    text = peewee.TextField(null=True)
    width = peewee.IntegerField()
    options = peewee.TextField()
    dashboard = peewee.ForeignKeyField(Dashboard, related_name='widgets', index=True)
-   created_at = peewee.DateTimeField(default=datetime.datetime.now)

    # unused; kept for backward compatibility:
    type = peewee.CharField(max_length=100, null=True)
@@ -499,25 +652,35 @@ class Widget(BaseModel):
            'width': self.width,
            'options': json.loads(self.options),
            'dashboard_id': self._data['dashboard'],
-           'text': self.text
+           'text': self.text,
+           'updated_at': self.updated_at,
+           'created_at': self.created_at
        }

        if self.visualization and self.visualization.id:
            d['visualization'] = self.visualization.to_dict()

        return d

    def __unicode__(self):
        return u"%s" % self.id

+   def delete_instance(self, *args, **kwargs):
+       layout = json.loads(self.dashboard.layout)
+       layout = map(lambda row: filter(lambda w: w != self.id, row), layout)
+       layout = filter(lambda row: len(row) > 0, layout)
+       self.dashboard.layout = json.dumps(layout)
+       self.dashboard.save()
+       super(Widget, self).delete_instance(*args, **kwargs)
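Illustration of the layout rewrite in delete_instance() above, with a made-up dashboard layout (rows of widget ids); the deleted widget is filtered out and emptied rows are dropped:

layout = [[1, 2], [3], [4, 5]]
widget_id = 3
layout = map(lambda row: filter(lambda w: w != widget_id, row), layout)
layout = filter(lambda row: len(row) > 0, layout)
print layout  # [[1, 2], [4, 5]]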
class Event(BaseModel):
-   user = peewee.ForeignKeyField(User, related_name="events")
+   user = peewee.ForeignKeyField(User, related_name="events", null=True)
    action = peewee.CharField()
    object_type = peewee.CharField()
    object_id = peewee.CharField(null=True)
    additional_properties = peewee.TextField(null=True)
-   created_at = peewee.DateTimeField(default=datetime.datetime.now)
+   created_at = DateTimeTZField(default=datetime.datetime.now)

    class Meta:
        db_table = 'events'

@@ -556,7 +719,6 @@ def create_db(create_tables, drop_tables):
        if drop_tables and model.table_exists():
            # TODO: submit PR to peewee to allow passing cascade option to drop_table.
            db.database.execute_sql('DROP TABLE %s CASCADE' % model._meta.db_table)
-           #model.drop_table()

        if create_tables and not model.table_exists():
            model.create_table()
@@ -10,10 +10,7 @@ class require_permissions(object):
    def __call__(self, fn):
        @functools.wraps(fn)
        def decorated(*args, **kwargs):
-           has_permissions = reduce(lambda a, b: a and b,
-                                    map(lambda permission: permission in current_user.permissions,
-                                        self.permissions),
-                                    True)
+           has_permissions = current_user.has_permissions(self.permissions)

            if has_permissions:
                return fn(*args, **kwargs)

redash/query_runner/__init__.py (new file, 117 lines)
@@ -0,0 +1,117 @@
import logging
import json

import jsonschema
from jsonschema import ValidationError

logger = logging.getLogger(__name__)

__all__ = [
    'ValidationError',
    'BaseQueryRunner',
    'TYPE_DATETIME',
    'TYPE_BOOLEAN',
    'TYPE_INTEGER',
    'TYPE_STRING',
    'TYPE_DATE',
    'TYPE_FLOAT',
    'SUPPORTED_COLUMN_TYPES',
    'register',
    'get_query_runner',
    'import_query_runners'
]

# Valid types of columns returned in results:
TYPE_INTEGER = 'integer'
TYPE_FLOAT = 'float'
TYPE_BOOLEAN = 'boolean'
TYPE_STRING = 'string'
TYPE_DATETIME = 'datetime'
TYPE_DATE = 'date'

SUPPORTED_COLUMN_TYPES = set([
    TYPE_INTEGER,
    TYPE_FLOAT,
    TYPE_BOOLEAN,
    TYPE_STRING,
    TYPE_DATETIME,
    TYPE_DATE
])


class BaseQueryRunner(object):
    def __init__(self, configuration):
        jsonschema.validate(configuration, self.configuration_schema())
        self.syntax = 'sql'
        self.configuration = configuration

    @classmethod
    def name(cls):
        return cls.__name__

    @classmethod
    def type(cls):
        return cls.__name__.lower()

    @classmethod
    def enabled(cls):
        return True

    @classmethod
    def annotate_query(cls):
        return True

    @classmethod
    def configuration_schema(cls):
        return {}

    def run_query(self, query):
        raise NotImplementedError()

    def get_schema(self):
        return []

    @classmethod
    def to_dict(cls):
        return {
            'name': cls.name(),
            'type': cls.type(),
            'configuration_schema': cls.configuration_schema()
        }


query_runners = {}


def register(query_runner_class):
    global query_runners
    if query_runner_class.enabled():
        logger.debug("Registering %s (%s) query runner.", query_runner_class.name(), query_runner_class.type())
        query_runners[query_runner_class.type()] = query_runner_class
    else:
        logger.warning("%s query runner enabled but not supported, not registering. Either disable or install missing dependencies.", query_runner_class.name())


def get_query_runner(query_runner_type, configuration_json):
    query_runner_class = query_runners.get(query_runner_type, None)
    if query_runner_class is None:
        return None

    return query_runner_class(json.loads(configuration_json))


def validate_configuration(query_runner_type, configuration_json):
    query_runner_class = query_runners.get(query_runner_type, None)
    if query_runner_class is None:
        return False

    try:
        jsonschema.validate(json.loads(configuration_json), query_runner_class.configuration_schema())
    except (ValidationError, ValueError):
        return False

    return True


def import_query_runners(query_runner_imports):
    for runner_import in query_runner_imports:
        __import__(runner_import)
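A toy example (not part of the diff) of the registry API above; the Answer class and its output are made up:

import json
from redash.query_runner import BaseQueryRunner, register, get_query_runner, TYPE_INTEGER

class Answer(BaseQueryRunner):
    def run_query(self, query):
        # Ignores the query text and returns a single constant row.
        data = {'columns': [{'name': 'answer', 'friendly_name': 'answer', 'type': TYPE_INTEGER}],
                'rows': [{'answer': 42}]}
        return json.dumps(data), None

register(Answer)
runner = get_query_runner('answer', '{}')  # type() defaults to the lowercased class name
print runner.run_query("ignored")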

redash/query_runner/big_query.py (new file, 204 lines)
@@ -0,0 +1,204 @@
import datetime
import json
import httplib2
import logging
import sys
import time

import requests

from redash.query_runner import *
from redash.utils import JSONEncoder

logger = logging.getLogger(__name__)

try:
    import apiclient.errors
    from apiclient.discovery import build
    from apiclient.errors import HttpError
    from oauth2client.client import SignedJwtAssertionCredentials
    from oauth2client import gce

    enabled = True
except ImportError:
    logger.warning("Missing dependencies. Please install google-api-python-client and oauth2client.")
    logger.warning("You can use pip: pip install google-api-python-client oauth2client")

    enabled = False

types_map = {
    'INTEGER': TYPE_INTEGER,
    'FLOAT': TYPE_FLOAT,
    'BOOLEAN': TYPE_BOOLEAN,
    'STRING': TYPE_STRING,
    'TIMESTAMP': TYPE_DATETIME,
}


def transform_row(row, fields):
    column_index = 0
    row_data = {}

    for cell in row["f"]:
        field = fields[column_index]
        cell_value = cell['v']

        if cell_value is None:
            pass
        # Otherwise just cast the value
        elif field['type'] == 'INTEGER':
            cell_value = int(cell_value)
        elif field['type'] == 'FLOAT':
            cell_value = float(cell_value)
        elif field['type'] == 'BOOLEAN':
            cell_value = cell_value.lower() == "true"
        elif field['type'] == 'TIMESTAMP':
            cell_value = datetime.datetime.fromtimestamp(float(cell_value))

        row_data[field["name"]] = cell_value
        column_index += 1

    return row_data


def _load_key(filename):
    f = file(filename, "rb")
    try:
        return f.read()
    finally:
        f.close()


def _get_query_results(jobs, project_id, job_id, start_index):
    query_reply = jobs.getQueryResults(projectId=project_id, jobId=job_id, startIndex=start_index).execute()
    logging.debug('query_reply %s', query_reply)
    if not query_reply['jobComplete']:
        time.sleep(10)
        return _get_query_results(jobs, project_id, job_id, start_index)

    return query_reply


class BigQuery(BaseQueryRunner):
    @classmethod
    def enabled(cls):
        return enabled

    @classmethod
    def configuration_schema(cls):
        return {
            'type': 'object',
            'properties': {
                'serviceAccount': {
                    'type': 'string',
                    'title': 'Service Account'
                },
                'projectId': {
                    'type': 'string',
                    'title': 'Project ID'
                },
                'privateKey': {
                    'type': 'string',
                    'title': 'Private Key Path'
                }
            },
            'required': ['serviceAccount', 'projectId', 'privateKey']
        }

    def __init__(self, configuration_json):
        super(BigQuery, self).__init__(configuration_json)

    def _get_bigquery_service(self):
        scope = [
            "https://www.googleapis.com/auth/bigquery",
        ]

        private_key = _load_key(self.configuration["privateKey"])
        credentials = SignedJwtAssertionCredentials(self.configuration['serviceAccount'], private_key, scope=scope)
        http = httplib2.Http()
        http = credentials.authorize(http)

        return build("bigquery", "v2", http=http)

    def _get_project_id(self):
        return self.configuration["projectId"]

    def run_query(self, query):
        bigquery_service = self._get_bigquery_service()

        jobs = bigquery_service.jobs()
        job_data = {
            "configuration": {
                "query": {
                    "query": query,
                }
            }
        }

        logger.debug("BigQuery got query: %s", query)

        project_id = self._get_project_id()

        try:
            insert_response = jobs.insert(projectId=project_id, body=job_data).execute()
            current_row = 0
            query_reply = _get_query_results(jobs, project_id=project_id,
                                             job_id=insert_response['jobReference']['jobId'], start_index=current_row)

            logger.debug("bigquery replied: %s", query_reply)

            rows = []

            while ("rows" in query_reply) and current_row < query_reply['totalRows']:
                for row in query_reply["rows"]:
                    rows.append(transform_row(row, query_reply["schema"]["fields"]))

                current_row += len(query_reply['rows'])
                query_reply = jobs.getQueryResults(projectId=project_id, jobId=query_reply['jobReference']['jobId'],
                                                   startIndex=current_row).execute()

            columns = [{'name': f["name"],
                        'friendly_name': f["name"],
                        'type': types_map.get(f['type'], "string")} for f in query_reply["schema"]["fields"]]

            data = {
                "columns": columns,
                "rows": rows
            }
            error = None

            json_data = json.dumps(data, cls=JSONEncoder)
        except apiclient.errors.HttpError, e:
            json_data = None
            error = e.content
        except KeyboardInterrupt:
            error = "Query cancelled by user."
            json_data = None
        except Exception:
            raise sys.exc_info()[1], None, sys.exc_info()[2]

        return json_data, error


class BigQueryGCE(BigQuery):
    @classmethod
    def type(cls):
        return "bigquery_gce"

    @classmethod
    def configuration_schema(cls):
        return {}

    def _get_project_id(self):
        return requests.get('http://metadata/computeMetadata/v1/project/project-id', headers={'Metadata-Flavor': 'Google'}).content

    def _get_bigquery_service(self):
        credentials = gce.AppAssertionCredentials(scope='https://www.googleapis.com/auth/bigquery')
        http = httplib2.Http()
        http = credentials.authorize(http)

        return build("bigquery", "v2", http=http)


register(BigQuery)
register(BigQueryGCE)

redash/query_runner/graphite.py (new file, 83 lines)
@@ -0,0 +1,83 @@
import json
import datetime
import requests
import logging

from redash.query_runner import *
from redash.utils import JSONEncoder

logger = logging.getLogger(__name__)


def _transform_result(response):
    columns = ({'name': 'Time::x', 'type': TYPE_DATETIME},
               {'name': 'value::y', 'type': TYPE_FLOAT},
               {'name': 'name::series', 'type': TYPE_STRING})

    rows = []

    for series in response.json():
        for values in series['datapoints']:
            timestamp = datetime.datetime.fromtimestamp(int(values[1]))
            rows.append({'Time::x': timestamp, 'name::series': series['target'], 'value::y': values[0]})

    data = {'columns': columns, 'rows': rows}
    return json.dumps(data, cls=JSONEncoder)


class Graphite(BaseQueryRunner):
    @classmethod
    def configuration_schema(cls):
        return {
            'type': 'object',
            'properties': {
                'url': {
                    'type': 'string'
                },
                'username': {
                    'type': 'string'
                },
                'password': {
                    'type': 'string'
                },
                'verify': {
                    'type': 'boolean',
                    'title': 'Verify SSL certificate'
                }
            },
            'required': ['url']
        }

    @classmethod
    def annotate_query(cls):
        return False

    def __init__(self, configuration_json):
        super(Graphite, self).__init__(configuration_json)

        if "username" in self.configuration and self.configuration["username"]:
            self.auth = (self.configuration["username"], self.configuration["password"])
        else:
            self.auth = None

        self.verify = self.configuration["verify"]
        self.base_url = "%s/render?format=json&" % self.configuration['url']

    def run_query(self, query):
        url = "%s%s" % (self.base_url, "&".join(query.split("\n")))
        error = None
        data = None

        try:
            response = requests.get(url, auth=self.auth, verify=self.verify)

            if response.status_code == 200:
                data = _transform_result(response)
            else:
                error = "Failed getting results (%d)" % response.status_code
        except Exception, ex:
            data = None
            error = ex.message

        return data, error


register(Graphite)
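Note on the query format: run_query() above joins the query's lines with "&", so a Graphite "query" in re:dash is simply a list of render parameters, one per line. Illustration with hypothetical targets:

query = "target=stats.web01.cpu\ntarget=stats.web02.cpu\nfrom=-1h"
print "&".join(query.split("\n"))
# target=stats.web01.cpu&target=stats.web02.cpu&from=-1h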

redash/query_runner/mongodb.py (new file, 183 lines)
@@ -0,0 +1,183 @@
import json
import datetime
import logging
import re
import time

from redash.utils import JSONEncoder
from redash.query_runner import *

logger = logging.getLogger(__name__)

try:
    import pymongo
    from bson.objectid import ObjectId
    enabled = True

except ImportError:
    logger.warning("Missing dependencies. Please install pymongo.")
    logger.warning("You can use pip: pip install pymongo")
    enabled = False


TYPES_MAP = {
    str: TYPE_STRING,
    unicode: TYPE_STRING,
    int: TYPE_INTEGER,
    long: TYPE_INTEGER,
    float: TYPE_FLOAT,
    bool: TYPE_BOOLEAN,
    datetime.datetime: TYPE_DATETIME,
}

date_regex = re.compile("ISODate\(\"(.*)\"\)", re.IGNORECASE)


def _get_column_by_name(columns, column_name):
    for c in columns:
        if "name" in c and c["name"] == column_name:
            return c

    return None


def _convert_date(q, field_name):
    m = date_regex.findall(q[field_name])
    if len(m) > 0:
        if q[field_name].find(":") == -1:
            q[field_name] = datetime.datetime.fromtimestamp(time.mktime(time.strptime(m[0], "%Y-%m-%d")))
        else:
            q[field_name] = datetime.datetime.fromtimestamp(time.mktime(time.strptime(m[0], "%Y-%m-%d %H:%M")))


class MongoDB(BaseQueryRunner):
    @classmethod
    def configuration_schema(cls):
        return {
            'type': 'object',
            'properties': {
                'connectionString': {
                    'type': 'string',
                    'title': 'Connection String'
                },
                'dbName': {
                    'type': 'string',
                    'title': "Database Name"
                },
                'replicaSetName': {
                    'type': 'string',
                    'title': 'Replica Set Name'
                },
            },
            'required': ['connectionString']
        }

    @classmethod
    def enabled(cls):
        return enabled

    @classmethod
    def annotate_query(cls):
        return False

    def __init__(self, configuration_json):
        super(MongoDB, self).__init__(configuration_json)

        self.syntax = 'json'

        self.db_name = self.configuration["dbName"]

        self.is_replica_set = True if "replicaSetName" in self.configuration and self.configuration["replicaSetName"] else False

    def run_query(self, query):
        if self.is_replica_set:
            db_connection = pymongo.MongoReplicaSetClient(self.configuration["connectionString"], replicaSet=self.configuration["replicaSetName"])
        else:
            db_connection = pymongo.MongoClient(self.configuration["connectionString"])

        if self.db_name not in db_connection.database_names():
            return None, "Unknown database name '%s'" % self.db_name

        db = db_connection[self.db_name]

        logger.debug("mongodb connection string: %s", self.configuration['connectionString'])
        logger.debug("mongodb got query: %s", query)

        try:
            query_data = json.loads(query)
        except ValueError:
            return None, "Invalid query format. The query is not a valid JSON."

        if "collection" not in query_data:
            return None, "'collection' must have a value to run a query"
        else:
            collection = query_data["collection"]

        q = None
        if "query" in query_data:
            q = query_data["query"]
            for k in q:
                if q[k] and type(q[k]) in [str, unicode]:
                    logging.debug(q[k])
                    _convert_date(q, k)
                elif q[k] and type(q[k]) is dict:
                    for k2 in q[k]:
                        if type(q[k][k2]) in [str, unicode]:
                            _convert_date(q[k], k2)

        f = None
        if "fields" in query_data:
            f = query_data["fields"]

        s = None
        if "sort" in query_data and query_data["sort"]:
            s = []
            for field_data in query_data["sort"]:
                s.append((field_data["name"], field_data["direction"]))

        columns = []
        rows = []

        error = None
        json_data = None

        if s:
            cursor = db[collection].find(q, f).sort(s)
        else:
            cursor = db[collection].find(q, f)

        if "limit" in query_data and query_data["limit"]:
            cursor = cursor.limit(query_data["limit"])

        for r in cursor:
            for k in r:
                if _get_column_by_name(columns, k) is None:
                    columns.append({
                        "name": k,
                        "friendly_name": k,
                        "type": TYPES_MAP.get(type(r[k]), TYPE_STRING)
                    })

                # Convert ObjectId to string
                if type(r[k]) == ObjectId:
                    r[k] = str(r[k])

            rows.append(r)

        if f:
            ordered_columns = []
            for k in sorted(f, key=f.get):
                ordered_columns.append(_get_column_by_name(columns, k))

            columns = ordered_columns

        data = {
            "columns": columns,
            "rows": rows
        }
        error = None
        json_data = json.dumps(data, cls=JSONEncoder)

        return json_data, error


register(MongoDB)
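For reference, the shape of query document run_query() above parses ("collection" is required; "query", "fields", "sort" and "limit" are optional, and ISODate strings pass through _convert_date). The collection and field names here are made up:

example_query = """
{
    "collection": "events",
    "query": {"created_at": {"$gt": "ISODate(\\"2015-01-01\\")"}},
    "fields": {"name": 1, "created_at": 2},
    "sort": [{"name": "created_at", "direction": -1}],
    "limit": 100
}
"""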

redash/query_runner/mysql.py (new file, 136 lines)
@@ -0,0 +1,136 @@
import sys
import json
import logging

from redash.utils import JSONEncoder
from redash.query_runner import *

logger = logging.getLogger(__name__)


class Mysql(BaseQueryRunner):
    @classmethod
    def configuration_schema(cls):
        return {
            'type': 'object',
            'properties': {
                'host': {
                    'type': 'string'
                },
                'user': {
                    'type': 'string'
                },
                'passwd': {
                    'type': 'string',
                    'title': 'Password'
                },
                'db': {
                    'type': 'string',
                    'title': 'Database name'
                },
                "port": {
                    "type": "number"
                },
            },
            'required': ['db']
        }

    @classmethod
    def enabled(cls):
        try:
            import MySQLdb
        except ImportError:
            return False

        return True

    def __init__(self, configuration_json):
        super(Mysql, self).__init__(configuration_json)

    def get_schema(self):
        query = """
        SELECT col.table_schema,
               col.table_name,
               col.column_name
        FROM `information_schema`.`columns` col
        INNER JOIN
          (SELECT table_schema,
                  TABLE_NAME
           FROM information_schema.tables
           WHERE table_type <> 'SYSTEM VIEW' AND table_schema NOT IN ('performance_schema', 'mysql')) tables ON tables.table_schema = col.table_schema
        AND tables.TABLE_NAME = col.TABLE_NAME;
        """

        results, error = self.run_query(query)

        if error is not None:
            raise Exception("Failed getting schema.")

        results = json.loads(results)

        schema = {}
        for row in results['rows']:
            if row['table_schema'] != self.configuration['db']:
                table_name = '{}.{}'.format(row['table_schema'], row['table_name'])
            else:
                table_name = row['table_name']

            if table_name not in schema:
                schema[table_name] = {'name': table_name, 'columns': []}

            schema[table_name]['columns'].append(row['column_name'])

        return schema.values()

    def run_query(self, query):
        import MySQLdb

        connection = MySQLdb.connect(host=self.configuration.get('host', ''),
                                     user=self.configuration.get('user', ''),
                                     passwd=self.configuration.get('passwd', ''),
                                     db=self.configuration['db'],
                                     port=self.configuration.get('port', 3306),
                                     charset='utf8', use_unicode=True)
        cursor = connection.cursor()

        logger.debug("MySQL running query: %s", query)

        try:
            cursor.execute(query)

            data = cursor.fetchall()

            cursor_desc = cursor.description
            if cursor_desc is not None:
                num_fields = len(cursor_desc)
                column_names = [i[0] for i in cursor.description]

                rows = [dict(zip(column_names, row)) for row in data]

                # TODO: add types support
                columns = [{'name': col_name,
                            'friendly_name': col_name,
                            'type': None} for col_name in column_names]

                data = {'columns': columns, 'rows': rows}
                json_data = json.dumps(data, cls=JSONEncoder)
                error = None
            else:
                json_data = None
                error = "No data was returned."

            cursor.close()
        except MySQLdb.Error, e:
            json_data = None
            error = e.args[1]
        except KeyboardInterrupt:
            error = "Query cancelled by user."
            json_data = None
        except Exception as e:
            raise sys.exc_info()[1], None, sys.exc_info()[2]
        finally:
            connection.close()

        return json_data, error


register(Mysql)
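The row shaping in run_query() above is a zip of the cursor's column names against each raw tuple; a small illustration with made-up data:

column_names = ['id', 'name']
data = [(1, 'a'), (2, 'b')]
rows = [dict(zip(column_names, row)) for row in data]
print rows  # [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}]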

redash/query_runner/pg.py (new file, 170 lines)
@@ -0,0 +1,170 @@
import json
import logging
import psycopg2
import select
import sys

from redash.query_runner import *
from redash.utils import JSONEncoder

logger = logging.getLogger(__name__)

types_map = {
    20: TYPE_INTEGER,
    21: TYPE_INTEGER,
    23: TYPE_INTEGER,
    700: TYPE_FLOAT,
    1700: TYPE_FLOAT,
    701: TYPE_FLOAT,
    16: TYPE_BOOLEAN,
    1082: TYPE_DATE,
    1114: TYPE_DATETIME,
    1184: TYPE_DATETIME,
    1014: TYPE_STRING,
    1015: TYPE_STRING,
    1008: TYPE_STRING,
    1009: TYPE_STRING,
    2951: TYPE_STRING
}


def _wait(conn):
    while 1:
        try:
            state = conn.poll()
            if state == psycopg2.extensions.POLL_OK:
                break
            elif state == psycopg2.extensions.POLL_WRITE:
                select.select([], [conn.fileno()], [])
            elif state == psycopg2.extensions.POLL_READ:
                select.select([conn.fileno()], [], [])
            else:
                raise psycopg2.OperationalError("poll() returned %s" % state)
        except select.error:
            raise psycopg2.OperationalError("select.error received")


class PostgreSQL(BaseQueryRunner):
    @classmethod
    def configuration_schema(cls):
        return {
            "type": "object",
            "properties": {
                "user": {
                    "type": "string"
                },
                "password": {
                    "type": "string"
                },
                "host": {
                    "type": "string"
                },
                "port": {
                    "type": "number"
                },
                "dbname": {
                    "type": "string",
                    "title": "Database Name"
                }
            },
            "required": ["dbname"]
        }

    @classmethod
    def type(cls):
        return "pg"

    def __init__(self, configuration_json):
        super(PostgreSQL, self).__init__(configuration_json)

        values = []
        for k, v in self.configuration.iteritems():
            values.append("{}={}".format(k, v))

        self.connection_string = " ".join(values)

    def get_schema(self):
        query = """
        SELECT table_schema, table_name, column_name
        FROM information_schema.columns
        WHERE table_schema NOT IN ('pg_catalog', 'information_schema');
        """

        results, error = self.run_query(query)

        if error is not None:
            raise Exception("Failed getting schema.")

        results = json.loads(results)

        schema = {}
        for row in results['rows']:
            if row['table_schema'] != 'public':
                table_name = '{}.{}'.format(row['table_schema'], row['table_name'])
            else:
                table_name = row['table_name']

            if table_name not in schema:
                schema[table_name] = {'name': table_name, 'columns': []}

            schema[table_name]['columns'].append(row['column_name'])

        return schema.values()

    def run_query(self, query):
        connection = psycopg2.connect(self.connection_string, async=True)
        _wait(connection)

        cursor = connection.cursor()

        try:
            cursor.execute(query)
            _wait(connection)

            # While a set would be more efficient here, it sorts the data, which is not what we want;
            # given the small size of the data, a list is fine.
            column_names = []
            columns = []
            duplicates_counter = 1

            for column in cursor.description:
                # TODO: this deduplication needs to be generalized and reused in all query runners.
                column_name = column.name
                if column_name in column_names:
                    column_name += str(duplicates_counter)
                    duplicates_counter += 1

                column_names.append(column_name)

                columns.append({
                    'name': column_name,
                    'friendly_name': column_name,
                    'type': types_map.get(column.type_code, None)
                })

            rows = [dict(zip(column_names, row)) for row in cursor]

            data = {'columns': columns, 'rows': rows}
            json_data = json.dumps(data, cls=JSONEncoder)
            error = None
            cursor.close()
        except (select.error, OSError) as e:
            logging.exception(e)
            error = "Query interrupted. Please retry."
            json_data = None
        except psycopg2.DatabaseError as e:
            logging.exception(e)
            json_data = None
            error = e.message
        except KeyboardInterrupt:
            connection.cancel()
            error = "Query cancelled by user."
            json_data = None
        except Exception as e:
            raise sys.exc_info()[1], None, sys.exc_info()[2]
        finally:
            connection.close()

        return json_data, error


register(PostgreSQL)

redash/query_runner/python.py (new file, 181 lines)
@@ -0,0 +1,181 @@
import sys
import json
import logging

from redash.query_runner import *
from redash import models

import importlib

logger = logging.getLogger(__name__)

from RestrictedPython import compile_restricted
from RestrictedPython.Guards import safe_builtins

ALLOWED_MODULES = {}


def custom_write(obj):
    """
    Custom hook which controls the way objects/lists/tuples/dicts behave in
    RestrictedPython
    """
    return obj


def custom_import(name, globals=None, locals=None, fromlist=(), level=0):
    if name in ALLOWED_MODULES:
        m = None
        if ALLOWED_MODULES[name] is None:
            m = importlib.import_module(name)
            ALLOWED_MODULES[name] = m
        else:
            m = ALLOWED_MODULES[name]

        return m

    raise Exception("'{0}' is not configured as a supported import module".format(name))


def custom_get_item(obj, key):
    return obj[key]


def get_query_result(query_id):
    try:
        query = models.Query.get_by_id(query_id)
    except models.Query.DoesNotExist:
        raise Exception("Query id %s does not exist." % query_id)

    if query.latest_query_data is None:
        raise Exception("Query does not have results yet.")

    if query.latest_query_data.data is None:
        raise Exception("Query does not have results yet.")

    return json.loads(query.latest_query_data.data)


def execute_query(data_source_name_or_id, query):
    try:
        if type(data_source_name_or_id) == int:
            data_source = models.DataSource.get_by_id(data_source_name_or_id)
        else:
            data_source = models.DataSource.get(models.DataSource.name==data_source_name_or_id)
    except models.DataSource.DoesNotExist:
        raise Exception("Wrong data source name/id: %s." % data_source_name_or_id)

    query_runner = get_query_runner(data_source.type, data_source.options)

    data, error = query_runner.run_query(query)
    if error is not None:
        raise Exception(error)

    # TODO: allow avoiding the json.dumps/loads in same process
    return json.loads(data)


def add_result_column(result, column_name, friendly_name, column_type):
    """ Helper function to add columns inside a Python script running in re:dash in an easier way """
    if column_type not in SUPPORTED_COLUMN_TYPES:
        raise Exception("'{0}' is not a supported column type".format(column_type))

    if not "columns" in result:
        result["columns"] = []

    result["columns"].append({
        "name": column_name,
        "friendly_name": friendly_name,
        "type": column_type
    })


def add_result_row(result, values):
    if not "rows" in result:
        result["rows"] = []

    result["rows"].append(values)


class Python(BaseQueryRunner):
    """
    This is very, very unsafe. Use at your own risk with people you really trust.
    """
    @classmethod
    def configuration_schema(cls):
        return {
            'type': 'object',
            'properties': {
                'allowedImportModules': {
                    'type': 'string',
                    'title': 'Modules to import prior to running the script'
                }
            },
        }

    @classmethod
    def enabled(cls):
        return True

    @classmethod
    def annotate_query(cls):
        return False

    def __init__(self, configuration_json):
        global ALLOWED_MODULES

        super(Python, self).__init__(configuration_json)

        self.syntax = "python"

        if self.configuration.get("allowedImportModules", None):
            for item in self.configuration["allowedImportModules"].split(","):
                ALLOWED_MODULES[item] = None

    def run_query(self, query):
        try:
            error = None

            code = compile_restricted(query, '<string>', 'exec')

            safe_builtins["_write_"] = custom_write
            safe_builtins["__import__"] = custom_import
            safe_builtins["_getattr_"] = getattr
            safe_builtins["getattr"] = getattr
            safe_builtins["_setattr_"] = setattr
            safe_builtins["setattr"] = setattr
            safe_builtins["_getitem_"] = custom_get_item

            script_locals = {"result": {"rows": [], "columns": []}}

            restricted_globals = dict(__builtins__=safe_builtins)
            restricted_globals["get_query_result"] = get_query_result
            restricted_globals["execute_query"] = execute_query
            restricted_globals["add_result_column"] = add_result_column
            restricted_globals["add_result_row"] = add_result_row

            restricted_globals["TYPE_DATETIME"] = TYPE_DATETIME
            restricted_globals["TYPE_BOOLEAN"] = TYPE_BOOLEAN
            restricted_globals["TYPE_INTEGER"] = TYPE_INTEGER
            restricted_globals["TYPE_STRING"] = TYPE_STRING
            restricted_globals["TYPE_DATE"] = TYPE_DATE
            restricted_globals["TYPE_FLOAT"] = TYPE_FLOAT

            # TODO: Figure out the best way to have a timeout on a script.
            #       One option is to use ETA with Celery + timeouts on workers,
            #       and replacement of the worker process every X requests handled.

            exec(code) in restricted_globals, script_locals

            if script_locals['result'] is None:
                raise Exception("result wasn't set to value.")

            json_data = json.dumps(script_locals['result'])
        except KeyboardInterrupt:
            error = "Query cancelled by user."
            json_data = None
        except Exception as e:
            raise sys.exc_info()[1], None, sys.exc_info()[2]

        return json_data, error


register(Python)
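A sample "query" for the Python runner above (the query id 42 is hypothetical); scripts fill the injected result dict through the helper functions exposed in restricted_globals:

add_result_column(result, "count", "Count", TYPE_INTEGER)

source = get_query_result(42)
add_result_row(result, {"count": len(source["rows"])})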

redash/query_runner/script.py (new file, 67 lines)
@@ -0,0 +1,67 @@
import os
import sys
import subprocess

from redash.query_runner import *


class Script(BaseQueryRunner):
    @classmethod
    def enabled(cls):
        return "check_output" in subprocess.__dict__

    @classmethod
    def configuration_schema(cls):
        return {
            'type': 'object',
            'properties': {
                'path': {
                    'type': 'string',
                    'title': 'Scripts path'
                }
            },
            'required': ['path']
        }

    @classmethod
    def annotate_query(cls):
        return False

    def __init__(self, configuration_json):
        super(Script, self).__init__(configuration_json)

        # Poor man's protection against running scripts from outside the scripts directory
        if self.configuration["path"].find("../") > -1:
            raise ValidationError("Scripts can only be run from the configured scripts directory")

    def run_query(self, query):
        try:
            json_data = None
            error = None

            query = query.strip()

            script = os.path.join(self.configuration["path"], query.split(" ")[0])
            if not os.path.exists(script):
                return None, "Script '%s' not found in script directory" % query

            script = os.path.join(self.configuration["path"], query)

            output = subprocess.check_output(script.split(" "), shell=False)
            if output is not None:
                output = output.strip()
                if output != "":
                    return output, None

            error = "Error reading output"
        except subprocess.CalledProcessError as e:
            return None, str(e)
        except KeyboardInterrupt:
            error = "Query cancelled by user."
            json_data = None
        except Exception as e:
            raise sys.exc_info()[1], None, sys.exc_info()[2]

        return json_data, error


register(Script)
@@ -1,16 +1,30 @@ redash/query_runner/url.py
import json
import logging
import sys
import os
import urllib2

-def url(connection_string):
+from redash.query_runner import *

-   def query_runner(query):
-       base_url = connection_string

+class Url(BaseQueryRunner):
+   @classmethod
+   def configuration_schema(cls):
+       return {
+           'type': 'object',
+           'properties': {
+               'url': {
+                   'type': 'string',
+                   'title': 'URL base path'
+               }
+           }
+       }
+
+   @classmethod
+   def annotate_query(cls):
+       return False
+
+   def run_query(self, query):
+       base_url = self.configuration["url"]

        try:
            json_data = None
            error = None

            query = query.strip()

@@ -41,5 +55,4 @@ def url(connection_string):

        return json_data, error

-query_runner.annotate_query = False
-return query_runner
+register(Url)
@@ -44,17 +44,19 @@ STATSD_HOST = os.environ.get('REDASH_STATSD_HOST', "127.0.0.1")
STATSD_PORT = int(os.environ.get('REDASH_STATSD_PORT', "8125"))
STATSD_PREFIX = os.environ.get('REDASH_STATSD_PREFIX', "redash")

# The following is kept for backward compatibility and shouldn't be used any more.
CONNECTION_ADAPTER = os.environ.get("REDASH_CONNECTION_ADAPTER", "pg")
CONNECTION_STRING = os.environ.get("REDASH_CONNECTION_STRING", "user= password= host= port=5439 dbname=")

# Connection settings for re:dash's own database (where we store the queries, results, etc)
DATABASE_CONFIG = parse_db_url(os.environ.get("REDASH_DATABASE_URL", "postgresql://postgres"))

# Celery related settings
CELERY_BROKER = os.environ.get("REDASH_CELERY_BROKER", REDIS_URL)
CELERY_BACKEND = os.environ.get("REDASH_CELERY_BACKEND", REDIS_URL)
CELERY_FLOWER_URL = os.environ.get("REDASH_CELERY_FLOWER_URL", "/flower")

# The following enables a periodic job (every 5 minutes) that removes unused query results. It stays behind
# this "feature flag" until proven safe.
QUERY_RESULTS_CLEANUP_ENABLED = parse_boolean(os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_ENABLED", "false"))

AUTH_TYPE = os.environ.get("REDASH_AUTH_TYPE", "hmac")
PASSWORD_LOGIN_ENABLED = parse_boolean(os.environ.get("REDASH_PASSWORD_LOGIN_ENABLED", "true"))

# Google Apps domain to allow access from; any user with email in this Google Apps will be allowed
# access
@@ -64,14 +66,23 @@ GOOGLE_CLIENT_ID = os.environ.get("REDASH_GOOGLE_CLIENT_ID", "")
GOOGLE_CLIENT_SECRET = os.environ.get("REDASH_GOOGLE_CLIENT_SECRET", "")
GOOGLE_OAUTH_ENABLED = GOOGLE_CLIENT_ID and GOOGLE_CLIENT_SECRET

-PASSWORD_LOGIN_ENABLED = parse_boolean(os.environ.get("REDASH_PASSWORD_LOGIN_ENABLED", "true"))
STATIC_ASSETS_PATH = fix_assets_path(os.environ.get("REDASH_STATIC_ASSETS_PATH", "../rd_ui/app/"))
WORKERS_COUNT = int(os.environ.get("REDASH_WORKERS_COUNT", "2"))
-JOB_EXPIRY_TIME = int(os.environ.get("REDASH_JOB_EXPIRY_TIME", 3600*6))
+JOB_EXPIRY_TIME = int(os.environ.get("REDASH_JOB_EXPIRY_TIME", 3600 * 6))
COOKIE_SECRET = os.environ.get("REDASH_COOKIE_SECRET", "c292a0a3aa32397cdb050e233733900f")
LOG_LEVEL = os.environ.get("REDASH_LOG_LEVEL", "INFO")
CLIENT_SIDE_METRICS = parse_boolean(os.environ.get("REDASH_CLIENT_SIDE_METRICS", "false"))
ANALYTICS = os.environ.get("REDASH_ANALYTICS", "")

+# Query Runners
+QUERY_RUNNERS = array_from_string(os.environ.get("REDASH_ENABLED_QUERY_RUNNERS", ",".join([
+    'redash.query_runner.big_query',
+    'redash.query_runner.graphite',
+    'redash.query_runner.mongodb',
+    'redash.query_runner.mysql',
+    'redash.query_runner.pg',
+    'redash.query_runner.script',
+    'redash.query_runner.url',
+])))

# Features:
FEATURE_TABLES_PERMISSIONS = parse_boolean(os.environ.get("REDASH_FEATURE_TABLES_PERMISSIONS", "false"))
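Sketch of how this setting feeds the registry (assuming the modules above are importable with their dependencies installed): importing each enabled module runs its register() call.

from redash import settings
from redash.query_runner import import_query_runners, query_runners

import_query_runners(settings.QUERY_RUNNERS)
print query_runners.keys()  # e.g. ['pg', 'mysql', ...], depending on installed dependencies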
@@ -8,7 +8,7 @@ from celery.utils.log import get_task_logger
from redash import redis_connection, models, statsd_client, settings
from redash.utils import gen_query_hash
from redash.worker import celery
-from redash.data.query_runner import get_query_runner
+from redash.query_runner import get_query_runner

logger = get_task_logger(__name__)
@@ -47,12 +47,13 @@ class QueryTask(object):
        return self._async_result.id

    @classmethod
-   def add_task(cls, query, data_source, scheduled=False):
+   def add_task(cls, query, data_source, scheduled=False, metadata={}):
        query_hash = gen_query_hash(query)
        logging.info("[Manager][%s] Inserting job", query_hash)
+       logging.info("[Manager] Metadata: [%s]", metadata)
        try_count = 0
        job = None

        while try_count < cls.MAX_RETRIES:
            try_count += 1

@@ -77,8 +78,9 @@ class QueryTask(object):
        else:
            queue_name = data_source.queue_name

-       result = execute_query.apply_async(args=(query, data_source.id), queue=queue_name)
+       result = execute_query.apply_async(args=(query, data_source.id, metadata), queue=queue_name)
        job = cls(async_result=result)

        logging.info("[Manager][%s] Created new job: %s", query_hash, job.id)
        pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
        pipe.execute()
@@ -146,13 +148,11 @@ def refresh_queries():

    outdated_queries_count = 0
    for query in models.Query.outdated_queries():
-       # TODO: this should go into lower priority
-       QueryTask.add_task(query.query, query.data_source, scheduled=True)
+       QueryTask.add_task(query.query, query.data_source, scheduled=True,
+                          metadata={'Query ID': query.id, 'Username': 'Scheduled'})
        outdated_queries_count += 1

    statsd_client.gauge('manager.outdated_queries', outdated_queries_count)
    # TODO: decide if we still need this
    # statsd_client.gauge('manager.queue_size', self.redis_connection.zcard('jobs'))

    logger.info("Done refreshing queries. Found %d outdated queries." % outdated_queries_count)
@@ -171,8 +171,10 @@ def refresh_queries():
|
||||
def cleanup_tasks():
|
||||
# in case of cold restart of the workers, there might be jobs that still have their "lock" object, but aren't really
|
||||
# going to run. this job removes them.
|
||||
|
||||
lock_keys = redis_connection.keys("query_hash_job:*") # TODO: use set instead of keys command
|
||||
if not lock_keys:
|
||||
return
|
||||
|
||||
query_tasks = [QueryTask(job_id=j) for j in redis_connection.mget(lock_keys)]
|
||||
|
||||
logger.info("Found %d locks", len(query_tasks))
|
||||
@@ -197,14 +199,40 @@ def cleanup_tasks():
             logger.warning("%s is ready (%s), removing lock.", lock_keys[i], t.celery_status)
             redis_connection.delete(lock_keys[i])
 
-        if t.celery_status == 'STARTED' and t.id not in all_tasks:
-            logger.warning("Couldn't find active job for: %s, removing lock.", lock_keys[i])
-            redis_connection.delete(lock_keys[i])
+        # if t.celery_status == 'STARTED' and t.id not in all_tasks:
+        #     logger.warning("Couldn't find active job for: %s, removing lock.", lock_keys[i])
+        #     redis_connection.delete(lock_keys[i])
 
+@celery.task(base=BaseTask)
+def cleanup_query_results():
+    """
+    Job to cleanup unused query results -- such that no query links to them anymore, and older than a week (so it's less
+    likely to be open in someone's browser and be used).
+
+    Each time the job deletes only 100 query results so it won't choke the database in case of many such results.
+    """
+
+    unused_query_results = models.QueryResult.unused().limit(100)
+    total_unused_query_results = models.QueryResult.unused().count()
+    deleted_count = models.QueryResult.delete().where(models.QueryResult.id << unused_query_results).execute()
+
+    logger.info("Deleted %d unused query results out of total of %d." % (deleted_count, total_unused_query_results))
+
+
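The delete above relies on peewee's << operator, which compiles to SQL IN, with the unused().limit(100) query acting as a subselect, so at most 100 rows go per run. A self-contained sketch of the same pattern with a hypothetical model (the real one lives in redash.models; written against the peewee 2.x line pinned in requirements.txt):

from peewee import Model, CharField, SqliteDatabase

db = SqliteDatabase(':memory:')

class QueryResult(Model):
    query_hash = CharField()

    class Meta:
        database = db

db.create_tables([QueryResult])

# "id << subquery" renders as "id IN (subquery)", so only the first
# 100 candidates are deleted per invocation -- the same batching trick
# as cleanup_query_results uses to avoid choking the database.
candidates = QueryResult.select(QueryResult.id).limit(100)
deleted = QueryResult.delete().where(QueryResult.id << candidates).execute()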
+@celery.task(base=BaseTask)
+def refresh_schemas():
+    """
+    Refreshes the data sources' schemas.
+    """
+
+    for ds in models.DataSource.all():
+        logger.info("Refreshing schema for: {}".format(ds.name))
+        ds.get_schema(refresh=True)
+
+
 @celery.task(bind=True, base=BaseTask, track_started=True)
-def execute_query(self, query, data_source_id):
+# TODO: maybe this should be a class?
+def execute_query(self, query, data_source_id, metadata):
     start_time = time.time()
 
     logger.info("Loading data source (%d)...", data_source_id)
@@ -219,15 +247,21 @@ def execute_query(self, query, data_source_id):
     query_hash = gen_query_hash(query)
     query_runner = get_query_runner(data_source.type, data_source.options)
 
-    if getattr(query_runner, 'annotate_query', True):
-        # TODO: anotate with queu ename
-        annotated_query = "/* Task Id: %s, Query hash: %s */ %s" % \
-                (self.request.id, query_hash, query)
+    if query_runner.annotate_query():
+        metadata['Task ID'] = self.request.id
+        metadata['Query Hash'] = query_hash
+        metadata['Queue'] = self.request.delivery_info['routing_key']
+
+        annotation = ", ".join(["{}: {}".format(k, v) for k, v in metadata.iteritems()])
+
+        logging.debug("Annotation: %s", annotation)
+
+        annotated_query = "/* {} */ {}".format(annotation, query)
     else:
         annotated_query = query
 
     with statsd_client.timer('query_runner.{}.{}.run_time'.format(data_source.type, data_source.name)):
-        data, error = query_runner(annotated_query)
+        data, error = query_runner.run_query(annotated_query)
 
     run_time = time.time() - start_time
     logger.info("Query finished... data length=%s, error=%s", data and len(data), error)
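The net effect of the annotation change: whatever metadata has accumulated for the task is rendered into one SQL comment prefixed to the query, instead of the hard-coded two-field comment it replaces. A standalone illustration of the formatting (Python 3 spelling with .items(); the diff's .iteritems() is the Python 2 equivalent):

metadata = {'Query ID': 42, 'Username': 'Scheduled', 'Query Hash': 'abc123'}

annotation = ", ".join(["{}: {}".format(k, v) for k, v in metadata.items()])
annotated_query = "/* {} */ {}".format(annotation, "SELECT 1")

# Prints, e.g.:
# /* Query ID: 42, Username: Scheduled, Query Hash: abc123 */ SELECT 1
print(annotated_query)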
@@ -237,8 +271,6 @@ def execute_query(self, query, data_source_id):
     # Delete query_hash
     redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))
 
-    # TODO: it is possible that storing the data will fail, and we will need to retry
-    # while we already marked the job as done
     if not error:
         query_result = models.QueryResult.store_result(data_source.id, query_hash, query, data, run_time, datetime.datetime.utcnow())
     else:
@@ -7,19 +7,30 @@ celery = Celery('redash',
                 broker=settings.CELERY_BROKER,
                 include='redash.tasks')
 
-celery.conf.update(CELERY_RESULT_BACKEND=settings.CELERY_BACKEND,
-                   CELERYBEAT_SCHEDULE={
-                       'refresh_queries': {
-                           'task': 'redash.tasks.refresh_queries',
-                           'schedule': timedelta(seconds=30)
-                       },
-                       'cleanup_tasks': {
-                           'task': 'redash.tasks.cleanup_tasks',
-                           'schedule': timedelta(minutes=5)
-                       },
-                   },
-                   CELERY_TIMEZONE='UTC')
+celery_schedule = {
+    'refresh_queries': {
+        'task': 'redash.tasks.refresh_queries',
+        'schedule': timedelta(seconds=30)
+    },
+    'cleanup_tasks': {
+        'task': 'redash.tasks.cleanup_tasks',
+        'schedule': timedelta(minutes=5)
+    },
+    'refresh_schemas': {
+        'task': 'redash.tasks.refresh_schemas',
+        'schedule': timedelta(minutes=30)
+    }
+}
+
+if settings.QUERY_RESULTS_CLEANUP_ENABLED:
+    celery_schedule['cleanup_query_results'] = {
+        'task': 'redash.tasks.cleanup_query_results',
+        'schedule': timedelta(minutes=5)
+    }
+
+celery.conf.update(CELERY_RESULT_BACKEND=settings.CELERY_BACKEND,
+                   CELERYBEAT_SCHEDULE=celery_schedule,
+                   CELERY_TIMEZONE='UTC')
 
 if __name__ == '__main__':
     celery.start()
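The point of this refactor: the beat schedule is now assembled in a plain dict first, so optional entries (here cleanup_query_results, gated on QUERY_RESULTS_CLEANUP_ENABLED) can be bolted on before a single conf.update() call. The same pattern reduced to a minimal sketch outside redash (hypothetical broker URL and task names), matching the celery 3.x setting names pinned in requirements.txt:

from datetime import timedelta

from celery import Celery

app = Celery('sketch', broker='redis://localhost:6379/0')

schedule = {
    'heartbeat': {
        'task': 'sketch.heartbeat',
        'schedule': timedelta(seconds=30),
    },
}

# Optional entries are added before the config is applied once.
cleanup_enabled = True
if cleanup_enabled:
    schedule['cleanup'] = {
        'task': 'sketch.cleanup',
        'schedule': timedelta(minutes=5),
    }

app.conf.update(CELERYBEAT_SCHEDULE=schedule, CELERY_TIMEZONE='UTC')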
@@ -4,6 +4,8 @@ from flask.ext.restful import Api
 
 from redash import settings, utils
 from redash.models import db
+from redash.admin import init_admin
 
 
 __version__ = '0.4.0'
 
@@ -14,6 +16,7 @@ app = Flask(__name__,
 
 
 api = Api(app)
+init_admin(app)
 
 # configure our database
 settings.DATABASE_CONFIG.update({'threadlocals': True})
@@ -1,4 +1,5 @@
 Flask==0.10.1
+Flask-Admin==1.1.0
 Flask-RESTful==0.2.10
 Flask-Login==0.2.9
 Flask-OAuth==0.12
@@ -9,12 +10,12 @@ Werkzeug==0.9.4
 aniso8601==0.82
 blinker==1.3
 itsdangerous==0.23
-peewee==2.2.2
+peewee==2.4.7
 psycopg2==2.5.2
 python-dateutil==2.1
 pytz==2013.9
 redis==2.7.5
-requests==2.2.0
+requests==2.3.0
 six==1.5.2
 sqlparse==0.1.8
 wsgiref==0.1.2
@@ -23,3 +24,7 @@ honcho==0.5.0
 statsd==2.1.2
 gunicorn==18.0
 celery==3.1.11
+jsonschema==2.4.0
+click==3.3
+RestrictedPython==3.6.0
+wtf-peewee==0.2.3
setup/Vagrantfile_debian (Normal file, 12 lines)
@@ -0,0 +1,12 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  # Every Vagrant virtual environment requires a box to build off of.
  config.vm.box = "box-cutter/debian76"
  config.vm.provision "shell", path: "setup.sh"
  config.vm.network "forwarded_port", guest: 80, host: 9001
end
setup/bootstrap.sh (Normal file, 179 lines)
@@ -0,0 +1,179 @@
#!/bin/bash
set -eu

REDASH_BASE_PATH=/opt/redash
FILES_BASE_URL=https://raw.githubusercontent.com/EverythingMe/redash/docs_setup/setup/files/

# Verify running as root:
if [ "$(id -u)" != "0" ]; then
    if [ $# -ne 0 ]; then
        echo "Failed running with sudo. Exiting." 1>&2
        exit 1
    fi
    echo "This script must be run as root. Trying to run with sudo."
    sudo bash $0 --with-sudo
    exit 0
fi

# Base packages
apt-get update
apt-get install -y python-pip python-dev nginx curl build-essential pwgen

# redash user
# TODO: check user doesn't exist yet?
adduser --system --no-create-home --disabled-login --gecos "" redash

# PostgreSQL
pg_available=0
psql --version || pg_available=$?
if [ $pg_available -ne 0 ]; then
    wget $FILES_BASE_URL"postgres_apt.sh" -O /tmp/postgres_apt.sh
    bash /tmp/postgres_apt.sh
    apt-get update
    apt-get -y install postgresql-9.3 postgresql-server-dev-9.3
fi

add_service() {
    service_name=$1
    service_command="/etc/init.d/$service_name"

    echo "Adding service: $service_name (/etc/init.d/$service_name)."
    chmod +x $service_command

    if command -v chkconfig >/dev/null 2>&1; then
        # we're chkconfig, so lets add to chkconfig and put in runlevel 345
        chkconfig --add $service_name && echo "Successfully added to chkconfig!"
        chkconfig --level 345 $service_name on && echo "Successfully added to runlevels 345!"
    elif command -v update-rc.d >/dev/null 2>&1; then
        #if we're not a chkconfig box assume we're able to use update-rc.d
        update-rc.d $service_name defaults && echo "Success!"
    else
        echo "No supported init tool found."
    fi

    $service_command start
}

# Redis
redis_available=0
redis-cli --version || redis_available=$?
if [ $redis_available -ne 0 ]; then
    wget http://download.redis.io/releases/redis-2.8.17.tar.gz
    tar xzf redis-2.8.17.tar.gz
    rm redis-2.8.17.tar.gz
    cd redis-2.8.17
    make
    make install

    # Setup process init & configuration

    REDIS_PORT=6379
    REDIS_CONFIG_FILE="/etc/redis/$REDIS_PORT.conf"
    REDIS_LOG_FILE="/var/log/redis_$REDIS_PORT.log"
    REDIS_DATA_DIR="/var/lib/redis/$REDIS_PORT"

    mkdir -p `dirname "$REDIS_CONFIG_FILE"` || die "Could not create redis config directory"
    mkdir -p `dirname "$REDIS_LOG_FILE"` || die "Could not create redis log dir"
    mkdir -p "$REDIS_DATA_DIR" || die "Could not create redis data directory"

    wget -O /etc/init.d/redis_6379 $FILES_BASE_URL"redis_init"
    wget -O $REDIS_CONFIG_FILE $FILES_BASE_URL"redis.conf"

    add_service "redis_$REDIS_PORT"

    cd ..
    rm -rf redis-2.8.17
fi

# Directories
if [ ! -d "$REDASH_BASE_PATH" ]; then
    sudo mkdir /opt/redash
    sudo chown redash /opt/redash
    sudo -u redash mkdir /opt/redash/logs
fi

# Default config file
if [ ! -f "/opt/redash/.env" ]; then
    sudo -u redash wget $FILES_BASE_URL"env" -O /opt/redash/.env
fi

# Install latest version
# REDASH_VERSION=${REDASH_VERSION-0.4.0.b589}
# modified by @fedex1 3/15/2015 seems to be the latest version at this point in time.
REDASH_VERSION=${REDASH_VERSION-0.6.0.b722}
LATEST_URL="https://github.com/EverythingMe/redash/releases/download/v${REDASH_VERSION/.b/%2Bb}/redash.$REDASH_VERSION.tar.gz"
VERSION_DIR="/opt/redash/redash.$REDASH_VERSION"
REDASH_TARBALL=/tmp/redash.tar.gz

if [ ! -d "$VERSION_DIR" ]; then
    sudo -u redash wget $LATEST_URL -O $REDASH_TARBALL
    sudo -u redash mkdir $VERSION_DIR
    sudo -u redash tar -C $VERSION_DIR -xvf $REDASH_TARBALL
    ln -nfs $VERSION_DIR /opt/redash/current
    ln -nfs /opt/redash/.env /opt/redash/current/.env

    cd /opt/redash/current

    # TODO: venv?
    pip install -r requirements.txt
fi

# Create database / tables
pg_user_exists=0
sudo -u postgres psql postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='redash'" | grep -q 1 || pg_user_exists=$?
if [ $pg_user_exists -ne 0 ]; then
    echo "Creating redash postgres user & database."
    sudo -u postgres createuser redash --no-superuser --no-createdb --no-createrole
    sudo -u postgres createdb redash --owner=redash

    cd /opt/redash/current
    sudo -u redash bin/run ./manage.py database create_tables
fi

# Create default admin user
cd /opt/redash/current
# TODO: make sure user created only once
# TODO: generate temp password and print to screen
sudo -u redash bin/run ./manage.py users create --admin --password admin "Admin" "admin"

# Create re:dash read only pg user & setup data source
pg_user_exists=0
sudo -u postgres psql postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='redash_reader'" | grep -q 1 || pg_user_exists=$?
if [ $pg_user_exists -ne 0 ]; then
    echo "Creating redash reader postgres user."
    REDASH_READER_PASSWORD=$(pwgen -1)
    sudo -u postgres psql -c "CREATE ROLE redash_reader WITH PASSWORD '$REDASH_READER_PASSWORD' NOCREATEROLE NOCREATEDB NOSUPERUSER LOGIN"
    sudo -u redash psql -c "grant select(id,name,type) ON data_sources to redash_reader;" redash
    sudo -u redash psql -c "grant select on activity_log, events, queries, dashboards, widgets, visualizations, query_results to redash_reader;" redash

    cd /opt/redash/current
    sudo -u redash bin/run ./manage.py ds new -n "re:dash metadata" -t "pg" -o "{\"user\": \"redash_reader\", \"password\": \"$REDASH_READER_PASSWORD\", \"host\": \"localhost\", \"dbname\": \"redash\"}"
fi

# BigQuery dependencies:
apt-get install -y libffi-dev libssl-dev
pip install google-api-python-client==1.2 pyOpenSSL==0.14 oauth2client==1.2

# MySQL dependencies:
apt-get install -y libmysqlclient-dev
pip install MySQL-python==1.2.5

# Mongo dependencies:
pip install pymongo==2.7.2

# Setup supervisord + sysv init startup script
sudo -u redash mkdir -p /opt/redash/supervisord
pip install supervisor==3.1.2  # TODO: move to requirements.txt

# Get supervisord startup script
sudo -u redash wget -O /opt/redash/supervisord/supervisord.conf $FILES_BASE_URL"supervisord.conf"

wget -O /etc/init.d/redash_supervisord $FILES_BASE_URL"redash_supervisord_init"
add_service "redash_supervisord"

# Nginx setup
rm /etc/nginx/sites-enabled/default
wget -O /etc/nginx/sites-available/redash $FILES_BASE_URL"nginx_redash_site"
ln -nfs /etc/nginx/sites-available/redash /etc/nginx/sites-enabled/redash
service nginx restart
setup/files/env (Normal file, 9 lines)
@@ -0,0 +1,9 @@
export REDASH_CONNECTION_ADAPTER=pg
export REDASH_CONNECTION_STRING="dbname=redash"
export REDASH_STATIC_ASSETS_PATH="../rd_ui/dist/"
export REDASH_LOG_LEVEL="INFO"
export REDASH_WORKERS_COUNT=6
export REDASH_REDIS_URL=redis://localhost:6379/1
export REDASH_DATABASE_URL="postgresql://redash"
export REDASH_COOKIE_SECRET=veryverysecret
export REDASH_GOOGLE_APPS_DOMAIN=
setup/files/nginx_redash_site (Normal file, 20 lines)
@@ -0,0 +1,20 @@
upstream rd_servers {
  server 127.0.0.1:5000;
}

server {
  listen 80 default;

  access_log /var/log/nginx/rd.access.log;

  gzip on;
  gzip_types *;
  gzip_proxied any;

  location / {
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_pass http://rd_servers;
  }
}
setup/files/postgres_apt.sh (Normal file, 162 lines)
@@ -0,0 +1,162 @@
#!/bin/sh

# script to add apt.postgresql.org to sources.list

# from command line
CODENAME="$1"
# lsb_release is the best interface, but not always available
if [ -z "$CODENAME" ]; then
    CODENAME=$(lsb_release -cs 2>/dev/null)
fi
# parse os-release (unreliable, does not work on Ubuntu)
if [ -z "$CODENAME" -a -f /etc/os-release ]; then
    . /etc/os-release
    # Debian: VERSION="7.0 (wheezy)"
    # Ubuntu: VERSION="13.04, Raring Ringtail"
    CODENAME=$(echo $VERSION | sed -ne 's/.*(\(.*\)).*/\1/')
fi
# guess from sources.list
if [ -z "$CODENAME" ]; then
    CODENAME=$(grep '^deb ' /etc/apt/sources.list | head -n1 | awk '{ print $3 }')
fi
# complain if no result yet
if [ -z "$CODENAME" ]; then
    cat <<EOF
Could not determine the distribution codename. Please report this as a bug to
pgsql-pkg-debian@postgresql.org. As a workaround, you can call this script with
the proper codename as parameter, e.g. "$0 squeeze".
EOF
    exit 1
fi

# errors are non-fatal above
set -e

cat <<EOF
This script will enable the PostgreSQL APT repository on apt.postgresql.org on
your system. The distribution codename used will be $CODENAME-pgdg.

EOF

case $CODENAME in
    # known distributions
    sid|wheezy|squeeze|lenny|etch) ;;
    precise|lucid) ;;
    *) # unknown distribution, verify on the web
        DISTURL="http://apt.postgresql.org/pub/repos/apt/dists/"
        if [ -x /usr/bin/curl ]; then
            DISTHTML=$(curl -s $DISTURL)
        elif [ -x /usr/bin/wget ]; then
            DISTHTML=$(wget --quiet -O - $DISTURL)
        fi
        if [ "$DISTHTML" ]; then
            if ! echo "$DISTHTML" | grep -q "$CODENAME-pgdg"; then
                cat <<EOF
Your system is using the distribution codename $CODENAME, but $CODENAME-pgdg
does not seem to be a valid distribution on
$DISTURL

We abort the installation here. If you want to use a distribution different
from your system, you can call this script with an explicit codename, e.g.
"$0 precise".

Specifically, if you are using a non-LTS Ubuntu release, refer to
https://wiki.postgresql.org/wiki/Apt/FAQ#I_am_using_a_non-LTS_release_of_Ubuntu

For more information, refer to https://wiki.postgresql.org/wiki/Apt
or ask on the mailing list for assistance: pgsql-pkg-debian@postgresql.org
EOF
                exit 1
            fi
        fi
        ;;
esac

echo "Writing /etc/apt/sources.list.d/pgdg.list ..."
cat > /etc/apt/sources.list.d/pgdg.list <<EOF
deb http://apt.postgresql.org/pub/repos/apt/ $CODENAME-pgdg main
#deb-src http://apt.postgresql.org/pub/repos/apt/ $CODENAME-pgdg main
EOF

echo "Importing repository signing key ..."
KEYRING="/etc/apt/trusted.gpg.d/apt.postgresql.org.gpg"
test -e $KEYRING || touch $KEYRING
apt-key --keyring $KEYRING add - <<EOF
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1

mQINBE6XR8IBEACVdDKT2HEH1IyHzXkb4nIWAY7echjRxo7MTcj4vbXAyBKOfjja
UrBEJWHN6fjKJXOYWXHLIYg0hOGeW9qcSiaa1/rYIbOzjfGfhE4x0Y+NJHS1db0V
G6GUj3qXaeyqIJGS2z7m0Thy4Lgr/LpZlZ78Nf1fliSzBlMo1sV7PpP/7zUO+aA4
bKa8Rio3weMXQOZgclzgeSdqtwKnyKTQdXY5MkH1QXyFIk1nTfWwyqpJjHlgtwMi
c2cxjqG5nnV9rIYlTTjYG6RBglq0SmzF/raBnF4Lwjxq4qRqvRllBXdFu5+2pMfC
IZ10HPRdqDCTN60DUix+BTzBUT30NzaLhZbOMT5RvQtvTVgWpeIn20i2NrPWNCUh
hj490dKDLpK/v+A5/i8zPvN4c6MkDHi1FZfaoz3863dylUBR3Ip26oM0hHXf4/2U
A/oA4pCl2W0hc4aNtozjKHkVjRx5Q8/hVYu+39csFWxo6YSB/KgIEw+0W8DiTII3
RQj/OlD68ZDmGLyQPiJvaEtY9fDrcSpI0Esm0i4sjkNbuuh0Cvwwwqo5EF1zfkVj
Tqz2REYQGMJGc5LUbIpk5sMHo1HWV038TWxlDRwtOdzw08zQA6BeWe9FOokRPeR2
AqhyaJJwOZJodKZ76S+LDwFkTLzEKnYPCzkoRwLrEdNt1M7wQBThnC5z6wARAQAB
tBxQb3N0Z3JlU1FMIERlYmlhbiBSZXBvc2l0b3J5iQI9BBMBCAAnAhsDBQsJCAcD
BRUKCQgLBRYCAwEAAh4BAheABQJS6RUZBQkOhCctAAoJEH/MfUaszEz4zmQP/2ad
HtuaXL5Xu3C3NGLha/aQb9iSJC8z5vN55HMCpsWlmslCBuEr+qR+oZvPkvwh0Io/
8hQl/qN54DMNifRwVL2n2eG52yNERie9BrAMK2kNFZZCH4OxlMN0876BmDuNq2U6
7vUtCv+pxT+g9R1LvlPgLCTjS3m+qMqUICJ310BMT2cpYlJx3YqXouFkdWBVurI0
pGU/+QtydcJALz5eZbzlbYSPWbOm2ZSS2cLrCsVNFDOAbYLtUn955yXB5s4rIscE
vTzBxPgID1iBknnPzdu2tCpk07yJleiupxI1yXstCtvhGCbiAbGFDaKzhgcAxSIX
0ZPahpaYLdCkcoLlfgD+ar4K8veSK2LazrhO99O0onRG0p7zuXszXphO4E/WdbTO
yDD35qCqYeAX6TaB+2l4kIdVqPgoXT/doWVLUK2NjZtd3JpMWI0OGYDFn2DAvgwP
xqKEoGTOYuoWKssnwLlA/ZMETegak27gFAKfoQlmHjeA/PLC2KRYd6Wg2DSifhn+
2MouoE4XFfeekVBQx98rOQ5NLwy/TYlsHXm1n0RW86ETN3chj/PPWjsi80t5oepx
82azRoVu95LJUkHpPLYyqwfueoVzp2+B2hJU2Rg7w+cJq64TfeJG8hrc93MnSKIb
zTvXfdPtvYdHhhA2LYu4+5mh5ASlAMJXD7zIOZt2iEYEEBEIAAYFAk6XSO4ACgkQ
xa93SlhRC1qmjwCg9U7U+XN7Gc/dhY/eymJqmzUGT/gAn0guvoX75Y+BsZlI6dWn
qaFU6N8HiQIcBBABCAAGBQJOl0kLAAoJEExaa6sS0qeuBfEP/3AnLrcKx+dFKERX
o4NBCGWr+i1CnowupKS3rm2xLbmiB969szG5TxnOIvnjECqPz6skK3HkV3jTZaju
v3sR6M2ItpnrncWuiLnYcCSDp9TEMpCWzTEgtrBlKdVuTNTeRGILeIcvqoZX5w+u
i0eBvvbeRbHEyUsvOEnYjrqoAjqUJj5FUZtR1+V9fnZp8zDgpOSxx0LomnFdKnhj
uyXAQlRCA6/roVNR9ruRjxTR5ubteZ9ubTsVYr2/eMYOjQ46LhAgR+3Alblu/WHB
MR/9F9//RuOa43R5Sjx9TiFCYol+Ozk8XRt3QGweEH51YkSYY3oRbHBb2Fkql6N6
YFqlLBL7/aiWnNmRDEs/cdpo9HpFsbjOv4RlsSXQfvvfOayHpT5nO1UQFzoyMVpJ
615zwmQDJT5Qy7uvr2eQYRV9AXt8t/H+xjQsRZCc5YVmeAo91qIzI/tA2gtXik49
6yeziZbfUvcZzuzjjxFExss4DSAwMgorvBeIbiz2k2qXukbqcTjB2XqAlZasd6Ll
nLXpQdqDV3McYkP/MvttWh3w+J/woiBcA7yEI5e3YJk97uS6+ssbqLEd0CcdT+qz
+Waw0z/ZIU99Lfh2Qm77OT6vr//Zulw5ovjZVO2boRIcve7S97gQ4KC+G/+QaRS+
VPZ67j5UMxqtT/Y4+NHcQGgwF/1iiQI9BBMBCAAnAhsDBQsJCAcDBRUKCQgLBRYC
AwEAAh4BAheABQJQeSssBQkDwxbfAAoJEH/MfUaszEz4bgkP/0AI0UgDgkNNqplA
IpE/pkwem2jgGpJGKurh2xDu6j2ZL+BPzPhzyCeMHZwTXkkI373TXGQQP8dIa+RD
HAZ3iijw4+ISdKWpziEUJjUk04UMPTlN+dYJt2EHLQDD0VLtX0yQC/wLmVEH/REp
oclbVjZR/+ehwX2IxOIlXmkZJDSycl975FnSUjMAvyzty8P9DN0fIrQ7Ju+BfMOM
TnUkOdp0kRUYez7pxbURJfkM0NxAP1geACI91aISBpFg3zxQs1d3MmUIhJ4wHvYB
uaR7Fx1FkLAxWddre/OCYJBsjucE9uqc04rgKVjN5P/VfqNxyUoB+YZ+8Lk4t03p
RBcD9XzcyOYlFLWXbcWxTn1jJ2QMqRIWi5lzZIOMw5B+OK9LLPX0dAwIFGr9WtuV
J2zp+D4CBEMtn4Byh8EaQsttHeqAkpZoMlrEeNBDz2L7RquPQNmiuom15nb7xU/k
7PGfqtkpBaaGBV9tJkdp7BdH27dZXx+uT+uHbpMXkRrXliHjWpAw+NGwADh/Pjmq
ExlQSdgAiXy1TTOdzxKH7WrwMFGDK0fddKr8GH3f+Oq4eOoNRa6/UhTCmBPbryCS
IA7EAd0Aae9YaLlOB+eTORg/F1EWLPm34kKSRtae3gfHuY2cdUmoDVnOF8C9hc0P
bL65G4NWPt+fW7lIj+0+kF19s2PviQI9BBMBCAAnAhsDBQsJCAcDBRUKCQgLBRYC
AwEAAh4BAheABQJRKm2VBQkINsBBAAoJEH/MfUaszEz4RTEP/1sQHyjHaUiAPaCA
v8jw/3SaWP/g8qLjpY6ROjLnDMvwKwRAoxUwcIv4/TWDOMpwJN+CJIbjXsXNYvf9
OX+UTOvq4iwi4ADrAAw2xw+Jomc6EsYla+hkN2FzGzhpXfZFfUsuphjY3FKL+4hX
H+R8ucNwIz3yrkfc17MMn8yFNWFzm4omU9/JeeaafwUoLxlULL2zY7H3+QmxCl0u
6t8VvlszdEFhemLHzVYRY0Ro/ISrR78CnANNsMIy3i11U5uvdeWVCoWV1BXNLzOD
4+BIDbMB/Do8PQCWiliSGZi8lvmj/sKbumMFQonMQWOfQswTtqTyQ3yhUM1LaxK5
PYq13rggi3rA8oq8SYb/KNCQL5pzACji4TRVK0kNpvtxJxe84X8+9IB1vhBvF/Ji
/xDd/3VDNPY+k1a47cON0S8Qc8DA3mq4hRfcgvuWy7ZxoMY7AfSJOhleb9+PzRBB
n9agYgMxZg1RUWZazQ5KuoJqbxpwOYVFja/stItNS4xsmi0lh2I4MNlBEDqnFLUx
SvTDc22c3uJlWhzBM/f2jH19uUeqm4jaggob3iJvJmK+Q7Ns3WcfhuWwCnc1+58d
iFAMRUCRBPeFS0qd56QGk1r97B6+3UfLUslCfaaA8IMOFvQSHJwDO87xWGyxeRTY
IIP9up4xwgje9LB7fMxsSkCDTHOk
=s3DI
-----END PGP PUBLIC KEY BLOCK-----
EOF

echo "Running apt-get update ..."
apt-get update

cat <<EOF

You can now start installing packages from apt.postgresql.org.

Have a look at https://wiki.postgresql.org/wiki/Apt for more information;
most notably the FAQ at https://wiki.postgresql.org/wiki/Apt/FAQ
EOF
setup/files/redash_supervisord_init (Normal file, 129 lines)
@@ -0,0 +1,129 @@
#!/bin/sh
# /etc/init.d/redash_supervisord
### BEGIN INIT INFO
# Provides:          supervisord
# Required-Start:    $remote_fs $syslog
# Required-Stop:     $remote_fs $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: process supervisor
### END INIT INFO

# Author: Ron DuPlain <ron.duplain@gmail.com>

# Do NOT "set -e"

# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin
NAME=supervisord
DESC="process supervisor"
DAEMON=/usr/local/bin/$NAME
DAEMON_ARGS="--configuration /opt/redash/supervisord/supervisord.conf "
PIDFILE=/opt/redash/supervisord/supervisord.pid
SCRIPTNAME=/etc/init.d/redash_supervisord
USER=redash

# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME

# Load the VERBOSE setting and other rcS variables
. /lib/init/vars.sh

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions

#
# Function that starts the daemon/service
#
do_start()
{
    # Return
    #   0 if daemon has been started
    #   1 if daemon was already running
    #   2 if daemon could not be started
    start-stop-daemon --start --quiet --pidfile $PIDFILE --user $USER --chuid $USER --exec $DAEMON --test > /dev/null \
        || return 1
    start-stop-daemon --start --quiet --pidfile $PIDFILE --user $USER --chuid $USER --exec $DAEMON -- \
        $DAEMON_ARGS \
        || return 2
    # Add code here, if necessary, that waits for the process to be ready
    # to handle requests from services started subsequently which depend
    # on this one. As a last resort, sleep for some time.
}

#
# Function that stops the daemon/service
#
do_stop()
{
    # Return
    #   0 if daemon has been stopped
    #   1 if daemon was already stopped
    #   2 if daemon could not be stopped
    #   other if a failure occurred
    start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --user $USER --chuid $USER --name $NAME
    RETVAL="$?"
    [ "$RETVAL" = 2 ] && return 2
    # Wait for children to finish too if this is a daemon that forks
    # and if the daemon is only ever run from this initscript.
    # If the above conditions are not satisfied then add some other code
    # that waits for the process to drop all resources that could be
    # needed by services started subsequently. A last resort is to
    # sleep for some time.
    start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --user $USER --chuid $USER --exec $DAEMON
    [ "$?" = 2 ] && return 2
    # Many daemons don't delete their pidfiles when they exit.
    rm -f $PIDFILE
    return "$RETVAL"
}

case "$1" in
  start)
    [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
        0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
        2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
    esac
    ;;
  stop)
    [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
    do_stop
    case "$?" in
        0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
        2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
    esac
    ;;
  status)
    status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
    ;;
  restart)
    log_daemon_msg "Restarting $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1)
        do_start
        case "$?" in
            0) log_end_msg 0 ;;
            1) log_end_msg 1 ;; # Old process is still running
            *) log_end_msg 1 ;; # Failed to start
        esac
        ;;
      *)
        # Failed to stop
        log_end_msg 1
        ;;
    esac
    ;;
  *)
    echo "Usage: $SCRIPTNAME {start|stop|status|restart}" >&2
    exit 3
    ;;
esac

:
setup/files/redis.conf (Normal file, 785 lines)
@@ -0,0 +1,785 @@
## Generated by install_server.sh ##
# Redis configuration file example

# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
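An aside on the unit table above: bare suffixes are decimal while the "b" forms are binary, which is easy to get backwards when generating configs. A small Python sketch of a converter implementing exactly the rules stated in the comment (illustrative only; nothing in redash parses these):

# Multipliers exactly as the redis.conf comment defines them.
_UNITS = {
    'k': 1000, 'kb': 1024,
    'm': 1000 ** 2, 'mb': 1024 ** 2,
    'g': 1000 ** 3, 'gb': 1024 ** 3,
}

def redis_size_to_bytes(value):
    value = value.strip().lower()
    # Try the longer ("b") suffixes first so '1gb' is not read as '1g' plus junk.
    for suffix in sorted(_UNITS, key=len, reverse=True):
        if value.endswith(suffix):
            return int(value[:-len(suffix)]) * _UNITS[suffix]
    return int(value)

assert redis_size_to_bytes('1gb') == 1024 ** 3
assert redis_size_to_bytes('1G') == 1000 ** 3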
################################## INCLUDES ###################################
|
||||
|
||||
# Include one or more other config files here. This is useful if you
|
||||
# have a standard template that goes to all Redis server but also need
|
||||
# to customize a few per-server settings. Include files can include
|
||||
# other files, so use this wisely.
|
||||
#
|
||||
# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
|
||||
# from admin or Redis Sentinel. Since Redis always uses the last processed
|
||||
# line as value of a configuration directive, you'd better put includes
|
||||
# at the beginning of this file to avoid overwriting config change at runtime.
|
||||
#
|
||||
# If instead you are interested in using includes to override configuration
|
||||
# options, it is better to use include as the last line.
|
||||
#
|
||||
# include /path/to/local.conf
|
||||
# include /path/to/other.conf
|
||||
|
||||
################################ GENERAL #####################################
|
||||
|
||||
# By default Redis does not run as a daemon. Use 'yes' if you need it.
|
||||
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
|
||||
daemonize yes
|
||||
|
||||
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
|
||||
# default. You can specify a custom pid file location here.
|
||||
pidfile /var/run/redis_6379.pid
|
||||
|
||||
# Accept connections on the specified port, default is 6379.
|
||||
# If port 0 is specified Redis will not listen on a TCP socket.
|
||||
port 6379
|
||||
|
||||
# TCP listen() backlog.
|
||||
#
|
||||
# In high requests-per-second environments you need an high backlog in order
|
||||
# to avoid slow clients connections issues. Note that the Linux kernel
|
||||
# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
|
||||
# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
|
||||
# in order to get the desired effect.
|
||||
tcp-backlog 511
|
||||
|
||||
# By default Redis listens for connections from all the network interfaces
|
||||
# available on the server. It is possible to listen to just one or multiple
|
||||
# interfaces using the "bind" configuration directive, followed by one or
|
||||
# more IP addresses.
|
||||
#
|
||||
# Examples:
|
||||
#
|
||||
# bind 192.168.1.100 10.0.0.1
|
||||
# bind 127.0.0.1
|
||||
|
||||
# Specify the path for the Unix socket that will be used to listen for
|
||||
# incoming connections. There is no default, so Redis will not listen
|
||||
# on a unix socket when not specified.
|
||||
#
|
||||
# unixsocket /tmp/redis.sock
|
||||
# unixsocketperm 700
|
||||
|
||||
# Close the connection after a client is idle for N seconds (0 to disable)
|
||||
timeout 0
|
||||
|
||||
# TCP keepalive.
|
||||
#
|
||||
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
|
||||
# of communication. This is useful for two reasons:
|
||||
#
|
||||
# 1) Detect dead peers.
|
||||
# 2) Take the connection alive from the point of view of network
|
||||
# equipment in the middle.
|
||||
#
|
||||
# On Linux, the specified value (in seconds) is the period used to send ACKs.
|
||||
# Note that to close the connection the double of the time is needed.
|
||||
# On other kernels the period depends on the kernel configuration.
|
||||
#
|
||||
# A reasonable value for this option is 60 seconds.
|
||||
tcp-keepalive 0
|
||||
|
||||
# Specify the server verbosity level.
|
||||
# This can be one of:
|
||||
# debug (a lot of information, useful for development/testing)
|
||||
# verbose (many rarely useful info, but not a mess like the debug level)
|
||||
# notice (moderately verbose, what you want in production probably)
|
||||
# warning (only very important / critical messages are logged)
|
||||
loglevel notice
|
||||
|
||||
# Specify the log file name. Also the empty string can be used to force
|
||||
# Redis to log on the standard output. Note that if you use standard
|
||||
# output for logging but daemonize, logs will be sent to /dev/null
|
||||
logfile /var/log/redis_6379.log
|
||||
|
||||
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
|
||||
# and optionally update the other syslog parameters to suit your needs.
|
||||
# syslog-enabled no
|
||||
|
||||
# Specify the syslog identity.
|
||||
# syslog-ident redis
|
||||
|
||||
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
|
||||
# syslog-facility local0
|
||||
|
||||
# Set the number of databases. The default database is DB 0, you can select
|
||||
# a different one on a per-connection basis using SELECT <dbid> where
|
||||
# dbid is a number between 0 and 'databases'-1
|
||||
databases 16
|
||||
|
||||
################################ SNAPSHOTTING ################################
|
||||
#
|
||||
# Save the DB on disk:
|
||||
#
|
||||
# save <seconds> <changes>
|
||||
#
|
||||
# Will save the DB if both the given number of seconds and the given
|
||||
# number of write operations against the DB occurred.
|
||||
#
|
||||
# In the example below the behaviour will be to save:
|
||||
# after 900 sec (15 min) if at least 1 key changed
|
||||
# after 300 sec (5 min) if at least 10 keys changed
|
||||
# after 60 sec if at least 10000 keys changed
|
||||
#
|
||||
# Note: you can disable saving at all commenting all the "save" lines.
|
||||
#
|
||||
# It is also possible to remove all the previously configured save
|
||||
# points by adding a save directive with a single empty string argument
|
||||
# like in the following example:
|
||||
#
|
||||
# save ""
|
||||
|
||||
save 900 1
|
||||
save 300 10
|
||||
save 60 10000
|
||||
|
||||
# By default Redis will stop accepting writes if RDB snapshots are enabled
|
||||
# (at least one save point) and the latest background save failed.
|
||||
# This will make the user aware (in a hard way) that data is not persisting
|
||||
# on disk properly, otherwise chances are that no one will notice and some
|
||||
# disaster will happen.
|
||||
#
|
||||
# If the background saving process will start working again Redis will
|
||||
# automatically allow writes again.
|
||||
#
|
||||
# However if you have setup your proper monitoring of the Redis server
|
||||
# and persistence, you may want to disable this feature so that Redis will
|
||||
# continue to work as usual even if there are problems with disk,
|
||||
# permissions, and so forth.
|
||||
stop-writes-on-bgsave-error yes
|
||||
|
||||
# Compress string objects using LZF when dump .rdb databases?
|
||||
# For default that's set to 'yes' as it's almost always a win.
|
||||
# If you want to save some CPU in the saving child set it to 'no' but
|
||||
# the dataset will likely be bigger if you have compressible values or keys.
|
||||
rdbcompression yes
|
||||
|
||||
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
|
||||
# This makes the format more resistant to corruption but there is a performance
|
||||
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
|
||||
# for maximum performances.
|
||||
#
|
||||
# RDB files created with checksum disabled have a checksum of zero that will
|
||||
# tell the loading code to skip the check.
|
||||
rdbchecksum yes
|
||||
|
||||
# The filename where to dump the DB
|
||||
dbfilename dump.rdb
|
||||
|
||||
# The working directory.
|
||||
#
|
||||
# The DB will be written inside this directory, with the filename specified
|
||||
# above using the 'dbfilename' configuration directive.
|
||||
#
|
||||
# The Append Only File will also be created inside this directory.
|
||||
#
|
||||
# Note that you must specify a directory here, not a file name.
|
||||
dir /var/lib/redis/6379
|
||||
|
||||
################################# REPLICATION #################################
|
||||
|
||||
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
|
||||
# another Redis server. A few things to understand ASAP about Redis replication.
|
||||
#
|
||||
# 1) Redis replication is asynchronous, but you can configure a master to
|
||||
# stop accepting writes if it appears to be not connected with at least
|
||||
# a given number of slaves.
|
||||
# 2) Redis slaves are able to perform a partial resynchronization with the
|
||||
# master if the replication link is lost for a relatively small amount of
|
||||
# time. You may want to configure the replication backlog size (see the next
|
||||
# sections of this file) with a sensible value depending on your needs.
|
||||
# 3) Replication is automatic and does not need user intervention. After a
|
||||
# network partition slaves automatically try to reconnect to masters
|
||||
# and resynchronize with them.
|
||||
#
|
||||
# slaveof <masterip> <masterport>
|
||||
|
||||
# If the master is password protected (using the "requirepass" configuration
|
||||
# directive below) it is possible to tell the slave to authenticate before
|
||||
# starting the replication synchronization process, otherwise the master will
|
||||
# refuse the slave request.
|
||||
#
|
||||
# masterauth <master-password>
|
||||
|
||||
# When a slave loses its connection with the master, or when the replication
|
||||
# is still in progress, the slave can act in two different ways:
|
||||
#
|
||||
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
|
||||
# still reply to client requests, possibly with out of date data, or the
|
||||
# data set may just be empty if this is the first synchronization.
|
||||
#
|
||||
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
|
||||
# an error "SYNC with master in progress" to all the kind of commands
|
||||
# but to INFO and SLAVEOF.
|
||||
#
|
||||
slave-serve-stale-data yes
|
||||
|
||||
# You can configure a slave instance to accept writes or not. Writing against
|
||||
# a slave instance may be useful to store some ephemeral data (because data
|
||||
# written on a slave will be easily deleted after resync with the master) but
|
||||
# may also cause problems if clients are writing to it because of a
|
||||
# misconfiguration.
|
||||
#
|
||||
# Since Redis 2.6 by default slaves are read-only.
|
||||
#
|
||||
# Note: read only slaves are not designed to be exposed to untrusted clients
|
||||
# on the internet. It's just a protection layer against misuse of the instance.
|
||||
# Still a read only slave exports by default all the administrative commands
|
||||
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
|
||||
# security of read only slaves using 'rename-command' to shadow all the
|
||||
# administrative / dangerous commands.
|
||||
slave-read-only yes
|
||||
|
||||
# Slaves send PINGs to server in a predefined interval. It's possible to change
|
||||
# this interval with the repl_ping_slave_period option. The default value is 10
|
||||
# seconds.
|
||||
#
|
||||
# repl-ping-slave-period 10
|
||||
|
||||
# The following option sets the replication timeout for:
|
||||
#
|
||||
# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
|
||||
# 2) Master timeout from the point of view of slaves (data, pings).
|
||||
# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
|
||||
#
|
||||
# It is important to make sure that this value is greater than the value
|
||||
# specified for repl-ping-slave-period otherwise a timeout will be detected
|
||||
# every time there is low traffic between the master and the slave.
|
||||
#
|
||||
# repl-timeout 60
|
||||
|
||||
# Disable TCP_NODELAY on the slave socket after SYNC?
|
||||
#
|
||||
# If you select "yes" Redis will use a smaller number of TCP packets and
|
||||
# less bandwidth to send data to slaves. But this can add a delay for
|
||||
# the data to appear on the slave side, up to 40 milliseconds with
|
||||
# Linux kernels using a default configuration.
|
||||
#
|
||||
# If you select "no" the delay for data to appear on the slave side will
|
||||
# be reduced but more bandwidth will be used for replication.
|
||||
#
|
||||
# By default we optimize for low latency, but in very high traffic conditions
|
||||
# or when the master and slaves are many hops away, turning this to "yes" may
|
||||
# be a good idea.
|
||||
repl-disable-tcp-nodelay no
|
||||
|
||||
# Set the replication backlog size. The backlog is a buffer that accumulates
|
||||
# slave data when slaves are disconnected for some time, so that when a slave
|
||||
# wants to reconnect again, often a full resync is not needed, but a partial
|
||||
# resync is enough, just passing the portion of data the slave missed while
|
||||
# disconnected.
|
||||
#
|
||||
# The biggest the replication backlog, the longer the time the slave can be
|
||||
# disconnected and later be able to perform a partial resynchronization.
|
||||
#
|
||||
# The backlog is only allocated once there is at least a slave connected.
|
||||
#
|
||||
# repl-backlog-size 1mb
|
||||
|
||||
# After a master has no longer connected slaves for some time, the backlog
|
||||
# will be freed. The following option configures the amount of seconds that
|
||||
# need to elapse, starting from the time the last slave disconnected, for
|
||||
# the backlog buffer to be freed.
|
||||
#
|
||||
# A value of 0 means to never release the backlog.
|
||||
#
|
||||
# repl-backlog-ttl 3600
|
||||
|
||||
# The slave priority is an integer number published by Redis in the INFO output.
|
||||
# It is used by Redis Sentinel in order to select a slave to promote into a
|
||||
# master if the master is no longer working correctly.
|
||||
#
|
||||
# A slave with a low priority number is considered better for promotion, so
|
||||
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
|
||||
# pick the one with priority 10, that is the lowest.
|
||||
#
|
||||
# However a special priority of 0 marks the slave as not able to perform the
|
||||
# role of master, so a slave with priority of 0 will never be selected by
|
||||
# Redis Sentinel for promotion.
|
||||
#
|
||||
# By default the priority is 100.
|
||||
slave-priority 100
|
||||
|
||||
# It is possible for a master to stop accepting writes if there are less than
|
||||
# N slaves connected, having a lag less or equal than M seconds.
|
||||
#
|
||||
# The N slaves need to be in "online" state.
|
||||
#
|
||||
# The lag in seconds, that must be <= the specified value, is calculated from
|
||||
# the last ping received from the slave, that is usually sent every second.
|
||||
#
|
||||
# This option does not GUARANTEES that N replicas will accept the write, but
|
||||
# will limit the window of exposure for lost writes in case not enough slaves
|
||||
# are available, to the specified number of seconds.
|
||||
#
|
||||
# For example to require at least 3 slaves with a lag <= 10 seconds use:
|
||||
#
|
||||
# min-slaves-to-write 3
|
||||
# min-slaves-max-lag 10
|
||||
#
|
||||
# Setting one or the other to 0 disables the feature.
|
||||
#
|
||||
# By default min-slaves-to-write is set to 0 (feature disabled) and
|
||||
# min-slaves-max-lag is set to 10.
|
||||
|
||||
################################## SECURITY ###################################
|
||||
|
||||
# Require clients to issue AUTH <PASSWORD> before processing any other
|
||||
# commands. This might be useful in environments in which you do not trust
|
||||
# others with access to the host running redis-server.
|
||||
#
|
||||
# This should stay commented out for backward compatibility and because most
|
||||
# people do not need auth (e.g. they run their own servers).
|
||||
#
|
||||
# Warning: since Redis is pretty fast an outside user can try up to
|
||||
# 150k passwords per second against a good box. This means that you should
|
||||
# use a very strong password otherwise it will be very easy to break.
|
||||
#
|
||||
# requirepass foobared
|
||||
|
||||
# Command renaming.
|
||||
#
|
||||
# It is possible to change the name of dangerous commands in a shared
|
||||
# environment. For instance the CONFIG command may be renamed into something
|
||||
# hard to guess so that it will still be available for internal-use tools
|
||||
# but not available for general clients.
|
||||
#
|
||||
# Example:
|
||||
#
|
||||
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
|
||||
#
|
||||
# It is also possible to completely kill a command by renaming it into
|
||||
# an empty string:
|
||||
#
|
||||
# rename-command CONFIG ""
|
||||
#
|
||||
# Please note that changing the name of commands that are logged into the
|
||||
# AOF file or transmitted to slaves may cause problems.
|
||||
|
||||
################################### LIMITS ####################################
|
||||
|
||||
# Set the max number of connected clients at the same time. By default
|
||||
# this limit is set to 10000 clients, however if the Redis server is not
|
||||
# able to configure the process file limit to allow for the specified limit
|
||||
# the max number of allowed clients is set to the current file limit
|
||||
# minus 32 (as Redis reserves a few file descriptors for internal uses).
|
||||
#
|
||||
# Once the limit is reached Redis will close all the new connections sending
|
||||
# an error 'max number of clients reached'.
|
||||
#
|
||||
# maxclients 10000
|
||||
|
||||
# Don't use more memory than the specified amount of bytes.
|
||||
# When the memory limit is reached Redis will try to remove keys
|
||||
# according to the eviction policy selected (see maxmemory-policy).
|
||||
#
|
||||
# If Redis can't remove keys according to the policy, or if the policy is
|
||||
# set to 'noeviction', Redis will start to reply with errors to commands
|
||||
# that would use more memory, like SET, LPUSH, and so on, and will continue
|
||||
# to reply to read-only commands like GET.
|
||||
#
|
||||
# This option is usually useful when using Redis as an LRU cache, or to set
|
||||
# a hard memory limit for an instance (using the 'noeviction' policy).
|
||||
#
|
||||
# WARNING: If you have slaves attached to an instance with maxmemory on,
|
||||
# the size of the output buffers needed to feed the slaves are subtracted
|
||||
# from the used memory count, so that network problems / resyncs will
|
||||
# not trigger a loop where keys are evicted, and in turn the output
|
||||
# buffer of slaves is full with DELs of keys evicted triggering the deletion
|
||||
# of more keys, and so forth until the database is completely emptied.
|
||||
#
|
||||
# In short... if you have slaves attached it is suggested that you set a lower
|
||||
# limit for maxmemory so that there is some free RAM on the system for slave
|
||||
# output buffers (but this is not needed if the policy is 'noeviction').
|
||||
#
|
||||
# maxmemory <bytes>
|
||||
|
||||
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
|
||||
# is reached. You can select among five behaviors:
|
||||
#
|
||||
# volatile-lru -> remove the key with an expire set using an LRU algorithm
|
||||
# allkeys-lru -> remove any key accordingly to the LRU algorithm
|
||||
# volatile-random -> remove a random key with an expire set
|
||||
# allkeys-random -> remove a random key, any key
|
||||
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
|
||||
# noeviction -> don't expire at all, just return an error on write operations
|
||||
#
|
||||
# Note: with any of the above policies, Redis will return an error on write
|
||||
# operations, when there are not suitable keys for eviction.
|
||||
#
|
||||
# At the date of writing this commands are: set setnx setex append
|
||||
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
|
||||
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
|
||||
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
|
||||
# getset mset msetnx exec sort
|
||||
#
|
||||
# The default is:
|
||||
#
|
||||
# maxmemory-policy volatile-lru
|
||||
|
||||
# LRU and minimal TTL algorithms are not precise algorithms but approximated
|
||||
# algorithms (in order to save memory), so you can select as well the sample
|
||||
# size to check. For instance for default Redis will check three keys and
|
||||
# pick the one that was used less recently, you can change the sample size
|
||||
# using the following configuration directive.
|
||||
#
|
||||
# maxmemory-samples 3
|
||||
|
||||
############################## APPEND ONLY MODE ###############################
|
||||
|
||||
# By default Redis asynchronously dumps the dataset on disk. This mode is
|
||||
# good enough in many applications, but an issue with the Redis process or
|
||||
# a power outage may result into a few minutes of writes lost (depending on
|
||||
# the configured save points).
|
||||
#
|
||||
# The Append Only File is an alternative persistence mode that provides
|
||||
# much better durability. For instance using the default data fsync policy
|
||||
# (see later in the config file) Redis can lose just one second of writes in a
|
||||
# dramatic event like a server power outage, or a single write if something
|
||||
# wrong with the Redis process itself happens, but the operating system is
|
||||
# still running correctly.
|
||||
#
|
||||
# AOF and RDB persistence can be enabled at the same time without problems.
|
||||
# If the AOF is enabled on startup Redis will load the AOF, that is the file
|
||||
# with the better durability guarantees.
|
||||
#
|
||||
# Please check http://redis.io/topics/persistence for more information.
|
||||
|
||||
appendonly no
|
||||
|
||||
# The name of the append only file (default: "appendonly.aof")
|
||||
|
||||
appendfilename "appendonly.aof"
|
||||
|
||||
# The fsync() call tells the Operating System to actually write data on disk
|
||||
# instead to wait for more data in the output buffer. Some OS will really flush
|
||||
# data on disk, some other OS will just try to do it ASAP.
|
||||
#
|
||||
# Redis supports three different modes:
|
||||
#
|
||||
# no: don't fsync, just let the OS flush the data when it wants. Faster.
|
||||
# always: fsync after every write to the append only log . Slow, Safest.
|
||||
# everysec: fsync only one time every second. Compromise.
|
||||
#
|
||||
# The default is "everysec", as that's usually the right compromise between
|
||||
# speed and data safety. It's up to you to understand if you can relax this to
|
||||
# "no" that will let the operating system flush the output buffer when
|
||||
# it wants, for better performances (but if you can live with the idea of
|
||||
# some data loss consider the default persistence mode that's snapshotting),
|
||||
# or on the contrary, use "always" that's very slow but a bit safer than
|
||||
# everysec.
|
||||
#
|
||||
# More details please check the following article:
|
||||
# http://antirez.com/post/redis-persistence-demystified.html
|
||||
#
|
||||
# If unsure, use "everysec".
|
||||
|
||||
# appendfsync always
|
||||
appendfsync everysec
|
||||
# appendfsync no
|
||||
|
||||
# When the AOF fsync policy is set to always or everysec, and a background
|
||||
# saving process (a background save or AOF log background rewriting) is
|
||||
# performing a lot of I/O against the disk, in some Linux configurations
|
||||
# Redis may block too long on the fsync() call. Note that there is no fix for
|
||||
# this currently, as even performing fsync in a different thread will block
|
||||
# our synchronous write(2) call.
|
||||
#
|
||||
# In order to mitigate this problem it's possible to use the following option
|
||||
# that will prevent fsync() from being called in the main process while a
|
||||
# BGSAVE or BGREWRITEAOF is in progress.
|
||||
#
|
||||
# This means that while another child is saving, the durability of Redis is
|
||||
# the same as "appendfsync none". In practical terms, this means that it is
|
||||
# possible to lose up to 30 seconds of log in the worst scenario (with the
|
||||
# default Linux settings).
|
||||
#
|
||||
# If you have latency problems turn this to "yes". Otherwise leave it as
|
||||
# "no" that is the safest pick from the point of view of durability.
|
||||
|
||||
no-appendfsync-on-rewrite no
|
||||
|
||||
# Automatic rewrite of the append only file.
|
||||
# Redis is able to automatically rewrite the log file implicitly calling
|
||||
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
|
||||
#
|
||||
# This is how it works: Redis remembers the size of the AOF file after the
|
||||
# latest rewrite (if no rewrite has happened since the restart, the size of
|
||||
# the AOF at startup is used).
|
||||
#
|
||||
# This base size is compared to the current size. If the current size is
|
||||
# bigger than the specified percentage, the rewrite is triggered. Also
|
||||
# you need to specify a minimal size for the AOF file to be rewritten, this
|
||||
# is useful to avoid rewriting the AOF file even if the percentage increase
|
||||
# is reached but it is still pretty small.
|
||||
#
|
||||
# Specify a percentage of zero in order to disable the automatic AOF
|
||||
# rewrite feature.
|
||||
|
||||
auto-aof-rewrite-percentage 100
|
||||
auto-aof-rewrite-min-size 64mb
|
||||
|
||||
# An AOF file may be found to be truncated at the end during the Redis
|
||||
# startup process, when the AOF data gets loaded back into memory.
|
||||
# This may happen when the system where Redis is running
|
||||
# crashes, especially when an ext4 filesystem is mounted without the
|
||||
# data=ordered option (however this can't happen when Redis itself
|
||||
# crashes or aborts but the operating system still works correctly).
|
||||
#
|
||||
# Redis can either exit with an error when this happens, or load as much
|
||||
# data as possible (the default now) and start if the AOF file is found
|
||||
# to be truncated at the end. The following option controls this behavior.
|
||||
#
|
||||
# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
|
||||
# the Redis server starts emitting a log to inform the user of the event.
|
||||
# Otherwise if the option is set to no, the server aborts with an error
|
||||
# and refuses to start. When the option is set to no, the user requires
|
||||
# to fix the AOF file using the "redis-check-aof" utility before to restart
|
||||
# the server.
|
||||
#
|
||||
# Note that if the AOF file will be found to be corrupted in the middle
|
||||
# the server will still exit with an error. This option only applies when
|
||||
# Redis will try to read more data from the AOF file but not enough bytes
|
||||
# will be found.
|
||||
aof-load-truncated yes
|
||||
|
||||
################################ LUA SCRIPTING ###############################
|
||||
|
||||
# Max execution time of a Lua script in milliseconds.
|
||||
#
|
||||
# If the maximum execution time is reached Redis will log that a script is
|
||||
# still in execution after the maximum allowed time and will start to
|
||||
# reply to queries with an error.
|
||||
#
|
||||
# When a long running script exceed the maximum execution time only the
|
||||
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
|
||||
# used to stop a script that did not yet called write commands. The second
|
||||
# is the only way to shut down the server in the case a write commands was
|
||||
# already issue by the script but the user don't want to wait for the natural
|
||||
# termination of the script.
|
||||
#
|
||||
# Set it to 0 or a negative value for unlimited execution without warnings.
|
||||
lua-time-limit 5000

################################## SLOW LOG ###################################

# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and cannot serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis the
# execution time, in microseconds, that a command must exceed in order to
# get logged, and the other parameter is the length of the slow log. When
# a new command is logged the oldest one is removed from the queue of
# logged commands.

# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000

# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
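
# A quick inspection sketch, all done at runtime:
#
#   $ redis-cli CONFIG SET slowlog-log-slower-than 10000
#   $ redis-cli SLOWLOG GET 10    # the ten most recent slow entries
#   $ redis-cli SLOWLOG LEN       # number of entries currently stored
#   $ redis-cli SLOWLOG RESET     # discard them and reclaim the memory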

################################ LATENCY MONITOR ##############################

# The Redis latency monitoring subsystem samples different operations
# at runtime in order to collect data related to possible sources of
# latency of a Redis instance.
#
# Via the LATENCY command this information is available to the user, who
# can print graphs and obtain reports.
#
# The system only logs operations that took a time equal to or greater
# than the amount of milliseconds specified via the
# latency-monitor-threshold configuration directive. When its value is set
# to zero, the latency monitor is turned off.
#
# By default latency monitoring is disabled since it is mostly not needed
# if you don't have latency issues, and collecting data has a performance
# impact that, while very small, can be measured under big load. Latency
# monitoring can easily be enabled at runtime using the command
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
latency-monitor-threshold 0
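
# A minimal runtime session (the 100 ms threshold is just an example value):
#
#   $ redis-cli CONFIG SET latency-monitor-threshold 100
#   $ redis-cli LATENCY LATEST            # latest spike per event class
#   $ redis-cli LATENCY HISTORY command   # samples for the "command" event
#   $ redis-cli LATENCY RESET             # clear the collected samples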

############################# Event notification ##############################

# Redis can notify Pub/Sub clients about events happening in the key space.
# This feature is documented at http://redis.io/topics/notifications
#
# For instance if keyspace events notification is enabled, and a client
# performs a DEL operation on key "foo" stored in Database 0, two
# messages will be published via Pub/Sub:
#
# PUBLISH __keyspace@0__:foo del
# PUBLISH __keyevent@0__:del foo
#
# It is possible to select the events that Redis will notify among a set
# of classes. Every class is identified by a single character:
#
#  K     Keyspace events, published with __keyspace@<db>__ prefix.
#  E     Keyevent events, published with __keyevent@<db>__ prefix.
#  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
#  $     String commands
#  l     List commands
#  s     Set commands
#  h     Hash commands
#  z     Sorted set commands
#  x     Expired events (events generated every time a key expires)
#  e     Evicted events (events generated when a key is evicted for maxmemory)
#  A     Alias for g$lshzxe, so that the "AKE" string means all the events.
#
# The "notify-keyspace-events" directive takes as its argument a string
# composed of zero or more of the characters above. The empty string means
# that notifications are disabled entirely.
#
# Example: to enable list and generic events, from the point of view of the
# event name, use:
#
#  notify-keyspace-events Elg
#
# Example 2: to get the stream of the expired keys subscribing to channel
# name __keyevent@0__:expired use:
#
#  notify-keyspace-events Ex
#
# By default all notifications are disabled because most users don't need
# this feature and the feature has some overhead. Note that if you don't
# specify at least one of K or E, no events will be delivered.
notify-keyspace-events ""
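
# A subscription sketch ("Ex" enables keyevent notifications for expired
# keys; "mykey" is a hypothetical key):
#
#   $ redis-cli CONFIG SET notify-keyspace-events Ex
#   $ redis-cli PSUBSCRIBE '__keyevent@*__:expired' &
#   $ redis-cli SET mykey somevalue PX 100
#
# Roughly 100 ms later the subscriber receives a message naming "mykey".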

############################### ADVANCED CONFIG ###############################

# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives
# (a quick way to observe the resulting encodings is sketched after the
# sorted set limits below).
hash-max-ziplist-entries 512
hash-max-ziplist-value 64

# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64

# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit on the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512

# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
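
# The effect of the four groups of limits above can be observed with the
# OBJECT ENCODING command (the keys below are hypothetical):
#
#   $ redis-cli HSET small f v
#   $ redis-cli OBJECT ENCODING small   # -> ziplist
#   $ redis-cli SADD nums 1 2 3
#   $ redis-cli OBJECT ENCODING nums    # -> intset
#
# Once a structure grows past its limits it is converted to the general
# purpose encoding and is never converted back.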

# HyperLogLog sparse representation bytes limit. The limit includes the
# 16 bytes header. When a HyperLogLog using the sparse representation crosses
# this limit, it is converted into the dense representation.
#
# A value greater than 16000 is totally useless, since at that point the
# dense representation is more memory efficient.
#
# The suggested value is ~ 3000 in order to have the benefits of
# the space efficient encoding without slowing down PFADD too much
# (PFADD is O(N) with the sparse encoding). The value can be raised to
# ~ 10000 when CPU is not a concern, but space is, and the data set is
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000
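
# Since a HyperLogLog is stored as a plain string, the sparse-to-dense
# conversion can be watched with STRLEN (the "hll" key is hypothetical):
#
#   $ redis-cli PFADD hll a b c
#   $ redis-cli STRLEN hll   # a few bytes while sparse
#   # ... after many PFADD calls with distinct elements ...
#   $ redis-cli STRLEN hll   # jumps well past hll-sparse-max-bytes once dense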

# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run against a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with a 2 millisecond delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
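
# Like most directives in this file, this can also be flipped at runtime,
# e.g. while chasing a latency spike:
#
#   $ redis-cli CONFIG SET activerehashing no
#   $ redis-cli CONFIG GET activerehashing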

# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients including MONITOR clients
# slave  -> slave clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reaches 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously stays
# over the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can be read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard and the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
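
# These limits can be changed at runtime as well; CONFIG SET takes the whole
# triple as a single quoted argument, and CLIENT LIST reports each client's
# current output buffer usage in its "omem" field:
#
#   $ redis-cli CONFIG SET client-output-buffer-limit "pubsub 64mb 16mb 90"
#   $ redis-cli CLIENT LIST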

# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10
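
# "hz" can be tuned live while measuring the effect on idle CPU and on the
# precision of expirations and timeouts:
#
#   $ redis-cli CONFIG SET hz 100
#   $ redis-cli CONFIG GET hz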

# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes