mirror of https://github.com/getredash/redash.git
synced 2025-12-25 01:03:20 -05:00

Compare commits: v0.8.2-rc ... v0.8.3.b11 (144 commits)
Commits in this range (SHA1):

32c0d3eb3d, 1bee22a578, 6bb57508e1, 2d34bf1c54, 7e3856b4f5, 189e105c68, 378459d64f, ab72531889,
51deb8f75d, 68f6e9b5e5, fbfa76f4d6, 28e8e049eb, f1f9597998, 0b389d51aa, 46f3e82571, 5b64918379,
7549f32d9a, 6f51776cbb, ad0afd8f3e, 8863282e58, 9c1fda488c, 995659ee0d, ad2642e9e5, 740b305910,
ca8cca0a8c, 7c4410ac63, 91a209ae82, 60cdb85cc4, becb4decf1, 5f33e7ea18, 7675de4ec7, fe2aa71349,
b7720f7001, 3b24f56eba, 52b8e98b1a, 5fe9c2fcf0, 816142aa54, f737be272f, 0343fa7980, 0f9f9a24a0,
5b9b18639b, ce46295dd3, 3781b0758e, 8d20180d40, a7b41327c6, 4d415c0246, 5331008e78, 80783feda6,
2f308c3fa6, a63055f7f0, ce884ba6d3, 63765281fe, 47e79003e5, 541060c62e, 3ba19fa80f, f3ec0448f5,
654349a7ae, 2b32de184e, 1fb57edd1f, f6c65d139a, 4e59472238, feabc46da4, 51a10e5a20, 5bf370d0f0,
5beec581d8, 70080df534, 0d4c3c329e, 76dfbad971, 45a85c110f, f77c0aeb1d, b23e328f69, 165d782b98,
1bdc1bef73, e3b41b15d7, 7a95dec33b, a3d059041c, 3a6c1599f3, f92aa7b15f, d823506e5b, fc93de7aa2,
a0cc25d174, df24bc3aae, 60c2cb0a75, ad19f2d304, 3aa59a8152, 32638aebed, 346ea66c9d, d14b74b683,
5d879ce358, b4da4359a8, 7e08518a31, bea0e9aad0, a87179b68b, 91806eda44, d1fe3d63fd, 8408409ce2,
6bbdd5eb44, 34ba54397d, ec79ce74d0, f324f1bf6f, 47cfb7d620, dab1a21b40, aa04a6e4a5, e0a43a32ab,
68001ae0f1, 9d9501b158, 67aecc0201, 0bc9fc1ed5, b548cb1d8f, eb5c4dd5f3, a07a9b9390, 56ade4735c,
b8a9f1048a, 5b3bcff4f5, b41b21c69e, 172d57e82c, f507da9df7, 2e27e43357, 8a0c287d05, 664a1806bc,
9a0ccd1bb5, 076fca0c5a, 59f099418a, b9a0760d7e, 4eb490a839, 410c5671f0, fad8bd47e8, 89f5074054,
5826fbd05f, ddab1c9493, f9d5fe235b, cc91981845, 9832b7f72a, 2a6ed3ca52, 2e78ef0128, d50af7dec9,
20159a1c2a, 06400ed840, 0ddc6cf135, 61f2be02b7, 9eca43801a, bcaefda600, 42b0430866, 40ee0d8a6e
.dockerignore
@@ -1,4 +1,4 @@
 rd_ui/dist/
 rd_ui/.tmp/
 rd_ui/node_modules/
 .git/
 .vagrant/
.gitignore (vendored, 2 lines changed)
@@ -21,4 +21,4 @@ venv
 dump.rdb

 # Docker related
-docker-compose.yaml
+docker-compose.yml
Dockerfile (14 lines changed)
@@ -3,11 +3,11 @@ MAINTAINER Di Wu <diwu@yelp.com>

 # Ubuntu packages
 RUN apt-get update && \
-  apt-get install -y python-pip python-dev curl build-essential pwgen libffi-dev sudo git-core wget && \
+  apt-get install -y python-pip python-dev curl build-essential pwgen libffi-dev sudo git-core wget \
   # Postgres client
-  apt-get -y install libpq-dev postgresql-client && \
+  libpq-dev \
   # Additional packages required for data sources:
-  apt-get install -y libssl-dev libmysqlclient-dev
+  libssl-dev libmysqlclient-dev

 # Users creation
 RUN useradd --system --comment " " --create-home redash
@@ -16,12 +16,6 @@ RUN useradd --system --comment " " --create-home redash
 RUN pip install -U setuptools && \
     pip install supervisor==3.1.2

-# Download latest source and extract into /opt/redash/current
-# COPY setup/latest_release_url.py /tmp/latest_release_url.py
-# RUN wget $(python /tmp/latest_release_url.py) -O redash.tar.gz && \
-#     mkdir -p /opt/redash/current && \
-#     tar -C /opt/redash/current -xvf redash.tar.gz && \
-#     rm redash.tar.gz
+COPY . /opt/redash/current

 # Setting working directory
@@ -34,7 +28,7 @@ RUN pip install -r requirements_all_ds.txt && \
 # Setup supervisord
 RUN mkdir -p /opt/redash/supervisord && \
     mkdir -p /opt/redash/logs && \
-    cp /opt/redash/current/setup/files/supervisord_docker.conf /opt/redash/supervisord/supervisord.conf
+    cp /opt/redash/current/setup/docker/supervisord/supervisord.conf /opt/redash/supervisord/supervisord.conf

 # Fix permissions
 RUN chown -R redash /opt/redash
README.md (28 lines changed)
@@ -1,8 +1,16 @@
+Some of you read the news about EverythingMe closing down. While more detailed announcement will come later (once more details are clear), **I just wanted to reassure you that you shouldn't worry -- this won't affect the future of re:dash.** I will keep maintaining re:dash, and might even be able to work more on it.
+
+If you still have concerns, you're welcome to reach out to me directly -- arik@arikfr.com.
+
+Arik.
+
+---
+
 <p align="center">
-  <img title="re:dash" src='http://redash.io/static/img/redash_logo.png' width="200px"/>
+  <img title="re:dash" src='http://redash.io/static/old_img/redash_logo.png' width="200px"/>
 </p>
 <p align="center">
-  <img title="Build Status" src='https://circleci.com/gh/EverythingMe/redash.png?circle-token=8a695aa5ec2cbfa89b48c275aea298318016f040'/>
+  <img title="Build Status" src='https://circleci.com/gh/getredash/redash.png?circle-token=8a695aa5ec2cbfa89b48c275aea298318016f040'/>
 </p>

 **_re:dash_** is our take on freeing the data within our company in a way that will better fit our culture and usage patterns.
@@ -22,31 +30,27 @@ Presto, Google Spreadsheets, Cloudera Impala, Hive and custom scripts.

 ## Demo

-![Screenshots](https://raw.github.com/EverythingMe/redash/screenshots/screenshots.gif)
+![Screenshots](https://raw.github.com/getredash/redash/screenshots/screenshots.gif)

 You can try out the demo instance: http://demo.redash.io/ (login with any Google account).

 ## Getting Started

 * [Setting up re:dash instance](http://redash.io/deployment/setup.html) (includes links to ready made AWS/GCE images).
-* Additional documentation in the [Wiki](https://github.com/everythingme/redash/wiki).
+* [Documentation](http://docs.redash.io).

 ## Getting help

 * [Google Group (mailing list)](https://groups.google.com/forum/#!forum/redash-users): the best place to get updates about new releases or ask general questions.
-* Find us [on gitter](https://gitter.im/EverythingMe/redash#) (chat).
-* Contact Arik, the maintainer directly: arik@everything.me.
-
-## Roadmap
-
-TBD.
+* Find us [on gitter](https://gitter.im/getredash/redash#) (chat).
+* Contact Arik, the maintainer directly: arik@redash.io.

 ## Reporting Bugs and Contributing Code

-* Want to report a bug or request a feature? Please open [an issue](https://github.com/everythingme/redash/issues/new).
+* Want to report a bug or request a feature? Please open [an issue](https://github.com/getredash/redash/issues/new).
 * Want to help us build **_re:dash_**? Fork the project and make a pull request. We need all the help we can get!

 ## License

-See [LICENSE](https://github.com/EverythingMe/redash/blob/master/LICENSE) file.
+See [LICENSE](https://github.com/getredash/redash/blob/master/LICENSE) file.
@@ -7,7 +7,7 @@ import requests

 github_token = os.environ['GITHUB_TOKEN']
 auth = (github_token, 'x-oauth-basic')
-repo = 'EverythingMe/redash'
+repo = 'getredash/redash'

 def _github_request(method, path, params=None, headers={}):
     if not path.startswith('https://api.github.com'):
circle.yml (13 lines changed)
@@ -1,15 +1,14 @@
 machine:
+  services:
+    - docker
   node:
     version:
-      0.10.24
+      0.12.4
   python:
     version:
       2.7.3
 dependencies:
   pre:
-    - wget http://downloads.sourceforge.net/project/optipng/OptiPNG/optipng-0.7.5/optipng-0.7.5.tar.gz
-    - tar xvf optipng-0.7.5.tar.gz
-    - cd optipng-0.7.5; ./configure; make; sudo checkinstall -y;
    - make deps
    - pip install -r requirements_dev.txt
    - pip install -r requirements.txt
@@ -22,10 +21,14 @@ test:
   post:
     - make pack
 deployment:
-  github:
+  github_and_docker:
     branch: master
     commands:
       - make upload
+      - echo "rd_ui/app" >> .dockerignore
+      - docker build -t redash/redash:$(./manage.py version | sed -e "s/\+/./") .
+      - docker login -e $DOCKER_EMAIL -u $DOCKER_USER -p $DOCKER_PASS
+      - docker push redash/redash:$(./manage.py version | sed -e "s/\+/./")
 notify:
   webhooks:
     - url: https://webhooks.gitter.im/e/895d09c3165a0913ac2f
@@ -5,7 +5,13 @@ redash:
   links:
     - redis
     - postgres
-  env_file: .env
+  environment:
+    REDASH_STATIC_ASSETS_PATH:"../rd_ui/app/"
+    REDASH_LOG_LEVEL:"INFO"
+    REDASH_REDIS_URL:redis://localhost:6379/0
+    REDASH_DATABASE_URL:"postgresql://redash"
+    REDASH_COOKIE_SECRET:veryverysecret
+    REDASH_GOOGLE_APPS_DOMAIN:
 redis:
   image: redis:2.8
 postgres:
@@ -10,8 +10,8 @@ If one of the listed data source types isn't available when trying to create a n
 1. You installed required dependencies.
 2. If you've set custom value for the ``REDASH_ENABLED_QUERY_RUNNERS`` setting, it's included in the list.

-PostgreSQL / Redshift
----------------------
+PostgreSQL / Redshift / Greenplum
+---------------------------------

 - **Options**:
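(For illustration: ``REDASH_ENABLED_QUERY_RUNNERS`` holds a comma-separated list of query-runner module paths. A hypothetical entry that enables only the PostgreSQL and MySQL runners could look like the following; the exact module paths vary by release and are an assumption here, not a quote from the docs.)

.. code::

   # Hypothetical example -- check redash/query_runner/ for the paths in your release.
   export REDASH_ENABLED_QUERY_RUNNERS="redash.query_runner.pg,redash.query_runner.mysql"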
@@ -201,3 +201,18 @@ Vertica
 - **Additional requirements**:

   - ``vertica-python`` python package
+
+Oracle
+------
+
+- **Options**
+
+  - DSN Service name
+  - User
+  - Password
+  - Host
+  - Port
+
+- **Additional requirements**
+
+  - ``cx_Oracle`` python package. This requires the installation of the Oracle `instant client <http://www.oracle.com/technetwork/database/features/instant-client/index-097480.html>`__.
@@ -34,7 +34,7 @@ When query execution is done, the result gets stored to
 ``query_results`` table. Also we check for all queries in the
 ``queries`` table that have the same query hash and update their
 reference to the query result we just saved
-(`code <https://github.com/EverythingMe/redash/blob/master/redash/models.py#L235>`__).
+(`code <https://github.com/getredash/redash/blob/master/redash/models.py#L235>`__).

 Client
 ------
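(The update described above amounts to roughly the following, sketched in peewee-style Python, the ORM re:dash uses; model and field names are illustrative assumptions, not a quote of ``models.py``.)

.. code:: python

   # Sketch: after saving a fresh QueryResult, repoint every query that
   # shares the same query hash (and data source) at the new result.
   Query.update(latest_query_data=query_result).where(
       Query.query_hash == query_result.query_hash,
       Query.data_source == query_result.data_source
   ).execute()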
@@ -69,7 +69,7 @@ Ideas on how to implement query parameters
 Client side only implementation
 -------------------------------

-(This was actually implemented in. See pull request `#363 <https://github.com/EverythingMe/redash/pull/363>`__ for details.)
+(This was actually implemented in. See pull request `#363 <https://github.com/getredash/redash/pull/363>`__ for details.)

 The basic idea of how to implement parametized queries is to treat the
 query as a template and merge it with parameters taken from query string
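(A minimal sketch of that template-merge idea, written in Python with pystache for brevity; the actual implementation in pull request #363 runs client side.)

.. code:: python

   import pystache  # any Mustache implementation behaves the same way

   query_template = "SELECT count(*) FROM events WHERE created_at > '{{start_date}}'"
   params = {"start_date": "2015-01-01"}  # in practice, parsed from the URL query string

   sql = pystache.render(query_template, params)
   # -> SELECT count(*) FROM events WHERE created_at > '2015-01-01'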
@@ -13,7 +13,7 @@ To get started with this box:
 1. Make sure you have recent version of
    `Vagrant <https://www.vagrantup.com/>`__ installed.
 2. Clone the re:dash repository:
-   ``git clone https://github.com/EverythingMe/redash.git``.
+   ``git clone https://github.com/getredash/redash.git``.
 3. Change dir into the repository (``cd redash``) and run run
    ``vagrant up``. This might take some time the first time you run it,
    as it downloads the Vagrant virtual box.
@@ -30,20 +30,7 @@ To get started with this box:

 ::

-   PYTHONPATH=. bin/run python migrations/0001_allow_delete_query.py
-   PYTHONPATH=. bin/run python migrations/0002_fix_timestamp_fields.py
-   PYTHONPATH=. bin/run python migrations/0003_update_data_source_config.py
-   PYTHONPATH=. bin/run python migrations/0004_allow_null_in_event_user.py
-   PYTHONPATH=. bin/run python migrations/0005_add_updated_at.py
-   PYTHONPATH=. bin/run python migrations/0006_queries_last_edit_by.py
-   PYTHONPATH=. bin/run python migrations/0007_add_schedule_to_queries.py
-   PYTHONPATH=. bin/run python migrations/0008_make_ds_name_unique.py
-   PYTHONPATH=. bin/run python migrations/0009_add_api_key_to_user.py
-   PYTHONPATH=. bin/run python migrations/0010_create_alerts.py
-   PYTHONPATH=. bin/run python migrations/0010_allow_deleting_datasources.py
-   PYTHONPATH=. bin/run python migrations/0011_migrate_bigquery_to_json.py
-   PYTHONPATH=. bin/run python migrations/0012_add_list_users_permission.py
-   PYTHONPATH=. bin/run python migrations/0013_update_counter_options.py
+   export PYTHONPATH=. && find migrations/ -type f | grep 00 --null | xargs -I file bin/run python file

 9. Start the server and background workers with
    ``bin/run honcho start -f Procfile.dev``.
@@ -1,4 +1,4 @@
-.. image:: http://redash.io/static/img/redash_logo.png
+.. image:: http://redash.io/static/old_img/redash_logo.png
   :width: 200px

 Open Source Data Collaboration and Visualization Platform
@@ -21,7 +21,7 @@ Features
 Demo
 ####

-.. figure:: https://raw.github.com/EverythingMe/redash/screenshots/screenshots.gif
+.. figure:: https://raw.github.com/getredash/redash/screenshots/screenshots.gif
    :alt: Screenshots

 You can try out the demo instance: `http://demo.redash.io`_ (login with any Google account).
@@ -37,11 +37,11 @@ Getting Started
 Getting Help
 ############

-* Source: https://github.com/everythingme/redash
-* Issues: https://github.com/everythingme/redash/issues
+* Source: https://github.com/getredash/redash
+* Issues: https://github.com/getredash/redash/issues
 * Mailing List: https://groups.google.com/forum/#!forum/redash-users
-* Gitter (chat): https://gitter.im/EverythingMe/redash
-* Contact Arik, the maintainer directly: arik@everything.me.
+* Gitter (chat): https://gitter.im/getredash/redash
+* Contact Arik, the maintainer directly: arik@redash.io.

 TOC
 ###
@@ -2,7 +2,7 @@ Setting up re:dash instance
 ###########################

 The `provisioning
-script <https://github.com/EverythingMe/redash/blob/master/setup/bootstrap.sh>`__
+script <https://raw.githubusercontent.com/getredash/redash/master/setup/ubuntu/bootstrap.sh>`__
 works on Ubuntu 12.04, Ubuntu 14.04 and Debian Wheezy. This script
 installs all needed dependencies and creates basic setup.
@@ -130,6 +130,32 @@ to create new data source connection.
 See :doc:`documentation </datasources>` for the different options.
 Your instance comes ready with dependencies needed to setup supported sources.

+Mail Configuration
+------------------
+
+For the system to be able to send emails (for example when alerts trigger), you need to set the mail server to use and the
+host name of your re:dash server. If you're using one of our images, you can do this by editing the `.env` file:
+
+.. code::
+
+   # Note that not all values are required, as they have default values.
+
+   export REDASH_MAIL_SERVER="" # default: localhost
+   export REDASH_MAIL_PORT="" # default: 25
+   export REDASH_MAIL_USE_TLS="" # default: False
+   export REDASH_MAIL_USE_SSL="" # default: False
+   export REDASH_MAIL_USERNAME="" # default: None
+   export REDASH_MAIL_PASSWORD="" # default: None
+   export REDASH_MAIL_DEFAULT_SENDER="" # Email address to send from
+
+   export REDASH_HOST="" # base address of your re:dash instance, for example: "https://demo.redash.io"
+
+- Note that not all values are required, as there are default values.
+- It's recommended to use some mail service, like `Amazon SES <https://aws.amazon.com/ses/>`__, `Mailgun <http://www.mailgun.com/>`__
+  or `Mandrill <http://mandrillapp.com>`__ to send emails to ensure deliverability.
+
+To test email configuration, you can run `bin/run ./manage.py send_test_mail` (from `/opt/redash/current`).
+
 How to upgrade?
 ---------------
@@ -25,7 +25,7 @@ Even if you didn't use the image, it's very likely you can reuse most of
 this script with small modifications. What this script does is:

 1. Find the URL of the latest release tarball (from `GitHub releases
-   page <github.com/everythingme/redash/releases>`__).
+   page <github.com/getredash/redash/releases>`__).
 2. Download it.
 3. Create new directory for this version (for example:
    ``/opt/redash/redash.0.5.0.b685``).
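(Those three steps amount to something like the following sketch; the GitHub API endpoint and paths are assumptions for illustration, not a quote of the actual script.)

.. code:: python

   import requests, subprocess

   # 1. Find the latest release tarball URL via the GitHub API.
   release = requests.get(
       "https://api.github.com/repos/getredash/redash/releases/latest").json()
   tarball_url = release["assets"][0]["browser_download_url"]

   # 2. Download it.
   subprocess.check_call(["wget", tarball_url, "-O", "/tmp/redash.tar.gz"])

   # 3. Unpack into a per-version directory, e.g. /opt/redash/redash.0.5.0.b685.
   version_dir = "/opt/redash/redash." + release["tag_name"].lstrip("v")
   subprocess.check_call(["mkdir", "-p", version_dir])
   subprocess.check_call(["tar", "-C", version_dir, "-xzf", "/tmp/redash.tar.gz"])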
@@ -46,3 +46,27 @@ Simple query on a logstash ElasticSearch instance:
       "size" : 250,
       "sort" : "@timestamp:asc"
    }
+
+Simple query on a ElasticSearch instance:
+==================================================
+
+- Query the index named "twitter"
+- Filter by user equal "kimchy"
+- Return the fields: "@timestamp", "tweet" and "user"
+- Return up to 15 results
+- Sort by @timestamp ascending
+
+.. code:: json
+
+   {
+       "index" : "twitter",
+       "query" : {
+           "match": {
+               "user" : "kimchy"
+           }
+       },
+       "fields" : ["@timestamp", "tweet", "user"],
+       "size" : 15,
+       "sort" : "@timestamp:asc"
+   }
migrations/0014_migrate_existing_es_to_kibana.py (new file, 10 lines)
@@ -0,0 +1,10 @@
+__author__ = 'lior'
+
+from redash.models import DataSource
+
+if __name__ == '__main__':
+
+    for ds in DataSource.all():
+        if ds.type == 'elasticsearch':
+            ds.type = 'kibana'
+            ds.save()
migrations/0015_add_schedule_query_permission.py (new file, 6 lines)
@@ -0,0 +1,6 @@
+from redash import models
+
+if __name__ == '__main__':
+    default_group = models.Group.get(models.Group.name=='default')
+    default_group.permissions.append('schedule_query')
+    default_group.save()
migrations/0016_add_alert_subscriber.py (new file, 10 lines)
@@ -0,0 +1,10 @@
+from redash.models import db, Alert, AlertSubscription
+
+if __name__ == '__main__':
+    with db.database.transaction():
+        # There was an AWS/GCE image created without this table, to make sure this exists we run this migration.
+        if not AlertSubscription.table_exists():
+            AlertSubscription.create_table()
+
+    db.close_db(None)
@@ -95,7 +95,7 @@
 <a ng-href="/users/{{currentUser.id}}">
   <div class="row">
     <div class="col-sm-2">
-      <img src="{{currentUser.gravatar_url}}" size="40px" class="img-circle"/>
+      <img ng-src="{{currentUser.gravatar_url}}" size="40px" class="img-circle"/>
     </div>
     <div class="col-sm-10">
       <p><strong>{{currentUser.name}}</strong></p>
@@ -120,6 +120,22 @@
 <edit-dashboard-form dashboard="newDashboard" id="new_dashboard_dialog"></edit-dashboard-form>
 <div ng-view></div>

+{% raw %}
+<div class="container-fluid footer">
+  <hr/>
+  <div class="container">
+    <div class="row">
+      <a href="http://redash.io">re:dash</a> <span ng-bind="version"></span>
+      <small ng-if="newVersionAvailable" ng-cloak class="ng-cloak"><a href="http://version.redash.io/">(new re:dash version available)</a></small>
+      <div class="pull-right">
+        <a href="http://docs.redash.io/">Docs</a>
+        <a href="http://github.com/getredash/redash">Contribute</a>
+      </div>
+    </div>
+  </div>
+</div>
+{% endraw %}
+
 <script src="/bower_components/jquery/jquery.js"></script>

 <!-- build:js /scripts/plugins.js -->
@@ -146,7 +162,6 @@
 <script src="/bower_components/gridster/dist/jquery.gridster.js"></script>
 <script src="/bower_components/angular-growl/build/angular-growl.js"></script>
 <script src="/bower_components/pivottable/dist/pivot.js"></script>
-<script src="/bower_components/pivottable/dist/export_renderers.js"></script>
 <script src="/bower_components/cornelius/src/cornelius.js"></script>
 <script src="/bower_components/mousetrap/mousetrap.js"></script>
 <script src="/bower_components/mousetrap/plugins/global-bind/mousetrap-global-bind.js"></script>
@@ -160,12 +175,13 @@
 <script src="/bower_components/bucky/bucky.js"></script>
 <script src="/bower_components/pace/pace.js"></script>
 <script src="/bower_components/mustache/mustache.js"></script>
 <script src="/bower_components/canvg/rgbcolor.js"></script>
 <script src="/bower_components/canvg/StackBlur.js"></script>
 <script src="/bower_components/canvg/canvg.js"></script>
 <script src="/bower_components/leaflet/dist/leaflet.js"></script>
 <script src="/bower_components/angular-bootstrap-show-errors/src/showErrors.js"></script>
 <script src="/bower_components/d3/d3.min.js"></script>
+<script src="/bower_components/angular-ui-sortable/sortable.js"></script>
 <!-- endbuild -->

 <!-- build:js({.tmp,app}) /scripts/scripts.js -->
@@ -190,6 +206,7 @@
 <script src="/scripts/visualizations/box.js"></script>
 <script src="/scripts/visualizations/table.js"></script>
 <script src="/scripts/visualizations/pivot.js"></script>
+<script src="/scripts/visualizations/date_range_selector.js"></script>
 <script src="/scripts/directives/directives.js"></script>
 <script src="/scripts/directives/query_directives.js"></script>
 <script src="/scripts/directives/data_source_directives.js"></script>
@@ -200,7 +217,7 @@
 <script>
   // TODO: move currentUser & features to be an Angular service
-  var featureFlags = {{ features|safe }};
+  var clientConfig = {{ client_config|safe }};
   var currentUser = {{ user|safe }};

   currentUser.canEdit = function(object) {
@@ -10,6 +10,7 @@ angular.module('redash', [
   'angular-growl',
   'angularMoment',
   'ui.bootstrap',
+  'ui.sortable',
   'smartTable.table',
   'ngResource',
   'ngRoute',
@@ -19,7 +20,7 @@ angular.module('redash', [
   'ngSanitize'
 ]).config(['$routeProvider', '$locationProvider', '$compileProvider', 'growlProvider', 'uiSelectConfig',
   function ($routeProvider, $locationProvider, $compileProvider, growlProvider, uiSelectConfig) {
-    if (featureFlags.clientSideMetrics) {
+    if (clientConfig.clientSideMetrics) {
       Bucky.setOptions({
         host: '/api/metrics'
       });
@@ -3,7 +3,8 @@
     if (!value) {
       return "-";
     }
-    return value.toDate().toLocaleString();
+
+    return value.format(clientConfig.dateTimeFormat);
   };

 var QuerySearchCtrl = function($scope, $location, $filter, Events, Query) {
@@ -150,14 +151,16 @@
   }

 var MainCtrl = function ($scope, $location, Dashboard, notifications) {
-  if (featureFlags.clientSideMetrics) {
+  $scope.version = clientConfig.version;
+  $scope.newVersionAvailable = clientConfig.newVersionAvailable && currentUser.hasPermission("admin");
+
+  if (clientConfig.clientSideMetrics) {
     $scope.$on('$locationChangeSuccess', function(event, newLocation, oldLocation) {
       // This will be called once per actual page load.
       Bucky.sendPagePerformance();
     });
   }

   $scope.dashboards = [];
   $scope.reloadDashboards = function () {
     Dashboard.query(function (dashboards) {
@@ -16,7 +16,9 @@
       var w = new Widget(widget);

       if (w.visualization) {
-        promises.push(w.getQuery().getQueryResult().toPromise());
+        var queryResult = w.getQuery().getQueryResult();
+        if (angular.isDefined(queryResult))
+          promises.push(queryResult.toPromise());
       }

       return w;
@@ -17,7 +17,7 @@
   saveQuery = $scope.saveQuery;

   $scope.sourceMode = true;
-  $scope.canEdit = currentUser.canEdit($scope.query) || featureFlags.allowAllToEditQueries;
+  $scope.canEdit = currentUser.canEdit($scope.query) || clientConfig.allowAllToEditQueries;
   $scope.isDirty = false;

   $scope.newVisualization = undefined;
@@ -70,6 +70,7 @@
   $scope.isQueryOwner = (currentUser.id === $scope.query.user.id) || currentUser.hasPermission('admin');
   $scope.canViewSource = currentUser.hasPermission('view_source');
   $scope.canExecuteQuery = currentUser.hasPermission('execute_query');
+  $scope.canScheduleQuery = currentUser.hasPermission('schedule_query');

   $scope.dataSources = DataSource.query(function(dataSources) {
     updateSchema();
@@ -240,7 +241,7 @@
   });

   $scope.openScheduleForm = function() {
-    if (!$scope.isQueryOwner) {
+    if (!$scope.isQueryOwner || !$scope.canScheduleQuery) {
       return;
     };
@@ -49,6 +49,10 @@
       prop.type = 'file';
     }

+    if (prop.type == 'boolean') {
+      prop.type = 'checkbox';
+    }
+
     prop.required = _.contains(type.configuration_schema.required, name);
   });
 });
@@ -281,4 +281,34 @@
     }
   };
 });

+directives.directive('onDestroy', function () {
+  /* This directive can be used to invoke a callback when an element is destroyed,
+  A useful example is the following:
+  <div ng-if="includeText" on-destroy="form.text = null;">
+    <input type="text" ng-model="form.text">
+  </div>
+  */
+  return {
+    restrict: "A",
+    scope: {
+      onDestroy: "&",
+    },
+    link: function(scope, elem, attrs) {
+      console.log(scope.onDestroy);
+      scope.$on('$destroy', function() {
+        scope.onDestroy();
+      });
+    }
+  };
+});
+
+directives.directive('colorBox', function () {
+  return {
+    restrict: "E",
+    scope: {color: "="},
+    template: "<span style='width: 12px; height: 12px; background-color: {{color}}; display: inline-block; margin-right: 5px;'></span>"
+  };
+});
+
 })();
@@ -82,7 +82,7 @@ angular.module('redash.filters', []).
   }

   var html = marked(text);
-  if (featureFlags.allowScriptsInUserInput) {
+  if (clientConfig.allowScriptsInUserInput) {
     html = $sce.trustAsHtml(html);
   }
@@ -97,4 +97,21 @@ angular.module('redash.filters', []).
     }
     return $sce.trustAsHtml(text);
   }
-}]);
+}])
+
+.filter('remove', function() {
+  return function(items, item) {
+    if (items == undefined)
+      return items;
+    if (item instanceof Array) {
+      var notEquals = function(other) { return item.indexOf(other) == -1; }
+    } else {
+      var notEquals = function(other) { return item != other; }
+    }
+    var filtered = [];
+    for (var i = 0; i < items.length; i++)
+      if (notEquals(items[i]))
+        filtered.push(items[i])
+    return filtered;
+  };
+});
@@ -12,7 +12,6 @@
   'Lilac': '#A47D7C',
   'Light Green': '#B5CA92',
-  'Brown':'#A52A2A',
   'Yellow':'#ffff00',
   'Black':'#000000',
   'Gray':'#808080',
   'Pink':'#FFC0CB',
@@ -56,7 +55,7 @@
       ;

   if (moment.isMoment(this.x)) {
-    var s = '<b>' + this.x.toDate().toLocaleString() + '</b>',
+    var s = '<b>' + this.x.format(clientConfig.dateTimeFormat) + '</b>',
         pointsCount = this.points.length;

     $.each(this.points, function (i, point) {
@@ -98,19 +97,6 @@
 buttons: {
   contextButton: {
     menuItems: [
-      {
-        text: 'Toggle % Stacking',
-        onclick: function () {
-          var newStacking = "normal";
-          if (this.series[0].options.stacking == "normal") {
-            newStacking = "percent";
-          }
-
-          _.each(this.series, function (series) {
-            series.update({stacking: newStacking}, true);
-          });
-        }
-      },
       {
         text: 'Select All',
         onclick: function () {
@@ -273,7 +259,7 @@
   var chartOptions = $.extend(true, {}, defaultOptions, chartsDefaults);
   chartOptions.plotOptions.series = {
-    turboThreshold: featureFlags.highChartsTurboThreshold
+    turboThreshold: clientConfig.highChartsTurboThreshold
   }

   // $timeout makes sure that this function invoked after the DOM ready. When draw/init
@@ -718,7 +718,7 @@ angular.module("partials/smartTable.html", []).run(["$templateCache", function (
|
||||
" </tbody>\n" +
|
||||
" <tfoot ng-show=\"isPaginationEnabled\">\n" +
|
||||
" <tr class=\"smart-table-footer-row\">\n" +
|
||||
" <td colspan=\"{{columns.length}}\">\n" +
|
||||
" <td class=\"text-center\" colspan=\"{{columns.length}}\">\n" +
|
||||
" <div pagination-smart-table=\"\" num-pages=\"numberOfPages\" max-size=\"maxSize\" current-page=\"currentPage\"></div>\n" +
|
||||
" </td>\n" +
|
||||
" </tr>\n" +
|
||||
|
||||
@@ -43,10 +43,10 @@
 if (angular.isNumber(v)) {
   columnTypes[k] = 'float';
 } else if (_.isString(v) && v.match(/^\d{4}-\d{2}-\d{2}T/)) {
-  row[k] = moment(v);
+  row[k] = moment.utc(v);
   columnTypes[k] = 'datetime';
-} else if (_.isString(v) && v.match(/^\d{4}-\d{2}-\d{2}/)) {
-  row[k] = moment(v);
+} else if (_.isString(v) && v.match(/^\d{4}-\d{2}-\d{2}$/)) {
+  row[k] = moment.utc(v);
   columnTypes[k] = 'date';
 } else if (typeof(v) == 'object' && v !== null) {
   row[k] = JSON.stringify(v);
@@ -186,9 +186,38 @@
   }

   return this.filteredData;
 };

+/**
+ * Helper function to add a point into a series, also checks whether the point is within dateRange
+ */
+QueryResult.prototype._addPointToSeriesIfInDateRange = function (point, seriesCollection, seriesName, dateRange) {
+  if (dateRange && moment.isMoment(point.x)) {
+    // if dateRange is provided and x Axis is of type datetime
+    if (point.x.isBefore(dateRange.min) || point.x.isAfter(dateRange.max)) {
+      // if the point's date isn't within dateRange, then we will not add this point to series
+      return;
+    }
+  }
+  this._addPointToSeries(point, seriesCollection, seriesName);
+}
+
+/**
+ * Helper function to add a point into a series
+ */
+QueryResult.prototype._addPointToSeries = function (point, seriesCollection, seriesName) {
+  if (seriesCollection[seriesName] == undefined) {
+    seriesCollection[seriesName] = {
+      name: seriesName,
+      type: 'column',
+      data: []
+    };
+  }
+
+  seriesCollection[seriesName]['data'].push(point);
+};
+
-QueryResult.prototype.getChartData = function (mapping) {
+QueryResult.prototype.getChartData = function (mapping, dateRange) {
   var series = {};

   _.each(this.getData(), function (row) {
@@ -199,7 +228,7 @@
   _.each(row, function (value, definition) {
     var name = definition.split("::")[0] || definition.split("__")[0];
-    var type = definition.split("::")[1] || definition.split("__")[0];
+    var type = definition.split("::")[1] || definition.split("__")[1];
     if (mapping) {
       type = mapping[definition];
     }
@@ -229,26 +258,15 @@
       }
     });

-    var addPointToSeries = function (seriesName, point) {
-      if (series[seriesName] == undefined) {
-        series[seriesName] = {
-          name: seriesName,
-          type: 'column',
-          data: []
-        }
-      }
-
-      series[seriesName]['data'].push(point);
-    }
-
     if (seriesName === undefined) {
       _.each(yValues, function (yValue, seriesName) {
-        addPointToSeries(seriesName, {'x': xValue, 'y': yValue});
-      });
-    } else {
-      addPointToSeries(seriesName, point);
+        this._addPointToSeriesIfInDateRange({'x': xValue, 'y': yValue}, series, seriesName, dateRange);
+      }.bind(this));
     }
-  });
+    else {
+      this._addPointToSeriesIfInDateRange(point, series, seriesName, dateRange);
+    }
+  }.bind(this));

   return _.values(series);
 };
@@ -3,12 +3,16 @@
 chartVisualization.config(['VisualizationProvider', function (VisualizationProvider) {
   var renderTemplate = '<chart-renderer options="visualization.options" query-result="queryResult"></chart-renderer>';
-  var editTemplate = '<chart-editor></chart-editor>';
+  var editTemplate = '<chart-editor options="visualization.options" query-result="queryResult"></chart-editor>';

   var defaultOptions = {
-    'series': {
-      // 'type': 'column',
-      'stacking': null
-    }
+    globalSeriesType: 'column',
+    sortX: true,
+    yAxis: [{type: 'linear'}, {type: 'linear', opposite: true}],
+    xAxis: {type: 'datetime', labels: {enabled: true}},
+    series: {stacking: null},
+    seriesOptions: {},
+    columnMapping: {}
   };

   VisualizationProvider.registerVisualization({
@@ -27,30 +31,62 @@
       queryResult: '=',
       options: '=?'
     },
-    template: "<chart options='chartOptions' series='chartSeries' class='graph'></chart>",
+    templateUrl: '/views/visualizations/chart.html',
     replace: false,
     controller: ['$scope', function ($scope) {
       $scope.chartSeries = [];
       $scope.chartOptions = {};
+      $scope.dateRangeEnabled = function() {
+        return $scope.options.xAxis && $scope.options.xAxis.type === 'datetime';
+      }
+      $scope.dateRange = { min: moment('1970-01-01'), max: moment() };

-      var reloadData = function(data) {
+      /**
+       * Update date range by finding date extremes
+       *
+       * ISSUE: chart.getExtreme() does not support getting Moment object out of box
+       * TODO: Find a faster way to do this
+       */
+      var setDateRangeToExtreme = function (allSeries) {
+        if ($scope.dateRangeEnabled() && allSeries && allSeries.length > 0) {
+          $scope.dateRange = {
+            min: moment.min.apply(undefined, _.map(allSeries, function (series) {
+              return moment.min(_.pluck(series.data, 'x'));
+            })),
+            max: moment.max.apply(undefined, _.map(allSeries, function (series) {
+              return moment.max(_.pluck(series.data, 'x'));
+            }))
+          };
+        }
+      };
+
+      var reloadData = function(data, options) {
+        options = options || {};
         if (!data || ($scope.queryResult && $scope.queryResult.getData()) == null) {
           $scope.chartSeries.splice(0, $scope.chartSeries.length);
         } else {
           $scope.chartSeries.splice(0, $scope.chartSeries.length);
           var allSeries = $scope.queryResult.getChartData($scope.options.columnMapping);
+          if (!options.preventSetExtreme) {
+            setDateRangeToExtreme(allSeries);
+          }
+          var allSeries = $scope.queryResult.getChartData(
+            $scope.options.columnMapping,
+            $scope.dateRangeEnabled() ? $scope.dateRange : null
+          );

-          _.each($scope.queryResult.getChartData($scope.options.columnMapping), function (s) {
+          _.each(allSeries, function (series) {
             var additional = {'stacking': 'normal'};
             if ('globalSeriesType' in $scope.options) {
               additional['type'] = $scope.options.globalSeriesType;
             }
-            if ($scope.options.seriesOptions && $scope.options.seriesOptions[s.name]) {
-              additional = $scope.options.seriesOptions[s.name];
+            if ($scope.options.seriesOptions && $scope.options.seriesOptions[series.name]) {
+              additional = $scope.options.seriesOptions[series.name];
               if (!additional.name || additional.name == "") {
-                additional.name = s.name;
+                additional.name = series.name;
               }
             }
-            $scope.chartSeries.push(_.extend(s, additional));
+            $scope.chartSeries.push(_.extend(series, additional));
           });
         };
       };
@@ -73,6 +109,22 @@
       $scope.$watch('queryResult && queryResult.getData()', function (data) {
         reloadData(data);
       });
+
+      $scope.$watch('dateRange.min', function(minDateRange, oldMinDateRange) {
+        if (!minDateRange.isSame(oldMinDateRange)) {
+          reloadData(true, {
+            preventSetExtreme: true
+          });
+        }
+      });
+
+      $scope.$watch('dateRange.max', function (maxDateRange, oldMaxDateRange) {
+        if (!maxDateRange.isSame(oldMaxDateRange)) {
+          reloadData(true, {
+            preventSetExtreme: true
+          });
+        }
+      });
     }]
   };
 });
@@ -81,198 +133,135 @@
     return {
       restrict: 'E',
       templateUrl: '/views/visualizations/chart_editor.html',
+      scope: {
+        queryResult: '=',
+        options: '=?'
+      },
       link: function (scope, element, attrs) {
+        scope.palette = ColorPalette;

-        scope.seriesTypes = {
-          'Line': 'line',
-          'Column': 'column',
-          'Area': 'area',
-          'Scatter': 'scatter',
-          'Pie': 'pie'
-        };
-
-        scope.globalSeriesType = scope.visualization.options.globalSeriesType || 'column';
+        scope.colors = _.extend({'Automatic': null}, ColorPalette);

         scope.stackingOptions = {
-          "None": "none",
-          "Normal": "normal",
-          "Percent": "percent"
+          'Disabled': null,
+          'Enabled': 'normal',
+          'Percent': 'percent'
         };

-        scope.xAxisOptions = {
-          "Date/Time": "datetime",
-          "Linear": "linear",
-          "Logarithmic": "logarithmic",
-          "Category": "category"
-        };
+        scope.chartTypes = {
+          'line': {name: 'Line', icon: 'line-chart'},
+          'column': {name: 'Bar', icon: 'bar-chart'},
+          'area': {name: 'Area', icon: 'area-chart'},
+          'pie': {name: 'Pie', icon: 'pie-chart'},
+          'scatter': {name: 'Scatter', icon: 'circle-o'}
+        };

-        scope.yAxisOptions = {
-          "Linear": "linear",
-          "Logarithmic": "logarithmic"
-        };
+        scope.chartTypeChanged = function() {
+          _.each(scope.options.seriesOptions, function(options) {
+            options.type = scope.options.globalSeriesType;
+          });
+        }

-        scope.xAxisType = "datetime";
-        scope.yAxisType = "linear";
-        scope.stacking = "none";
+        scope.xAxisScales = ['datetime', 'linear', 'logarithmic', 'category'];
+        scope.yAxisScales = ['linear', 'logarithmic'];

-        scope.columnTypes = {
-          "X": "x",
-          "Y": "y",
-          "Series": "series",
-          "Unused": "unused"
-        };
-
-        scope.series = [];
-
-        scope.columnTypeSelection = {};
-
-        var chartOptionsUnwatch = null,
-            columnsWatch = null;
-
-        scope.$watch('globalSeriesType', function(type, old) {
-          scope.visualization.options.globalSeriesType = type;
-
-          if (type && old && type !== old && scope.visualization.options.seriesOptions) {
-            _.each(scope.visualization.options.seriesOptions, function(sOptions) {
-              sOptions.type = type;
-            });
-          }
-        });
+        var refreshColumns = function() {
+          scope.columns = scope.queryResult.getColumns();
+          scope.columnNames = _.pluck(scope.columns, 'name');
+          if (scope.columnNames.length > 0)
+            _.each(_.difference(_.keys(scope.options.columnMapping), scope.columnNames), function(column) {
+              delete scope.options.columnMapping[column];
+            });
+        };
+        refreshColumns();
+
+        var refreshColumnsAndForm = function() {
+          refreshColumns();
+          if (!scope.queryResult.getData() || scope.queryResult.getData().length == 0 || scope.columns.length == 0)
+            return;
+          scope.form.yAxisColumns = _.intersection(scope.form.yAxisColumns, scope.columnNames);
+          if (!_.contains(scope.columnNames, scope.form.xAxisColumn))
+            scope.form.xAxisColumn = undefined;
+          if (!_.contains(scope.columnNames, scope.form.groupby))
+            scope.form.groupby = undefined;
+        }
+
+        var refreshSeries = function() {
+          var seriesNames = _.pluck(scope.queryResult.getChartData(scope.options.columnMapping), 'name');
+          var existing = _.keys(scope.options.seriesOptions);
+          _.each(_.difference(seriesNames, existing), function(name) {
+            scope.options.seriesOptions[name] = {
+              'type': scope.options.globalSeriesType,
+              'yAxis': 0,
+            };
+            scope.form.seriesList.push(name);
+          });
+          _.each(_.difference(existing, seriesNames), function(name) {
+            scope.form.seriesList = _.without(scope.form.seriesList, name)
+            delete scope.options.seriesOptions[name];
+          });
+        };
+
+        scope.$watch('options.columnMapping', refreshSeries, true);
+
+        scope.$watch(function() {return [scope.queryResult.getId(), scope.queryResult.status]}, function(changed) {
+          if (!changed[0]) {
+            return;
+          }
+          refreshColumnsAndForm();
+          refreshSeries();
+        }, true);
+
+        scope.form = {
+          yAxisColumns: [],
+          seriesList: _.sortBy(_.keys(scope.options.seriesOptions), function(name) {
+            return scope.options.seriesOptions[name].zIndex;
+          })
+        };
+
+        scope.$watchCollection('form.seriesList', function(value, old) {
+          _.each(value, function(name, index) {
+            scope.options.seriesOptions[name].zIndex = index;
+            scope.options.seriesOptions[name].index = 0; // is this needed?
+          });
+        });
+
+        var setColumnRole = function(role, column) {
+          scope.options.columnMapping[column] = role;
+        }
+        var unsetColumn = function(column) {
+          setColumnRole('unused', column);
+        }
+
+        scope.$watchCollection('form.yAxisColumns', function(value, old) {
+          _.each(old, unsetColumn);
+          _.each(value, _.partial(setColumnRole, 'y'));
+        });
+
+        scope.$watch('form.xAxisColumn', function(value, old) {
+          if (old !== undefined)
+            unsetColumn(old);
+          if (value !== undefined)
+            setColumnRole('x', value);
+        });
+
+        scope.$watch('form.groupby', function(value, old) {
+          if (old !== undefined)
+            unsetColumn(old)
+          if (value !== undefined) {
+            setColumnRole('series', value);
+          }
+        });

-        scope.$watch('visualization.type', function (visualizationType) {
-          if (visualizationType == 'CHART') {
-            if (scope.visualization.options.series.stacking === null) {
-              scope.stacking = "none";
-            } else if (scope.visualization.options.series.stacking === undefined) {
-              scope.stacking = "normal";
-            } else {
-              scope.stacking = scope.visualization.options.series.stacking;
-            }
-
-            if (scope.visualization.options.sortX === undefined) {
-              scope.visualization.options.sortX = true;
-            }
-
-            var refreshSeries = function() {
-              scope.series = _.map(scope.queryResult.getChartData(scope.visualization.options.columnMapping), function (s) { return s.name; });
-
-              // TODO: remove uneeded ones?
-              if (scope.visualization.options.seriesOptions == undefined) {
-                scope.visualization.options.seriesOptions = {
-                  type: scope.globalSeriesType
-                };
-              };
-
-              _.each(scope.series, function(s, i) {
-                if (scope.visualization.options.seriesOptions[s] == undefined) {
-                  scope.visualization.options.seriesOptions[s] = {'type': scope.visualization.options.globalSeriesType, 'yAxis': 0};
-                }
-                scope.visualization.options.seriesOptions[s].zIndex = scope.visualization.options.seriesOptions[s].zIndex === undefined ? i : scope.visualization.options.seriesOptions[s].zIndex;
-                scope.visualization.options.seriesOptions[s].index = scope.visualization.options.seriesOptions[s].index === undefined ? i : scope.visualization.options.seriesOptions[s].index;
-              });
-              scope.zIndexes = _.range(scope.series.length);
-              scope.yAxes = [[0, 'left'], [1, 'right']];
-            };
-
-            var initColumnMapping = function() {
-              scope.columns = scope.queryResult.getColumns();
-
-              if (scope.visualization.options.columnMapping == undefined) {
-                scope.visualization.options.columnMapping = {};
-              }
-
-              scope.columnTypeSelection = scope.visualization.options.columnMapping;
-
-              _.each(scope.columns, function(column) {
-                var definition = column.name.split("::"),
-                    definedColumns = _.keys(scope.visualization.options.columnMapping);
-
-                if (_.indexOf(definedColumns, column.name) != -1) {
-                  // Skip already defined columns.
-                  return;
-                };
-
-                if (definition.length == 1) {
-                  scope.columnTypeSelection[column.name] = scope.visualization.options.columnMapping[column.name] = 'unused';
-                } else if (definition == 'multi-filter') {
-                  scope.columnTypeSelection[column.name] = scope.visualization.options.columnMapping[column.name] = 'series';
-                } else if (_.indexOf(_.values(scope.columnTypes), definition[1]) != -1) {
-                  scope.columnTypeSelection[column.name] = scope.visualization.options.columnMapping[column.name] = definition[1];
-                } else {
-                  scope.columnTypeSelection[column.name] = scope.visualization.options.columnMapping[column.name] = 'unused';
-                }
-              });
-            };
-
-            columnsWatch = scope.$watch('queryResult.getId()', function(id) {
-              if (!id) {
-                return;
-              }
-
-              initColumnMapping();
-              refreshSeries();
-            });
-
-            scope.$watchCollection('columnTypeSelection', function(selections) {
-              _.each(scope.columnTypeSelection, function(type, name) {
-                scope.visualization.options.columnMapping[name] = type;
-              });
-
-              refreshSeries();
-            });
-
-            chartOptionsUnwatch = scope.$watch("stacking", function (stacking) {
-              if (stacking == "none") {
-                scope.visualization.options.series.stacking = null;
-              } else {
-                scope.visualization.options.series.stacking = stacking;
-              }
-            });
-
-            scope.visualization.options.xAxis = scope.visualization.options.xAxis || {};
-            scope.visualization.options.xAxis.labels = scope.visualization.options.xAxis.labels || {};
-            if (scope.visualization.options.xAxis.labels.enabled === undefined) {
-              scope.visualization.options.xAxis.labels.enabled = true;
-            }
-
-            scope.xAxisType = (scope.visualization.options.xAxis && scope.visualization.options.xAxis.type) || scope.xAxisType;
-
-            xAxisUnwatch = scope.$watch("xAxisType", function (xAxisType) {
-              scope.visualization.options.xAxis = scope.visualization.options.xAxis || {};
-              scope.visualization.options.xAxis.type = xAxisType;
-            });
-
-            scope.visualization.options.yAxis = scope.visualization.options.yAxis || [{type: 'linear'}, {type: 'linear', opposite: true}];
-            scope.yAxisType = (scope.visualization.options.yAxis && scope.visualization.options.yAxis[0].type) || scope.yAxisType;
-
-            yAxisUnwatch = scope.$watch("yAxisType", function (yAxisType) {
-              scope.visualization.options.yAxis[0].type = yAxisType;
-              scope.visualization.options.yAxis[1].type = yAxisType;
-            });
-          } else {
-            if (chartOptionsUnwatch) {
-              chartOptionsUnwatch();
-              chartOptionsUnwatch = null;
-            }
-
-            if (columnsWatch) {
-              columnWatch();
-              columnWatch = null;
-            }
-
-            if (xAxisUnwatch) {
-              xAxisUnwatch();
-              xAxisUnwatch = null;
-            }
-
-            if (yAxisUnwatch) {
-              yAxisUnwatch();
-              yAxisUnwatch = null;
-            }
-          }
-        });
+        if (scope.columnNames)
+          _.each(scope.options.columnMapping, function(value, key) {
+            if (scope.columnNames.length > 0 && !_.contains(scope.columnNames, key))
+              return;
+            if (value == 'x')
+              scope.form.xAxisColumn = key;
+            else if (value == 'y')
+              scope.form.yAxisColumns.push(key);
+            else if (value == 'series')
+              scope.form.groupby = key;
+          });
       }
     }
   });
rd_ui/app/scripts/visualizations/date_range_selector.js (new file, 43 lines)
@@ -0,0 +1,43 @@
+(function (window) {
+  var module = angular.module('redash.visualization');
+
+  module.directive('dateRangeSelector', [function () {
+    return {
+      restrict: 'E',
+      scope: {
+        dateRange: "="
+      },
+      templateUrl: '/views/visualizations/date_range_selector.html',
+      replace: true,
+      controller: ['$scope', function ($scope) {
+        $scope.dateRangeHuman = {
+          min: null,
+          max: null
+        };
+
+        $scope.$watch('dateRange', function (dateRange, oldDateRange, scope) {
+          scope.dateRangeHuman.min = dateRange.min.format('YYYY-MM-DD');
+          scope.dateRangeHuman.max = dateRange.max.format('YYYY-MM-DD');
+        });
+
+        $scope.$watch('dateRangeHuman', function (dateRangeHuman, oldDateRangeHuman, scope) {
+          var newDateRangeMin = moment.utc(dateRangeHuman.min);
+          var newDateRangeMax = moment.utc(dateRangeHuman.max);
+          if (!newDateRangeMin ||
+              !newDateRangeMax ||
+              !newDateRangeMin.isValid() ||
+              !newDateRangeMax.isValid() ||
+              newDateRangeMin.isAfter(newDateRangeMax)) {
+            // Prevent invalid date input
+            // No need to show up a notification to user here, it will be too noisy.
+            // Instead, simply preventing changes to the scope silently.
+            scope.dateRangeHuman = oldDateRangeHuman;
+            return;
+          }
+          scope.dateRange.min = newDateRangeMin;
+          scope.dateRange.max = newDateRangeMax;
+        }, true);
+      }]
+    }
+  }]);
+})(window);
@@ -19,10 +19,8 @@ renderers.directive('pivotTableRenderer', function () {
     // We need to give the pivot table its own copy of the data, because its change
     // it which interferes with other visualizations.
     var data = $.extend(true, [], $scope.queryResult.getData());
-    var renderers = $.extend($.pivotUtilities.renderers,
-                             $.pivotUtilities.export_renderers)
     $(element).pivotUI(data, {
-      renderers: renderers
+      renderers: $.pivotUtilities.renderers
     }, true);
   }
 });
@@ -79,14 +79,14 @@
 } else if (columnType === 'date') {
   columnDefinition.formatFunction = function (value) {
     if (value && moment.isMoment(value)) {
-      return value.toDate().toLocaleDateString();
+      return value.format(clientConfig.dateFormat);
     }
     return value;
   };
 } else if (columnType === 'datetime') {
   columnDefinition.formatFunction = function (value) {
     if (value && moment.isMoment(value)) {
-      return value.toDate().toLocaleString();
+      return value.format(clientConfig.dateTimeFormat);
     }
     return value;
   };
@@ -432,16 +432,54 @@ div.table-name {
   padding: 30px;
 }

-/*
-  bootstrap's hidden-xs class adds display:block when not hidden
-  use this class when you need to keep the original display value
-*/
-@media (max-width: 767px) {
-  .rd-hidden-xs {
-    display: none !important;
-  }
-}
-
 .log-container {
   margin-bottom: 50px;
 }

+/* Footer */
+
+.footer {
+  color: #818d9f;
+  padding-bottom: 30px;
+}
+
+.footer a {
+  color: #818d9f;
+  margin-left: 20px;
+}
+
+.col-table .missing-value {
+  color: #b94a48;
+}
+
+.col-table .super-small-input {
+  padding-left: 3px;
+  height: 24px;
+}
+
+.col-table .ui-select-toggle, .col-table .ui-select-search {
+  padding: 2px;
+  padding-left: 5px;
+  height: 24px;
+}
+
+.clearable button {
+  border-top-right-radius: 0;
+  border-bottom-right-radius: 0;
+}
+
+/* Immediately apply ng-cloak, instead of waiting for angular.js to load: */
+[ng\:cloak], [ng-cloak], [data-ng-cloak], [x-ng-cloak], .ng-cloak, .x-ng-cloak {
+  display: none !important;
+}
+
 /* Smart Table */

 .smart-table {
   margin-bottom: 0px;
 }

 .smart-table .pagination {
   margin-bottom: 5px;
   margin-top: 10px;
 }
@@ -10,7 +10,7 @@
 <div class="form-group" ng-class='{"has-error": !inner.input.$valid}' ng-form="inner" ng-repeat="(name, input) in type.configuration_schema.properties">
   <label>{{input.title || name | capitalize}}</label>
   <input name="input" type="{{input.type}}" class="form-control" ng-model="dataSource.options[name]" ng-required="input.required"
-         ng-if="input.type !== 'file'" accesskey="tab">
+         ng-if="input.type !== 'file'" accesskey="tab" placeholder="{{input.default}}">

   <input name="input" type="file" class="form-control" ng-model="files[name]" ng-required="input.required && !dataSource.options[name]"
          base-sixty-four-input
@@ -17,4 +17,5 @@
 <a href="/admin/status" class="list-group-item">Status</a>
 </div>
 </div>
+</div>
 </div>
@@ -26,7 +26,7 @@
 </div>

 <div class="col-lg-2">
-  <div class="rd-hidden-xs pull-right">
+  <div class="pull-right">
     <query-source-link></query-source-link>
   </div>
 </div>
@@ -68,7 +68,7 @@
 </button>
 <query-formatter></query-formatter>
 <span class="pull-right">
-  <button class="btn btn-xs btn-default rd-hidden-xs" ng-click="duplicateQuery()">
+  <button class="btn btn-xs btn-default" ng-click="duplicateQuery()">
     <span class="glyphicon glyphicon-share-alt"></span> Fork
   </button>
@@ -103,7 +103,7 @@
 </div>
 <hr ng-if="sourceMode">
 <div class="row">
-  <div class="col-lg-3 rd-hidden-xs">
+  <div class="col-lg-3">
     <p>
       <span class="glyphicon glyphicon-user"></span>
       <span class="text-muted">Created By </span>
@@ -148,7 +148,7 @@
 <p>
   <a class="btn btn-primary btn-sm" ng-disabled="queryExecuting || !queryResult.getData()" query-result-link target="_self">
     <span class="glyphicon glyphicon-cloud-download"></span>
-    <span class="rd-hidden-xs">Download Dataset</span>
+    <span>Download Dataset</span>
   </a>

   <a class="btn btn-warning btn-sm" ng-disabled="queryExecuting" data-toggle="modal" data-target="#archive-confirmation-modal"
@@ -26,7 +26,7 @@
 <span class="help-block error" ng-if="userForm.passwordRepeat.$error.compareTo">Passwords don't match.</span>
 </div>
 <div class="form-group">
-  <button class="btn btn-primary">Save</button>
+  <button class="btn btn-primary">Create</button>
 </div>
 </form>
 </div>
rd_ui/app/views/visualizations/chart.html (new file, 8 lines)
@@ -0,0 +1,8 @@
+<div>
+  <section class="clearfix">
+    <date-range-selector ng-if="dateRangeEnabled()" date-range='dateRange' class='pull-right'></date-range-selector>
+  </section>
+  <section>
+    <chart options='chartOptions' series='chartSeries' class='graph'></chart>
+  </section>
+</div>
@@ -1,169 +1,236 @@
<div class="form-horizontal">
<div class="panel panel-default">
<form class="form-horizontal" name="chartEditor">

<div class="row">
<div class="col-md-6">
<div class="form-group row">
<label class="control-label col-sm-5">Chart Type</label>
<div class="col-sm-7" ng-if="chartTypes"><!--the if is a weird workaround-->
<ui-select ng-model="options.globalSeriesType" on-select="chartTypeChanged()">
<ui-select-match placeholder="Choose chart type..."><i class="fa fa-{{$select.selected.value.icon}}"></i> {{$select.selected.value.name}}</ui-select-match>
<ui-select-choices repeat="info.chartType as (chartType, info) in chartTypes">
<div><i class="fa fa-{{info.value.icon}}"></i><span> </span><span ng-bind-html="info.value.name | highlight: $select.search"></span></div>
</ui-select-choices>
</ui-select>
</div>
</div>
</div>

<div class="col-md-6">
<div class="form-group row">
<label class="control-label col-sm-5">Stacking</label>
<div class="col-sm-7" ng-if="stackingOptions"><!--the if is a weird workaround-->
<ui-select ng-model="options.series.stacking" ng-disabled="['line', 'area', 'column'].indexOf(options.globalSeriesType) == -1">
<ui-select-match placeholder="Choose Stacking...">{{$select.selected.key | capitalize}}</ui-select-match>
<ui-select-choices repeat="value.value as (key, value) in stackingOptions">
<div ng-bind-html="value.key | highlight: $select.search"></div>
</ui-select-choices>
</ui-select>
</div>
</div>
</div>
</div>

<div class="row">
<div class="col-md-6">
<div class="form-group row" ng-class="{'has-error': chartEditor.xAxisColumn.$invalid}">
<label class="control-label col-sm-5">X Column</label>
<div class="col-sm-7">
<ui-select name="xAxisColumn" required ng-model="form.xAxisColumn">
<ui-select-match placeholder="Choose column...">{{$select.selected}}</ui-select-match>
<ui-select-choices repeat="column in columnNames | remove:form.yAxisColumns | remove:form.groupby">
<span ng-bind-html="column | highlight: $select.search"></span><span> </span><small class="text-muted" ng-bind="columns[column].type"></small>
</ui-select-choices>
</ui-select>
</div>
</div>
</div>
<div class="col-md-6">
<div class="form-group row">
<label class="control-label col-sm-5">Group by</label>
<div class="col-sm-7">
<ui-select name="groupby" ng-model="form.groupby" class="clearable">
<ui-select-match allow-clear="true" placeholder="Choose column...">{{$select.selected}}</ui-select-match>
<ui-select-choices repeat="column in columnNames | remove:form.yAxisColumns | remove:form.xAxisColumn">
<span ng-bind-html="column | highlight: $select.search"></span><span> </span><small class="text-muted" ng-bind="columns[column].type"></small>
</ui-select-choices>
</ui-select>
</div>
</div>
</div>
</div>

<div class="row">
<div class="col-md-6">
<!-- not using regular validation (chartEditor.yAxisColumns.$invalid) due to a bug in ui-select with multiple choices-->
<div class="form-group row" ng-class="{'has-error': !form.yAxisColumns || form.yAxisColumns.length == 0}">
<label class="control-label col-sm-5">Y Columns</label>
<div class="col-sm-7">
<ui-select multiple name="yAxisColumns" required ng-model="form.yAxisColumns">
<ui-select-match placeholder="Choose columns...">{{$item}}</ui-select-match>
<ui-select-choices repeat="column in columnNames | remove:form.groupby | remove:form.xAxisColumn">
<span ng-bind-html="column | highlight: $select.search"></span><span> </span><small class="text-muted" ng-bind="columns[column].type"></small>
</ui-select-choices>
</ui-select>
</div>
</div>
</div>
</div>

<div class="row">
<div class="col-md-6">
<div class="panel panel-default">
<div class="panel-heading">
<h3 class="panel-title">X Axis</h3>
</div>
<div class="panel-body">
<div class="form-group">
<label class="control-label col-sm-2">Stacking</label>
<div class="row">
<div class="col-sm-10">
<select required ng-model="stacking"
        ng-options="value as key for (key, value) in stackingOptions"
        class="form-control"></select>
<div class="col-md-6">
<div class="form-group row">
<label class="control-label col-sm-3">Scale</label>
<div class="col-sm-9">
<ui-select ng-model="options.xAxis.type">
<ui-select-match placeholder="Choose Scale...">{{$select.selected | capitalize}}</ui-select-match>
<ui-select-choices repeat="scaleType in xAxisScales">
<div ng-bind-html="scaleType | capitalize | highlight: $select.search"></div>
</ui-select-choices>
</ui-select>
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-2">X Axis Type</label>
<div class="col-sm-10">
<select required ng-model="xAxisType" ng-options="value as key for (key, value) in xAxisOptions"
        class="form-control"></select>
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-2">Y Axis Type</label>
<div class="col-sm-10">
<select required ng-model="yAxisType" ng-options="value as key for (key, value) in yAxisOptions"
        class="form-control"></select>
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-2">Series Type</label>
<div class="col-sm-10">
<select required ng-options="value as key for (key, value) in seriesTypes"
        ng-model="globalSeriesType" class="form-control"></select>
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-2">y Axis min</label>
<div class="col-sm-10">
<input name="yAxisMin" type="number" class="form-control"
       ng-model="visualization.options.yAxis.min"
       placeholder="Auto">
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-2">y Axis max</label>
<div class="col-sm-10">
<input name="yAxisMax" type="number" class="form-control"
       ng-model="visualization.options.yAxis.max"
       placeholder="Auto">
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-2">y Axis Name</label>
<div class="col-sm-10">
<input name="yAxisName" type="text" class="form-control"
       ng-model="visualization.options.yAxis[0].title.text"
       placeholder="">
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-2">Sort X Values</label>
<div class="col-sm-10">
<input name="sortX" type="checkbox" class="form-control"
       ng-model="visualization.options.sortX">
<div class="col-md-6">
<div class="form-group row">
<label class="control-label col-sm-8">Sort Values</label>
<div class="col-sm-4">
<input type="checkbox" ng-model="options.sortX">
</div>
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-2">Show X Axis Labels</label>
<div class="col-sm-10">
<input name="sortX" type="checkbox" class="form-control"
       ng-model="visualization.options.xAxis.labels.enabled">
</div>
<div class="row">
<div class="col-md-6">
<div class="form-group row">
<label class="control-label col-sm-3">Name</label>
<div class="col-sm-9">
<input ng-model="options.xAxis.title.text" type="text" class="form-control"></input>
</div>
</div>
</div>
<div class="col-md-6">
<div class="form-group row">
<label class="control-label col-sm-8">Show Labels</label>
<div class="col-sm-4">
<input type="checkbox" ng-model="options.xAxis.labels.enabled">
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<div ng-repeat="yAxis in options.yAxis" class="col-md-3">
<div class="panel panel-default">
<div class="row">
<div class="col-lg-6">
<div class="list-group">
<div class="list-group-item active">
Columns Mapping
</div>
<div class="list-group-item">
<div class="form-group" ng-repeat="column in columns">
<label class="control-label col-sm-4">{{column.name}}</label>
<div class="col-sm-8">
<select ng-options="value as key for (key, value) in columnTypes" class="form-control"
        ng-model="columnTypeSelection[column.name]"></select>
</div>
</div>
</div>
</div>
<div class="panel-heading">
<h3 class="panel-title">{{$index == 0 ? 'Left' : 'Right'}} Y Axis</h3>
</div>
<div class="col-lg-6" ng-if="series.length > 0">
<div class="list-group" ng-repeat="seriesName in series">
<div class="list-group-item active">
{{seriesName}}
</div>
<div class="list-group-item">
<div class="form-group">
<label class="control-label col-sm-3">Type</label>
<div class="col-sm-9">
<select required ng-model="visualization.options.seriesOptions[seriesName].type"
        ng-options="value as key for (key, value) in seriesTypes"
        class="form-control"></select>
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-3">zIndex</label>
<div class="col-sm-9">
<select required ng-model="visualization.options.seriesOptions[seriesName].zIndex"
        ng-options="o as o for o in zIndexes"
        class="form-control"></select>
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-3">Index</label>
<div class="col-sm-9">
<select required ng-model="visualization.options.seriesOptions[seriesName].index"
        ng-options="o as o for o in zIndexes"
        class="form-control"></select>
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-3">y Axis</label>
<div class="col-sm-9">
<select required ng-model="visualization.options.seriesOptions[seriesName].yAxis"
        ng-options="o[0] as o[1] for o in yAxes"
        class="form-control"></select>
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-3">Name</label>
<div class="col-sm-9">
<input name="seriesName" type="text" class="form-control"
       ng-model="visualization.options.seriesOptions[seriesName].name"
       placeholder="{{seriesName}}">
</div>
</div>
<div class="form-group">
<label class="control-label col-sm-3">Color</label>
<div class="col-sm-9">
<select class="form-control" ng-model="visualization.options.seriesOptions[seriesName].color" ng-options="val as key for (key,val) in palette"></select>
</div>
</div>
</div>
<div class="panel-body">
<div class="form-group row">
<label class="control-label col-sm-3">Scale</label>
<div class="col-sm-9">
<ui-select ng-model="yAxis.type">
<ui-select-match placeholder="Choose Scale...">{{$select.selected | capitalize}}</ui-select-match>
<ui-select-choices repeat="scaleType in yAxisScales">
<div ng-bind-html="scaleType | capitalize | highlight: $select.search"></div>
</ui-select-choices>
</ui-select>
</div>
</div>
<div class="form-group row">
<label class="control-label col-sm-3">Name</label>
<div class="col-sm-9">
<input ng-model="yAxis.title.text" type="text" class="form-control"></input>
</div>
</div>
</div>
</div>
</div>
</div>

<div class="row">
<div class="col-md-12">
<div class="panel panel-default">
<div class="panel-heading"><h3 class="panel-title">Series Options</h3></div>
<div>
<table class="table table-condensed col-table">
<thead>
<th>zIndex</th>
<th>Column</th>
<th>Left Y Axis</th>
<th>Right Y Axis</th>
<th>Label</th>
<th>Color</th>
<th>Type</th>
</thead>
<tbody ui-sortable ng-model="form.seriesList">
<tr ng-repeat="name in form.seriesList">
<td style="cursor: move;"><i class="fa fa-arrows-v"></i> <span ng-bind="options.seriesOptions[name].zIndex + 1"></span></td>
<td>{{name}}</td>
<td>
<input type="radio" ng-value="0" ng-model="options.seriesOptions[name].yAxis">
</td>
<td>
<input type="radio" ng-value="1" ng-model="options.seriesOptions[name].yAxis">
</td>
<td style="padding: 3px; width: 140px;">
<input placeholder="{{name}}" class="form-control input-sm super-small-input" type="text" ng-model="options.seriesOptions[name].name">
</td>
<td style="padding: 3px; width: 35px;">
<ui-select ng-model="options.seriesOptions[name].color">
<ui-select-match><color-box color="$select.selected.value"></color-box></ui-select-match>
<ui-select-choices repeat="color.value as (key, color) in colors">
<color-box color="color.value"></color-box><span ng-bind-html="color.key | capitalize | highlight: $select.search"></span>
</ui-select-choices>
</ui-select>
</td>
<td style="padding: 3px; width: 105px;">
<ui-select ng-model="options.seriesOptions[name].type">
<ui-select-match placeholder="Chart Type"><i class="fa fa-{{$select.selected.value.icon}}"></i> {{$select.selected.value.name}}</ui-select-match>
<ui-select-choices repeat="info.chartType as (chartType, info) in chartTypes">
<div><i class="fa fa-{{info.value.icon}}"></i><span> </span><span ng-bind-html="info.value.name | highlight: $select.search"></span></div>
</ui-select-choices>
</ui-select>
</td>
</tr>
</tbody>
</table>
</div>
</div>
</form>
rd_ui/app/views/visualizations/date_range_selector.html (new file, 8 lines)
@@ -0,0 +1,8 @@
<div>
  <span>
    From <input type="date" ng-model="dateRangeHuman.min">
  </span>
  <span>
    To <input type="date" ng-model="dateRangeHuman.max">
  </span>
</div>
@@ -1,25 +1,42 @@
 <div>
   <span ng-click="openEditor=!openEditor" class="details-toggle" ng-class="{open: openEditor}">Edit</span>

-  <form ng-show="openEditor" role="form" name="visForm" ng-submit="submit()">
-    <div class="form-group">
-      <label class="control-label">Name</label>
-      <input name="name" type="text" class="form-control" ng-model="visualization.name" placeholder="{{visualization.type | capitalize}}">
+  <form ng-show="openEditor" role="form" name="visForm" ng-submit="submit()" class="form-horizontal">
+    <div class="row">
+      <div class="col-md-6">
+        <div class="form-group row">
+          <label class="control-label col-sm-5">Visualization Type</label>
+          <div class="col-sm-7">
+            <select required ng-model="visualization.type" ng-options="value as key for (key, value) in visTypes" class="form-control" ng-change="typeChanged()"></select>
+          </div>
+        </div>
+      </div>
+      <div class="col-md-6">
+        <div class="form-group row">
+          <label class="control-label col-sm-5">Name</label>
+          <div class="col-sm-7">
+            <input name="name" type="text" class="form-control" ng-model="visualization.name" placeholder="{{visualization.type | capitalize}}">
+          </div>
+        </div>
+      </div>
     </div>

-    <div class="form-group">
-      <label class="control-label">Visualization Type</label>
-      <select required ng-model="visualization.type" ng-options="value as key for (key, value) in visTypes" class="form-control" ng-change="typeChanged()"></select>
+    <div class="row">
+      <div class="col-md-12">
+        <visualization-options-editor></visualization-options-editor>
+      </div>
     </div>

-    <visualization-options-editor></visualization-options-editor>
-
     <div class="form-group" ng-if="editRawOptions">
       <label class="control-label">Advanced</label>
       <textarea json-text ng-model="visualization.options" class="form-control" rows="10"></textarea>
     </div>

-    <div class="form-group">
+    <div class="form-group text-center">
       <button type="submit" class="btn btn-primary">Save</button>
     </div>
@@ -2,16 +2,16 @@
 <div class="filter" ng-repeat="filter in filters">
   <ui-select ng-model="filter.current" ng-if="!filter.multiple">
     <ui-select-match placeholder="Select value for {{filter.friendlyName}}...">{{filter.friendlyName}}: {{$select.selected}}</ui-select-match>
-    <ui-select-choices repeat="value in filter.values | filter: $select.search track by $index">
+    <ui-select-choices repeat="value in filter.values | filter: $select.search">
       {{value}}
     </ui-select-choices>
   </ui-select>

   <ui-select ng-model="filter.current" multiple ng-if="filter.multiple">
     <ui-select-match placeholder="Select value for {{filter.friendlyName}}...">{{filter.friendlyName}}: {{$item}}</ui-select-match>
-    <ui-select-choices repeat="value in filter.values | filter: $select.search track by $index">
+    <ui-select-choices repeat="value in filter.values | filter: $select.search">
       {{value}}
     </ui-select-choices>
   </ui-select>
 </div>
 </div>
 </div>
@@ -10,12 +10,12 @@
     "jquery": "1.9.1",
     "bootstrap": "3.0.0",
     "es5-shim": "2.0.8",
-    "angular-moment": "0.2.0",
-    "moment": "2.1.0",
+    "angular-moment": "0.10.3",
+    "moment": "~2.8.0",
     "codemirror": "4.8.0",
     "highcharts": "3.0.10",
     "underscore": "1.5.1",
-    "pivottable": "1.6.3",
+    "pivottable": "~1.1.1",
     "cornelius": "https://github.com/restorando/cornelius.git",
     "gridster": "0.2.0",
     "mousetrap": "~1.4.6",
@@ -30,10 +30,11 @@
     "angular-ui-bootstrap-bower": "~0.12.1",
     "leaflet": "~0.7.3",
     "angular-base64-upload": "~0.1.11",
-    "angular-ui-select": "0.8.2",
+    "angular-ui-select": "~0.13.2",
     "angular-bootstrap-show-errors": "~2.3.0",
     "angular-sanitize": "1.2.18",
-    "d3": "3.5.6"
+    "d3": "3.5.6",
+    "angular-ui-sortable": "~0.13.4"
   },
   "devDependencies": {
     "angular-mocks": "1.2.18",
@@ -1,4 +1,4 @@
-featureFlags = [];
+clientConfig = {};
 currentUser = {
   id: 1,
   name: 'John Mock',
@@ -7,7 +7,7 @@ from flask_mail import Mail
 from redash import settings
 from redash.query_runner import import_query_runners

-__version__ = '0.8.2'
+__version__ = '0.8.3'


 def setup_logging():
@@ -38,3 +38,6 @@ mail.init_mail(settings.all_settings())
 statsd_client = StatsClient(host=settings.STATSD_HOST, port=settings.STATSD_PORT, prefix=settings.STATSD_PREFIX)

 import_query_runners(settings.QUERY_RUNNERS)
+
+from redash.version_check import reset_new_version_status
+reset_new_version_status()
@@ -4,8 +4,9 @@ import json
 from flask import render_template, send_from_directory, current_app
 from flask_login import current_user, login_required

-from redash import settings
+from redash import settings, __version__, redis_connection
 from redash.wsgi import app
+from redash.version_check import get_latest_version


 @app.route('/admin/<anything>/<whatever>')
@@ -36,14 +37,18 @@ def index(**kwargs):
         'permissions': current_user.permissions
     }

-    features = {
+    client_config = {
         'clientSideMetrics': settings.CLIENT_SIDE_METRICS,
         'allowScriptsInUserInput': settings.ALLOW_SCRIPTS_IN_USER_INPUT,
-        'highChartsTurboThreshold': settings.HIGHCHARTS_TURBO_THRESHOLD
+        'highChartsTurboThreshold': settings.HIGHCHARTS_TURBO_THRESHOLD,
+        'dateFormat': settings.DATE_FORMAT,
+        'dateTimeFormat': "{0} HH:mm".format(settings.DATE_FORMAT),
+        'newVersionAvailable': get_latest_version(),
+        'version': __version__
     }

     return render_template("index.html", user=json.dumps(user), name=settings.NAME,
-                           features=json.dumps(features),
+                           client_config=json.dumps(client_config),
                            analytics=settings.ANALYTICS)
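For reference, a sketch of the client_config object this view now serializes into index.html, using the defaults defined elsewhere in this release (newVersionAvailable depends on what get_latest_version() returns, and the allowScriptsInUserInput value shown here is an assumption):

    {
        "clientSideMetrics": false,
        "allowScriptsInUserInput": false,
        "highChartsTurboThreshold": 1000,
        "dateFormat": "DD/MM/YY",
        "dateTimeFormat": "DD/MM/YY HH:mm",
        "newVersionAvailable": null,
        "version": "0.8.3"
    }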
@@ -131,7 +131,7 @@ class ApiUser(UserMixin, PermissionsCheckMixin):

 class Group(BaseModel):
     DEFAULT_PERMISSIONS = ['create_dashboard', 'create_query', 'edit_dashboard', 'edit_query',
-                           'view_query', 'view_source', 'execute_query', 'list_users']
+                           'view_query', 'view_source', 'execute_query', 'list_users', 'schedule_query']

     id = peewee.PrimaryKeyField()
     name = peewee.CharField(max_length=100)
@@ -369,10 +369,10 @@ class QueryResult(BaseModel):
         }

     @classmethod
-    def unused(cls):
-        week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
+    def unused(cls, days=7):
+        age_threshold = datetime.datetime.now() - datetime.timedelta(days=days)

-        unused_results = cls.select().where(Query.id == None, cls.retrieved_at < week_ago)\
+        unused_results = cls.select().where(Query.id == None, cls.retrieved_at < age_threshold)\
             .join(Query, join_type=peewee.JOIN_LEFT_OUTER)

         return unused_results
@@ -621,19 +621,26 @@ class Alert(ModelTimestampsMixin, BaseModel):
     def all(cls):
         return cls.select(Alert, User, Query).join(Query).switch(Alert).join(User)

-    def to_dict(self):
-        return {
-            'id': self.id,
-            'name': self.name,
-            'query': self.query.to_dict(),
-            'user': self.user.to_dict(),
-            'options': self.options,
-            'state': self.state,
-            'last_triggered_at': self.last_triggered_at,
-            'updated_at': self.updated_at,
-            'created_at': self.created_at
+    def to_dict(self, full=True):
+        d = {
+            'id': self.id,
+            'name': self.name,
+            'options': self.options,
+            'state': self.state,
+            'last_triggered_at': self.last_triggered_at,
+            'updated_at': self.updated_at,
+            'created_at': self.created_at
         }

+        if full:
+            d['query'] = self.query.to_dict()
+            d['user'] = self.user.to_dict()
+        else:
+            d['query_id'] = self._data['query']
+            d['user_id'] = self._data['user']
+
+        return d
+
     def evaluate(self):
         data = json.loads(self.query.latest_query_data.data)
         # todo: safe guard for empty
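A sketch of the two shapes to_dict() can now produce (field values are hypothetical):

    alert.to_dict()  # full=True (the default) embeds the related objects
    # {'id': 3, 'name': 'High error rate', 'options': {...}, 'state': 'triggered',
    #  ..., 'query': {...}, 'user': {...}}

    alert.to_dict(full=False)  # keeps only the foreign-key ids, as used by the webhook payload
    # {'id': 3, 'name': 'High error rate', ..., 'query_id': 42, 'user_id': 7}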
@@ -875,7 +882,7 @@ class Event(BaseModel):
         return event


-all_models = (DataSource, User, QueryResult, Query, Alert, Dashboard, Visualization, Widget, ActivityLog, Group, Event)
+all_models = (DataSource, User, QueryResult, Query, Alert, AlertSubscription, Dashboard, Visualization, Widget, ActivityLog, Group, Event)


 def init_db():
@@ -9,6 +9,7 @@ logger = logging.getLogger(__name__)
 __all__ = [
     'ValidationError',
     'BaseQueryRunner',
+    'InterruptException',
     'TYPE_DATETIME',
     'TYPE_BOOLEAN',
     'TYPE_INTEGER',
@@ -38,6 +39,9 @@ SUPPORTED_COLUMN_TYPES = set([
     TYPE_DATE
 ])

+class InterruptException(Exception):
+    pass
+
 class BaseQueryRunner(object):
     def __init__(self, configuration):
         jsonschema.validate(configuration, self.configuration_schema())
@@ -2,6 +2,7 @@ import json
 import logging
 import sys
 import urllib
+from requests.auth import HTTPBasicAuth

 from redash.query_runner import *

@@ -23,9 +24,15 @@ ELASTICSEARCH_TYPES_MAPPING = {
     "boolean" : TYPE_BOOLEAN,
     "string" : TYPE_STRING,
     "date" : TYPE_DATE,
     "object" : TYPE_STRING,
     # "geo_point" TODO: Need to split to 2 fields somehow
 }

+ELASTICSEARCH_BUILTIN_FIELDS_MAPPING = {
+    "_id" : "Id",
+    "_score" : "Score"
+}
+
 PYTHON_TYPES_MAPPING = {
     str: TYPE_STRING,
     unicode: TYPE_STRING,
@@ -35,56 +42,10 @@ PYTHON_TYPES_MAPPING = {
     float: TYPE_FLOAT
 }

#
# ElasticSearch currently supports only simple Lucene style queries (like Kibana
# but without the aggregation).
#
# Full blown JSON based ElasticSearch queries (including aggregations) will be
# added later
#
# Simple query example:
#
# - Query the index named "twitter"
# - Filter by "user:kimchy"
# - Return the fields: "@timestamp", "tweet" and "user"
# - Return up to 15 results
# - Sort by @timestamp ascending
#
# {
#     "index" : "twitter",
#     "query" : "user:kimchy",
#     "fields" : ["@timestamp", "tweet", "user"],
#     "size" : 15,
#     "sort" : "@timestamp:asc"
# }
#
#
# Simple query on a logstash ElasticSearch instance:
#
# - Query the index named "logstash-2015.04.*" (in this case it's all of April 2015)
# - Filter by type:events AND eventName:UserUpgrade AND channel:selfserve
# - Return fields: "@timestamp", "userId", "channel", "utm_source", "utm_medium", "utm_campaign", "utm_content"
# - Return up to 250 results
# - Sort by @timestamp ascending

class BaseElasticSearch(BaseQueryRunner):

# {
#     "index" : "logstash-2015.04.*",
#     "query" : "type:events AND eventName:UserUpgrade AND channel:selfserve",
#     "fields" : ["@timestamp", "userId", "channel", "utm_source", "utm_medium", "utm_campaign", "utm_content"],
#     "size" : 250,
#     "sort" : "@timestamp:asc"
# }
#
#
    DEBUG_ENABLED = True

class ElasticSearch(BaseQueryRunner):
    DEBUG_ENABLED = False

    """
    ElasticSearch query runner for querying ElasticSearch servers.
    Query can be done using the Lucene Syntax (single line) or the more complex,
    full blown ElasticSearch JSON syntax
    """
    @classmethod
    def configuration_schema(cls):
        return {
@@ -93,6 +54,14 @@ class ElasticSearch(BaseQueryRunner):
             'server': {
                 'type': 'string',
                 'title': 'Base URL'
             },
+            'basic_auth_user': {
+                'type': 'string',
+                'title': 'Basic Auth User'
+            },
+            'basic_auth_password': {
+                'type': 'string',
+                'title': 'Basic Auth Password'
+            }
         },
         "required" : ["server"]
@@ -100,20 +69,16 @@

     @classmethod
     def enabled(cls):
         return True

     @classmethod
     def annotate_query(cls):
         return False

     def __init__(self, configuration_json):
-        super(ElasticSearch, self).__init__(configuration_json)
+        super(BaseElasticSearch, self).__init__(configuration_json)

         self.syntax = "json"

         if self.DEBUG_ENABLED:
             http_client.HTTPConnection.debuglevel = 1

             # you need to initialize logging, otherwise you will not see anything from requests
             logging.basicConfig()
             logging.getLogger().setLevel(logging.DEBUG)
@@ -121,11 +86,26 @@
             requests_log.setLevel(logging.DEBUG)
             requests_log.propagate = True

-    def get_mappings(self, url):
+            logger.setLevel(logging.DEBUG)
+
+        self.server_url = self.configuration["server"]
+        if self.server_url[-1] == "/":
+            self.server_url = self.server_url[:-1]
+
+        basic_auth_user = self.configuration["basic_auth_user"]
+        basic_auth_password = self.configuration["basic_auth_password"]
+        self.auth = None
+        if basic_auth_user and basic_auth_password:
+            self.auth = HTTPBasicAuth(basic_auth_user, basic_auth_password)
+
+    def _get_mappings(self, url):
         mappings = {}

-        r = requests.get(url)
+        r = requests.get(url, auth=self.auth)
         mappings_data = r.json()

         logger.debug(mappings_data)

         for index_name in mappings_data:
             index_mappings = mappings_data[index_name]
             for m in index_mappings.get("mappings", {}):
@@ -137,14 +117,21 @@
                 if property_type in ELASTICSEARCH_TYPES_MAPPING:
                     mappings[property_name] = property_type
                 else:
-                    raise "Unknown property type: {0}".format(property_type)
+                    raise Exception("Unknown property type: {0}".format(property_type))

         return mappings

-    def parse_results(self, mappings, result_fields, raw_result, result_columns, result_rows):
-        result_columns_index = {}
-        for c in result_columns:
-            result_columns_index[c["name"]] = c
+    def _parse_results(self, mappings, result_fields, raw_result, result_columns, result_rows):
+
+        def add_column_if_needed(mappings, column_name, friendly_name, result_columns, result_columns_index):
+            if friendly_name not in result_columns_index:
+                result_columns.append({
+                    "name" : friendly_name,
+                    "friendly_name" : friendly_name,
+                    "type" : mappings.get(column_name, "string")})
+                result_columns_index[friendly_name] = result_columns[-1]
+
+        result_columns_index = {c["name"] : c for c in result_columns}

         result_fields_index = {}
         if result_fields:
@@ -153,32 +140,49 @@

         for h in raw_result["hits"]["hits"]:
             row = {}

+            for field, column in ELASTICSEARCH_BUILTIN_FIELDS_MAPPING.iteritems():
+                if field in h:
+                    add_column_if_needed(mappings, field, column, result_columns, result_columns_index)
+                    row[column] = h[field]
+
-            for column in h["_source"]:
+            column_name = "_source" if "_source" in h else "fields"
+            for column in h[column_name]:
                 if result_fields and column not in result_fields_index:
                     continue

                 if column not in result_columns_index:
-                    result_columns.append({
-                        "name" : column,
-                        "friendly_name" : column,
-                        "type" : mappings.get(column, "string")
-                    })
-                    result_columns_index[column] = result_columns[-1]
+                    add_column_if_needed(mappings, column, column, result_columns, result_columns_index)

+                value = h[column_name][column]
+                row[column] = value[0] if isinstance(value, list) and len(value) == 1 else value
-                row[column] = h["_source"][column]

             if row and len(row) > 0:
                 result_rows.append(row)

-    def execute_simple_query(self, url, _from, mappings, result_fields, result_columns, result_rows):
+
+class Kibana(BaseElasticSearch):
+
+    def __init__(self, configuration_json):
+        super(Kibana, self).__init__(configuration_json)
+
+    @classmethod
+    def enabled(cls):
+        return True
+
+    @classmethod
+    def annotate_query(cls):
+        return False
+
+    def _execute_simple_query(self, url, auth, _from, mappings, result_fields, result_columns, result_rows):
         url += "&from={0}".format(_from)
-        r = requests.get(url)
+        r = requests.get(url, auth=self.auth)
         if r.status_code != 200:
             raise Exception("Failed to execute query. Return Code: {0} Reason: {1}".format(r.status_code, r.text))

         raw_result = r.json()

-        self.parse_results(mappings, result_fields, raw_result, result_columns, result_rows)
+        self._parse_results(mappings, result_fields, raw_result, result_columns, result_rows)

         total = raw_result["hits"]["total"]
         result_size = len(raw_result["hits"]["hits"])
@@ -199,19 +203,14 @@
         result_fields = query_params.get("fields", None)
         sort = query_params.get("sort", None)

-        server_url = self.configuration["server"]
-        if not server_url:
+        if not self.server_url:
             error = "Missing configuration key 'server'"
             return None, error

-        if server_url[-1] == "/":
-            server_url = server_url[:-1]
-
-        url = "{0}/{1}/_search?".format(server_url, index_name)
-        mapping_url = "{0}/{1}/_mapping".format(server_url, index_name)
+        url = "{0}/{1}/_search?".format(self.server_url, index_name)
+        mapping_url = "{0}/{1}/_mapping".format(self.server_url, index_name)

-        mappings = self.get_mappings(mapping_url)
+        mappings = self._get_mappings(mapping_url)

         logger.debug(json.dumps(mappings, indent=4))

@@ -231,7 +230,7 @@
         if isinstance(query_data, str) or isinstance(query_data, unicode):
             _from = 0
             while True:
-                total = self.execute_simple_query(url, _from, mappings, result_fields, result_columns, result_rows)
+                total = self._execute_simple_query(url, _from, mappings, result_fields, result_columns, result_rows)
                 _from += size
                 if _from >= total:
                     break
@@ -252,4 +251,61 @@
         return json_data, error

+
+class ElasticSearch(BaseElasticSearch):
+
+    def __init__(self, configuration_json):
+        super(ElasticSearch, self).__init__(configuration_json)
+
+    @classmethod
+    def enabled(cls):
+        return True
+
+    @classmethod
+    def annotate_query(cls):
+        return False
+
+    def run_query(self, query):
+        try:
+            error = None
+
+            logger.debug(query)
+            query_dict = json.loads(query)
+
+            index_name = query_dict.pop("index", "")
+
+            if not self.server_url:
+                error = "Missing configuration key 'server'"
+                return None, error
+
+            url = "{0}/{1}/_search".format(self.server_url, index_name)
+            mapping_url = "{0}/{1}/_mapping".format(self.server_url, index_name)
+
+            mappings = self._get_mappings(mapping_url)
+
+            logger.debug(json.dumps(mappings, indent=4))
+
+            params = {"source": json.dumps(query_dict)}
+            logger.debug("Using URL: %s", url)
+            logger.debug("Using params : %s", params)
+            r = requests.get(url, params=params, auth=self.auth)
+            logger.debug("Result: %s", r.json())
+
+            result_columns = []
+            result_rows = []
+            self._parse_results(mappings, None, r.json(), result_columns, result_rows)
+
+            json_data = json.dumps({
+                "columns" : result_columns,
+                "rows" : result_rows
+            })
+        except KeyboardInterrupt:
+            error = "Query cancelled by user."
+            json_data = None
+        except Exception as e:
+            raise sys.exc_info()[1], None, sys.exc_info()[2]
+
+        return json_data, error
+
+
+register(Kibana)
+register(ElasticSearch)
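For illustration, a query for the new JSON-syntax ElasticSearch runner might look like the sketch below: run_query() pops the top-level "index" key to build the URL and sends the remainder as the source parameter of a _search request. The index and field names here are hypothetical, and the inner query follows the standard ElasticSearch query DSL:

    {
        "index": "twitter",
        "query": {
            "query_string": {
                "query": "user:kimchy"
            }
        },
        "size": 15
    }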
@@ -60,7 +60,7 @@ class Graphite(BaseQueryRunner):
         else:
             self.auth = None

-        self.verify = self.configuration["verify"]
+        self.verify = self.configuration.get("verify", True)
         self.base_url = "%s/render?format=json&" % self.configuration['url']

     def run_query(self, query):
@@ -81,4 +81,4 @@ class Graphite(BaseQueryRunner):

     return data, error

-register(Graphite)
+register(Graphite)
redash/query_runner/mql.py (new file, 57 lines)
@@ -0,0 +1,57 @@
import json

from . import BaseQueryRunner, register
from .mongodb import TYPES_MAP, TYPE_STRING

try:
    import pymongo
    from ognom import query_to_plan
    from website.server.utils import simplify
    enabled = True
except ImportError:
    enabled = False

def deduce_columns(rows):
    column_to_type = {}
    for row in rows:
        for column, value in row.iteritems():
            column_to_type[column] = TYPES_MAP.get(value.__class__, TYPE_STRING)
    return [{'name': column, 'friendly_name': column, 'type': type}
            for column, type in column_to_type.iteritems()]

class MQL(BaseQueryRunner):

    def __init__(self, configuration_json):
        super(MQL, self).__init__(configuration_json)
        self.syntax = 'sql'

    @classmethod
    def enabled(cls):
        return enabled

    @classmethod
    def annotate_query(cls):
        return False

    @classmethod
    def configuration_schema(cls):
        return {
            'type': 'object',
            'properties': {
                'uri': {
                    'type': 'string',
                    'title': 'Connection String'
                }
            },
            'required': ['uri']
        }

    def run_query(self, query):
        conn = pymongo.MongoClient(self.configuration['uri'])
        # execute() returns a generator (that wraps a cursor)
        gen = query_to_plan(query).execute(conn)
        # simplify converts special MongoDB data types (ObjectId, Date, etc') to strings
        result = simplify(list(gen))
        return json.dumps({'columns': deduce_columns(result), 'rows': result}), None

register(MQL)
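A small sketch of deduce_columns() in action (the rows are hypothetical, and the exact type constants come from the mongodb module's TYPES_MAP, so TYPE_INTEGER below assumes int is mapped there). Note that when a column's type differs between rows, the last row seen wins:

    rows = [{'user': 'kimchy', 'count': 3},
            {'user': 'honza', 'count': 5}]
    deduce_columns(rows)
    # -> [{'name': 'user', 'friendly_name': 'user', 'type': TYPE_STRING},
    #     {'name': 'count', 'friendly_name': 'count', 'type': TYPE_INTEGER}]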
@@ -33,7 +33,8 @@ class Mysql(BaseQueryRunner):
             'type': 'object',
             'properties': {
                 'host': {
-                    'type': 'string'
+                    'type': 'string',
+                    'default': '127.0.0.1'
                 },
                 'user': {
                     'type': 'string'
@@ -46,9 +47,26 @@
                     'type': 'string',
                     'title': 'Database name'
                 },
-                "port": {
-                    "type": "number"
+                'port': {
+                    'type': 'number',
+                    'default': 3306,
                 },
+                'use_ssl': {
+                    'type': 'boolean',
+                    'title': 'Use SSL'
+                },
+                'ssl_cacert': {
+                    'type': 'string',
+                    'title': 'Path to CA certificate file to verify peer against (SSL)'
+                },
+                'ssl_cert': {
+                    'type': 'string',
+                    'title': 'Path to client certificate file (SSL)'
+                },
+                'ssl_key': {
+                    'type': 'string',
+                    'title': 'Path to private key file (SSL)'
+                }
             },
             'required': ['db'],
             'secret': ['passwd']
@@ -111,7 +129,8 @@
                                          passwd=self.configuration.get('passwd', ''),
                                          db=self.configuration['db'],
                                          port=self.configuration.get('port', 3306),
-                                         charset='utf8', use_unicode=True)
+                                         charset='utf8', use_unicode=True,
+                                         ssl=self._get_ssl_parameters())
             cursor = connection.cursor()
             logger.debug("MySQL running query: %s", query)
             cursor.execute(query)
@@ -145,4 +164,19 @@

         return json_data, error

+    def _get_ssl_parameters(self):
+        ssl_params = {}
+
+        if self.configuration.get('use_ssl'):
+            config_map = dict(ssl_cacert='ca',
+                              ssl_cert='cert',
+                              ssl_key='key')
+            for key, cfg in config_map.items():
+                val = self.configuration.get(key)
+                if val:
+                    ssl_params[cfg] = val
+
+        return ssl_params
+
 register(Mysql)
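A sketch of how the new SSL options flow through _get_ssl_parameters() (file paths are hypothetical):

    # data source options:
    # {"db": "analytics", "use_ssl": true,
    #  "ssl_cacert": "/etc/mysql/ca.pem", "ssl_key": "/etc/mysql/client-key.pem"}
    #
    # _get_ssl_parameters() maps them to the dict MySQLdb expects:
    # {"ca": "/etc/mysql/ca.pem", "key": "/etc/mysql/client-key.pem"}
    # and it is passed as MySQLdb.connect(..., ssl=...); with use_ssl unset it stays {}.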
redash/query_runner/oracle.py (new file, 175 lines)
@@ -0,0 +1,175 @@
import json
import logging
import sys

from redash.query_runner import *
from redash.utils import JSONEncoder

try:
    import cx_Oracle

    TYPES_MAP = {
        cx_Oracle.DATETIME: TYPE_DATETIME,
        cx_Oracle.CLOB: TYPE_STRING,
        cx_Oracle.LOB: TYPE_STRING,
        cx_Oracle.FIXED_CHAR: TYPE_STRING,
        cx_Oracle.FIXED_NCHAR: TYPE_STRING,
        cx_Oracle.FIXED_UNICODE: TYPE_STRING,
        cx_Oracle.INTERVAL: TYPE_DATETIME,
        cx_Oracle.LONG_NCHAR: TYPE_STRING,
        cx_Oracle.LONG_STRING: TYPE_STRING,
        cx_Oracle.LONG_UNICODE: TYPE_STRING,
        cx_Oracle.NATIVE_FLOAT: TYPE_FLOAT,
        cx_Oracle.NCHAR: TYPE_STRING,
        cx_Oracle.NUMBER: TYPE_FLOAT,
        cx_Oracle.ROWID: TYPE_INTEGER,
        cx_Oracle.STRING: TYPE_STRING,
        cx_Oracle.TIMESTAMP: TYPE_DATETIME,
        cx_Oracle.UNICODE: TYPE_STRING,
    }

    ENABLED = True
except ImportError:
    ENABLED = False

logger = logging.getLogger(__name__)

class Oracle(BaseQueryRunner):

    @classmethod
    def get_col_type(cls, col_type, scale):
        if col_type == cx_Oracle.NUMBER:
            return TYPE_FLOAT if scale > 0 else TYPE_INTEGER
        else:
            return TYPES_MAP.get(col_type, None)

    @classmethod
    def enabled(cls):
        return ENABLED

    @classmethod
    def configuration_schema(cls):
        return {
            "type": "object",
            "properties": {
                "user": {
                    "type": "string"
                },
                "password": {
                    "type": "string"
                },
                "host": {
                    "type": "string"
                },
                "port": {
                    "type": "number"
                },
                "servicename": {
                    "type": "string",
                    "title": "DSN Service Name"
                }
            },
            "required": ["servicename"],
            "secret": ["password"]
        }

    @classmethod
    def type(cls):
        return "oracle"

    def __init__(self, configuration_json):
        super(Oracle, self).__init__(configuration_json)

        dsn = cx_Oracle.makedsn(
            self.configuration["host"],
            self.configuration["port"],
            service_name=self.configuration["servicename"])

        self.connection_string = "{}/{}@{}".format(self.configuration["user"], self.configuration["password"], dsn)

    def get_schema(self):
        query = """
        SELECT
            user_tables.TABLESPACE_NAME,
            all_tab_cols.TABLE_NAME,
            all_tab_cols.COLUMN_NAME
        FROM all_tab_cols
        JOIN user_tables ON (all_tab_cols.TABLE_NAME = user_tables.TABLE_NAME)
        """

        results, error = self.run_query(query)

        if error is not None:
            raise Exception("Failed getting schema.")

        results = json.loads(results)

        schema = {}
        for row in results['rows']:
            if row['TABLESPACE_NAME'] != None:
                table_name = '{}.{}'.format(row['TABLESPACE_NAME'], row['TABLE_NAME'])
            else:
                table_name = row['TABLE_NAME']

            if table_name not in schema:
                schema[table_name] = {'name': table_name, 'columns': []}

            schema[table_name]['columns'].append(row['COLUMN_NAME'])

        return schema.values()

    @classmethod
    def _convert_number(cls, value):
        try:
            return int(value)
        except:
            return value

    @classmethod
    def output_handler(cls, cursor, name, default_type, length, precision, scale):
        if default_type in (cx_Oracle.CLOB, cx_Oracle.LOB):
            return cursor.var(cx_Oracle.LONG_STRING, 80000, cursor.arraysize)

        if default_type in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):
            return cursor.var(unicode, length, cursor.arraysize)

        if default_type == cx_Oracle.NUMBER:
            if scale <= 0:
                return cursor.var(cx_Oracle.STRING, 255, outconverter=Oracle._convert_number, arraysize=cursor.arraysize)

    def run_query(self, query):
        connection = cx_Oracle.connect(self.connection_string)
        connection.outputtypehandler = Oracle.output_handler

        cursor = connection.cursor()

        try:
            cursor.execute(query)

            if cursor.description is not None:
                columns = self.fetch_columns([(i[0], Oracle.get_col_type(i[1], i[5])) for i in cursor.description])
                rows = [dict(zip((c['name'] for c in columns), row)) for row in cursor]

                data = {'columns': columns, 'rows': rows}
                error = None
                json_data = json.dumps(data, cls=JSONEncoder)
            else:
                error = 'Query completed but it returned no data.'
                json_data = None
        except cx_Oracle.DatabaseError as err:
            logging.exception(err.message)
            error = "Query failed. {}.".format(err.message)
            json_data = None
        except KeyboardInterrupt:
            connection.cancel()
            error = "Query cancelled by user."
            json_data = None
        except Exception as err:
            raise sys.exc_info()[1], None, sys.exc_info()[2]
        finally:
            connection.close()

        return json_data, error

register(Oracle)
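A sketch of a data source configuration matching the schema above (all values hypothetical); __init__ feeds host, port and servicename to cx_Oracle.makedsn and builds a "user/password@dsn" connection string from the result:

    {
        "user": "scott",
        "password": "tiger",
        "host": "oracle.example.com",
        "port": 1521,
        "servicename": "ORCL"
    }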
@@ -57,10 +57,12 @@ class PostgreSQL(BaseQueryRunner):
                     "type": "string"
                 },
                 "host": {
-                    "type": "string"
+                    "type": "string",
+                    "default": "127.0.0.1"
                 },
                 "port": {
-                    "type": "number"
+                    "type": "number",
+                    "default": 5432
                 },
                 "dbname": {
                     "type": "string",
@@ -140,7 +142,7 @@
             logging.exception(e)
             error = e.message
             json_data = None
-        except KeyboardInterrupt:
+        except (KeyboardInterrupt, InterruptException):
             connection.cancel()
             error = "Query cancelled by user."
             json_data = None
@@ -97,7 +97,14 @@ class Python(BaseQueryRunner):
         return iter(obj)

     def add_result_column(self, result, column_name, friendly_name, column_type):
-        """ Helper function to add columns inside a Python script running in re:dash in an easier way """
+        """Helper function to add columns inside a Python script running in re:dash in an easier way
+
+        Parameters:
+        :result dict: The result dict
+        :column_name string: Name of the column, which should consist of lowercase Latin letters or underscores.
+        :friendly_name string: Name of the column for display
+        :column_type string: Type of the column. Check supported data types for details.
+        """
         if column_type not in SUPPORTED_COLUMN_TYPES:
             raise Exception("'{0}' is not a supported column type".format(column_type))
@@ -111,12 +118,24 @@
         })

     def add_result_row(self, result, values):
+        """Helper function to add one row to results set
+
+        Parameters:
+        :result dict: The result dict
+        :values dict: One row of result in dict. The key should be one of the column names. The value is the value of the column in this row.
+        """
         if not "rows" in result:
             result["rows"] = []

         result["rows"].append(values)

     def execute_query(self, data_source_name_or_id, query):
+        """Run query from specific data source.
+
+        Parameters:
+        :data_source_name_or_id string|integer: Name or ID of the data source
+        :query string: Query to run
+        """
         try:
             if type(data_source_name_or_id) == int:
                 data_source = models.DataSource.get_by_id(data_source_name_or_id)
@@ -135,6 +154,11 @@
         return json.loads(data)

     def get_query_result(self, query_id):
+        """Get result of an existing query.
+
+        Parameters:
+        :query_id integer: ID of existing query
+        """
         try:
             query = models.Query.get_by_id(query_id)
         except models.Query.DoesNotExist:
@@ -171,7 +195,8 @@
         restricted_globals["add_result_row"] = self.add_result_row
         restricted_globals["disable_print_log"] = self._custom_print.disable
         restricted_globals["enable_print_log"] = self._custom_print.enable

         # Supported data types
         restricted_globals["TYPE_DATETIME"] = TYPE_DATETIME
         restricted_globals["TYPE_BOOLEAN"] = TYPE_BOOLEAN
         restricted_globals["TYPE_INTEGER"] = TYPE_INTEGER
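A sketch of a script for the Python query runner built on the helpers documented above (the data source name, table and post-processing are hypothetical):

    result = {}
    add_result_column(result, 'user_id', 'User ID', TYPE_INTEGER)
    add_result_column(result, 'total', 'Total', TYPE_FLOAT)

    # run a query against another data source and post-process its rows
    data = execute_query('prod-mysql', 'SELECT user_id, total FROM totals')
    for row in data['rows']:
        add_result_row(result, {'user_id': row['user_id'], 'total': row['total'] * 2.0})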
redash/query_runner/sqlite.py (new file, 96 lines)
@@ -0,0 +1,96 @@
import json
import logging
import sqlite3
import sys

from redash.query_runner import BaseQueryRunner
from redash.query_runner import TYPE_STRING
from redash.query_runner import register

from redash.utils import JSONEncoder

logger = logging.getLogger(__name__)

class Sqlite(BaseQueryRunner):
    @classmethod
    def configuration_schema(cls):
        return {
            "type": "object",
            "properties": {
                "dbpath": {
                    "type": "string",
                    "title": "Database Path"
                }
            },
            "required": ["dbpath"],
        }

    @classmethod
    def type(cls):
        return "sqlite"

    def __init__(self, configuration_json):
        super(Sqlite, self).__init__(configuration_json)

        self._dbpath = self.configuration['dbpath']

    def get_schema(self):
        query_table = "select tbl_name from sqlite_master where type='table'"
        query_columns = "PRAGMA table_info(%s)"

        results, error = self.run_query(query_table)

        if error is not None:
            raise Exception("Failed getting schema.")

        results = json.loads(results)

        schema = {}
        for row in results['rows']:
            table_name = row['tbl_name']
            schema[table_name] = {'name': table_name, 'columns': []}
            results_table, error = self.run_query(query_columns % (table_name,))
            if error is not None:
                raise Exception("Failed getting schema.")

            results_table = json.loads(results_table)
            for row_column in results_table['rows']:
                schema[table_name]['columns'].append(row_column['name'])

        return schema.values()

    def run_query(self, query):
        connection = sqlite3.connect(self._dbpath)

        cursor = connection.cursor()

        try:
            cursor.execute(query)

            if cursor.description is not None:
                columns = self.fetch_columns([(i[0], None) for i in cursor.description])
                rows = [dict(zip((c['name'] for c in columns), row)) for row in cursor]

                data = {'columns': columns, 'rows': rows}
                error = None
                json_data = json.dumps(data, cls=JSONEncoder)
            else:
                error = 'Query completed but it returned no data.'
                json_data = None
        except KeyboardInterrupt:
            connection.cancel()
            error = "Query cancelled by user."
            json_data = None
        except Exception as e:
            raise sys.exc_info()[1], None, sys.exc_info()[2]
        finally:
            connection.close()
        return json_data, error

register(Sqlite)
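The runner needs only a file path; a sketch of its data source options (the path is hypothetical). get_schema() then walks sqlite_master and issues a PRAGMA table_info per table:

    {
        "dbpath": "/var/lib/redash/app.db"
    }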
@@ -69,6 +69,8 @@ CELERY_BACKEND = os.environ.get("REDASH_CELERY_BACKEND", CELERY_BROKER)

 # The following enables periodic job (every 5 minutes) of removing unused query results.
 QUERY_RESULTS_CLEANUP_ENABLED = parse_boolean(os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_ENABLED", "true"))
+QUERY_RESULTS_CLEANUP_COUNT = int(os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_COUNT", "100"))
+QUERY_RESULTS_CLEANUP_MAX_AGE = int(os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_MAX_AGE", "7"))

 AUTH_TYPE = os.environ.get("REDASH_AUTH_TYPE", "api_key")
 PASSWORD_LOGIN_ENABLED = parse_boolean(os.environ.get("REDASH_PASSWORD_LOGIN_ENABLED", "true"))
@@ -104,6 +106,13 @@ MAIL_ASCII_ATTACHMENTS = parse_boolean(os.environ.get('REDASH_MAIL_ASCII_ATTACHM

 HOST = os.environ.get('REDASH_HOST', '')

+HIPCHAT_API_TOKEN = os.environ.get('REDASH_HIPCHAT_API_TOKEN', None)
+HIPCHAT_ROOM_ID = os.environ.get('REDASH_HIPCHAT_ROOM_ID', None)
+
+WEBHOOK_ENDPOINT = os.environ.get('REDASH_WEBHOOK_ENDPOINT', None)
+WEBHOOK_USERNAME = os.environ.get('REDASH_WEBHOOK_USERNAME', None)
+WEBHOOK_PASSWORD = os.environ.get('REDASH_WEBHOOK_PASSWORD', None)
+
 # CORS settings for the Query Result API (and possibly future external APIs).
 # In most cases all you need to do is set REDASH_CORS_ACCESS_CONTROL_ALLOW_ORIGIN
 # to the calling domain (or domains in a comma separated list).
@@ -118,6 +127,7 @@ default_query_runners = [
     'redash.query_runner.google_spreadsheets',
     'redash.query_runner.graphite',
     'redash.query_runner.mongodb',
+    'redash.query_runner.mql',
     'redash.query_runner.mysql',
     'redash.query_runner.pg',
     'redash.query_runner.url',
@@ -127,7 +137,9 @@
     'redash.query_runner.hive_ds',
     'redash.query_runner.impala_ds',
     'redash.query_runner.vertica',
-    'redash.query_runner.treasuredata'
+    'redash.query_runner.treasuredata',
+    'redash.query_runner.oracle',
+    'redash.query_runner.sqlite',
 ]

 enabled_query_runners = array_from_string(os.environ.get("REDASH_ENABLED_QUERY_RUNNERS", ",".join(default_query_runners)))
@@ -143,10 +155,12 @@ ALLOW_SCRIPTS_IN_USER_INPUT = parse_boolean(os.environ.get("REDASH_ALLOW_SCRIPTS
 CLIENT_SIDE_METRICS = parse_boolean(os.environ.get("REDASH_CLIENT_SIDE_METRICS", "false"))
 # http://api.highcharts.com/highcharts#plotOptions.series.turboThreshold
 HIGHCHARTS_TURBO_THRESHOLD = int(os.environ.get("REDASH_HIGHCHARTS_TURBO_THRESHOLD", "1000"))
+DATE_FORMAT = os.environ.get("REDASH_DATE_FORMAT", "DD/MM/YY")

 # Features:
 FEATURE_ALLOW_ALL_TO_EDIT_QUERIES = parse_boolean(os.environ.get("REDASH_FEATURE_ALLOW_ALL_TO_EDIT", "true"))
 FEATURE_TABLES_PERMISSIONS = parse_boolean(os.environ.get("REDASH_FEATURE_TABLES_PERMISSIONS", "false"))
+VERSION_CHECK = parse_boolean(os.environ.get("REDASH_VERSION_CEHCK", "true"))

 # BigQuery
 BIGQUERY_HTTP_TIMEOUT = int(os.environ.get("REDASH_BIGQUERY_HTTP_TIMEOUT", "600"))
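A sketch of how the new settings might be configured through environment variables (the values and endpoint are hypothetical):

    export REDASH_QUERY_RESULTS_CLEANUP_COUNT=500
    export REDASH_QUERY_RESULTS_CLEANUP_MAX_AGE=14
    export REDASH_DATE_FORMAT="YYYY-MM-DD"
    export REDASH_WEBHOOK_ENDPOINT="https://hooks.example.com/redash-alerts"
    export REDASH_WEBHOOK_USERNAME="redash"
    export REDASH_WEBHOOK_PASSWORD="secret"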
@@ -1,14 +1,22 @@
import time
import logging
import signal
import traceback
from flask.ext.mail import Message
import redis
import hipchat
import requests
import json
from redash.utils import json_dumps
from requests.auth import HTTPBasicAuth
from celery import Task
from celery.result import AsyncResult
from celery.utils.log import get_task_logger
from redash import redis_connection, models, statsd_client, settings, utils, mail
from redash.utils import gen_query_hash
from redash.worker import celery
from redash.query_runner import get_query_runner
from redash.query_runner import get_query_runner, InterruptException
from version_check import run_version_check

logger = get_task_logger(__name__)

@@ -132,7 +140,7 @@ class QueryTask(object):
        return self._async_result.ready()

    def cancel(self):
        return self._async_result.revoke(terminate=True)
        return self._async_result.revoke(terminate=True, signal='SIGINT')

    @staticmethod
    def _job_lock_id(query_hash, data_source_id):
@@ -213,7 +221,10 @@ def cleanup_query_results():
    Each time the job deletes only 100 query results so it won't choke the database in case of many such results.
    """

    unused_query_results = models.QueryResult.unused().limit(100)
    logging.info("Running query results clean up (removing maximum of %d unused results, that are %d days old or more)",
                 settings.QUERY_RESULTS_CLEANUP_COUNT, settings.QUERY_RESULTS_CLEANUP_MAX_AGE)

    unused_query_results = models.QueryResult.unused(settings.QUERY_RESULTS_CLEANUP_MAX_AGE).limit(settings.QUERY_RESULTS_CLEANUP_COUNT)
    total_unused_query_results = models.QueryResult.unused().count()
    deleted_count = models.QueryResult.delete().where(models.QueryResult.id << unused_query_results).execute()

@@ -250,22 +261,24 @@ def check_alerts_for_query(self, query_id):
            continue

        # message = Message
        recipients = [s.email for s in alert.subscribers()]
        logger.debug("Notifying: %s", recipients)
        html = """
        Check <a href="{host}/alerts/{alert_id}">alert</a> / check <a href="{host}/queries/{query_id}">query</a>.
        """.format(host=settings.HOST, alert_id=alert.id, query_id=query.id)

        with app.app_context():
            message = Message(recipients=recipients,
                              subject="[{1}] {0}".format(alert.name, new_state.upper()),
                              html=html)
        notify_mail(alert, html, new_state, app)

            mail.send(message)
        if settings.HIPCHAT_API_TOKEN:
            notify_hipchat(alert, html, new_state)

        if settings.WEBHOOK_ENDPOINT:
            notify_webhook(alert, query, html, new_state)

def signal_handler(*args):
    raise InterruptException

@celery.task(bind=True, base=BaseTask, track_started=True)
def execute_query(self, query, data_source_id, metadata):
    signal.signal(signal.SIGINT, signal_handler)
    start_time = time.time()

    logger.info("Loading data source (%d)...", data_source_id)
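Note: the two hunks above are what turn query cancellation into a catchable exception: QueryTask.cancel() now revokes the celery task with SIGINT, and execute_query() installs a handler that converts that signal into InterruptException. A stand-alone sketch of the same pattern (InterruptException here is a stand-in for the one in redash.query_runner):

    import signal
    import time

    class InterruptException(Exception):
        pass

    def signal_handler(*args):
        raise InterruptException()

    signal.signal(signal.SIGINT, signal_handler)

    try:
        time.sleep(60)  # stands in for a long-running query
    except InterruptException:
        print("query cancelled, cleaning up")  # the runner can close connections here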
@@ -317,3 +330,42 @@ def execute_query(self, query, data_source_id, metadata):
@celery.task(base=BaseTask)
def record_event(event):
    models.Event.record(event)

@celery.task(base=BaseTask)
def version_check():
    run_version_check()

def notify_hipchat(alert, html, new_state):
    try:
        hipchat_client = hipchat.HipChat(token=settings.HIPCHAT_API_TOKEN)
        message = '[' + new_state.upper() + '] ' + alert.name + '<br />' + html
        hipchat_client.message_room(settings.HIPCHAT_ROOM_ID, settings.NAME, message, message_format='html')
    except:
        logger.exception("hipchat send ERROR.")

def notify_mail(alert, html, new_state, app):
    recipients = [s.email for s in alert.subscribers()]
    logger.debug("Notifying: %s", recipients)
    try:
        with app.app_context():
            message = Message(recipients=recipients,
                              subject="[{1}] {0}".format(alert.name, new_state.upper()),
                              html=html)
            mail.send(message)
    except:
        logger.exception("mail send ERROR.")

def notify_webhook(alert, query, html, new_state):
    try:
        data = {
            'event': 'alert_state_change',
            'alert': alert.to_dict(full=False),
            'url_base': settings.HOST
        }
        headers = {'Content-Type': 'application/json'}
        auth = HTTPBasicAuth(settings.WEBHOOK_USERNAME, settings.WEBHOOK_PASSWORD) if settings.WEBHOOK_USERNAME else None
        resp = requests.post(settings.WEBHOOK_ENDPOINT, data=json_dumps(data), auth=auth, headers=headers)
        if resp.status_code != 200:
            logger.error("webhook send ERROR. status_code => {status}".format(status=resp.status_code))
    except:
        logger.exception("webhook send ERROR.")
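Note: notify_webhook() POSTs a JSON body with the keys 'event', 'alert' and 'url_base', optionally signed with HTTP basic auth. A hypothetical receiver, just to illustrate the payload shape (the route name and the use of Flask are assumptions, not part of redash):

    from flask import Flask, request, jsonify

    app = Flask(__name__)

    @app.route('/redash-alerts', methods=['POST'])
    def redash_alert():
        payload = request.get_json()
        # payload['event'] is always 'alert_state_change';
        # payload['alert'] is the serialized alert, payload['url_base'] the redash host.
        print(payload['event'], payload['alert'].get('name'))
        return jsonify(ok=True), 200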
51 redash/version_check.py Normal file
@@ -0,0 +1,51 @@
import logging
import requests
import semver

from redash import __version__ as current_version
from redash import redis_connection
from redash.utils import json_dumps

REDIS_KEY = "new_version_available"


def run_version_check():
    logging.info("Performing version check.")
    logging.info("Current version: %s", current_version)

    data = json_dumps({
        'current_version': current_version
    })
    headers = {'content-type': 'application/json'}

    try:
        response = requests.post('https://version.redash.io/api/report?channel=stable',
                                 data=data, headers=headers, timeout=3.0)
        latest_version = response.json()['release']['version']

        _compare_and_update(latest_version)
    except requests.RequestException:
        logging.exception("Failed checking for new version.")
    except (ValueError, KeyError):
        logging.exception("Failed checking for new version (probably bad/non-JSON response).")


def reset_new_version_status():
    latest_version = get_latest_version()
    if latest_version:
        _compare_and_update(latest_version)


def get_latest_version():
    return redis_connection.get(REDIS_KEY)


def _compare_and_update(latest_version):
    # TODO: support alpha channel (allow setting which channel to check & parse build number)
    is_newer = semver.compare(current_version, latest_version) == -1
    logging.info("Latest version: %s (newer: %s)", latest_version, is_newer)

    if is_newer:
        redis_connection.set(REDIS_KEY, latest_version)
    else:
        redis_connection.delete(REDIS_KEY)
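Note: semver.compare() returns -1, 0 or 1, so "a newer release exists" is exactly the -1 case checked in _compare_and_update(). Illustrative values:

    import semver

    assert semver.compare("0.8.2", "0.8.3") == -1  # current < latest: newer version available
    assert semver.compare("0.8.3", "0.8.3") == 0   # up to date
    assert semver.compare("0.9.0", "0.8.3") == 1   # running ahead of the stable channel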
@@ -1,5 +1,7 @@
from random import randint
from celery import Celery
from datetime import timedelta
from celery.schedules import crontab
from redash import settings, __version__


@@ -22,6 +24,14 @@ celery_schedule = {
    }
}

if settings.VERSION_CHECK:
    celery_schedule['version_check'] = {
        'task': 'redash.tasks.version_check',
        # We need to schedule the version check to run at a random hour/minute, to spread the requests from all users
        # evenly.
        'schedule': crontab(minute=randint(0, 59), hour=randint(0, 23))
    }

if settings.QUERY_RESULTS_CLEANUP_ENABLED:
    celery_schedule['cleanup_query_results'] = {
        'task': 'redash.tasks.cleanup_query_results',
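Note: randint() in the schedule above runs once, when the beat process imports redash.worker, so each instance keeps one fixed daily slot until it restarts; different instances almost surely pick different slots, which is what spreads the reporting load. A sketch:

    from random import randint
    from celery.schedules import crontab

    # Evaluated at import time: e.g. crontab(minute=17, hour=4) for this process.
    version_check_schedule = crontab(minute=randint(0, 59), hour=randint(0, 23))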
@@ -33,3 +33,5 @@ pysaml2==2.4.0
pycrypto==2.6.1
funcy==1.5
raven==5.6.0
semver==2.2.1
python-simple-hipchat==0.4.0
4 requirements_oracle_ds.txt Normal file
@@ -0,0 +1,4 @@
# Requires installation of the following packages (or similar versions):
# oracle-instantclient12.1-basic_12.1.0.2.0-2_amd64.deb
# oracle-instantclient12.1-devel_12.1.0.2.0-2_amd64.deb
cx_Oracle==5.2
@@ -1,12 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  # Every Vagrant virtual environment requires a box to build off of.
  config.vm.box = "box-cutter/debian76"
  config.vm.provision "shell", path: "setup.sh"
  config.vm.network "forwarded_port", guest: 80, host: 9001
end
1 setup/amazon_linux/README.md Normal file
@@ -0,0 +1 @@
Bootstrap script for Amazon Linux AMI. *Not supported*; we recommend using the Docker images instead.
@@ -2,8 +2,7 @@
set -eu

REDASH_BASE_PATH=/opt/redash
FILES_BASE_URL=https://raw.githubusercontent.com/EverythingMe/redash/docs_setup/setup/files/
FILE_BASE_URL_FOR_AMAZON_LINUX=https://raw.githubusercontent.com/EverythingMe/redash/master/setup/files/
FILES_BASE_URL=https://raw.githubusercontent.com/getredash/redash/master/setup/amazon_linux/files/
# Verify running as root:
if [ "$(id -u)" != "0" ]; then
if [ $# -ne 0 ]; then
@@ -106,7 +105,7 @@ fi

# Install latest version
REDASH_VERSION=${REDASH_VERSION-0.6.3.b906}
LATEST_URL="https://github.com/EverythingMe/redash/releases/download/v${REDASH_VERSION}/redash.$REDASH_VERSION.tar.gz"
LATEST_URL="https://github.com/getredash/redash/releases/download/v${REDASH_VERSION}/redash.$REDASH_VERSION.tar.gz"
VERSION_DIR="/opt/redash/redash.$REDASH_VERSION"
REDASH_TARBALL=/tmp/redash.tar.gz
@@ -178,7 +177,7 @@ fi


# Get supervisord startup script
sudo -u redash wget -O /opt/redash/supervisord/supervisord.conf $FILE_BASE_URL_FOR_AMAZON_LINUX"supervisord_for_amazon_linux.conf"
sudo -u redash wget -O /opt/redash/supervisord/supervisord.conf $FILES_BASE_URL"supervisord.conf"

# install start-stop-daemon
wget http://developer.axis.com/download/distribution/apps-sys-utils-start-stop-daemon-IR1_9_18-2.tar.gz
@@ -187,7 +186,7 @@ cd apps/sys-utils/start-stop-daemon-IR1_9_18-2/
gcc start-stop-daemon.c -o start-stop-daemon
cp start-stop-daemon /sbin/

wget -O /etc/init.d/redash_supervisord $FILE_BASE_URL_FOR_AMAZON_LINUX"redash_supervisord_init_for_amazon_linux"
wget -O /etc/init.d/redash_supervisord $FILES_BASE_URL"redash_supervisord_init"
add_service "redash_supervisord"

# Nginx setup
1 setup/docker/README.md Normal file
@@ -0,0 +1 @@
Files used for the Docker image creation.
27 setup/docker/create_database.sh Normal file
@@ -0,0 +1,27 @@
#!/bin/bash
# This script assumes you're using docker-compose, with at least two services: redash for the redash instance
# and postgres for the postgres instance.
#
# This script is not idempotent and should be run only once.

run_redash="docker-compose run --rm redash"

$run_redash /opt/redash/current/manage.py database create_tables

# Create default admin user
$run_redash /opt/redash/current/manage.py users create --admin --password admin "Admin" "admin"

# This is a hack to get the Postgres IP and PORT from the instance itself.
temp_env_file=`mktemp /tmp/pg_env.XXXXXX`
docker-compose run --rm postgres env > $temp_env_file
source $temp_env_file

run_psql="docker-compose run --rm postgres psql -h $POSTGRES_PORT_5432_TCP_ADDR -p $POSTGRES_PORT_5432_TCP_PORT -U postgres"

# Create redash_reader user. We don't use a strong password, as the instance is supposed to be accessible only from the redash host.
$run_psql -c "CREATE ROLE redash_reader WITH PASSWORD 'redash_reader' NOCREATEROLE NOCREATEDB NOSUPERUSER LOGIN"
$run_psql -c "grant select(id,name,type) ON data_sources to redash_reader;"
$run_psql -c "grant select(id,name) ON users to redash_reader;"
$run_psql -c "grant select on activity_log, events, queries, dashboards, widgets, visualizations, query_results to redash_reader;"

$run_redash /opt/redash/current/manage.py ds new -n "re:dash metadata" -t "pg" -o "{\"user\": \"redash_reader\", \"password\": \"redash_reader\", \"host\": \"postgres\", \"dbname\": \"postgres\"}"
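Note: the column-level grants above mean redash_reader can read only the whitelisted columns. An illustrative check (psycopg2 and the 'options' column name are assumptions here, not part of the script):

    import psycopg2

    conn = psycopg2.connect(host='postgres', dbname='postgres',
                            user='redash_reader', password='redash_reader')
    cur = conn.cursor()
    cur.execute("SELECT id, name, type FROM data_sources")  # covered by the grant
    print(cur.fetchall())
    try:
        cur.execute("SELECT options FROM data_sources")     # not granted
    except psycopg2.ProgrammingError as error:
        print("denied as expected:", error)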
@@ -1,7 +1,7 @@
[supervisord]
nodaemon=true
logfile=/opt/redash/logs/supervisord.log
pidfile=/opt/redash/supervisord/supervisord.pid
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
directory=/opt/redash/current

[inet_http_server]
@@ -11,38 +11,46 @@ port = 0.0.0.0:9001
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface

[program:redash_server]
command=/opt/redash/current/bin/run gunicorn -b 0.0.0.0:5000 --name redash -w 4 redash.wsgi:app
command=gunicorn -b 0.0.0.0:5000 --name redash -w 4 redash.wsgi:app
directory=/opt/redash/current
process_name=redash_server
numprocs=1
priority=999
autostart=true
autorestart=true
stdout_logfile=/opt/redash/logs/api.log
stderr_logfile=/opt/redash/logs/api_error.log
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
user=redash

# There are two queue types here: one for ad-hoc queries, and one for the refresh of scheduled queries
# (note that "scheduled_queries" appears only in the queue list of "redash_celery_scheduled").
# The concurrency level is set with the -c flag; increase it based on your machine's resources.

[program:redash_celery]
command=sudo -u redash /opt/redash/current/bin/run celery worker --app=redash.worker --beat -c2 -Qqueries,celery
command=celery worker --app=redash.worker --beat -c2 -Qqueries,celery
directory=/opt/redash/current
process_name=redash_celery
numprocs=1
priority=999
autostart=true
autorestart=true
stdout_logfile=/opt/redash/logs/celery.log
stderr_logfile=/opt/redash/logs/celery_error.log
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
user=redash

[program:redash_celery_scheduled]
command=sudo -u redash /opt/redash/current/bin/run celery worker --app=redash.worker -c2 -Qscheduled_queries
command=celery worker --app=redash.worker -c1 -Qscheduled_queries
directory=/opt/redash/current
process_name=redash_celery_scheduled
numprocs=1
priority=999
autostart=true
autorestart=true
stdout_logfile=/opt/redash/logs/celery.log
stderr_logfile=/opt/redash/logs/celery_error.log
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
user=redash
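Note: the two worker programs consume different queues, which is how ad-hoc and scheduled work stay isolated: redash_celery listens on "queries" and "celery" (and runs beat), while redash_celery_scheduled listens only on "scheduled_queries". Illustrative only, showing how a task could be pinned to one of those queues from Python (the arguments are made up):

    from redash.worker import celery

    celery.send_task(
        'redash.tasks.execute_query',
        args=('SELECT 1', 1, {}),
        queue='scheduled_queries',  # consumed only by redash_celery_scheduled
    )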
@@ -1,33 +0,0 @@
#!/bin/bash
# Create database / tables
pg_user_exists=0
psql --host=postgres --username=postgres postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='redash'" | grep -q 1 || pg_user_exists=$?
if [ $pg_user_exists -ne 0 ]; then
    echo "Creating redash postgres user & database."
    createuser redash --username=postgres --host=postgres --no-superuser --no-createdb --no-createrole
    createdb redash --username=postgres --host=postgres --owner=redash

    cd /opt/redash/current
    ./manage.py database create_tables
fi

# Create default admin user
cd /opt/redash/current
# TODO: make sure user created only once
# TODO: generate temp password and print to screen
./manage.py users create --admin --password admin "Admin" "admin"

# Create re:dash read only pg user & setup data source
pg_user_exists=0
psql --host=postgres --username=postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='redash_reader'" | grep -q 1 || pg_user_exists=$?
if [ $pg_user_exists -ne 0 ]; then
    echo "Creating redash reader postgres user."
    REDASH_READER_PASSWORD=$(pwgen -1)
    psql --host=postgres --username=postgres -c "CREATE ROLE redash_reader WITH PASSWORD '$REDASH_READER_PASSWORD' NOCREATEROLE NOCREATEDB NOSUPERUSER LOGIN"
    psql --host=postgres --username=postgres -c "grant select(id,name,type) ON data_sources to redash_reader;" redash
    psql --host=postgres --username=postgres -c "grant select(id,name) ON users to redash_reader;" redash
    psql --host=postgres --username=postgres -c "grant select on activity_log, events, queries, dashboards, widgets, visualizations, query_results to redash_reader;" redash

    cd /opt/redash/current
    ./manage.py ds new -n "re:dash metadata" -t "pg" -o "{\"user\": \"redash_reader\", \"password\": \"$REDASH_READER_PASSWORD\", \"host\": \"localhost\", \"dbname\": \"redash\"}"
fi
@@ -1,6 +0,0 @@
import urllib2
import json

latest = json.load(urllib2.urlopen("https://api.github.com/repos/EverythingMe/redash/releases/latest"))

print latest['assets'][0]['browser_download_url']
@@ -12,7 +12,7 @@
"access_key": "{{user `aws_access_key`}}",
"secret_key": "{{user `aws_secret_key`}}",
"region": "eu-west-1",
"source_ami": "ami-20cc9d57",
"source_ami": "ami-63a19214",
"instance_type": "t2.micro",
"ssh_username": "ubuntu",
"ami_name": "redash-{{user `image_version`}}-eu-west-1"
@@ -21,7 +21,7 @@
"provisioners": [
  {
    "type": "shell",
    "script": "bootstrap.sh",
    "script": "ubuntu/bootstrap.sh",
    "execute_command": "{{ .Vars }} sudo -E -S bash '{{ .Path }}'",
    "environment_vars": ["REDASH_VERSION={{user `redash_version`}}"]
  }
1 setup/ubuntu/README.md Normal file
@@ -0,0 +1 @@
Bootstrap scripts for Ubuntu (tested on Ubuntu 14.04, although they should also work on 12.04).
@@ -2,7 +2,7 @@
set -eu

REDASH_BASE_PATH=/opt/redash
FILES_BASE_URL=https://raw.githubusercontent.com/EverythingMe/redash/docs_setup/setup/files/
FILES_BASE_URL=https://raw.githubusercontent.com/getredash/redash/docker/setup/ubuntu/files/

# Verify running as root:
if [ "$(id -u)" != "0" ]; then
@@ -101,7 +101,7 @@ fi

# Install latest version
REDASH_VERSION=${REDASH_VERSION-0.7.1.b1015}
LATEST_URL="https://github.com/EverythingMe/redash/releases/download/v${REDASH_VERSION}/redash.$REDASH_VERSION.tar.gz"
LATEST_URL="https://github.com/getredash/redash/releases/download/v${REDASH_VERSION}/redash.$REDASH_VERSION.tar.gz"
VERSION_DIR="/opt/redash/redash.$REDASH_VERSION"
REDASH_TARBALL=/tmp/redash.tar.gz

@@ -176,4 +176,3 @@ rm /etc/nginx/sites-enabled/default
wget -O /etc/nginx/sites-available/redash $FILES_BASE_URL"nginx_redash_site"
ln -nfs /etc/nginx/sites-available/redash /etc/nginx/sites-enabled/redash
service nginx restart
6 setup/ubuntu/files/env Normal file
@@ -0,0 +1,6 @@
export REDASH_STATIC_ASSETS_PATH="../rd_ui/dist/"
export REDASH_LOG_LEVEL="INFO"
export REDASH_REDIS_URL=redis://localhost:6379/0
export REDASH_DATABASE_URL="postgresql://redash"
export REDASH_COOKIE_SECRET=veryverysecret
export REDASH_GOOGLE_APPS_DOMAIN=
20 setup/ubuntu/files/nginx_redash_site Normal file
@@ -0,0 +1,20 @@
upstream rd_servers {
  server 127.0.0.1:5000;
}

server {
  listen 80 default;

  access_log /var/log/nginx/rd.access.log;

  gzip on;
  gzip_types *;
  gzip_proxied any;

  location / {
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_pass http://rd_servers;
  }
}
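Note: because nginx terminates the client connection, the app behind it only sees 127.0.0.1 unless it trusts the X-Forwarded-For header set above. A minimal sketch using werkzeug's ProxyFix (an assumption about the app side; redash's actual WSGI setup may differ):

    from flask import Flask, request
    from werkzeug.contrib.fixers import ProxyFix  # import path valid for werkzeug < 1.0

    app = Flask(__name__)
    app.wsgi_app = ProxyFix(app.wsgi_app)  # trust X-Forwarded-For from nginx

    @app.route('/whoami')
    def whoami():
        return request.remote_addr  # real client IP instead of 127.0.0.1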
162 setup/ubuntu/files/postgres_apt.sh Normal file
@@ -0,0 +1,162 @@
#!/bin/sh

# script to add apt.postgresql.org to sources.list

# from command line
CODENAME="$1"
# lsb_release is the best interface, but not always available
if [ -z "$CODENAME" ]; then
    CODENAME=$(lsb_release -cs 2>/dev/null)
fi
# parse os-release (unreliable, does not work on Ubuntu)
if [ -z "$CODENAME" -a -f /etc/os-release ]; then
    . /etc/os-release
    # Debian: VERSION="7.0 (wheezy)"
    # Ubuntu: VERSION="13.04, Raring Ringtail"
    CODENAME=$(echo $VERSION | sed -ne 's/.*(\(.*\)).*/\1/')
fi
# guess from sources.list
if [ -z "$CODENAME" ]; then
    CODENAME=$(grep '^deb ' /etc/apt/sources.list | head -n1 | awk '{ print $3 }')
fi
# complain if no result yet
if [ -z "$CODENAME" ]; then
    cat <<EOF
Could not determine the distribution codename. Please report this as a bug to
pgsql-pkg-debian@postgresql.org. As a workaround, you can call this script with
the proper codename as parameter, e.g. "$0 squeeze".
EOF
    exit 1
fi

# errors are non-fatal above
set -e

cat <<EOF
This script will enable the PostgreSQL APT repository on apt.postgresql.org on
your system. The distribution codename used will be $CODENAME-pgdg.

EOF

case $CODENAME in
    # known distributions
    sid|wheezy|squeeze|lenny|etch) ;;
    precise|lucid) ;;
    *) # unknown distribution, verify on the web
        DISTURL="http://apt.postgresql.org/pub/repos/apt/dists/"
        if [ -x /usr/bin/curl ]; then
            DISTHTML=$(curl -s $DISTURL)
        elif [ -x /usr/bin/wget ]; then
            DISTHTML=$(wget --quiet -O - $DISTURL)
        fi
        if [ "$DISTHTML" ]; then
            if ! echo "$DISTHTML" | grep -q "$CODENAME-pgdg"; then
                cat <<EOF
Your system is using the distribution codename $CODENAME, but $CODENAME-pgdg
does not seem to be a valid distribution on
$DISTURL

We abort the installation here. If you want to use a distribution different
from your system, you can call this script with an explicit codename, e.g.
"$0 precise".

Specifically, if you are using a non-LTS Ubuntu release, refer to
https://wiki.postgresql.org/wiki/Apt/FAQ#I_am_using_a_non-LTS_release_of_Ubuntu

For more information, refer to https://wiki.postgresql.org/wiki/Apt
or ask on the mailing list for assistance: pgsql-pkg-debian@postgresql.org
EOF
                exit 1
            fi
        fi
        ;;
esac

echo "Writing /etc/apt/sources.list.d/pgdg.list ..."
cat > /etc/apt/sources.list.d/pgdg.list <<EOF
deb http://apt.postgresql.org/pub/repos/apt/ $CODENAME-pgdg main
#deb-src http://apt.postgresql.org/pub/repos/apt/ $CODENAME-pgdg main
EOF

echo "Importing repository signing key ..."
KEYRING="/etc/apt/trusted.gpg.d/apt.postgresql.org.gpg"
test -e $KEYRING || touch $KEYRING
apt-key --keyring $KEYRING add - <<EOF
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1

mQINBE6XR8IBEACVdDKT2HEH1IyHzXkb4nIWAY7echjRxo7MTcj4vbXAyBKOfjja
UrBEJWHN6fjKJXOYWXHLIYg0hOGeW9qcSiaa1/rYIbOzjfGfhE4x0Y+NJHS1db0V
G6GUj3qXaeyqIJGS2z7m0Thy4Lgr/LpZlZ78Nf1fliSzBlMo1sV7PpP/7zUO+aA4
bKa8Rio3weMXQOZgclzgeSdqtwKnyKTQdXY5MkH1QXyFIk1nTfWwyqpJjHlgtwMi
c2cxjqG5nnV9rIYlTTjYG6RBglq0SmzF/raBnF4Lwjxq4qRqvRllBXdFu5+2pMfC
IZ10HPRdqDCTN60DUix+BTzBUT30NzaLhZbOMT5RvQtvTVgWpeIn20i2NrPWNCUh
hj490dKDLpK/v+A5/i8zPvN4c6MkDHi1FZfaoz3863dylUBR3Ip26oM0hHXf4/2U
A/oA4pCl2W0hc4aNtozjKHkVjRx5Q8/hVYu+39csFWxo6YSB/KgIEw+0W8DiTII3
RQj/OlD68ZDmGLyQPiJvaEtY9fDrcSpI0Esm0i4sjkNbuuh0Cvwwwqo5EF1zfkVj
Tqz2REYQGMJGc5LUbIpk5sMHo1HWV038TWxlDRwtOdzw08zQA6BeWe9FOokRPeR2
AqhyaJJwOZJodKZ76S+LDwFkTLzEKnYPCzkoRwLrEdNt1M7wQBThnC5z6wARAQAB
tBxQb3N0Z3JlU1FMIERlYmlhbiBSZXBvc2l0b3J5iQI9BBMBCAAnAhsDBQsJCAcD
BRUKCQgLBRYCAwEAAh4BAheABQJS6RUZBQkOhCctAAoJEH/MfUaszEz4zmQP/2ad
HtuaXL5Xu3C3NGLha/aQb9iSJC8z5vN55HMCpsWlmslCBuEr+qR+oZvPkvwh0Io/
8hQl/qN54DMNifRwVL2n2eG52yNERie9BrAMK2kNFZZCH4OxlMN0876BmDuNq2U6
7vUtCv+pxT+g9R1LvlPgLCTjS3m+qMqUICJ310BMT2cpYlJx3YqXouFkdWBVurI0
pGU/+QtydcJALz5eZbzlbYSPWbOm2ZSS2cLrCsVNFDOAbYLtUn955yXB5s4rIscE
vTzBxPgID1iBknnPzdu2tCpk07yJleiupxI1yXstCtvhGCbiAbGFDaKzhgcAxSIX
0ZPahpaYLdCkcoLlfgD+ar4K8veSK2LazrhO99O0onRG0p7zuXszXphO4E/WdbTO
yDD35qCqYeAX6TaB+2l4kIdVqPgoXT/doWVLUK2NjZtd3JpMWI0OGYDFn2DAvgwP
xqKEoGTOYuoWKssnwLlA/ZMETegak27gFAKfoQlmHjeA/PLC2KRYd6Wg2DSifhn+
2MouoE4XFfeekVBQx98rOQ5NLwy/TYlsHXm1n0RW86ETN3chj/PPWjsi80t5oepx
82azRoVu95LJUkHpPLYyqwfueoVzp2+B2hJU2Rg7w+cJq64TfeJG8hrc93MnSKIb
zTvXfdPtvYdHhhA2LYu4+5mh5ASlAMJXD7zIOZt2iEYEEBEIAAYFAk6XSO4ACgkQ
xa93SlhRC1qmjwCg9U7U+XN7Gc/dhY/eymJqmzUGT/gAn0guvoX75Y+BsZlI6dWn
qaFU6N8HiQIcBBABCAAGBQJOl0kLAAoJEExaa6sS0qeuBfEP/3AnLrcKx+dFKERX
o4NBCGWr+i1CnowupKS3rm2xLbmiB969szG5TxnOIvnjECqPz6skK3HkV3jTZaju
v3sR6M2ItpnrncWuiLnYcCSDp9TEMpCWzTEgtrBlKdVuTNTeRGILeIcvqoZX5w+u
i0eBvvbeRbHEyUsvOEnYjrqoAjqUJj5FUZtR1+V9fnZp8zDgpOSxx0LomnFdKnhj
uyXAQlRCA6/roVNR9ruRjxTR5ubteZ9ubTsVYr2/eMYOjQ46LhAgR+3Alblu/WHB
MR/9F9//RuOa43R5Sjx9TiFCYol+Ozk8XRt3QGweEH51YkSYY3oRbHBb2Fkql6N6
YFqlLBL7/aiWnNmRDEs/cdpo9HpFsbjOv4RlsSXQfvvfOayHpT5nO1UQFzoyMVpJ
615zwmQDJT5Qy7uvr2eQYRV9AXt8t/H+xjQsRZCc5YVmeAo91qIzI/tA2gtXik49
6yeziZbfUvcZzuzjjxFExss4DSAwMgorvBeIbiz2k2qXukbqcTjB2XqAlZasd6Ll
nLXpQdqDV3McYkP/MvttWh3w+J/woiBcA7yEI5e3YJk97uS6+ssbqLEd0CcdT+qz
+Waw0z/ZIU99Lfh2Qm77OT6vr//Zulw5ovjZVO2boRIcve7S97gQ4KC+G/+QaRS+
VPZ67j5UMxqtT/Y4+NHcQGgwF/1iiQI9BBMBCAAnAhsDBQsJCAcDBRUKCQgLBRYC
AwEAAh4BAheABQJQeSssBQkDwxbfAAoJEH/MfUaszEz4bgkP/0AI0UgDgkNNqplA
IpE/pkwem2jgGpJGKurh2xDu6j2ZL+BPzPhzyCeMHZwTXkkI373TXGQQP8dIa+RD
HAZ3iijw4+ISdKWpziEUJjUk04UMPTlN+dYJt2EHLQDD0VLtX0yQC/wLmVEH/REp
oclbVjZR/+ehwX2IxOIlXmkZJDSycl975FnSUjMAvyzty8P9DN0fIrQ7Ju+BfMOM
TnUkOdp0kRUYez7pxbURJfkM0NxAP1geACI91aISBpFg3zxQs1d3MmUIhJ4wHvYB
uaR7Fx1FkLAxWddre/OCYJBsjucE9uqc04rgKVjN5P/VfqNxyUoB+YZ+8Lk4t03p
RBcD9XzcyOYlFLWXbcWxTn1jJ2QMqRIWi5lzZIOMw5B+OK9LLPX0dAwIFGr9WtuV
J2zp+D4CBEMtn4Byh8EaQsttHeqAkpZoMlrEeNBDz2L7RquPQNmiuom15nb7xU/k
7PGfqtkpBaaGBV9tJkdp7BdH27dZXx+uT+uHbpMXkRrXliHjWpAw+NGwADh/Pjmq
ExlQSdgAiXy1TTOdzxKH7WrwMFGDK0fddKr8GH3f+Oq4eOoNRa6/UhTCmBPbryCS
IA7EAd0Aae9YaLlOB+eTORg/F1EWLPm34kKSRtae3gfHuY2cdUmoDVnOF8C9hc0P
bL65G4NWPt+fW7lIj+0+kF19s2PviQI9BBMBCAAnAhsDBQsJCAcDBRUKCQgLBRYC
AwEAAh4BAheABQJRKm2VBQkINsBBAAoJEH/MfUaszEz4RTEP/1sQHyjHaUiAPaCA
v8jw/3SaWP/g8qLjpY6ROjLnDMvwKwRAoxUwcIv4/TWDOMpwJN+CJIbjXsXNYvf9
OX+UTOvq4iwi4ADrAAw2xw+Jomc6EsYla+hkN2FzGzhpXfZFfUsuphjY3FKL+4hX
H+R8ucNwIz3yrkfc17MMn8yFNWFzm4omU9/JeeaafwUoLxlULL2zY7H3+QmxCl0u
6t8VvlszdEFhemLHzVYRY0Ro/ISrR78CnANNsMIy3i11U5uvdeWVCoWV1BXNLzOD
4+BIDbMB/Do8PQCWiliSGZi8lvmj/sKbumMFQonMQWOfQswTtqTyQ3yhUM1LaxK5
PYq13rggi3rA8oq8SYb/KNCQL5pzACji4TRVK0kNpvtxJxe84X8+9IB1vhBvF/Ji
/xDd/3VDNPY+k1a47cON0S8Qc8DA3mq4hRfcgvuWy7ZxoMY7AfSJOhleb9+PzRBB
n9agYgMxZg1RUWZazQ5KuoJqbxpwOYVFja/stItNS4xsmi0lh2I4MNlBEDqnFLUx
SvTDc22c3uJlWhzBM/f2jH19uUeqm4jaggob3iJvJmK+Q7Ns3WcfhuWwCnc1+58d
iFAMRUCRBPeFS0qd56QGk1r97B6+3UfLUslCfaaA8IMOFvQSHJwDO87xWGyxeRTY
IIP9up4xwgje9LB7fMxsSkCDTHOk
=s3DI
-----END PGP PUBLIC KEY BLOCK-----
EOF

echo "Running apt-get update ..."
apt-get update

cat <<EOF

You can now start installing packages from apt.postgresql.org.

Have a look at https://wiki.postgresql.org/wiki/Apt for more information;
most notably the FAQ at https://wiki.postgresql.org/wiki/Apt/FAQ
EOF
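Note: the script's codename detection is a three-step fallback: lsb_release, then /etc/os-release, then the first deb line in sources.list. A hypothetical Python port of the first two steps, for illustration only:

    import subprocess

    def detect_codename():
        # 1. lsb_release is the most reliable interface when present.
        try:
            return subprocess.check_output(['lsb_release', '-cs']).decode().strip()
        except (OSError, subprocess.CalledProcessError):
            pass
        # 2. Fall back to VERSION="7.0 (wheezy)" in /etc/os-release.
        try:
            with open('/etc/os-release') as f:
                for line in f:
                    if line.startswith('VERSION=') and '(' in line:
                        return line.split('(')[1].split(')')[0]
        except IOError:
            pass
        return None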
785
setup/ubuntu/files/redis.conf
Normal file
785
setup/ubuntu/files/redis.conf
Normal file
@@ -0,0 +1,785 @@
|
||||
## Generated by install_server.sh ##
|
||||
# Redis configuration file example
|
||||
|
||||
# Note on units: when memory size is needed, it is possible to specify
|
||||
# it in the usual form of 1k 5GB 4M and so forth:
|
||||
#
|
||||
# 1k => 1000 bytes
|
||||
# 1kb => 1024 bytes
|
||||
# 1m => 1000000 bytes
|
||||
# 1mb => 1024*1024 bytes
|
||||
# 1g => 1000000000 bytes
|
||||
# 1gb => 1024*1024*1024 bytes
|
||||
#
|
||||
# units are case insensitive so 1GB 1Gb 1gB are all the same.
|
||||
|
||||
################################## INCLUDES ###################################
|
||||
|
||||
# Include one or more other config files here. This is useful if you
|
||||
# have a standard template that goes to all Redis server but also need
|
||||
# to customize a few per-server settings. Include files can include
|
||||
# other files, so use this wisely.
|
||||
#
|
||||
# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
|
||||
# from admin or Redis Sentinel. Since Redis always uses the last processed
|
||||
# line as value of a configuration directive, you'd better put includes
|
||||
# at the beginning of this file to avoid overwriting config change at runtime.
|
||||
#
|
||||
# If instead you are interested in using includes to override configuration
|
||||
# options, it is better to use include as the last line.
|
||||
#
|
||||
# include /path/to/local.conf
|
||||
# include /path/to/other.conf
|
||||
|
||||
################################ GENERAL #####################################
|
||||
|
||||
# By default Redis does not run as a daemon. Use 'yes' if you need it.
|
||||
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
|
||||
daemonize yes
|
||||
|
||||
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
|
||||
# default. You can specify a custom pid file location here.
|
||||
pidfile /var/run/redis_6379.pid
|
||||
|
||||
# Accept connections on the specified port, default is 6379.
|
||||
# If port 0 is specified Redis will not listen on a TCP socket.
|
||||
port 6379
|
||||
|
||||
# TCP listen() backlog.
|
||||
#
|
||||
# In high requests-per-second environments you need an high backlog in order
|
||||
# to avoid slow clients connections issues. Note that the Linux kernel
|
||||
# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
|
||||
# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
|
||||
# in order to get the desired effect.
|
||||
tcp-backlog 511
|
||||
|
||||
# By default Redis listens for connections from all the network interfaces
|
||||
# available on the server. It is possible to listen to just one or multiple
|
||||
# interfaces using the "bind" configuration directive, followed by one or
|
||||
# more IP addresses.
|
||||
#
|
||||
# Examples:
|
||||
#
|
||||
# bind 192.168.1.100 10.0.0.1
|
||||
bind 127.0.0.1
|
||||
|
||||
# Specify the path for the Unix socket that will be used to listen for
|
||||
# incoming connections. There is no default, so Redis will not listen
|
||||
# on a unix socket when not specified.
|
||||
#
|
||||
# unixsocket /tmp/redis.sock
|
||||
# unixsocketperm 700
|
||||
|
||||
# Close the connection after a client is idle for N seconds (0 to disable)
|
||||
timeout 0
|
||||
|
||||
# TCP keepalive.
|
||||
#
|
||||
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
|
||||
# of communication. This is useful for two reasons:
|
||||
#
|
||||
# 1) Detect dead peers.
|
||||
# 2) Take the connection alive from the point of view of network
|
||||
# equipment in the middle.
|
||||
#
|
||||
# On Linux, the specified value (in seconds) is the period used to send ACKs.
|
||||
# Note that to close the connection the double of the time is needed.
|
||||
# On other kernels the period depends on the kernel configuration.
|
||||
#
|
||||
# A reasonable value for this option is 60 seconds.
|
||||
tcp-keepalive 0
|
||||
|
||||
# Specify the server verbosity level.
|
||||
# This can be one of:
|
||||
# debug (a lot of information, useful for development/testing)
|
||||
# verbose (many rarely useful info, but not a mess like the debug level)
|
||||
# notice (moderately verbose, what you want in production probably)
|
||||
# warning (only very important / critical messages are logged)
|
||||
loglevel notice
|
||||
|
||||
# Specify the log file name. Also the empty string can be used to force
|
||||
# Redis to log on the standard output. Note that if you use standard
|
||||
# output for logging but daemonize, logs will be sent to /dev/null
|
||||
logfile /var/log/redis_6379.log
|
||||
|
||||
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
|
||||
# and optionally update the other syslog parameters to suit your needs.
|
||||
# syslog-enabled no
|
||||
|
||||
# Specify the syslog identity.
|
||||
# syslog-ident redis
|
||||
|
||||
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
|
||||
# syslog-facility local0
|
||||
|
||||
# Set the number of databases. The default database is DB 0, you can select
|
||||
# a different one on a per-connection basis using SELECT <dbid> where
|
||||
# dbid is a number between 0 and 'databases'-1
|
||||
databases 16
|
||||
|
||||
################################ SNAPSHOTTING ################################
|
||||
#
|
||||
# Save the DB on disk:
|
||||
#
|
||||
# save <seconds> <changes>
|
||||
#
|
||||
# Will save the DB if both the given number of seconds and the given
|
||||
# number of write operations against the DB occurred.
|
||||
#
|
||||
# In the example below the behaviour will be to save:
|
||||
# after 900 sec (15 min) if at least 1 key changed
|
||||
# after 300 sec (5 min) if at least 10 keys changed
|
||||
# after 60 sec if at least 10000 keys changed
|
||||
#
|
||||
# Note: you can disable saving at all commenting all the "save" lines.
|
||||
#
|
||||
# It is also possible to remove all the previously configured save
|
||||
# points by adding a save directive with a single empty string argument
|
||||
# like in the following example:
|
||||
#
|
||||
# save ""
|
||||
|
||||
save 900 1
|
||||
save 300 10
|
||||
save 60 10000
|
||||
|
||||
# By default Redis will stop accepting writes if RDB snapshots are enabled
|
||||
# (at least one save point) and the latest background save failed.
|
||||
# This will make the user aware (in a hard way) that data is not persisting
|
||||
# on disk properly, otherwise chances are that no one will notice and some
|
||||
# disaster will happen.
|
||||
#
|
||||
# If the background saving process will start working again Redis will
|
||||
# automatically allow writes again.
|
||||
#
|
||||
# However if you have setup your proper monitoring of the Redis server
|
||||
# and persistence, you may want to disable this feature so that Redis will
|
||||
# continue to work as usual even if there are problems with disk,
|
||||
# permissions, and so forth.
|
||||
stop-writes-on-bgsave-error yes
|
||||
|
||||
# Compress string objects using LZF when dump .rdb databases?
|
||||
# For default that's set to 'yes' as it's almost always a win.
|
||||
# If you want to save some CPU in the saving child set it to 'no' but
|
||||
# the dataset will likely be bigger if you have compressible values or keys.
|
||||
rdbcompression yes
|
||||
|
||||
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
|
||||
# This makes the format more resistant to corruption but there is a performance
|
||||
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
|
||||
# for maximum performances.
|
||||
#
|
||||
# RDB files created with checksum disabled have a checksum of zero that will
|
||||
# tell the loading code to skip the check.
|
||||
rdbchecksum yes
|
||||
|
||||
# The filename where to dump the DB
|
||||
dbfilename dump.rdb
|
||||
|
||||
# The working directory.
|
||||
#
|
||||
# The DB will be written inside this directory, with the filename specified
|
||||
# above using the 'dbfilename' configuration directive.
|
||||
#
|
||||
# The Append Only File will also be created inside this directory.
|
||||
#
|
||||
# Note that you must specify a directory here, not a file name.
|
||||
dir /var/lib/redis/6379
|
||||
|
||||
################################# REPLICATION #################################
|
||||
|
||||
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
|
||||
# another Redis server. A few things to understand ASAP about Redis replication.
|
||||
#
|
||||
# 1) Redis replication is asynchronous, but you can configure a master to
|
||||
# stop accepting writes if it appears to be not connected with at least
|
||||
# a given number of slaves.
|
||||
# 2) Redis slaves are able to perform a partial resynchronization with the
|
||||
# master if the replication link is lost for a relatively small amount of
|
||||
# time. You may want to configure the replication backlog size (see the next
|
||||
# sections of this file) with a sensible value depending on your needs.
|
||||
# 3) Replication is automatic and does not need user intervention. After a
|
||||
# network partition slaves automatically try to reconnect to masters
|
||||
# and resynchronize with them.
|
||||
#
|
||||
# slaveof <masterip> <masterport>
|
||||
|
||||
# If the master is password protected (using the "requirepass" configuration
|
||||
# directive below) it is possible to tell the slave to authenticate before
|
||||
# starting the replication synchronization process, otherwise the master will
|
||||
# refuse the slave request.
|
||||
#
|
||||
# masterauth <master-password>
|
||||
|
||||
# When a slave loses its connection with the master, or when the replication
|
||||
# is still in progress, the slave can act in two different ways:
|
||||
#
|
||||
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
|
||||
# still reply to client requests, possibly with out of date data, or the
|
||||
# data set may just be empty if this is the first synchronization.
|
||||
#
|
||||
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
|
||||
# an error "SYNC with master in progress" to all the kind of commands
|
||||
# but to INFO and SLAVEOF.
|
||||
#
|
||||
slave-serve-stale-data yes
|
||||
|
||||
# You can configure a slave instance to accept writes or not. Writing against
|
||||
# a slave instance may be useful to store some ephemeral data (because data
|
||||
# written on a slave will be easily deleted after resync with the master) but
|
||||
# may also cause problems if clients are writing to it because of a
|
||||
# misconfiguration.
|
||||
#
|
||||
# Since Redis 2.6 by default slaves are read-only.
|
||||
#
|
||||
# Note: read only slaves are not designed to be exposed to untrusted clients
|
||||
# on the internet. It's just a protection layer against misuse of the instance.
|
||||
# Still a read only slave exports by default all the administrative commands
|
||||
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
|
||||
# security of read only slaves using 'rename-command' to shadow all the
|
||||
# administrative / dangerous commands.
|
||||
slave-read-only yes
|
||||
|
||||
# Slaves send PINGs to server in a predefined interval. It's possible to change
|
||||
# this interval with the repl_ping_slave_period option. The default value is 10
|
||||
# seconds.
|
||||
#
|
||||
# repl-ping-slave-period 10
|
||||
|
||||
# The following option sets the replication timeout for:
|
||||
#
|
||||
# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
|
||||
# 2) Master timeout from the point of view of slaves (data, pings).
|
||||
# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
|
||||
#
|
||||
# It is important to make sure that this value is greater than the value
|
||||
# specified for repl-ping-slave-period otherwise a timeout will be detected
|
||||
# every time there is low traffic between the master and the slave.
|
||||
#
|
||||
# repl-timeout 60
|
||||
|
||||
# Disable TCP_NODELAY on the slave socket after SYNC?
|
||||
#
|
||||
# If you select "yes" Redis will use a smaller number of TCP packets and
|
||||
# less bandwidth to send data to slaves. But this can add a delay for
|
||||
# the data to appear on the slave side, up to 40 milliseconds with
|
||||
# Linux kernels using a default configuration.
|
||||
#
|
||||
# If you select "no" the delay for data to appear on the slave side will
|
||||
# be reduced but more bandwidth will be used for replication.
|
||||
#
|
||||
# By default we optimize for low latency, but in very high traffic conditions
|
||||
# or when the master and slaves are many hops away, turning this to "yes" may
|
||||
# be a good idea.
|
||||
repl-disable-tcp-nodelay no
|
||||
|
||||
# Set the replication backlog size. The backlog is a buffer that accumulates
|
||||
# slave data when slaves are disconnected for some time, so that when a slave
|
||||
# wants to reconnect again, often a full resync is not needed, but a partial
|
||||
# resync is enough, just passing the portion of data the slave missed while
|
||||
# disconnected.
|
||||
#
|
||||
# The biggest the replication backlog, the longer the time the slave can be
|
||||
# disconnected and later be able to perform a partial resynchronization.
|
||||
#
|
||||
# The backlog is only allocated once there is at least a slave connected.
|
||||
#
|
||||
# repl-backlog-size 1mb
|
||||
|
||||
# After a master has no longer connected slaves for some time, the backlog
|
||||
# will be freed. The following option configures the amount of seconds that
|
||||
# need to elapse, starting from the time the last slave disconnected, for
|
||||
# the backlog buffer to be freed.
|
||||
#
|
||||
# A value of 0 means to never release the backlog.
|
||||
#
|
||||
# repl-backlog-ttl 3600
|
||||
|
||||
# The slave priority is an integer number published by Redis in the INFO output.
|
||||
# It is used by Redis Sentinel in order to select a slave to promote into a
|
||||
# master if the master is no longer working correctly.
|
||||
#
|
||||
# A slave with a low priority number is considered better for promotion, so
|
||||
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
|
||||
# pick the one with priority 10, that is the lowest.
|
||||
#
|
||||
# However a special priority of 0 marks the slave as not able to perform the
|
||||
# role of master, so a slave with priority of 0 will never be selected by
|
||||
# Redis Sentinel for promotion.
|
||||
#
|
||||
# By default the priority is 100.
|
||||
slave-priority 100
|
||||
|
||||
# It is possible for a master to stop accepting writes if there are less than
|
||||
# N slaves connected, having a lag less or equal than M seconds.
|
||||
#
|
||||
# The N slaves need to be in "online" state.
|
||||
#
|
||||
# The lag in seconds, that must be <= the specified value, is calculated from
|
||||
# the last ping received from the slave, that is usually sent every second.
|
||||
#
|
||||
# This option does not GUARANTEES that N replicas will accept the write, but
|
||||
# will limit the window of exposure for lost writes in case not enough slaves
|
||||
# are available, to the specified number of seconds.
|
||||
#
|
||||
# For example to require at least 3 slaves with a lag <= 10 seconds use:
|
||||
#
|
||||
# min-slaves-to-write 3
|
||||
# min-slaves-max-lag 10
|
||||
#
|
||||
# Setting one or the other to 0 disables the feature.
|
||||
#
|
||||
# By default min-slaves-to-write is set to 0 (feature disabled) and
|
||||
# min-slaves-max-lag is set to 10.
|
||||
|
||||
################################## SECURITY ###################################
|
||||
|
||||
# Require clients to issue AUTH <PASSWORD> before processing any other
|
||||
# commands. This might be useful in environments in which you do not trust
|
||||
# others with access to the host running redis-server.
|
||||
#
|
||||
# This should stay commented out for backward compatibility and because most
|
||||
# people do not need auth (e.g. they run their own servers).
|
||||
#
|
||||
# Warning: since Redis is pretty fast an outside user can try up to
|
||||
# 150k passwords per second against a good box. This means that you should
|
||||
# use a very strong password otherwise it will be very easy to break.
|
||||
#
|
||||
# requirepass foobared
|
||||
|
||||
# Command renaming.
|
||||
#
|
||||
# It is possible to change the name of dangerous commands in a shared
|
||||
# environment. For instance the CONFIG command may be renamed into something
|
||||
# hard to guess so that it will still be available for internal-use tools
|
||||
# but not available for general clients.
|
||||
#
|
||||
# Example:
|
||||
#
|
||||
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
|
||||
#
|
||||
# It is also possible to completely kill a command by renaming it into
|
||||
# an empty string:
|
||||
#
|
||||
# rename-command CONFIG ""
|
||||
#
|
||||
# Please note that changing the name of commands that are logged into the
|
||||
# AOF file or transmitted to slaves may cause problems.
|
||||
|
||||
################################### LIMITS ####################################
|
||||
|
||||
# Set the max number of connected clients at the same time. By default
|
||||
# this limit is set to 10000 clients, however if the Redis server is not
|
||||
# able to configure the process file limit to allow for the specified limit
|
||||
# the max number of allowed clients is set to the current file limit
|
||||
# minus 32 (as Redis reserves a few file descriptors for internal uses).
|
||||
#
|
||||
# Once the limit is reached Redis will close all the new connections sending
|
||||
# an error 'max number of clients reached'.
|
||||
#
|
||||
# maxclients 10000
|
||||
|
||||
# Don't use more memory than the specified amount of bytes.
|
||||
# When the memory limit is reached Redis will try to remove keys
|
||||
# according to the eviction policy selected (see maxmemory-policy).
|
||||
#
|
||||
# If Redis can't remove keys according to the policy, or if the policy is
|
||||
# set to 'noeviction', Redis will start to reply with errors to commands
|
||||
# that would use more memory, like SET, LPUSH, and so on, and will continue
|
||||
# to reply to read-only commands like GET.
|
||||
#
|
||||
# This option is usually useful when using Redis as an LRU cache, or to set
|
||||
# a hard memory limit for an instance (using the 'noeviction' policy).
|
||||
#
|
||||
# WARNING: If you have slaves attached to an instance with maxmemory on,
|
||||
# the size of the output buffers needed to feed the slaves are subtracted
|
||||
# from the used memory count, so that network problems / resyncs will
|
||||
# not trigger a loop where keys are evicted, and in turn the output
|
||||
# buffer of slaves is full with DELs of keys evicted triggering the deletion
|
||||
# of more keys, and so forth until the database is completely emptied.
|
||||
#
|
||||
# In short... if you have slaves attached it is suggested that you set a lower
|
||||
# limit for maxmemory so that there is some free RAM on the system for slave
|
||||
# output buffers (but this is not needed if the policy is 'noeviction').
|
||||
#
|
||||
# maxmemory <bytes>
|
||||
|
||||
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
|
||||
# is reached. You can select among five behaviors:
|
||||
#
|
||||
# volatile-lru -> remove the key with an expire set using an LRU algorithm
|
||||
# allkeys-lru -> remove any key accordingly to the LRU algorithm
|
||||
# volatile-random -> remove a random key with an expire set
|
||||
# allkeys-random -> remove a random key, any key
|
||||
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
|
||||
# noeviction -> don't expire at all, just return an error on write operations
|
||||
#
|
||||
# Note: with any of the above policies, Redis will return an error on write
|
||||
# operations, when there are not suitable keys for eviction.
|
||||
#
|
||||
# At the date of writing this commands are: set setnx setex append
|
||||
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
|
||||
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
|
||||
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
|
||||
# getset mset msetnx exec sort
|
||||
#
|
||||
# The default is:
|
||||
#
|
||||
# maxmemory-policy volatile-lru
|
||||
|
||||
# LRU and minimal TTL algorithms are not precise algorithms but approximated
|
||||
# algorithms (in order to save memory), so you can select as well the sample
|
||||
# size to check. For instance for default Redis will check three keys and
|
||||
# pick the one that was used less recently, you can change the sample size
|
||||
# using the following configuration directive.
|
||||
#
|
||||
# maxmemory-samples 3
|
||||
|
||||
############################## APPEND ONLY MODE ###############################
|
||||
|
||||
# By default Redis asynchronously dumps the dataset on disk. This mode is
|
||||
# good enough in many applications, but an issue with the Redis process or
|
||||
# a power outage may result into a few minutes of writes lost (depending on
|
||||
# the configured save points).
|
||||
#
|
||||
# The Append Only File is an alternative persistence mode that provides
|
||||
# much better durability. For instance using the default data fsync policy
|
||||
# (see later in the config file) Redis can lose just one second of writes in a
|
||||
# dramatic event like a server power outage, or a single write if something
|
||||
# wrong with the Redis process itself happens, but the operating system is
|
||||
# still running correctly.
|
||||
#
|
||||
# AOF and RDB persistence can be enabled at the same time without problems.
|
||||
# If the AOF is enabled on startup Redis will load the AOF, that is the file
|
||||
# with the better durability guarantees.
|
||||
#
|
||||
# Please check http://redis.io/topics/persistence for more information.
|
||||
|
||||
appendonly no
|
||||
|
||||
# The name of the append only file (default: "appendonly.aof")
|
||||
|
||||
appendfilename "appendonly.aof"
|
||||
|
||||
# The fsync() call tells the Operating System to actually write data on disk
|
||||
# instead to wait for more data in the output buffer. Some OS will really flush
|
||||
# data on disk, some other OS will just try to do it ASAP.
|
||||
#
|
||||
# Redis supports three different modes:
|
||||
#
|
||||
# no: don't fsync, just let the OS flush the data when it wants. Faster.
|
||||
# always: fsync after every write to the append only log . Slow, Safest.
|
||||
# everysec: fsync only one time every second. Compromise.
|
||||
#
|
||||
# The default is "everysec", as that's usually the right compromise between
|
||||
# speed and data safety. It's up to you to understand if you can relax this to
|
||||
# "no" that will let the operating system flush the output buffer when
|
||||
# it wants, for better performances (but if you can live with the idea of
|
||||
# some data loss consider the default persistence mode that's snapshotting),
|
||||
# or on the contrary, use "always" that's very slow but a bit safer than
|
||||
# everysec.
|
||||
#
|
||||
# More details please check the following article:
|
||||
# http://antirez.com/post/redis-persistence-demystified.html
|
||||
#
|
||||
# If unsure, use "everysec".
|
||||
|
||||
# appendfsync always
|
||||
appendfsync everysec
|
||||
# appendfsync no
|
||||
|
||||
# When the AOF fsync policy is set to always or everysec, and a background
|
||||
# saving process (a background save or AOF log background rewriting) is
|
||||
# performing a lot of I/O against the disk, in some Linux configurations
|
||||
# Redis may block too long on the fsync() call. Note that there is no fix for
|
||||
# this currently, as even performing fsync in a different thread will block
|
||||
# our synchronous write(2) call.
|
||||
#
|
||||
# In order to mitigate this problem it's possible to use the following option
|
||||
# that will prevent fsync() from being called in the main process while a
|
||||
# BGSAVE or BGREWRITEAOF is in progress.
|
||||
#
|
||||
# This means that while another child is saving, the durability of Redis is
|
||||
# the same as "appendfsync none". In practical terms, this means that it is
|
||||
# possible to lose up to 30 seconds of log in the worst scenario (with the
|
||||
# default Linux settings).
|
||||
#
|
||||
# If you have latency problems turn this to "yes". Otherwise leave it as
|
||||
# "no" that is the safest pick from the point of view of durability.
|
||||
|
||||
no-appendfsync-on-rewrite no
|
||||
|
||||
# Automatic rewrite of the append only file.
|
||||
# Redis is able to automatically rewrite the log file implicitly calling
|
||||
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
|
||||
#
|
||||
# This is how it works: Redis remembers the size of the AOF file after the
|
||||
# latest rewrite (if no rewrite has happened since the restart, the size of
|
||||
# the AOF at startup is used).
|
||||
#
|
||||
# This base size is compared to the current size. If the current size is
|
||||
# bigger than the specified percentage, the rewrite is triggered. Also
|
||||
# you need to specify a minimal size for the AOF file to be rewritten, this
|
||||
# is useful to avoid rewriting the AOF file even if the percentage increase
|
||||
# is reached but it is still pretty small.
|
||||
#
|
||||
# Specify a percentage of zero in order to disable the automatic AOF
|
||||
# rewrite feature.
|
||||
|
||||
auto-aof-rewrite-percentage 100
|
||||
auto-aof-rewrite-min-size 64mb
|
||||
|
||||
# An AOF file may be found to be truncated at the end during the Redis
|
||||
# startup process, when the AOF data gets loaded back into memory.
|
||||
# This may happen when the system where Redis is running
|
||||
# crashes, especially when an ext4 filesystem is mounted without the
|
||||
# data=ordered option (however this can't happen when Redis itself
|
||||
# crashes or aborts but the operating system still works correctly).
|
||||
#
|
||||
# Redis can either exit with an error when this happens, or load as much
|
||||
# data as possible (the default now) and start if the AOF file is found
|
||||
# to be truncated at the end. The following option controls this behavior.
|
||||
#
|
||||
# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
|
||||
# the Redis server starts emitting a log to inform the user of the event.
|
||||
# Otherwise if the option is set to no, the server aborts with an error
|
||||
# and refuses to start. When the option is set to no, the user requires
|
||||
# to fix the AOF file using the "redis-check-aof" utility before to restart
|
||||
# the server.
|
||||
#
|
||||
# Note that if the AOF file will be found to be corrupted in the middle
|
||||
# the server will still exit with an error. This option only applies when
|
||||
# Redis will try to read more data from the AOF file but not enough bytes
|
||||
# will be found.
|
||||
aof-load-truncated yes
|
||||
|
||||
################################ LUA SCRIPTING ###############################
|
||||
|
||||
# Max execution time of a Lua script in milliseconds.
|
||||
#
|
||||
# If the maximum execution time is reached Redis will log that a script is
|
||||
# still in execution after the maximum allowed time and will start to
|
||||
# reply to queries with an error.
|
||||
#
|
||||
# When a long running script exceed the maximum execution time only the
|
||||
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
|
||||
# used to stop a script that did not yet called write commands. The second
|
||||
# is the only way to shut down the server in the case a write commands was
|
||||
# already issue by the script but the user don't want to wait for the natural
|
||||
# termination of the script.
|
||||
#
|
||||
# Set it to 0 or a negative value for unlimited execution without warnings.
|
||||
lua-time-limit 5000
|
||||
|
||||
################################## SLOW LOG ###################################

# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and cannot serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.

# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000

# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128

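# For illustration, a minimal redis-py sketch (local server assumed) that
# tunes the slow log at runtime and reads it back:
#
#   import redis
#
#   r = redis.StrictRedis(host="localhost", port=6379, db=0)
#   r.config_set("slowlog-log-slower-than", 10000)  # microseconds
#   r.config_set("slowlog-max-len", 128)
#   for entry in r.slowlog_get(10):  # the 10 most recent entries
#       print(entry["id"], entry["duration"], entry["command"])
#   r.slowlog_reset()                # reclaims the memory the log uses
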
################################ LATENCY MONITOR ##############################

# The Redis latency monitoring subsystem samples different operations
# at runtime in order to collect data related to possible sources of
# latency of a Redis instance.
#
# Via the LATENCY command this information is available to the user, who can
# print graphs and obtain reports.
#
# The system only logs operations that were performed in a time equal to or
# greater than the amount of milliseconds specified via the
# latency-monitor-threshold configuration directive. When its value is set
# to zero, the latency monitor is turned off.
#
# By default latency monitoring is disabled since it is mostly not needed
# if you don't have latency issues, and collecting data has a performance
# impact, that while very small, can be measured under big load. Latency
# monitoring can easily be enabled at runtime using the command
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
latency-monitor-threshold 0

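# For illustration, a redis-py sketch (local server assumed; a Redis version
# that ships the latency monitor is also assumed). Older redis-py versions
# have no dedicated helper for LATENCY, so the generic execute_command() is
# used here:
#
#   import redis
#
#   r = redis.StrictRedis(host="localhost", port=6379, db=0)
#   r.config_set("latency-monitor-threshold", 100)  # log events slower than 100 ms
#   print(r.execute_command("LATENCY", "LATEST"))   # [[event, time, last, max], ...]
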
############################# Event notification ##############################

# Redis can notify Pub/Sub clients about events happening in the key space.
# This feature is documented at http://redis.io/topics/notifications
#
# For instance if keyspace events notification is enabled, and a client
# performs a DEL operation on key "foo" stored in Database 0, two
# messages will be published via Pub/Sub:
#
# PUBLISH __keyspace@0__:foo del
# PUBLISH __keyevent@0__:del foo
#
# It is possible to select the events that Redis will notify among a set
# of classes. Every class is identified by a single character:
#
#  K     Keyspace events, published with __keyspace@<db>__ prefix.
#  E     Keyevent events, published with __keyevent@<db>__ prefix.
#  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
#  $     String commands
#  l     List commands
#  s     Set commands
#  h     Hash commands
#  z     Sorted set commands
#  x     Expired events (events generated every time a key expires)
#  e     Evicted events (events generated when a key is evicted for maxmemory)
#  A     Alias for g$lshzxe, so that the "AKE" string means all the events.
#
# The "notify-keyspace-events" directive takes as argument a string that is
# composed of zero or more characters. The empty string means that
# notifications are disabled entirely.
#
# Example: to enable list and generic events, from the point of view of the
# event name, use:
#
#  notify-keyspace-events Elg
#
# Example 2: to get the stream of the expired keys subscribing to channel
# name __keyevent@0__:expired use:
#
#  notify-keyspace-events Ex
#
# By default all notifications are disabled because most users don't need
# this feature and the feature has some overhead. Note that if you don't
# specify at least one of K or E, no events will be delivered.
notify-keyspace-events ""

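# For illustration, a hedged redis-py sketch (local server assumed) that
# subscribes to expired-key events; "Ex" enables the keyevent and expired
# classes:
#
#   import time
#   import redis
#
#   r = redis.StrictRedis(host="localhost", port=6379, db=0)
#   r.config_set("notify-keyspace-events", "Ex")
#   p = r.pubsub()
#   p.psubscribe("__keyevent@0__:expired")
#   r.set("foo", "bar", ex=1)  # key expires after one second
#   time.sleep(1.5)
#   msg = p.get_message()
#   while msg is not None:
#       if msg["type"] == "pmessage":
#           print(msg["channel"], msg["data"])  # __keyevent@0__:expired foo
#       msg = p.get_message()
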
############################### ADVANCED CONFIG ###############################

# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64

# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64

# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit on the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512

# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64

# HyperLogLog sparse representation bytes limit. The limit includes the
# 16 byte header. When a HyperLogLog using the sparse representation crosses
# this limit, it is converted into the dense representation.
#
# A value greater than 16000 is totally useless, since at that point the
# dense representation is more memory efficient.
#
# The suggested value is ~ 3000 in order to have the benefits of
# the space efficient encoding without slowing down PFADD too much,
# which is O(N) with the sparse encoding. The value can be raised to
# ~ 10000 when CPU is not a concern, but space is, and the data set is
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000

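# For illustration, a minimal redis-py sketch (local server assumed) that
# observes the encoding conversion with OBJECT ENCODING:
#
#   import redis
#
#   r = redis.StrictRedis(host="localhost", port=6379, db=0)
#   r.delete("h")
#   r.hset("h", "field", "short")
#   print(r.object("ENCODING", "h"))  # ziplist (small hash, small values)
#   r.hset("h", "big", "x" * 100)     # exceeds hash-max-ziplist-value 64
#   print(r.object("ENCODING", "h"))  # hashtable
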
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run against a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with a 2 millisecond delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory as soon as possible.
activerehashing yes

# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients including MONITOR clients
# slave  -> slave clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reaches 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously overcomes
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can be read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard and the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60

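# For illustration, a redis-py sketch (local server assumed): these limits
# are also settable at runtime, with the class and values passed as a single
# string:
#
#   import redis
#
#   r = redis.StrictRedis(host="localhost", port=6379, db=0)
#   r.config_set("client-output-buffer-limit", "pubsub 32mb 8mb 60")
#   print(r.config_get("client-output-buffer-limit"))
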
# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10

# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
66
setup/ubuntu/files/redis_init
Normal file
@@ -0,0 +1,66 @@
#!/bin/sh

EXEC=/usr/local/bin/redis-server
CLIEXEC=/usr/local/bin/redis-cli
PIDFILE=/var/run/redis_6379.pid
CONF="/etc/redis/6379.conf"
REDISPORT="6379"
###############
# SysV Init Information
# chkconfig: - 58 74
# description: redis_6379 is the redis daemon.
### BEGIN INIT INFO
# Provides: redis_6379
# Required-Start: $network $local_fs $remote_fs
# Required-Stop: $network $local_fs $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Should-Start: $syslog $named
# Should-Stop: $syslog $named
# Short-Description: start and stop redis_6379
# Description: Redis daemon
### END INIT INFO


case "$1" in
    start)
        if [ -f "$PIDFILE" ]
        then
            echo "$PIDFILE exists, process is already running or crashed"
        else
            echo "Starting Redis server..."
            $EXEC $CONF
        fi
        ;;
    stop)
        if [ ! -f "$PIDFILE" ]
        then
            echo "$PIDFILE does not exist, process is not running"
        else
            PID=$(cat "$PIDFILE")
            echo "Stopping ..."
            $CLIEXEC -p $REDISPORT shutdown
            while [ -x /proc/${PID} ]
            do
                echo "Waiting for Redis to shutdown ..."
                sleep 1
            done
            echo "Redis stopped"
        fi
        ;;
    status)
        if [ ! -f "$PIDFILE" ]
        then
            echo 'Redis is not running'
        else
            # $(cat ...) instead of the bash-only $(<...), since this runs under /bin/sh
            echo "Redis is running ($(cat "$PIDFILE"))"
        fi
        ;;
    restart)
        $0 stop
        $0 start
        ;;
    *)
        echo "Please use start, stop, restart or status as first argument"
        ;;
esac
3
setup/ubuntu_docker/README.md
Normal file
@@ -0,0 +1,3 @@
Bootstrap scripts for Ubuntu (tested on Ubuntu 14.04) using Docker images.

**Work in progress, not ready yet.**
47
setup/ubuntu_docker/bootstrap.sh
Normal file
@@ -0,0 +1,47 @@
#!/bin/bash
set -eu

# Base path used for all directories created below (the script previously
# declared /opt/redash_docker here but never used it; the hyphenated path
# matches every other reference, including upstart.conf).
REDASH_BASE_PATH=/opt/redash-docker
# TODO: change this to master after merging:
FILES_BASE_URL=https://raw.githubusercontent.com/getredash/redash/docker/setup/ubuntu_docker/files/

# Verify running as root:
if [ "$(id -u)" != "0" ]; then
    if [ $# -ne 0 ]; then
        echo "Failed running with sudo. Exiting." 1>&2
        exit 1
    fi
    echo "This script must be run as root. Trying to run with sudo."
    sudo bash "$0" --with-sudo
    exit 0
fi

# Base packages
apt-get update
apt-get install -y python-pip

# Install Docker
# TODO: copy script into setup files? Install docker from package? Use different base image?
curl -sSL https://get.docker.com/ | sh

pip install docker-compose

mkdir -p $REDASH_BASE_PATH
mkdir -p $REDASH_BASE_PATH/nginx
mkdir -p $REDASH_BASE_PATH/postgres-data
mkdir -p $REDASH_BASE_PATH/supervisord

# Get docker-compose file
wget $FILES_BASE_URL"docker-compose.yml" -O $REDASH_BASE_PATH/docker-compose.yml
wget $FILES_BASE_URL"nginx_redash_site" -O $REDASH_BASE_PATH/nginx/nginx.conf

# Add the docker-compose file location to .profile
# Set up upstart (?) for docker-compose
wget $FILES_BASE_URL"upstart.conf" -O /etc/init/redash-docker.conf
# Start everything
initctl reload-configuration
service redash-docker start

# TODO:
# 1. Create database / tables
# 2. Add the user to the docker group (sudo usermod -aG docker your-user).
28
setup/ubuntu_docker/files/docker-compose.yml
Normal file
@@ -0,0 +1,28 @@
redash:
  image: redash/redash
  ports:
    - "5000:5000"
  links:
    - redis
    - postgres
  environment:
    REDASH_STATIC_ASSETS_PATH: ../rd_ui/dist/
    REDASH_LOG_LEVEL: INFO
    REDASH_REDIS_URL: redis://redis:6379/0
    REDASH_DATABASE_URL: postgresql://postgres@postgres/postgres
    REDASH_COOKIE_SECRET: veryverysecret
    REDASH_GOOGLE_APPS_DOMAIN:
redis:
  image: redis:2.8
postgres:
  image: postgres:9.3
  volumes:
    - /opt/redash-docker/postgres-data:/var/lib/postgresql/data
nginx:
  image: nginx
  ports:
    - "80:80"
  volumes:
    - "/opt/redash-docker/nginx/nginx.conf:/etc/nginx/conf.d/default.conf"
  links:
    - redash
19
setup/ubuntu_docker/files/nginx_redash_site
Normal file
@@ -0,0 +1,19 @@
upstream redash {
    server redash:5000;
}

server {
    listen 80 default;

    gzip on;
    gzip_types *;
    gzip_proxied any;

    location / {
        proxy_set_header Host $http_host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_pass http://redash;
    }
}
7
setup/ubuntu_docker/files/upstart.conf
Normal file
@@ -0,0 +1,7 @@
description "Start re:dash Docker containers"
start on filesystem and started docker
stop on runlevel [!2345]
respawn
script
    docker-compose -f /opt/redash-docker/docker-compose.yml up
end script
@@ -52,7 +52,7 @@ class TestParseQueryJson(TestCase):
        self.assertEqual(query_data['test_dict']['b']['date'], datetime.datetime(2014, 10, 4, 0, 0))

    def test_handles_nested_fields(self):
        # https://github.com/EverythingMe/redash/issues/597
        # https://github.com/getredash/redash/issues/597
        query = {
            "collection": "bus",
            "aggregate": [
14
tests/query_runner/test_mql.py
Normal file
@@ -0,0 +1,14 @@
from datetime import datetime
from unittest import TestCase
from redash.query_runner import TYPE_DATETIME, TYPE_INTEGER, TYPE_STRING
from redash.query_runner.mql import deduce_columns


class TestMQL(TestCase):
    def test_deduce_columns(self):
        self.assertEquals(deduce_columns([{'a': 1}]),
                          [{'name': 'a', 'friendly_name': 'a', 'type': TYPE_INTEGER}])
        self.assertEquals(deduce_columns([{'a': 'foo'}]),
                          [{'name': 'a', 'friendly_name': 'a', 'type': TYPE_STRING}])
        self.assertEquals(deduce_columns([{'a': datetime.now()}]),
                          [{'name': 'a', 'friendly_name': 'a', 'type': TYPE_DATETIME}])