Mirror of https://github.com/getredash/redash.git (synced 2025-12-26 21:01:31 -05:00)

Compare commits: v0.4.0+b54 ... v0.4.0+b63 (89 commits)
SHA1
9a3b25eb50
6da890dfb8
0d35ec7139
dc0f9a63cb
21c042996e
5f22adadf2
4e8888ce2f
0a69609d38
2dbcd88313
6b0775f7c7
e85d3c3c9f
e20f57bba8
933ace2e38
4c1e5aed6b
77d982b4aa
02c8163265
ef868dbb6e
b2bab33baa
149e0835f8
50bed1d8f2
d4b5d78743
7fc82a2562
92fb138c2c
71b4b45a3c
07f4a1b227
e116e88e98
2278a181ca
98dc75a404
536918aab3
c75ac80c7a
522d8542e9
562df44c22
86e6798c96
db7a287e82
518206f208
bcee1e12b4
410f4f35e2
84ea9fec43
cda82b7adc
f2d8c2020b
1b82ecbc46
e381331c36
ff58247987
dcf0d2cbe3
eb99fa5671
ce3e19f212
44dca6da01
34c9fee540
e0b13b2ffa
df362c12b6
0d1f8c948a
f523378326
b0f9e49709
b6dbb4e3f8
3f6a0e8ffa
a7bcc6d31e
8aa2d8e70a
4720e12be7
5463591f0d
2a0198fba8
652f214b25
aa49780134
f483b61cfb
38a189b671
c2331988db
eff5bdb454
bd1babec3a
d43c2bbf62
87db8099d6
ebea118c7d
297ac5c9bd
9b23fb4235
0a71f5e22d
0a8aaceb85
00979f3ad7
c7b48837f2
418c5322c1
dc5b4c26a3
9ed0a5ba85
db0770fc17
9bb58e71d2
560598eaad
f9144fc927
883bf173c0
3f2bb65b32
3917af019a
e88837e835
7abdc2543e
91ab90a6fe
2 .landscape.yaml Normal file

@@ -0,0 +1,2 @@
ignore-paths:
  - migrations
@@ -1,2 +1,2 @@
web: ./manage.py runserver -p $PORT
web: ./manage.py runserver -p $PORT --host 0.0.0.0
worker: ./bin/run celery worker --app=redash.worker --beat -Qqueries,celery,scheduled_queries
@@ -24,13 +24,11 @@ Today **_re:dash_** has support for querying multiple databases, including: Reds



You can try out the demo instance: http://rd-demo.herokuapp.com/ (login with any Google account).

Due to Heroku dev plan limits, it has a small database of flights (see schema [here](http://rd-demo.herokuapp.com/dashboard/schema)). Also due to another Heroku limitation, it is running with the regular user, hence you can DELETE or INSERT data/tables. Please be nice and don't do this.
You can try out the demo instance: http://demo.redash.io/ (login with any Google account).

## Getting Started

* [Setting re:dash on your own server (Ubuntu)](https://github.com/EverythingMe/redash/wiki/Setting-re:dash-on-your-own-server-(for-version-0.4))
* [Setting up re:dash instance](https://github.com/EverythingMe/redash/wiki/Setting-up-re:dash-instance) (includes links to ready made AWS/GCE images).
* Additional documentation in the [Wiki](https://github.com/everythingme/redash/wiki).
11 Vagrantfile (vendored) Normal file

@@ -0,0 +1,11 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = "redash/dev"
  config.vm.synced_folder "./", "/opt/redash/current"
  config.vm.network "forwarded_port", guest: 5000, host: 9001
end
140 manage.py

@@ -2,17 +2,19 @@
"""
CLI to manage redash.
"""
import datetime
from flask.ext.script import Manager, prompt_pass
from flask.ext.script import Manager

from redash import settings, models, __version__
from redash.wsgi import app
from redash.import_export import import_manager
from redash.cli import users, database, data_sources

manager = Manager(app)
database_manager = Manager(help="Manages the database (create/drop tables).")
users_manager = Manager(help="Users management commands.")
data_sources_manager = Manager(help="Data sources management commands.")
manager.add_command("database", database.manager)
manager.add_command("users", users.manager)
manager.add_command("import", import_manager)
manager.add_command("ds", data_sources.manager)

@manager.command
def version():
@@ -22,7 +24,7 @@ def version():

@manager.command
def runworkers():
    """Prints deprecation warning."""
    """Start workers (deprecated)."""
    print "** This command is deprecated. Please use Celery's CLI to control the workers. **"

@@ -31,8 +33,10 @@ def make_shell_context():
    from redash.models import db
    return dict(app=app, db=db, models=models)

@manager.command
def check_settings():
    """Show the settings as re:dash sees them (useful for debugging)."""
    from types import ModuleType

    for name in dir(settings):
@@ -40,130 +44,6 @@ def check_settings():
        if not callable(item) and not name.startswith("__") and not isinstance(item, ModuleType):
            print "{} = {}".format(name, item)

@manager.command
def import_events(events_file):
    import json
    from collections import Counter

    count = Counter()

    with open(events_file) as f:
        for line in f:
            try:
                event = json.loads(line)

                user = event.pop('user_id')
                action = event.pop('action')
                object_type = event.pop('object_type')
                object_id = event.pop('object_id', None)

                if object_id == 'dashboard' and object_type == 'dashboard':
                    count['bad dashboard id'] += 1
                    continue

                created_at = datetime.datetime.utcfromtimestamp(event.pop('timestamp'))
                additional_properties = json.dumps(event)

                models.Event.create(user=user, action=action, object_type=object_type, object_id=object_id,
                                    additional_properties=additional_properties, created_at=created_at)

                count['imported'] += 1

            except Exception as ex:
                print "Failed importing line:"
                print line
                print ex.message
                count[ex.message] += 1
                count['failed'] += 1

    models.db.close_db(None)

    for k, v in count.iteritems():
        print k
        print v

@database_manager.command
def create_tables():
    """Creates the database tables."""
    from redash.models import create_db, init_db

    create_db(True, False)
    init_db()

@database_manager.command
def drop_tables():
    """Drop the database tables."""
    from redash.models import create_db

    create_db(False, True)

@users_manager.option('email', help="User's email")
@users_manager.option('name', help="User's full name")
@users_manager.option('--admin', dest='is_admin', action="store_true", default=False, help="set user as admin")
@users_manager.option('--google', dest='google_auth', action="store_true", default=False, help="user uses Google Auth to login")
@users_manager.option('--password', dest='password', default=None, help="Password for users who don't use Google Auth (leave blank for prompt).")
@users_manager.option('--groups', dest='groups', default=models.User.DEFAULT_GROUPS, help="Comma seperated list of groups (leave blank for default).")
def create(email, name, groups, is_admin=False, google_auth=False, password=None):
    print "Creating user (%s, %s)..." % (email, name)
    print "Admin: %r" % is_admin
    print "Login with Google Auth: %r\n" % google_auth
    if isinstance(groups, basestring):
        groups = groups.split(',')
        groups.remove('')  # in case it was empty string

    if is_admin:
        groups += ['admin']

    user = models.User(email=email, name=name, groups=groups)
    if not google_auth:
        password = password or prompt_pass("Password")
        user.hash_password(password)

    try:
        user.save()
    except Exception, e:
        print "Failed creating user: %s" % e.message

@users_manager.option('email', help="email address of user to delete")
def delete(email):
    deleted_count = models.User.delete().where(models.User.email == email).execute()
    print "Deleted %d users." % deleted_count

@data_sources_manager.command
def import_from_settings(name=None):
    """Import data source from settings (env variables)."""
    name = name or "Default"
    data_source = models.DataSource.create(name=name,
                                           type=settings.CONNECTION_ADAPTER,
                                           options=settings.CONNECTION_STRING)

    print "Imported data source from settings (id={}).".format(data_source.id)

@data_sources_manager.command
def list():
    """List currently configured data sources"""
    for ds in models.DataSource.select():
        print "Name: {}\nType: {}\nOptions: {}".format(ds.name, ds.type, ds.options)

@data_sources_manager.command
def new(name, type, options):
    """Create new data source"""
    # TODO: validate it's a valid type and in the future, validate the options.
    print "Creating {} data source ({}) with options:\n{}".format(type, name, options)
    data_source = models.DataSource.create(name=name,
                                           type=type,
                                           options=options)
    print "Id: {}".format(data_source.id)

manager.add_command("database", database_manager)
manager.add_command("users", users_manager)
manager.add_command("import", import_manager)
manager.add_command("ds", data_sources_manager)

if __name__ == '__main__':
    manager.run()
@@ -163,7 +163,6 @@ module.exports = function (grunt) {
    // Automatically inject Bower components into the app
    wiredep: {
      options: {
        cwd: '<%= yeoman.app %>'
      },
      app: {
        src: ['<%= yeoman.app %>/index.html'],
@@ -15,6 +15,7 @@
    <link rel="stylesheet" href="/bower_components/pivottable/dist/pivot.css">
    <link rel="stylesheet" href="/bower_components/cornelius/src/cornelius.css">
    <link rel="stylesheet" href="/bower_components/select2/select2.css">
    <link rel="stylesheet" href="/bower_components/angular-ui-select/dist/select.css">
    <link rel="stylesheet" href="/bower_components/pace/themes/pace-theme-minimal.css">
    <link rel="stylesheet" href="/styles/redash.css">
    <!-- endbuild -->
@@ -65,6 +66,12 @@
        </ul>
      </li>
    </ul>
    <form class="navbar-form navbar-left" role="search" ng-submit="searchQueries()">
      <div class="form-group">
        <input type="text" ng-model="term" class="form-control" placeholder="Search queries...">
      </div>
      <button type="submit" class="btn btn-default"><span class="glyphicon glyphicon-search"></span></button>
    </form>
    <ul class="nav navbar-nav navbar-right">
      <p class="navbar-text avatar" ng-show="currentUser.id" ng-cloak>
        <img ng-src="{{currentUser.gravatar_url}}" class="img-circle" alt="{{currentUser.name}}"/>
@@ -110,6 +117,7 @@
    <script src="/bower_components/mousetrap/plugins/global-bind/mousetrap-global-bind.js"></script>
    <script src="/bower_components/select2/select2.js"></script>
    <script src="/bower_components/angular-ui-select2/src/select2.js"></script>
    <script src="/bower_components/angular-ui-select/dist/select.js"></script>
    <script src="/bower_components/underscore.string/lib/underscore.string.js"></script>
    <script src="/bower_components/marked/lib/marked.js"></script>
    <script src="/scripts/ng_highchart.js"></script>
@@ -133,6 +141,7 @@
    <script src="/scripts/visualizations/base.js"></script>
    <script src="/scripts/visualizations/chart.js"></script>
    <script src="/scripts/visualizations/cohort.js"></script>
    <script src="/scripts/visualizations/counter.js"></script>
    <script src="/scripts/visualizations/table.js"></script>
    <script src="/scripts/visualizations/pivot.js"></script>
    <script src="/scripts/directives/directives.js"></script>
@@ -14,7 +14,8 @@ angular.module('redash', [
  'ui.bootstrap',
  'smartTable.table',
  'ngResource',
  'ngRoute'
  'ngRoute',
  'ui.select'
]).config(['$routeProvider', '$locationProvider', '$compileProvider', 'growlProvider',
  function ($routeProvider, $locationProvider, $compileProvider, growlProvider) {
    if (featureFlags.clientSideMetrics) {
@@ -55,6 +56,11 @@ angular.module('redash', [
        }]
      }
    });
    $routeProvider.when('/queries/search', {
      templateUrl: '/views/queries_search_results.html',
      controller: 'QuerySearchCtrl',
      reloadOnSearch: true,
    });
    $routeProvider.when('/queries/:queryId', {
      templateUrl: '/views/query.html',
      controller: 'QueryViewCtrl',
@@ -1,12 +1,71 @@
(function () {
  var QuerySearchCtrl = function($scope, $location, $filter, Events, Query) {
    $scope.$parent.pageTitle = "Queries Search";

    $scope.gridConfig = {
      isPaginationEnabled: true,
      itemsByPage: 50,
      maxSize: 8,
    };

    var dateFormatter = function (value) {
      if (!value) return "-";
      return value.format("DD/MM/YY HH:mm");
    }

    $scope.gridColumns = [
      {
        "label": "Name",
        "map": "name",
        "cellTemplateUrl": "/views/queries_query_name_cell.html"
      },
      {
        'label': 'Created By',
        'map': 'user.name'
      },
      {
        'label': 'Created At',
        'map': 'created_at',
        'formatFunction': dateFormatter
      },
      {
        'label': 'Update Schedule',
        'map': 'ttl',
        'formatFunction': function (value) {
          return $filter('refreshRateHumanize')(value);
        }
      }
    ];

    $scope.queries = [];
    $scope.$parent.term = $location.search().q;

    Query.search({q: $scope.term }, function(results) {
      $scope.queries = _.map(results, function(query) {
        query.created_at = moment(query.created_at);
        return query;
      });
    });

    $scope.search = function() {
      if (!angular.isString($scope.term) || $scope.term.trim() == "") {
        $scope.queries = [];
        return;
      }

      $location.search({q: $scope.term});
    };

    Events.record(currentUser, "search", "query", "", {"term": $scope.term});
  };

  var QueriesCtrl = function ($scope, $http, $location, $filter, Query) {
    $scope.$parent.pageTitle = "All Queries";
    $scope.gridConfig = {
      isPaginationEnabled: true,
      itemsByPage: 50,
      maxSize: 8,
      isGlobalSearchActivated: true
    }
      isGlobalSearchActivated: true};

    $scope.allQueries = [];
    $scope.queries = [];
@@ -35,7 +94,7 @@
    Query.query(function (queries) {
      $scope.allQueries = _.map(queries, function (query) {
        query.created_at = moment(query.created_at);
        query.last_retrieved_at = moment(query.last_retrieved_at);
        query.retrieved_at = moment(query.retrieved_at);
        return query;
      });
@@ -58,35 +117,17 @@
        'formatFunction': dateFormatter
      },
      {
        'label': 'Runtime (avg)',
        'map': 'avg_runtime',
        'formatFunction': function (value) {
          return $filter('durationHumanize')(value);
        }
      },
      {
        'label': 'Runtime (min)',
        'map': 'min_runtime',
        'formatFunction': function (value) {
          return $filter('durationHumanize')(value);
        }
      },
      {
        'label': 'Runtime (max)',
        'map': 'max_runtime',
        'label': 'Runtime',
        'map': 'runtime',
        'formatFunction': function (value) {
          return $filter('durationHumanize')(value);
        }
      },
      {
        'label': 'Last Executed At',
        'map': 'last_retrieved_at',
        'map': 'retrieved_at',
        'formatFunction': dateFormatter
      },
      {
        'label': 'Times Executed',
        'map': 'times_retrieved'
      },
      {
        'label': 'Update Schedule',
        'map': 'ttl',
@@ -95,6 +136,7 @@
        }
      }
    ]

    $scope.tabs = [
      {"name": "My Queries", "key": "my"},
      {"key": "all", "name": "All Queries"},
@@ -110,7 +152,7 @@
    });
  }

  var MainCtrl = function ($scope, Dashboard, notifications) {
  var MainCtrl = function ($scope, $location, Dashboard, notifications) {
    if (featureFlags.clientSideMetrics) {
      $scope.$on('$locationChangeSuccess', function(event, newLocation, oldLocation) {
        // This will be called once per actual page load.
@@ -133,7 +175,11 @@
      $scope.otherDashboards = $scope.allDashboards['Other'] || [];
      $scope.groupedDashboards = _.omit($scope.allDashboards, 'Other');
    });
  }
  };

  $scope.searchQueries = function() {
    $location.path('/queries/search').search({q: $scope.term});
  };

  $scope.reloadDashboards();
@@ -165,5 +211,6 @@
  angular.module('redash.controllers', [])
    .controller('QueriesCtrl', ['$scope', '$http', '$location', '$filter', 'Query', QueriesCtrl])
    .controller('IndexCtrl', ['$scope', 'Events', 'Dashboard', IndexCtrl])
    .controller('MainCtrl', ['$scope', 'Dashboard', 'notifications', MainCtrl]);
    .controller('MainCtrl', ['$scope', '$location', 'Dashboard', 'notifications', MainCtrl])
    .controller('QuerySearchCtrl', ['$scope', '$location', '$filter', 'Events', 'Query', QuerySearchCtrl]);
})();
@@ -1,5 +1,5 @@
(function() {
  var DashboardCtrl = function($scope, Events, Widget, $routeParams, $http, $timeout, $q, Dashboard) {
  var DashboardCtrl = function($scope, Events, Widget, $routeParams, $location, $http, $timeout, $q, Dashboard) {
    $scope.refreshEnabled = false;
    $scope.refreshRate = 60;

@@ -15,7 +15,7 @@
      return _.map(row, function (widget) {
        var w = new Widget(widget);

        if (w.visualization && dashboard.dashboard_filters_enabled) {
        if (w.visualization) {
          promises.push(w.getQuery().getQueryResultPromise());
        }

@@ -32,22 +32,23 @@
          // TODO: first object should be a copy, otherwise one of the chart filters behaves different than the others.
          filters[filter.name] = filter;
          filters[filter.name].originFilters = [];
          if (_.has($location.search(), filter.name)) {
            filter.current = $location.search()[filter.name];
          }

          $scope.$watch(function () { return filter.current }, function (value) {
            _.each(filter.originFilters, function (originFilter) {
              originFilter.current = value;
            });
          });
        };
        }

        // TODO: merge values.
        filters[filter.name].originFilters.push(filter);
      });
    });

    if (dashboard.dashboard_filters_enabled) {
      $scope.filters = _.values(filters);
    }
    $scope.filters = _.values(filters);
  });

@@ -74,7 +75,7 @@
        _.each(row, function(widget, i) {
          var newWidget = newWidgets[widget.id];
          if (newWidget && newWidget[0].visualization.query.latest_query_data_id != widget.visualization.query.latest_query_data_id) {
            row[i] = newWidget[0];
            row[i] = new Widget(newWidget[0]);
          }
        });
      });
@@ -83,8 +84,8 @@
      });

      }, $scope.refreshRate);
    };
    }
  }
  };

  $scope.triggerRefresh = function() {
    $scope.refreshEnabled = !$scope.refreshEnabled;
@@ -137,7 +138,7 @@
  };

  angular.module('redash.controllers')
    .controller('DashboardCtrl', ['$scope', 'Events', 'Widget', '$routeParams', '$http', '$timeout', '$q', 'Dashboard', DashboardCtrl])
    .controller('DashboardCtrl', ['$scope', 'Events', 'Widget', '$routeParams', '$location', '$http', '$timeout', '$q', 'Dashboard', DashboardCtrl])
    .controller('WidgetCtrl', ['$scope', 'Events', 'Query', WidgetCtrl])

})();
})();
@@ -21,8 +21,13 @@
      $scope.saveQuery();
    }
  },
  // Cmd+Enter for Mac
  'meta+enter': function () {
    $scope.executeQuery();
  },
  // Ctrl+Enter for PC
  'ctrl+enter': function () {
    $scope.executeQuery();
  }
};
@@ -147,22 +147,22 @@
    var reset = function() {
      $scope.saveInProgress = false;
      $scope.widgetSize = 1;
      $scope.queryId = null;
      $scope.selectedVis = null;
      $scope.query = null;
      $scope.query = {};
      $scope.selected_query = undefined;
      $scope.text = "";
    };

    reset();

    $scope.loadVisualizations = function () {
      if (!$scope.queryId) {
      if (!$scope.query.selected) {
        return;
      }

      Query.get({ id: $scope.queryId }, function(query) {
      Query.get({ id: $scope.query.selected.id }, function(query) {
        if (query) {
          $scope.query = query;
          $scope.selected_query = query;
          if (query.visualizations.length) {
            $scope.selectedVis = query.visualizations[0];
          }
@@ -170,6 +170,20 @@
      });
    };

    $scope.searchQueries = function (term) {
      if (!term || term.length < 3) {
        return;
      }

      Query.search({q: term}, function(results) {
        $scope.queries = results;
      });
    };

    $scope.$watch('query', function () {
      $scope.loadVisualizations();
    }, true);

    $scope.saveWidget = function() {
      $scope.saveInProgress = true;
@@ -97,14 +97,24 @@
        value: '=',
        ignoreBlanks: '=',
        editable: '=',
        done: '='
        done: '=',
      },
      template: function (tElement, tAttrs) {
        var elType = tAttrs.editor || 'input';
        var placeholder = tAttrs.placeholder || 'Click to edit';
        return '<span ng-click="editable && edit()" ng-bind="value" ng-class="{editable: editable}"></span>' +
          '<span ng-click="editable && edit()" ng-show="editable && !value" ng-class="{editable: editable}">' + placeholder + '</span>' +
          '<{elType} ng-model="value" class="rd-form-control"></{elType}>'.replace('{elType}', elType);

        var viewMode = '';

        if (tAttrs.markdown == "true") {
          viewMode = '<span ng-click="editable && edit()" ng-bind-html="value|markdown" ng-class="{editable: editable}"></span>';
        } else {
          viewMode = '<span ng-click="editable && edit()" ng-bind="value" ng-class="{editable: editable}"></span>';
        }

        var placeholderSpan = '<span ng-click="editable && edit()" ng-show="editable && !value" ng-class="{editable: editable}">' + placeholder + '</span>';
        var editor = '<{elType} ng-model="value" class="rd-form-control"></{elType}>'.replace('{elType}', elType);

        return viewMode + placeholderSpan + editor;
      },
      link: function ($scope, element, attrs) {
        // Let's get a reference to the input element, as we'll want to reference it.
@@ -224,4 +234,17 @@
        '</span>'
    }
  });

  // Used instead of autofocus attribute, which doesn't work in Angular as there is no real page load.
  directives.directive('autofocus',
    ['$timeout', function ($timeout) {
      return {
        link: function (scope, element) {
          $timeout(function () {
            element[0].focus();
          });
        }
      };
    }]
  );
})();
@@ -70,6 +70,18 @@ angular.module('redash.filters', []).

  .filter('markdown', ['$sce', function ($sce) {
    return function (text) {
      if (!text) {
        return "";
      }
      return $sce.trustAsHtml(marked(text));
    }
  }]);
  }])

  .filter('trustAsHtml', ['$sce', function ($sce) {
    return function (text) {
      if (!text) {
        return "";
      }
      return $sce.trustAsHtml(text);
    }
  }]);
@@ -1,9 +1,20 @@
(function () {
  'use strict';

  var ColorPalette = {
    'Blue':'#4572A7',
    'Red':'#AA4643',
    'Green': '#89A54E',
    'Purple': '#80699B',
    'Cyan': '#3D96AE',
    'Orange': '#DB843D',
    'Light Blue': '#92A8CD',
    'Lilac': '#A47D7C',
    'Light Green': '#B5CA92',
  };

  Highcharts.setOptions({
    colors: ["#4572A7", "#AA4643", "#89A54E", "#80699B", "#3D96AE",
             "#DB843D", "#92A8CD", "#A47D7C", "#B5CA92"]
    colors: _.values(ColorPalette)
  });

  var defaultOptions = {
@@ -204,6 +215,7 @@
  };

  angular.module('highchart', [])
    .constant('ColorPalette', ColorPalette)
    .directive('chart', ['$timeout', function ($timeout) {
      return {
        restrict: 'E',
@@ -338,4 +350,4 @@
  };

}]);
})();
})();
@@ -22,6 +22,8 @@
      } else if (_.isString(v) && v.match(/^\d{4}-\d{2}-\d{2}/)) {
        row[k] = moment(v);
        columnTypes[k] = 'date';
      } else if (typeof(v) == 'object') {
        row[k] = JSON.stringify(v);
      }
    }, this);
  }, this);
@@ -375,7 +377,7 @@
  };

  var Query = function ($resource, QueryResult, DataSource) {
    var Query = $resource('/api/queries/:id', {id: '@id'});
    var Query = $resource('/api/queries/:id', {id: '@id'}, {search: {method: 'get', isArray: true, url: "/api/queries/search"}});

    Query.newQuery = function () {
      return new Query({
@@ -74,11 +74,13 @@
    };
  });

  chartVisualization.directive('chartEditor', function () {
  chartVisualization.directive('chartEditor', function (ColorPalette) {
    return {
      restrict: 'E',
      templateUrl: '/views/visualizations/chart_editor.html',
      link: function (scope, element, attrs) {
        scope.palette = ColorPalette;

        scope.seriesTypes = {
          'Line': 'line',
          'Column': 'column',
61 rd_ui/app/scripts/visualizations/counter.js Normal file

@@ -0,0 +1,61 @@
'use strict';

(function() {
  var module = angular.module('redash.visualization');

  module.config(['VisualizationProvider', function(VisualizationProvider) {
    var renderTemplate =
      '<counter-renderer ' +
      'options="visualization.options" query-result="queryResult">' +
      '</counter-renderer>';

    var editTemplate = '<counter-editor></counter-editor>';
    var defaultOptions = {};

    VisualizationProvider.registerVisualization({
      type: 'COUNTER',
      name: 'Counter',
      renderTemplate: renderTemplate,
      editorTemplate: editTemplate,
      defaultOptions: defaultOptions
    });
  }
  ]);

  module.directive('counterRenderer', function() {
    return {
      restrict: 'E',
      templateUrl: '/views/visualizations/counter.html',
      link: function($scope, elm, attrs) {
        $scope.visualization.options.rowNumber =
          $scope.visualization.options.rowNumber || 0;

        $scope.$watch('[queryResult && queryResult.getData(), visualization.options]',
          function() {
            var queryData = $scope.queryResult.getData();
            if (queryData) {
              var rowNumber = $scope.visualization.options.rowNumber || 0;
              var counterColName = $scope.visualization.options.counterColName || 'counter';
              var targetColName = $scope.visualization.options.targetColName || 'target';

              $scope.counterValue = queryData[rowNumber][counterColName];
              $scope.targetValue = queryData[rowNumber][targetColName];

              if ($scope.targetValue) {
                $scope.delta = $scope.counterValue - $scope.targetValue;
                $scope.trendPositive = $scope.delta >= 0;
              }
            }
          }, true);
      }
    }
  });

  module.directive('counterEditor', function() {
    return {
      restrict: 'E',
      templateUrl: '/views/visualizations/counter_editor.html'
    }
  });

})();
@@ -270,6 +270,35 @@ to add those CSS styles here. */
pivot-table-renderer > table, grid-renderer > div, visualization-renderer > div {
  overflow: auto;
}
counter-renderer {
  display: block;
  text-align: center;
}
counter-renderer counter {
  margin: 0 auto;
  background: #f9f9f9;
  padding: 15px 50px;
  display: block;;
}
counter-renderer value,
counter-renderer counter-target {
  font-size: 80px;
  display: block;
}
counter-renderer counter-target {
  color: #ccc;
}
counter-renderer counter.positive value {
  color: #5cb85c;
}
counter-renderer counter.negative value {
  color: #d9534f;
  margin-right: 15px;
}
counter-renderer counter-name {
  font-size: 40px;
  display: block;
}

.rd-widget-textbox p {
  margin-bottom: 0;
@@ -29,7 +29,7 @@
      <span ng-hide="currentUser.hasPermission('view_query')">{{query.name}}</span>
      <query-link query="query" visualization="widget.visualization" ng-show="currentUser.hasPermission('view_query')"></query-link>
    </p>
    <div class="text-muted" ng-bind="query.description"></div>
    <div class="text-muted" ng-bind-html="query.description | markdown"></div>
  </h3>
</div>
@@ -22,22 +22,22 @@
</div>

<div ng-show="isVisualization()">
  <p>
    <form class="form-inline" role="form" ng-submit="loadVisualizations()">
      <div class="form-group">
        <input class="form-control" placeholder="Query Id" ng-model="queryId">
      </div>
      <button type="submit" class="btn btn-primary" ng-disabled="!queryId">
        Load visualizations
      </button>
    </form>
  </p>
  <div class="form-group">
    <ui-select ng-model="query.selected" theme="bootstrap" reset-search-input="false">
      <ui-select-match placeholder="Search a query by name">{{$select.selected.name}}</ui-select-match>
      <ui-select-choices repeat="q in queries"
                         refresh="searchQueries($select.search)"
                         refresh-delay="0">
        <div ng-bind-html="q.name | highlight: $select.search | trustAsHtml"></div>
      </ui-select-choices>
    </ui-select>
  </div>

  <div ng-show="query">
    <div class="form-group">
      <label for="">Choose Visualization</label>
      <select ng-model="selectedVis" ng-options="vis as vis.name group by vis.type for vis in query.visualizations" class="form-control"></select>
    </div>
  <div ng-show="selected_query">
    <div class="form-group">
      <label for="">Choose Visualization</label>
      <select ng-model="selectedVis" ng-options="vis as vis.name group by vis.type for vis in selected_query.visualizations" class="form-control"></select>
    </div>
  </div>
</div>
19 rd_ui/app/views/queries_search_results.html Normal file

@@ -0,0 +1,19 @@
<div class="container">
  <div class="row">
    <p>
      <form class="form-inline" role="form" ng-submit="search()">
        <div class="form-group">
          <input class="form-control" placeholder="Search..." ng-model="term" autofocus>
        </div>
        <button type="submit" class="btn btn-primary">
          <span class="glyphicon glyphicon-search"></span>
        </button>
      </form>
    </p>

    <smart-table rows="queries" columns="gridColumns"
                 config="gridConfig"
                 class="table table-condensed table-hover"></smart-table>
  </div>

</div>
@@ -12,7 +12,14 @@
  </h2>
  <p>
    <em>
      <edit-in-place editable="isQueryOwner" done="saveDescription" editor="textarea" placeholder="No description" ignore-blanks='false' value="query.description"></edit-in-place>
      <edit-in-place editable="isQueryOwner"
                     done="saveDescription"
                     editor="textarea"
                     placeholder="No description"
                     ignore-blanks='false'
                     value="query.description"
                     markdown="true">
      </edit-in-place>
    </em>
  </p>
</div>
@@ -91,6 +91,13 @@
             placeholder="{{seriesName}}">
    </div>
  </div>
  <div class="form-group">
    <label class="control-label col-sm-3">Color</label>

    <div class="col-sm-9">
      <select class="form-control" ng-model="visualization.options.seriesOptions[seriesName].color" ng-options="val as key for (key,val) in palette"></select>
    </div>
  </div>
</div>
</div>
5 rd_ui/app/views/visualizations/counter.html Normal file

@@ -0,0 +1,5 @@
<counter ng-class="{'positive': targetValue && trendPositive, 'negative': targetValue && !trendPositive}">
  <value>{{counterValue|number}}</value>
  <counter-target ng-if="targetValue">({{targetValue|number}})</counter-target>
  <counter-name>{{visualization.name}}</counter-name>
</counter>
20 rd_ui/app/views/visualizations/counter_editor.html Normal file

@@ -0,0 +1,20 @@
<div class="form-horizontal">
  <div class="form-group">
    <label class="col-lg-6">Row Number</label>
    <div class="col-lg-6">
      <input type="number" ng-model="visualization.options.rowNumber" class="form-control">
    </div>
  </div>
  <div class="form-group">
    <label class="col-lg-6">Counter Column Name</label>
    <div class="col-lg-6">
      <select ng-options="name for name in queryResult.columnNames" ng-model="visualization.options.counterColName" class="form-control"></select>
    </div>
  </div>
  <div class="form-group">
    <label class="col-lg-6">Target Column Name</label>
    <div class="col-lg-6">
      <select ng-options="name for name in queryResult.columnNames" ng-model="visualization.options.targetColName" class="form-control"></select>
    </div>
  </div>
</div>
@@ -2,7 +2,10 @@
  "name": "rdUi",
  "version": "0.1.0",
  "dependencies": {
    "angular": "1.2.7",
    "angular": "1.2.18",
    "angular-resource": "1.2.18",
    "angular-route": "1.2.18",
    "angular-growl": "0.4.0",
    "json3": "3.2.4",
    "jquery": "1.9.1",
    "bootstrap": "3.0.0",
@@ -13,9 +16,6 @@
    "angular-ui-codemirror": "0.0.5",
    "highcharts": "3.0.10",
    "underscore": "1.5.1",
    "angular-resource": "1.2.15",
    "angular-growl": "0.3.1",
    "angular-route": "1.2.7",
    "pivottable": "~1.1.1",
    "cornelius": "https://github.com/restorando/cornelius.git",
    "gridster": "0.2.0",
@@ -25,13 +25,14 @@
    "underscore.string": "~2.3.3",
    "marked": "~0.3.2",
    "bucky": "~0.2.6",
    "pace": "~0.5.1"
    "pace": "~0.5.1",
    "angular-ui-select": "0.8.2"
  },
  "devDependencies": {
    "angular-mocks": "~1.0.7",
    "angular-scenario": "~1.0.7"
    "angular-mocks": "1.2.18",
    "angular-scenario": "1.2.18"
  },
  "resolutions": {
    "angular": "1.2.7"
    "angular": "1.2.18"
  }
}
@@ -47,6 +47,7 @@ module.exports = function(config) {
      'app/bower_components/mousetrap/plugins/global-bind/mousetrap-global-bind.js',
      'app/bower_components/select2/select2.js',
      'app/bower_components/angular-ui-select2/src/select2.js',
      'app/bower_components/angular-ui-select/dist/select.js',
      'app/bower_components/underscore.string/lib/underscore.string.js',
      'app/bower_components/marked/lib/marked.js',
      'app/scripts/ng_highchart.js',
@@ -3,7 +3,7 @@ import urlparse
import redis
from statsd import StatsClient

from redash import settings, events
from redash import settings

__version__ = '0.4.0'

@@ -14,8 +14,7 @@ def setup_logging():
    handler.setFormatter(formatter)
    logging.getLogger().addHandler(handler)
    logging.getLogger().setLevel(settings.LOG_LEVEL)

    events.setup_logging(settings.EVENTS_LOG_PATH, settings.EVENTS_CONSOLE_OUTPUT)
    logging.getLogger("passlib").setLevel("ERROR")


def create_redis_connection():
0 redash/cli/__init__.py Normal file

60 redash/cli/data_sources.py Normal file

@@ -0,0 +1,60 @@
from flask.ext.script import Manager
from redash import models

manager = Manager(help="Data sources management commands.")

@manager.command
def list():
    """List currently configured data sources"""
    for i, ds in enumerate(models.DataSource.select()):
        if i > 0:
            print "-"*20

        print "Id: {}\nName: {}\nType: {}\nOptions: {}".format(ds.id, ds.name, ds.type, ds.options)


@manager.command
def new(name, type, options):
    """Create new data source"""
    # TODO: validate it's a valid type and in the future, validate the options.
    print "Creating {} data source ({}) with options:\n{}".format(type, name, options)
    data_source = models.DataSource.create(name=name,
                                           type=type,
                                           options=options)
    print "Id: {}".format(data_source.id)


@manager.command
def delete(name):
    """Deletes data source by name"""
    try:
        data_source = models.DataSource.get(models.DataSource.name==name)
        print "Deleting data source: {} (id={})".format(name, data_source.id)
        data_source.delete_instance()
    except models.DataSource.DoesNotExist:
        print "Couldn't find data source named: {}".format(name)


def update_attr(obj, attr, new_value):
    if new_value is not None:
        old_value = getattr(obj, attr)
        print "Updating {}: {} -> {}".format(attr, old_value, new_value)
        setattr(obj, attr, new_value)


@manager.option('name', default=None, help="name of data source to edit")
@manager.option('--name', dest='new_name', default=None, help="new name for the data source")
@manager.option('--options', dest='options', default=None, help="updated options for the data source")
@manager.option('--type', dest='type', default=None, help="new type for the data source")
def edit(name, new_name=None, options=None, type=None):
    """Edit data source settings (name, options, type)"""
    try:
        data_source = models.DataSource.get(models.DataSource.name==name)
        update_attr(data_source, "name", new_name)
        update_attr(data_source, "type", type)
        update_attr(data_source, "options", options)
        data_source.save()

    except models.DataSource.DoesNotExist:
        print "Couldn't find data source named: {}".format(name)
19 redash/cli/database.py Normal file

@@ -0,0 +1,19 @@
from flask.ext.script import Manager

manager = Manager(help="Manages the database (create/drop tables).")

@manager.command
def create_tables():
    """Creates the database tables."""
    from redash.models import create_db, init_db

    create_db(True, False)
    init_db()

@manager.command
def drop_tables():
    """Drop the database tables."""
    from redash.models import create_db

    create_db(False, True)
74 redash/cli/users.py Normal file

@@ -0,0 +1,74 @@
from flask.ext.script import Manager, prompt_pass
from redash import models

manager = Manager(help="Users management commands.")

@manager.option('email', help="email address of the user to grant admin to")
def grant_admin(email):
    try:
        user = models.User.get_by_email(email)

        user.groups.append('admin')
        user.save()

        print "User updated."
    except models.User.DoesNotExist:
        print "User [%s] not found." % email


@manager.option('email', help="User's email")
@manager.option('name', help="User's full name")
@manager.option('--admin', dest='is_admin', action="store_true", default=False, help="set user as admin")
@manager.option('--google', dest='google_auth', action="store_true", default=False, help="user uses Google Auth to login")
@manager.option('--password', dest='password', default=None, help="Password for users who don't use Google Auth (leave blank for prompt).")
@manager.option('--groups', dest='groups', default=models.User.DEFAULT_GROUPS, help="Comma seperated list of groups (leave blank for default).")
def create(email, name, groups, is_admin=False, google_auth=False, password=None):
    print "Creating user (%s, %s)..." % (email, name)
    print "Admin: %r" % is_admin
    print "Login with Google Auth: %r\n" % google_auth
    if isinstance(groups, basestring):
        groups = groups.split(',')
        groups.remove('')  # in case it was empty string

    if is_admin:
        groups += ['admin']

    user = models.User(email=email, name=name, groups=groups)
    if not google_auth:
        password = password or prompt_pass("Password")
        user.hash_password(password)

    try:
        user.save()
    except Exception, e:
        print "Failed creating user: %s" % e.message


@manager.option('email', help="email address of user to delete")
def delete(email):
    deleted_count = models.User.delete().where(models.User.email == email).execute()
    print "Deleted %d users." % deleted_count


@manager.option('password', help="new password for the user")
@manager.option('email', help="email address of the user to change password for")
def password(email, password):
    try:
        user = models.User.get_by_email(email)

        user.hash_password(password)
        user.save()

        print "User updated."
    except models.User.DoesNotExist:
        print "User [%s] not found." % email


@manager.command
def list():
    """List all users"""
    for i, user in enumerate(models.User.select()):
        if i > 0:
            print "-"*20

        print "Id: {}\nName: {}\nEmail: {}".format(user.id, user.name.encode('utf-8'), user.email)
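For context, the new `users create` command above is a thin wrapper around the model layer. A minimal sketch of the equivalent programmatic calls follows; the email, name, and group names are hypothetical placeholders, and it assumes the database tables already exist:

```python
# Hypothetical programmatic equivalent of `./manage.py users create --admin`.
from redash import models

# 'default' and 'admin' are placeholder group names; 'admin' mirrors --admin.
user = models.User(email="jane@example.com", name="Jane Doe",
                   groups=['default', 'admin'])
user.hash_password("s3cret")  # skipped when the user signs in with Google Auth
user.save()
```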
@@ -10,23 +10,20 @@ import json
import numbers
import cStringIO
import datetime
import logging

from flask import render_template, send_from_directory, make_response, request, jsonify, redirect, \
    session, url_for
from flask.ext.restful import Resource, abort
from flask_login import current_user, login_user, logout_user

import sqlparse
import events
from permissions import require_permission

from redash import redis_connection, statsd_client, models, settings, utils, __version__
from redash.wsgi import app, auth, api
from redash.tasks import QueryTask, record_event
from redash.cache import headers as cache_headers
from redash.permissions import require_permission

import logging
from tasks import QueryTask

from cache import headers as cache_headers

@app.route('/ping', methods=['GET'])
def ping():
@@ -103,6 +100,7 @@ def status_api():
    status['version'] = __version__
    status['queries_count'] = models.Query.select().count()
    status['query_results_count'] = models.QueryResult.select().count()
    status['unused_query_results_count'] = models.QueryResult.unused().count()
    status['dashboards_count'] = models.Dashboard.select().count()
    status['widgets_count'] = models.Widget.select().count()

@@ -158,7 +156,7 @@ class EventAPI(BaseResource):
    def post(self):
        events_list = request.get_json(force=True)
        for event in events_list:
            events.record_event(event)
            record_event.delay(event)


api.add_resource(EventAPI, '/api/events', endpoint='events')
@@ -279,6 +277,14 @@ api.add_resource(WidgetListAPI, '/api/widgets', endpoint='widgets')
api.add_resource(WidgetAPI, '/api/widgets/<int:widget_id>', endpoint='widget')


class QuerySearchAPI(BaseResource):
    @require_permission('view_query')
    def get(self):
        term = request.args.get('q', '')

        return [q.to_dict() for q in models.Query.search(term)]


class QueryListAPI(BaseResource):
    @require_permission('create_query')
    def post(self):
@@ -327,6 +333,7 @@ class QueryAPI(BaseResource):
    else:
        abort(404, message="Query not found.")

api.add_resource(QuerySearchAPI, '/api/queries/search', endpoint='queries_search')
api.add_resource(QueryListAPI, '/api/queries', endpoint='queries')
api.add_resource(QueryAPI, '/api/queries/<query_id>', endpoint='query')
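The `QuerySearchAPI` resource registered above makes query search available over plain HTTP at `/api/queries/search`. A rough client-side sketch: the host, port, search term, and cookie value are placeholders, and it assumes an authenticated session whose user has the `view_query` permission:

```python
# Hypothetical client call against the new search endpoint.
import requests

resp = requests.get("http://localhost:5000/api/queries/search",
                    params={"q": "revenue"},          # placeholder search term
                    cookies={"session": "<cookie>"})  # placeholder auth

for query in resp.json():  # a list of Query.to_dict() payloads
    print query["name"]
```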
@@ -23,8 +23,12 @@ def get_query_runner(connection_type, connection_string):
    elif connection_type == 'url':
        from redash.data import query_runner_url
        runner = query_runner_url.url(connection_string)
    elif connection_type == "mongo":
        from redash.data import query_runner_mongodb
        connection_params = json.loads(connection_string)
        runner = query_runner_mongodb.mongodb(connection_params)
    else:
        from redash.data import query_runner_pg
        runner = query_runner_pg.pg(connection_string)

    return runner
    return runner
242 redash/data/query_runner_mongodb.py Normal file

@@ -0,0 +1,242 @@
import datetime
import logging
import json
import sys
import re
import time
from redash.utils import JSONEncoder

try:
    import pymongo
    from bson.objectid import ObjectId
    from bson.son import SON
except ImportError:
    print "Missing dependencies. Please install pymongo."
    print "You can use pip: pip install pymongo"
    raise

TYPES_MAP = {
    ObjectId : "string",
    str : "string",
    unicode : "string",
    int : "integer",
    long : "integer",
    float : "float",
    bool : "boolean",
    datetime.datetime: "datetime",
}

date_regex = re.compile("ISODate\(\"(.*)\"\)", re.IGNORECASE)

# Simple query example:
#
# {
#     "collection" : "my_collection",
#     "query" : {
#         "date" : {
#             "$gt" : "ISODate(\"2015-01-15 11:41\")",
#         },
#         "type" : 1
#     },
#     "fields" : {
#         "_id" : 1,
#         "name" : 2
#     },
#     "sort" : [
#         {
#             "name" : "date",
#             "direction" : -1
#         }
#     ]
#
# }
#
#
# Aggregation
# ===========
# Uses a syntax similar to the one used in PyMongo, however to support the
# correct order of sorting, it uses a regular list for the "$sort" operation
# that converts into a SON (sorted dictionary) object before execution.
#
# Aggregation query example:
#
# {
#     "collection" : "things",
#     "aggregate" : [
#         {
#             "$unwind" : "$tags"
#         },
#         {
#             "$group" : {
#                 {
#                     "_id" : "$tags",
#                     "count" : { "$sum" : 1 }
#                 }
#             }
#         },
#         {
#             "$sort" : [
#                 {
#                     "name" : "count",
#                     "direction" : -1
#                 },
#                 {
#                     "name" : "_id",
#                     "direction" : -1
#                 }
#             ]
#         }
#     ]
# }
#
#
def mongodb(connection_string):
    def _get_column_by_name(columns, column_name):
        for c in columns:
            if "name" in c and c["name"] == column_name:
                return c

        return None

    def _convert_date(q, field_name):
        m = date_regex.findall(q[field_name])
        if len(m) > 0:
            if q[field_name].find(":") == -1:
                q[field_name] = datetime.datetime.fromtimestamp(time.mktime(time.strptime(m[0], "%Y-%m-%d")))
            else:
                q[field_name] = datetime.datetime.fromtimestamp(time.mktime(time.strptime(m[0], "%Y-%m-%d %H:%M")))

    def query_runner(query):
        if not "dbName" in connection_string or not connection_string["dbName"]:
            return None, "dbName is missing from connection string JSON or is empty"

        db_name = connection_string["dbName"]

        if not "connectionString" in connection_string or not connection_string["connectionString"]:
            return None, "connectionString is missing from connection string JSON or is empty"

        is_replica_set = True if "replicaSetName" in connection_string and connection_string["replicaSetName"] else False

        if is_replica_set:
            if not connection_string["replicaSetName"]:
                return None, "replicaSetName is set in the connection string JSON but is empty"

            db_connection = pymongo.MongoReplicaSetClient(connection_string["connectionString"], replicaSet=connection_string["replicaSetName"])
        else:
            db_connection = pymongo.MongoClient(connection_string["connectionString"])

        if db_name not in db_connection.database_names():
            return None, "Unknown database name '%s'" % db_name

        db = db_connection[db_name]

        logging.debug("mongodb connection string: %s", connection_string)
        logging.debug("mongodb got query: %s", query)

        try:
            query_data = json.loads(query)
        except:
            return None, "Invalid query format. The query is not a valid JSON."

        if "query" in query_data and "aggregate" in query_data:
            return None, "'query' and 'aggregate' sections cannot be used at the same time"

        collection = None
        if not "collection" in query_data:
            return None, "'collection' must be set"
        else:
            collection = query_data["collection"]

        q = None
        if "query" in query_data:
            q = query_data["query"]
            for k in q:
                if q[k] and type(q[k]) in [str, unicode]:
                    logging.debug(q[k])
                    _convert_date(q, k)
                elif q[k] and type(q[k]) is dict:
                    for k2 in q[k]:
                        if type(q[k][k2]) in [str, unicode]:
                            _convert_date(q[k], k2)

        f = None

        aggregate = None
        if "aggregate" in query_data:
            aggregate = query_data["aggregate"]
            for step in aggregate:
                if "$sort" in step:
                    sort_list = []
                    for sort_item in step["$sort"]:
                        sort_list.append((sort_item["name"], sort_item["direction"]))

                    step["$sort"] = SON(sort_list)

        if aggregate:
            pass
        else:
            s = None
            if "sort" in query_data and query_data["sort"]:
                s = []
                for field in query_data["sort"]:
                    s.append((field["name"], field["direction"]))

        if "fields" in query_data:
            f = query_data["fields"]

        columns = []
        rows = []

        error = None
        json_data = None

        cursor = None
        if q or (not q and not aggregate):
            if s:
                cursor = db[collection].find(q, f).sort(s)
            else:
                cursor = db[collection].find(q, f)

            if "skip" in query_data:
                cursor = cursor.skip(query_data["skip"])

            if "limit" in query_data:
                cursor = cursor.limit(query_data["limit"])

        elif aggregate:
            r = db[collection].aggregate(aggregate)
            cursor = r["result"]

        for r in cursor:
            for k in r:
                if _get_column_by_name(columns, k) is None:
                    columns.append({
                        "name": k,
                        "friendly_name": k,
                        "type": TYPES_MAP[type(r[k])] if type(r[k]) in TYPES_MAP else None
                    })

                # Convert ObjectId to string
                if type(r[k]) == ObjectId:
                    r[k] = str(r[k])

            rows.append(r)

        if f:
            ordered_columns = []
            for k in sorted(f, key=f.get):
                ordered_columns.append(_get_column_by_name(columns, k))

            columns = ordered_columns

        data = {
            "columns": columns,
            "rows": rows
        }
        error = None
        json_data = json.dumps(data, cls=JSONEncoder)

        return json_data, error

    query_runner.annotate_query = False
    return query_runner
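To make the documented query format concrete, here is a small sketch of driving the new runner directly, outside of re:dash. The connection string, database, and collection names are hypothetical, and it assumes a reachable MongoDB instance:

```python
# Hypothetical direct use of the MongoDB query runner defined above.
import json
from redash.data import query_runner_mongodb

runner = query_runner_mongodb.mongodb({
    "connectionString": "mongodb://localhost:27017",  # placeholder
    "dbName": "mydb",                                 # placeholder
})

# Same JSON query shape as the "Simple query example" comment above.
query = json.dumps({
    "collection": "things",                           # placeholder
    "query": {"type": 1},
    "sort": [{"name": "date", "direction": -1}],
    "limit": 10,
})

json_data, error = runner(query)
print error or json_data
```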
@@ -17,6 +17,9 @@ def script(connection_string):
    json_data = None
    error = None

    if connection_string is None:
        return None, "script execution path is not set. Please reconfigure the data source"

    # Poor man's protection against running scripts from output the scripts directory
    if connection_string.find("../") > -1:
        return None, "Scripts can only be run from the configured scripts directory"
@@ -1,23 +0,0 @@
import logging
import json

logger = logging.getLogger("redash.events")
logger.propagate = False


def setup_logging(log_path, console_output=False):
    if log_path:
        fh = logging.FileHandler(log_path)
        formatter = logging.Formatter('%(message)s')
        fh.setFormatter(formatter)
        logger.addHandler(fh)

    if console_output:
        handler = logging.StreamHandler()
        formatter = logging.Formatter('[%(name)s] %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)


def record_event(event):
    logger.info(json.dumps(event))
@@ -1,8 +1,11 @@
import contextlib
import json
import logging
import os
from redash import models
from flask.ext.script import Manager

logger = logging.getLogger()

class Importer(object):
    def __init__(self, object_mapping=None, data_source=None):
@@ -22,22 +25,17 @@ class Importer(object):

        return query_result

    def import_query(self, user, query):
        query_result = self.import_query_result(query['latest_query_data'])

        new_query = self._get_or_create(models.Query, query['id'], name=query['name'],
                                        user=user,
                                        ttl=-1,
                                        query=query['query'],
                                        query_hash=query['query_hash'],
                                        description=query['description'],
                                        latest_query_data=query_result,
                                        data_source=self.data_source)

        return new_query

    def import_visualization(self, user, visualization):
        query = self.import_query(user, visualization['query'])

@@ -50,9 +48,13 @@ class Importer(object):
        return new_visualization

    def import_widget(self, dashboard, widget):
        visualization = self.import_visualization(dashboard.user, widget['visualization'])
        if 'visualization' in widget:
            visualization = self.import_visualization(dashboard.user, widget['visualization'])
        else:
            visualization = None

        new_widget = self._get_or_create(models.Widget, widget['id'],
                                         text=widget.get('text', None),
                                         dashboard=dashboard,
                                         width=widget['width'],
                                         options=json.dumps(widget['options']),
@@ -91,6 +93,7 @@ class Importer(object):

    def _get_or_create(self, object_type, external_id, **properties):
        internal_id = self._get_mapping(object_type, external_id)
        logger.info("Creating %s with external id: %s and internal id: %s", object_type, external_id, internal_id)
        if internal_id:
            update = object_type.update(**properties).where(object_type.id == internal_id)
            update.execute()
@@ -114,11 +117,21 @@ export_manager = Manager(help="export utilities")


@contextlib.contextmanager
def importer_with_mapping_file(mapping_filename):
def importer_with_mapping_file(mapping_filename, data_source_id=None):
    # Touch file in case it doesn't exist
    if not os.path.isfile(mapping_filename):
        with open(mapping_filename, 'w') as f:
            f.write("{}")

    with open(mapping_filename) as f:
        mapping = json.loads(f.read())

    importer = Importer(object_mapping=mapping, data_source=get_data_source())
    if data_source_id is not None:
        data_source = models.DataSource.get_by_id(data_source_id)
    else:
        data_source = get_data_source()

    importer = Importer(object_mapping=mapping, data_source=data_source)
    yield importer

    with open(mapping_filename, 'w') as f:
@@ -146,12 +159,13 @@ def query(mapping_filename, query_filename, user_id):


@import_manager.command
def dashboard(mapping_filename, dashboard_filename, user_id):
def dashboard(mapping_filename, dashboard_filename, user_id, data_source_id=None):
    user = models.User.get_by_id(user_id)

    with open(dashboard_filename) as f:
        dashboard = json.loads(f.read())

    with importer_with_mapping_file(mapping_filename) as importer:
    with importer_with_mapping_file(mapping_filename, data_source_id) as importer:
        importer.import_dashboard(user, dashboard)
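For reference, the command above is a thin wrapper around the importer, so the same import can be driven directly from Python. A sketch mirroring what the command does, with hypothetical file names and ids, assuming the functions defined in this module:

import json
from redash import models

user = models.User.get_by_id(1)                       # hypothetical user id
with open("dashboard.json") as f:                     # hypothetical export file
    dashboard = json.loads(f.read())

with importer_with_mapping_file("mapping.json", data_source_id=2) as importer:
    importer.import_dashboard(user, dashboard)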
@@ -225,6 +225,15 @@ class QueryResult(BaseModel):
            'retrieved_at': self.retrieved_at
        }

    @classmethod
    def unused(cls):
        week_ago = datetime.datetime.now() - datetime.timedelta(days=7)

        unused_results = cls.select().where(Query.id == None, cls.retrieved_at < week_ago)\
            .join(Query, join_type=peewee.JOIN_LEFT_OUTER)

        return unused_results

    @classmethod
    def get_latest(cls, data_source, query, ttl=0):
        query_hash = utils.gen_query_hash(query)
@@ -305,11 +314,8 @@ class Query(BaseModel):
        d['user_id'] = self._data['user']

        if with_stats:
            d['avg_runtime'] = self.avg_runtime
            d['min_runtime'] = self.min_runtime
            d['max_runtime'] = self.max_runtime
            d['last_retrieved_at'] = self.last_retrieved_at
            d['times_retrieved'] = self.times_retrieved
            d['retrieved_at'] = self.retrieved_at
            d['runtime'] = self.runtime

        if with_visualizations:
            d['visualizations'] = [vis.to_dict(with_query=False)
@@ -319,15 +325,11 @@ class Query(BaseModel):

    @classmethod
    def all_queries(cls):
        q = Query.select(Query, User,
                         peewee.fn.Count(QueryResult.id).alias('times_retrieved'),
                         peewee.fn.Avg(QueryResult.runtime).alias('avg_runtime'),
                         peewee.fn.Min(QueryResult.runtime).alias('min_runtime'),
                         peewee.fn.Max(QueryResult.runtime).alias('max_runtime'),
                         peewee.fn.Max(QueryResult.retrieved_at).alias('last_retrieved_at'))\
        q = Query.select(Query, User, QueryResult.retrieved_at, QueryResult.runtime)\
            .join(QueryResult, join_type=peewee.JOIN_LEFT_OUTER)\
            .switch(Query).join(User)\
            .group_by(Query.id, User.id)
            .group_by(Query.id, User.id, QueryResult.id, QueryResult.retrieved_at, QueryResult.runtime)\
            .order_by(cls.created_at.desc())

        return q

@@ -348,6 +350,17 @@ class Query(BaseModel):

        return queries

    @classmethod
    def search(cls, term):
        # This is a very naive implementation of search, to be replaced with a PostgreSQL full-text-search solution.

        where = (cls.name**"%{}%".format(term)) | (cls.description**"%{}%".format(term))

        if term.isdigit():
            where |= cls.id == term

        return cls.select().where(where).order_by(cls.created_at.desc())
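In peewee, the ** operator used above is the case-insensitive LIKE (ILIKE), so the term matches as a substring of the name or description, and purely numeric terms also match the query id. A minimal usage sketch (the search terms are hypothetical):

from redash import models

matching = models.Query.search("revenue")   # ILIKE match on name/description
for q in matching:
    print(q.id, q.name)

models.Query.search("42")                   # numeric terms also match the query with id 42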
    @classmethod
    def update_instance(cls, query_id, **kwargs):
        if 'query' in kwargs:
@@ -366,6 +379,14 @@ class Query(BaseModel):
        self.api_key = hashlib.sha1(
            u''.join((str(time.time()), self.query, str(self._data['user']), self.name)).encode('utf-8')).hexdigest()

    @property
    def runtime(self):
        return self.latest_query_data.runtime

    @property
    def retrieved_at(self):
        return self.latest_query_data.retrieved_at
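These properties back the slimmed-down to_dict and all_queries above: runtime and retrieved_at now come straight from the latest result row. As written they assume latest_query_data is set; a hedged sketch of a None-safe variant for queries that have never run (an assumption about desired behavior, not part of this change):

@property
def runtime(self):
    # Return None rather than raising AttributeError when there is no result yet.
    return self.latest_query_data.runtime if self.latest_query_data else None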
    def __unicode__(self):
        return unicode(self.id)

@@ -520,6 +541,21 @@ class Event(BaseModel):
    def __unicode__(self):
        return u"%s,%s,%s,%s" % (self._data['user'], self.action, self.object_type, self.object_id)

    @classmethod
    def record(cls, event):
        user = event.pop('user_id')
        action = event.pop('action')
        object_type = event.pop('object_type')
        object_id = event.pop('object_id', None)

        created_at = datetime.datetime.utcfromtimestamp(event.pop('timestamp'))
        additional_properties = json.dumps(event)

        event = cls.create(user=user, action=action, object_type=object_type, object_id=object_id,
                           additional_properties=additional_properties, created_at=created_at)

        return event
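Event.record pops the fixed fields off the incoming dict and serializes whatever remains into additional_properties. A sketch of the payload shape it expects; all field values here are hypothetical:

from redash import models

event = {
    'user_id': 1,
    'action': 'view',
    'object_type': 'dashboard',
    'object_id': 3,
    'timestamp': 1419870210,      # seconds since epoch; becomes created_at
    'referrer': 'dashboard/3',    # any extra key ends up in additional_properties
}
models.Event.record(event)        # note: record() mutates the dict via pop()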

all_models = (DataSource, User, QueryResult, Query, Dashboard, Visualization, Widget, ActivityLog, Group, Event)
|
||||
@@ -56,6 +56,10 @@ CELERY_BROKER = os.environ.get("REDASH_CELERY_BROKER", REDIS_URL)
|
||||
CELERY_BACKEND = os.environ.get("REDASH_CELERY_BACKEND", REDIS_URL)
|
||||
CELERY_FLOWER_URL = os.environ.get("REDASH_CELERY_FLOWER_URL", "/flower")
|
||||
|
||||
# The following enables periodic job (every 5 minutes) of removing unused query results. Behind this "feature flag" until
|
||||
# proved to be "safe".
|
||||
QUERY_RESULTS_CLEANUP_ENABLED = parse_boolean(os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_ENABLED", "false"))
|
||||
|
||||
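Since the flag is read through parse_boolean at import time, it can presumably be flipped via the environment before redash.settings is first imported; a minimal sketch:

import os

# Must be set before `redash.settings` is imported anywhere in the process.
os.environ["REDASH_QUERY_RESULTS_CLEANUP_ENABLED"] = "true"

from redash import settings
assert settings.QUERY_RESULTS_CLEANUP_ENABLED is True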
# Google Apps domain to allow access from; any user with an email address in this Google Apps domain will be
# allowed access
GOOGLE_APPS_DOMAIN = os.environ.get("REDASH_GOOGLE_APPS_DOMAIN", "")
@@ -70,8 +74,6 @@ WORKERS_COUNT = int(os.environ.get("REDASH_WORKERS_COUNT", "2"))
JOB_EXPIRY_TIME = int(os.environ.get("REDASH_JOB_EXPIRY_TIME", 3600*6))
COOKIE_SECRET = os.environ.get("REDASH_COOKIE_SECRET", "c292a0a3aa32397cdb050e233733900f")
LOG_LEVEL = os.environ.get("REDASH_LOG_LEVEL", "INFO")
EVENTS_LOG_PATH = os.environ.get("REDASH_EVENTS_LOG_PATH", "")
EVENTS_CONSOLE_OUTPUT = parse_boolean(os.environ.get("REDASH_EVENTS_CONSOLE_OUTPUT", "false"))
CLIENT_SIDE_METRICS = parse_boolean(os.environ.get("REDASH_CLIENT_SIDE_METRICS", "false"))
ANALYTICS = os.environ.get("REDASH_ANALYTICS", "")
@@ -202,6 +202,22 @@ def cleanup_tasks():
            redis_connection.delete(lock_keys[i])


@celery.task(base=BaseTask)
def cleanup_query_results():
    """
    Job to clean up unused query results -- ones that no query links to anymore, and that are older than a week
    (so they are less likely to still be open in someone's browser and in use).

    Each run deletes only 100 query results, so it won't choke the database when there are many such results.
    """

    unused_query_results = models.QueryResult.unused().limit(100)
    total_unused_query_results = models.QueryResult.unused().count()
    deleted_count = models.QueryResult.delete().where(models.QueryResult.id << unused_query_results).execute()

    logger.info("Deleted %d unused query results out of total of %d." % (deleted_count, total_unused_query_results))
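Beat schedules this task automatically when the settings flag is on (see the worker changes below), but a one-off run is also possible; a sketch, assuming a configured broker and a running worker:

from redash.tasks import cleanup_query_results

cleanup_query_results.delay()          # enqueue for a worker to pick up
# or, for local testing, execute eagerly in-process (bypasses the broker):
cleanup_query_results.apply().get()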
@celery.task(bind=True, base=BaseTask, track_started=True)
def execute_query(self, query, data_source_id):
    # TODO: maybe this should be a class?
@@ -246,3 +262,7 @@ def execute_query(self, query, data_source_id):

    return query_result.id


@celery.task(base=BaseTask)
def record_event(event):
    models.Event.record(event)
@@ -7,19 +7,26 @@ celery = Celery('redash',
                broker=settings.CELERY_BROKER,
                include='redash.tasks')

celery.conf.update(CELERY_RESULT_BACKEND=settings.CELERY_BACKEND,
                   CELERYBEAT_SCHEDULE={
                       'refresh_queries': {
                           'task': 'redash.tasks.refresh_queries',
                           'schedule': timedelta(seconds=30)
                       },
                       'cleanup_tasks': {
                           'task': 'redash.tasks.cleanup_tasks',
                           'schedule': timedelta(minutes=5)
                       },
                   },
                   CELERY_TIMEZONE='UTC')
celery_schedule = {
    'refresh_queries': {
        'task': 'redash.tasks.refresh_queries',
        'schedule': timedelta(seconds=30)
    },
    'cleanup_tasks': {
        'task': 'redash.tasks.cleanup_tasks',
        'schedule': timedelta(minutes=5)
    }
}

if settings.QUERY_RESULTS_CLEANUP_ENABLED:
    celery_schedule['cleanup_query_results'] = {
        'task': 'redash.tasks.cleanup_query_results',
        'schedule': timedelta(minutes=5)
    }

celery.conf.update(CELERY_RESULT_BACKEND=settings.CELERY_BACKEND,
                   CELERYBEAT_SCHEDULE=celery_schedule,
                   CELERY_TIMEZONE='UTC')

if __name__ == '__main__':
    celery.start()
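With the schedule built as a plain dict, the cleanup entry only exists when the flag is on. A quick way to inspect the effective beat schedule (a sketch, assuming the app is importable and configured):

from redash.worker import celery

print(sorted(celery.conf.CELERYBEAT_SCHEDULE))
# flag off (default): ['cleanup_tasks', 'refresh_queries']
# flag on:            ['cleanup_query_results', 'cleanup_tasks', 'refresh_queries']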
12
setup/Vagrantfile_debian
Normal file
@@ -0,0 +1,12 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  # Every Vagrant virtual environment requires a box to build off of.
  config.vm.box = "box-cutter/debian76"
  config.vm.provision "shell", path: "setup.sh"
  config.vm.network "forwarded_port", guest: 80, host: 9001
end
177
setup/bootstrap.sh
Normal file
@@ -0,0 +1,177 @@
#!/bin/bash
set -eu

REDASH_BASE_PATH=/opt/redash
FILES_BASE_URL=https://raw.githubusercontent.com/EverythingMe/redash/docs_setup/setup/files/

# Small helper used below; the original script referenced `die` without defining it.
die() { echo "$1" 1>&2; exit 1; }

# Verify running as root:
if [ "$(id -u)" != "0" ]; then
    if [ $# -ne 0 ]; then
        echo "Failed running with sudo. Exiting." 1>&2
        exit 1
    fi
    echo "This script must be run as root. Trying to run with sudo."
    sudo bash $0 --with-sudo
    exit 0
fi

# Base packages
apt-get update
apt-get install -y python-pip python-dev nginx curl build-essential pwgen

# redash user
# TODO: check user doesn't exist yet?
adduser --system --no-create-home --disabled-login --gecos "" redash

# PostgreSQL
pg_available=0
psql --version || pg_available=$?
if [ $pg_available -ne 0 ]; then
    wget $FILES_BASE_URL"postgres_apt.sh" -O /tmp/postgres_apt.sh
    bash /tmp/postgres_apt.sh
    apt-get update
    apt-get -y install postgresql-9.3 postgresql-server-dev-9.3
fi

add_service() {
    service_name=$1
    service_command="/etc/init.d/$service_name"

    echo "Adding service: $service_name (/etc/init.d/$service_name)."
    chmod +x $service_command

    if command -v chkconfig >/dev/null 2>&1; then
        # we have chkconfig, so let's add to chkconfig and put in runlevels 345
        chkconfig --add $service_name && echo "Successfully added to chkconfig!"
        chkconfig --level 345 $service_name on && echo "Successfully added to runlevels 345!"
    elif command -v update-rc.d >/dev/null 2>&1; then
        # if we're not a chkconfig box, assume we can use update-rc.d
        update-rc.d $service_name defaults && echo "Success!"
    else
        echo "No supported init tool found."
    fi

    $service_command start
}

# Redis
redis_available=0
redis-cli --version || redis_available=$?
if [ $redis_available -ne 0 ]; then
    wget http://download.redis.io/releases/redis-2.8.17.tar.gz
    tar xzf redis-2.8.17.tar.gz
    rm redis-2.8.17.tar.gz
    cd redis-2.8.17
    make
    make install

    # Setup process init & configuration

    REDIS_PORT=6379
    REDIS_CONFIG_FILE="/etc/redis/$REDIS_PORT.conf"
    REDIS_LOG_FILE="/var/log/redis_$REDIS_PORT.log"
    REDIS_DATA_DIR="/var/lib/redis/$REDIS_PORT"

    mkdir -p `dirname "$REDIS_CONFIG_FILE"` || die "Could not create redis config directory"
    mkdir -p `dirname "$REDIS_LOG_FILE"` || die "Could not create redis log dir"
    mkdir -p "$REDIS_DATA_DIR" || die "Could not create redis data directory"

    wget -O /etc/init.d/redis_6379 $FILES_BASE_URL"redis_init"
    wget -O $REDIS_CONFIG_FILE $FILES_BASE_URL"redis.conf"

    add_service "redis_$REDIS_PORT"

    cd ..
    rm -rf redis-2.8.17
fi

# Directories
if [ ! -d "$REDASH_BASE_PATH" ]; then
    sudo mkdir /opt/redash
    sudo chown redash /opt/redash
    sudo -u redash mkdir /opt/redash/logs
fi

# Default config file
if [ ! -f "/opt/redash/.env" ]; then
    sudo -u redash wget $FILES_BASE_URL"env" -O /opt/redash/.env
fi

# Install latest version
REDASH_VERSION=${REDASH_VERSION-0.4.0.b589}
LATEST_URL="https://github.com/EverythingMe/redash/releases/download/v${REDASH_VERSION/.b/%2Bb}/redash.$REDASH_VERSION.tar.gz"
VERSION_DIR="/opt/redash/redash.$REDASH_VERSION"
REDASH_TARBALL=/tmp/redash.tar.gz

if [ ! -d "$VERSION_DIR" ]; then
    sudo -u redash wget $LATEST_URL -O $REDASH_TARBALL
    sudo -u redash mkdir $VERSION_DIR
    sudo -u redash tar -C $VERSION_DIR -xvf $REDASH_TARBALL
    ln -nfs $VERSION_DIR /opt/redash/current
    ln -nfs /opt/redash/.env /opt/redash/current/.env

    cd /opt/redash/current

    # TODO: venv?
    pip install -r requirements.txt
fi

# Create database / tables
pg_user_exists=0
sudo -u postgres psql postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='redash'" | grep -q 1 || pg_user_exists=$?
if [ $pg_user_exists -ne 0 ]; then
    echo "Creating redash postgres user & database."
    sudo -u postgres createuser redash --no-superuser --no-createdb --no-createrole
    sudo -u postgres createdb redash --owner=redash

    cd /opt/redash/current
    sudo -u redash bin/run ./manage.py database create_tables
fi

# Create default admin user
cd /opt/redash/current
# TODO: make sure user created only once
# TODO: generate temp password and print to screen
sudo -u redash bin/run ./manage.py users create --admin --password admin "Admin" "admin"

# Create re:dash read only pg user & setup data source
pg_user_exists=0
sudo -u postgres psql postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='redash_reader'" | grep -q 1 || pg_user_exists=$?
if [ $pg_user_exists -ne 0 ]; then
    echo "Creating redash reader postgres user."
    REDASH_READER_PASSWORD=$(pwgen -1)
    sudo -u postgres psql -c "CREATE ROLE redash_reader WITH PASSWORD '$REDASH_READER_PASSWORD' NOCREATEROLE NOCREATEDB NOSUPERUSER LOGIN"
    sudo -u redash psql -c "grant select(id,name,type) ON data_sources to redash_reader;" redash
    sudo -u redash psql -c "grant select on events, queries, dashboards, widgets, visualizations, query_results to redash_reader;" redash

    cd /opt/redash/current
    sudo -u redash bin/run ./manage.py ds new "re:dash metadata" "pg" "user=redash_reader password=$REDASH_READER_PASSWORD host=localhost dbname=redash"
fi

# BigQuery dependencies:
apt-get install -y libffi-dev libssl-dev
pip install google-api-python-client==1.2 pyOpenSSL==0.14 oauth2client==1.2

# MySQL dependencies:
apt-get install -y libmysqlclient-dev
pip install MySQL-python==1.2.5

# Mongo dependencies:
pip install pymongo==2.7.2

# Setup supervisord + sysv init startup script
sudo -u redash mkdir -p /opt/redash/supervisord
pip install supervisor==3.1.2 # TODO: move to requirements.txt

# Get supervisord startup script
sudo -u redash wget -O /opt/redash/supervisord/supervisord.conf $FILES_BASE_URL"supervisord.conf"

wget -O /etc/init.d/redash_supervisord $FILES_BASE_URL"redash_supervisord_init"
add_service "redash_supervisord"

# Nginx setup
rm /etc/nginx/sites-enabled/default
wget -O /etc/nginx/sites-available/redash $FILES_BASE_URL"nginx_redash_site"
ln -nfs /etc/nginx/sites-available/redash /etc/nginx/sites-enabled/redash
service nginx restart
9
setup/files/env
Normal file
@@ -0,0 +1,9 @@
export REDASH_CONNECTION_ADAPTER=pg
export REDASH_CONNECTION_STRING="dbname=redash"
export REDASH_STATIC_ASSETS_PATH="../rd_ui/dist/"
export REDASH_LOG_LEVEL="INFO"
export REDASH_WORKERS_COUNT=6
export REDASH_REDIS_URL=redis://localhost:6379/1
export REDASH_DATABASE_URL="postgresql://redash"
export REDASH_COOKIE_SECRET=veryverysecret
export REDASH_GOOGLE_APPS_DOMAIN=
20
setup/files/nginx_redash_site
Normal file
@@ -0,0 +1,20 @@
upstream rd_servers {
  server 127.0.0.1:5000;
}

server {
  listen 80 default;

  access_log /var/log/nginx/rd.access.log;

  gzip on;
  gzip_types *;
  gzip_proxied any;

  location / {
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_pass http://rd_servers;
  }
}
162
setup/files/postgres_apt.sh
Normal file
@@ -0,0 +1,162 @@
#!/bin/sh

# script to add apt.postgresql.org to sources.list

# from command line
CODENAME="$1"
# lsb_release is the best interface, but not always available
if [ -z "$CODENAME" ]; then
    CODENAME=$(lsb_release -cs 2>/dev/null)
fi
# parse os-release (unreliable, does not work on Ubuntu)
if [ -z "$CODENAME" -a -f /etc/os-release ]; then
    . /etc/os-release
    # Debian: VERSION="7.0 (wheezy)"
    # Ubuntu: VERSION="13.04, Raring Ringtail"
    CODENAME=$(echo $VERSION | sed -ne 's/.*(\(.*\)).*/\1/')
fi
# guess from sources.list
if [ -z "$CODENAME" ]; then
    CODENAME=$(grep '^deb ' /etc/apt/sources.list | head -n1 | awk '{ print $3 }')
fi
# complain if no result yet
if [ -z "$CODENAME" ]; then
    cat <<EOF
Could not determine the distribution codename. Please report this as a bug to
pgsql-pkg-debian@postgresql.org. As a workaround, you can call this script with
the proper codename as parameter, e.g. "$0 squeeze".
EOF
    exit 1
fi

# errors are non-fatal above
set -e

cat <<EOF
This script will enable the PostgreSQL APT repository on apt.postgresql.org on
your system. The distribution codename used will be $CODENAME-pgdg.

EOF

case $CODENAME in
    # known distributions
    sid|wheezy|squeeze|lenny|etch) ;;
    precise|lucid) ;;
    *) # unknown distribution, verify on the web
        DISTURL="http://apt.postgresql.org/pub/repos/apt/dists/"
        if [ -x /usr/bin/curl ]; then
            DISTHTML=$(curl -s $DISTURL)
        elif [ -x /usr/bin/wget ]; then
            DISTHTML=$(wget --quiet -O - $DISTURL)
        fi
        if [ "$DISTHTML" ]; then
            if ! echo "$DISTHTML" | grep -q "$CODENAME-pgdg"; then
                cat <<EOF
Your system is using the distribution codename $CODENAME, but $CODENAME-pgdg
does not seem to be a valid distribution on
$DISTURL

We abort the installation here. If you want to use a distribution different
from your system, you can call this script with an explicit codename, e.g.
"$0 precise".

Specifically, if you are using a non-LTS Ubuntu release, refer to
https://wiki.postgresql.org/wiki/Apt/FAQ#I_am_using_a_non-LTS_release_of_Ubuntu

For more information, refer to https://wiki.postgresql.org/wiki/Apt
or ask on the mailing list for assistance: pgsql-pkg-debian@postgresql.org
EOF
                exit 1
            fi
        fi
        ;;
esac

echo "Writing /etc/apt/sources.list.d/pgdg.list ..."
cat > /etc/apt/sources.list.d/pgdg.list <<EOF
deb http://apt.postgresql.org/pub/repos/apt/ $CODENAME-pgdg main
#deb-src http://apt.postgresql.org/pub/repos/apt/ $CODENAME-pgdg main
EOF

echo "Importing repository signing key ..."
KEYRING="/etc/apt/trusted.gpg.d/apt.postgresql.org.gpg"
test -e $KEYRING || touch $KEYRING
apt-key --keyring $KEYRING add - <<EOF
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1

mQINBE6XR8IBEACVdDKT2HEH1IyHzXkb4nIWAY7echjRxo7MTcj4vbXAyBKOfjja
UrBEJWHN6fjKJXOYWXHLIYg0hOGeW9qcSiaa1/rYIbOzjfGfhE4x0Y+NJHS1db0V
G6GUj3qXaeyqIJGS2z7m0Thy4Lgr/LpZlZ78Nf1fliSzBlMo1sV7PpP/7zUO+aA4
bKa8Rio3weMXQOZgclzgeSdqtwKnyKTQdXY5MkH1QXyFIk1nTfWwyqpJjHlgtwMi
c2cxjqG5nnV9rIYlTTjYG6RBglq0SmzF/raBnF4Lwjxq4qRqvRllBXdFu5+2pMfC
IZ10HPRdqDCTN60DUix+BTzBUT30NzaLhZbOMT5RvQtvTVgWpeIn20i2NrPWNCUh
hj490dKDLpK/v+A5/i8zPvN4c6MkDHi1FZfaoz3863dylUBR3Ip26oM0hHXf4/2U
A/oA4pCl2W0hc4aNtozjKHkVjRx5Q8/hVYu+39csFWxo6YSB/KgIEw+0W8DiTII3
RQj/OlD68ZDmGLyQPiJvaEtY9fDrcSpI0Esm0i4sjkNbuuh0Cvwwwqo5EF1zfkVj
Tqz2REYQGMJGc5LUbIpk5sMHo1HWV038TWxlDRwtOdzw08zQA6BeWe9FOokRPeR2
AqhyaJJwOZJodKZ76S+LDwFkTLzEKnYPCzkoRwLrEdNt1M7wQBThnC5z6wARAQAB
tBxQb3N0Z3JlU1FMIERlYmlhbiBSZXBvc2l0b3J5iQI9BBMBCAAnAhsDBQsJCAcD
BRUKCQgLBRYCAwEAAh4BAheABQJS6RUZBQkOhCctAAoJEH/MfUaszEz4zmQP/2ad
HtuaXL5Xu3C3NGLha/aQb9iSJC8z5vN55HMCpsWlmslCBuEr+qR+oZvPkvwh0Io/
8hQl/qN54DMNifRwVL2n2eG52yNERie9BrAMK2kNFZZCH4OxlMN0876BmDuNq2U6
7vUtCv+pxT+g9R1LvlPgLCTjS3m+qMqUICJ310BMT2cpYlJx3YqXouFkdWBVurI0
pGU/+QtydcJALz5eZbzlbYSPWbOm2ZSS2cLrCsVNFDOAbYLtUn955yXB5s4rIscE
vTzBxPgID1iBknnPzdu2tCpk07yJleiupxI1yXstCtvhGCbiAbGFDaKzhgcAxSIX
0ZPahpaYLdCkcoLlfgD+ar4K8veSK2LazrhO99O0onRG0p7zuXszXphO4E/WdbTO
yDD35qCqYeAX6TaB+2l4kIdVqPgoXT/doWVLUK2NjZtd3JpMWI0OGYDFn2DAvgwP
xqKEoGTOYuoWKssnwLlA/ZMETegak27gFAKfoQlmHjeA/PLC2KRYd6Wg2DSifhn+
2MouoE4XFfeekVBQx98rOQ5NLwy/TYlsHXm1n0RW86ETN3chj/PPWjsi80t5oepx
82azRoVu95LJUkHpPLYyqwfueoVzp2+B2hJU2Rg7w+cJq64TfeJG8hrc93MnSKIb
zTvXfdPtvYdHhhA2LYu4+5mh5ASlAMJXD7zIOZt2iEYEEBEIAAYFAk6XSO4ACgkQ
xa93SlhRC1qmjwCg9U7U+XN7Gc/dhY/eymJqmzUGT/gAn0guvoX75Y+BsZlI6dWn
qaFU6N8HiQIcBBABCAAGBQJOl0kLAAoJEExaa6sS0qeuBfEP/3AnLrcKx+dFKERX
o4NBCGWr+i1CnowupKS3rm2xLbmiB969szG5TxnOIvnjECqPz6skK3HkV3jTZaju
v3sR6M2ItpnrncWuiLnYcCSDp9TEMpCWzTEgtrBlKdVuTNTeRGILeIcvqoZX5w+u
i0eBvvbeRbHEyUsvOEnYjrqoAjqUJj5FUZtR1+V9fnZp8zDgpOSxx0LomnFdKnhj
uyXAQlRCA6/roVNR9ruRjxTR5ubteZ9ubTsVYr2/eMYOjQ46LhAgR+3Alblu/WHB
MR/9F9//RuOa43R5Sjx9TiFCYol+Ozk8XRt3QGweEH51YkSYY3oRbHBb2Fkql6N6
YFqlLBL7/aiWnNmRDEs/cdpo9HpFsbjOv4RlsSXQfvvfOayHpT5nO1UQFzoyMVpJ
615zwmQDJT5Qy7uvr2eQYRV9AXt8t/H+xjQsRZCc5YVmeAo91qIzI/tA2gtXik49
6yeziZbfUvcZzuzjjxFExss4DSAwMgorvBeIbiz2k2qXukbqcTjB2XqAlZasd6Ll
nLXpQdqDV3McYkP/MvttWh3w+J/woiBcA7yEI5e3YJk97uS6+ssbqLEd0CcdT+qz
+Waw0z/ZIU99Lfh2Qm77OT6vr//Zulw5ovjZVO2boRIcve7S97gQ4KC+G/+QaRS+
VPZ67j5UMxqtT/Y4+NHcQGgwF/1iiQI9BBMBCAAnAhsDBQsJCAcDBRUKCQgLBRYC
AwEAAh4BAheABQJQeSssBQkDwxbfAAoJEH/MfUaszEz4bgkP/0AI0UgDgkNNqplA
IpE/pkwem2jgGpJGKurh2xDu6j2ZL+BPzPhzyCeMHZwTXkkI373TXGQQP8dIa+RD
HAZ3iijw4+ISdKWpziEUJjUk04UMPTlN+dYJt2EHLQDD0VLtX0yQC/wLmVEH/REp
oclbVjZR/+ehwX2IxOIlXmkZJDSycl975FnSUjMAvyzty8P9DN0fIrQ7Ju+BfMOM
TnUkOdp0kRUYez7pxbURJfkM0NxAP1geACI91aISBpFg3zxQs1d3MmUIhJ4wHvYB
uaR7Fx1FkLAxWddre/OCYJBsjucE9uqc04rgKVjN5P/VfqNxyUoB+YZ+8Lk4t03p
RBcD9XzcyOYlFLWXbcWxTn1jJ2QMqRIWi5lzZIOMw5B+OK9LLPX0dAwIFGr9WtuV
J2zp+D4CBEMtn4Byh8EaQsttHeqAkpZoMlrEeNBDz2L7RquPQNmiuom15nb7xU/k
7PGfqtkpBaaGBV9tJkdp7BdH27dZXx+uT+uHbpMXkRrXliHjWpAw+NGwADh/Pjmq
ExlQSdgAiXy1TTOdzxKH7WrwMFGDK0fddKr8GH3f+Oq4eOoNRa6/UhTCmBPbryCS
IA7EAd0Aae9YaLlOB+eTORg/F1EWLPm34kKSRtae3gfHuY2cdUmoDVnOF8C9hc0P
bL65G4NWPt+fW7lIj+0+kF19s2PviQI9BBMBCAAnAhsDBQsJCAcDBRUKCQgLBRYC
AwEAAh4BAheABQJRKm2VBQkINsBBAAoJEH/MfUaszEz4RTEP/1sQHyjHaUiAPaCA
v8jw/3SaWP/g8qLjpY6ROjLnDMvwKwRAoxUwcIv4/TWDOMpwJN+CJIbjXsXNYvf9
OX+UTOvq4iwi4ADrAAw2xw+Jomc6EsYla+hkN2FzGzhpXfZFfUsuphjY3FKL+4hX
H+R8ucNwIz3yrkfc17MMn8yFNWFzm4omU9/JeeaafwUoLxlULL2zY7H3+QmxCl0u
6t8VvlszdEFhemLHzVYRY0Ro/ISrR78CnANNsMIy3i11U5uvdeWVCoWV1BXNLzOD
4+BIDbMB/Do8PQCWiliSGZi8lvmj/sKbumMFQonMQWOfQswTtqTyQ3yhUM1LaxK5
PYq13rggi3rA8oq8SYb/KNCQL5pzACji4TRVK0kNpvtxJxe84X8+9IB1vhBvF/Ji
/xDd/3VDNPY+k1a47cON0S8Qc8DA3mq4hRfcgvuWy7ZxoMY7AfSJOhleb9+PzRBB
n9agYgMxZg1RUWZazQ5KuoJqbxpwOYVFja/stItNS4xsmi0lh2I4MNlBEDqnFLUx
SvTDc22c3uJlWhzBM/f2jH19uUeqm4jaggob3iJvJmK+Q7Ns3WcfhuWwCnc1+58d
iFAMRUCRBPeFS0qd56QGk1r97B6+3UfLUslCfaaA8IMOFvQSHJwDO87xWGyxeRTY
IIP9up4xwgje9LB7fMxsSkCDTHOk
=s3DI
-----END PGP PUBLIC KEY BLOCK-----
EOF

echo "Running apt-get update ..."
apt-get update

cat <<EOF

You can now start installing packages from apt.postgresql.org.

Have a look at https://wiki.postgresql.org/wiki/Apt for more information;
most notably the FAQ at https://wiki.postgresql.org/wiki/Apt/FAQ
EOF
129
setup/files/redash_supervisord_init
Normal file
@@ -0,0 +1,129 @@
#!/bin/sh
# /etc/init.d/redash_supervisord
### BEGIN INIT INFO
# Provides:          supervisord
# Required-Start:    $remote_fs $syslog
# Required-Stop:     $remote_fs $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: process supervisor
### END INIT INFO

# Author: Ron DuPlain <ron.duplain@gmail.com>

# Do NOT "set -e"

# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin
NAME=supervisord
DESC="process supervisor"
DAEMON=/usr/local/bin/$NAME
DAEMON_ARGS="--configuration /opt/redash/supervisord/supervisord.conf "
PIDFILE=/opt/redash/supervisord/supervisord.pid
SCRIPTNAME=/etc/init.d/redash_supervisord
USER=redash

# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME

# Load the VERBOSE setting and other rcS variables
. /lib/init/vars.sh

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions

#
# Function that starts the daemon/service
#
do_start()
{
    # Return
    #   0 if daemon has been started
    #   1 if daemon was already running
    #   2 if daemon could not be started
    start-stop-daemon --start --quiet --pidfile $PIDFILE --user $USER --chuid $USER --exec $DAEMON --test > /dev/null \
        || return 1
    start-stop-daemon --start --quiet --pidfile $PIDFILE --user $USER --chuid $USER --exec $DAEMON -- \
        $DAEMON_ARGS \
        || return 2
    # Add code here, if necessary, that waits for the process to be ready
    # to handle requests from services started subsequently which depend
    # on this one. As a last resort, sleep for some time.
}

#
# Function that stops the daemon/service
#
do_stop()
{
    # Return
    #   0 if daemon has been stopped
    #   1 if daemon was already stopped
    #   2 if daemon could not be stopped
    #   other if a failure occurred
    start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --user $USER --chuid $USER --name $NAME
    RETVAL="$?"
    [ "$RETVAL" = 2 ] && return 2
    # Wait for children to finish too if this is a daemon that forks
    # and if the daemon is only ever run from this initscript.
    # If the above conditions are not satisfied then add some other code
    # that waits for the process to drop all resources that could be
    # needed by services started subsequently. A last resort is to
    # sleep for some time.
    start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --user $USER --chuid $USER --exec $DAEMON
    [ "$?" = 2 ] && return 2
    # Many daemons don't delete their pidfiles when they exit.
    rm -f $PIDFILE
    return "$RETVAL"
}

case "$1" in
  start)
    [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
        0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
        2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
    esac
    ;;
  stop)
    [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
    do_stop
    case "$?" in
        0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
        2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
    esac
    ;;
  status)
    status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
    ;;
  restart)
    log_daemon_msg "Restarting $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1)
        do_start
        case "$?" in
            0) log_end_msg 0 ;;
            1) log_end_msg 1 ;; # Old process is still running
            *) log_end_msg 1 ;; # Failed to start
        esac
        ;;
      *)
        # Failed to stop
        log_end_msg 1
        ;;
    esac
    ;;
  *)
    echo "Usage: $SCRIPTNAME {start|stop|status|restart}" >&2
    exit 3
    ;;
esac

:
785
setup/files/redis.conf
Normal file
785
setup/files/redis.conf
Normal file
@@ -0,0 +1,785 @@
|
||||
## Generated by install_server.sh ##
|
||||
# Redis configuration file example
|
||||
|
||||
# Note on units: when memory size is needed, it is possible to specify
|
||||
# it in the usual form of 1k 5GB 4M and so forth:
|
||||
#
|
||||
# 1k => 1000 bytes
|
||||
# 1kb => 1024 bytes
|
||||
# 1m => 1000000 bytes
|
||||
# 1mb => 1024*1024 bytes
|
||||
# 1g => 1000000000 bytes
|
||||
# 1gb => 1024*1024*1024 bytes
|
||||
#
|
||||
# units are case insensitive so 1GB 1Gb 1gB are all the same.
|
||||
|
||||
################################## INCLUDES ###################################
|
||||
|
||||
# Include one or more other config files here. This is useful if you
|
||||
# have a standard template that goes to all Redis server but also need
|
||||
# to customize a few per-server settings. Include files can include
|
||||
# other files, so use this wisely.
|
||||
#
|
||||
# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
|
||||
# from admin or Redis Sentinel. Since Redis always uses the last processed
|
||||
# line as value of a configuration directive, you'd better put includes
|
||||
# at the beginning of this file to avoid overwriting config change at runtime.
|
||||
#
|
||||
# If instead you are interested in using includes to override configuration
|
||||
# options, it is better to use include as the last line.
|
||||
#
|
||||
# include /path/to/local.conf
|
||||
# include /path/to/other.conf
|
||||
|
||||
################################ GENERAL #####################################
|
||||
|
||||
# By default Redis does not run as a daemon. Use 'yes' if you need it.
|
||||
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
|
||||
daemonize yes
|
||||
|
||||
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
|
||||
# default. You can specify a custom pid file location here.
|
||||
pidfile /var/run/redis_6379.pid
|
||||
|
||||
# Accept connections on the specified port, default is 6379.
|
||||
# If port 0 is specified Redis will not listen on a TCP socket.
|
||||
port 6379
|
||||
|
||||
# TCP listen() backlog.
|
||||
#
|
||||
# In high requests-per-second environments you need an high backlog in order
|
||||
# to avoid slow clients connections issues. Note that the Linux kernel
|
||||
# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
|
||||
# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
|
||||
# in order to get the desired effect.
|
||||
tcp-backlog 511
|
||||
|
||||
# By default Redis listens for connections from all the network interfaces
|
||||
# available on the server. It is possible to listen to just one or multiple
|
||||
# interfaces using the "bind" configuration directive, followed by one or
|
||||
# more IP addresses.
|
||||
#
|
||||
# Examples:
|
||||
#
|
||||
# bind 192.168.1.100 10.0.0.1
|
||||
# bind 127.0.0.1
|
||||
|
||||
# Specify the path for the Unix socket that will be used to listen for
|
||||
# incoming connections. There is no default, so Redis will not listen
|
||||
# on a unix socket when not specified.
|
||||
#
|
||||
# unixsocket /tmp/redis.sock
|
||||
# unixsocketperm 700
|
||||
|
||||
# Close the connection after a client is idle for N seconds (0 to disable)
|
||||
timeout 0
|
||||
|
||||
# TCP keepalive.
|
||||
#
|
||||
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
|
||||
# of communication. This is useful for two reasons:
|
||||
#
|
||||
# 1) Detect dead peers.
|
||||
# 2) Take the connection alive from the point of view of network
|
||||
# equipment in the middle.
|
||||
#
|
||||
# On Linux, the specified value (in seconds) is the period used to send ACKs.
|
||||
# Note that to close the connection the double of the time is needed.
|
||||
# On other kernels the period depends on the kernel configuration.
|
||||
#
|
||||
# A reasonable value for this option is 60 seconds.
|
||||
tcp-keepalive 0
|
||||
|
||||
# Specify the server verbosity level.
|
||||
# This can be one of:
|
||||
# debug (a lot of information, useful for development/testing)
|
||||
# verbose (many rarely useful info, but not a mess like the debug level)
|
||||
# notice (moderately verbose, what you want in production probably)
|
||||
# warning (only very important / critical messages are logged)
|
||||
loglevel notice
|
||||
|
||||
# Specify the log file name. Also the empty string can be used to force
|
||||
# Redis to log on the standard output. Note that if you use standard
|
||||
# output for logging but daemonize, logs will be sent to /dev/null
|
||||
logfile /var/log/redis_6379.log
|
||||
|
||||
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
|
||||
# and optionally update the other syslog parameters to suit your needs.
|
||||
# syslog-enabled no
|
||||
|
||||
# Specify the syslog identity.
|
||||
# syslog-ident redis
|
||||
|
||||
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
|
||||
# syslog-facility local0
|
||||
|
||||
# Set the number of databases. The default database is DB 0, you can select
|
||||
# a different one on a per-connection basis using SELECT <dbid> where
|
||||
# dbid is a number between 0 and 'databases'-1
|
||||
databases 16
|
||||
|
||||
################################ SNAPSHOTTING ################################
|
||||
#
|
||||
# Save the DB on disk:
|
||||
#
|
||||
# save <seconds> <changes>
|
||||
#
|
||||
# Will save the DB if both the given number of seconds and the given
|
||||
# number of write operations against the DB occurred.
|
||||
#
|
||||
# In the example below the behaviour will be to save:
|
||||
# after 900 sec (15 min) if at least 1 key changed
|
||||
# after 300 sec (5 min) if at least 10 keys changed
|
||||
# after 60 sec if at least 10000 keys changed
|
||||
#
|
||||
# Note: you can disable saving at all commenting all the "save" lines.
|
||||
#
|
||||
# It is also possible to remove all the previously configured save
|
||||
# points by adding a save directive with a single empty string argument
|
||||
# like in the following example:
|
||||
#
|
||||
# save ""
|
||||
|
||||
save 900 1
|
||||
save 300 10
|
||||
save 60 10000
|
||||
|
||||
# By default Redis will stop accepting writes if RDB snapshots are enabled
|
||||
# (at least one save point) and the latest background save failed.
|
||||
# This will make the user aware (in a hard way) that data is not persisting
|
||||
# on disk properly, otherwise chances are that no one will notice and some
|
||||
# disaster will happen.
|
||||
#
|
||||
# If the background saving process will start working again Redis will
|
||||
# automatically allow writes again.
|
||||
#
|
||||
# However if you have setup your proper monitoring of the Redis server
|
||||
# and persistence, you may want to disable this feature so that Redis will
|
||||
# continue to work as usual even if there are problems with disk,
|
||||
# permissions, and so forth.
|
||||
stop-writes-on-bgsave-error yes
|
||||
|
||||
# Compress string objects using LZF when dump .rdb databases?
|
||||
# For default that's set to 'yes' as it's almost always a win.
|
||||
# If you want to save some CPU in the saving child set it to 'no' but
|
||||
# the dataset will likely be bigger if you have compressible values or keys.
|
||||
rdbcompression yes
|
||||
|
||||
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
|
||||
# This makes the format more resistant to corruption but there is a performance
|
||||
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
|
||||
# for maximum performances.
|
||||
#
|
||||
# RDB files created with checksum disabled have a checksum of zero that will
|
||||
# tell the loading code to skip the check.
|
||||
rdbchecksum yes
|
||||
|
||||
# The filename where to dump the DB
|
||||
dbfilename dump.rdb
|
||||
|
||||
# The working directory.
|
||||
#
|
||||
# The DB will be written inside this directory, with the filename specified
|
||||
# above using the 'dbfilename' configuration directive.
|
||||
#
|
||||
# The Append Only File will also be created inside this directory.
|
||||
#
|
||||
# Note that you must specify a directory here, not a file name.
|
||||
dir /var/lib/redis/6379
|
||||
|
||||
################################# REPLICATION #################################
|
||||
|
||||
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
|
||||
# another Redis server. A few things to understand ASAP about Redis replication.
|
||||
#
|
||||
# 1) Redis replication is asynchronous, but you can configure a master to
|
||||
# stop accepting writes if it appears to be not connected with at least
|
||||
# a given number of slaves.
|
||||
# 2) Redis slaves are able to perform a partial resynchronization with the
|
||||
# master if the replication link is lost for a relatively small amount of
|
||||
# time. You may want to configure the replication backlog size (see the next
|
||||
# sections of this file) with a sensible value depending on your needs.
|
||||
# 3) Replication is automatic and does not need user intervention. After a
|
||||
# network partition slaves automatically try to reconnect to masters
|
||||
# and resynchronize with them.
|
||||
#
|
||||
# slaveof <masterip> <masterport>
|
||||
|
||||
# If the master is password protected (using the "requirepass" configuration
|
||||
# directive below) it is possible to tell the slave to authenticate before
|
||||
# starting the replication synchronization process, otherwise the master will
|
||||
# refuse the slave request.
|
||||
#
|
||||
# masterauth <master-password>
|
||||
|
||||
# When a slave loses its connection with the master, or when the replication
|
||||
# is still in progress, the slave can act in two different ways:
|
||||
#
|
||||
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
|
||||
# still reply to client requests, possibly with out of date data, or the
|
||||
# data set may just be empty if this is the first synchronization.
|
||||
#
|
||||
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
|
||||
# an error "SYNC with master in progress" to all the kind of commands
|
||||
# but to INFO and SLAVEOF.
|
||||
#
|
||||
slave-serve-stale-data yes
|
||||
|
||||
# You can configure a slave instance to accept writes or not. Writing against
|
||||
# a slave instance may be useful to store some ephemeral data (because data
|
||||
# written on a slave will be easily deleted after resync with the master) but
|
||||
# may also cause problems if clients are writing to it because of a
|
||||
# misconfiguration.
|
||||
#
|
||||
# Since Redis 2.6 by default slaves are read-only.
|
||||
#
|
||||
# Note: read only slaves are not designed to be exposed to untrusted clients
|
||||
# on the internet. It's just a protection layer against misuse of the instance.
|
||||
# Still a read only slave exports by default all the administrative commands
|
||||
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
|
||||
# security of read only slaves using 'rename-command' to shadow all the
|
||||
# administrative / dangerous commands.
|
||||
slave-read-only yes
|
||||
|
||||
# Slaves send PINGs to server in a predefined interval. It's possible to change
|
||||
# this interval with the repl_ping_slave_period option. The default value is 10
|
||||
# seconds.
|
||||
#
|
||||
# repl-ping-slave-period 10
|
||||
|
||||
# The following option sets the replication timeout for:
|
||||
#
|
||||
# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
|
||||
# 2) Master timeout from the point of view of slaves (data, pings).
|
||||
# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
|
||||
#
|
||||
# It is important to make sure that this value is greater than the value
|
||||
# specified for repl-ping-slave-period otherwise a timeout will be detected
|
||||
# every time there is low traffic between the master and the slave.
|
||||
#
|
||||
# repl-timeout 60
|
||||
|
||||
# Disable TCP_NODELAY on the slave socket after SYNC?
|
||||
#
|
||||
# If you select "yes" Redis will use a smaller number of TCP packets and
|
||||
# less bandwidth to send data to slaves. But this can add a delay for
|
||||
# the data to appear on the slave side, up to 40 milliseconds with
|
||||
# Linux kernels using a default configuration.
|
||||
#
|
||||
# If you select "no" the delay for data to appear on the slave side will
|
||||
# be reduced but more bandwidth will be used for replication.
|
||||
#
|
||||
# By default we optimize for low latency, but in very high traffic conditions
|
||||
# or when the master and slaves are many hops away, turning this to "yes" may
|
||||
# be a good idea.
|
||||
repl-disable-tcp-nodelay no
|
||||
|
||||
# Set the replication backlog size. The backlog is a buffer that accumulates
|
||||
# slave data when slaves are disconnected for some time, so that when a slave
|
||||
# wants to reconnect again, often a full resync is not needed, but a partial
|
||||
# resync is enough, just passing the portion of data the slave missed while
|
||||
# disconnected.
|
||||
#
|
||||
# The biggest the replication backlog, the longer the time the slave can be
|
||||
# disconnected and later be able to perform a partial resynchronization.
|
||||
#
|
||||
# The backlog is only allocated once there is at least a slave connected.
|
||||
#
|
||||
# repl-backlog-size 1mb
|
||||
|
||||
# After a master has no longer connected slaves for some time, the backlog
|
||||
# will be freed. The following option configures the amount of seconds that
|
||||
# need to elapse, starting from the time the last slave disconnected, for
|
||||
# the backlog buffer to be freed.
|
||||
#
|
||||
# A value of 0 means to never release the backlog.
|
||||
#
|
||||
# repl-backlog-ttl 3600
|
||||
|
||||
# The slave priority is an integer number published by Redis in the INFO output.
|
||||
# It is used by Redis Sentinel in order to select a slave to promote into a
|
||||
# master if the master is no longer working correctly.
|
||||
#
|
||||
# A slave with a low priority number is considered better for promotion, so
|
||||
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
|
||||
# pick the one with priority 10, that is the lowest.
|
||||
#
|
||||
# However a special priority of 0 marks the slave as not able to perform the
|
||||
# role of master, so a slave with priority of 0 will never be selected by
|
||||
# Redis Sentinel for promotion.
|
||||
#
|
||||
# By default the priority is 100.
|
||||
slave-priority 100
|
||||
|
||||
# It is possible for a master to stop accepting writes if there are less than
|
||||
# N slaves connected, having a lag less or equal than M seconds.
|
||||
#
|
||||
# The N slaves need to be in "online" state.
|
||||
#
|
||||
# The lag in seconds, that must be <= the specified value, is calculated from
|
||||
# the last ping received from the slave, that is usually sent every second.
|
||||
#
|
||||
# This option does not GUARANTEES that N replicas will accept the write, but
|
||||
# will limit the window of exposure for lost writes in case not enough slaves
|
||||
# are available, to the specified number of seconds.
|
||||
#
|
||||
# For example to require at least 3 slaves with a lag <= 10 seconds use:
|
||||
#
|
||||
# min-slaves-to-write 3
|
||||
# min-slaves-max-lag 10
|
||||
#
|
||||
# Setting one or the other to 0 disables the feature.
|
||||
#
|
||||
# By default min-slaves-to-write is set to 0 (feature disabled) and
|
||||
# min-slaves-max-lag is set to 10.
|
||||
|
||||
################################## SECURITY ###################################
|
||||
|
||||
# Require clients to issue AUTH <PASSWORD> before processing any other
|
||||
# commands. This might be useful in environments in which you do not trust
|
||||
# others with access to the host running redis-server.
|
||||
#
|
||||
# This should stay commented out for backward compatibility and because most
|
||||
# people do not need auth (e.g. they run their own servers).
|
||||
#
|
||||
# Warning: since Redis is pretty fast an outside user can try up to
|
||||
# 150k passwords per second against a good box. This means that you should
|
||||
# use a very strong password otherwise it will be very easy to break.
|
||||
#
|
||||
# requirepass foobared
|
||||
|
||||
# Command renaming.
|
||||
#
|
||||
# It is possible to change the name of dangerous commands in a shared
|
||||
# environment. For instance the CONFIG command may be renamed into something
|
||||
# hard to guess so that it will still be available for internal-use tools
|
||||
# but not available for general clients.
|
||||
#
|
||||
# Example:
|
||||
#
|
||||
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
|
||||
#
|
||||
# It is also possible to completely kill a command by renaming it into
|
||||
# an empty string:
|
||||
#
|
||||
# rename-command CONFIG ""
|
||||
#
|
||||
# Please note that changing the name of commands that are logged into the
|
||||
# AOF file or transmitted to slaves may cause problems.
|
||||
|
||||
################################### LIMITS ####################################
|
||||
|
||||
# Set the max number of connected clients at the same time. By default
|
||||
# this limit is set to 10000 clients, however if the Redis server is not
|
||||
# able to configure the process file limit to allow for the specified limit
|
||||
# the max number of allowed clients is set to the current file limit
|
||||
# minus 32 (as Redis reserves a few file descriptors for internal uses).
|
||||
#
|
||||
# Once the limit is reached Redis will close all the new connections sending
|
||||
# an error 'max number of clients reached'.
|
||||
#
|
||||
# maxclients 10000
|
||||
|
||||
# Don't use more memory than the specified amount of bytes.
|
||||
# When the memory limit is reached Redis will try to remove keys
|
||||
# according to the eviction policy selected (see maxmemory-policy).
|
||||
#
|
||||
# If Redis can't remove keys according to the policy, or if the policy is
|
||||
# set to 'noeviction', Redis will start to reply with errors to commands
|
||||
# that would use more memory, like SET, LPUSH, and so on, and will continue
|
||||
# to reply to read-only commands like GET.
|
||||
#
|
||||
# This option is usually useful when using Redis as an LRU cache, or to set
|
||||
# a hard memory limit for an instance (using the 'noeviction' policy).
|
||||
#
|
||||
# WARNING: If you have slaves attached to an instance with maxmemory on,
|
||||
# the size of the output buffers needed to feed the slaves are subtracted
|
||||
# from the used memory count, so that network problems / resyncs will
|
||||
# not trigger a loop where keys are evicted, and in turn the output
|
||||
# buffer of slaves is full with DELs of keys evicted triggering the deletion
|
||||
# of more keys, and so forth until the database is completely emptied.
|
||||
#
|
||||
# In short... if you have slaves attached it is suggested that you set a lower
|
||||
# limit for maxmemory so that there is some free RAM on the system for slave
|
||||
# output buffers (but this is not needed if the policy is 'noeviction').
|
||||
#
|
||||
# maxmemory <bytes>
|
||||
|
||||
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
|
||||
# is reached. You can select among five behaviors:
|
||||
#
|
||||
# volatile-lru -> remove the key with an expire set using an LRU algorithm
|
||||
# allkeys-lru -> remove any key accordingly to the LRU algorithm
|
||||
# volatile-random -> remove a random key with an expire set
|
||||
# allkeys-random -> remove a random key, any key
|
||||
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
|
||||
# noeviction -> don't expire at all, just return an error on write operations
|
||||
#
|
||||
# Note: with any of the above policies, Redis will return an error on write
|
||||
# operations, when there are not suitable keys for eviction.
|
||||
#
|
||||
# At the date of writing this commands are: set setnx setex append
|
||||
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
|
||||
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
|
||||
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
|
||||
# getset mset msetnx exec sort
|
||||
#
|
||||
# The default is:
|
||||
#
|
||||
# maxmemory-policy volatile-lru
|
||||
|
||||
# LRU and minimal TTL algorithms are not precise algorithms but approximated
|
||||
# algorithms (in order to save memory), so you can select as well the sample
|
||||
# size to check. For instance for default Redis will check three keys and
|
||||
# pick the one that was used less recently, you can change the sample size
|
||||
# using the following configuration directive.
|
||||
#
|
||||
# maxmemory-samples 3
|
||||
|
||||
############################## APPEND ONLY MODE ###############################
|
||||
|
||||
# By default Redis asynchronously dumps the dataset on disk. This mode is
|
||||
# good enough in many applications, but an issue with the Redis process or
|
||||
# a power outage may result into a few minutes of writes lost (depending on
|
||||
# the configured save points).
|
||||
#
|
||||
# The Append Only File is an alternative persistence mode that provides
|
||||
# much better durability. For instance using the default data fsync policy
|
||||
# (see later in the config file) Redis can lose just one second of writes in a
|
||||
# dramatic event like a server power outage, or a single write if something
|
||||
# wrong with the Redis process itself happens, but the operating system is
|
||||
# still running correctly.
|
||||
#
|
||||
# AOF and RDB persistence can be enabled at the same time without problems.
|
||||
# If the AOF is enabled on startup Redis will load the AOF, that is the file
|
||||
# with the better durability guarantees.
|
||||
#
|
||||
# Please check http://redis.io/topics/persistence for more information.
|
||||
|
||||
appendonly no
|
||||
|
||||
# The name of the append only file (default: "appendonly.aof")
|
||||
|
||||
appendfilename "appendonly.aof"
|
||||
|
||||
# The fsync() call tells the Operating System to actually write data on disk
|
||||
# instead to wait for more data in the output buffer. Some OS will really flush
|
||||
# data on disk, some other OS will just try to do it ASAP.
|
||||
#
|
||||
# Redis supports three different modes:
|
||||
#
|
||||
# no: don't fsync, just let the OS flush the data when it wants. Faster.
|
||||
# always: fsync after every write to the append only log . Slow, Safest.
|
||||
# everysec: fsync only one time every second. Compromise.
|
||||
#
|
||||
# The default is "everysec", as that's usually the right compromise between
|
||||
# speed and data safety. It's up to you to understand if you can relax this to
|
||||
# "no" that will let the operating system flush the output buffer when
|
||||
# it wants, for better performances (but if you can live with the idea of
|
||||
# some data loss consider the default persistence mode that's snapshotting),
|
||||
# or on the contrary, use "always" that's very slow but a bit safer than
|
||||
# everysec.
|
||||
#
|
||||
# More details please check the following article:
|
||||
# http://antirez.com/post/redis-persistence-demystified.html
|
||||
#
|
||||
# If unsure, use "everysec".
|
||||
|
||||
# appendfsync always
|
||||
appendfsync everysec
|
||||
# appendfsync no

# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.

no-appendfsync-on-rewrite no

# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
# bigger than the specified percentage, the rewrite is triggered. Also
# you need to specify a minimal size for the AOF file to be rewritten, this
# is useful to avoid rewriting the AOF file even if the percentage increase
# is reached but it is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.

auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
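To make the trigger rule concrete: with the values above (100% growth, 64mb floor), a rewrite fires once the AOF is at least 64mb and at least double its post-rewrite base size. A small illustrative helper, not part of Redis, just the arithmetic:

    def should_rewrite_aof(current_size, base_size, percentage=100,
                           min_size=64 * 1024 * 1024):
        """Mirror of the auto-aof-rewrite trigger rule described above."""
        if percentage == 0:          # a percentage of zero disables the feature
            return False
        if current_size < min_size:  # still too small to bother rewriting
            return False
        growth = (current_size - base_size) * 100.0 / base_size
        return growth >= percentage

    # A 128mb AOF with a 64mb base is 100% growth, so BGREWRITEAOF triggers.
    print(should_rewrite_aof(128 * 1024**2, 64 * 1024**2))  # True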

# An AOF file may be found to be truncated at the end during the Redis
# startup process, when the AOF data gets loaded back into memory.
# This may happen when the system where Redis is running
# crashes, especially when an ext4 filesystem is mounted without the
# data=ordered option (however this can't happen when Redis itself
# crashes or aborts but the operating system still works correctly).
#
# Redis can either exit with an error when this happens, or load as much
# data as possible (the default now) and start if the AOF file is found
# to be truncated at the end. The following option controls this behavior.
#
# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
# the Redis server starts emitting a log to inform the user of the event.
# Otherwise if the option is set to no, the server aborts with an error
# and refuses to start. When the option is set to no, the user must
# fix the AOF file using the "redis-check-aof" utility before restarting
# the server.
#
# Note that if the AOF file is found to be corrupted in the middle
# the server will still exit with an error. This option only applies when
# Redis tries to read more data from the AOF file but not enough bytes
# are found.
aof-load-truncated yes

################################ LUA SCRIPTING  ###############################

# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceeds the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that has not yet called write commands. The second
# is the only way to shut down the server in case a write command was
# already issued by the script but the user doesn't want to wait for the
# natural termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000
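Once the limit is hit, a stuck read-only script can be stopped from another connection. A hedged sketch with redis-py (SCRIPT KILL raises an error if no script is running, or if the script has already performed writes):

    import redis

    r = redis.StrictRedis()
    try:
        r.script_kill()  # stops a long-running script that has not written yet
    except redis.ResponseError as e:
        # e.g. no script is in execution, or the script already wrote data
        print(e)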

################################## SLOW LOG ###################################

# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.

# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000

# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
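The resulting log is queryable from any client. For example, with redis-py against a local instance (a sketch):

    import redis

    r = redis.StrictRedis()
    for entry in r.slowlog_get(10):    # the ten most recent slow commands
        print(entry['id'], entry['duration'], entry['command'])
    r.slowlog_reset()                  # reclaim the memory used by the log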

################################ LATENCY MONITOR ##############################

# The Redis latency monitoring subsystem samples different operations
# at runtime in order to collect data related to possible sources of
# latency of a Redis instance.
#
# Via the LATENCY command this information is available to the user that can
# print graphs and obtain reports.
#
# The system only logs operations that were performed in a time equal or
# greater than the amount of milliseconds specified via the
# latency-monitor-threshold configuration directive. When its value is set
# to zero, the latency monitor is turned off.
#
# By default latency monitoring is disabled since it is mostly not needed
# if you don't have latency issues, and collecting data has a performance
# impact, that while very small, can be measured under big load. Latency
# monitoring can easily be enabled at runtime using the command
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
latency-monitor-threshold 0
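The monitor can be enabled at runtime and its samples read back via the LATENCY command. A redis-py sketch (LATENCY LATEST is issued through execute_command, since client versions of this era have no dedicated wrapper):

    import redis

    r = redis.StrictRedis()
    r.config_set('latency-monitor-threshold', 100)  # log events >= 100 ms
    print(r.execute_command('LATENCY', 'LATEST'))   # [[event, time, last, max], ...]
    r.config_set('latency-monitor-threshold', 0)    # turn the monitor back off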

############################# Event notification ##############################

# Redis can notify Pub/Sub clients about events happening in the key space.
# This feature is documented at http://redis.io/topics/notifications
#
# For instance if keyspace events notification is enabled, and a client
# performs a DEL operation on key "foo" stored in the Database 0, two
# messages will be published via Pub/Sub:
#
#  PUBLISH __keyspace@0__:foo del
#  PUBLISH __keyevent@0__:del foo
#
# It is possible to select the events that Redis will notify among a set
# of classes. Every class is identified by a single character:
#
#  K     Keyspace events, published with __keyspace@<db>__ prefix.
#  E     Keyevent events, published with __keyevent@<db>__ prefix.
#  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
#  $     String commands
#  l     List commands
#  s     Set commands
#  h     Hash commands
#  z     Sorted set commands
#  x     Expired events (events generated every time a key expires)
#  e     Evicted events (events generated when a key is evicted for maxmemory)
#  A     Alias for g$lshzxe, so that the "AKE" string means all the events.
#
# The "notify-keyspace-events" directive takes as argument a string composed
# of zero or multiple characters. The empty string means that notifications
# are completely disabled.
#
# Example: to enable list and generic events, from the point of view of the
# event name, use:
#
#  notify-keyspace-events Elg
#
# Example 2: to get the stream of the expired keys subscribing to channel
# name __keyevent@0__:expired use:
#
#  notify-keyspace-events Ex
#
# By default all notifications are disabled because most users don't need
# this feature and the feature has some overhead. Note that if you don't
# specify at least one of K or E, no events will be delivered.
notify-keyspace-events ""
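For instance, a client that watches expired keys could look like the following redis-py sketch (assuming database 0, and setting the directive at runtime to "Ex" as in Example 2 above):

    import redis

    r = redis.StrictRedis()
    r.config_set('notify-keyspace-events', 'Ex')  # runtime equivalent of Example 2

    p = r.pubsub()
    p.psubscribe('__keyevent@0__:expired')
    for message in p.listen():                    # blocks, printing each expiry
        if message['type'] == 'pmessage':
            print('expired key:', message['data'])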

############################### ADVANCED CONFIG ###############################

# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
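The effect of these thresholds can be observed with OBJECT ENCODING. A sketch using redis-py, assuming the 64-byte value limit above:

    import redis

    r = redis.StrictRedis()
    r.delete('h')
    r.hset('h', 'field', 'x' * 10)
    print(r.object('encoding', 'h'))  # 'ziplist' while entries stay small
    r.hset('h', 'field', 'x' * 100)   # value exceeds hash-max-ziplist-value
    print(r.object('encoding', 'h'))  # converted to 'hashtable'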

# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64

# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit in the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512

# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64

# HyperLogLog sparse representation bytes limit. The limit includes the
# 16 bytes header. When a HyperLogLog using the sparse representation crosses
# this limit, it is converted into the dense representation.
#
# A value greater than 16000 is totally useless, since at that point the
# dense representation is more memory efficient.
#
# The suggested value is ~ 3000 in order to have the benefits of
# the space efficient encoding without slowing down too much PFADD,
# which is O(N) with the sparse encoding. The value can be raised to
# ~ 10000 when CPU is not a concern, but space is, and the data set is
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000
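Whichever representation is in use, the HyperLogLog API is the same. A small usage sketch with redis-py:

    import redis

    r = redis.StrictRedis()
    r.delete('visitors')
    for i in range(10000):
        r.pfadd('visitors', 'user:%d' % i)
    # Approximate cardinality, around 10000 (standard error is about 0.81%).
    print(r.pfcount('visitors'))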

# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run into a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes

# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients including MONITOR clients
# slave  -> slave clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reaches 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously overcomes
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than they can read it.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard and the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
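These limits can also be adjusted at runtime via CONFIG SET, with the whole class/hard/soft/seconds quadruple passed as one string value. A hedged redis-py sketch that temporarily loosens the pubsub class:

    import redis

    r = redis.StrictRedis()
    # Hard limit 64mb, soft limit 16mb sustained for 90 seconds, pubsub clients.
    r.config_set('client-output-buffer-limit', 'pubsub 64mb 16mb 90')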

# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10

# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes

66  setup/files/redis_init  Normal file
@@ -0,0 +1,66 @@
#!/bin/sh

EXEC=/usr/local/bin/redis-server
CLIEXEC=/usr/local/bin/redis-cli
PIDFILE=/var/run/redis_6379.pid
CONF="/etc/redis/6379.conf"
REDISPORT="6379"
###############
# SysV Init Information
# chkconfig: - 58 74
# description: redis_6379 is the redis daemon.
### BEGIN INIT INFO
# Provides: redis_6379
# Required-Start: $network $local_fs $remote_fs
# Required-Stop: $network $local_fs $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Should-Start: $syslog $named
# Should-Stop: $syslog $named
# Short-Description: start and stop redis_6379
# Description: Redis daemon
### END INIT INFO


case "$1" in
    start)
        if [ -f $PIDFILE ]
        then
            echo "$PIDFILE exists, process is already running or crashed"
        else
            echo "Starting Redis server..."
            $EXEC $CONF
        fi
        ;;
    stop)
        if [ ! -f $PIDFILE ]
        then
            echo "$PIDFILE does not exist, process is not running"
        else
            PID=$(cat $PIDFILE)
            echo "Stopping ..."
            $CLIEXEC -p $REDISPORT shutdown
            while [ -x /proc/${PID} ]
            do
                echo "Waiting for Redis to shutdown ..."
                sleep 1
            done
            echo "Redis stopped"
        fi
        ;;
    status)
        if [ ! -f $PIDFILE ]
        then
            echo 'Redis is not running'
        else
            echo "Redis is running ($(cat $PIDFILE))"
        fi
        ;;
    restart)
        $0 stop
        $0 start
        ;;
    *)
        echo "Please use start, stop, restart or status as first argument"
        ;;
esac
31  setup/files/supervisord.conf  Normal file
@@ -0,0 +1,31 @@
[supervisord]
nodaemon=false
logfile=/opt/redash/logs/supervisord.log
pidfile=/opt/redash/supervisord/supervisord.pid
directory=/opt/redash/current

[inet_http_server]
port = 127.0.0.1:9001

[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface

[program:redash_server]
command=/opt/redash/current/bin/run gunicorn -b 127.0.0.1:5000 --name redash -w 4 redash.wsgi:app
process_name=redash_server
numprocs=1
priority=999
autostart=true
autorestart=true
stdout_logfile=/opt/redash/logs/api.log
stderr_logfile=/opt/redash/logs/api_error.log

[program:redash_celery]
command=/opt/redash/current/bin/run celery worker --app=redash.worker --beat -Qqueries,celery,scheduled_queries
process_name=redash_celery
numprocs=1
priority=999
autostart=true
autorestart=true
stdout_logfile=/opt/redash/logs/celery.log
stderr_logfile=/opt/redash/logs/celery_error.log
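Because this config exposes supervisor's HTTP server on 127.0.0.1:9001, the state of the two redash processes can be checked programmatically over XML-RPC, as well as with supervisorctl. A Python 2 sketch (redash of this era ran on Python 2) using supervisor's standard RPC interface:

    import xmlrpclib

    proxy = xmlrpclib.ServerProxy('http://127.0.0.1:9001/RPC2')
    for info in proxy.supervisor.getAllProcessInfo():
        print(info['name'], info['statename'])  # e.g. redash_server RUNNING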
49  setup/packer.json  Normal file
@@ -0,0 +1,49 @@
{
  "variables": {
    "aws_access_key": "",
    "aws_secret_key": "",
    "redash_version": "0.4.0.b589",
    "image_version": "040b589"
  },
  "builders": [
    {
      "name": "redash-us-east-1",
      "type": "amazon-ebs",
      "access_key": "{{user `aws_access_key`}}",
      "secret_key": "{{user `aws_secret_key`}}",
      "region": "us-east-1",
      "source_ami": "ami-fe7cc796",
      "instance_type": "t2.micro",
      "ssh_username": "ubuntu",
      "ami_name": "redash-{{user `image_version`}}-us-east-1"
    },
    {
      "name": "redash-eu-west-1",
      "type": "amazon-ebs",
      "access_key": "{{user `aws_access_key`}}",
      "secret_key": "{{user `aws_secret_key`}}",
      "region": "eu-west-1",
      "source_ami": "ami-d2ff50a5",
      "instance_type": "t2.micro",
      "ssh_username": "ubuntu",
      "ami_name": "redash-{{user `image_version`}}-eu-west-1"
    },
    {
      "type": "googlecompute",
      "bucket_name": "redash-images",
      "account_file": "account.json",
      "client_secrets_file": "client_secret.json",
      "project_id": "redash-bird-123",
      "source_image": "debian-7-wheezy-v20141017",
      "zone": "us-central1-a",
      "image_name": "redash-{{user `image_version`}}"
    }
  ],
  "provisioners": [
    {
      "type": "shell",
      "script": "bootstrap.sh",
      "environment_vars": ["REDASH_VERSION={{user `redash_version`}}"]
    }
  ]
}
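A template like this is typically driven with "packer build", passing the AWS credentials as variables. A hedged sketch of the invocation, wrapped in Python for consistency with the other examples here (the key values and path are placeholders):

    import subprocess

    # Builds all three images defined above; requires packer on the PATH.
    subprocess.check_call([
        'packer', 'build',
        '-var', 'aws_access_key=YOUR_KEY',
        '-var', 'aws_secret_key=YOUR_SECRET',
        'setup/packer.json',
    ])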
File diff suppressed because one or more lines are too long
@@ -26,9 +26,8 @@ class ImportTest(BaseTestCase):
        self.assertEqual(dashboard.widgets.count(),
                         reduce(lambda s, row: s + len(row), self.dashboard['widgets'], 0))

        self.assertEqual(models.Visualization.select().count(), dashboard.widgets.count())
        self.assertEqual(models.Query.select().count(), dashboard.widgets.count()-1)
        self.assertEqual(models.QueryResult.select().count(), dashboard.widgets.count()-1)
        self.assertEqual(models.Visualization.select().count(), dashboard.widgets.count()-1)
        self.assertEqual(models.Query.select().count(), dashboard.widgets.count()-2)

    def test_imports_updates_existing_models(self):
        importer = import_export.Importer(data_source=data_source_factory.create())
@@ -1,7 +1,8 @@
import datetime
import json
from tests import BaseTestCase
from redash import models
from factories import dashboard_factory, query_factory, data_source_factory, query_result_factory
from factories import dashboard_factory, query_factory, data_source_factory, query_result_factory, user_factory
from redash.utils import gen_query_hash

@@ -29,6 +30,40 @@ class QueryTest(BaseTestCase):

        self.assertNotEquals(old_hash, q.query_hash)

    def test_search_finds_in_name(self):
        q1 = query_factory.create(name="Testing search")
        q2 = query_factory.create(name="Testing searching")
        q3 = query_factory.create(name="Testing sea rch")

        queries = models.Query.search("search")

        self.assertIn(q1, queries)
        self.assertIn(q2, queries)
        self.assertNotIn(q3, queries)

    def test_search_finds_in_description(self):
        q1 = query_factory.create(description="Testing search")
        q2 = query_factory.create(description="Testing searching")
        q3 = query_factory.create(description="Testing sea rch")

        queries = models.Query.search("search")

        self.assertIn(q1, queries)
        self.assertIn(q2, queries)
        self.assertNotIn(q3, queries)

    def test_search_by_id_returns_query(self):
        q1 = query_factory.create(description="Testing search")
        q2 = query_factory.create(description="Testing searching")
        q3 = query_factory.create(description="Testing sea rch")

        queries = models.Query.search(str(q3.id))

        self.assertIn(q3, queries)
        self.assertNotIn(q1, queries)
        self.assertNotIn(q2, queries)


class QueryResultTest(BaseTestCase):
    def setUp(self):
@@ -93,6 +128,26 @@ class QueryResultTest(BaseTestCase):

        self.assertEqual(found_query_result.id, qr.id)


class TestUnusedQueryResults(BaseTestCase):
    def test_returns_only_unused_query_results(self):
        two_weeks_ago = datetime.datetime.now() - datetime.timedelta(days=14)
        qr = query_result_factory.create()
        query = query_factory.create(latest_query_data=qr)
        unused_qr = query_result_factory.create(retrieved_at=two_weeks_ago)

        self.assertIn(unused_qr, models.QueryResult.unused())
        self.assertNotIn(qr, models.QueryResult.unused())

    def test_returns_only_over_a_week_old_results(self):
        two_weeks_ago = datetime.datetime.now() - datetime.timedelta(days=14)
        unused_qr = query_result_factory.create(retrieved_at=two_weeks_ago)
        new_unused_qr = query_result_factory.create()

        self.assertIn(unused_qr, models.QueryResult.unused())
        self.assertNotIn(new_unused_qr, models.QueryResult.unused())


class TestQueryResultStoreResult(BaseTestCase):
    def setUp(self):
        super(TestQueryResultStoreResult, self).setUp()
@@ -148,4 +203,38 @@ class TestQueryResultStoreResult(BaseTestCase):

        self.assertEqual(models.Query.get_by_id(query1.id)._data['latest_query_data'], query_result.id)
        self.assertEqual(models.Query.get_by_id(query2.id)._data['latest_query_data'], query_result.id)
        self.assertNotEqual(models.Query.get_by_id(query3.id)._data['latest_query_data'], query_result.id)
        self.assertNotEqual(models.Query.get_by_id(query3.id)._data['latest_query_data'], query_result.id)


class TestEvents(BaseTestCase):
    def raw_event(self):
        timestamp = 1411778709.791
        user = user_factory.create()
        created_at = datetime.datetime.utcfromtimestamp(timestamp)
        raw_event = {"action": "view",
                     "timestamp": timestamp,
                     "object_type": "dashboard",
                     "user_id": user.id,
                     "object_id": 1}

        return raw_event, user, created_at

    def test_records_event(self):
        raw_event, user, created_at = self.raw_event()

        event = models.Event.record(raw_event)

        self.assertEqual(event.user, user)
        self.assertEqual(event.action, "view")
        self.assertEqual(event.object_type, "dashboard")
        self.assertEqual(event.object_id, 1)
        self.assertEqual(event.created_at, created_at)

    def test_records_additional_properties(self):
        raw_event, _, _ = self.raw_event()
        additional_properties = {'test': 1, 'test2': 2, 'whatever': "abc"}
        raw_event.update(additional_properties)

        event = models.Event.record(raw_event)

        self.assertDictEqual(json.loads(event.additional_properties), additional_properties)