Mirror of https://github.com/getredash/redash.git (synced 2025-12-19 17:37:19 -05:00)
Replace Celery with RQ (except for execute_query tasks) (#4093)
* add rq and an rq_worker service
* add rq_scheduler and an rq_scheduler service
* move beat schedule to periodic_jobs queue
* move version checks to RQ
* move query result cleanup to RQ
* use timedelta and DRY up a bit
* move custom tasks to RQ
* do actual schema refreshes in rq
* rename 'period_jobs' to 'periodic', as it obviously holds jobs
* move send_email to rq
* DRY up enqueues
* ditch and use a partially applied decorator
* move subscribe to rq
* move check_alerts_for_query to rq
* move record_event to rq
* make tests play nicely with rq
* 👋 beat
* rename rq_scheduler to plain scheduler, now that there's no Celery scheduler entrypoint
* add some color to rq-worker's output
* add logging context to rq jobs (while keeping execute_query context via get_task_logger for now)
* move schedule to its own module
* cancel previously scheduled periodic jobs. not sure this is a good idea.
* rename redash.scheduler to redash.schedule
* allow custom dynamic jobs to be added declaratively
* add basic monitoring to rq queues
* add worker monitoring
* pleasing the CodeClimate overlords
* adjust cypress docker-compose.yml to include rq changes
* DRY up Cypress docker-compose
* add rq dependencies to cypress docker-compose service
* an odd attempt at watching docker-compose logs when running with Cypress
* Revert "an odd attempt at watching docker-compose logs when running with Cypress". This reverts commit 016bd1a93e.
* show docker-compose logs at Cypress shutdown
* Revert "DRY up Cypress docker-compose". This reverts commit 43abac7084.
* minimal version for binding is 3.2
* remove unnecessary code reloads on cypress
* add a command which errors if any of the workers running inside the current machine haven't been active in the last minute
* SCHEMAS_REFRESH_QUEUE is no longer a required setting
* split tasks/queries.py into execution.py and maintenance.py
* fix tests after query execution split
* pleasing the CodeClimate overlords
* rename worker to celery_worker and rq_worker to worker
* use /rq_status instead of /jobs
* show started jobs' time ago according to UTC
* replace all spaces in column names
* fix query tests after execution split
* exit with an int
* general lint
* add an entrypoint for rq_healthcheck
* fix indentation
* delete all existing periodic jobs before scheduling them
* remove some unrequired requires
* move schedule example to redash.schedule
* add RQ integration to Sentry's setup
* pleasing the CodeClimate overlords
* remove replication settings from docker-compose (a proper way to scale using docker-compose would be the --scale CLI option, which will be described in the knowledge base)
* revert to calling a function in dynamic settings to allow periodic jobs to be scheduled after the app has been loaded
* don't need to depend on context when templating failure reports
* set the timeout_ttl to double the interval to avoid job results from expiring and having periodic jobs not reschedule
* whoops, bad merge
* describe custom jobs and don't actually schedule them
* fix merge
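The heart of the list above is moving Redash's periodic work from Celery beat onto rq-scheduler. As a rough illustration only (not the actual redash.schedule module), a minimal sketch of scheduling a periodic job with rq-scheduler might look like the following; the "periodic" queue name, the local Redis connection, the refresh_schemas placeholder, and the schedule() helper are all assumptions made for the example.

    # Illustrative sketch -- not the code added by this PR.
    # It mirrors three items from the commit message: scheduling periodic
    # jobs via rq-scheduler, deleting previously scheduled periodic jobs
    # before re-adding them, and keeping a job's result around for twice
    # its interval so periodic jobs keep rescheduling.
    from datetime import datetime, timedelta

    from redis import Redis
    from rq_scheduler import Scheduler

    redis_connection = Redis()  # assumes a local Redis instance
    scheduler = Scheduler(queue_name="periodic", connection=redis_connection)


    def refresh_schemas():
        """Placeholder for a periodic task (e.g. a schema refresh)."""


    def schedule(interval: timedelta, func):
        """Run func every `interval`, keeping the result for 2x the interval."""
        seconds = int(interval.total_seconds())
        scheduler.schedule(
            scheduled_time=datetime.utcnow(),
            func=func,
            interval=seconds,
            result_ttl=seconds * 2,
            repeat=None,  # repeat indefinitely
        )


    # Delete all existing periodic jobs before scheduling them again,
    # so entries from previous deploys don't linger.
    for job in scheduler.get_jobs():
        scheduler.cancel(job)

    schedule(timedelta(minutes=30), refresh_schemas)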
@@ -1,9 +1,9 @@
 #!/bin/bash
 set -e
 
-worker() {
+celery_worker() {
   WORKERS_COUNT=${WORKERS_COUNT:-2}
-  QUEUES=${QUEUES:-queries,scheduled_queries,celery,schemas}
+  QUEUES=${QUEUES:-queries,scheduled_queries}
   WORKER_EXTRA_OPTIONS=${WORKER_EXTRA_OPTIONS:-}
 
   echo "Starting $WORKERS_COUNT workers for queues: $QUEUES..."
@@ -11,23 +11,36 @@ worker() {
 }
 
 scheduler() {
-  WORKERS_COUNT=${WORKERS_COUNT:-1}
-  QUEUES=${QUEUES:-celery}
-  SCHEDULE_DB=${SCHEDULE_DB:-celerybeat-schedule}
-
-  echo "Starting scheduler and $WORKERS_COUNT workers for queues: $QUEUES..."
-
-  exec /usr/local/bin/celery worker --app=redash.worker --beat -s$SCHEDULE_DB -c$WORKERS_COUNT -Q$QUEUES -linfo --max-tasks-per-child=10 -Ofair
-}
-
-dev_worker() {
-  WORKERS_COUNT=${WORKERS_COUNT:-2}
-  QUEUES=${QUEUES:-queries,scheduled_queries,celery,schemas}
-  SCHEDULE_DB=${SCHEDULE_DB:-celerybeat-schedule}
-
-  echo "Starting dev scheduler and $WORKERS_COUNT workers for queues: $QUEUES..."
-
-  exec watchmedo auto-restart --directory=./redash/ --pattern=*.py --recursive -- /usr/local/bin/celery worker --app=redash.worker --beat -s$SCHEDULE_DB -c$WORKERS_COUNT -Q$QUEUES -linfo --max-tasks-per-child=10 -Ofair
+  echo "Starting RQ scheduler..."
+
+  exec /app/manage.py rq scheduler
+}
+
+dev_scheduler() {
+  echo "Starting dev RQ scheduler..."
+
+  exec watchmedo auto-restart --directory=./redash/ --pattern=*.py --recursive -- ./manage.py rq scheduler
+}
+
+worker() {
+  echo "Starting RQ worker..."
+
+  exec /app/manage.py rq worker $QUEUES
+}
+
+dev_worker() {
+  echo "Starting dev RQ worker..."
+
+  exec watchmedo auto-restart --directory=./redash/ --pattern=*.py --recursive -- ./manage.py rq worker $QUEUES
+}
+
+dev_celery_worker() {
+  WORKERS_COUNT=${WORKERS_COUNT:-2}
+  QUEUES=${QUEUES:-queries,scheduled_queries}
+
+  echo "Starting $WORKERS_COUNT workers for queues: $QUEUES..."
+
+  exec watchmedo auto-restart --directory=./redash/ --pattern=*.py --recursive -- /usr/local/bin/celery worker --app=redash.worker -c$WORKERS_COUNT -Q$QUEUES -linfo --max-tasks-per-child=10 -Ofair
 }
 
 server() {
@@ -45,6 +58,10 @@ celery_healthcheck() {
   exec /usr/local/bin/celery inspect ping --app=redash.worker -d celery@$HOSTNAME
 }
 
+rq_healthcheck() {
+  exec /app/manage.py rq healthcheck
+}
+
 help() {
   echo "Redash Docker."
   echo ""
@@ -52,10 +69,14 @@ help() {
   echo ""
 
   echo "server -- start Redash server (with gunicorn)"
-  echo "worker -- start Celery worker"
-  echo "scheduler -- start Celery worker with a beat (scheduler) process"
-  echo "dev_worker -- start Celery worker with a beat (scheduler) process which picks up code changes and reloads"
+  echo "celery_worker -- start Celery worker"
+  echo "dev_celery_worker -- start Celery worker process which picks up code changes and reloads"
+  echo "worker -- start a single RQ worker"
+  echo "dev_worker -- start a single RQ worker with code reloading"
+  echo "scheduler -- start an rq-scheduler instance"
+  echo "dev_scheduler -- start an rq-scheduler instance with code reloading"
   echo "celery_healthcheck -- runs a Celery healthcheck. Useful for Docker's HEALTHCHECK mechanism."
+  echo "rq_healthcheck -- runs a RQ healthcheck that verifies that all local workers are active. Useful for Docker's HEALTHCHECK mechanism."
   echo ""
   echo "shell -- open shell"
   echo "dev_server -- start Flask development server with debugger and auto reload"
@@ -89,10 +110,30 @@ case "$1" in
     shift
     scheduler
     ;;
+  dev_scheduler)
+    shift
+    dev_scheduler
+    ;;
+  celery_worker)
+    shift
+    celery_worker
+    ;;
+  dev_celery_worker)
+    shift
+    dev_celery_worker
+    ;;
+  dev_worker)
+    shift
+    dev_worker
+    ;;
+  rq_healthcheck)
+    shift
+    rq_healthcheck
+    ;;
   celery_healthcheck)
     shift
     celery_healthcheck
     ;;
   dev_server)
     export FLASK_DEBUG=1
     exec /app/manage.py runserver --debugger --reload -h 0.0.0.0
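The rq_healthcheck entrypoint command added above just delegates to manage.py rq healthcheck. As a sketch only (not the command this PR adds), a worker liveness check along the lines described in the commit message, erroring out if any RQ worker on the current machine has not been active in the last minute, could look like this, assuming rq's Worker.all() and last_heartbeat (available in recent rq versions) and a local Redis connection:

    # Illustrative sketch of an RQ worker liveness check.
    import socket
    import sys
    from datetime import datetime, timedelta

    from redis import Redis
    from rq import Worker


    def healthcheck(max_idle=timedelta(minutes=1)):
        connection = Redis()
        hostname = socket.gethostname()

        # Only consider workers running on this machine (hostname may be
        # bytes in some rq versions, hence the str() coercion).
        local_workers = [
            w for w in Worker.all(connection=connection)
            if w.hostname and hostname in str(w.hostname)
        ]
        if not local_workers:
            print("No RQ workers found on %s" % hostname)
            return 1

        now = datetime.utcnow()
        stale = [
            w.name for w in local_workers
            if w.last_heartbeat is None or now - w.last_heartbeat > max_idle
        ]
        if stale:
            print("Inactive workers: %s" % ", ".join(stale))
            return 1

        return 0


    if __name__ == "__main__":
        # Exit with an int, as the commit message puts it: Docker's
        # HEALTHCHECK treats a non-zero exit status as unhealthy.
        sys.exit(healthcheck())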