Mirror of https://github.com/getredash/redash.git (synced 2025-12-19 17:37:19 -05:00)
Revert build (2 of 2) (#6967)
.ci/compose.ci.yaml (new file, +25)
@@ -0,0 +1,25 @@
+services:
+  redash:
+    build: ../
+    command: manage version
+    depends_on:
+      - postgres
+      - redis
+    ports:
+      - "5000:5000"
+    environment:
+      PYTHONUNBUFFERED: 0
+      REDASH_LOG_LEVEL: "INFO"
+      REDASH_REDIS_URL: "redis://redis:6379/0"
+      POSTGRES_PASSWORD: "FmTKs5vX52ufKR1rd8tn4MoSP7zvCJwb"
+      REDASH_DATABASE_URL: "postgresql://postgres:FmTKs5vX52ufKR1rd8tn4MoSP7zvCJwb@postgres/postgres"
+      REDASH_COOKIE_SECRET: "2H9gNG9obnAQ9qnR9BDTQUph6CbXKCzF"
+  redis:
+    image: redis:7-alpine
+    restart: unless-stopped
+  postgres:
+    image: pgautoupgrade/pgautoupgrade:latest
+    command: "postgres -c fsync=off -c full_page_writes=off -c synchronous_commit=OFF"
+    restart: unless-stopped
+    environment:
+      POSTGRES_HOST_AUTH_METHOD: "trust"
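Not part of the diff, but for orientation: a CI job would typically drive this file with standard Compose commands along these lines (the exact invocation lives in the workflow definition, which this commit does not show):

    # Build the image and run the one-shot `redash` service, which executes `manage version`
    docker compose -f .ci/compose.ci.yaml run --rm --build redash
    # Tear everything down afterwards
    docker compose -f .ci/compose.ci.yaml down -v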
.ci/compose.cypress.yaml (new file, +73)
@@ -0,0 +1,73 @@
+x-redash-service: &redash-service
+  build:
+    context: ../
+    args:
+      install_groups: "main"
+      code_coverage: ${CODE_COVERAGE}
+x-redash-environment: &redash-environment
+  REDASH_LOG_LEVEL: "INFO"
+  REDASH_REDIS_URL: "redis://redis:6379/0"
+  POSTGRES_PASSWORD: "FmTKs5vX52ufKR1rd8tn4MoSP7zvCJwb"
+  REDASH_DATABASE_URL: "postgresql://postgres:FmTKs5vX52ufKR1rd8tn4MoSP7zvCJwb@postgres/postgres"
+  REDASH_RATELIMIT_ENABLED: "false"
+  REDASH_ENFORCE_CSRF: "true"
+  REDASH_COOKIE_SECRET: "2H9gNG9obnAQ9qnR9BDTQUph6CbXKCzF"
+services:
+  server:
+    <<: *redash-service
+    command: server
+    depends_on:
+      - postgres
+      - redis
+    ports:
+      - "5000:5000"
+    environment:
+      <<: *redash-environment
+      PYTHONUNBUFFERED: 0
+  scheduler:
+    <<: *redash-service
+    command: scheduler
+    depends_on:
+      - server
+    environment:
+      <<: *redash-environment
+  worker:
+    <<: *redash-service
+    command: worker
+    depends_on:
+      - server
+    environment:
+      <<: *redash-environment
+      PYTHONUNBUFFERED: 0
+  cypress:
+    ipc: host
+    build:
+      context: ../
+      dockerfile: .ci/Dockerfile.cypress
+    depends_on:
+      - server
+      - worker
+      - scheduler
+    environment:
+      CYPRESS_baseUrl: "http://server:5000"
+      CYPRESS_coverage: ${CODE_COVERAGE}
+      PERCY_TOKEN: ${PERCY_TOKEN}
+      PERCY_BRANCH: ${CIRCLE_BRANCH}
+      PERCY_COMMIT: ${CIRCLE_SHA1}
+      PERCY_PULL_REQUEST: ${CIRCLE_PR_NUMBER}
+      COMMIT_INFO_BRANCH: ${CIRCLE_BRANCH}
+      COMMIT_INFO_MESSAGE: ${COMMIT_INFO_MESSAGE}
+      COMMIT_INFO_AUTHOR: ${CIRCLE_USERNAME}
+      COMMIT_INFO_SHA: ${CIRCLE_SHA1}
+      COMMIT_INFO_REMOTE: ${CIRCLE_REPOSITORY_URL}
+      CYPRESS_PROJECT_ID: ${CYPRESS_PROJECT_ID}
+      CYPRESS_RECORD_KEY: ${CYPRESS_RECORD_KEY}
+  redis:
+    image: redis:7-alpine
+    restart: unless-stopped
+  postgres:
+    image: pgautoupgrade/pgautoupgrade:latest
+    command: "postgres -c fsync=off -c full_page_writes=off -c synchronous_commit=OFF"
+    restart: unless-stopped
+    environment:
+      POSTGRES_HOST_AUTH_METHOD: "trust"
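As illustration only (the workflow invocation is not part of this commit): the anchored x-redash-service and x-redash-environment blocks are merged into server, scheduler, and worker via YAML merge keys, so a CI run against this file might look like:

    # CODE_COVERAGE toggles coverage instrumentation via the build arg above
    CODE_COVERAGE=true docker compose -f .ci/compose.cypress.yaml -p cypress up -d server worker scheduler
    docker compose -f .ci/compose.cypress.yaml -p cypress run --name cypress cypress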
.ci/docker_build (new executable file, +39)
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# This script only needs to run on the main Redash repo
+
+if [ "${GITHUB_REPOSITORY}" != "getredash/redash" ]; then
+  echo "Skipping image build for Docker Hub, as this isn't the main Redash repository"
+  exit 0
+fi
+
+if [ "${GITHUB_REF_NAME}" != "master" ] && [ "${GITHUB_REF_NAME}" != "preview-image" ]; then
+  echo "Skipping image build for Docker Hub, as this isn't the 'master' nor 'preview-image' branch"
+  exit 0
+fi
+
+if [ "x${DOCKER_USER}" = "x" ] || [ "x${DOCKER_PASS}" = "x" ]; then
+  echo "Skipping image build for Docker Hub, as the login details aren't available"
+  exit 0
+fi
+
+set -e
+VERSION=$(jq -r .version package.json)
+VERSION_TAG="$VERSION.b${GITHUB_RUN_ID}.${GITHUB_RUN_NUMBER}"
+
+export DOCKER_BUILDKIT=1
+export COMPOSE_DOCKER_CLI_BUILD=1
+
+docker login -u "${DOCKER_USER}" -p "${DOCKER_PASS}"
+
+DOCKERHUB_REPO="redash/redash"
+DOCKER_TAGS="-t redash/redash:preview -t redash/preview:${VERSION_TAG}"
+
+# Build the docker container
+docker build --build-arg install_groups="main,all_ds,dev" ${DOCKER_TAGS} .
+
+# Push the container to the preview build locations
+docker push "${DOCKERHUB_REPO}:preview"
+docker push "redash/preview:${VERSION_TAG}"
+
+echo "Built: ${VERSION_TAG}"
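A worked example with hypothetical values (package.json version "24.04.0", GITHUB_RUN_ID=8881234567, GITHUB_RUN_NUMBER=42):

    VERSION_TAG="24.04.0.b8881234567.42"
    # pushed as:
    #   redash/redash:preview
    #   redash/preview:24.04.0.b8881234567.42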
.ci/pack (new executable file, +9)
@@ -0,0 +1,9 @@
+#!/bin/bash
+NAME=redash
+VERSION=$(jq -r .version package.json)
+FULL_VERSION=$VERSION+b$CIRCLE_BUILD_NUM
+FILENAME=$NAME.$FULL_VERSION.tar.gz
+
+mkdir -p /tmp/artifacts/
+
+tar -zcv -f /tmp/artifacts/$FILENAME --exclude=".git" --exclude="optipng*" --exclude="cypress" --exclude="*.pyc" --exclude="*.pyo" --exclude="venv" *
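For example, with version 24.04.0 in package.json and CIRCLE_BUILD_NUM=1234 (hypothetical values), the archive lands at:

    /tmp/artifacts/redash.24.04.0+b1234.tar.gz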
.ci/update_version (new executable file, +6)
@@ -0,0 +1,6 @@
+#!/bin/bash
+VERSION=$(jq -r .version package.json)
+FULL_VERSION=${VERSION}+b${GITHUB_RUN_ID}.${GITHUB_RUN_NUMBER}
+
+sed -ri "s/^__version__ = '([A-Za-z0-9.-]*)'/__version__ = '${FULL_VERSION}'/" redash/__init__.py
+sed -i "s/dev/${GITHUB_SHA}/" client/app/version.json
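Under the same hypothetical values as above, the first sed rewrites the version line in redash/__init__.py to:

    __version__ = '24.04.0+b8881234567.42'

and the second replaces the literal string "dev" in client/app/version.json with the commit SHA.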
Dockerfile (52 lines changed)
@@ -1,35 +1,30 @@
-# Controls whether to build the frontend assets
-ARG FRONTEND_BUILD_MODE=0
+FROM node:18-bookworm as frontend-builder
 
-# MODE 0: create empty files. useful for backend tests
-FROM alpine:3.19 as frontend-builder-0
-RUN \
-  mkdir -p /frontend/client/dist && \
-  touch /frontend/client/dist/multi_org.html && \
-  touch /frontend/client/dist/index.html
-
-# MODE 1: copy static frontend from host, useful for CI to ignore building static content multiple times
-FROM alpine:3.19 as frontend-builder-1
-COPY client/dist /frontend/client/dist
-
-# MODE 2: build static content in docker, can be used for a local development
-FROM node:18-bookworm as frontend-builder-2
 RUN npm install --global --force yarn@1.22.22
 
+# Controls whether to build the frontend assets
+ARG skip_frontend_build
+
 ENV CYPRESS_INSTALL_BINARY=0
 ENV PUPPETEER_SKIP_CHROMIUM_DOWNLOAD=1
 
 RUN useradd -m -d /frontend redash
 USER redash
 
 WORKDIR /frontend
 COPY --chown=redash package.json yarn.lock .yarnrc /frontend/
 COPY --chown=redash viz-lib /frontend/viz-lib
 COPY --chown=redash scripts /frontend/scripts
 
-RUN yarn --frozen-lockfile --network-concurrency 1;
+# Controls whether to instrument code for coverage information
+ARG code_coverage
+ENV BABEL_ENV=${code_coverage:+test}
+
+RUN if [ "x$skip_frontend_build" = "x" ] ; then yarn --frozen-lockfile --network-concurrency 1; fi
 
 COPY --chown=redash client /frontend/client
 COPY --chown=redash webpack.config.js /frontend/
-RUN yarn build
-
-FROM frontend-builder-${FRONTEND_BUILD_MODE} as frontend-builder
+RUN if [ "x$skip_frontend_build" = "x" ] ; then yarn build; else mkdir -p /frontend/client/dist && touch /frontend/client/dist/multi_org.html && touch /frontend/client/dist/index.html; fi
 
 FROM python:3.8-slim-bookworm
@@ -66,18 +61,17 @@ RUN apt-get update && \
   apt-get clean && \
   rm -rf /var/lib/apt/lists/*
 
-RUN \
-  curl https://packages.microsoft.com/config/debian/12/prod.list > /etc/apt/sources.list.d/mssql-release.list && \
-  curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor -o /usr/share/keyrings/microsoft-prod.gpg && \
-  apt update && \
-  ACCEPT_EULA=Y apt install -y --no-install-recommends msodbcsql18 && \
-  apt clean && \
-  rm -rf /var/lib/apt/lists/*
-
 ARG TARGETPLATFORM
 ARG databricks_odbc_driver_url=https://databricks-bi-artifacts.s3.us-east-2.amazonaws.com/simbaspark-drivers/odbc/2.6.26/SimbaSparkODBC-2.6.26.1045-Debian-64bit.zip
 RUN if [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
-  curl "$databricks_odbc_driver_url" --location --output /tmp/simba_odbc.zip \
+  curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor -o /usr/share/keyrings/microsoft-prod.gpg \
+  && curl https://packages.microsoft.com/config/debian/12/prod.list > /etc/apt/sources.list.d/mssql-release.list \
+  && apt-get update \
+  && ACCEPT_EULA=Y apt-get install -y --no-install-recommends msodbcsql17 \
+  && apt-get clean \
+  && rm -rf /var/lib/apt/lists/* \
+  && curl "$databricks_odbc_driver_url" --location --output /tmp/simba_odbc.zip \
   && chmod 600 /tmp/simba_odbc.zip \
   && unzip /tmp/simba_odbc.zip -d /tmp/simba \
   && dpkg -i /tmp/simba/*.deb \
@@ -97,8 +91,8 @@ COPY pyproject.toml poetry.lock ./
 ARG POETRY_OPTIONS="--no-root --no-interaction --no-ansi"
 # for LDAP authentication, install with `ldap3` group
 # disabled by default due to GPL license conflict
-ARG INSTALL_GROUPS="main,all_ds,dev"
-RUN /etc/poetry/bin/poetry install --only $INSTALL_GROUPS $POETRY_OPTIONS
+ARG install_groups="main,all_ds,dev"
+RUN /etc/poetry/bin/poetry install --only $install_groups $POETRY_OPTIONS
 
 COPY --chown=redash . /app
 COPY --from=frontend-builder --chown=redash /frontend/client/dist /app/client/dist
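The restored build args can be exercised directly; a backend-only local build would look roughly like this (a sketch, not a command from this commit):

    # Skip the frontend build and install only the main dependency group
    docker build \
      --build-arg skip_frontend_build=true \
      --build-arg install_groups=main \
      -t redash:local .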
Makefile (24 lines changed)
@@ -1,18 +1,10 @@
-.PHONY: compose_build up test_db create_database create_db clean clean-all down tests lint backend-unit-tests frontend-unit-tests pydeps test build watch start redis-cli bash
-
-export COMPOSE_DOCKER_CLI_BUILD=1
-export DOCKER_BUILDKIT=1
-export COMPOSE_PROFILES=local
+.PHONY: compose_build up test_db create_database clean clean-all down tests lint backend-unit-tests frontend-unit-tests test build watch start redis-cli bash
 
 compose_build: .env
-	docker compose build
+	COMPOSE_DOCKER_CLI_BUILD=1 DOCKER_BUILDKIT=1 docker compose build
 
 up:
-	docker compose up -d redis postgres
-	docker compose exec -u postgres postgres psql postgres --csv \
-		-1tqc "SELECT table_name FROM information_schema.tables WHERE table_name = 'organizations'" 2> /dev/null \
-		| grep -q "organizations" || make create_database
-	docker compose up -d --build
+	COMPOSE_DOCKER_CLI_BUILD=1 DOCKER_BUILDKIT=1 docker compose up -d --build
 
 test_db:
 	@for i in `seq 1 5`; do \
@@ -21,11 +13,9 @@ test_db:
 	done
 	docker compose exec postgres sh -c 'psql -U postgres -c "drop database if exists tests;" && psql -U postgres -c "create database tests;"'
 
-create_db: .env
+create_database: .env
 	docker compose run server create_db
 
-create_database: create_db
-
 clean:
 	docker compose down
 	docker compose --project-name cypress down
@@ -54,12 +44,6 @@ env: .env
 format:
 	pre-commit run --all-files
 
-pydeps:
-	pip3 install wheel
-	pip3 install --upgrade black ruff launchpadlib pip setuptools
-	pip3 install poetry
-	poetry install --only main,all_ds,dev
-
 tests:
 	docker compose run server tests
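Taken together, a typical local loop with the restored targets might be (illustrative only):

    make compose_build      # build images, enabling BuildKit per-invocation
    make up                 # start the stack
    make create_database    # initialize tables via the server entrypoint
    make tests              # run the backend test suite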
@@ -1,48 +1,25 @@
 #!/bin/bash
 set -e
 
-if [ -z $REDASH_REDIS_URL ]; then
-  export REDASH_REDIS_URL=redis://:${REDASH_REDIS_PASSWORD}@${REDASH_REDIS_HOSTNAME}:${REDASH_REDIS_PORT}/${REDASH_REDIS_NAME}
-fi
-
-if [ -z $REDASH_DATABASE_URL ]; then
-  export REDASH_DATABASE_URL=postgresql://${REDASH_DATABASE_USER}:${REDASH_DATABASE_PASSWORD}@${REDASH_DATABASE_HOSTNAME}:${REDASH_DATABASE_PORT}/${REDASH_DATABASE_NAME}
-fi
-
 scheduler() {
   echo "Starting RQ scheduler..."
 
-  case $REDASH_PRODUCTION in
-    true)
-      echo "Starting RQ scheduler in production mode"
-      exec ./manage.py rq scheduler
-      ;;
-    *)
-      echo "Starting RQ scheduler in dev mode"
-      exec watchmedo auto-restart \
-        --directory=./redash/ \
-        --pattern=*.py \
-        --recursive -- ./manage.py rq scheduler $QUEUES
-      ;;
-  esac
+  exec /app/manage.py rq scheduler
 }
 
+dev_scheduler() {
+  echo "Starting dev RQ scheduler..."
+
+  exec watchmedo auto-restart --directory=./redash/ --pattern=*.py --recursive -- ./manage.py rq scheduler
+}
+
 worker() {
   echo "Starting RQ worker..."
 
   export WORKERS_COUNT=${WORKERS_COUNT:-2}
   export QUEUES=${QUEUES:-}
-  case $REDASH_PRODUCTION in
-    true)
-      echo "Starting RQ worker in production mode"
-      exec supervisord -c worker.conf
-      ;;
-    *)
-      echo "Starting RQ worker in dev mode"
-      exec watchmedo auto-restart \
-        --directory=./redash/ \
-        --pattern=*.py \
-        --recursive -- ./manage.py rq worker $QUEUES
-      ;;
-  esac
 
+  exec supervisord -c worker.conf
 }
 
 workers_healthcheck() {
@@ -58,63 +35,22 @@ workers_healthcheck() {
   fi
 }
 
+dev_worker() {
+  echo "Starting dev RQ worker..."
+
+  exec watchmedo auto-restart --directory=./redash/ --pattern=*.py --recursive -- ./manage.py rq worker $QUEUES
+}
+
 server() {
   # Recycle gunicorn workers every n-th request. See http://docs.gunicorn.org/en/stable/settings.html#max-requests for more details.
-  case $REDASH_PRODUCTION in
-    true)
-      echo "Starting Redash Server in production mode"
-      MAX_REQUESTS=${MAX_REQUESTS:-1000}
-      MAX_REQUESTS_JITTER=${MAX_REQUESTS_JITTER:-100}
-      TIMEOUT=${REDASH_GUNICORN_TIMEOUT:-60}
-      exec /usr/local/bin/gunicorn \
-        -b 0.0.0.0:5000 \
-        --name redash \
-        -w${REDASH_WEB_WORKERS:-4} redash.wsgi:app \
-        --max-requests $MAX_REQUESTS \
-        --max-requests-jitter $MAX_REQUESTS_JITTER \
-        --timeout $TIMEOUT
-      ;;
-    *)
-      echo "Starting Redash Server in a dev mode"
-      export FLASK_DEBUG=1
-      exec /app/manage.py runserver --debugger --reload -h 0.0.0.0
-      ;;
-  esac
+  MAX_REQUESTS=${MAX_REQUESTS:-1000}
+  MAX_REQUESTS_JITTER=${MAX_REQUESTS_JITTER:-100}
+  TIMEOUT=${REDASH_GUNICORN_TIMEOUT:-60}
+  exec /usr/local/bin/gunicorn -b 0.0.0.0:5000 --name redash -w${REDASH_WEB_WORKERS:-4} redash.wsgi:app --max-requests $MAX_REQUESTS --max-requests-jitter $MAX_REQUESTS_JITTER --timeout $TIMEOUT
 }
 
 create_db() {
-  REDASH_DATABASE_MIGRATE_TIMEOUT=${REDASH_DATABASE_UPGRADE_TIMEOUT:-600}
-  REDASH_DATABASE_MIGRATE_MAX_ATTEMPTS=${REDASH_DATABASE_MIGRATE_MAX_ATTEMPTS:-5}
-  REDASH_DATABASE_MIGRATE_RETRY_WAIT=${REDASH_DATABASE_MIGRATE_RETRY_WAIT:-10}
-  ATTEMPTS=1
-  while ((ATTEMPTS <= REDASH_DATABASE_MIGRATE_MAX_ATTEMPTS)); do
-    echo "Creating or updating Redash database, attempt ${ATTEMPTS} of ${REDASH_DATABASE_MIGRATE_MAX_ATTEMPTS}"
-    ATTEMPTS=$((ATTEMPTS+1))
-    timeout $REDASH_DATABASE_MIGRATE_TIMEOUT /app/manage.py database create_tables
-    timeout $REDASH_DATABASE_MIGRATE_TIMEOUT /app/manage.py db upgrade
-    STATUS=$(timeout $REDASH_DATABASE_MIGRATE_TIMEOUT /app/manage.py status 2>&1)
-    RETCODE=$?
-    case "$RETCODE" in
-      0)
-        exit 0
-        ;;
-      124)
-        echo "Status command timed out after ${REDASH_DATABASE_MIGRATE_TIMEOUT} seconds."
-        ;;
-    esac
-    case "$STATUS" in
-      *sqlalchemy.exc.OperationalError*)
-        echo "Database not yet functional, waiting."
-        ;;
-      *sqlalchemy.exc.ProgrammingError*)
-        echo "Database does not appear to be installed."
-        ;;
-    esac
-    echo "Waiting ${REDASH_DATABASE_MIGRATE_RETRY_WAIT} seconds before retrying."
-    sleep ${REDASH_DATABASE_MIGRATE_RETRY_WAIT}
-  done
-  echo "Reached ${REDASH_DATABASE_MIGRATE_MAX_ATTEMPTS} attempts, giving up."
-  exit 1
+  exec /app/manage.py database create_tables
 }
 
 help() {
@@ -125,16 +61,21 @@ help() {
 
   echo "server -- start Redash server (with gunicorn)"
   echo "worker -- start a single RQ worker"
+  echo "dev_worker -- start a single RQ worker with code reloading"
   echo "scheduler -- start an rq-scheduler instance"
+  echo "dev_scheduler -- start an rq-scheduler instance with code reloading"
   echo ""
   echo "shell -- open shell"
   echo "dev_server -- start Flask development server with debugger and auto reload"
   echo "debug -- start Flask development server with remote debugger via ptvsd"
-  echo "create_db -- create database tables and run migrations"
+  echo "create_db -- create database tables"
   echo "manage -- CLI to manage redash"
   echo "tests -- run tests"
 }
 
 tests() {
   export REDASH_DATABASE_URL="postgresql://postgres@postgres/tests"
 
   if [ $# -eq 0 ]; then
     TEST_ARGS=tests/
   else
@@ -160,10 +101,22 @@ case "$1" in
     shift
     scheduler
     ;;
+  dev_scheduler)
+    shift
+    dev_scheduler
+    ;;
+  dev_worker)
+    shift
+    dev_worker
+    ;;
+  celery_healthcheck)
+    shift
+    echo "DEPRECATED: Celery has been replaced with RQ and now performs healthchecks autonomously as part of the 'worker' entrypoint."
+    ;;
   dev_server)
     export FLASK_DEBUG=1
     exec /app/manage.py runserver --debugger --reload -h 0.0.0.0
     ;;
   debug)
     export FLASK_DEBUG=1
     export REMOTE_DEBUG=1
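The restored case statement dispatches on the first container argument, so a single image serves every role; for instance (hypothetical invocations against the development compose file):

    docker compose run --rm server manage version   # run a one-off CLI command
    docker compose run --rm server create_db        # create database tables
    docker compose up -d worker scheduler           # long-running services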
@@ -43,18 +43,18 @@ function seedDatabase(seedValues) {
 
 function buildServer() {
   console.log("Building the server...");
-  execSync("docker compose build", { stdio: "inherit" });
+  execSync("docker compose -p cypress build", { stdio: "inherit" });
 }
 
 function startServer() {
   console.log("Starting the server...");
-  execSync("docker compose up -d", { stdio: "inherit" });
-  execSync("docker compose run server create_db", { stdio: "inherit" });
+  execSync("docker compose -p cypress up -d", { stdio: "inherit" });
+  execSync("docker compose -p cypress run server create_db", { stdio: "inherit" });
 }
 
 function stopServer() {
   console.log("Stopping the server...");
-  execSync("docker compose down", { stdio: "inherit" });
+  execSync("docker compose -p cypress down", { stdio: "inherit" });
 }
 
 function runCypressCI() {
@@ -68,7 +68,7 @@ function runCypressCI() {
   }
 
   execSync(
-    "docker compose run --name cypress cypress ./node_modules/.bin/percy exec -t 300 -- ./node_modules/.bin/cypress run $CYPRESS_OPTIONS",
+    "COMMIT_INFO_MESSAGE=$(git show -s --format=%s) docker compose run --name cypress cypress ./node_modules/.bin/percy exec -t 300 -- ./node_modules/.bin/cypress run $CYPRESS_OPTIONS",
     { stdio: "inherit" }
   );
 }
@@ -53,12 +53,11 @@ describe("Dashboard Sharing", () => {
   };
 
   const dashboardUrl = this.dashboardUrl;
-  cy.createQuery({ options }).then(({ id: queryId, name: queryName }) => {
+  cy.createQuery({ options }).then(({ id: queryId }) => {
     cy.visit(dashboardUrl);
     editDashboard();
     cy.getByTestId("AddWidgetButton").click();
     cy.getByTestId("AddWidgetDialog").within(() => {
-      cy.get("input").type(queryName);
       cy.get(`.query-selector-result[data-test="QueryId${queryId}"]`).click();
     });
     cy.contains("button", "Add to Dashboard").click();
@@ -179,12 +178,11 @@ describe("Dashboard Sharing", () => {
   };
 
   const dashboardUrl = this.dashboardUrl;
-  cy.createQuery({ options }).then(({ id: queryId, name: queryName }) => {
+  cy.createQuery({ options }).then(({ id: queryId }) => {
     cy.visit(dashboardUrl);
     editDashboard();
     cy.getByTestId("AddWidgetButton").click();
     cy.getByTestId("AddWidgetDialog").within(() => {
-      cy.get("input").type(queryName);
       cy.get(`.query-selector-result[data-test="QueryId${queryId}"]`).click();
     });
     cy.contains("button", "Add to Dashboard").click();
@@ -18,12 +18,11 @@ describe("Widget", () => {
   };
 
   it("adds widget", function() {
-    cy.createQuery().then(({ id: queryId, name: queryName }) => {
+    cy.createQuery().then(({ id: queryId }) => {
       cy.visit(this.dashboardUrl);
       editDashboard();
       cy.getByTestId("AddWidgetButton").click();
       cy.getByTestId("AddWidgetDialog").within(() => {
-        cy.get("input").type(queryName);
         cy.get(`.query-selector-result[data-test="QueryId${queryId}"]`).click();
       });
       cy.contains("button", "Add to Dashboard").click();
compose.yaml (89 lines changed)
@@ -1,38 +1,52 @@
 # This configuration file is for the **development** setup.
 # For a production example please refer to getredash/setup repository on GitHub.
+x-redash-service: &redash-service
+  build:
+    context: .
+    args:
+      skip_frontend_build: "true" # set to empty string to build
+  volumes:
+    - .:/app
+  env_file:
+    - .env
+x-redash-environment: &redash-environment
+  REDASH_LOG_LEVEL: "INFO"
+  REDASH_REDIS_URL: "redis://redis:6379/0"
+  REDASH_DATABASE_URL: "postgresql://postgres@postgres/postgres"
+  REDASH_RATELIMIT_ENABLED: "false"
+  REDASH_MAIL_DEFAULT_SENDER: "redash@example.com"
+  REDASH_MAIL_SERVER: "email"
+  REDASH_MAIL_PORT: 1025
+  REDASH_ENFORCE_CSRF: "true"
+  REDASH_GUNICORN_TIMEOUT: 60
+  # Set secret keys in the .env file
 services:
   server:
-    extends:
-      file: compose.base.yaml
-      service: .redash
-    command: server
+    <<: *redash-service
+    command: dev_server
     depends_on:
       - postgres
       - redis
     ports:
-      - "${REDASH_PORT:-5001}:5000"
+      - "5001:5000"
+      - "5678:5678"
+    environment:
+      <<: *redash-environment
+      PYTHONUNBUFFERED: 0
   scheduler:
-    extends:
-      file: compose.base.yaml
-      service: .redash
-    profiles:
-      - e2e
-      - local
-    command: scheduler
-    depends_on:
-      - server
-  worker:
-    extends:
-      file: compose.base.yaml
-      service: .redash
-    profiles:
-      - e2e
-      - local
-    command: worker
+    <<: *redash-service
+    command: dev_scheduler
     depends_on:
       - server
+    environment:
+      <<: *redash-environment
+  worker:
+    <<: *redash-service
+    command: dev_worker
+    depends_on:
+      - server
+    environment:
+      <<: *redash-environment
+      PYTHONUNBUFFERED: 0
   redis:
     image: redis:7-alpine
@@ -40,42 +54,17 @@ services:
   postgres:
     image: pgautoupgrade/pgautoupgrade:latest
     ports:
-      - "${POSTGRES_PORT:-15432}:5432"
+      - "15432:5432"
     # The following turns the DB into less durable, but gains significant performance improvements for the tests run (x3
     # improvement on my personal machine). We should consider moving this into a dedicated Docker Compose configuration for
     # tests.
-    command: postgres -c fsync=off -c full_page_writes=off -c synchronous_commit=OFF
+    command: "postgres -c fsync=off -c full_page_writes=off -c synchronous_commit=OFF"
     restart: unless-stopped
     environment:
-      POSTGRES_HOST_AUTH_METHOD: trust
+      POSTGRES_HOST_AUTH_METHOD: "trust"
   email:
     image: maildev/maildev
     ports:
       - "1080:1080"
       - "1025:1025"
     restart: unless-stopped
-  cypress:
-    ipc: host
-    build:
-      context: .
-      dockerfile: Dockerfile.cypress
-    profiles:
-      - e2e
-    depends_on:
-      - server
-      - worker
-      - scheduler
-    environment:
-      CYPRESS_baseUrl: http://server:5000
-      PERCY_TOKEN: ${PERCY_TOKEN:-""}
-      PERCY_BRANCH: ${PERCY_BRANCH:-""}
-      PERCY_COMMIT: ${PERCY_COMMIT:-""}
-      PERCY_PULL_REQUEST: ${PERCY_PULL_REQUEST:-}
-      COMMIT_INFO_BRANCH: ${COMMIT_INFO_BRANCH:-""}
-      COMMIT_INFO_MESSAGE: ${COMMIT_INFO_MESSAGE:-""}
-      COMMIT_INFO_AUTHOR: ${COMMIT_INFO_AUTHOR:-""}
-      COMMIT_INFO_SHA: ${COMMIT_INFO_SHA:-""}
-      COMMIT_INFO_REMOTE: ${COMMIT_INFO_REMOTE:-""}
-      CYPRESS_PROJECT_ID: ${CYPRESS_PROJECT_ID:-""}
-      CYPRESS_RECORD_KEY: ${CYPRESS_RECORD_KEY:-""}
-      CYPRESS_COVERAGE: ${CYPRESS_COVERAGE:-true}
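With the restored development compose.yaml above, the dev server binds host port 5001 (plus 5678 for the remote debugger), so a quick smoke test after bringing the stack up might be (a sketch, assuming the stack is healthy):

    docker compose up -d
    curl http://localhost:5001/ping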
@@ -24,7 +24,7 @@
     "jest": "TZ=Africa/Khartoum jest",
     "test": "run-s type-check jest",
     "test:watch": "jest --watch",
-    "cypress": "COMPOSE_PROFILES=local node client/cypress/cypress.js",
+    "cypress": "node client/cypress/cypress.js",
     "preinstall": "cd viz-lib && yarn link --link-folder ../.yarn",
     "postinstall": "(cd viz-lib && yarn --frozen-lockfile && yarn build:babel) && yarn link --link-folder ./.yarn @redash/viz"
   },
@@ -436,7 +436,6 @@ class TestQueryResultExcelResponse(BaseTestCase):
 
 class TestJobResource(BaseTestCase):
     def test_cancels_queued_queries(self):
-
         query = self.factory.create_query()
         job_id = self.make_request(
             "post",