Mirror of https://github.com/getredash/redash.git (synced 2025-12-25 01:03:20 -05:00)

Compare commits: 23.12.0-dev ... 24.05.0-dev (94 commits)
SHA1:
2b974e12ed, 372adfed6b, dbab9cadb4, 06244716e6, f09760389a, 84e6d3cad5, 3399e3761e, 1c48b2218b,
5ac5d86f5e, 5e4764af9c, e2a39de7d1, 6c68b48917, 7e8a61c73d, 991e94dd6a, 2ffeecb813, 3dd855aef1,
713aca440a, 70bb684d9e, 4034f791c3, b9875a231b, 062a70cf20, c12d45077a, 6d6412753d, 275e12e7c1,
77d7508cee, 9601660751, 45c6fa0591, 95ecb8e229, cb0707176c, d7247f8b84, 776703fab7, 34cde71238,
f631075be3, 3f19534301, 24dec192ee, 82d88ed4eb, af0773c58a, 15e6583d72, 4eb5f4e47f, a0f5c706ff,
702a550659, 38a06c7ab9, a6074878bb, fb348c7116, 24419863ec, c4d3d9c683, 1672cd9280, 6575a6499a,
e360e4658e, 107933c363, 667a696ca5, 7d0d242072, d554136f70, 34723e2f3e, 11794b3fe3, 3997916d77,
b09a2256dc, 95a45bb4dc, 7cd03c797c, 1200f9887a, 81d22f1eb2, 2fe0326280, 094984f564, 52cd6ff006,
939bec2114, 320fddfd52, ab39283ae6, 6386905616, d986b976e5, a600921c0b, af2f4af8a2, 49a5e74283,
b98b5f2ba4, d245ff7bb1, 97db492531, 30e7392933, a54171f2c2, cd03da3260, 4c47bef582, ec1c4d07de,
4d5103978b, 3c2c2786ed, cd482e780a, 4d81c3148d, 1b1b9bd98d, 473cf29c9f, cbde237b12, 998dc31eb0,
2505e8ab3b, 858fc4d78f, 3e500ea18e, 58bf96c298, 66ef942572, 9bbdb4b765
@@ -1,26 +0,0 @@
|
||||
version: '2.2'
|
||||
services:
|
||||
redash:
|
||||
build: ../
|
||||
command: manage version
|
||||
depends_on:
|
||||
- postgres
|
||||
- redis
|
||||
ports:
|
||||
- "5000:5000"
|
||||
environment:
|
||||
PYTHONUNBUFFERED: 0
|
||||
REDASH_LOG_LEVEL: "INFO"
|
||||
REDASH_REDIS_URL: "redis://redis:6379/0"
|
||||
POSTGRES_PASSWORD: "FmTKs5vX52ufKR1rd8tn4MoSP7zvCJwb"
|
||||
REDASH_DATABASE_URL: "postgresql://postgres:FmTKs5vX52ufKR1rd8tn4MoSP7zvCJwb@postgres/postgres"
|
||||
REDASH_COOKIE_SECRET: "2H9gNG9obnAQ9qnR9BDTQUph6CbXKCzF"
|
||||
redis:
|
||||
image: redis:7-alpine
|
||||
restart: unless-stopped
|
||||
postgres:
|
||||
image: pgautoupgrade/pgautoupgrade:15-alpine3.8
|
||||
command: "postgres -c fsync=off -c full_page_writes=off -c synchronous_commit=OFF"
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
POSTGRES_HOST_AUTH_METHOD: "trust"
|
||||
@@ -1,74 +0,0 @@
|
||||
version: "2.2"
|
||||
x-redash-service: &redash-service
|
||||
build:
|
||||
context: ../
|
||||
args:
|
||||
install_groups: "main"
|
||||
code_coverage: ${CODE_COVERAGE}
|
||||
x-redash-environment: &redash-environment
|
||||
REDASH_LOG_LEVEL: "INFO"
|
||||
REDASH_REDIS_URL: "redis://redis:6379/0"
|
||||
POSTGRES_PASSWORD: "FmTKs5vX52ufKR1rd8tn4MoSP7zvCJwb"
|
||||
REDASH_DATABASE_URL: "postgresql://postgres:FmTKs5vX52ufKR1rd8tn4MoSP7zvCJwb@postgres/postgres"
|
||||
REDASH_RATELIMIT_ENABLED: "false"
|
||||
REDASH_ENFORCE_CSRF: "true"
|
||||
REDASH_COOKIE_SECRET: "2H9gNG9obnAQ9qnR9BDTQUph6CbXKCzF"
|
||||
services:
|
||||
server:
|
||||
<<: *redash-service
|
||||
command: server
|
||||
depends_on:
|
||||
- postgres
|
||||
- redis
|
||||
ports:
|
||||
- "5000:5000"
|
||||
environment:
|
||||
<<: *redash-environment
|
||||
PYTHONUNBUFFERED: 0
|
||||
scheduler:
|
||||
<<: *redash-service
|
||||
command: scheduler
|
||||
depends_on:
|
||||
- server
|
||||
environment:
|
||||
<<: *redash-environment
|
||||
worker:
|
||||
<<: *redash-service
|
||||
command: worker
|
||||
depends_on:
|
||||
- server
|
||||
environment:
|
||||
<<: *redash-environment
|
||||
PYTHONUNBUFFERED: 0
|
||||
cypress:
|
||||
ipc: host
|
||||
build:
|
||||
context: ../
|
||||
dockerfile: .ci/Dockerfile.cypress
|
||||
depends_on:
|
||||
- server
|
||||
- worker
|
||||
- scheduler
|
||||
environment:
|
||||
CYPRESS_baseUrl: "http://server:5000"
|
||||
CYPRESS_coverage: ${CODE_COVERAGE}
|
||||
PERCY_TOKEN: ${PERCY_TOKEN}
|
||||
PERCY_BRANCH: ${CIRCLE_BRANCH}
|
||||
PERCY_COMMIT: ${CIRCLE_SHA1}
|
||||
PERCY_PULL_REQUEST: ${CIRCLE_PR_NUMBER}
|
||||
COMMIT_INFO_BRANCH: ${CIRCLE_BRANCH}
|
||||
COMMIT_INFO_MESSAGE: ${COMMIT_INFO_MESSAGE}
|
||||
COMMIT_INFO_AUTHOR: ${CIRCLE_USERNAME}
|
||||
COMMIT_INFO_SHA: ${CIRCLE_SHA1}
|
||||
COMMIT_INFO_REMOTE: ${CIRCLE_REPOSITORY_URL}
|
||||
CYPRESS_PROJECT_ID: ${CYPRESS_PROJECT_ID}
|
||||
CYPRESS_RECORD_KEY: ${CYPRESS_RECORD_KEY}
|
||||
redis:
|
||||
image: redis:7-alpine
|
||||
restart: unless-stopped
|
||||
postgres:
|
||||
image: pgautoupgrade/pgautoupgrade:15-alpine3.8
|
||||
command: "postgres -c fsync=off -c full_page_writes=off -c synchronous_commit=OFF"
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
POSTGRES_HOST_AUTH_METHOD: "trust"
|
||||
@@ -1,39 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This script only needs to run on the main Redash repo
|
||||
|
||||
if [ "${GITHUB_REPOSITORY}" != "getredash/redash" ]; then
|
||||
echo "Skipping image build for Docker Hub, as this isn't the main Redash repository"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "${GITHUB_REF_NAME}" != "master" ] && [ "${GITHUB_REF_NAME}" != "preview-image" ]; then
|
||||
echo "Skipping image build for Docker Hub, as this isn't the 'master' nor 'preview-image' branch"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "x${DOCKER_USER}" = "x" ] || [ "x${DOCKER_PASS}" = "x" ]; then
|
||||
echo "Skipping image build for Docker Hub, as the login details aren't available"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
set -e
|
||||
VERSION=$(jq -r .version package.json)
|
||||
VERSION_TAG="$VERSION.b${GITHUB_RUN_ID}.${GITHUB_RUN_NUMBER}"
|
||||
|
||||
export DOCKER_BUILDKIT=1
|
||||
export COMPOSE_DOCKER_CLI_BUILD=1
|
||||
|
||||
docker login -u "${DOCKER_USER}" -p "${DOCKER_PASS}"
|
||||
|
||||
DOCKERHUB_REPO="redash/redash"
|
||||
DOCKER_TAGS="-t redash/redash:preview -t redash/preview:${VERSION_TAG}"
|
||||
|
||||
# Build the docker container
|
||||
docker build --build-arg install_groups="main,all_ds,dev" ${DOCKER_TAGS} .
|
||||
|
||||
# Push the container to the preview build locations
|
||||
docker push "${DOCKERHUB_REPO}:preview"
|
||||
docker push "redash/preview:${VERSION_TAG}"
|
||||
|
||||
echo "Built: ${VERSION_TAG}"
|
||||
9  .ci/pack

@@ -1,9 +0,0 @@
#!/bin/bash
NAME=redash
VERSION=$(jq -r .version package.json)
FULL_VERSION=$VERSION+b$CIRCLE_BUILD_NUM
FILENAME=$NAME.$FULL_VERSION.tar.gz

mkdir -p /tmp/artifacts/

tar -zcv -f /tmp/artifacts/$FILENAME --exclude=".git" --exclude="optipng*" --exclude="cypress" --exclude="*.pyc" --exclude="*.pyo" --exclude="venv" *
@@ -1,6 +0,0 @@
|
||||
#!/bin/bash
|
||||
VERSION=$(jq -r .version package.json)
|
||||
FULL_VERSION=${VERSION}+b${GITHUB_RUN_ID}.${GITHUB_RUN_NUMBER}
|
||||
|
||||
sed -ri "s/^__version__ = '([A-Za-z0-9.-]*)'/__version__ = '${FULL_VERSION}'/" redash/__init__.py
|
||||
sed -i "s/dev/${GITHUB_SHA}/" client/app/version.json
|
||||
@@ -1,5 +1,4 @@
client/.tmp/
client/dist/
node_modules/
viz-lib/node_modules/
.tmp/
222  .github/workflows/ci.yml (vendored)

@@ -3,20 +3,44 @@ on:
  push:
    branches:
      - master
  pull_request:
    tags:
      - '*'
  pull_request_target:
    branches:
      - master
env:
  NODE_VERSION: 16.20.1
  CYPRESS_COVERAGE: "true"
  NODE_VERSION: 18
  YARN_VERSION: 1.22.22
  REDASH_COOKIE_SECRET: 2H9gNG9obnAQ9qnR9BDTQUph6CbXKCzF
  REDASH_SECRET_KEY: 2H9gNG9obnAQ9qnR9BDTQUph6CbXKCzF
  COMPOSE_DOCKER_CLI_BUILD: 1
  DOCKER_BUILDKIT: 1
  FRONTEND_BUILD_MODE: 1
  INSTALL_GROUPS: main,all_ds,dev
  PERCY_BRANCH: ${{github.head_ref || github.ref_name}}
  PERCY_COMMIT: ${{github.sha}}
  PERCY_PULL_REQUEST: ${{github.event.number}}
  COMMIT_INFO_BRANCH: ${{github.head_ref || github.ref_name}}
  COMMIT_INFO_MESSAGE: ${{github.event.head_commit.message}}
  COMMIT_INFO_AUTHOR: ${{github.event.pull_request.user.login}}
  COMMIT_INFO_SHA: ${{github.sha}}
  COMMIT_INFO_REMOTE: ${{github.server_url}}/${{github.repository}}
jobs:
  backend-lint:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
      - if: github.event.pull_request.mergeable == 'false'
        name: Exit if PR is not mergeable
        run: exit 1
      - uses: actions/checkout@v4
        with:
          fetch-depth: 1
      - uses: actions/setup-python@v4
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: actions/setup-python@v5
        with:
          python-version: '3.8'
      - run: sudo pip install black==23.1.0 ruff==0.0.287
      - run: sudo pip install black==24.3.0 ruff==0.1.9
      - run: ruff check .
      - run: black --check .
@@ -24,26 +48,28 @@ jobs:
    runs-on: ubuntu-22.04
    needs: backend-lint
    env:
      COMPOSE_FILE: .ci/docker-compose.ci.yml
      COMPOSE_PROJECT_NAME: redash
      COMPOSE_DOCKER_CLI_BUILD: 1
      DOCKER_BUILDKIT: 1
      FRONTEND_BUILD_MODE: 0
    steps:
      - uses: actions/checkout@v3
      - if: github.event.pull_request.mergeable == 'false'
        name: Exit if PR is not mergeable
        run: exit 1
      - uses: actions/checkout@v4
        with:
          fetch-depth: 1
          ref: ${{ github.event.pull_request.head.sha }}
      - name: Build Docker Images
        run: |
          set -x
          docker compose build --build-arg install_groups="main,all_ds,dev" --build-arg skip_frontend_build=true
          touch .env
          docker compose build
          docker compose up -d
          sleep 10
      - name: Create Test Database
        run: docker compose -p redash run --rm postgres psql -h postgres -U postgres -c "create database tests;"
        run: docker compose run --rm postgres psql -h postgres -U postgres -c "create database tests;"
      - name: List Enabled Query Runners
        run: docker compose -p redash run --rm redash manage ds list_types
        run: docker compose run --rm server manage ds list_types
      - name: Run Tests
        run: docker compose -p redash run --name tests redash tests --junitxml=junit.xml --cov-report=xml --cov=redash --cov-config=.coveragerc tests/
        run: docker compose run --name tests server tests --junitxml=junit.xml --cov-report=xml --cov=redash --cov-config=.coveragerc tests/
      - name: Copy Test Results
        run: |
          mkdir -p /tmp/test-results/unit-tests
@@ -51,90 +77,107 @@ jobs:
          docker cp tests:/app/junit.xml /tmp/test-results/unit-tests/results.xml
      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v3
      - name: Store Test Results
        uses: actions/upload-artifact@v3
        with:
          name: test-results
          token: ${{ secrets.CODECOV_TOKEN }}
      - name: Store Test Results
        uses: actions/upload-artifact@v4
        with:
          name: backend-test-results
          path: /tmp/test-results
      - name: Store Coverage Results
        uses: actions/upload-artifact@v3
        uses: actions/upload-artifact@v4
        with:
          name: coverage
          name: backend-coverage
          path: coverage.xml

  frontend-lint:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
      - if: github.event.pull_request.mergeable == 'false'
        name: Exit if PR is not mergeable
        run: exit 1
      - uses: actions/checkout@v4
        with:
          fetch-depth: 1
      - uses: actions/setup-node@v3
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'yarn'
      - name: Install Dependencies
        run: |
          npm install --global --force yarn@1.22.19
          yarn cache clean && yarn --frozen-lockfile --network-concurrency 1
          npm install --global --force yarn@$YARN_VERSION
          yarn cache clean
          yarn --frozen-lockfile --network-concurrency 1
      - name: Run Lint
        run: yarn lint:ci
      - name: Store Test Results
        uses: actions/upload-artifact@v3
        uses: actions/upload-artifact@v4
        with:
          name: test-results
          name: frontend-test-results
          path: /tmp/test-results

  frontend-unit-tests:
    runs-on: ubuntu-22.04
    needs: frontend-lint
    steps:
      - uses: actions/checkout@v3
      - if: github.event.pull_request.mergeable == 'false'
        name: Exit if PR is not mergeable
        run: exit 1
      - uses: actions/checkout@v4
        with:
          fetch-depth: 1
      - uses: actions/setup-node@v3
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'yarn'
      - name: Install Dependencies
        run: |
          npm install --global --force yarn@1.22.19
          yarn cache clean && yarn --frozen-lockfile --network-concurrency 1
          npm install --global --force yarn@$YARN_VERSION
          yarn cache clean
          yarn --frozen-lockfile --network-concurrency 1
      - name: Run App Tests
        run: yarn test
      - name: Run Visualizations Tests
        run: cd viz-lib && yarn test
        run: |
          cd viz-lib
          yarn test
      - run: yarn lint

  frontend-e2e-tests:
    runs-on: ubuntu-22.04
    needs: frontend-lint
    env:
      COMPOSE_FILE: .ci/docker-compose.cypress.yml
      COMPOSE_PROJECT_NAME: cypress
      PERCY_TOKEN_ENCODED: ZGRiY2ZmZDQ0OTdjMzM5ZWE0ZGQzNTZiOWNkMDRjOTk4Zjg0ZjMxMWRmMDZiM2RjOTYxNDZhOGExMjI4ZDE3MA==
      CYPRESS_PROJECT_ID_ENCODED: OTI0Y2th
      CYPRESS_RECORD_KEY_ENCODED: YzA1OTIxMTUtYTA1Yy00NzQ2LWEyMDMtZmZjMDgwZGI2ODgx
      CYPRESS_INSTALL_BINARY: 0
      PUPPETEER_SKIP_CHROMIUM_DOWNLOAD: 1
      INSTALL_GROUPS: main
      COMPOSE_PROFILES: e2e
      PERCY_TOKEN: ${{ secrets.PERCY_TOKEN }}
      CYPRESS_PROJECT_ID: ${{ secrets.CYPRESS_PROJECT_ID }}
      CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }}
    steps:
      - uses: actions/checkout@v3
      - if: github.event.pull_request.mergeable == 'false'
        name: Exit if PR is not mergeable
        run: exit 1
      - uses: actions/checkout@v4
        with:
          fetch-depth: 1
      - uses: actions/setup-node@v3
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'yarn'
      - name: Enable Code Coverage Report For Master Branch
        if: endsWith(github.ref, '/master')
        run: |
          echo "CODE_COVERAGE=true" >> "$GITHUB_ENV"
      - name: Install Dependencies
        run: |
          npm install --global --force yarn@1.22.19
          yarn cache clean && yarn --frozen-lockfile --network-concurrency 1
          npm install --global --force yarn@$YARN_VERSION
          yarn cache clean
          yarn --frozen-lockfile --network-concurrency 1
      - name: Setup Redash Server
        run: |
          set -x
          touch .env
          yarn build
          yarn cypress build
          yarn cypress start -- --skip-db-seed
          docker compose run cypress yarn cypress db-seed
@@ -146,93 +189,12 @@ jobs:
      - name: Copy Code Coverage Results
        run: docker cp cypress:/usr/src/app/coverage ./coverage || true
      - name: Store Coverage Results
        uses: actions/upload-artifact@v3
        uses: actions/upload-artifact@v4
        with:
          name: coverage
          name: frontend-coverage
          path: coverage

  build-skip-check:
    runs-on: ubuntu-22.04
    outputs:
      skip: ${{ steps.skip-check.outputs.skip }}
    steps:
      - name: Skip?
        id: skip-check
        run: |
          if [[ "${{ vars.DOCKER_USER }}" == '' ]]; then
            echo 'Docker user is empty. Skipping build+push'
            echo skip=true >> "$GITHUB_OUTPUT"
          elif [[ "${{ secrets.DOCKER_PASS }}" == '' ]]; then
            echo 'Docker password is empty. Skipping build+push'
            echo skip=true >> "$GITHUB_OUTPUT"
          elif [[ "${{ github.ref_name }}" != 'master' ]]; then
            echo 'Ref name is not `master`. Skipping build+push'
            echo skip=true >> "$GITHUB_OUTPUT"
          else
            echo 'Docker user and password are set and branch is `master`.'
            echo 'Building + pushing `preview` image.'
            echo skip=false >> "$GITHUB_OUTPUT"
          fi

  build-docker-image:
    runs-on: ubuntu-22.04
    needs:
      - backend-unit-tests
      - frontend-unit-tests
      - frontend-e2e-tests
      - build-skip-check
    if: needs.build-skip-check.outputs.skip == 'false'
    steps:
      - uses: actions/checkout@v3
      - uses: actions/upload-artifact@v4
        with:
          fetch-depth: 1
      - uses: actions/setup-node@v3
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'yarn'
      - name: Install Dependencies
        run: |
          npm install --global --force yarn@1.22.19
          yarn cache clean && yarn --frozen-lockfile --network-concurrency 1

      - name: Set up QEMU
        timeout-minutes: 1
        uses: docker/setup-qemu-action@v2.2.0

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to DockerHub
        uses: docker/login-action@v2
        with:
          username: ${{ vars.DOCKER_USER }}
          password: ${{ secrets.DOCKER_PASS }}

      - name: Bump version
        id: version
        run: |
          set -x
          .ci/update_version
          VERSION=$(jq -r .version package.json)
          VERSION_TAG="${VERSION}.b${GITHUB_RUN_ID}.${GITHUB_RUN_NUMBER}"
          echo "VERSION_TAG=$VERSION_TAG" >> "$GITHUB_OUTPUT"

      - name: Build and push preview image to Docker Hub
        uses: docker/build-push-action@v4
        with:
          push: true
          tags: |
            redash/redash:preview
            redash/preview:${{ steps.version.outputs.VERSION_TAG }}
          context: .
          build-args: |
            test_all_deps=true
          cache-from: type=gha
          cache-to: type=gha,mode=max
          platforms: linux/amd64
        env:
          DOCKER_CONTENT_TRUST: true

      - name: "Failure: output container logs to console"
        if: failure()
        run: docker compose logs
          name: frontend
          path: client/dist
          retention-days: 1
2  .github/workflows/periodic-snapshot.yml (vendored)

@@ -13,6 +13,8 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          ssh-key: ${{secrets.ACTION_PUSH_KEY}}
      - run: |
          date="$(date +%y.%m).0-dev"
          gawk -i inplace -F: -v q=\" -v tag=$date '/^ "version": / { print $1 FS, q tag q ","; next} { print }' package.json
154  .github/workflows/preview-image.yml (vendored, new file)

@@ -0,0 +1,154 @@
name: Preview Image
on:
  workflow_run:
    workflows:
      - Tests
    types:
      - completed
    branches:
      - master

env:
  DOCKER_REPO: redash

jobs:
  build-skip-check:
    runs-on: ubuntu-22.04
    if: ${{ github.event.workflow_run.conclusion == 'success' }}
    outputs:
      skip: ${{ steps.skip-check.outputs.skip }}
    steps:
      - name: Skip?
        id: skip-check
        run: |
          if [[ "${{ vars.DOCKER_USER }}" == '' ]]; then
            echo 'Docker user is empty. Skipping build+push'
            echo skip=true >> "$GITHUB_OUTPUT"
          elif [[ "${{ secrets.DOCKER_PASS }}" == '' ]]; then
            echo 'Docker password is empty. Skipping build+push'
            echo skip=true >> "$GITHUB_OUTPUT"
          else
            echo 'Docker user and password are set and branch is `master`.'
            echo 'Building + pushing `preview` image.'
            echo skip=false >> "$GITHUB_OUTPUT"
          fi

  build-docker-image:
    runs-on: ubuntu-22.04
    needs:
      - build-skip-check
    outputs:
      version: ${{ steps.version.outputs.VERSION_TAG }}
      repo: ${{ steps.version.outputs.DOCKER_REPO }}
    if: needs.build-skip-check.outputs.skip == 'false'
    strategy:
      fail-fast: false
      matrix:
        platform:
          - linux/amd64
          - linux/arm64
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 1
          ref: ${{ github.event.push.after }}
      - uses: dawidd6/action-download-artifact@v3
        with:
          name: frontend
          workflow: ci.yml
          github_token: ${{ secrets.GITHUB_TOKEN }}
          run_id: ${{ github.event.workflow_run.id }}
          path: client/dist
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ vars.DOCKER_USER }}
          password: ${{ secrets.DOCKER_PASS }}
      - name: Set version
        id: version
        run: |
          set -x
          VERSION=$(jq -r .version package.json)
          FULL_VERSION=${VERSION}-b${GITHUB_RUN_ID}.${GITHUB_RUN_NUMBER}
          sed -ri "s/^__version__ = ([A-Za-z0-9.-]*)'/__version__ = '${FULL_VERSION}'/" redash/__init__.py
          sed -i "s/dev/${GITHUB_SHA}/" client/app/version.json
          echo "VERSION_TAG=$FULL_VERSION" >> "$GITHUB_OUTPUT"
          platform=${{ matrix.platform }}
          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
          echo "SCOPE=${platform//\//-}" >> $GITHUB_ENV
          if [[ "${{ vars.DOCKER_REPO }}" != "" ]]; then
            echo "DOCKER_REPO=${{ vars.DOCKER_REPO }}" >> $GITHUB_ENV
            echo "DOCKER_REPO=${{ vars.DOCKER_REPO }}" >> $GITHUB_OUTPUT
          else
            echo "DOCKER_REPO=${DOCKER_REPO}" >> $GITHUB_ENV
            echo "DOCKER_REPO=${DOCKER_REPO}" >> $GITHUB_OUTPUT
          fi
      - name: Build and push preview image to Docker Hub
        uses: docker/build-push-action@v5
        id: build
        with:
          push: true
          context: .
          cache-from: type=gha,scope=${{ env.SCOPE }}
          cache-to: type=gha,mode=max,scope=${{ env.SCOPE }}
          platforms: ${{ matrix.platform }}
          outputs: type=image,name=${{ env.DOCKER_REPO }}/redash,push-by-digest=true,name-canonical=true,push=true
          build-args: |
            FRONTEND_BUILD_MODE=1
        env:
          DOCKER_CONTENT_TRUST: true
      - name: Export digest
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "/tmp/digests/${digest#sha256:}"
      - name: Upload digest
        uses: actions/upload-artifact@v4
        with:
          name: digests-${{ env.PLATFORM_PAIR }}
          path: /tmp/digests/*
          if-no-files-found: error
          retention-days: 1

  publish-docker-manifest:
    runs-on: ubuntu-22.04
    needs:
      - build-skip-check
      - build-docker-image
    if: needs.build-skip-check.outputs.skip == 'false'
    steps:
      - name: Download digests
        uses: actions/download-artifact@v4
        with:
          pattern: digests-*
          path: /tmp/digests
          merge-multiple: true
      - name: Setup Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ needs.build-docker-image.outputs.repo }}/redash
          tags: preview
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ vars.DOCKER_USER }}
          password: ${{ secrets.DOCKER_PASS }}
      - name: Create manifest list and push
        working-directory: /tmp/digests
        run: |
          docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
            $(printf '${{ needs.build-docker-image.outputs.repo }}/redash@sha256:%s ' *)
      - name: Inspect image
        run: |
          REDASH_IMAGE="${{ needs.build-docker-image.outputs.repo }}/redash:${{ steps.meta.outputs.version }}"
          docker buildx imagetools inspect $REDASH_IMAGE
      - name: Push image ${{ needs.build-docker-image.outputs.repo }}/preview image
        run: |
          REDASH_IMAGE="${{ needs.build-docker-image.outputs.repo }}/redash:preview"
          PREVIEW_IMAGE="${{ needs.build-docker-image.outputs.repo }}/preview:${{ needs.build-docker-image.outputs.version }}"
          docker buildx imagetools create --tag $PREVIEW_IMAGE $REDASH_IMAGE
53  Dockerfile

@@ -1,29 +1,35 @@
FROM node:16.20.1-bookworm as frontend-builder

RUN npm install --global --force yarn@1.22.19

# Controls whether to build the frontend assets
ARG skip_frontend_build
ARG FRONTEND_BUILD_MODE=0

# MODE 0: create empty files. useful for backend tests
FROM alpine:3.19 as frontend-builder-0
RUN \
  mkdir -p /frontend/client/dist && \
  touch /frontend/client/dist/multi_org.html && \
  touch /frontend/client/dist/index.html

# MODE 1: copy static frontend from host, useful for CI to ignore building static content multiple times
FROM alpine:3.19 as frontend-builder-1
COPY client/dist /frontend/client/dist

# MODE 2: build static content in docker, can be used for a local development
FROM node:18-bookworm as frontend-builder-2
RUN npm install --global --force yarn@1.22.22
ENV CYPRESS_INSTALL_BINARY=0
ENV PUPPETEER_SKIP_CHROMIUM_DOWNLOAD=1

RUN useradd -m -d /frontend redash
USER redash

WORKDIR /frontend
COPY --chown=redash package.json yarn.lock .yarnrc /frontend/
COPY --chown=redash viz-lib /frontend/viz-lib
COPY --chown=redash scripts /frontend/scripts

# Controls whether to instrument code for coverage information
ARG code_coverage
ENV BABEL_ENV=${code_coverage:+test}

RUN if [ "x$skip_frontend_build" = "x" ] ; then yarn --frozen-lockfile --network-concurrency 1; fi

RUN yarn --frozen-lockfile --network-concurrency 1;
COPY --chown=redash client /frontend/client
COPY --chown=redash webpack.config.js /frontend/
RUN if [ "x$skip_frontend_build" = "x" ] ; then yarn build; else mkdir -p /frontend/client/dist && touch /frontend/client/dist/multi_org.html && touch /frontend/client/dist/index.html; fi
RUN yarn build

FROM frontend-builder-${FRONTEND_BUILD_MODE} as frontend-builder

FROM python:3.8-slim-bookworm

@@ -60,17 +66,18 @@ RUN apt-get update && \
  apt-get clean && \
  rm -rf /var/lib/apt/lists/*

RUN \
  curl https://packages.microsoft.com/config/debian/12/prod.list > /etc/apt/sources.list.d/mssql-release.list && \
  curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor -o /usr/share/keyrings/microsoft-prod.gpg && \
  apt update && \
  ACCEPT_EULA=Y apt install -y --no-install-recommends msodbcsql18 && \
  apt clean && \
  rm -rf /var/lib/apt/lists/*

ARG TARGETPLATFORM
ARG databricks_odbc_driver_url=https://databricks-bi-artifacts.s3.us-east-2.amazonaws.com/simbaspark-drivers/odbc/2.6.26/SimbaSparkODBC-2.6.26.1045-Debian-64bit.zip
RUN if [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
  curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor -o /usr/share/keyrings/microsoft-prod.gpg \
  && curl https://packages.microsoft.com/config/debian/12/prod.list > /etc/apt/sources.list.d/mssql-release.list \
  && apt-get update \
  && ACCEPT_EULA=Y apt-get install -y --no-install-recommends msodbcsql17 \
  && apt-get clean \
  && rm -rf /var/lib/apt/lists/* \
  && curl "$databricks_odbc_driver_url" --location --output /tmp/simba_odbc.zip \
  curl "$databricks_odbc_driver_url" --location --output /tmp/simba_odbc.zip \
  && chmod 600 /tmp/simba_odbc.zip \
  && unzip /tmp/simba_odbc.zip -d /tmp/simba \
  && dpkg -i /tmp/simba/*.deb \

@@ -90,8 +97,8 @@ COPY pyproject.toml poetry.lock ./
ARG POETRY_OPTIONS="--no-root --no-interaction --no-ansi"
# for LDAP authentication, install with `ldap3` group
# disabled by default due to GPL license conflict
ARG install_groups="main,all_ds,dev"
RUN /etc/poetry/bin/poetry install --only $install_groups $POETRY_OPTIONS
ARG INSTALL_GROUPS="main,all_ds,dev"
RUN /etc/poetry/bin/poetry install --only $INSTALL_GROUPS $POETRY_OPTIONS

COPY --chown=redash . /app
COPY --from=frontend-builder --chown=redash /frontend/client/dist /app/client/dist
@@ -1,11 +1,11 @@
FROM cypress/browsers:node16.18.0-chrome90-ff88
FROM cypress/browsers:node18.12.0-chrome106-ff106

ENV APP /usr/src/app
WORKDIR $APP

COPY package.json yarn.lock .yarnrc $APP/
COPY viz-lib $APP/viz-lib
RUN npm install yarn@1.22.19 -g && yarn --frozen-lockfile --network-concurrency 1 > /dev/null
RUN npm install yarn@1.22.22 -g && yarn --frozen-lockfile --network-concurrency 1 > /dev/null

COPY . $APP
40  Makefile

@@ -1,10 +1,18 @@
.PHONY: compose_build up test_db create_database clean down tests lint backend-unit-tests frontend-unit-tests test build watch start redis-cli bash
.PHONY: compose_build up test_db create_database create_db clean clean-all down tests lint backend-unit-tests frontend-unit-tests pydeps test build watch start redis-cli bash

export COMPOSE_DOCKER_CLI_BUILD=1
export DOCKER_BUILDKIT=1
export COMPOSE_PROFILES=local

compose_build: .env
    COMPOSE_DOCKER_CLI_BUILD=1 DOCKER_BUILDKIT=1 docker compose build
    docker compose build

up:
    COMPOSE_DOCKER_CLI_BUILD=1 DOCKER_BUILDKIT=1 docker compose up -d --build
    docker compose up -d redis postgres
    docker compose exec -u postgres postgres psql postgres --csv \
        -1tqc "SELECT table_name FROM information_schema.tables WHERE table_name = 'organizations'" 2> /dev/null \
        | grep -q "organizations" || make create_database
    docker compose up -d --build

test_db:
    @for i in `seq 1 5`; do \

@@ -13,11 +21,27 @@ test_db:
    done
    docker compose exec postgres sh -c 'psql -U postgres -c "drop database if exists tests;" && psql -U postgres -c "create database tests;"'

create_database: .env
create_db: .env
    docker compose run server create_db

create_database: create_db

clean:
    docker compose down && docker compose rm
    docker compose down
    docker compose --project-name cypress down
    docker compose rm --stop --force
    docker compose --project-name cypress rm --stop --force
    docker image rm --force \
        cypress-server:latest cypress-worker:latest cypress-scheduler:latest \
        redash-server:latest redash-worker:latest redash-scheduler:latest
    docker container prune --force
    docker image prune --force
    docker volume prune --force

clean-all: clean
    docker image rm --force \
        redash/redash:10.1.0.b50633 redis:7-alpine maildev/maildev:latest \
        pgautoupgrade/pgautoupgrade:15-alpine3.8 pgautoupgrade/pgautoupgrade:latest

down:
    docker compose down

@@ -30,6 +54,12 @@ env: .env
format:
    pre-commit run --all-files

pydeps:
    pip3 install wheel
    pip3 install --upgrade black ruff launchpadlib pip setuptools
    pip3 install poetry
    poetry install --only main,all_ds,dev

tests:
    docker compose run server tests
@@ -61,6 +61,7 @@ Redash supports more than 35 SQL and NoSQL [data sources](https://redash.io/help
- Apache Hive
- Apache Impala
- InfluxDB
- InfluxDBv2
- IBM Netezza Performance Server
- JIRA (JQL)
- JSON

@@ -83,6 +84,7 @@ Redash supports more than 35 SQL and NoSQL [data sources](https://redash.io/help
- Python
- Qubole
- Rockset
- RisingWave
- Salesforce
- ScyllaDB
- Shell Scripts
@@ -1,25 +1,48 @@
#!/bin/bash
set -e

if [ -z $REDASH_REDIS_URL ]; then
  export REDASH_REDIS_URL=redis://:${REDASH_REDIS_PASSWORD}@${REDASH_REDIS_HOSTNAME}:${REDASH_REDIS_PORT}/${REDASH_REDIS_NAME}
fi

if [ -z $REDASH_DATABASE_URL ]; then
  export REDASH_DATABASE_URL=postgresql://${REDASH_DATABASE_USER}:${REDASH_DATABASE_PASSWORD}@${REDASH_DATABASE_HOSTNAME}:${REDASH_DATABASE_PORT}/${REDASH_DATABASE_NAME}
fi

scheduler() {
  echo "Starting RQ scheduler..."

  exec /app/manage.py rq scheduler
}

dev_scheduler() {
  echo "Starting dev RQ scheduler..."

  exec watchmedo auto-restart --directory=./redash/ --pattern=*.py --recursive -- ./manage.py rq scheduler
  case $REDASH_PRODUCTION in
    true)
      echo "Starting RQ scheduler in production mode"
      exec ./manage.py rq scheduler
      ;;
    *)
      echo "Starting RQ scheduler in dev mode"
      exec watchmedo auto-restart \
        --directory=./redash/ \
        --pattern=*.py \
        --recursive -- ./manage.py rq scheduler $QUEUES
      ;;
  esac
}

worker() {
  echo "Starting RQ worker..."

  export WORKERS_COUNT=${WORKERS_COUNT:-2}
  export QUEUES=${QUEUES:-}

  exec supervisord -c worker.conf
  case $REDASH_PRODUCTION in
    true)
      echo "Starting RQ worker in production mode"
      exec supervisord -c worker.conf
      ;;
    *)
      echo "Starting RQ worker in dev mode"
      exec watchmedo auto-restart \
        --directory=./redash/ \
        --pattern=*.py \
        --recursive -- ./manage.py rq worker $QUEUES
      ;;
  esac
}

workers_healthcheck() {
@@ -35,22 +58,63 @@ workers_healthcheck() {
  fi
}

dev_worker() {
  echo "Starting dev RQ worker..."

  exec watchmedo auto-restart --directory=./redash/ --pattern=*.py --recursive -- ./manage.py rq worker $QUEUES
}

server() {
  # Recycle gunicorn workers every n-th request. See http://docs.gunicorn.org/en/stable/settings.html#max-requests for more details.
  MAX_REQUESTS=${MAX_REQUESTS:-1000}
  MAX_REQUESTS_JITTER=${MAX_REQUESTS_JITTER:-100}
  TIMEOUT=${REDASH_GUNICORN_TIMEOUT:-60}
  exec /usr/local/bin/gunicorn -b 0.0.0.0:5000 --name redash -w${REDASH_WEB_WORKERS:-4} redash.wsgi:app --max-requests $MAX_REQUESTS --max-requests-jitter $MAX_REQUESTS_JITTER --timeout $TIMEOUT
  case $REDASH_PRODUCTION in
    true)
      echo "Starting Redash Server in production mode"
      MAX_REQUESTS=${MAX_REQUESTS:-1000}
      MAX_REQUESTS_JITTER=${MAX_REQUESTS_JITTER:-100}
      TIMEOUT=${REDASH_GUNICORN_TIMEOUT:-60}
      exec /usr/local/bin/gunicorn \
        -b 0.0.0.0:5000 \
        --name redash \
        -w${REDASH_WEB_WORKERS:-4} redash.wsgi:app \
        --max-requests $MAX_REQUESTS \
        --max-requests-jitter $MAX_REQUESTS_JITTER \
        --timeout $TIMEOUT
      ;;
    *)
      echo "Starting Redash Server in a dev mode"
      export FLASK_DEBUG=1
      exec /app/manage.py runserver --debugger --reload -h 0.0.0.0
      ;;
  esac
}

create_db() {
  exec /app/manage.py database create_tables
  REDASH_DATABASE_MIGRATE_TIMEOUT=${REDASH_DATABASE_UPGRADE_TIMEOUT:-600}
  REDASH_DATABASE_MIGRATE_MAX_ATTEMPTS=${REDASH_DATABASE_MIGRATE_MAX_ATTEMPTS:-5}
  REDASH_DATABASE_MIGRATE_RETRY_WAIT=${REDASH_DATABASE_MIGRATE_RETRY_WAIT:-10}
  ATTEMPTS=1
  while ((ATTEMPTS <= REDASH_DATABASE_MIGRATE_MAX_ATTEMPTS)); do
    echo "Creating or updating Redash database, attempt ${ATTEMPTS} of ${REDASH_DATABASE_MIGRATE_MAX_ATTEMPTS}"
    ATTEMPTS=$((ATTEMPTS+1))
    timeout $REDASH_DATABASE_MIGRATE_TIMEOUT /app/manage.py database create_tables
    timeout $REDASH_DATABASE_MIGRATE_TIMEOUT /app/manage.py db upgrade
    STATUS=$(timeout $REDASH_DATABASE_MIGRATE_TIMEOUT /app/manage.py status 2>&1)
    RETCODE=$?
    case "$RETCODE" in
      0)
        exit 0
        ;;
      124)
        echo "Status command timed out after ${REDASH_DATABASE_MIGRATE_TIMEOUT} seconds."
        ;;
    esac
    case "$STATUS" in
      *sqlalchemy.exc.OperationalError*)
        echo "Database not yet functional, waiting."
        ;;
      *sqlalchemy.exc.ProgrammingError*)
        echo "Database does not appear to be installed."
        ;;
    esac
    echo "Waiting ${REDASH_DATABASE_MIGRATE_RETRY_WAIT} seconds before retrying."
    sleep ${REDASH_DATABASE_MIGRATE_RETRY_WAIT}
  done
  echo "Reached ${REDASH_DATABASE_MIGRATE_MAX_ATTEMPTS} attempts, giving up."
  exit 1
}

help() {
@@ -61,21 +125,16 @@ help() {

  echo "server -- start Redash server (with gunicorn)"
  echo "worker -- start a single RQ worker"
  echo "dev_worker -- start a single RQ worker with code reloading"
  echo "scheduler -- start an rq-scheduler instance"
  echo "dev_scheduler -- start an rq-scheduler instance with code reloading"
  echo ""
  echo "shell -- open shell"
  echo "dev_server -- start Flask development server with debugger and auto reload"
  echo "debug -- start Flask development server with remote debugger via ptvsd"
  echo "create_db -- create database tables"
  echo "create_db -- create database tables and run migrations"
  echo "manage -- CLI to manage redash"
  echo "tests -- run tests"
}

tests() {
  export REDASH_DATABASE_URL="postgresql://postgres@postgres/tests"

  if [ $# -eq 0 ]; then
    TEST_ARGS=tests/
  else
@@ -101,22 +160,10 @@ case "$1" in
|
||||
shift
|
||||
scheduler
|
||||
;;
|
||||
dev_scheduler)
|
||||
shift
|
||||
dev_scheduler
|
||||
;;
|
||||
dev_worker)
|
||||
shift
|
||||
dev_worker
|
||||
;;
|
||||
celery_healthcheck)
|
||||
shift
|
||||
echo "DEPRECATED: Celery has been replaced with RQ and now performs healthchecks autonomously as part of the 'worker' entrypoint."
|
||||
;;
|
||||
dev_server)
|
||||
export FLASK_DEBUG=1
|
||||
exec /app/manage.py runserver --debugger --reload -h 0.0.0.0
|
||||
;;
|
||||
debug)
|
||||
export FLASK_DEBUG=1
|
||||
export REMOTE_DEBUG=1
|
||||
|
||||
BIN  client/app/assets/images/db-logos/influxdbv2.png (new file; binary file not shown, after: 18 KiB)
BIN  (binary file not shown, before: 2.4 KiB)
BIN  client/app/assets/images/db-logos/risingwave.png (new file; binary file not shown, after: 9.7 KiB)
@@ -1,6 +1,5 @@
import React from "react";
import Link from "@/components/Link";
import { clientConfig, currentUser } from "@/services/auth";
import { clientConfig } from "@/services/auth";
import frontendVersion from "@/version.json";

export default function VersionInfo() {

@@ -10,15 +9,6 @@ export default function VersionInfo() {
        Version: {clientConfig.version}
        {frontendVersion !== clientConfig.version && ` (${frontendVersion.substring(0, 8)})`}
      </div>
      {clientConfig.newVersionAvailable && currentUser.hasPermission("super_admin") && (
        <div className="m-t-10">
          {/* eslint-disable react/jsx-no-target-blank */}
          <Link href="https://version.redash.io/" className="update-available" target="_blank" rel="noopener">
            Update Available <i className="fa fa-external-link m-l-5" aria-hidden="true" />
            <span className="sr-only">(opens in a new tab)</span>
          </Link>
        </div>
      )}
    </React.Fragment>
  );
}
@@ -1,79 +0,0 @@
|
||||
import React, { useState } from "react";
|
||||
import Card from "antd/lib/card";
|
||||
import Button from "antd/lib/button";
|
||||
import Typography from "antd/lib/typography";
|
||||
import { clientConfig } from "@/services/auth";
|
||||
import Link from "@/components/Link";
|
||||
import HelpTrigger from "@/components/HelpTrigger";
|
||||
import DynamicComponent from "@/components/DynamicComponent";
|
||||
import OrgSettings from "@/services/organizationSettings";
|
||||
|
||||
const Text = Typography.Text;
|
||||
|
||||
function BeaconConsent() {
|
||||
const [hide, setHide] = useState(false);
|
||||
|
||||
if (!clientConfig.showBeaconConsentMessage || hide) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const hideConsentCard = () => {
|
||||
clientConfig.showBeaconConsentMessage = false;
|
||||
setHide(true);
|
||||
};
|
||||
|
||||
const confirmConsent = confirm => {
|
||||
let message = "🙏 Thank you.";
|
||||
|
||||
if (!confirm) {
|
||||
message = "Settings Saved.";
|
||||
}
|
||||
|
||||
OrgSettings.save({ beacon_consent: confirm }, message)
|
||||
// .then(() => {
|
||||
// // const settings = get(response, 'settings');
|
||||
// // this.setState({ settings, formValues: { ...settings } });
|
||||
// })
|
||||
.finally(hideConsentCard);
|
||||
};
|
||||
|
||||
return (
|
||||
<DynamicComponent name="BeaconConsent">
|
||||
<div className="m-t-10 tiled">
|
||||
<Card
|
||||
title={
|
||||
<>
|
||||
Would you be ok with sharing anonymous usage data with the Redash team?{" "}
|
||||
<HelpTrigger type="USAGE_DATA_SHARING" />
|
||||
</>
|
||||
}
|
||||
bordered={false}>
|
||||
<Text>Help Redash improve by automatically sending anonymous usage data:</Text>
|
||||
<div className="m-t-5">
|
||||
<ul>
|
||||
<li> Number of users, queries, dashboards, alerts, widgets and visualizations.</li>
|
||||
<li> Types of data sources, alert destinations and visualizations.</li>
|
||||
</ul>
|
||||
</div>
|
||||
<Text>All data is aggregated and will never include any sensitive or private data.</Text>
|
||||
<div className="m-t-5">
|
||||
<Button type="primary" className="m-r-5" onClick={() => confirmConsent(true)}>
|
||||
Yes
|
||||
</Button>
|
||||
<Button type="default" onClick={() => confirmConsent(false)}>
|
||||
No
|
||||
</Button>
|
||||
</div>
|
||||
<div className="m-t-15">
|
||||
<Text type="secondary">
|
||||
You can change this setting anytime from the{" "}
|
||||
<Link href="settings/organization">Organization Settings</Link> page.
|
||||
</Text>
|
||||
</div>
|
||||
</Card>
|
||||
</div>
|
||||
</DynamicComponent>
|
||||
);
|
||||
}
|
||||
|
||||
export default BeaconConsent;
|
||||
@@ -23,7 +23,6 @@ export const TYPES = mapValues(
  VALUE_SOURCE_OPTIONS: ["/user-guide/querying/query-parameters#Value-Source-Options", "Guide: Value Source Options"],
  SHARE_DASHBOARD: ["/user-guide/dashboards/sharing-dashboards", "Guide: Sharing and Embedding Dashboards"],
  AUTHENTICATION_OPTIONS: ["/user-guide/users/authentication-options", "Guide: Authentication Options"],
  USAGE_DATA_SHARING: ["/open-source/admin-guide/usage-data", "Help: Anonymous Usage Data Sharing"],
  DS_ATHENA: ["/data-sources/amazon-athena-setup", "Guide: Help Setting up Amazon Athena"],
  DS_BIGQUERY: ["/data-sources/bigquery-setup", "Guide: Help Setting up BigQuery"],
  DS_URL: ["/data-sources/querying-urls", "Guide: Help Setting up URL"],
@@ -19,6 +19,7 @@ import PlainButton from "@/components/PlainButton";
|
||||
import ExpandedWidgetDialog from "@/components/dashboards/ExpandedWidgetDialog";
|
||||
import EditParameterMappingsDialog from "@/components/dashboards/EditParameterMappingsDialog";
|
||||
import VisualizationRenderer from "@/components/visualizations/VisualizationRenderer";
|
||||
import { ExecutionStatus } from "@/services/query-result";
|
||||
|
||||
import Widget from "./Widget";
|
||||
|
||||
@@ -278,7 +279,7 @@ class VisualizationWidget extends React.Component {
|
||||
const widgetQueryResult = widget.getQueryResult();
|
||||
const widgetStatus = widgetQueryResult && widgetQueryResult.getStatus();
|
||||
switch (widgetStatus) {
|
||||
case "failed":
|
||||
case ExecutionStatus.FAILED:
|
||||
return (
|
||||
<div className="body-row-auto scrollbox">
|
||||
{widgetQueryResult.getError() && (
|
||||
@@ -288,7 +289,7 @@ class VisualizationWidget extends React.Component {
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
case "done":
|
||||
case ExecutionStatus.FINISHED:
|
||||
return (
|
||||
<div className="body-row-auto scrollbox">
|
||||
<VisualizationRenderer
|
||||
|
||||
@@ -16,6 +16,7 @@ import LoadingState from "../items-list/components/LoadingState";
|
||||
const SchemaItemColumnType = PropTypes.shape({
|
||||
name: PropTypes.string.isRequired,
|
||||
type: PropTypes.string,
|
||||
comment: PropTypes.string,
|
||||
});
|
||||
|
||||
export const SchemaItemType = PropTypes.shape({
|
||||
@@ -47,13 +48,30 @@ function SchemaItem({ item, expanded, onToggle, onSelect, ...props }) {
  return (
    <div {...props}>
      <div className="schema-list-item">
        <PlainButton className="table-name" onClick={onToggle}>
          <i className="fa fa-table m-r-5" aria-hidden="true" />
          <strong>
            <span title={item.name}>{tableDisplayName}</span>
            {!isNil(item.size) && <span> ({item.size})</span>}
          </strong>
        </PlainButton>
        {item.description ? (
          <Tooltip
            title={item.description}
            mouseEnterDelay={0}
            mouseLeaveDelay={0}
            placement="right"
            arrowPointAtCenter>
            <PlainButton className="table-name" onClick={onToggle}>
              <i className="fa fa-table m-r-5" aria-hidden="true" />
              <strong>
                <span title={item.name}>{tableDisplayName}</span>
                {!isNil(item.size) && <span> ({item.size})</span>}
              </strong>
            </PlainButton>
          </Tooltip>
        ) : (
          <PlainButton className="table-name" onClick={onToggle}>
            <i className="fa fa-table m-r-5" aria-hidden="true" />
            <strong>
              <span title={item.name}>{tableDisplayName}</span>
              {!isNil(item.size) && <span> ({item.size})</span>}
            </strong>
          </PlainButton>
        )}
        <Tooltip
          title="Insert table name into query text"
          mouseEnterDelay={0}
@@ -73,22 +91,34 @@ function SchemaItem({ item, expanded, onToggle, onSelect, ...props }) {
          map(item.columns, column => {
            const columnName = get(column, "name");
            const columnType = get(column, "type");
            return (
              <Tooltip
                title="Insert column name into query text"
                mouseEnterDelay={0}
                mouseLeaveDelay={0}
                placement="rightTop">
                <PlainButton key={columnName} className="table-open-item" onClick={e => handleSelect(e, columnName)}>
                  <div>
                    {columnName} {columnType && <span className="column-type">{columnType}</span>}
                  </div>
            const columnComment = get(column, "comment");
            if (columnComment) {
              return (
                <Tooltip title={columnComment} mouseEnterDelay={0} mouseLeaveDelay={0} placement="rightTop">
                  <PlainButton
                    key={columnName}
                    className="table-open-item"
                    onClick={e => handleSelect(e, columnName)}>
                    <div>
                      {columnName} {columnType && <span className="column-type">{columnType}</span>}
                    </div>

                  <div className="copy-to-editor">
                    <i className="fa fa-angle-double-right" aria-hidden="true" />
                  </div>
                </PlainButton>
              </Tooltip>
                    <div className="copy-to-editor">
                      <i className="fa fa-angle-double-right" aria-hidden="true" />
                    </div>
                  </PlainButton>
                </Tooltip>
              );
            }
            return (
              <PlainButton key={columnName} className="table-open-item" onClick={e => handleSelect(e, columnName)}>
                <div>
                  {columnName} {columnType && <span className="column-type">{columnType}</span>}
                </div>
                <div className="copy-to-editor">
                  <i className="fa fa-angle-double-right" aria-hidden="true" />
                </div>
              </PlainButton>
            );
          })
        )}
@@ -148,7 +148,9 @@ function EditVisualizationDialog({ dialog, visualization, query, queryResult })
  function dismiss() {
    const optionsChanged = !isEqual(options, defaultState.originalOptions);
    confirmDialogClose(nameChanged || optionsChanged).then(dialog.dismiss);
    confirmDialogClose(nameChanged || optionsChanged)
      .then(dialog.dismiss)
      .catch(() => {});
  }

  // When editing existing visualization chart type selector is disabled, so add only existing visualization's
@@ -5,7 +5,7 @@
    <meta charset="UTF-8" />
    <base href="{{base_href}}" />
    <title><%= htmlWebpackPlugin.options.title %></title>
    <script src="/static/unsupportedRedirect.js" async></script>
    <script src="<%= htmlWebpackPlugin.options.staticPath %>unsupportedRedirect.js" async></script>

    <link rel="icon" type="image/png" sizes="32x32" href="/static/images/favicon-32x32.png" />
    <link rel="icon" type="image/png" sizes="96x96" href="/static/images/favicon-96x96.png" />
@@ -118,28 +118,9 @@ class ShareDashboardDialog extends React.Component {
            />
          </Form.Item>
          {dashboard.public_url && (
            <>
              <Form.Item>
                <Alert
                  message={
                    <div>
                      Custom rule for hiding filter components when sharing links:
                      <br />
                      You can hide filter components by appending `&hide_filter={"{{"} component_name{"}}"}` to the
                      sharing URL.
                      <br />
                      Example: http://{"{{"}ip{"}}"}:{"{{"}port{"}}"}/public/dashboards/{"{{"}id{"}}"}
                      ?p_country=ghana&p_site=10&hide_filter=country
                    </div>
                  }
                  type="warning"
                />
              </Form.Item>

              <Form.Item label="Secret address" {...this.formItemProps}>
                <InputWithCopy value={dashboard.public_url} data-test="SecretAddress" />
              </Form.Item>
            </>
            <Form.Item label="Secret address" {...this.formItemProps}>
              <InputWithCopy value={dashboard.public_url} data-test="SecretAddress" />
            </Form.Item>
          )}
        </Form>
      </Modal>
@@ -6,7 +6,6 @@ import Link from "@/components/Link";
|
||||
import routeWithUserSession from "@/components/ApplicationArea/routeWithUserSession";
|
||||
import EmptyState, { EmptyStateHelpMessage } from "@/components/empty-state/EmptyState";
|
||||
import DynamicComponent from "@/components/DynamicComponent";
|
||||
import BeaconConsent from "@/components/BeaconConsent";
|
||||
import PlainButton from "@/components/PlainButton";
|
||||
|
||||
import { axios } from "@/services/axios";
|
||||
@@ -89,7 +88,6 @@ export default function Home() {
|
||||
</DynamicComponent>
|
||||
<DynamicComponent name="HomeExtra" />
|
||||
<DashboardAndQueryFavoritesList />
|
||||
<BeaconConsent />
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
|
||||
@@ -380,7 +380,9 @@ function QuerySource(props) {
        <QueryVisualizationTabs
          queryResult={queryResult}
          visualizations={query.visualizations}
          showNewVisualizationButton={queryFlags.canEdit && queryResultData.status === ExecutionStatus.DONE}
          showNewVisualizationButton={
            queryFlags.canEdit && queryResultData.status === ExecutionStatus.FINISHED
          }
          canDeleteVisualizations={queryFlags.canEdit}
          selectedTab={selectedVisualization}
          onChangeTab={setSelectedVisualization}

@@ -165,7 +165,7 @@ function QueryView(props) {
        <QueryVisualizationTabs
          queryResult={queryResult}
          visualizations={query.visualizations}
          showNewVisualizationButton={queryFlags.canEdit && queryResultData.status === ExecutionStatus.DONE}
          showNewVisualizationButton={queryFlags.canEdit && queryResultData.status === ExecutionStatus.FINISHED}
          canDeleteVisualizations={queryFlags.canEdit}
          selectedTab={selectedVisualization}
          onChangeTab={setSelectedVisualization}
@@ -1,37 +1,45 @@
import { includes } from "lodash";
import React from "react";
import PropTypes from "prop-types";
import Alert from "antd/lib/alert";
import Button from "antd/lib/button";
import Timer from "@/components/Timer";
import { ExecutionStatus } from "@/services/query-result";

export default function QueryExecutionStatus({ status, updatedAt, error, isCancelling, onCancel }) {
  const alertType = status === "failed" ? "error" : "info";
  const showTimer = status !== "failed" && updatedAt;
  const isCancelButtonAvailable = includes(["waiting", "processing"], status);
  const alertType = status === ExecutionStatus.FAILED ? "error" : "info";
  const showTimer = status !== ExecutionStatus.FAILED && updatedAt;
  const isCancelButtonAvailable = [
    ExecutionStatus.SCHEDULED,
    ExecutionStatus.QUEUED,
    ExecutionStatus.STARTED,
    ExecutionStatus.DEFERRED,
  ].includes(status);
  let message = isCancelling ? <React.Fragment>Cancelling…</React.Fragment> : null;

  switch (status) {
    case "waiting":
    case ExecutionStatus.QUEUED:
      if (!isCancelling) {
        message = <React.Fragment>Query in queue…</React.Fragment>;
      }
      break;
    case "processing":
    case ExecutionStatus.STARTED:
      if (!isCancelling) {
        message = <React.Fragment>Executing query…</React.Fragment>;
      }
      break;
    case "loading-result":
    case ExecutionStatus.LOADING_RESULT:
      message = <React.Fragment>Loading results…</React.Fragment>;
      break;
    case "failed":
    case ExecutionStatus.FAILED:
      message = (
        <React.Fragment>
          Error running query: <strong>{error}</strong>
        </React.Fragment>
      );
      break;
    case ExecutionStatus.CANCELED:
      message = <React.Fragment>Query was canceled</React.Fragment>;
      break;
    // no default
  }

@@ -66,7 +74,7 @@ QueryExecutionStatus.propTypes = {
};

QueryExecutionStatus.defaultProps = {
  status: "waiting",
  status: ExecutionStatus.QUEUED,
  updatedAt: null,
  error: null,
  isCancelling: true,
@@ -1,38 +0,0 @@
|
||||
import React from "react";
|
||||
import Form from "antd/lib/form";
|
||||
import Checkbox from "antd/lib/checkbox";
|
||||
import Skeleton from "antd/lib/skeleton";
|
||||
import HelpTrigger from "@/components/HelpTrigger";
|
||||
import DynamicComponent from "@/components/DynamicComponent";
|
||||
import { SettingsEditorPropTypes, SettingsEditorDefaultProps } from "../prop-types";
|
||||
|
||||
export default function BeaconConsentSettings(props) {
|
||||
const { values, onChange, loading } = props;
|
||||
|
||||
return (
|
||||
<DynamicComponent name="OrganizationSettings.BeaconConsentSettings" {...props}>
|
||||
<Form.Item
|
||||
label={
|
||||
<span>
|
||||
Anonymous Usage Data Sharing
|
||||
<HelpTrigger className="m-l-5 m-r-5" type="USAGE_DATA_SHARING" />
|
||||
</span>
|
||||
}>
|
||||
{loading ? (
|
||||
<Skeleton title={{ width: 300 }} paragraph={false} active />
|
||||
) : (
|
||||
<Checkbox
|
||||
name="beacon_consent"
|
||||
checked={values.beacon_consent}
|
||||
onChange={e => onChange({ beacon_consent: e.target.checked })}>
|
||||
Help Redash improve by automatically sending anonymous usage data
|
||||
</Checkbox>
|
||||
)}
|
||||
</Form.Item>
|
||||
</DynamicComponent>
|
||||
);
|
||||
}
|
||||
|
||||
BeaconConsentSettings.propTypes = SettingsEditorPropTypes;
|
||||
|
||||
BeaconConsentSettings.defaultProps = SettingsEditorDefaultProps;
|
||||
@@ -4,7 +4,6 @@ import DynamicComponent from "@/components/DynamicComponent";
|
||||
import FormatSettings from "./FormatSettings";
|
||||
import PlotlySettings from "./PlotlySettings";
|
||||
import FeatureFlagsSettings from "./FeatureFlagsSettings";
|
||||
import BeaconConsentSettings from "./BeaconConsentSettings";
|
||||
|
||||
export default function GeneralSettings(props) {
|
||||
return (
|
||||
@@ -14,7 +13,6 @@ export default function GeneralSettings(props) {
|
||||
<FormatSettings {...props} />
|
||||
<PlotlySettings {...props} />
|
||||
<FeatureFlagsSettings {...props} />
|
||||
<BeaconConsentSettings {...props} />
|
||||
</DynamicComponent>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -50,18 +50,15 @@ const QueryResultResource = {
};

export const ExecutionStatus = {
WAITING: "waiting",
PROCESSING: "processing",
DONE: "done",
QUEUED: "queued",
STARTED: "started",
FINISHED: "finished",
FAILED: "failed",
LOADING_RESULT: "loading-result",
};

const statuses = {
1: ExecutionStatus.WAITING,
2: ExecutionStatus.PROCESSING,
3: ExecutionStatus.DONE,
4: ExecutionStatus.FAILED,
CANCELED: "canceled",
DEFERRED: "deferred",
SCHEDULED: "scheduled",
STOPPED: "stopped",
};

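For reference, the renamed statuses line up roughly as follows; this mapping is inferred from the diff above and is not code that ships in the repository:

// legacy frontend status -> RQ-style ExecutionStatus introduced here
const legacyToRqStatus = {
  waiting: "queued",     // ExecutionStatus.QUEUED
  processing: "started", // ExecutionStatus.STARTED
  done: "finished",      // ExecutionStatus.FINISHED (the job now carries result_id)
  failed: "failed",      // ExecutionStatus.FAILED, unchanged
};
// CANCELED, DEFERRED, SCHEDULED and STOPPED are new and had no legacy equivalent.
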
function handleErrorResponse(queryResult, error) {
@@ -80,7 +77,7 @@ function handleErrorResponse(queryResult, error) {
queryResult.update({
job: {
error: "cached query result unavailable, please execute again.",
status: 4,
status: ExecutionStatus.FAILED,
},
});
return;
@@ -91,7 +88,7 @@ function handleErrorResponse(queryResult, error) {
queryResult.update({
job: {
error: get(error, "response.data.message", "Unknown error occurred. Please try again later."),
status: 4,
status: ExecutionStatus.FAILED,
},
});
}
@@ -102,11 +99,19 @@ function sleep(ms) {

export function fetchDataFromJob(jobId, interval = 1000) {
return axios.get(`api/jobs/${jobId}`).then(data => {
const status = statuses[data.job.status];
if (status === ExecutionStatus.WAITING || status === ExecutionStatus.PROCESSING) {
const status = data.job.status;
if (
[ExecutionStatus.QUEUED, ExecutionStatus.STARTED, ExecutionStatus.SCHEDULED, ExecutionStatus.DEFERRED].includes(
status
)
) {
return sleep(interval).then(() => fetchDataFromJob(data.job.id));
} else if (status === ExecutionStatus.DONE) {
return data.job.result;
} else if (status === ExecutionStatus.FINISHED) {
return data.job.result_id;
} else if (status === ExecutionStatus.CANCELED) {
return Promise.reject("Job was canceled");
} else if (status === ExecutionStatus.STOPPED) {
return Promise.reject("Job was stopped");
} else if (status === ExecutionStatus.FAILED) {
return Promise.reject(data.job.error);
}
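With the numeric statuses lookup gone, fetchDataFromJob branches on the job's RQ status directly. A usage sketch, assuming a jobId obtained elsewhere; only fetchDataFromJob and its polling contract come from this file:

import { fetchDataFromJob } from "@/services/query-result";

// Poll once a second until the job reaches a terminal state: FINISHED resolves
// with a result_id, DONE resolves with the inline result, and canceled,
// stopped or failed jobs reject with a reason.
fetchDataFromJob(jobId, 1000)
  .then(resultOrId => console.log("job finished:", resultOrId))
  .catch(reason => console.error("job did not finish:", reason));
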
@@ -122,7 +127,7 @@ class QueryResult {
this.deferred = defer();
this.job = {};
this.query_result = {};
this.status = "waiting";
this.status = ExecutionStatus.QUEUED;

this.updatedAt = moment();

@@ -138,8 +143,8 @@ class QueryResult {
extend(this, props);

if ("query_result" in props) {
this.status = ExecutionStatus.DONE;
this.deferred.onStatusChange(ExecutionStatus.DONE);
this.status = ExecutionStatus.FINISHED;
this.deferred.onStatusChange(ExecutionStatus.FINISHED);

const columnTypes = {};

@@ -183,11 +188,10 @@ class QueryResult {
});

this.deferred.resolve(this);
} else if (this.job.status === 3 || this.job.status === 2) {
this.deferred.onStatusChange(ExecutionStatus.PROCESSING);
this.status = "processing";
} else if (this.job.status === 4) {
this.status = statuses[this.job.status];
} else if (this.job.status === ExecutionStatus.STARTED || this.job.status === ExecutionStatus.FINISHED) {
this.status = ExecutionStatus.STARTED;
} else if (this.job.status === ExecutionStatus.FAILED) {
this.status = this.job.status;
this.deferred.reject(new QueryResultError(this.job.error));
} else {
this.deferred.onStatusChange(undefined);
@@ -211,7 +215,7 @@ class QueryResult {
if (this.isLoadingResult) {
return ExecutionStatus.LOADING_RESULT;
}
return this.status || statuses[this.job.status];
return this.status || this.job.status;
}

getError() {
@@ -374,7 +378,7 @@ class QueryResult {
this.isLoadingResult = true;
this.deferred.onStatusChange(ExecutionStatus.LOADING_RESULT);

QueryResultResource.get({ id: this.job.query_result_id })
QueryResultResource.get({ id: this.job.result_id })
.then(response => {
this.update(response);
this.isLoadingResult = false;
@@ -389,7 +393,7 @@ class QueryResult {
this.update({
job: {
error: "failed communicating with server. Please check your Internet connection and try again.",
status: 4,
status: ExecutionStatus.FAILED,
},
});
this.isLoadingResult = false;
@@ -413,9 +417,9 @@ class QueryResult {
.then(jobResponse => {
this.update(jobResponse);

if (this.getStatus() === "processing" && this.job.query_result_id && this.job.query_result_id !== "None") {
if (this.getStatus() === ExecutionStatus.STARTED && this.job.result_id && this.job.result_id !== "None") {
loadResult();
} else if (this.getStatus() !== "failed") {
} else if (this.getStatus() !== ExecutionStatus.FAILED) {
const waitTime = tryNumber > 10 ? 3000 : 500;
setTimeout(() => {
this.refreshStatus(query, parameters, tryNumber + 1);
@@ -428,7 +432,7 @@ class QueryResult {
this.update({
job: {
error: "failed communicating with server. Please check your Internet connection and try again.",
status: 4,
status: ExecutionStatus.FAILED,
},
});
});

@@ -2,6 +2,7 @@ import moment from "moment";
import debug from "debug";
import Mustache from "mustache";
import { axios } from "@/services/axios";
import { ExecutionStatus } from "@/services/query-result";
import {
zipObject,
isEmpty,
@@ -103,7 +104,7 @@ export class Query {
return new QueryResult({
job: {
error: `missing ${valuesWord} for ${missingParams.join(", ")} ${paramsWord}.`,
status: 4,
status: ExecutionStatus.FAILED,
},
});
}
@@ -360,7 +361,7 @@ export class QueryResultError {

// eslint-disable-next-line class-methods-use-this
getStatus() {
return "failed";
return ExecutionStatus.FAILED;
}

// eslint-disable-next-line class-methods-use-this

@@ -1,6 +1,5 @@
/* eslint-disable import/no-extraneous-dependencies, no-console */
const { find } = require("lodash");
const atob = require("atob");
const { execSync } = require("child_process");
const { get, post } = require("request").defaults({ jar: true });
const { seedData } = require("./seed-data");
@@ -44,44 +43,32 @@ function seedDatabase(seedValues) {

function buildServer() {
console.log("Building the server...");
execSync("docker compose -p cypress build", { stdio: "inherit" });
execSync("docker compose build", { stdio: "inherit" });
}

function startServer() {
console.log("Starting the server...");
execSync("docker compose -p cypress up -d", { stdio: "inherit" });
execSync("docker compose -p cypress run server create_db", { stdio: "inherit" });
execSync("docker compose up -d", { stdio: "inherit" });
execSync("docker compose run server create_db", { stdio: "inherit" });
}

function stopServer() {
console.log("Stopping the server...");
execSync("docker compose -p cypress down", { stdio: "inherit" });
execSync("docker compose down", { stdio: "inherit" });
}

function runCypressCI() {
const {
PERCY_TOKEN_ENCODED,
CYPRESS_PROJECT_ID_ENCODED,
CYPRESS_RECORD_KEY_ENCODED,
GITHUB_REPOSITORY,
CYPRESS_OPTIONS, // eslint-disable-line no-unused-vars
} = process.env;

if (GITHUB_REPOSITORY === "getredash/redash") {
if (PERCY_TOKEN_ENCODED) {
process.env.PERCY_TOKEN = atob(`${PERCY_TOKEN_ENCODED}`);
}
if (CYPRESS_PROJECT_ID_ENCODED) {
process.env.CYPRESS_PROJECT_ID = atob(`${CYPRESS_PROJECT_ID_ENCODED}`);
}
if (CYPRESS_RECORD_KEY_ENCODED) {
process.env.CYPRESS_RECORD_KEY = atob(`${CYPRESS_RECORD_KEY_ENCODED}`);
}
process.env.CYPRESS_OPTIONS = "--record";
}

execSync(
"COMMIT_INFO_MESSAGE=$(git show -s --format=%s) docker compose run --name cypress cypress ./node_modules/.bin/percy exec -t 300 -- ./node_modules/.bin/cypress run $CYPRESS_OPTIONS",
"docker compose run --name cypress cypress ./node_modules/.bin/percy exec -t 300 -- ./node_modules/.bin/cypress run $CYPRESS_OPTIONS",
{ stdio: "inherit" }
);
}

@@ -53,11 +53,12 @@ describe("Dashboard Sharing", () => {
};

const dashboardUrl = this.dashboardUrl;
cy.createQuery({ options }).then(({ id: queryId }) => {
cy.createQuery({ options }).then(({ id: queryId, name: queryName }) => {
cy.visit(dashboardUrl);
editDashboard();
cy.getByTestId("AddWidgetButton").click();
cy.getByTestId("AddWidgetDialog").within(() => {
cy.get("input").type(queryName);
cy.get(`.query-selector-result[data-test="QueryId${queryId}"]`).click();
});
cy.contains("button", "Add to Dashboard").click();
@@ -178,11 +179,12 @@ describe("Dashboard Sharing", () => {
};

const dashboardUrl = this.dashboardUrl;
cy.createQuery({ options }).then(({ id: queryId }) => {
cy.createQuery({ options }).then(({ id: queryId, name: queryName }) => {
cy.visit(dashboardUrl);
editDashboard();
cy.getByTestId("AddWidgetButton").click();
cy.getByTestId("AddWidgetDialog").within(() => {
cy.get("input").type(queryName);
cy.get(`.query-selector-result[data-test="QueryId${queryId}"]`).click();
});
cy.contains("button", "Add to Dashboard").click();

@@ -18,11 +18,12 @@ describe("Widget", () => {
};

it("adds widget", function() {
cy.createQuery().then(({ id: queryId }) => {
cy.createQuery().then(({ id: queryId, name: queryName }) => {
cy.visit(this.dashboardUrl);
editDashboard();
cy.getByTestId("AddWidgetButton").click();
cy.getByTestId("AddWidgetDialog").within(() => {
cy.get("input").type(queryName);
cy.get(`.query-selector-result[data-test="QueryId${queryId}"]`).click();
});
cy.contains("button", "Add to Dashboard").click();

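The add-widget dialog interaction above is now repeated verbatim in three specs; a shared Cypress helper could keep them in sync. A sketch only, this helper does not exist in the repository:

// Hypothetical helper: search the AddWidgetDialog by query name, pick the
// matching result and confirm.
function addQueryWidget(queryId, queryName) {
  cy.getByTestId("AddWidgetButton").click();
  cy.getByTestId("AddWidgetDialog").within(() => {
    cy.get("input").type(queryName);
    cy.get(`.query-selector-result[data-test="QueryId${queryId}"]`).click();
  });
  cy.contains("button", "Add to Dashboard").click();
}
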
compose.base.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
services:
  .redash:
    build:
      context: .
      args:
        FRONTEND_BUILD_MODE: ${FRONTEND_BUILD_MODE:-2}
        INSTALL_GROUPS: ${INSTALL_GROUPS:-main,all_ds,dev}
    volumes:
      - $PWD:${SERVER_MOUNT:-/ignore}
    command: manage version
    environment:
      REDASH_LOG_LEVEL: INFO
      REDASH_REDIS_URL: redis://redis:6379/0
      REDASH_DATABASE_URL: postgresql://postgres@postgres/postgres
      REDASH_RATELIMIT_ENABLED: false
      REDASH_MAIL_DEFAULT_SENDER: redash@example.com
      REDASH_MAIL_SERVER: email
      REDASH_MAIL_PORT: 1025
      REDASH_ENFORCE_CSRF: true
      REDASH_COOKIE_SECRET: ${REDASH_COOKIE_SECRET}
      REDASH_SECRET_KEY: ${REDASH_SECRET_KEY}
      REDASH_PRODUCTION: ${REDASH_PRODUCTION:-true}
    env_file:
      - .env

compose.yaml (new file, 81 lines)
@@ -0,0 +1,81 @@
services:
  server:
    extends:
      file: compose.base.yaml
      service: .redash
    command: server
    depends_on:
      - postgres
      - redis
    ports:
      - "${REDASH_PORT:-5001}:5000"
      - "5678:5678"
    environment:
      PYTHONUNBUFFERED: 0
  scheduler:
    extends:
      file: compose.base.yaml
      service: .redash
    profiles:
      - e2e
      - local
    command: scheduler
    depends_on:
      - server
  worker:
    extends:
      file: compose.base.yaml
      service: .redash
    profiles:
      - e2e
      - local
    command: worker
    depends_on:
      - server
    environment:
      PYTHONUNBUFFERED: 0
  redis:
    image: redis:7-alpine
    restart: unless-stopped
  postgres:
    image: postgres:16-alpine
    ports:
      - "${POSTGRES_PORT:-15432}:5432"
    # The following turns the DB into less durable, but gains significant performance improvements for the tests run (x3
    # improvement on my personal machine). We should consider moving this into a dedicated Docker Compose configuration for
    # tests.
    command: postgres -c fsync=off -c full_page_writes=off -c synchronous_commit=OFF
    restart: unless-stopped
    environment:
      POSTGRES_HOST_AUTH_METHOD: trust
  email:
    image: maildev/maildev
    ports:
      - "1080:1080"
      - "1025:1025"
    restart: unless-stopped
  cypress:
    ipc: host
    build:
      context: .
      dockerfile: Dockerfile.cypress
    profiles:
      - e2e
    depends_on:
      - server
      - worker
      - scheduler
    environment:
      CYPRESS_baseUrl: http://server:5000
      PERCY_TOKEN: ${PERCY_TOKEN:-""}
      PERCY_BRANCH: ${PERCY_BRANCH:-""}
      PERCY_COMMIT: ${PERCY_COMMIT:-""}
      PERCY_PULL_REQUEST: ${PERCY_PULL_REQUEST:-}
      COMMIT_INFO_BRANCH: ${COMMIT_INFO_BRANCH:-""}
      COMMIT_INFO_MESSAGE: ${COMMIT_INFO_MESSAGE:-""}
      COMMIT_INFO_AUTHOR: ${COMMIT_INFO_AUTHOR:-""}
      COMMIT_INFO_SHA: ${COMMIT_INFO_SHA:-""}
      COMMIT_INFO_REMOTE: ${COMMIT_INFO_REMOTE:-""}
      CYPRESS_PROJECT_ID: ${CYPRESS_PROJECT_ID:-""}
      CYPRESS_RECORD_KEY: ${CYPRESS_RECORD_KEY:-""}
      CYPRESS_COVERAGE: ${CYPRESS_COVERAGE:-true}

@@ -1,71 +0,0 @@
|
||||
# This configuration file is for the **development** setup.
|
||||
# For a production example please refer to getredash/setup repository on GitHub.
|
||||
version: "2.2"
|
||||
x-redash-service: &redash-service
|
||||
build:
|
||||
context: .
|
||||
args:
|
||||
skip_frontend_build: "true" # set to empty string to build
|
||||
volumes:
|
||||
- .:/app
|
||||
env_file:
|
||||
- .env
|
||||
x-redash-environment: &redash-environment
|
||||
REDASH_LOG_LEVEL: "INFO"
|
||||
REDASH_REDIS_URL: "redis://redis:6379/0"
|
||||
REDASH_DATABASE_URL: "postgresql://postgres@postgres/postgres"
|
||||
REDASH_RATELIMIT_ENABLED: "false"
|
||||
REDASH_MAIL_DEFAULT_SENDER: "redash@example.com"
|
||||
REDASH_MAIL_SERVER: "email"
|
||||
REDASH_MAIL_PORT: 1025
|
||||
REDASH_ENFORCE_CSRF: "true"
|
||||
REDASH_GUNICORN_TIMEOUT: 60
|
||||
# Set secret keys in the .env file
|
||||
services:
|
||||
server:
|
||||
<<: *redash-service
|
||||
command: dev_server
|
||||
depends_on:
|
||||
- postgres
|
||||
- redis
|
||||
ports:
|
||||
- "5001:5000"
|
||||
- "5678:5678"
|
||||
environment:
|
||||
<<: *redash-environment
|
||||
PYTHONUNBUFFERED: 0
|
||||
scheduler:
|
||||
<<: *redash-service
|
||||
command: dev_scheduler
|
||||
depends_on:
|
||||
- server
|
||||
environment:
|
||||
<<: *redash-environment
|
||||
worker:
|
||||
<<: *redash-service
|
||||
command: dev_worker
|
||||
depends_on:
|
||||
- server
|
||||
environment:
|
||||
<<: *redash-environment
|
||||
PYTHONUNBUFFERED: 0
|
||||
redis:
|
||||
image: redis:7-alpine
|
||||
restart: unless-stopped
|
||||
postgres:
|
||||
image: pgautoupgrade/pgautoupgrade:15-alpine3.8
|
||||
ports:
|
||||
- "15432:5432"
|
||||
# The following turns the DB into less durable, but gains significant performance improvements for the tests run (x3
|
||||
# improvement on my personal machine). We should consider moving this into a dedicated Docker Compose configuration for
|
||||
# tests.
|
||||
command: "postgres -c fsync=off -c full_page_writes=off -c synchronous_commit=OFF"
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
POSTGRES_HOST_AUTH_METHOD: "trust"
|
||||
email:
|
||||
image: maildev/maildev
|
||||
ports:
|
||||
- "1080:1080"
|
||||
- "1025:1025"
|
||||
restart: unless-stopped
|
||||
@@ -7,7 +7,7 @@ Create Date: 2020-12-23 21:35:32.766354
|
||||
"""
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy.dialects import postgresql
|
||||
from sqlalchemy.dialects.postgresql import JSON
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = '0ec979123ba4'
|
||||
@@ -18,7 +18,7 @@ depends_on = None
|
||||
|
||||
def upgrade():
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
op.add_column('dashboards', sa.Column('options', postgresql.JSON(astext_type=sa.Text()), server_default='{}', nullable=False))
|
||||
op.add_column('dashboards', sa.Column('options', JSON(astext_type=sa.Text()), server_default='{}', nullable=False))
|
||||
# ### end Alembic commands ###
|
||||
|
||||
|
||||
|
||||
@@ -10,8 +10,7 @@ import json
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy.sql import table
|
||||
|
||||
from redash.models import MutableDict, PseudoJSON
|
||||
from redash.models import MutableDict
|
||||
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
@@ -41,7 +40,7 @@ def upgrade():
|
||||
"queries",
|
||||
sa.Column(
|
||||
"schedule",
|
||||
MutableDict.as_mutable(PseudoJSON),
|
||||
sa.Text(),
|
||||
nullable=False,
|
||||
server_default=json.dumps({}),
|
||||
),
|
||||
@@ -51,7 +50,7 @@ def upgrade():
|
||||
queries = table(
|
||||
"queries",
|
||||
sa.Column("id", sa.Integer, primary_key=True),
|
||||
sa.Column("schedule", MutableDict.as_mutable(PseudoJSON)),
|
||||
sa.Column("schedule", sa.Text()),
|
||||
sa.Column("old_schedule", sa.String(length=10)),
|
||||
)
|
||||
|
||||
@@ -85,7 +84,7 @@ def downgrade():
|
||||
"queries",
|
||||
sa.Column(
|
||||
"old_schedule",
|
||||
MutableDict.as_mutable(PseudoJSON),
|
||||
sa.Text(),
|
||||
nullable=False,
|
||||
server_default=json.dumps({}),
|
||||
),
|
||||
@@ -93,8 +92,8 @@ def downgrade():
|
||||
|
||||
queries = table(
|
||||
"queries",
|
||||
sa.Column("schedule", MutableDict.as_mutable(PseudoJSON)),
|
||||
sa.Column("old_schedule", MutableDict.as_mutable(PseudoJSON)),
|
||||
sa.Column("schedule", sa.Text()),
|
||||
sa.Column("old_schedule", sa.Text()),
|
||||
)
|
||||
|
||||
op.execute(queries.update().values({"old_schedule": queries.c.schedule}))
|
||||
@@ -106,7 +105,7 @@ def downgrade():
|
||||
"queries",
|
||||
sa.Column("id", sa.Integer, primary_key=True),
|
||||
sa.Column("schedule", sa.String(length=10)),
|
||||
sa.Column("old_schedule", MutableDict.as_mutable(PseudoJSON)),
|
||||
sa.Column("old_schedule", sa.Text()),
|
||||
)
|
||||
|
||||
conn = op.get_bind()
|
||||
|
||||
@@ -0,0 +1,135 @@
|
||||
"""change type of json fields from varchar to json
|
||||
|
||||
Revision ID: 7205816877ec
|
||||
Revises: 7ce5925f832b
|
||||
Create Date: 2024-01-03 13:55:18.885021
|
||||
|
||||
"""
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy.dialects.postgresql import JSONB, JSON
|
||||
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = '7205816877ec'
|
||||
down_revision = '7ce5925f832b'
|
||||
branch_labels = None
|
||||
depends_on = None
|
||||
|
||||
|
||||
def upgrade():
|
||||
connection = op.get_bind()
|
||||
op.alter_column('queries', 'options',
|
||||
existing_type=sa.Text(),
|
||||
type_=JSONB(astext_type=sa.Text()),
|
||||
nullable=True,
|
||||
postgresql_using='options::jsonb',
|
||||
server_default=sa.text("'{}'::jsonb"))
|
||||
op.alter_column('queries', 'schedule',
|
||||
existing_type=sa.Text(),
|
||||
type_=JSONB(astext_type=sa.Text()),
|
||||
nullable=True,
|
||||
postgresql_using='schedule::jsonb',
|
||||
server_default=sa.text("'{}'::jsonb"))
|
||||
op.alter_column('events', 'additional_properties',
|
||||
existing_type=sa.Text(),
|
||||
type_=JSONB(astext_type=sa.Text()),
|
||||
nullable=True,
|
||||
postgresql_using='additional_properties::jsonb',
|
||||
server_default=sa.text("'{}'::jsonb"))
|
||||
op.alter_column('organizations', 'settings',
|
||||
existing_type=sa.Text(),
|
||||
type_=JSONB(astext_type=sa.Text()),
|
||||
nullable=True,
|
||||
postgresql_using='settings::jsonb',
|
||||
server_default=sa.text("'{}'::jsonb"))
|
||||
op.alter_column('alerts', 'options',
|
||||
existing_type=JSON(astext_type=sa.Text()),
|
||||
type_=JSONB(astext_type=sa.Text()),
|
||||
nullable=True,
|
||||
postgresql_using='options::jsonb',
|
||||
server_default=sa.text("'{}'::jsonb"))
|
||||
op.alter_column('dashboards', 'options',
|
||||
existing_type=JSON(astext_type=sa.Text()),
|
||||
type_=JSONB(astext_type=sa.Text()),
|
||||
postgresql_using='options::jsonb',
|
||||
server_default=sa.text("'{}'::jsonb"))
|
||||
op.alter_column('dashboards', 'layout',
|
||||
existing_type=sa.Text(),
|
||||
type_=JSONB(astext_type=sa.Text()),
|
||||
postgresql_using='layout::jsonb',
|
||||
server_default=sa.text("'{}'::jsonb"))
|
||||
op.alter_column('changes', 'change',
|
||||
existing_type=JSON(astext_type=sa.Text()),
|
||||
type_=JSONB(astext_type=sa.Text()),
|
||||
postgresql_using='change::jsonb',
|
||||
server_default=sa.text("'{}'::jsonb"))
|
||||
op.alter_column('visualizations', 'options',
|
||||
existing_type=sa.Text(),
|
||||
type_=JSONB(astext_type=sa.Text()),
|
||||
postgresql_using='options::jsonb',
|
||||
server_default=sa.text("'{}'::jsonb"))
|
||||
op.alter_column('widgets', 'options',
|
||||
existing_type=sa.Text(),
|
||||
type_=JSONB(astext_type=sa.Text()),
|
||||
postgresql_using='options::jsonb',
|
||||
server_default=sa.text("'{}'::jsonb"))
|
||||
|
||||
|
||||
def downgrade():
|
||||
connection = op.get_bind()
|
||||
op.alter_column('queries', 'options',
|
||||
existing_type=JSONB(astext_type=sa.Text()),
|
||||
type_=sa.Text(),
|
||||
postgresql_using='options::text',
|
||||
existing_nullable=True,
|
||||
server_default=sa.text("'{}'::text"))
|
||||
op.alter_column('queries', 'schedule',
|
||||
existing_type=JSONB(astext_type=sa.Text()),
|
||||
type_=sa.Text(),
|
||||
postgresql_using='schedule::text',
|
||||
existing_nullable=True,
|
||||
server_default=sa.text("'{}'::text"))
|
||||
op.alter_column('events', 'additional_properties',
|
||||
existing_type=JSONB(astext_type=sa.Text()),
|
||||
type_=sa.Text(),
|
||||
postgresql_using='additional_properties::text',
|
||||
existing_nullable=True,
|
||||
server_default=sa.text("'{}'::text"))
|
||||
op.alter_column('organizations', 'settings',
|
||||
existing_type=JSONB(astext_type=sa.Text()),
|
||||
type_=sa.Text(),
|
||||
postgresql_using='settings::text',
|
||||
existing_nullable=True,
|
||||
server_default=sa.text("'{}'::text"))
|
||||
op.alter_column('alerts', 'options',
|
||||
existing_type=JSONB(astext_type=sa.Text()),
|
||||
type_=JSON(astext_type=sa.Text()),
|
||||
postgresql_using='options::json',
|
||||
existing_nullable=True,
|
||||
server_default=sa.text("'{}'::json"))
|
||||
op.alter_column('dashboards', 'options',
|
||||
existing_type=JSONB(astext_type=sa.Text()),
|
||||
type_=JSON(astext_type=sa.Text()),
|
||||
postgresql_using='options::json',
|
||||
server_default=sa.text("'{}'::json"))
|
||||
op.alter_column('dashboards', 'layout',
|
||||
existing_type=JSONB(astext_type=sa.Text()),
|
||||
type_=sa.Text(),
|
||||
postgresql_using='layout::text',
|
||||
server_default=sa.text("'{}'::text"))
|
||||
op.alter_column('changes', 'change',
|
||||
existing_type=JSONB(astext_type=sa.Text()),
|
||||
type_=JSON(astext_type=sa.Text()),
|
||||
postgresql_using='change::json',
|
||||
server_default=sa.text("'{}'::json"))
|
||||
op.alter_column('visualizations', 'options',
|
||||
type_=sa.Text(),
|
||||
existing_type=JSONB(astext_type=sa.Text()),
|
||||
postgresql_using='options::text',
|
||||
server_default=sa.text("'{}'::text"))
|
||||
op.alter_column('widgets', 'options',
|
||||
type_=sa.Text(),
|
||||
existing_type=JSONB(astext_type=sa.Text()),
|
||||
postgresql_using='options::text',
|
||||
server_default=sa.text("'{}'::text"))
|
||||
@@ -7,10 +7,9 @@ Create Date: 2019-01-17 13:22:21.729334
|
||||
"""
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy.dialects import postgresql
|
||||
from sqlalchemy.sql import table
|
||||
|
||||
from redash.models import MutableDict, PseudoJSON
|
||||
from redash.models import MutableDict
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = "73beceabb948"
|
||||
@@ -43,7 +42,7 @@ def upgrade():
|
||||
queries = table(
|
||||
"queries",
|
||||
sa.Column("id", sa.Integer, primary_key=True),
|
||||
sa.Column("schedule", MutableDict.as_mutable(PseudoJSON)),
|
||||
sa.Column("schedule", sa.Text()),
|
||||
)
|
||||
|
||||
conn = op.get_bind()
|
||||
|
||||
@@ -6,7 +6,7 @@ Create Date: 2018-01-31 15:20:30.396533
|
||||
|
||||
"""
|
||||
|
||||
import simplejson
|
||||
import json
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
|
||||
@@ -27,7 +27,7 @@ def upgrade():
|
||||
dashboard_result = db.session.execute("SELECT id, layout FROM dashboards")
|
||||
for dashboard in dashboard_result:
|
||||
print(" Updating dashboard: {}".format(dashboard["id"]))
|
||||
layout = simplejson.loads(dashboard["layout"])
|
||||
layout = json.loads(dashboard["layout"])
|
||||
|
||||
print(" Building widgets map:")
|
||||
widgets = {}
|
||||
@@ -53,7 +53,7 @@ def upgrade():
|
||||
if widget is None:
|
||||
continue
|
||||
|
||||
options = simplejson.loads(widget["options"]) or {}
|
||||
options = json.loads(widget["options"]) or {}
|
||||
options["position"] = {
|
||||
"row": row_index,
|
||||
"col": column_index * column_size,
|
||||
@@ -62,7 +62,7 @@ def upgrade():
|
||||
|
||||
db.session.execute(
|
||||
"UPDATE widgets SET options=:options WHERE id=:id",
|
||||
{"options": simplejson.dumps(options), "id": widget_id},
|
||||
{"options": json.dumps(options), "id": widget_id},
|
||||
)
|
||||
|
||||
dashboard_result.close()
|
||||
|
||||
@@ -7,7 +7,7 @@ Create Date: 2019-01-31 09:21:31.517265
|
||||
"""
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy.dialects import postgresql
|
||||
from sqlalchemy.dialects.postgresql import BYTEA
|
||||
from sqlalchemy.sql import table
|
||||
from sqlalchemy_utils.types.encrypted.encrypted_type import FernetEngine
|
||||
|
||||
@@ -15,10 +15,8 @@ from redash import settings
|
||||
from redash.utils.configuration import ConfigurationContainer
|
||||
from redash.models.types import (
|
||||
EncryptedConfiguration,
|
||||
Configuration,
|
||||
MutableDict,
|
||||
MutableList,
|
||||
PseudoJSON,
|
||||
)
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
@@ -31,7 +29,7 @@ depends_on = None
|
||||
def upgrade():
|
||||
op.add_column(
|
||||
"data_sources",
|
||||
sa.Column("encrypted_options", postgresql.BYTEA(), nullable=True),
|
||||
sa.Column("encrypted_options", BYTEA(), nullable=True),
|
||||
)
|
||||
|
||||
# copy values
|
||||
@@ -46,7 +44,14 @@ def upgrade():
|
||||
)
|
||||
),
|
||||
),
|
||||
sa.Column("options", ConfigurationContainer.as_mutable(Configuration)),
|
||||
sa.Column(
|
||||
"options",
|
||||
ConfigurationContainer.as_mutable(
|
||||
EncryptedConfiguration(
|
||||
sa.Text, settings.DATASOURCE_SECRET_KEY, FernetEngine
|
||||
)
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
conn = op.get_bind()
|
||||
|
||||
@@ -9,7 +9,7 @@ import re
|
||||
from funcy import flatten, compact
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy.dialects import postgresql
|
||||
from sqlalchemy.dialects.postgresql import ARRAY
|
||||
from redash import models
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
@@ -21,10 +21,10 @@ depends_on = None
|
||||
|
||||
def upgrade():
|
||||
op.add_column(
|
||||
"dashboards", sa.Column("tags", postgresql.ARRAY(sa.Unicode()), nullable=True)
|
||||
"dashboards", sa.Column("tags", ARRAY(sa.Unicode()), nullable=True)
|
||||
)
|
||||
op.add_column(
|
||||
"queries", sa.Column("tags", postgresql.ARRAY(sa.Unicode()), nullable=True)
|
||||
"queries", sa.Column("tags", ARRAY(sa.Unicode()), nullable=True)
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -7,17 +7,14 @@ Create Date: 2020-12-14 21:42:48.661684
|
||||
"""
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy.dialects import postgresql
|
||||
from sqlalchemy.dialects.postgresql import BYTEA
|
||||
from sqlalchemy.sql import table
|
||||
from sqlalchemy_utils.types.encrypted.encrypted_type import FernetEngine
|
||||
|
||||
from redash import settings
|
||||
from redash.utils.configuration import ConfigurationContainer
|
||||
from redash.models.base import key_type
|
||||
from redash.models.types import (
|
||||
EncryptedConfiguration,
|
||||
Configuration,
|
||||
)
|
||||
from redash.models.types import EncryptedConfiguration
|
||||
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
@@ -30,7 +27,7 @@ depends_on = None
|
||||
def upgrade():
|
||||
op.add_column(
|
||||
"notification_destinations",
|
||||
sa.Column("encrypted_options", postgresql.BYTEA(), nullable=True)
|
||||
sa.Column("encrypted_options", BYTEA(), nullable=True)
|
||||
)
|
||||
|
||||
# copy values
|
||||
@@ -45,7 +42,14 @@ def upgrade():
|
||||
)
|
||||
),
|
||||
),
|
||||
sa.Column("options", ConfigurationContainer.as_mutable(Configuration)),
|
||||
sa.Column(
|
||||
"options",
|
||||
ConfigurationContainer.as_mutable(
|
||||
EncryptedConfiguration(
|
||||
sa.Text, settings.DATASOURCE_SECRET_KEY, FernetEngine
|
||||
)
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
conn = op.get_bind()
|
||||
|
||||
@@ -7,7 +7,7 @@ Create Date: 2018-11-08 16:12:17.023569
|
||||
"""
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy.dialects import postgresql
|
||||
from sqlalchemy.dialects.postgresql import JSON
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = "e7f8a917aa8e"
|
||||
@@ -21,7 +21,7 @@ def upgrade():
|
||||
"users",
|
||||
sa.Column(
|
||||
"details",
|
||||
postgresql.JSON(astext_type=sa.Text()),
|
||||
JSON(astext_type=sa.Text()),
|
||||
server_default="{}",
|
||||
nullable=True,
|
||||
),
|
||||
|
||||
@@ -7,7 +7,7 @@ Create Date: 2022-01-31 15:24:16.507888
|
||||
"""
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy.dialects import postgresql
|
||||
from sqlalchemy.dialects.postgresql import JSON, JSONB
|
||||
|
||||
from redash.models import db
|
||||
|
||||
@@ -23,8 +23,8 @@ def upgrade():
|
||||
|
||||
### commands auto generated by Alembic - please adjust! ###
|
||||
op.alter_column('users', 'details',
|
||||
existing_type=postgresql.JSON(astext_type=sa.Text()),
|
||||
type_=postgresql.JSONB(astext_type=sa.Text()),
|
||||
existing_type=JSON(astext_type=sa.Text()),
|
||||
type_=JSONB(astext_type=sa.Text()),
|
||||
existing_nullable=True,
|
||||
existing_server_default=sa.text("'{}'::jsonb"))
|
||||
### end Alembic commands ###
|
||||
@@ -52,8 +52,8 @@ def downgrade():
|
||||
connection.execute(update_query)
|
||||
db.session.commit()
|
||||
op.alter_column('users', 'details',
|
||||
existing_type=postgresql.JSONB(astext_type=sa.Text()),
|
||||
type_=postgresql.JSON(astext_type=sa.Text()),
|
||||
existing_type=JSONB(astext_type=sa.Text()),
|
||||
type_=JSON(astext_type=sa.Text()),
|
||||
existing_nullable=True,
|
||||
existing_server_default=sa.text("'{}'::json"))
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
command = "cd ../ && yarn cache clean && yarn --frozen-lockfile --network-concurrency 1 && yarn build && cd ./client"
|
||||
|
||||
[build.environment]
|
||||
NODE_VERSION = "16.20.1"
|
||||
NODE_VERSION = "18"
|
||||
NETLIFY_USE_YARN = "true"
|
||||
YARN_VERSION = "1.22.19"
|
||||
CYPRESS_INSTALL_BINARY = "0"
|
||||
|
||||
package.json (20 changed lines)
@@ -1,20 +1,19 @@
|
||||
{
|
||||
"name": "redash-client",
|
||||
"version": "23.12.0-dev",
|
||||
"version": "24.05.0-dev",
|
||||
"description": "The frontend part of Redash.",
|
||||
"main": "index.js",
|
||||
"scripts": {
|
||||
"start": "npm-run-all --parallel watch:viz webpack-dev-server",
|
||||
"clean": "rm -rf ./client/dist/",
|
||||
"build:viz": "(cd viz-lib && yarn build:babel)",
|
||||
"build": "yarn clean && yarn build:viz && NODE_ENV=production webpack",
|
||||
"build:old-node-version": "yarn clean && NODE_ENV=production node --max-old-space-size=4096 node_modules/.bin/webpack",
|
||||
"watch:app": "webpack watch --progress",
|
||||
"build": "yarn clean && yarn build:viz && NODE_OPTIONS=--openssl-legacy-provider NODE_ENV=production webpack",
|
||||
"watch:app": "NODE_OPTIONS=--openssl-legacy-provider webpack watch --progress",
|
||||
"watch:viz": "(cd viz-lib && yarn watch:babel)",
|
||||
"watch": "npm-run-all --parallel watch:*",
|
||||
"webpack-dev-server": "webpack-dev-server",
|
||||
"analyze": "yarn clean && BUNDLE_ANALYZER=on webpack",
|
||||
"analyze:build": "yarn clean && NODE_ENV=production BUNDLE_ANALYZER=on webpack",
|
||||
"analyze": "yarn clean && BUNDLE_ANALYZER=on NODE_OPTIONS=--openssl-legacy-provider webpack",
|
||||
"analyze:build": "yarn clean && NODE_ENV=production BUNDLE_ANALYZER=on NODE_OPTIONS=--openssl-legacy-provider webpack",
|
||||
"lint": "yarn lint:base --ext .js --ext .jsx --ext .ts --ext .tsx ./client",
|
||||
"lint:fix": "yarn lint:base --fix --ext .js --ext .jsx --ext .ts --ext .tsx ./client",
|
||||
"lint:base": "eslint --config ./client/.eslintrc.js --ignore-path ./client/.eslintignore",
|
||||
@@ -25,7 +24,7 @@
|
||||
"jest": "TZ=Africa/Khartoum jest",
|
||||
"test": "run-s type-check jest",
|
||||
"test:watch": "jest --watch",
|
||||
"cypress": "node client/cypress/cypress.js",
|
||||
"cypress": "COMPOSE_PROFILES=local node client/cypress/cypress.js",
|
||||
"preinstall": "cd viz-lib && yarn link --link-folder ../.yarn",
|
||||
"postinstall": "(cd viz-lib && yarn --frozen-lockfile && yarn build:babel) && yarn link --link-folder ./.yarn @redash/viz"
|
||||
},
|
||||
@@ -34,7 +33,8 @@
|
||||
"url": "git+https://github.com/getredash/redash.git"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">14.16.0 <17.0.0",
|
||||
"node": ">16.0 <21.0",
|
||||
"npm": "please-use-yarn",
|
||||
"yarn": "^1.22.10"
|
||||
},
|
||||
"author": "Redash Contributors",
|
||||
@@ -178,6 +178,10 @@
|
||||
"viz-lib/**"
|
||||
]
|
||||
},
|
||||
"browser": {
|
||||
"fs": false,
|
||||
"path": false
|
||||
},
|
||||
"//": "browserslist set to 'Async functions' compatibility",
|
||||
"browserslist": [
|
||||
"Edge >= 15",
|
||||
|
||||
poetry.lock (generated, 1528 changed lines; diff suppressed because it is too large)
@@ -12,7 +12,7 @@ force-exclude = '''
|
||||
|
||||
[tool.poetry]
|
||||
name = "redash"
|
||||
version = "23.12.0-dev"
|
||||
version = "24.05.0-dev"
|
||||
description = "Make Your Company Data Driven. Connect to any data source, easily visualize, dashboard and share your data."
|
||||
authors = ["Arik Fraimovich <arik@redash.io>"]
|
||||
# to be added to/removed from the mailing list, please reach out to Arik via the above email or Discord
|
||||
@@ -43,10 +43,10 @@ flask-wtf = "1.1.1"
|
||||
funcy = "1.13"
|
||||
gevent = "23.9.1"
|
||||
greenlet = "2.0.2"
|
||||
gunicorn = "20.0.4"
|
||||
gunicorn = "22.0.0"
|
||||
httplib2 = "0.19.0"
|
||||
itsdangerous = "2.1.2"
|
||||
jinja2 = "3.1.2"
|
||||
jinja2 = "3.1.3"
|
||||
jsonschema = "3.1.1"
|
||||
markupsafe = "2.1.1"
|
||||
maxminddb-geolite2 = "2018.703"
|
||||
@@ -66,15 +66,14 @@ redis = "4.6.0"
|
||||
regex = "2023.8.8"
|
||||
requests = "2.31.0"
|
||||
restrictedpython = "6.2"
|
||||
rq = "1.9.0"
|
||||
rq-scheduler = "0.11.0"
|
||||
rq = "1.16.1"
|
||||
rq-scheduler = "0.13.1"
|
||||
semver = "2.8.1"
|
||||
sentry-sdk = "1.28.1"
|
||||
simplejson = "3.16.0"
|
||||
sqlalchemy = "1.3.24"
|
||||
sqlalchemy-searchable = "1.2.0"
|
||||
sqlalchemy-utils = "0.34.2"
|
||||
sqlparse = "0.4.4"
|
||||
sqlparse = "0.5.0"
|
||||
sshtunnel = "0.1.5"
|
||||
statsd = "3.3.0"
|
||||
supervisor = "4.1.0"
|
||||
@@ -104,13 +103,14 @@ google-api-python-client = "1.7.11"
|
||||
gspread = "5.11.2"
|
||||
impyla = "0.16.0"
|
||||
influxdb = "5.2.3"
|
||||
influxdb-client = "1.38.0"
|
||||
memsql = "3.2.0"
|
||||
mysqlclient = "2.1.1"
|
||||
nzalchemy = "^11.0.2"
|
||||
nzpy = ">=1.15"
|
||||
oauth2client = "4.1.3"
|
||||
openpyxl = "3.0.7"
|
||||
oracledb = "1.4.0"
|
||||
oracledb = "2.1.2"
|
||||
pandas = "1.3.4"
|
||||
phoenixdb = "0.7"
|
||||
pinotdb = ">=0.4.5"
|
||||
@@ -121,12 +121,11 @@ pydruid = "0.5.7"
|
||||
pyexasol = "0.12.0"
|
||||
pyhive = "0.6.1"
|
||||
pyignite = "0.6.1"
|
||||
pymongo = { version = "4.3.3", extras = ["srv", "tls"] }
|
||||
pymongo = { version = "4.6.3", extras = ["srv", "tls"] }
|
||||
pymssql = "2.2.8"
|
||||
pyodbc = "4.0.28"
|
||||
python-arango = "6.1.0"
|
||||
python-rapidjson = "1.1.0"
|
||||
qds-sdk = ">=1.9.6"
|
||||
requests-aws-sign = "0.1.5"
|
||||
sasl = ">=0.1.3"
|
||||
simple-salesforce = "0.74.3"
|
||||
@@ -152,7 +151,7 @@ optional = true
|
||||
pytest = "7.4.0"
|
||||
coverage = "7.2.7"
|
||||
freezegun = "1.2.1"
|
||||
jwcrypto = "1.5.0"
|
||||
jwcrypto = "1.5.6"
|
||||
mock = "5.0.2"
|
||||
pre-commit = "3.3.3"
|
||||
ptpython = "3.0.23"
|
||||
@@ -168,7 +167,7 @@ build-backend = "poetry.core.masonry.api"
|
||||
[tool.ruff]
|
||||
exclude = [".git", "viz-lib", "node_modules", "migrations"]
|
||||
ignore = ["E501"]
|
||||
select = ["C9", "E", "F", "W", "I001"]
|
||||
select = ["C9", "E", "F", "W", "I001", "UP004"]
|
||||
|
||||
[tool.ruff.mccabe]
|
||||
max-complexity = 15
|
||||
|
||||
@@ -14,7 +14,7 @@ from redash.app import create_app # noqa
|
||||
from redash.destinations import import_destinations
|
||||
from redash.query_runner import import_query_runners
|
||||
|
||||
__version__ = "23.12.0-dev"
|
||||
__version__ = "24.05.0-dev"
|
||||
|
||||
|
||||
if os.environ.get("REMOTE_DEBUG"):
|
||||
|
||||
@@ -36,14 +36,10 @@ def create_app():
|
||||
from .metrics import request as request_metrics
|
||||
from .models import db, users
|
||||
from .utils import sentry
|
||||
from .version_check import reset_new_version_status
|
||||
|
||||
sentry.init()
|
||||
app = Redash()
|
||||
|
||||
# Check and update the cached version for use by the client
|
||||
reset_new_version_status()
|
||||
|
||||
security.init_app(app)
|
||||
request_metrics.init_app(app)
|
||||
db.init_app(app)
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
import json
|
||||
import logging
|
||||
|
||||
import jwt
|
||||
import requests
|
||||
import simplejson
|
||||
|
||||
logger = logging.getLogger("jwt_auth")
|
||||
|
||||
@@ -25,7 +25,7 @@ def get_public_key_from_net(url):
|
||||
if "keys" in data:
|
||||
public_keys = []
|
||||
for key_dict in data["keys"]:
|
||||
public_key = jwt.algorithms.RSAAlgorithm.from_jwk(simplejson.dumps(key_dict))
|
||||
public_key = jwt.algorithms.RSAAlgorithm.from_jwk(json.dumps(key_dict))
|
||||
public_keys.append(public_key)
|
||||
|
||||
get_public_keys.key_cache[url] = public_keys
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import json
|
||||
|
||||
import click
|
||||
import simplejson
|
||||
from flask import current_app
|
||||
from flask.cli import FlaskGroup, run_command, with_appcontext
|
||||
from rq import Connection
|
||||
@@ -53,7 +54,7 @@ def version():
|
||||
@manager.command()
|
||||
def status():
|
||||
with Connection(rq_redis_connection):
|
||||
print(simplejson.dumps(get_status(), indent=2))
|
||||
print(json.dumps(get_status(), indent=2))
|
||||
|
||||
|
||||
@manager.command()
|
||||
|
||||
@@ -5,7 +5,7 @@ logger = logging.getLogger(__name__)
|
||||
__all__ = ["BaseDestination", "register", "get_destination", "import_destinations"]
|
||||
|
||||
|
||||
class BaseDestination(object):
|
||||
class BaseDestination:
|
||||
deprecated = False
|
||||
|
||||
def __init__(self, configuration):
|
||||
|
||||
@@ -42,8 +42,8 @@ class Discord(BaseDestination):
|
||||
"inline": True,
|
||||
},
|
||||
]
|
||||
if alert.options.get("custom_body"):
|
||||
fields.append({"name": "Description", "value": alert.options["custom_body"]})
|
||||
if alert.custom_body:
|
||||
fields.append({"name": "Description", "value": alert.custom_body})
|
||||
if new_state == Alert.TRIGGERED_STATE:
|
||||
if alert.options.get("custom_subject"):
|
||||
text = alert.options["custom_subject"]
|
||||
|
||||
@@ -26,13 +26,13 @@ class Slack(BaseDestination):
|
||||
fields = [
|
||||
{
|
||||
"title": "Query",
|
||||
"type": "mrkdwn",
|
||||
"value": "{host}/queries/{query_id}".format(host=host, query_id=query.id),
|
||||
"short": True,
|
||||
},
|
||||
{
|
||||
"title": "Alert",
|
||||
"type": "mrkdwn",
|
||||
"value": "{host}/alerts/{alert_id}".format(host=host, alert_id=alert.id),
|
||||
"short": True,
|
||||
},
|
||||
]
|
||||
if alert.custom_body:
|
||||
@@ -50,7 +50,7 @@ class Slack(BaseDestination):
|
||||
payload = {"attachments": [{"text": text, "color": color, "fields": fields}]}
|
||||
|
||||
try:
|
||||
resp = requests.post(options.get("url"), data=json_dumps(payload), timeout=5.0)
|
||||
resp = requests.post(options.get("url"), data=json_dumps(payload).encode("utf-8"), timeout=5.0)
|
||||
logging.warning(resp.text)
|
||||
if resp.status_code != 200:
|
||||
logging.error("Slack send ERROR. status_code => {status}".format(status=resp.status_code))
|
||||
|
||||
@@ -236,11 +236,11 @@ api.add_org_resource(
|
||||
)
|
||||
api.add_org_resource(
|
||||
QueryResultResource,
|
||||
"/api/query_results/<query_result_id>.<filetype>",
|
||||
"/api/query_results/<query_result_id>",
|
||||
"/api/query_results/<result_id>.<filetype>",
|
||||
"/api/query_results/<result_id>",
|
||||
"/api/queries/<query_id>/results",
|
||||
"/api/queries/<query_id>/results.<filetype>",
|
||||
"/api/queries/<query_id>/results/<query_result_id>.<filetype>",
|
||||
"/api/queries/<query_id>/results/<result_id>.<filetype>",
|
||||
endpoint="query_result",
|
||||
)
|
||||
api.add_org_resource(
|
||||
|
||||
@@ -15,7 +15,6 @@ from redash.authentication.account import (
|
||||
)
|
||||
from redash.handlers import routes
|
||||
from redash.handlers.base import json_response, org_scoped_rule
|
||||
from redash.version_check import get_latest_version
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -256,15 +255,11 @@ def number_format_config():
|
||||
|
||||
def client_config():
|
||||
if not current_user.is_api_user() and current_user.is_authenticated:
|
||||
client_config = {
|
||||
"newVersionAvailable": bool(get_latest_version()),
|
||||
client_config_inner = {
|
||||
"version": __version__,
|
||||
}
|
||||
else:
|
||||
client_config = {}
|
||||
|
||||
if current_user.has_permission("admin") and current_org.get_setting("beacon_consent") is None:
|
||||
client_config["showBeaconConsentMessage"] = True
|
||||
client_config_inner = {}
|
||||
|
||||
defaults = {
|
||||
"allowScriptsInUserInput": settings.ALLOW_SCRIPTS_IN_USER_INPUT,
|
||||
@@ -284,12 +279,12 @@ def client_config():
|
||||
"tableCellMaxJSONSize": settings.TABLE_CELL_MAX_JSON_SIZE,
|
||||
}
|
||||
|
||||
client_config.update(defaults)
|
||||
client_config.update({"basePath": base_href()})
|
||||
client_config.update(date_time_format_config())
|
||||
client_config.update(number_format_config())
|
||||
client_config_inner.update(defaults)
|
||||
client_config_inner.update({"basePath": base_href()})
|
||||
client_config_inner.update(date_time_format_config())
|
||||
client_config_inner.update(number_format_config())
|
||||
|
||||
return client_config
|
||||
return client_config_inner
|
||||
|
||||
|
||||
def messages():
|
||||
|
||||
@@ -5,7 +5,7 @@ from flask import Blueprint, current_app, request
|
||||
from flask_login import current_user, login_required
|
||||
from flask_restful import Resource, abort
|
||||
from sqlalchemy import cast
|
||||
from sqlalchemy.dialects import postgresql
|
||||
from sqlalchemy.dialects.postgresql import ARRAY
|
||||
from sqlalchemy.orm.exc import NoResultFound
|
||||
from sqlalchemy_utils.functions import sort_query
|
||||
|
||||
@@ -114,7 +114,7 @@ def json_response(response):
|
||||
def filter_by_tags(result_set, column):
|
||||
if request.args.getlist("tags"):
|
||||
tags = request.args.getlist("tags")
|
||||
result_set = result_set.filter(cast(column, postgresql.ARRAY(db.Text)).contains(tags))
|
||||
result_set = result_set.filter(cast(column, ARRAY(db.Text)).contains(tags))
|
||||
return result_set
|
||||
|
||||
|
||||
|
||||
@@ -96,7 +96,7 @@ class DashboardListResource(BaseResource):
|
||||
org=self.current_org,
|
||||
user=self.current_user,
|
||||
is_draft=True,
|
||||
layout="[]",
|
||||
layout=[],
|
||||
)
|
||||
models.db.session.add(dashboard)
|
||||
models.db.session.commit()
|
||||
|
||||
@@ -5,6 +5,7 @@ import regex
|
||||
from flask import make_response, request
|
||||
from flask_login import current_user
|
||||
from flask_restful import abort
|
||||
from rq.job import JobStatus
|
||||
|
||||
from redash import models, settings
|
||||
from redash.handlers.base import BaseResource, get_object_or_404, record_event
|
||||
@@ -38,7 +39,7 @@ from redash.utils import (
|
||||
|
||||
|
||||
def error_response(message, http_status=400):
|
||||
return {"job": {"status": 4, "error": message}}, http_status
|
||||
return {"job": {"status": JobStatus.FAILED, "error": message}}, http_status
|
||||
|
||||
|
||||
error_messages = {
|
||||
@@ -225,7 +226,7 @@ class QueryResultResource(BaseResource):
|
||||
headers["Access-Control-Allow-Credentials"] = str(settings.ACCESS_CONTROL_ALLOW_CREDENTIALS).lower()
|
||||
|
||||
@require_any_of_permission(("view_query", "execute_query"))
|
||||
def options(self, query_id=None, query_result_id=None, filetype="json"):
|
||||
def options(self, query_id=None, result_id=None, filetype="json"):
|
||||
headers = {}
|
||||
self.add_cors_headers(headers)
|
||||
|
||||
@@ -285,12 +286,12 @@ class QueryResultResource(BaseResource):
|
||||
return error_messages["no_permission"]
|
||||
|
||||
@require_any_of_permission(("view_query", "execute_query"))
|
||||
def get(self, query_id=None, query_result_id=None, filetype="json"):
|
||||
def get(self, query_id=None, result_id=None, filetype="json"):
|
||||
"""
|
||||
Retrieve query results.
|
||||
|
||||
:param number query_id: The ID of the query whose results should be fetched
|
||||
:param number query_result_id: the ID of the query result to fetch
|
||||
:param number result_id: the ID of the query result to fetch
|
||||
:param string filetype: Format to return. One of 'json', 'xlsx', or 'csv'. Defaults to 'json'.
|
||||
|
||||
:<json number id: Query result ID
|
||||
@@ -305,13 +306,13 @@ class QueryResultResource(BaseResource):
|
||||
# This method handles two cases: retrieving result by id & retrieving result by query id.
|
||||
# They need to be split, as they have different logic (for example, retrieving by query id
|
||||
# should check for query parameters and shouldn't cache the result).
|
||||
should_cache = query_result_id is not None
|
||||
should_cache = result_id is not None
|
||||
|
||||
query_result = None
|
||||
query = None
|
||||
|
||||
if query_result_id:
|
||||
query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query_result_id, self.current_org)
|
||||
if result_id:
|
||||
query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, result_id, self.current_org)
|
||||
|
||||
if query_id is not None:
|
||||
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
|
||||
@@ -346,7 +347,7 @@ class QueryResultResource(BaseResource):
|
||||
event["object_id"] = query_id
|
||||
else:
|
||||
event["object_type"] = "query_result"
|
||||
event["object_id"] = query_result_id
|
||||
event["object_id"] = result_id
|
||||
|
||||
self.record_event(event)
|
||||
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
from flask import g, redirect, render_template, request, url_for
|
||||
from flask_login import login_user
|
||||
from wtforms import BooleanField, Form, PasswordField, StringField, validators
|
||||
from wtforms import Form, PasswordField, StringField, validators
|
||||
from wtforms.fields.html5 import EmailField
|
||||
|
||||
from redash import settings
|
||||
from redash.authentication.org_resolving import current_org
|
||||
from redash.handlers.base import routes
|
||||
from redash.models import Group, Organization, User, db
|
||||
from redash.tasks.general import subscribe
|
||||
|
||||
|
||||
class SetupForm(Form):
|
||||
@@ -15,8 +14,6 @@ class SetupForm(Form):
|
||||
email = EmailField("Email Address", validators=[validators.Email()])
|
||||
password = PasswordField("Password", validators=[validators.Length(6)])
|
||||
org_name = StringField("Organization Name", validators=[validators.InputRequired()])
|
||||
security_notifications = BooleanField()
|
||||
newsletter = BooleanField()
|
||||
|
||||
|
||||
def create_org(org_name, user_name, email, password):
|
||||
@@ -57,8 +54,6 @@ def setup():
|
||||
return redirect("/")
|
||||
|
||||
form = SetupForm(request.form)
|
||||
form.newsletter.data = True
|
||||
form.security_notifications.data = True
|
||||
|
||||
if request.method == "POST" and form.validate():
|
||||
default_org, user = create_org(form.org_name.data, form.name.data, form.email.data, form.password.data)
|
||||
@@ -66,10 +61,6 @@ def setup():
|
||||
g.org = default_org
|
||||
login_user(user)
|
||||
|
||||
# signup to newsletter if needed
|
||||
if form.newsletter.data or form.security_notifications:
|
||||
subscribe.delay(form.data)
|
||||
|
||||
return redirect(url_for("redash.index", org_slug=None))
|
||||
|
||||
return render_template("setup.html", form=form)
|
||||
|
||||
@@ -7,7 +7,6 @@ from redash.permissions import (
|
||||
require_permission,
|
||||
)
|
||||
from redash.serializers import serialize_visualization
|
||||
from redash.utils import json_dumps
|
||||
|
||||
|
||||
class VisualizationListResource(BaseResource):
|
||||
@@ -18,7 +17,6 @@ class VisualizationListResource(BaseResource):
|
||||
query = get_object_or_404(models.Query.get_by_id_and_org, kwargs.pop("query_id"), self.current_org)
|
||||
require_object_modify_permission(query, self.current_user)
|
||||
|
||||
kwargs["options"] = json_dumps(kwargs["options"])
|
||||
kwargs["query_rel"] = query
|
||||
|
||||
vis = models.Visualization(**kwargs)
|
||||
@@ -34,8 +32,6 @@ class VisualizationResource(BaseResource):
|
||||
require_object_modify_permission(vis.query_rel, self.current_user)
|
||||
|
||||
kwargs = request.get_json(force=True)
|
||||
if "options" in kwargs:
|
||||
kwargs["options"] = json_dumps(kwargs["options"])
|
||||
|
||||
kwargs.pop("id", None)
|
||||
kwargs.pop("query_id", None)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import json
|
||||
import os
|
||||
|
||||
import simplejson
|
||||
from flask import url_for
|
||||
|
||||
WEBPACK_MANIFEST_PATH = os.path.join(os.path.dirname(__file__), "../../client/dist/", "asset-manifest.json")
|
||||
@@ -15,7 +15,7 @@ def configure_webpack(app):
|
||||
if assets is None or app.debug:
|
||||
try:
|
||||
with open(WEBPACK_MANIFEST_PATH) as fp:
|
||||
assets = simplejson.load(fp)
|
||||
assets = json.load(fp)
|
||||
except IOError:
|
||||
app.logger.exception("Unable to load webpack manifest")
|
||||
assets = {}
|
||||
|
||||
@@ -9,7 +9,6 @@ from redash.permissions import (
|
||||
view_only,
|
||||
)
|
||||
from redash.serializers import serialize_widget
|
||||
from redash.utils import json_dumps
|
||||
|
||||
|
||||
class WidgetListResource(BaseResource):
|
||||
@@ -30,7 +29,6 @@ class WidgetListResource(BaseResource):
|
||||
dashboard = models.Dashboard.get_by_id_and_org(widget_properties.get("dashboard_id"), self.current_org)
|
||||
require_object_modify_permission(dashboard, self.current_user)
|
||||
|
||||
widget_properties["options"] = json_dumps(widget_properties["options"])
|
||||
widget_properties.pop("id", None)
|
||||
|
||||
visualization_id = widget_properties.pop("visualization_id")
|
||||
@@ -44,7 +42,6 @@ class WidgetListResource(BaseResource):
|
||||
|
||||
widget = models.Widget(**widget_properties)
|
||||
models.db.session.add(widget)
|
||||
models.db.session.commit()
|
||||
|
||||
models.db.session.commit()
|
||||
return serialize_widget(widget)
|
||||
@@ -65,7 +62,7 @@ class WidgetResource(BaseResource):
|
||||
require_object_modify_permission(widget.dashboard, self.current_user)
|
||||
widget_properties = request.get_json(force=True)
|
||||
widget.text = widget_properties["text"]
|
||||
widget.options = json_dumps(widget_properties["options"])
|
||||
widget.options = widget_properties["options"]
|
||||
models.db.session.commit()
|
||||
return serialize_widget(widget)
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ import time
|
||||
|
||||
import pytz
|
||||
from sqlalchemy import UniqueConstraint, and_, cast, distinct, func, or_
|
||||
from sqlalchemy.dialects import postgresql
|
||||
from sqlalchemy.dialects.postgresql import ARRAY, DOUBLE_PRECISION, JSONB
from sqlalchemy.event import listens_for
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import (
@@ -40,14 +40,17 @@ from redash.models.base import (
from redash.models.changes import Change, ChangeTrackingMixin # noqa
from redash.models.mixins import BelongsToOrgMixin, TimestampMixin
from redash.models.organizations import Organization
from redash.models.parameterized_query import ParameterizedQuery
from redash.models.parameterized_query import (
InvalidParameterError,
ParameterizedQuery,
QueryDetachedFromDataSourceError,
)
from redash.models.types import (
Configuration,
EncryptedConfiguration,
JSONText,
MutableDict,
MutableList,
PseudoJSON,
pseudo_json_cast_property,
json_cast_property,
)
from redash.models.users import ( # noqa
AccessPermission,
@@ -80,7 +83,7 @@ from redash.utils.configuration import ConfigurationContainer
logger = logging.getLogger(__name__)


class ScheduledQueriesExecutions(object):
class ScheduledQueriesExecutions:
KEY_NAME = "sq:executed_at"

def __init__(self):
@@ -123,7 +126,10 @@ class DataSource(BelongsToOrgMixin, db.Model):

data_source_groups = db.relationship("DataSourceGroup", back_populates="data_source", cascade="all")
__tablename__ = "data_sources"
__table_args__ = (db.Index("data_sources_org_id_name", "org_id", "name"),)
__table_args__ = (
db.Index("data_sources_org_id_name", "org_id", "name"),
{"extend_existing": True},
)

def __eq__(self, other):
return self.id == other.id
@@ -221,7 +227,16 @@ class DataSource(BelongsToOrgMixin, db.Model):

def _sort_schema(self, schema):
return [
{"name": i["name"], "columns": sorted(i["columns"], key=lambda x: x["name"] if isinstance(x, dict) else x)}
{
"name": i["name"],
"description": i.get("description"),
"columns": sorted(
i["columns"],
key=lambda col: (
("partition" in col["type"], col.get("idx", 0), col["name"]) if isinstance(col, dict) else col
),
),
}
for i in sorted(schema, key=lambda x: x["name"])
]

@@ -297,34 +312,11 @@ class DataSourceGroup(db.Model):
view_only = Column(db.Boolean, default=False)

__tablename__ = "data_source_groups"


DESERIALIZED_DATA_ATTR = "_deserialized_data"


class DBPersistence(object):
@property
def data(self):
if self._data is None:
return None

if not hasattr(self, DESERIALIZED_DATA_ATTR):
setattr(self, DESERIALIZED_DATA_ATTR, json_loads(self._data))

return self._deserialized_data

@data.setter
def data(self, data):
if hasattr(self, DESERIALIZED_DATA_ATTR):
delattr(self, DESERIALIZED_DATA_ATTR)
self._data = data


QueryResultPersistence = settings.dynamic_settings.QueryResultPersistence or DBPersistence
__table_args__ = ({"extend_existing": True},)


@generic_repr("id", "org_id", "data_source_id", "query_hash", "runtime", "retrieved_at")
class QueryResult(db.Model, QueryResultPersistence, BelongsToOrgMixin):
class QueryResult(db.Model, BelongsToOrgMixin):
id = primary_key("QueryResult")
org_id = Column(key_type("Organization"), db.ForeignKey("organizations.id"))
org = db.relationship(Organization)
@@ -332,8 +324,8 @@ class QueryResult(db.Model, QueryResultPersistence, BelongsToOrgMixin):
data_source = db.relationship(DataSource, backref=backref("query_results"))
query_hash = Column(db.String(32), index=True)
query_text = Column("query", db.Text)
_data = Column("data", db.Text)
runtime = Column(postgresql.DOUBLE_PRECISION)
data = Column(JSONText, nullable=True)
runtime = Column(DOUBLE_PRECISION)
retrieved_at = Column(db.DateTime(True))

__tablename__ = "query_results"
@@ -474,11 +466,11 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
last_modified_by = db.relationship(User, backref="modified_queries", foreign_keys=[last_modified_by_id])
is_archived = Column(db.Boolean, default=False, index=True)
is_draft = Column(db.Boolean, default=True, index=True)
schedule = Column(MutableDict.as_mutable(PseudoJSON), nullable=True)
interval = pseudo_json_cast_property(db.Integer, "schedule", "interval", default=0)
schedule = Column(MutableDict.as_mutable(JSONB), nullable=True)
interval = json_cast_property(db.Integer, "schedule", "interval", default=0)
schedule_failures = Column(db.Integer, default=0)
visualizations = db.relationship("Visualization", cascade="all, delete-orphan")
options = Column(MutableDict.as_mutable(PseudoJSON), default={})
options = Column(MutableDict.as_mutable(JSONB), default={})
search_vector = Column(
TSVectorType(
"id",
@@ -489,7 +481,7 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
),
nullable=True,
)
tags = Column("tags", MutableList.as_mutable(postgresql.ARRAY(db.Unicode)), nullable=True)
tags = Column("tags", MutableList.as_mutable(ARRAY(db.Unicode)), nullable=True)

query_class = SearchBaseQuery
__tablename__ = "queries"
@@ -525,7 +517,7 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
name="Table",
description="",
type="TABLE",
options="{}",
options={},
)
)
return query
@@ -591,11 +583,12 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
@classmethod
def past_scheduled_queries(cls):
now = utils.utcnow()
queries = Query.query.filter(Query.schedule.isnot(None)).order_by(Query.id)
queries = Query.query.filter(func.jsonb_typeof(Query.schedule) != "null").order_by(Query.id)
return [
query
for query in queries
if query.schedule["until"] is not None
if "until" in query.schedule
and query.schedule["until"] is not None
and pytz.utc.localize(datetime.datetime.strptime(query.schedule["until"], "%Y-%m-%d")) <= now
]

@@ -603,7 +596,7 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
def outdated_queries(cls):
queries = (
Query.query.options(joinedload(Query.latest_query_data).load_only("retrieved_at"))
.filter(Query.schedule.isnot(None))
.filter(func.jsonb_typeof(Query.schedule) != "null")
.order_by(Query.id)
.all()
)
@@ -831,7 +824,20 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
def update_query_hash(self):
should_apply_auto_limit = self.options.get("apply_auto_limit", False) if self.options else False
query_runner = self.data_source.query_runner if self.data_source else BaseQueryRunner({})
self.query_hash = query_runner.gen_query_hash(self.query_text, should_apply_auto_limit)
query_text = self.query_text

parameters_dict = {p["name"]: p.get("value") for p in self.parameters} if self.options else {}
if any(parameters_dict):
try:
query_text = self.parameterized.apply(parameters_dict).query
except InvalidParameterError as e:
logging.info(f"Unable to update hash for query {self.id} because of invalid parameters: {str(e)}")
except QueryDetachedFromDataSourceError as e:
logging.info(
f"Unable to update hash for query {self.id} because of dropdown query {e.query_id} is unattached from datasource"
)

self.query_hash = query_runner.gen_query_hash(query_text, should_apply_auto_limit)


@listens_for(Query, "before_insert")
@@ -936,7 +942,7 @@ class Alert(TimestampMixin, BelongsToOrgMixin, db.Model):
query_rel = db.relationship(Query, backref=backref("alerts", cascade="all"))
user_id = Column(key_type("User"), db.ForeignKey("users.id"))
user = db.relationship(User, backref="alerts")
options = Column(MutableDict.as_mutable(PseudoJSON))
options = Column(MutableDict.as_mutable(JSONB), nullable=True)
state = Column(db.String(255), default=UNKNOWN_STATE)
subscriptions = db.relationship("AlertSubscription", cascade="all, delete-orphan")
last_triggered_at = Column(db.DateTime(True), nullable=True)
@@ -1047,13 +1053,13 @@ class Dashboard(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model
user_id = Column(key_type("User"), db.ForeignKey("users.id"))
user = db.relationship(User)
# layout is no longer used, but kept so we know how to render old dashboards.
layout = Column(db.Text)
layout = Column(MutableList.as_mutable(JSONB), default=[])
dashboard_filters_enabled = Column(db.Boolean, default=False)
is_archived = Column(db.Boolean, default=False, index=True)
is_draft = Column(db.Boolean, default=True, index=True)
widgets = db.relationship("Widget", backref="dashboard", lazy="dynamic")
tags = Column("tags", MutableList.as_mutable(postgresql.ARRAY(db.Unicode)), nullable=True)
options = Column(MutableDict.as_mutable(postgresql.JSON), server_default="{}", default={})
tags = Column("tags", MutableList.as_mutable(ARRAY(db.Unicode)), nullable=True)
options = Column(MutableDict.as_mutable(JSONB), default={})

__tablename__ = "dashboards"
__mapper_args__ = {"version_id_col": version}
@@ -1166,7 +1172,7 @@ class Visualization(TimestampMixin, BelongsToOrgMixin, db.Model):
query_rel = db.relationship(Query, back_populates="visualizations")
name = Column(db.String(255))
description = Column(db.String(4096), nullable=True)
options = Column(db.Text)
options = Column(MutableDict.as_mutable(JSONB), nullable=True)

__tablename__ = "visualizations"

@@ -1193,7 +1199,7 @@ class Widget(TimestampMixin, BelongsToOrgMixin, db.Model):
visualization = db.relationship(Visualization, backref=backref("widgets", cascade="delete"))
text = Column(db.Text, nullable=True)
width = Column(db.Integer)
options = Column(db.Text)
options = Column(MutableDict.as_mutable(JSONB), default={})
dashboard_id = Column(key_type("Dashboard"), db.ForeignKey("dashboards.id"), index=True)

__tablename__ = "widgets"
@@ -1225,7 +1231,7 @@ class Event(db.Model):
action = Column(db.String(255))
object_type = Column(db.String(255))
object_id = Column(db.String(255), nullable=True)
additional_properties = Column(MutableDict.as_mutable(PseudoJSON), nullable=True, default={})
additional_properties = Column(MutableDict.as_mutable(JSONB), nullable=True, default={})
created_at = Column(db.DateTime(True), default=db.func.now())

__tablename__ = "events"

@@ -1,13 +1,13 @@
import functools

from flask_sqlalchemy import BaseQuery, SQLAlchemy
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import object_session
from sqlalchemy.pool import NullPool
from sqlalchemy_searchable import SearchQueryMixin, make_searchable, vectorizer

from redash import settings
from redash.utils import json_dumps
from redash.utils import json_dumps, json_loads


class RedashSQLAlchemy(SQLAlchemy):
@@ -28,7 +28,10 @@ class RedashSQLAlchemy(SQLAlchemy):
return options


db = RedashSQLAlchemy(session_options={"expire_on_commit": False})
db = RedashSQLAlchemy(
session_options={"expire_on_commit": False},
engine_options={"json_serializer": json_dumps, "json_deserializer": json_loads},
)
# Make sure the SQLAlchemy mappers are all properly configured first.
# This is required by SQLAlchemy-Searchable as it adds DDL listeners
# on the configuration phase of models.
@@ -50,7 +53,7 @@ def integer_vectorizer(column):
return db.func.cast(column, db.Text)


@vectorizer(postgresql.UUID)
@vectorizer(UUID)
def uuid_vectorizer(column):
return db.func.cast(column, db.Text)

@@ -68,7 +71,7 @@ def gfk_type(cls):
return cls


class GFKBase(object):
class GFKBase:
"""
Compatibility with 'generic foreign key' approach Peewee used.
"""

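Aside (not part of the diff): the `engine_options` added above route all JSON/JSONB (de)serialization through Redash's own helpers. A minimal standalone sketch of the same engine configuration, assuming a placeholder connection URL:

```python
from sqlalchemy import create_engine

from redash.utils import json_dumps, json_loads

# Plain-SQLAlchemy equivalent of the RedashSQLAlchemy(engine_options=...) call above;
# the URL is a placeholder, not a value taken from the diff.
engine = create_engine(
    "postgresql:///postgres",
    json_serializer=json_dumps,    # used when writing JSON/JSONB column values
    json_deserializer=json_loads,  # used when reading them back
)
```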
@@ -1,8 +1,8 @@
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.inspection import inspect
from sqlalchemy_utils.models import generic_repr

from .base import Column, GFKBase, db, key_type, primary_key
from .types import PseudoJSON


@generic_repr("id", "object_type", "object_id", "created_at")
@@ -13,7 +13,7 @@ class Change(GFKBase, db.Model):
object_version = Column(db.Integer, default=0)
user_id = Column(key_type("User"), db.ForeignKey("users.id"))
user = db.relationship("User", backref="changes")
change = Column(PseudoJSON)
change = Column(JSONB)
created_at = Column(db.DateTime(True), default=db.func.now())

__tablename__ = "changes"
@@ -45,7 +45,7 @@ class Change(GFKBase, db.Model):
)


class ChangeTrackingMixin(object):
class ChangeTrackingMixin:
skipped_fields = ("id", "created_at", "updated_at", "version")
_clean_values = None


@@ -3,7 +3,7 @@ from sqlalchemy.event import listens_for
from .base import Column, db


class TimestampMixin(object):
class TimestampMixin:
updated_at = Column(db.DateTime(True), default=db.func.now(), nullable=False)
created_at = Column(db.DateTime(True), default=db.func.now(), nullable=False)

@@ -17,7 +17,7 @@ def timestamp_before_update(mapper, connection, target):
target.updated_at = db.func.now()


class BelongsToOrgMixin(object):
class BelongsToOrgMixin:
@classmethod
def get_by_id_and_org(cls, object_id, org, org_cls=None):
query = cls.query.filter(cls.id == object_id)

@@ -1,3 +1,4 @@
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm.attributes import flag_modified
from sqlalchemy_utils.models import generic_repr

@@ -5,7 +6,7 @@ from redash.settings.organization import settings as org_settings

from .base import Column, db, primary_key
from .mixins import TimestampMixin
from .types import MutableDict, PseudoJSON
from .types import MutableDict
from .users import Group, User


@@ -17,7 +18,7 @@ class Organization(TimestampMixin, db.Model):
id = primary_key("Organization")
name = Column(db.String(255))
slug = Column(db.String(255), unique=True)
settings = Column(MutableDict.as_mutable(PseudoJSON))
settings = Column(MutableDict.as_mutable(JSONB), default={})
groups = db.relationship("Group", lazy="dynamic")
events = db.relationship("Event", lazy="dynamic", order_by="desc(Event.created_at)")


@@ -103,7 +103,7 @@ def _is_value_within_options(value, dropdown_options, allow_list=False):
return str(value) in dropdown_options


class ParameterizedQuery(object):
class ParameterizedQuery:
def __init__(self, template, schema=None, org=None):
self.schema = schema or []
self.org = org

@@ -1,25 +1,12 @@
from sqlalchemy import cast
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.ext.indexable import index_property
from sqlalchemy.ext.mutable import Mutable
from sqlalchemy.types import TypeDecorator
from sqlalchemy_utils import EncryptedType

from redash.models.base import db
from redash.utils import json_dumps, json_loads
from redash.utils.configuration import ConfigurationContainer

from .base import db


class Configuration(TypeDecorator):
impl = db.Text

def process_bind_param(self, value, dialect):
return value.to_json()

def process_result_value(self, value, dialect):
return ConfigurationContainer.from_json(value)


class EncryptedConfiguration(EncryptedType):
def process_bind_param(self, value, dialect):
@@ -31,8 +18,8 @@ class EncryptedConfiguration(EncryptedType):
)


# XXX replace PseudoJSON and MutableDict with real JSON field
class PseudoJSON(TypeDecorator):
# Utilized for cases when JSON size is bigger than JSONB (255MB) or JSON (10MB) limit
class JSONText(TypeDecorator):
impl = db.Text

def process_bind_param(self, value, dialect):
@@ -107,19 +94,3 @@ class json_cast_property(index_property):
def expr(self, model):
expr = super(json_cast_property, self).expr(model)
return expr.astext.cast(self.cast_type)


class pseudo_json_cast_property(index_property):
"""
A SQLAlchemy index property that is able to cast the
entity attribute as the specified cast type. Useful
for PseudoJSON colums for easier querying/filtering.
"""

def __init__(self, cast_type, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cast_type = cast_type

def expr(self, model):
expr = cast(getattr(model, self.attr_name), JSON)[self.index]
return expr.astext.cast(self.cast_type)

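Aside (not part of the diff): with `schedule` and similar columns now stored as JSONB, `json_cast_property` exposes a key of the JSON document as a typed SQL expression. A small usage sketch, assuming the `Query` model shown above and a running application context; the variable names are illustrative only:

```python
from redash import models

# schedule->>'interval' is cast to integer in SQL, so it can be filtered on directly.
scheduled = models.Query.query.filter(
    models.Query.schedule.isnot(None),
    models.Query.interval > 0,
).all()
```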
@@ -8,7 +8,7 @@ from operator import or_
from flask import current_app, request_started, url_for
from flask_login import AnonymousUserMixin, UserMixin, current_user
from passlib.apps import custom_app_context as pwd_context
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import ARRAY, JSONB
from sqlalchemy_utils import EmailType
from sqlalchemy_utils.models import generic_repr

@@ -60,7 +60,7 @@ def init_app(app):
request_started.connect(update_user_active_at, app)


class PermissionsCheckMixin(object):
class PermissionsCheckMixin:
def has_permission(self, permission):
return self.has_permissions((permission,))

@@ -84,14 +84,14 @@ class User(TimestampMixin, db.Model, BelongsToOrgMixin, UserMixin, PermissionsCh
password_hash = Column(db.String(128), nullable=True)
group_ids = Column(
"groups",
MutableList.as_mutable(postgresql.ARRAY(key_type("Group"))),
MutableList.as_mutable(ARRAY(key_type("Group"))),
nullable=True,
)
api_key = Column(db.String(40), default=lambda: generate_token(40), unique=True)

disabled_at = Column(db.DateTime(True), default=None, nullable=True)
details = Column(
MutableDict.as_mutable(postgresql.JSONB),
MutableDict.as_mutable(JSONB),
nullable=True,
server_default="{}",
default={},
@@ -267,7 +267,7 @@ class Group(db.Model, BelongsToOrgMixin):
org = db.relationship("Organization", back_populates="groups")
type = Column(db.String(255), default=REGULAR_GROUP)
name = Column(db.String(100))
permissions = Column(postgresql.ARRAY(db.String(255)), default=DEFAULT_PERMISSIONS)
permissions = Column(ARRAY(db.String(255)), default=DEFAULT_PERMISSIONS)
created_at = Column(db.DateTime(True), default=db.func.now())

__tablename__ = "groups"

@@ -54,7 +54,7 @@ def require_access(obj, user, need_view_only):
abort(403)


class require_permissions(object):
class require_permissions:
def __init__(self, permissions, allow_one=False):
self.permissions = permissions
self.allow_one = allow_one

@@ -9,7 +9,6 @@ from rq.timeouts import JobTimeoutException
from sshtunnel import open_tunnel

from redash import settings, utils
from redash.utils import json_loads
from redash.utils.requests_session import (
UnacceptableAddressException,
requests_or_advocate,
@@ -114,12 +113,13 @@ class NotSupported(Exception):
pass


class BaseQueryRunner(object):
class BaseQueryRunner:
deprecated = False
should_annotate_query = True
noop_query = None
limit_query = " LIMIT 1000"
limit_keywords = ["LIMIT", "OFFSET"]
limit_after_select = False

def __init__(self, configuration):
self.syntax = "sql"
@@ -243,7 +243,7 @@ class BaseQueryRunner(object):

if error is not None:
raise Exception("Failed running query [%s]." % query)
return json_loads(results)["rows"]
return results["rows"]

@classmethod
def to_dict(cls):
@@ -302,10 +302,19 @@ class BaseSQLQueryRunner(BaseQueryRunner):
parsed_query = sqlparse.parse(query)[0]
limit_tokens = sqlparse.parse(self.limit_query)[0].tokens
length = len(parsed_query.tokens)
if parsed_query.tokens[length - 1].ttype == sqlparse.tokens.Punctuation:
parsed_query.tokens[length - 1 : length - 1] = limit_tokens
if not self.limit_after_select:
if parsed_query.tokens[length - 1].ttype == sqlparse.tokens.Punctuation:
parsed_query.tokens[length - 1 : length - 1] = limit_tokens
else:
parsed_query.tokens += limit_tokens
else:
parsed_query.tokens += limit_tokens
for i in range(length - 1, -1, -1):
if parsed_query[i].value.upper() == "SELECT":
index = parsed_query.token_index(parsed_query[i + 1])
parsed_query = sqlparse.sql.Statement(
parsed_query.tokens[:index] + limit_tokens + parsed_query.tokens[index:]
)
break
return str(parsed_query)

def apply_auto_limit(self, query_text, should_apply_auto_limit):

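Aside (not part of the diff): the recurring change across the query runners below is that `run_query` now returns a plain Python dict plus an error instead of a `json_dumps`-encoded string, leaving serialization to the persistence layer. A hypothetical runner sketching the new contract:

```python
from redash.query_runner import BaseQueryRunner


class ExampleRunner(BaseQueryRunner):
    # Hypothetical runner for illustration only; not part of the codebase.
    noop_query = "SELECT 1"

    def run_query(self, query, user):
        columns = [{"name": "value", "friendly_name": "value", "type": "integer"}]
        rows = [{"value": 1}]
        # Previously: return json_dumps({"columns": columns, "rows": rows}), None
        return {"columns": columns, "rows": rows}, None
```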
@@ -63,5 +63,8 @@ class AmazonElasticsearchService(ElasticSearch2):

self.auth = AWSV4Sign(cred, region, "es")

def get_auth(self):
return self.auth


register(AmazonElasticsearchService)

@@ -7,7 +7,6 @@ from redash.query_runner import (
BaseQueryRunner,
register,
)
from redash.utils import json_dumps

logger = logging.getLogger(__name__)

@@ -81,12 +80,11 @@ class Arango(BaseQueryRunner):
"rows": result,
}

json_data = json_dumps(data, ignore_nan=True)
error = None
except Exception:
raise

return json_data, error
return data, error


register(Arango)

@@ -12,7 +12,6 @@ from redash.query_runner import (
register,
)
from redash.settings import parse_boolean
from redash.utils import json_dumps, json_loads

logger = logging.getLogger(__name__)
ANNOTATE_QUERY = parse_boolean(os.environ.get("ATHENA_ANNOTATE_QUERY", "true"))
@@ -22,7 +21,9 @@ OPTIONAL_CREDENTIALS = parse_boolean(os.environ.get("ATHENA_OPTIONAL_CREDENTIALS

try:
import boto3
import pandas as pd
import pyathena
from pyathena.pandas_cursor import PandasCursor

enabled = True
except ImportError:
@@ -47,7 +48,7 @@ _TYPE_MAPPINGS = {
}


class SimpleFormatter(object):
class SimpleFormatter:
def format(self, operation, parameters=None):
return operation

@@ -189,10 +190,35 @@ class Athena(BaseQueryRunner):
logger.warning("Glue table doesn't have StorageDescriptor: %s", table_name)
continue
if table_name not in schema:
column = [columns["Name"] for columns in table["StorageDescriptor"]["Columns"]]
schema[table_name] = {"name": table_name, "columns": column}
for partition in table.get("PartitionKeys", []):
schema[table_name]["columns"].append(partition["Name"])
columns = []
for cols in table["StorageDescriptor"]["Columns"]:
c = {
"name": cols["Name"],
}
if "Type" in cols:
c["type"] = cols["Type"]
if "Comment" in cols:
c["comment"] = cols["Comment"]
columns.append(c)

schema[table_name] = {
"name": table_name,
"columns": columns,
"description": table.get("Description"),
}
for idx, partition in enumerate(table.get("PartitionKeys", [])):
schema[table_name]["columns"].append(
{
"name": partition["Name"],
"type": "partition",
"idx": idx,
}
)
if "Type" in partition:
_type = partition["Type"]
c["type"] = f"partition ({_type})"
if "Comment" in partition:
c["comment"] = partition["Comment"]
return list(schema.values())

def get_schema(self, get_stats=False):
@@ -210,7 +236,6 @@ class Athena(BaseQueryRunner):
if error is not None:
self._handle_run_query_error(error)

results = json_loads(results)
for row in results["rows"]:
table_name = "{0}.{1}".format(row["table_schema"], row["table_name"])
if table_name not in schema:
@@ -227,6 +252,7 @@ class Athena(BaseQueryRunner):
kms_key=self.configuration.get("kms_key", None),
work_group=self.configuration.get("work_group", "primary"),
formatter=SimpleFormatter(),
cursor_class=PandasCursor,
**self._get_iam_credentials(user=user),
).cursor()

@@ -234,7 +260,8 @@ class Athena(BaseQueryRunner):
cursor.execute(query)
column_tuples = [(i[0], _TYPE_MAPPINGS.get(i[1], None)) for i in cursor.description]
columns = self.fetch_columns(column_tuples)
rows = [dict(zip(([c["name"] for c in columns]), r)) for i, r in enumerate(cursor.fetchall())]
df = cursor.as_pandas().replace({pd.NA: None})
rows = df.to_dict(orient="records")
qbytes = None
athena_query_id = None
try:
@@ -257,14 +284,13 @@ class Athena(BaseQueryRunner):
},
}

json_data = json_dumps(data, ignore_nan=True)
error = None
except Exception:
if cursor.query_id:
cursor.cancel()
raise

return json_data, error
return data, error


register(Athena)

@@ -13,7 +13,7 @@ from redash.query_runner import (
JobTimeoutException,
register,
)
from redash.utils import json_dumps, json_loads
from redash.utils import json_loads

logger = logging.getLogger(__name__)

@@ -157,17 +157,16 @@ class AxibaseTSD(BaseQueryRunner):
columns, rows = generate_rows_and_columns(data)

data = {"columns": columns, "rows": rows}
json_data = json_dumps(data)
error = None

except SQLException as e:
json_data = None
data = None
error = e.content
except (KeyboardInterrupt, InterruptException, JobTimeoutException):
sql.cancel_query(query_id)
raise

return json_data, error
return data, error

def get_schema(self, get_stats=False):
connection = atsd_client.connect_url(

@@ -8,7 +8,7 @@ from redash.query_runner import (
BaseQueryRunner,
register,
)
from redash.utils import json_dumps, json_loads
from redash.utils import json_loads

try:
from azure.kusto.data.exceptions import KustoServiceError
@@ -124,16 +124,15 @@ class AzureKusto(BaseQueryRunner):

error = None
data = {"columns": columns, "rows": rows}
json_data = json_dumps(data)

except KustoServiceError as err:
json_data = None
data = None
try:
error = err.args[1][0]["error"]["@message"]
except (IndexError, KeyError):
error = err.args[1]

return json_data, error
return data, error

def get_schema(self, get_stats=False):
query = ".show database schema as json"
@@ -143,8 +142,6 @@ class AzureKusto(BaseQueryRunner):
if error is not None:
self._handle_run_query_error(error)

results = json_loads(results)

schema_as_json = json_loads(results["rows"][0]["DatabaseSchema"])
tables_list = schema_as_json["Databases"][self.configuration["database"]]["Tables"].values()


@@ -16,7 +16,7 @@ from redash.query_runner import (
JobTimeoutException,
register,
)
from redash.utils import json_dumps, json_loads
from redash.utils import json_loads

logger = logging.getLogger(__name__)

@@ -100,7 +100,7 @@ class BigQuery(BaseQueryRunner):

def __init__(self, configuration):
super().__init__(configuration)
self.should_annotate_query = configuration["useQueryAnnotation"]
self.should_annotate_query = configuration.get("useQueryAnnotation", False)

@classmethod
def enabled(cls):
@@ -318,7 +318,6 @@ class BigQuery(BaseQueryRunner):
if error is not None:
self._handle_run_query_error(error)

results = json_loads(results)
for row in results["rows"]:
table_name = "{0}.{1}".format(row["table_schema"], row["table_name"])
if table_name not in schema:
@@ -346,9 +345,8 @@ class BigQuery(BaseQueryRunner):
data = self._get_query_result(jobs, query)
error = None

json_data = json_dumps(data, ignore_nan=True)
except apiclient.errors.HttpError as e:
json_data = None
data = None
if e.resp.status in [400, 404]:
error = json_loads(e.content)["error"]["message"]
else:
@@ -363,7 +361,7 @@ class BigQuery(BaseQueryRunner):

raise

return json_data, error
return data, error


register(BigQuery)

@@ -5,7 +5,6 @@ from base64 import b64decode
from tempfile import NamedTemporaryFile

from redash.query_runner import BaseQueryRunner, register
from redash.utils import JSONEncoder, json_dumps, json_loads

logger = logging.getLogger(__name__)

@@ -27,13 +26,6 @@ def generate_ssl_options_dict(protocol, cert_path=None):
return ssl_options


class CassandraJSONEncoder(JSONEncoder):
def default(self, o):
if isinstance(o, sortedset):
return list(o)
return super(CassandraJSONEncoder, self).default(o)


class Cassandra(BaseQueryRunner):
noop_query = "SELECT dateof(now()) FROM system.local"

@@ -41,6 +33,12 @@ class Cassandra(BaseQueryRunner):
def enabled(cls):
return enabled

@classmethod
def custom_json_encoder(cls, dec, o):
if isinstance(o, sortedset):
return list(o)
return None

@classmethod
def configuration_schema(cls):
return {
@@ -86,7 +84,6 @@ class Cassandra(BaseQueryRunner):
select release_version from system.local;
"""
results, error = self.run_query(query, None)
results = json_loads(results)
release_version = results["rows"][0]["release_version"]

query = """
@@ -107,7 +104,6 @@ class Cassandra(BaseQueryRunner):
)

results, error = self.run_query(query, None)
results = json_loads(results)

schema = {}
for row in results["rows"]:
@@ -155,9 +151,8 @@ class Cassandra(BaseQueryRunner):
rows = [dict(zip(column_names, row)) for row in result]

data = {"columns": columns, "rows": rows}
json_data = json_dumps(data, cls=CassandraJSONEncoder)

return json_data, None
return data, None

def _generate_cert_file(self):
cert_encoded_bytes = self.configuration.get("sslCertificateFile", None)

@@ -15,7 +15,6 @@ from redash.query_runner import (
register,
split_sql_statements,
)
from redash.utils import json_dumps, json_loads

logger = logging.getLogger(__name__)

@@ -85,8 +84,6 @@ class ClickHouse(BaseSQLQueryRunner):
if error is not None:
self._handle_run_query_error(error)

results = json_loads(results)

for row in results["rows"]:
table_name = "{}.{}".format(row["database"], row["table"])

@@ -124,7 +121,7 @@ class ClickHouse(BaseSQLQueryRunner):
verify=verify,
)

if r.status_code != 200:
if not r.ok:
raise Exception(r.text)

# In certain situations the response body can be empty even if the query was successful, for example
@@ -132,7 +129,11 @@ class ClickHouse(BaseSQLQueryRunner):
if not r.text:
return {}

return r.json()
response = r.json()
if "exception" in response:
raise Exception(response["exception"])

return response
except requests.RequestException as e:
if e.response:
details = "({}, Status Code: {})".format(e.__class__.__name__, e.response.status_code)
@@ -200,25 +201,24 @@ class ClickHouse(BaseSQLQueryRunner):
queries = split_multi_query(query)

if not queries:
json_data = None
data = None
error = "Query is empty"
return json_data, error
return data, error

try:
# If just one query was given no session is needed
if len(queries) == 1:
results = self._clickhouse_query(queries[0])
data = self._clickhouse_query(queries[0])
else:
# If more than one query was given, a session is needed. Parameter session_check must be false
# for the first query
session_id = "redash_{}".format(uuid4().hex)

results = self._clickhouse_query(queries[0], session_id, session_check=False)
data = self._clickhouse_query(queries[0], session_id, session_check=False)

for query in queries[1:]:
results = self._clickhouse_query(query, session_id, session_check=True)
data = self._clickhouse_query(query, session_id, session_check=True)

data = json_dumps(results)
error = None
except Exception as e:
data = None

@@ -3,7 +3,7 @@ import datetime
import yaml

from redash.query_runner import BaseQueryRunner, register
from redash.utils import json_dumps, parse_human_time
from redash.utils import parse_human_time

try:
import boto3
@@ -121,7 +121,7 @@ class CloudWatch(BaseQueryRunner):

rows, columns = parse_response(results)

return json_dumps({"rows": rows, "columns": columns}), None
return {"rows": rows, "columns": columns}, None


register(CloudWatch)

@@ -4,7 +4,7 @@ import time
import yaml

from redash.query_runner import BaseQueryRunner, register
from redash.utils import json_dumps, parse_human_time
from redash.utils import parse_human_time

try:
import boto3
@@ -146,7 +146,7 @@ class CloudWatchInsights(BaseQueryRunner):
time.sleep(POLL_INTERVAL)
elapsed += POLL_INTERVAL

return json_dumps(data), None
return data, None


register(CloudWatchInsights)

@@ -9,7 +9,6 @@ import logging
from os import environ

from redash.query_runner import BaseQueryRunner
from redash.utils import json_dumps, json_loads

from . import register

@@ -115,7 +114,7 @@ class CorporateMemoryQueryRunner(BaseQueryRunner):
logger.info("results are: {}".format(results))
# Not sure why we do not use the json package here but all other
# query runner do it the same way :-)
sparql_results = json_loads(results)
sparql_results = results
# transform all bindings to redash rows
rows = []
for sparql_row in sparql_results["results"]["bindings"]:
@@ -133,7 +132,7 @@ class CorporateMemoryQueryRunner(BaseQueryRunner):
columns.append({"name": var, "friendly_name": var, "type": "string"})
# Not sure why we do not use the json package here but all other
# query runner do it the same way :-)
return json_dumps({"columns": columns, "rows": rows})
return {"columns": columns, "rows": rows}

@classmethod
def name(cls):

@@ -10,7 +10,6 @@ from redash.query_runner import (
BaseQueryRunner,
register,
)
from redash.utils import json_dumps

logger = logging.getLogger(__name__)
try:
@@ -155,7 +154,7 @@ class Couchbase(BaseQueryRunner):
rows, columns = parse_results(result.json()["results"])
data = {"columns": columns, "rows": rows}

return json_dumps(data), None
return data, None

@classmethod
def name(cls):

@@ -4,7 +4,6 @@ import logging
import yaml

from redash.query_runner import BaseQueryRunner, NotSupported, register
from redash.utils import json_dumps
from redash.utils.requests_session import (
UnacceptableAddressException,
requests_or_advocate,
@@ -96,19 +95,18 @@ class CSV(BaseQueryRunner):
break
data["rows"] = df[labels].replace({np.nan: None}).to_dict(orient="records")

json_data = json_dumps(data)
error = None
except KeyboardInterrupt:
error = "Query cancelled by user."
json_data = None
data = None
except UnacceptableAddressException:
error = "Can't query private addresses."
json_data = None
data = None
except Exception as e:
error = "Error reading {0}. {1}".format(path, str(e))
json_data = None
data = None

return json_data, error
return data, error

def get_schema(self):
raise NotSupported()

@@ -16,7 +16,6 @@ from redash.query_runner import (
BaseQueryRunner,
register,
)
from redash.utils import json_dumps, json_loads


class Databend(BaseQueryRunner):
@@ -85,11 +84,10 @@ class Databend(BaseQueryRunner):

data = {"columns": columns, "rows": rows}
error = None
json_data = json_dumps(data)
finally:
connection.close()

return json_data, error
return data, error

def get_schema(self, get_stats=False):
query = """
@@ -106,7 +104,6 @@ class Databend(BaseQueryRunner):
self._handle_run_query_error(error)

schema = {}
results = json_loads(results)

for row in results["rows"]:
table_name = "{}.{}".format(row["table_schema"], row["table_name"])
@@ -133,7 +130,6 @@ class Databend(BaseQueryRunner):
self._handle_run_query_error(error)

schema = {}
results = json_loads(results)

for row in results["rows"]:
table_name = "{}.{}".format(row["table_schema"], row["table_name"])

@@ -16,7 +16,6 @@ from redash.query_runner import (
split_sql_statements,
)
from redash.settings import cast_int_or_default
from redash.utils import json_dumps, json_loads

try:
import pyodbc
@@ -115,16 +114,13 @@ class Databricks(BaseSQLQueryRunner):
logger.warning("Truncated result set.")
statsd_client.incr("redash.query_runner.databricks.truncated")
data["truncated"] = True
json_data = json_dumps(data)
error = None
else:
error = None
json_data = json_dumps(
{
"columns": [{"name": "result", "type": TYPE_STRING}],
"rows": [{"result": "No data was returned."}],
}
)
data = {
"columns": [{"name": "result", "type": TYPE_STRING}],
"rows": [{"result": "No data was returned."}],
}

cursor.close()
except pyodbc.Error as e:
@@ -132,9 +128,9 @@ class Databricks(BaseSQLQueryRunner):
error = str(e.args[1])
else:
error = str(e)
json_data = None
data = None

return json_data, error
return data, error

def get_schema(self):
raise NotSupported()
@@ -146,8 +142,6 @@ class Databricks(BaseSQLQueryRunner):
if error is not None:
self._handle_run_query_error(error)

results = json_loads(results)

first_column_name = results["columns"][0]["name"]
return [row[first_column_name] for row in results["rows"]]


@@ -11,7 +11,6 @@ from redash.query_runner import (
JobTimeoutException,
register,
)
from redash.utils import json_dumps, json_loads

logger = logging.getLogger(__name__)

@@ -78,8 +77,6 @@ class DB2(BaseSQLQueryRunner):
if error is not None:
self._handle_run_query_error(error)

results = json_loads(results)

for row in results["rows"]:
if row["TABLE_SCHEMA"] != "public":
table_name = "{}.{}".format(row["TABLE_SCHEMA"], row["TABLE_NAME"])
@@ -130,23 +127,22 @@ class DB2(BaseSQLQueryRunner):

data = {"columns": columns, "rows": rows}
error = None
json_data = json_dumps(data)
else:
error = "Query completed but it returned no data."
json_data = None
data = None
except (select.error, OSError):
error = "Query interrupted. Please retry."
json_data = None
data = None
except ibm_db_dbi.DatabaseError as e:
error = str(e)
json_data = None
data = None
except (KeyboardInterrupt, InterruptException, JobTimeoutException):
connection.cancel()
raise
finally:
connection.close()

return json_data, error
return data, error


register(DB2)

@@ -8,7 +8,6 @@ except ImportError:
enabled = False

from redash.query_runner import BaseQueryRunner, register
from redash.utils import json_dumps


def reduce_item(reduced_item, key, value):
@@ -81,7 +80,7 @@ class Dgraph(BaseQueryRunner):
client_stub.close()

def run_query(self, query, user):
json_data = None
data = None
error = None

try:
@@ -109,12 +108,10 @@ class Dgraph(BaseQueryRunner):

# finally, assemble both the columns and data
data = {"columns": columns, "rows": processed_data}

json_data = json_dumps(data)
except Exception as e:
error = e

return json_data, error
return data, error

def get_schema(self, get_stats=False):
"""Queries Dgraph for all the predicates, their types, their tokenizers, etc.

@@ -13,7 +13,6 @@ from redash.query_runner import (
guess_type,
register,
)
from redash.utils import json_dumps, json_loads

logger = logging.getLogger(__name__)

@@ -98,9 +97,7 @@ class Drill(BaseHTTPQueryRunner):
if error is not None:
return None, error

results = parse_response(response.json())

return json_dumps(results), None
return parse_response(response.json()), None

def get_schema(self, get_stats=False):
query = """
@@ -132,8 +129,6 @@ class Drill(BaseHTTPQueryRunner):
if error is not None:
self._handle_run_query_error(error)

results = json_loads(results)

schema = {}

for row in results["rows"]:

@@ -12,7 +12,6 @@ from redash.query_runner import (
BaseQueryRunner,
register,
)
from redash.utils import json_dumps, json_loads

TYPES_MAP = {1: TYPE_STRING, 2: TYPE_INTEGER, 3: TYPE_BOOLEAN}

@@ -59,12 +58,10 @@ class Druid(BaseQueryRunner):

data = {"columns": columns, "rows": rows}
error = None
json_data = json_dumps(data)
print(json_data)
finally:
connection.close()

return json_data, error
return data, error

def get_schema(self, get_stats=False):
query = """
@@ -81,7 +78,6 @@ class Druid(BaseQueryRunner):
self._handle_run_query_error(error)

schema = {}
results = json_loads(results)

for row in results["rows"]:
table_name = "{}.{}".format(row["TABLE_SCHEMA"], row["TABLE_NAME"])

@@ -19,7 +19,6 @@ try:
except ImportError:
enabled = False

from redash.utils import json_dumps

logger = logging.getLogger(__name__)

@@ -106,18 +105,17 @@ class e6data(BaseQueryRunner):
columns.append({"name": column_name, "type": column_type})
rows = [dict(zip([c["name"] for c in columns], r)) for r in results]
data = {"columns": columns, "rows": rows}
json_data = json_dumps(data)
error = None

except Exception as error:
logger.debug(error)
json_data = None
data = None
finally:
if cursor is not None:
cursor.clear()
cursor.close()

return json_data, error
return data, error

def test_connection(self):
self.noop_query = "SELECT 1"

Some files were not shown because too many files have changed in this diff