mirror of https://github.com/getredash/redash.git (synced 2025-12-25 10:00:45 -05:00)
Compare commits: 24.04.0-de ... 24.05.0-de (43 Commits)
| Author | SHA1 | Date |
|---|---|---|
| | 2b974e12ed | |
| | 372adfed6b | |
| | dbab9cadb4 | |
| | 06244716e6 | |
| | f09760389a | |
| | 84e6d3cad5 | |
| | 3399e3761e | |
| | 1c48b2218b | |
| | 5ac5d86f5e | |
| | 5e4764af9c | |
| | e2a39de7d1 | |
| | 6c68b48917 | |
| | 7e8a61c73d | |
| | 991e94dd6a | |
| | 2ffeecb813 | |
| | 3dd855aef1 | |
| | 713aca440a | |
| | 70bb684d9e | |
| | 4034f791c3 | |
| | b9875a231b | |
| | 062a70cf20 | |
| | c12d45077a | |
| | 6d6412753d | |
| | 275e12e7c1 | |
| | 77d7508cee | |
| | 9601660751 | |
| | 45c6fa0591 | |
| | 95ecb8e229 | |
| | cb0707176c | |
| | d7247f8b84 | |
| | 776703fab7 | |
| | 34cde71238 | |
| | f631075be3 | |
| | 3f19534301 | |
| | 24dec192ee | |
| | 82d88ed4eb | |
| | af0773c58a | |
| | 15e6583d72 | |
| | 4eb5f4e47f | |
| | a0f5c706ff | |
| | 702a550659 | |
| | 38a06c7ab9 | |
| | a6074878bb | |
@@ -1,25 +0,0 @@
services:
  redash:
    build: ../
    command: manage version
    depends_on:
      - postgres
      - redis
    ports:
      - "5000:5000"
    environment:
      PYTHONUNBUFFERED: 0
      REDASH_LOG_LEVEL: "INFO"
      REDASH_REDIS_URL: "redis://redis:6379/0"
      POSTGRES_PASSWORD: "FmTKs5vX52ufKR1rd8tn4MoSP7zvCJwb"
      REDASH_DATABASE_URL: "postgresql://postgres:FmTKs5vX52ufKR1rd8tn4MoSP7zvCJwb@postgres/postgres"
      REDASH_COOKIE_SECRET: "2H9gNG9obnAQ9qnR9BDTQUph6CbXKCzF"
  redis:
    image: redis:7-alpine
    restart: unless-stopped
  postgres:
    image: pgautoupgrade/pgautoupgrade:15-alpine3.8
    command: "postgres -c fsync=off -c full_page_writes=off -c synchronous_commit=OFF"
    restart: unless-stopped
    environment:
      POSTGRES_HOST_AUTH_METHOD: "trust"
@@ -1,73 +0,0 @@
x-redash-service: &redash-service
  build:
    context: ../
    args:
      install_groups: "main"
      code_coverage: ${CODE_COVERAGE}
x-redash-environment: &redash-environment
  REDASH_LOG_LEVEL: "INFO"
  REDASH_REDIS_URL: "redis://redis:6379/0"
  POSTGRES_PASSWORD: "FmTKs5vX52ufKR1rd8tn4MoSP7zvCJwb"
  REDASH_DATABASE_URL: "postgresql://postgres:FmTKs5vX52ufKR1rd8tn4MoSP7zvCJwb@postgres/postgres"
  REDASH_RATELIMIT_ENABLED: "false"
  REDASH_ENFORCE_CSRF: "true"
  REDASH_COOKIE_SECRET: "2H9gNG9obnAQ9qnR9BDTQUph6CbXKCzF"
services:
  server:
    <<: *redash-service
    command: server
    depends_on:
      - postgres
      - redis
    ports:
      - "5000:5000"
    environment:
      <<: *redash-environment
      PYTHONUNBUFFERED: 0
  scheduler:
    <<: *redash-service
    command: scheduler
    depends_on:
      - server
    environment:
      <<: *redash-environment
  worker:
    <<: *redash-service
    command: worker
    depends_on:
      - server
    environment:
      <<: *redash-environment
      PYTHONUNBUFFERED: 0
  cypress:
    ipc: host
    build:
      context: ../
      dockerfile: .ci/Dockerfile.cypress
    depends_on:
      - server
      - worker
      - scheduler
    environment:
      CYPRESS_baseUrl: "http://server:5000"
      CYPRESS_coverage: ${CODE_COVERAGE}
      PERCY_TOKEN: ${PERCY_TOKEN}
      PERCY_BRANCH: ${CIRCLE_BRANCH}
      PERCY_COMMIT: ${CIRCLE_SHA1}
      PERCY_PULL_REQUEST: ${CIRCLE_PR_NUMBER}
      COMMIT_INFO_BRANCH: ${CIRCLE_BRANCH}
      COMMIT_INFO_MESSAGE: ${COMMIT_INFO_MESSAGE}
      COMMIT_INFO_AUTHOR: ${CIRCLE_USERNAME}
      COMMIT_INFO_SHA: ${CIRCLE_SHA1}
      COMMIT_INFO_REMOTE: ${CIRCLE_REPOSITORY_URL}
      CYPRESS_PROJECT_ID: ${CYPRESS_PROJECT_ID}
      CYPRESS_RECORD_KEY: ${CYPRESS_RECORD_KEY}
  redis:
    image: redis:7-alpine
    restart: unless-stopped
  postgres:
    image: pgautoupgrade/pgautoupgrade:15-alpine3.8
    command: "postgres -c fsync=off -c full_page_writes=off -c synchronous_commit=OFF"
    restart: unless-stopped
    environment:
      POSTGRES_HOST_AUTH_METHOD: "trust"
@@ -1,39 +0,0 @@
#!/bin/bash

# This script only needs to run on the main Redash repo

if [ "${GITHUB_REPOSITORY}" != "getredash/redash" ]; then
  echo "Skipping image build for Docker Hub, as this isn't the main Redash repository"
  exit 0
fi

if [ "${GITHUB_REF_NAME}" != "master" ] && [ "${GITHUB_REF_NAME}" != "preview-image" ]; then
  echo "Skipping image build for Docker Hub, as this isn't the 'master' nor 'preview-image' branch"
  exit 0
fi

if [ "x${DOCKER_USER}" = "x" ] || [ "x${DOCKER_PASS}" = "x" ]; then
  echo "Skipping image build for Docker Hub, as the login details aren't available"
  exit 0
fi

set -e
VERSION=$(jq -r .version package.json)
VERSION_TAG="$VERSION.b${GITHUB_RUN_ID}.${GITHUB_RUN_NUMBER}"

export DOCKER_BUILDKIT=1
export COMPOSE_DOCKER_CLI_BUILD=1

docker login -u "${DOCKER_USER}" -p "${DOCKER_PASS}"

DOCKERHUB_REPO="redash/redash"
DOCKER_TAGS="-t redash/redash:preview -t redash/preview:${VERSION_TAG}"

# Build the docker container
docker build --build-arg install_groups="main,all_ds,dev" ${DOCKER_TAGS} .

# Push the container to the preview build locations
docker push "${DOCKERHUB_REPO}:preview"
docker push "redash/preview:${VERSION_TAG}"

echo "Built: ${VERSION_TAG}"
9  .ci/pack
@@ -1,9 +0,0 @@
#!/bin/bash
NAME=redash
VERSION=$(jq -r .version package.json)
FULL_VERSION=$VERSION+b$CIRCLE_BUILD_NUM
FILENAME=$NAME.$FULL_VERSION.tar.gz

mkdir -p /tmp/artifacts/

tar -zcv -f /tmp/artifacts/$FILENAME --exclude=".git" --exclude="optipng*" --exclude="cypress" --exclude="*.pyc" --exclude="*.pyo" --exclude="venv" *
@@ -1,6 +0,0 @@
#!/bin/bash
VERSION=$(jq -r .version package.json)
FULL_VERSION=${VERSION}+b${GITHUB_RUN_ID}.${GITHUB_RUN_NUMBER}

sed -ri "s/^__version__ = '([A-Za-z0-9.-]*)'/__version__ = '${FULL_VERSION}'/" redash/__init__.py
sed -i "s/dev/${GITHUB_SHA}/" client/app/version.json
@@ -1,5 +1,4 @@
client/.tmp/
client/dist/
node_modules/
viz-lib/node_modules/
.tmp/
96  .github/workflows/ci.yml (vendored)
@@ -3,11 +3,29 @@ on:
  push:
    branches:
      - master
    tags:
      - '*'
  pull_request_target:
    branches:
      - master
env:
  CYPRESS_COVERAGE: "true"
  NODE_VERSION: 18
  YARN_VERSION: 1.22.22
  REDASH_COOKIE_SECRET: 2H9gNG9obnAQ9qnR9BDTQUph6CbXKCzF
  REDASH_SECRET_KEY: 2H9gNG9obnAQ9qnR9BDTQUph6CbXKCzF
  COMPOSE_DOCKER_CLI_BUILD: 1
  DOCKER_BUILDKIT: 1
  FRONTEND_BUILD_MODE: 1
  INSTALL_GROUPS: main,all_ds,dev
  PERCY_BRANCH: ${{github.head_ref || github.ref_name}}
  PERCY_COMMIT: ${{github.sha}}
  PERCY_PULL_REQUEST: ${{github.event.number}}
  COMMIT_INFO_BRANCH: ${{github.head_ref || github.ref_name}}
  COMMIT_INFO_MESSAGE: ${{github.event.head_commit.message}}
  COMMIT_INFO_AUTHOR: ${{github.event.pull_request.user.login}}
  COMMIT_INFO_SHA: ${{github.sha}}
  COMMIT_INFO_REMOTE: ${{github.server_url}}/${{github.repository}}
jobs:
  backend-lint:
    runs-on: ubuntu-22.04
@@ -19,10 +37,10 @@ jobs:
        with:
          fetch-depth: 1
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: actions/setup-python@v4
      - uses: actions/setup-python@v5
        with:
          python-version: '3.8'
      - run: sudo pip install black==23.1.0 ruff==0.0.287
      - run: sudo pip install black==24.3.0 ruff==0.1.9
      - run: ruff check .
      - run: black --check .

@@ -30,10 +48,7 @@ jobs:
    runs-on: ubuntu-22.04
    needs: backend-lint
    env:
      COMPOSE_FILE: .ci/compose.ci.yaml
      COMPOSE_PROJECT_NAME: redash
      COMPOSE_DOCKER_CLI_BUILD: 1
      DOCKER_BUILDKIT: 1
      FRONTEND_BUILD_MODE: 0
    steps:
      - if: github.event.pull_request.mergeable == 'false'
        name: Exit if PR is not mergeable
@@ -45,15 +60,16 @@ jobs:
      - name: Build Docker Images
        run: |
          set -x
          docker compose build --build-arg install_groups="main,all_ds,dev" --build-arg skip_frontend_build=true
          touch .env
          docker compose build
          docker compose up -d
          sleep 10
      - name: Create Test Database
        run: docker compose -p redash run --rm postgres psql -h postgres -U postgres -c "create database tests;"
        run: docker compose run --rm postgres psql -h postgres -U postgres -c "create database tests;"
      - name: List Enabled Query Runners
        run: docker compose -p redash run --rm redash manage ds list_types
        run: docker compose run --rm server manage ds list_types
      - name: Run Tests
        run: docker compose -p redash run --name tests redash tests --junitxml=junit.xml --cov-report=xml --cov=redash --cov-config=.coveragerc tests/
        run: docker compose run --name tests server tests --junitxml=junit.xml --cov-report=xml --cov=redash --cov-config=.coveragerc tests/
      - name: Copy Test Results
        run: |
          mkdir -p /tmp/test-results/unit-tests
@@ -61,15 +77,17 @@ jobs:
          docker cp tests:/app/junit.xml /tmp/test-results/unit-tests/results.xml
      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v3
      - name: Store Test Results
        uses: actions/upload-artifact@v3
        with:
          name: test-results
          token: ${{ secrets.CODECOV_TOKEN }}
      - name: Store Test Results
        uses: actions/upload-artifact@v4
        with:
          name: backend-test-results
          path: /tmp/test-results
      - name: Store Coverage Results
        uses: actions/upload-artifact@v3
        uses: actions/upload-artifact@v4
        with:
          name: coverage
          name: backend-coverage
          path: coverage.xml

  frontend-lint:
@@ -82,20 +100,21 @@ jobs:
        with:
          fetch-depth: 1
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: actions/setup-node@v3
      - uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'yarn'
      - name: Install Dependencies
        run: |
          npm install --global --force yarn@1.22.19
          yarn cache clean && yarn --frozen-lockfile --network-concurrency 1
          npm install --global --force yarn@$YARN_VERSION
          yarn cache clean
          yarn --frozen-lockfile --network-concurrency 1
      - name: Run Lint
        run: yarn lint:ci
      - name: Store Test Results
        uses: actions/upload-artifact@v3
        uses: actions/upload-artifact@v4
        with:
          name: test-results
          name: frontend-test-results
          path: /tmp/test-results

  frontend-unit-tests:
@@ -109,28 +128,31 @@ jobs:
        with:
          fetch-depth: 1
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: actions/setup-node@v3
      - uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'yarn'
      - name: Install Dependencies
        run: |
          npm install --global --force yarn@1.22.19
          yarn cache clean && yarn --frozen-lockfile --network-concurrency 1
          npm install --global --force yarn@$YARN_VERSION
          yarn cache clean
          yarn --frozen-lockfile --network-concurrency 1
      - name: Run App Tests
        run: yarn test
      - name: Run Visualizations Tests
        run: cd viz-lib && yarn test
        run: |
          cd viz-lib
          yarn test
      - run: yarn lint

  frontend-e2e-tests:
    runs-on: ubuntu-22.04
    needs: frontend-lint
    env:
      COMPOSE_FILE: .ci/compose.cypress.yaml
      COMPOSE_PROJECT_NAME: cypress
      CYPRESS_INSTALL_BINARY: 0
      PUPPETEER_SKIP_CHROMIUM_DOWNLOAD: 1
      INSTALL_GROUPS: main
      COMPOSE_PROFILES: e2e
      PERCY_TOKEN: ${{ secrets.PERCY_TOKEN }}
      CYPRESS_PROJECT_ID: ${{ secrets.CYPRESS_PROJECT_ID }}
      CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }}
@@ -142,21 +164,20 @@ jobs:
        with:
          fetch-depth: 1
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: actions/setup-node@v3
      - uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'yarn'
      - name: Enable Code Coverage Report For Master Branch
        if: endsWith(github.ref, '/master')
        run: |
          echo "CODE_COVERAGE=true" >> "$GITHUB_ENV"
      - name: Install Dependencies
        run: |
          npm install --global --force yarn@1.22.19
          yarn cache clean && yarn --frozen-lockfile --network-concurrency 1
          npm install --global --force yarn@$YARN_VERSION
          yarn cache clean
          yarn --frozen-lockfile --network-concurrency 1
      - name: Setup Redash Server
        run: |
          set -x
          touch .env
          yarn build
          yarn cypress build
          yarn cypress start -- --skip-db-seed
          docker compose run cypress yarn cypress db-seed
@@ -168,7 +189,12 @@ jobs:
      - name: Copy Code Coverage Results
        run: docker cp cypress:/usr/src/app/coverage ./coverage || true
      - name: Store Coverage Results
        uses: actions/upload-artifact@v3
        uses: actions/upload-artifact@v4
        with:
          name: coverage
          name: frontend-coverage
          path: coverage
      - uses: actions/upload-artifact@v4
        with:
          name: frontend
          path: client/dist
          retention-days: 1
129  .github/workflows/preview-image.yml (vendored)
@@ -1,15 +1,20 @@
name: Preview Image
on:
  push:
    tags:
      - '*-dev'
  workflow_run:
    workflows:
      - Tests
    types:
      - completed
    branches:
      - master

env:
  NODE_VERSION: 18
  DOCKER_REPO: redash

jobs:
  build-skip-check:
    runs-on: ubuntu-22.04
    if: ${{ github.event.workflow_run.conclusion == 'success' }}
    outputs:
      skip: ${{ steps.skip-check.outputs.skip }}
    steps:
@@ -32,56 +37,118 @@ jobs:
    runs-on: ubuntu-22.04
    needs:
      - build-skip-check
    outputs:
      version: ${{ steps.version.outputs.VERSION_TAG }}
      repo: ${{ steps.version.outputs.DOCKER_REPO }}
    if: needs.build-skip-check.outputs.skip == 'false'
    strategy:
      fail-fast: false
      matrix:
        platform:
          - linux/amd64
          - linux/arm64
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 1
          ref: ${{ github.event.push.after }}

      - uses: actions/setup-node@v4
      - uses: dawidd6/action-download-artifact@v3
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'yarn'

      - name: Install Dependencies
        run: |
          npm install --global --force yarn@1.22.19
          yarn cache clean && yarn --frozen-lockfile --network-concurrency 1

          name: frontend
          workflow: ci.yml
          github_token: ${{ secrets.GITHUB_TOKEN }}
          run_id: ${{ github.event.workflow_run.id }}
          path: client/dist
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ vars.DOCKER_USER }}
          password: ${{ secrets.DOCKER_PASS }}

      - name: Set version
        id: version
        run: |
          set -x
          .ci/update_version
          VERSION_TAG=$(jq -r .version package.json)
          echo "VERSION_TAG=$VERSION_TAG" >> "$GITHUB_OUTPUT"

          VERSION=$(jq -r .version package.json)
          FULL_VERSION=${VERSION}-b${GITHUB_RUN_ID}.${GITHUB_RUN_NUMBER}
          sed -ri "s/^__version__ = '([A-Za-z0-9.-]*)'/__version__ = '${FULL_VERSION}'/" redash/__init__.py
          sed -i "s/dev/${GITHUB_SHA}/" client/app/version.json
          echo "VERSION_TAG=$FULL_VERSION" >> "$GITHUB_OUTPUT"
          platform=${{ matrix.platform }}
          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
          echo "SCOPE=${platform//\//-}" >> $GITHUB_ENV
          if [[ "${{ vars.DOCKER_REPO }}" != "" ]]; then
            echo "DOCKER_REPO=${{ vars.DOCKER_REPO }}" >> $GITHUB_ENV
            echo "DOCKER_REPO=${{ vars.DOCKER_REPO }}" >> $GITHUB_OUTPUT
          else
            echo "DOCKER_REPO=${DOCKER_REPO}" >> $GITHUB_ENV
            echo "DOCKER_REPO=${DOCKER_REPO}" >> $GITHUB_OUTPUT
          fi
      - name: Build and push preview image to Docker Hub
        uses: docker/build-push-action@v4
        uses: docker/build-push-action@v5
        id: build
        with:
          push: true
          tags: |
            redash/redash:preview
            redash/preview:${{ steps.version.outputs.VERSION_TAG }}
          context: .
          cache-from: type=gha,scope=${{ env.SCOPE }}
          cache-to: type=gha,mode=max,scope=${{ env.SCOPE }}
          platforms: ${{ matrix.platform }}
          outputs: type=image,name=${{ env.DOCKER_REPO }}/redash,push-by-digest=true,name-canonical=true,push=true
          build-args: |
            test_all_deps=true
          cache-from: type=gha
          cache-to: type=gha,mode=max
          platforms: linux/amd64
            FRONTEND_BUILD_MODE=1
        env:
          DOCKER_CONTENT_TRUST: true
      - name: Export digest
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "/tmp/digests/${digest#sha256:}"
      - name: Upload digest
        uses: actions/upload-artifact@v4
        with:
          name: digests-${{ env.PLATFORM_PAIR }}
          path: /tmp/digests/*
          if-no-files-found: error
          retention-days: 1

      - name: "Failure: output container logs to console"
        if: failure()
        run: docker compose logs
  publish-docker-manifest:
    runs-on: ubuntu-22.04
    needs:
      - build-skip-check
      - build-docker-image
    if: needs.build-skip-check.outputs.skip == 'false'
    steps:
      - name: Download digests
        uses: actions/download-artifact@v4
        with:
          pattern: digests-*
          path: /tmp/digests
          merge-multiple: true
      - name: Setup Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ needs.build-docker-image.outputs.repo }}/redash
          tags: preview
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ vars.DOCKER_USER }}
          password: ${{ secrets.DOCKER_PASS }}
      - name: Create manifest list and push
        working-directory: /tmp/digests
        run: |
          docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
            $(printf '${{ needs.build-docker-image.outputs.repo }}/redash@sha256:%s ' *)
      - name: Inspect image
        run: |
          REDASH_IMAGE="${{ needs.build-docker-image.outputs.repo }}/redash:${{ steps.meta.outputs.version }}"
          docker buildx imagetools inspect $REDASH_IMAGE
      - name: Push image ${{ needs.build-docker-image.outputs.repo }}/preview image
        run: |
          REDASH_IMAGE="${{ needs.build-docker-image.outputs.repo }}/redash:preview"
          PREVIEW_IMAGE="${{ needs.build-docker-image.outputs.repo }}/preview:${{ needs.build-docker-image.outputs.version }}"
          docker buildx imagetools create --tag $PREVIEW_IMAGE $REDASH_IMAGE
52  Dockerfile
@@ -1,30 +1,35 @@
FROM node:18-bookworm as frontend-builder

RUN npm install --global --force yarn@1.22.19

# Controls whether to build the frontend assets
ARG skip_frontend_build
ARG FRONTEND_BUILD_MODE=0

# MODE 0: create empty files. useful for backend tests
FROM alpine:3.19 as frontend-builder-0
RUN \
  mkdir -p /frontend/client/dist && \
  touch /frontend/client/dist/multi_org.html && \
  touch /frontend/client/dist/index.html

# MODE 1: copy static frontend from host, useful for CI to ignore building static content multiple times
FROM alpine:3.19 as frontend-builder-1
COPY client/dist /frontend/client/dist

# MODE 2: build static content in docker, can be used for a local development
FROM node:18-bookworm as frontend-builder-2
RUN npm install --global --force yarn@1.22.22
ENV CYPRESS_INSTALL_BINARY=0
ENV PUPPETEER_SKIP_CHROMIUM_DOWNLOAD=1

RUN useradd -m -d /frontend redash
USER redash

WORKDIR /frontend
COPY --chown=redash package.json yarn.lock .yarnrc /frontend/
COPY --chown=redash viz-lib /frontend/viz-lib
COPY --chown=redash scripts /frontend/scripts

# Controls whether to instrument code for coverage information
ARG code_coverage
ENV BABEL_ENV=${code_coverage:+test}

RUN if [ "x$skip_frontend_build" = "x" ] ; then yarn --frozen-lockfile --network-concurrency 1; fi

RUN yarn --frozen-lockfile --network-concurrency 1;
COPY --chown=redash client /frontend/client
COPY --chown=redash webpack.config.js /frontend/
RUN if [ "x$skip_frontend_build" = "x" ] ; then yarn build; else mkdir -p /frontend/client/dist && touch /frontend/client/dist/multi_org.html && touch /frontend/client/dist/index.html; fi
RUN yarn build

FROM frontend-builder-${FRONTEND_BUILD_MODE} as frontend-builder

FROM python:3.8-slim-bookworm

@@ -61,17 +66,18 @@ RUN apt-get update && \
  apt-get clean && \
  rm -rf /var/lib/apt/lists/*

RUN \
  curl https://packages.microsoft.com/config/debian/12/prod.list > /etc/apt/sources.list.d/mssql-release.list && \
  curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor -o /usr/share/keyrings/microsoft-prod.gpg && \
  apt update && \
  ACCEPT_EULA=Y apt install -y --no-install-recommends msodbcsql18 && \
  apt clean && \
  rm -rf /var/lib/apt/lists/*

ARG TARGETPLATFORM
ARG databricks_odbc_driver_url=https://databricks-bi-artifacts.s3.us-east-2.amazonaws.com/simbaspark-drivers/odbc/2.6.26/SimbaSparkODBC-2.6.26.1045-Debian-64bit.zip
RUN if [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
  curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor -o /usr/share/keyrings/microsoft-prod.gpg \
  && curl https://packages.microsoft.com/config/debian/12/prod.list > /etc/apt/sources.list.d/mssql-release.list \
  && apt-get update \
  && ACCEPT_EULA=Y apt-get install -y --no-install-recommends msodbcsql17 \
  && apt-get clean \
  && rm -rf /var/lib/apt/lists/* \
  && curl "$databricks_odbc_driver_url" --location --output /tmp/simba_odbc.zip \
  curl "$databricks_odbc_driver_url" --location --output /tmp/simba_odbc.zip \
  && chmod 600 /tmp/simba_odbc.zip \
  && unzip /tmp/simba_odbc.zip -d /tmp/simba \
  && dpkg -i /tmp/simba/*.deb \
@@ -91,8 +97,8 @@ COPY pyproject.toml poetry.lock ./
ARG POETRY_OPTIONS="--no-root --no-interaction --no-ansi"
# for LDAP authentication, install with `ldap3` group
# disabled by default due to GPL license conflict
ARG install_groups="main,all_ds,dev"
RUN /etc/poetry/bin/poetry install --only $install_groups $POETRY_OPTIONS
ARG INSTALL_GROUPS="main,all_ds,dev"
RUN /etc/poetry/bin/poetry install --only $INSTALL_GROUPS $POETRY_OPTIONS

COPY --chown=redash . /app
COPY --from=frontend-builder --chown=redash /frontend/client/dist /app/client/dist
@@ -5,7 +5,7 @@ WORKDIR $APP

COPY package.json yarn.lock .yarnrc $APP/
COPY viz-lib $APP/viz-lib
RUN npm install yarn@1.22.19 -g && yarn --frozen-lockfile --network-concurrency 1 > /dev/null
RUN npm install yarn@1.22.22 -g && yarn --frozen-lockfile --network-concurrency 1 > /dev/null

COPY . $APP
40  Makefile
@@ -1,10 +1,18 @@
.PHONY: compose_build up test_db create_database clean down tests lint backend-unit-tests frontend-unit-tests test build watch start redis-cli bash
.PHONY: compose_build up test_db create_database create_db clean clean-all down tests lint backend-unit-tests frontend-unit-tests pydeps test build watch start redis-cli bash

export COMPOSE_DOCKER_CLI_BUILD=1
export DOCKER_BUILDKIT=1
export COMPOSE_PROFILES=local

compose_build: .env
    COMPOSE_DOCKER_CLI_BUILD=1 DOCKER_BUILDKIT=1 docker compose build
    docker compose build

up:
    COMPOSE_DOCKER_CLI_BUILD=1 DOCKER_BUILDKIT=1 docker compose up -d --build
    docker compose up -d redis postgres
    docker compose exec -u postgres postgres psql postgres --csv \
        -1tqc "SELECT table_name FROM information_schema.tables WHERE table_name = 'organizations'" 2> /dev/null \
        | grep -q "organizations" || make create_database
    docker compose up -d --build

test_db:
    @for i in `seq 1 5`; do \
@@ -13,11 +21,27 @@ test_db:
    done
    docker compose exec postgres sh -c 'psql -U postgres -c "drop database if exists tests;" && psql -U postgres -c "create database tests;"'

create_database: .env
create_db: .env
    docker compose run server create_db

create_database: create_db

clean:
    docker compose down && docker compose rm
    docker compose down
    docker compose --project-name cypress down
    docker compose rm --stop --force
    docker compose --project-name cypress rm --stop --force
    docker image rm --force \
        cypress-server:latest cypress-worker:latest cypress-scheduler:latest \
        redash-server:latest redash-worker:latest redash-scheduler:latest
    docker container prune --force
    docker image prune --force
    docker volume prune --force

clean-all: clean
    docker image rm --force \
        redash/redash:10.1.0.b50633 redis:7-alpine maildev/maildev:latest \
        pgautoupgrade/pgautoupgrade:15-alpine3.8 pgautoupgrade/pgautoupgrade:latest

down:
    docker compose down
@@ -30,6 +54,12 @@ env: .env
format:
    pre-commit run --all-files

pydeps:
    pip3 install wheel
    pip3 install --upgrade black ruff launchpadlib pip setuptools
    pip3 install poetry
    poetry install --only main,all_ds,dev

tests:
    docker compose run server tests
@@ -1,25 +1,48 @@
#!/bin/bash
set -e

if [ -z $REDASH_REDIS_URL ]; then
  export REDASH_REDIS_URL=redis://:${REDASH_REDIS_PASSWORD}@${REDASH_REDIS_HOSTNAME}:${REDASH_REDIS_PORT}/${REDASH_REDIS_NAME}
fi

if [ -z $REDASH_DATABASE_URL ]; then
  export REDASH_DATABASE_URL=postgresql://${REDASH_DATABASE_USER}:${REDASH_DATABASE_PASSWORD}@${REDASH_DATABASE_HOSTNAME}:${REDASH_DATABASE_PORT}/${REDASH_DATABASE_NAME}
fi

scheduler() {
  echo "Starting RQ scheduler..."

  exec /app/manage.py rq scheduler
}

dev_scheduler() {
  echo "Starting dev RQ scheduler..."

  exec watchmedo auto-restart --directory=./redash/ --pattern=*.py --recursive -- ./manage.py rq scheduler
  case $REDASH_PRODUCTION in
    true)
      echo "Starting RQ scheduler in production mode"
      exec ./manage.py rq scheduler
      ;;
    *)
      echo "Starting RQ scheduler in dev mode"
      exec watchmedo auto-restart \
        --directory=./redash/ \
        --pattern=*.py \
        --recursive -- ./manage.py rq scheduler $QUEUES
      ;;
  esac
}

worker() {
  echo "Starting RQ worker..."

  export WORKERS_COUNT=${WORKERS_COUNT:-2}
  export QUEUES=${QUEUES:-}

  exec supervisord -c worker.conf
  case $REDASH_PRODUCTION in
    true)
      echo "Starting RQ worker in production mode"
      exec supervisord -c worker.conf
      ;;
    *)
      echo "Starting RQ worker in dev mode"
      exec watchmedo auto-restart \
        --directory=./redash/ \
        --pattern=*.py \
        --recursive -- ./manage.py rq worker $QUEUES
      ;;
  esac
}

workers_healthcheck() {
@@ -35,22 +58,63 @@ workers_healthcheck() {
  fi
}

dev_worker() {
  echo "Starting dev RQ worker..."

  exec watchmedo auto-restart --directory=./redash/ --pattern=*.py --recursive -- ./manage.py rq worker $QUEUES
}

server() {
  # Recycle gunicorn workers every n-th request. See http://docs.gunicorn.org/en/stable/settings.html#max-requests for more details.
  MAX_REQUESTS=${MAX_REQUESTS:-1000}
  MAX_REQUESTS_JITTER=${MAX_REQUESTS_JITTER:-100}
  TIMEOUT=${REDASH_GUNICORN_TIMEOUT:-60}
  exec /usr/local/bin/gunicorn -b 0.0.0.0:5000 --name redash -w${REDASH_WEB_WORKERS:-4} redash.wsgi:app --max-requests $MAX_REQUESTS --max-requests-jitter $MAX_REQUESTS_JITTER --timeout $TIMEOUT
  case $REDASH_PRODUCTION in
    true)
      echo "Starting Redash Server in production mode"
      MAX_REQUESTS=${MAX_REQUESTS:-1000}
      MAX_REQUESTS_JITTER=${MAX_REQUESTS_JITTER:-100}
      TIMEOUT=${REDASH_GUNICORN_TIMEOUT:-60}
      exec /usr/local/bin/gunicorn \
        -b 0.0.0.0:5000 \
        --name redash \
        -w${REDASH_WEB_WORKERS:-4} redash.wsgi:app \
        --max-requests $MAX_REQUESTS \
        --max-requests-jitter $MAX_REQUESTS_JITTER \
        --timeout $TIMEOUT
      ;;
    *)
      echo "Starting Redash Server in a dev mode"
      export FLASK_DEBUG=1
      exec /app/manage.py runserver --debugger --reload -h 0.0.0.0
      ;;
  esac
}

create_db() {
  exec /app/manage.py database create_tables
  REDASH_DATABASE_MIGRATE_TIMEOUT=${REDASH_DATABASE_UPGRADE_TIMEOUT:-600}
  REDASH_DATABASE_MIGRATE_MAX_ATTEMPTS=${REDASH_DATABASE_MIGRATE_MAX_ATTEMPTS:-5}
  REDASH_DATABASE_MIGRATE_RETRY_WAIT=${REDASH_DATABASE_MIGRATE_RETRY_WAIT:-10}
  ATTEMPTS=1
  while ((ATTEMPTS <= REDASH_DATABASE_MIGRATE_MAX_ATTEMPTS)); do
    echo "Creating or updating Redash database, attempt ${ATTEMPTS} of ${REDASH_DATABASE_MIGRATE_MAX_ATTEMPTS}"
    ATTEMPTS=$((ATTEMPTS+1))
    timeout $REDASH_DATABASE_MIGRATE_TIMEOUT /app/manage.py database create_tables
    timeout $REDASH_DATABASE_MIGRATE_TIMEOUT /app/manage.py db upgrade
    STATUS=$(timeout $REDASH_DATABASE_MIGRATE_TIMEOUT /app/manage.py status 2>&1)
    RETCODE=$?
    case "$RETCODE" in
      0)
        exit 0
        ;;
      124)
        echo "Status command timed out after ${REDASH_DATABASE_MIGRATE_TIMEOUT} seconds."
        ;;
    esac
    case "$STATUS" in
      *sqlalchemy.exc.OperationalError*)
        echo "Database not yet functional, waiting."
        ;;
      *sqlalchemy.exc.ProgrammingError*)
        echo "Database does not appear to be installed."
        ;;
    esac
    echo "Waiting ${REDASH_DATABASE_MIGRATE_RETRY_WAIT} seconds before retrying."
    sleep ${REDASH_DATABASE_MIGRATE_RETRY_WAIT}
  done
  echo "Reached ${REDASH_DATABASE_MIGRATE_MAX_ATTEMPTS} attempts, giving up."
  exit 1
}

help() {
@@ -61,21 +125,16 @@ help() {

  echo "server -- start Redash server (with gunicorn)"
  echo "worker -- start a single RQ worker"
  echo "dev_worker -- start a single RQ worker with code reloading"
  echo "scheduler -- start an rq-scheduler instance"
  echo "dev_scheduler -- start an rq-scheduler instance with code reloading"
  echo ""
  echo "shell -- open shell"
  echo "dev_server -- start Flask development server with debugger and auto reload"
  echo "debug -- start Flask development server with remote debugger via ptvsd"
  echo "create_db -- create database tables"
  echo "create_db -- create database tables and run migrations"
  echo "manage -- CLI to manage redash"
  echo "tests -- run tests"
}

tests() {
  export REDASH_DATABASE_URL="postgresql://postgres@postgres/tests"

  if [ $# -eq 0 ]; then
    TEST_ARGS=tests/
  else
@@ -101,22 +160,10 @@ case "$1" in
    shift
    scheduler
    ;;
  dev_scheduler)
    shift
    dev_scheduler
    ;;
  dev_worker)
    shift
    dev_worker
    ;;
  celery_healthcheck)
    shift
    echo "DEPRECATED: Celery has been replaced with RQ and now performs healthchecks autonomously as part of the 'worker' entrypoint."
    ;;
  dev_server)
    export FLASK_DEBUG=1
    exec /app/manage.py runserver --debugger --reload -h 0.0.0.0
    ;;
  debug)
    export FLASK_DEBUG=1
    export REMOTE_DEBUG=1
Binary file not shown. (Before: Size 2.4 KiB)
@@ -1,6 +1,5 @@
import React from "react";
import Link from "@/components/Link";
import { clientConfig, currentUser } from "@/services/auth";
import { clientConfig } from "@/services/auth";
import frontendVersion from "@/version.json";

export default function VersionInfo() {
@@ -10,15 +9,6 @@ export default function VersionInfo() {
Version: {clientConfig.version}
{frontendVersion !== clientConfig.version && ` (${frontendVersion.substring(0, 8)})`}
</div>
{clientConfig.newVersionAvailable && currentUser.hasPermission("super_admin") && (
<div className="m-t-10">
{/* eslint-disable react/jsx-no-target-blank */}
<Link href="https://version.redash.io/" className="update-available" target="_blank" rel="noopener">
Update Available <i className="fa fa-external-link m-l-5" aria-hidden="true" />
<span className="sr-only">(opens in a new tab)</span>
</Link>
</div>
)}
</React.Fragment>
);
}
@@ -1,79 +0,0 @@
import React, { useState } from "react";
import Card from "antd/lib/card";
import Button from "antd/lib/button";
import Typography from "antd/lib/typography";
import { clientConfig } from "@/services/auth";
import Link from "@/components/Link";
import HelpTrigger from "@/components/HelpTrigger";
import DynamicComponent from "@/components/DynamicComponent";
import OrgSettings from "@/services/organizationSettings";

const Text = Typography.Text;

function BeaconConsent() {
const [hide, setHide] = useState(false);

if (!clientConfig.showBeaconConsentMessage || hide) {
return null;
}

const hideConsentCard = () => {
clientConfig.showBeaconConsentMessage = false;
setHide(true);
};

const confirmConsent = confirm => {
let message = "🙏 Thank you.";

if (!confirm) {
message = "Settings Saved.";
}

OrgSettings.save({ beacon_consent: confirm }, message)
// .then(() => {
// // const settings = get(response, 'settings');
// // this.setState({ settings, formValues: { ...settings } });
// })
.finally(hideConsentCard);
};

return (
<DynamicComponent name="BeaconConsent">
<div className="m-t-10 tiled">
<Card
title={
<>
Would you be ok with sharing anonymous usage data with the Redash team?{" "}
<HelpTrigger type="USAGE_DATA_SHARING" />
</>
}
bordered={false}>
<Text>Help Redash improve by automatically sending anonymous usage data:</Text>
<div className="m-t-5">
<ul>
<li> Number of users, queries, dashboards, alerts, widgets and visualizations.</li>
<li> Types of data sources, alert destinations and visualizations.</li>
</ul>
</div>
<Text>All data is aggregated and will never include any sensitive or private data.</Text>
<div className="m-t-5">
<Button type="primary" className="m-r-5" onClick={() => confirmConsent(true)}>
Yes
</Button>
<Button type="default" onClick={() => confirmConsent(false)}>
No
</Button>
</div>
<div className="m-t-15">
<Text type="secondary">
You can change this setting anytime from the{" "}
<Link href="settings/organization">Organization Settings</Link> page.
</Text>
</div>
</Card>
</div>
</DynamicComponent>
);
}

export default BeaconConsent;
@@ -23,7 +23,6 @@ export const TYPES = mapValues(
VALUE_SOURCE_OPTIONS: ["/user-guide/querying/query-parameters#Value-Source-Options", "Guide: Value Source Options"],
SHARE_DASHBOARD: ["/user-guide/dashboards/sharing-dashboards", "Guide: Sharing and Embedding Dashboards"],
AUTHENTICATION_OPTIONS: ["/user-guide/users/authentication-options", "Guide: Authentication Options"],
USAGE_DATA_SHARING: ["/open-source/admin-guide/usage-data", "Help: Anonymous Usage Data Sharing"],
DS_ATHENA: ["/data-sources/amazon-athena-setup", "Guide: Help Setting up Amazon Athena"],
DS_BIGQUERY: ["/data-sources/bigquery-setup", "Guide: Help Setting up BigQuery"],
DS_URL: ["/data-sources/querying-urls", "Guide: Help Setting up URL"],
@@ -19,6 +19,7 @@ import PlainButton from "@/components/PlainButton";
import ExpandedWidgetDialog from "@/components/dashboards/ExpandedWidgetDialog";
import EditParameterMappingsDialog from "@/components/dashboards/EditParameterMappingsDialog";
import VisualizationRenderer from "@/components/visualizations/VisualizationRenderer";
import { ExecutionStatus } from "@/services/query-result";

import Widget from "./Widget";

@@ -278,7 +279,7 @@ class VisualizationWidget extends React.Component {
const widgetQueryResult = widget.getQueryResult();
const widgetStatus = widgetQueryResult && widgetQueryResult.getStatus();
switch (widgetStatus) {
case "failed":
case ExecutionStatus.FAILED:
return (
<div className="body-row-auto scrollbox">
{widgetQueryResult.getError() && (
@@ -288,7 +289,7 @@ class VisualizationWidget extends React.Component {
)}
</div>
);
case "done":
case ExecutionStatus.FINISHED:
return (
<div className="body-row-auto scrollbox">
<VisualizationRenderer
@@ -16,6 +16,7 @@ import LoadingState from "../items-list/components/LoadingState";
const SchemaItemColumnType = PropTypes.shape({
name: PropTypes.string.isRequired,
type: PropTypes.string,
comment: PropTypes.string,
});

export const SchemaItemType = PropTypes.shape({
@@ -47,13 +48,30 @@ function SchemaItem({ item, expanded, onToggle, onSelect, ...props }) {
return (
<div {...props}>
<div className="schema-list-item">
<PlainButton className="table-name" onClick={onToggle}>
<i className="fa fa-table m-r-5" aria-hidden="true" />
<strong>
<span title={item.name}>{tableDisplayName}</span>
{!isNil(item.size) && <span> ({item.size})</span>}
</strong>
</PlainButton>
{item.description ? (
<Tooltip
title={item.description}
mouseEnterDelay={0}
mouseLeaveDelay={0}
placement="right"
arrowPointAtCenter>
<PlainButton className="table-name" onClick={onToggle}>
<i className="fa fa-table m-r-5" aria-hidden="true" />
<strong>
<span title={item.name}>{tableDisplayName}</span>
{!isNil(item.size) && <span> ({item.size})</span>}
</strong>
</PlainButton>
</Tooltip>
) : (
<PlainButton className="table-name" onClick={onToggle}>
<i className="fa fa-table m-r-5" aria-hidden="true" />
<strong>
<span title={item.name}>{tableDisplayName}</span>
{!isNil(item.size) && <span> ({item.size})</span>}
</strong>
</PlainButton>
)}
<Tooltip
title="Insert table name into query text"
mouseEnterDelay={0}
@@ -73,22 +91,34 @@ function SchemaItem({ item, expanded, onToggle, onSelect, ...props }) {
map(item.columns, column => {
const columnName = get(column, "name");
const columnType = get(column, "type");
return (
<Tooltip
title="Insert column name into query text"
mouseEnterDelay={0}
mouseLeaveDelay={0}
placement="rightTop">
<PlainButton key={columnName} className="table-open-item" onClick={e => handleSelect(e, columnName)}>
<div>
{columnName} {columnType && <span className="column-type">{columnType}</span>}
</div>
const columnComment = get(column, "comment");
if (columnComment) {
return (
<Tooltip title={columnComment} mouseEnterDelay={0} mouseLeaveDelay={0} placement="rightTop">
<PlainButton
key={columnName}
className="table-open-item"
onClick={e => handleSelect(e, columnName)}>
<div>
{columnName} {columnType && <span className="column-type">{columnType}</span>}
</div>

<div className="copy-to-editor">
<i className="fa fa-angle-double-right" aria-hidden="true" />
</div>
</PlainButton>
</Tooltip>
<div className="copy-to-editor">
<i className="fa fa-angle-double-right" aria-hidden="true" />
</div>
</PlainButton>
</Tooltip>
);
}
return (
<PlainButton key={columnName} className="table-open-item" onClick={e => handleSelect(e, columnName)}>
<div>
{columnName} {columnType && <span className="column-type">{columnType}</span>}
</div>
<div className="copy-to-editor">
<i className="fa fa-angle-double-right" aria-hidden="true" />
</div>
</PlainButton>
);
})
)}
@@ -5,7 +5,7 @@
<meta charset="UTF-8" />
<base href="{{base_href}}" />
<title><%= htmlWebpackPlugin.options.title %></title>
<script src="/static/unsupportedRedirect.js" async></script>
<script src="<%= htmlWebpackPlugin.options.staticPath %>unsupportedRedirect.js" async></script>

<link rel="icon" type="image/png" sizes="32x32" href="/static/images/favicon-32x32.png" />
<link rel="icon" type="image/png" sizes="96x96" href="/static/images/favicon-96x96.png" />
@@ -6,7 +6,6 @@ import Link from "@/components/Link";
import routeWithUserSession from "@/components/ApplicationArea/routeWithUserSession";
import EmptyState, { EmptyStateHelpMessage } from "@/components/empty-state/EmptyState";
import DynamicComponent from "@/components/DynamicComponent";
import BeaconConsent from "@/components/BeaconConsent";
import PlainButton from "@/components/PlainButton";

import { axios } from "@/services/axios";
@@ -89,7 +88,6 @@ export default function Home() {
</DynamicComponent>
<DynamicComponent name="HomeExtra" />
<DashboardAndQueryFavoritesList />
<BeaconConsent />
</div>
</div>
);
@@ -380,7 +380,9 @@ function QuerySource(props) {
<QueryVisualizationTabs
queryResult={queryResult}
visualizations={query.visualizations}
showNewVisualizationButton={queryFlags.canEdit && queryResultData.status === ExecutionStatus.DONE}
showNewVisualizationButton={
queryFlags.canEdit && queryResultData.status === ExecutionStatus.FINISHED
}
canDeleteVisualizations={queryFlags.canEdit}
selectedTab={selectedVisualization}
onChangeTab={setSelectedVisualization}

@@ -165,7 +165,7 @@ function QueryView(props) {
<QueryVisualizationTabs
queryResult={queryResult}
visualizations={query.visualizations}
showNewVisualizationButton={queryFlags.canEdit && queryResultData.status === ExecutionStatus.DONE}
showNewVisualizationButton={queryFlags.canEdit && queryResultData.status === ExecutionStatus.FINISHED}
canDeleteVisualizations={queryFlags.canEdit}
selectedTab={selectedVisualization}
onChangeTab={setSelectedVisualization}
@@ -1,37 +1,45 @@
import { includes } from "lodash";
import React from "react";
import PropTypes from "prop-types";
import Alert from "antd/lib/alert";
import Button from "antd/lib/button";
import Timer from "@/components/Timer";
import { ExecutionStatus } from "@/services/query-result";

export default function QueryExecutionStatus({ status, updatedAt, error, isCancelling, onCancel }) {
const alertType = status === "failed" ? "error" : "info";
const showTimer = status !== "failed" && updatedAt;
const isCancelButtonAvailable = includes(["waiting", "processing"], status);
const alertType = status === ExecutionStatus.FAILED ? "error" : "info";
const showTimer = status !== ExecutionStatus.FAILED && updatedAt;
const isCancelButtonAvailable = [
ExecutionStatus.SCHEDULED,
ExecutionStatus.QUEUED,
ExecutionStatus.STARTED,
ExecutionStatus.DEFERRED,
].includes(status);
let message = isCancelling ? <React.Fragment>Cancelling…</React.Fragment> : null;

switch (status) {
case "waiting":
case ExecutionStatus.QUEUED:
if (!isCancelling) {
message = <React.Fragment>Query in queue…</React.Fragment>;
}
break;
case "processing":
case ExecutionStatus.STARTED:
if (!isCancelling) {
message = <React.Fragment>Executing query…</React.Fragment>;
}
break;
case "loading-result":
case ExecutionStatus.LOADING_RESULT:
message = <React.Fragment>Loading results…</React.Fragment>;
break;
case "failed":
case ExecutionStatus.FAILED:
message = (
<React.Fragment>
Error running query: <strong>{error}</strong>
</React.Fragment>
);
break;
case ExecutionStatus.CANCELED:
message = <React.Fragment>Query was canceled</React.Fragment>;
break;
// no default
}

@@ -66,7 +74,7 @@ QueryExecutionStatus.propTypes = {
};

QueryExecutionStatus.defaultProps = {
status: "waiting",
status: ExecutionStatus.QUEUED,
updatedAt: null,
error: null,
isCancelling: true,
@@ -1,38 +0,0 @@
import React from "react";
import Form from "antd/lib/form";
import Checkbox from "antd/lib/checkbox";
import Skeleton from "antd/lib/skeleton";
import HelpTrigger from "@/components/HelpTrigger";
import DynamicComponent from "@/components/DynamicComponent";
import { SettingsEditorPropTypes, SettingsEditorDefaultProps } from "../prop-types";

export default function BeaconConsentSettings(props) {
const { values, onChange, loading } = props;

return (
<DynamicComponent name="OrganizationSettings.BeaconConsentSettings" {...props}>
<Form.Item
label={
<span>
Anonymous Usage Data Sharing
<HelpTrigger className="m-l-5 m-r-5" type="USAGE_DATA_SHARING" />
</span>
}>
{loading ? (
<Skeleton title={{ width: 300 }} paragraph={false} active />
) : (
<Checkbox
name="beacon_consent"
checked={values.beacon_consent}
onChange={e => onChange({ beacon_consent: e.target.checked })}>
Help Redash improve by automatically sending anonymous usage data
</Checkbox>
)}
</Form.Item>
</DynamicComponent>
);
}

BeaconConsentSettings.propTypes = SettingsEditorPropTypes;

BeaconConsentSettings.defaultProps = SettingsEditorDefaultProps;
@@ -4,7 +4,6 @@ import DynamicComponent from "@/components/DynamicComponent";
import FormatSettings from "./FormatSettings";
import PlotlySettings from "./PlotlySettings";
import FeatureFlagsSettings from "./FeatureFlagsSettings";
import BeaconConsentSettings from "./BeaconConsentSettings";

export default function GeneralSettings(props) {
return (
@@ -14,7 +13,6 @@ export default function GeneralSettings(props) {
<FormatSettings {...props} />
<PlotlySettings {...props} />
<FeatureFlagsSettings {...props} />
<BeaconConsentSettings {...props} />
</DynamicComponent>
);
}
@@ -50,18 +50,15 @@ const QueryResultResource = {
};

export const ExecutionStatus = {
WAITING: "waiting",
PROCESSING: "processing",
DONE: "done",
QUEUED: "queued",
STARTED: "started",
FINISHED: "finished",
FAILED: "failed",
LOADING_RESULT: "loading-result",
};

const statuses = {
1: ExecutionStatus.WAITING,
2: ExecutionStatus.PROCESSING,
3: ExecutionStatus.DONE,
4: ExecutionStatus.FAILED,
CANCELED: "canceled",
DEFERRED: "deferred",
SCHEDULED: "scheduled",
STOPPED: "stopped",
};

function handleErrorResponse(queryResult, error) {
@@ -80,7 +77,7 @@ function handleErrorResponse(queryResult, error) {
queryResult.update({
job: {
error: "cached query result unavailable, please execute again.",
status: 4,
status: ExecutionStatus.FAILED,
},
});
return;
@@ -91,7 +88,7 @@ function handleErrorResponse(queryResult, error) {
queryResult.update({
job: {
error: get(error, "response.data.message", "Unknown error occurred. Please try again later."),
status: 4,
status: ExecutionStatus.FAILED,
},
});
}
@@ -102,11 +99,19 @@ function sleep(ms) {

export function fetchDataFromJob(jobId, interval = 1000) {
return axios.get(`api/jobs/${jobId}`).then(data => {
const status = statuses[data.job.status];
if (status === ExecutionStatus.WAITING || status === ExecutionStatus.PROCESSING) {
const status = data.job.status;
if (
[ExecutionStatus.QUEUED, ExecutionStatus.STARTED, ExecutionStatus.SCHEDULED, ExecutionStatus.DEFERRED].includes(
status
)
) {
return sleep(interval).then(() => fetchDataFromJob(data.job.id));
} else if (status === ExecutionStatus.DONE) {
return data.job.result;
} else if (status === ExecutionStatus.FINISHED) {
return data.job.result_id;
} else if (status === ExecutionStatus.CANCELED) {
return Promise.reject("Job was canceled");
} else if (status === ExecutionStatus.STOPPED) {
return Promise.reject("Job was stopped");
} else if (status === ExecutionStatus.FAILED) {
return Promise.reject(data.job.error);
}
@@ -122,7 +127,7 @@ class QueryResult {
this.deferred = defer();
this.job = {};
this.query_result = {};
this.status = "waiting";
this.status = ExecutionStatus.QUEUED;

this.updatedAt = moment();

@@ -138,8 +143,8 @@ class QueryResult {
extend(this, props);

if ("query_result" in props) {
this.status = ExecutionStatus.DONE;
this.deferred.onStatusChange(ExecutionStatus.DONE);
this.status = ExecutionStatus.FINISHED;
this.deferred.onStatusChange(ExecutionStatus.FINISHED);

const columnTypes = {};

@@ -183,11 +188,10 @@ class QueryResult {
});

this.deferred.resolve(this);
} else if (this.job.status === 3 || this.job.status === 2) {
this.deferred.onStatusChange(ExecutionStatus.PROCESSING);
this.status = "processing";
} else if (this.job.status === 4) {
this.status = statuses[this.job.status];
} else if (this.job.status === ExecutionStatus.STARTED || this.job.status === ExecutionStatus.FINISHED) {
this.status = ExecutionStatus.STARTED;
} else if (this.job.status === ExecutionStatus.FAILED) {
this.status = this.job.status;
this.deferred.reject(new QueryResultError(this.job.error));
} else {
this.deferred.onStatusChange(undefined);
@@ -211,7 +215,7 @@ class QueryResult {
if (this.isLoadingResult) {
return ExecutionStatus.LOADING_RESULT;
}
return this.status || statuses[this.job.status];
return this.status || this.job.status;
}

getError() {
@@ -374,7 +378,7 @@ class QueryResult {
this.isLoadingResult = true;
this.deferred.onStatusChange(ExecutionStatus.LOADING_RESULT);

QueryResultResource.get({ id: this.job.query_result_id })
QueryResultResource.get({ id: this.job.result_id })
.then(response => {
this.update(response);
this.isLoadingResult = false;
@@ -389,7 +393,7 @@ class QueryResult {
this.update({
job: {
error: "failed communicating with server. Please check your Internet connection and try again.",
status: 4,
status: ExecutionStatus.FAILED,
},
});
this.isLoadingResult = false;
@@ -413,9 +417,9 @@ class QueryResult {
.then(jobResponse => {
this.update(jobResponse);

if (this.getStatus() === "processing" && this.job.query_result_id && this.job.query_result_id !== "None") {
if (this.getStatus() === ExecutionStatus.STARTED && this.job.result_id && this.job.result_id !== "None") {
loadResult();
} else if (this.getStatus() !== "failed") {
} else if (this.getStatus() !== ExecutionStatus.FAILED) {
const waitTime = tryNumber > 10 ? 3000 : 500;
setTimeout(() => {
this.refreshStatus(query, parameters, tryNumber + 1);
@@ -428,7 +432,7 @@ class QueryResult {
this.update({
job: {
error: "failed communicating with server. Please check your Internet connection and try again.",
status: 4,
status: ExecutionStatus.FAILED,
},
});
});
@@ -2,6 +2,7 @@ import moment from "moment";
import debug from "debug";
import Mustache from "mustache";
import { axios } from "@/services/axios";
import { ExecutionStatus } from "@/services/query-result";
import {
zipObject,
isEmpty,
@@ -103,7 +104,7 @@ export class Query {
return new QueryResult({
job: {
error: `missing ${valuesWord} for ${missingParams.join(", ")} ${paramsWord}.`,
status: 4,
status: ExecutionStatus.FAILED,
},
});
}
@@ -360,7 +361,7 @@ export class QueryResultError {

// eslint-disable-next-line class-methods-use-this
getStatus() {
return "failed";
return ExecutionStatus.FAILED;
}

// eslint-disable-next-line class-methods-use-this
@@ -43,18 +43,18 @@ function seedDatabase(seedValues) {

function buildServer() {
console.log("Building the server...");
execSync("docker compose -p cypress build", { stdio: "inherit" });
execSync("docker compose build", { stdio: "inherit" });
}

function startServer() {
console.log("Starting the server...");
execSync("docker compose -p cypress up -d", { stdio: "inherit" });
execSync("docker compose -p cypress run server create_db", { stdio: "inherit" });
execSync("docker compose up -d", { stdio: "inherit" });
execSync("docker compose run server create_db", { stdio: "inherit" });
}

function stopServer() {
console.log("Stopping the server...");
execSync("docker compose -p cypress down", { stdio: "inherit" });
execSync("docker compose down", { stdio: "inherit" });
}

function runCypressCI() {
@@ -68,7 +68,7 @@ function runCypressCI() {
}

execSync(
"COMMIT_INFO_MESSAGE=$(git show -s --format=%s) docker compose run --name cypress cypress ./node_modules/.bin/percy exec -t 300 -- ./node_modules/.bin/cypress run $CYPRESS_OPTIONS",
"docker compose run --name cypress cypress ./node_modules/.bin/percy exec -t 300 -- ./node_modules/.bin/cypress run $CYPRESS_OPTIONS",
{ stdio: "inherit" }
);
}

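Note: these hunks drop the dedicated "-p cypress" Compose project name and stop prefixing COMMIT_INFO_MESSAGE to the run command; the cypress service in compose.yaml (further below) now picks that value up from the environment. A sketch of an equivalent manual invocation, under that assumption:

# Export the commit message so the cypress container inherits it through compose.yaml
export COMMIT_INFO_MESSAGE="$(git show -s --format=%s)"
docker compose run --name cypress cypress \
  ./node_modules/.bin/percy exec -t 300 -- ./node_modules/.bin/cypress run $CYPRESS_OPTIONS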
@@ -53,11 +53,12 @@ describe("Dashboard Sharing", () => {
};

const dashboardUrl = this.dashboardUrl;
cy.createQuery({ options }).then(({ id: queryId }) => {
cy.createQuery({ options }).then(({ id: queryId, name: queryName }) => {
cy.visit(dashboardUrl);
editDashboard();
cy.getByTestId("AddWidgetButton").click();
cy.getByTestId("AddWidgetDialog").within(() => {
cy.get("input").type(queryName);
cy.get(`.query-selector-result[data-test="QueryId${queryId}"]`).click();
});
cy.contains("button", "Add to Dashboard").click();
@@ -178,11 +179,12 @@ describe("Dashboard Sharing", () => {
};

const dashboardUrl = this.dashboardUrl;
cy.createQuery({ options }).then(({ id: queryId }) => {
cy.createQuery({ options }).then(({ id: queryId, name: queryName }) => {
cy.visit(dashboardUrl);
editDashboard();
cy.getByTestId("AddWidgetButton").click();
cy.getByTestId("AddWidgetDialog").within(() => {
cy.get("input").type(queryName);
cy.get(`.query-selector-result[data-test="QueryId${queryId}"]`).click();
});
cy.contains("button", "Add to Dashboard").click();

@@ -18,11 +18,12 @@ describe("Widget", () => {
};

it("adds widget", function() {
cy.createQuery().then(({ id: queryId }) => {
cy.createQuery().then(({ id: queryId, name: queryName }) => {
cy.visit(this.dashboardUrl);
editDashboard();
cy.getByTestId("AddWidgetButton").click();
cy.getByTestId("AddWidgetDialog").within(() => {
cy.get("input").type(queryName);
cy.get(`.query-selector-result[data-test="QueryId${queryId}"]`).click();
});
cy.contains("button", "Add to Dashboard").click();

compose.base.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
services:
.redash:
build:
context: .
args:
FRONTEND_BUILD_MODE: ${FRONTEND_BUILD_MODE:-2}
INSTALL_GROUPS: ${INSTALL_GROUPS:-main,all_ds,dev}
volumes:
- $PWD:${SERVER_MOUNT:-/ignore}
command: manage version
environment:
REDASH_LOG_LEVEL: INFO
REDASH_REDIS_URL: redis://redis:6379/0
REDASH_DATABASE_URL: postgresql://postgres@postgres/postgres
REDASH_RATELIMIT_ENABLED: false
REDASH_MAIL_DEFAULT_SENDER: redash@example.com
REDASH_MAIL_SERVER: email
REDASH_MAIL_PORT: 1025
REDASH_ENFORCE_CSRF: true
REDASH_COOKIE_SECRET: ${REDASH_COOKIE_SECRET}
REDASH_SECRET_KEY: ${REDASH_SECRET_KEY}
REDASH_PRODUCTION: ${REDASH_PRODUCTION:-true}
env_file:
- .env
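Note: the new compose.base.yaml reads its secrets from the environment and from a local .env file (see env_file above). A minimal sketch of such a .env file follows; the values are placeholders to be replaced with generated secrets, and the commented lines are optional overrides exposed by the compose files.

# .env (example only; substitute your own generated secrets)
REDASH_COOKIE_SECRET=replace-with-a-long-random-string
REDASH_SECRET_KEY=replace-with-another-long-random-string
# Optional overrides picked up by the compose files
# FRONTEND_BUILD_MODE=1
# INSTALL_GROUPS=main,all_ds,dev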
compose.yaml (85 changed lines)
@@ -1,70 +1,81 @@
# This configuration file is for the **development** setup.
# For a production example please refer to getredash/setup repository on GitHub.
x-redash-service: &redash-service
build:
context: .
args:
skip_frontend_build: "true" # set to empty string to build
volumes:
- .:/app
env_file:
- .env
x-redash-environment: &redash-environment
REDASH_LOG_LEVEL: "INFO"
REDASH_REDIS_URL: "redis://redis:6379/0"
REDASH_DATABASE_URL: "postgresql://postgres@postgres/postgres"
REDASH_RATELIMIT_ENABLED: "false"
REDASH_MAIL_DEFAULT_SENDER: "redash@example.com"
REDASH_MAIL_SERVER: "email"
REDASH_MAIL_PORT: 1025
REDASH_ENFORCE_CSRF: "true"
REDASH_GUNICORN_TIMEOUT: 60
# Set secret keys in the .env file
services:
server:
<<: *redash-service
command: dev_server
extends:
file: compose.base.yaml
service: .redash
command: server
depends_on:
- postgres
- redis
ports:
- "5001:5000"
- "${REDASH_PORT:-5001}:5000"
- "5678:5678"
environment:
<<: *redash-environment
PYTHONUNBUFFERED: 0
scheduler:
<<: *redash-service
command: dev_scheduler
extends:
file: compose.base.yaml
service: .redash
profiles:
- e2e
- local
command: scheduler
depends_on:
- server
environment:
<<: *redash-environment
worker:
<<: *redash-service
command: dev_worker
extends:
file: compose.base.yaml
service: .redash
profiles:
- e2e
- local
command: worker
depends_on:
- server
environment:
<<: *redash-environment
PYTHONUNBUFFERED: 0
redis:
image: redis:7-alpine
restart: unless-stopped
postgres:
image: pgautoupgrade/pgautoupgrade:15-alpine3.8
image: postgres:16-alpine
ports:
- "15432:5432"
- "${POSTGRES_PORT:-15432}:5432"
# The following turns the DB into less durable, but gains significant performance improvements for the tests run (x3
# improvement on my personal machine). We should consider moving this into a dedicated Docker Compose configuration for
# tests.
command: "postgres -c fsync=off -c full_page_writes=off -c synchronous_commit=OFF"
command: postgres -c fsync=off -c full_page_writes=off -c synchronous_commit=OFF
restart: unless-stopped
environment:
POSTGRES_HOST_AUTH_METHOD: "trust"
POSTGRES_HOST_AUTH_METHOD: trust
email:
image: maildev/maildev
ports:
- "1080:1080"
- "1025:1025"
restart: unless-stopped
cypress:
ipc: host
build:
context: .
dockerfile: Dockerfile.cypress
profiles:
- e2e
depends_on:
- server
- worker
- scheduler
environment:
CYPRESS_baseUrl: http://server:5000
PERCY_TOKEN: ${PERCY_TOKEN:-""}
PERCY_BRANCH: ${PERCY_BRANCH:-""}
PERCY_COMMIT: ${PERCY_COMMIT:-""}
PERCY_PULL_REQUEST: ${PERCY_PULL_REQUEST:-}
COMMIT_INFO_BRANCH: ${COMMIT_INFO_BRANCH:-""}
COMMIT_INFO_MESSAGE: ${COMMIT_INFO_MESSAGE:-""}
COMMIT_INFO_AUTHOR: ${COMMIT_INFO_AUTHOR:-""}
COMMIT_INFO_SHA: ${COMMIT_INFO_SHA:-""}
COMMIT_INFO_REMOTE: ${COMMIT_INFO_REMOTE:-""}
CYPRESS_PROJECT_ID: ${CYPRESS_PROJECT_ID:-""}
CYPRESS_RECORD_KEY: ${CYPRESS_RECORD_KEY:-""}
CYPRESS_COVERAGE: ${CYPRESS_COVERAGE:-true}

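Note: since scheduler, worker and cypress are now gated behind Compose profiles, a plain docker compose up only starts server, redis, postgres and email. A rough sketch of how the profiles and the new port variables might be used from a shell, assuming Docker Compose v2:

# Redash server and its depends_on services only
docker compose up -d server
# Full local stack, including worker and scheduler
COMPOSE_PROFILES=local docker compose up -d
# Override the published ports (defaults are 5001 for Redash and 15432 for Postgres)
REDASH_PORT=8080 POSTGRES_PORT=5433 docker compose up -d server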
@@ -1,6 +1,6 @@
{
"name": "redash-client",
"version": "24.04.0-dev",
"version": "24.05.0-dev",
"description": "The frontend part of Redash.",
"main": "index.js",
"scripts": {
@@ -24,7 +24,7 @@
"jest": "TZ=Africa/Khartoum jest",
"test": "run-s type-check jest",
"test:watch": "jest --watch",
"cypress": "node client/cypress/cypress.js",
"cypress": "COMPOSE_PROFILES=local node client/cypress/cypress.js",
"preinstall": "cd viz-lib && yarn link --link-folder ../.yarn",
"postinstall": "(cd viz-lib && yarn --frozen-lockfile && yarn build:babel) && yarn link --link-folder ./.yarn @redash/viz"
},

poetry.lock (generated, 361 changed lines)
@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
|
||||
[[package]]
|
||||
name = "adal"
|
||||
@@ -419,17 +419,6 @@ files = [
|
||||
{file = "blinker-1.6.2.tar.gz", hash = "sha256:4afd3de66ef3a9f8067559fb7a1cbe555c17dcbe15971b05d1b625c3e7abe213"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "boto"
|
||||
version = "2.49.0"
|
||||
description = "Amazon Web Services Library"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
files = [
|
||||
{file = "boto-2.49.0-py2.py3-none-any.whl", hash = "sha256:147758d41ae7240dc989f0039f27da8ca0d53734be0eb869ef16e3adcfa462e8"},
|
||||
{file = "boto-2.49.0.tar.gz", hash = "sha256:ea0d3b40a2d852767be77ca343b58a9e3a4b00d9db440efb8da74b4e58025e5a"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "boto3"
|
||||
version = "1.28.8"
|
||||
@@ -891,20 +880,15 @@ tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.1
|
||||
toml = ["tomli"]
|
||||
|
||||
[[package]]
|
||||
name = "croniter"
|
||||
version = "2.0.1"
|
||||
description = "croniter provides iteration for datetime object with cron like format"
|
||||
name = "crontab"
|
||||
version = "1.0.1"
|
||||
description = "Parse and use crontab schedules in Python"
|
||||
optional = false
|
||||
python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
|
||||
python-versions = "*"
|
||||
files = [
|
||||
{file = "croniter-2.0.1-py2.py3-none-any.whl", hash = "sha256:4cb064ce2d8f695b3b078be36ff50115cf8ac306c10a7e8653ee2a5b534673d7"},
|
||||
{file = "croniter-2.0.1.tar.gz", hash = "sha256:d199b2ec3ea5e82988d1f72022433c5f9302b3b3ea9e6bfd6a1518f6ea5e700a"},
|
||||
{file = "crontab-1.0.1.tar.gz", hash = "sha256:89477e3f93c81365e738d5ee2659509e6373bb2846de13922663e79aa74c6b91"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
python-dateutil = "*"
|
||||
pytz = ">2021.1"
|
||||
|
||||
[[package]]
|
||||
name = "cryptography"
|
||||
version = "41.0.6"
|
||||
@@ -1041,22 +1025,23 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "dnspython"
|
||||
version = "2.4.2"
|
||||
version = "2.6.1"
|
||||
description = "DNS toolkit"
|
||||
optional = false
|
||||
python-versions = ">=3.8,<4.0"
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "dnspython-2.4.2-py3-none-any.whl", hash = "sha256:57c6fbaaeaaf39c891292012060beb141791735dbb4004798328fc2c467402d8"},
|
||||
{file = "dnspython-2.4.2.tar.gz", hash = "sha256:8dcfae8c7460a2f84b4072e26f1c9f4101ca20c071649cb7c34e8b6a93d58984"},
|
||||
{file = "dnspython-2.6.1-py3-none-any.whl", hash = "sha256:5ef3b9680161f6fa89daf8ad451b5f1a33b18ae8a1c6778cdf4b43f08c0a6e50"},
|
||||
{file = "dnspython-2.6.1.tar.gz", hash = "sha256:e8f0f9c23a7b7cb99ded64e6c3a6f3e701d78f50c55e002b839dea7225cff7cc"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
dnssec = ["cryptography (>=2.6,<42.0)"]
|
||||
doh = ["h2 (>=4.1.0)", "httpcore (>=0.17.3)", "httpx (>=0.24.1)"]
|
||||
doq = ["aioquic (>=0.9.20)"]
|
||||
idna = ["idna (>=2.1,<4.0)"]
|
||||
trio = ["trio (>=0.14,<0.23)"]
|
||||
wmi = ["wmi (>=1.5.1,<2.0.0)"]
|
||||
dev = ["black (>=23.1.0)", "coverage (>=7.0)", "flake8 (>=7)", "mypy (>=1.8)", "pylint (>=3)", "pytest (>=7.4)", "pytest-cov (>=4.1.0)", "sphinx (>=7.2.0)", "twine (>=4.0.0)", "wheel (>=0.42.0)"]
|
||||
dnssec = ["cryptography (>=41)"]
|
||||
doh = ["h2 (>=4.1.0)", "httpcore (>=1.0.0)", "httpx (>=0.26.0)"]
|
||||
doq = ["aioquic (>=0.9.25)"]
|
||||
idna = ["idna (>=3.6)"]
|
||||
trio = ["trio (>=0.23)"]
|
||||
wmi = ["wmi (>=1.5.1)"]
|
||||
|
||||
[[package]]
|
||||
name = "e6data-python-connector"
|
||||
@@ -1729,22 +1714,23 @@ google-auth-oauthlib = ">=0.4.1"
|
||||
|
||||
[[package]]
|
||||
name = "gunicorn"
|
||||
version = "20.0.4"
|
||||
version = "22.0.0"
|
||||
description = "WSGI HTTP Server for UNIX"
|
||||
optional = false
|
||||
python-versions = ">=3.4"
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "gunicorn-20.0.4-py2.py3-none-any.whl", hash = "sha256:cd4a810dd51bf497552cf3f863b575dabd73d6ad6a91075b65936b151cbf4f9c"},
|
||||
{file = "gunicorn-20.0.4.tar.gz", hash = "sha256:1904bb2b8a43658807108d59c3f3d56c2b6121a701161de0ddf9ad140073c626"},
|
||||
{file = "gunicorn-22.0.0-py3-none-any.whl", hash = "sha256:350679f91b24062c86e386e198a15438d53a7a8207235a78ba1b53df4c4378d9"},
|
||||
{file = "gunicorn-22.0.0.tar.gz", hash = "sha256:4a0b436239ff76fb33f11c07a16482c521a7e09c1ce3cc293c2330afe01bec63"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
setuptools = ">=3.0"
|
||||
packaging = "*"
|
||||
|
||||
[package.extras]
|
||||
eventlet = ["eventlet (>=0.9.7)"]
|
||||
gevent = ["gevent (>=0.13)"]
|
||||
eventlet = ["eventlet (>=0.24.1,!=0.36.0)"]
|
||||
gevent = ["gevent (>=1.4.0)"]
|
||||
setproctitle = ["setproctitle"]
|
||||
testing = ["coverage", "eventlet", "gevent", "pytest", "pytest-cov"]
|
||||
tornado = ["tornado (>=0.2)"]
|
||||
|
||||
[[package]]
|
||||
@@ -1832,13 +1818,13 @@ license = ["ukkonen"]
|
||||
|
||||
[[package]]
|
||||
name = "idna"
|
||||
version = "3.6"
|
||||
version = "3.7"
|
||||
description = "Internationalized Domain Names in Applications (IDNA)"
|
||||
optional = false
|
||||
python-versions = ">=3.5"
|
||||
files = [
|
||||
{file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"},
|
||||
{file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"},
|
||||
{file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"},
|
||||
{file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1897,16 +1883,6 @@ thriftpy2 = {version = ">=0.4.0,<0.5.0", markers = "python_version >= \"3.0\""}
|
||||
[package.extras]
|
||||
kerberos = ["thrift_sasl (==0.2.1)"]
|
||||
|
||||
[[package]]
|
||||
name = "inflection"
|
||||
version = "0.3.1"
|
||||
description = "A port of Ruby on Rails inflector to Python"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
files = [
|
||||
{file = "inflection-0.3.1.tar.gz", hash = "sha256:18ea7fb7a7d152853386523def08736aa8c32636b047ade55f7578c4edeb16ca"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "influxdb"
|
||||
version = "5.2.3"
|
||||
@@ -2662,42 +2638,42 @@ et-xmlfile = "*"
|
||||
|
||||
[[package]]
|
||||
name = "oracledb"
|
||||
version = "2.0.1"
|
||||
version = "2.1.2"
|
||||
description = "Python interface to Oracle Database"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "oracledb-2.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:deff10e6fd97cc6f6fa9bc94ebcc3875a38fbcfd1de5ce4c372658ff82d5037b"},
|
||||
{file = "oracledb-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1b68a09ead904982f260bdb1c9f1565777f0d8893e95599a460068d2824d9a6"},
|
||||
{file = "oracledb-2.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f77566314fc2a2da6022d9bbb769c0d08724c6b321ce0bae73b627d65639cccc"},
|
||||
{file = "oracledb-2.0.1-cp310-cp310-win32.whl", hash = "sha256:b82d92c3c25550033bf41263f30fa9c775bd35b5e57d95b5d2a4d4ae83e456c6"},
|
||||
{file = "oracledb-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:608a7baf1adb952848e1534dab59aa3b1b7bf3771e6940d042b40826f2747aaf"},
|
||||
{file = "oracledb-2.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:903a51550ad339d475fd1cd77059157687608fc5da3af3641728c0baada5aa06"},
|
||||
{file = "oracledb-2.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:465389b3f9b54edcb364d66d848b048982beaaa4a24ff9a23b29582e5ac33dc9"},
|
||||
{file = "oracledb-2.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d504cc4ee4d2c511ca3185ef8ebf04a538d749fc30d4cc59843a6cff5d11df6"},
|
||||
{file = "oracledb-2.0.1-cp311-cp311-win32.whl", hash = "sha256:4b8a23bcd94a790f92680ae44f877ba8e01626c9deb41e9f788a1ef788ff2ca8"},
|
||||
{file = "oracledb-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:b301f68bf1f4e79e7b1b00dc070853b3256f00dea503f80ce7d22cba5f6559f4"},
|
||||
{file = "oracledb-2.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:4fcd5f3127cd833939dfccc615c0ae328535feba3c66765d8308e2bbdecda5dd"},
|
||||
{file = "oracledb-2.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c7cc2c51ed0657f6bda23002e7e407c4c971c9c5d90baa073d78b056e77672d"},
|
||||
{file = "oracledb-2.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3338e31fa946b84baf1ae675cb9ab242464dd072cbafc7e885a1b1130215fa9d"},
|
||||
{file = "oracledb-2.0.1-cp312-cp312-win32.whl", hash = "sha256:a64690c6efef0ebe04684ce893a116357b02c269e1ae88653165ac04bbcb5a8a"},
|
||||
{file = "oracledb-2.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:08827b11a8a5e6485b1f694714550c856db8f288b85f0e0c9a5a2bbbde1a92ae"},
|
||||
{file = "oracledb-2.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:73f90d6ea1a3b9b26457ecd380541fe97216867b5c757e3d66f047aa2c0e94e7"},
|
||||
{file = "oracledb-2.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3840ceecb83dc55c8e03d3f559eb0a640cf06a7584f30cb760ba580fdbfa3501"},
|
||||
{file = "oracledb-2.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:805d1d736ca137427521499f9a58ccfe981a1ccbb9be5cabc2e204dee7700790"},
|
||||
{file = "oracledb-2.0.1-cp37-cp37m-win32.whl", hash = "sha256:1dd8a0f39cb62b33a6bba2b601435522ee42ad5a526db3d2f858b671bf87d956"},
|
||||
{file = "oracledb-2.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:45eaf9eb0d7c27ba9b1cfb09c86ffefeeffa82d90dc3ad910725623a11495e1e"},
|
||||
{file = "oracledb-2.0.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:b646b8bcacb1163a64646922965060131982641d704396585ed5f663329e3a0e"},
|
||||
{file = "oracledb-2.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b89b812e79ddfd670b0147717b3c1c8da1f2b967c5d2bc6dd8ce96f6ffef214"},
|
||||
{file = "oracledb-2.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf901522ee069fe6a44c32864b1b42016613b78f7acfddd2075d0d3f117ac6f7"},
|
||||
{file = "oracledb-2.0.1-cp38-cp38-win32.whl", hash = "sha256:bd3d2f8ec54d741b74aaa551d127d25a6bbd01351736dc2645ed0d2c6b3f828e"},
|
||||
{file = "oracledb-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:ba7c086d17852564c3e906ce5f285e97d3da261672febfc83e4a8d5882795ae8"},
|
||||
{file = "oracledb-2.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4f1d99ed802131197b21b011bdbf90f418ff7fac95daf2419c79dd6355c469b5"},
|
||||
{file = "oracledb-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b555d1da8ac6a0922e66127b1ed424f6814d9789b1551df0d6d37df8d006e79"},
|
||||
{file = "oracledb-2.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f05c7a5285c06a2acd55ed60aa24bd1adb931686f56684847672c9cef7dfec3"},
|
||||
{file = "oracledb-2.0.1-cp39-cp39-win32.whl", hash = "sha256:bb5dda68fb8ca7c817d2b70f3fdc38cfca1f635e35850ffb282f44c86885a22d"},
|
||||
{file = "oracledb-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:20c4bfff62df2198f99c5fc58c8df6ef934f5583875e5ab43bf6438b920f0cba"},
|
||||
{file = "oracledb-2.0.1.tar.gz", hash = "sha256:c12235a9eef123038184e57f3b9b145e149b22654e8242024cf4e81cd890f523"},
|
||||
{file = "oracledb-2.1.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4ffaba9504c638c29129b484cf547accf750bd0f86df1ca6194646a4d2540691"},
|
||||
{file = "oracledb-2.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71d98deb1e3a500920f5460d457925f0c8cef8d037881fdbd16df1c4734453dd"},
|
||||
{file = "oracledb-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bde2df672704fbe12ab0653f6e808b1ed62de28c6864b17fc3a1fcac9c1fd472"},
|
||||
{file = "oracledb-2.1.2-cp310-cp310-win32.whl", hash = "sha256:3b3798a1220fc8736a37b9280d0ae4cdf263bb203fc6e2b3a82c33f9a2010702"},
|
||||
{file = "oracledb-2.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:92620efd5eb0d23b252d75f2f2ff1deadf25f44546903e3283760cb276d524ed"},
|
||||
{file = "oracledb-2.1.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b913a164e1830d0e955b88d97c5e4da4d2402f8a8b0d38febb6ad5a8ef9e4743"},
|
||||
{file = "oracledb-2.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c53827344c6d001f492aee0a3acb6c1b6c0f3030c2f5dc8cb86dc4f0bb4dd1ab"},
|
||||
{file = "oracledb-2.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50225074841d5f9b281d620c012ced4b0946ff5a941c8b639be7babda5190709"},
|
||||
{file = "oracledb-2.1.2-cp311-cp311-win32.whl", hash = "sha256:a043b4df2919411b787bcd24ffa4286249a11d05d29bb20bb076d108c3c6f777"},
|
||||
{file = "oracledb-2.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:9edce208c26ee018e43b75323888743031be3e9f0c0e4221abf037129c12d949"},
|
||||
{file = "oracledb-2.1.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:08aa313b801dda950918168d3962ba59a617adce143e0c2bf1ee9b847695faaa"},
|
||||
{file = "oracledb-2.1.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de5c932b04d3bcdd22c71c0e5c5e1d16b6a3a2fc68dc472ee3a12e677461354c"},
|
||||
{file = "oracledb-2.1.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d590caf39b1901bcba394fcda9815438faff0afaf374025f89ef5d65993d0a4"},
|
||||
{file = "oracledb-2.1.2-cp312-cp312-win32.whl", hash = "sha256:1e3ffdfe76c97d1ca13a3fecf239c96d3889015bb5b775dc22b947108044b01e"},
|
||||
{file = "oracledb-2.1.2-cp312-cp312-win_amd64.whl", hash = "sha256:8c1eaf8c74bb6de5772de768f2f3f5eb935ab935c633d3a012ddff7e691a2073"},
|
||||
{file = "oracledb-2.1.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e2ee06e154e08cc5e4037855d74dc6e37dc054c91a7a1a372bb60d4442e2ed3d"},
|
||||
{file = "oracledb-2.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a21d84aaf5dddab0cfa8ab7c23272c0295a5c796f212a4ce8a6b499643663dd"},
|
||||
{file = "oracledb-2.1.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b337f7cf30753c3a32302fbc25ca80d7ff5049dd9333e681236a674a90c21caf"},
|
||||
{file = "oracledb-2.1.2-cp37-cp37m-win32.whl", hash = "sha256:b5d936763a9b26d32c4e460dbb346c2a962fcc98e6df33dd2d81fdc2eb26f1e4"},
|
||||
{file = "oracledb-2.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:0ea32b87b7202811d85082f10bf7789747ce45f195be4199c5611e7d76a79e78"},
|
||||
{file = "oracledb-2.1.2-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:f94b22da87e051e3a8620d2b04d99e1cc9d9abb4da6736d6ae0ca436ba03fb86"},
|
||||
{file = "oracledb-2.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:391034ee66717dba514e765263d08d18a2aa7badde373f82599b89e46fa3720a"},
|
||||
{file = "oracledb-2.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a2d9891244b9b94465e30af8cc79380bbb41081c5dc0511cbc94cc250e9e26d"},
|
||||
{file = "oracledb-2.1.2-cp38-cp38-win32.whl", hash = "sha256:9a9a6e0bf61952c2c82614b98fe896d2cda17d81ffca4527556e6607b10e3365"},
|
||||
{file = "oracledb-2.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:d9a6447589f203ca846526c99a667537b099d54ddeff09d24f9da59bdcc8f98b"},
|
||||
{file = "oracledb-2.1.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8eb688dd1f8ea2038d17bc84fb651aa1e994b155d3cb8b8387df70ab2a7b4c4c"},
|
||||
{file = "oracledb-2.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f22c31b894bb085a33d70e174c9bcd0abafc630c2c941ff0d630ee3852f1aa6"},
|
||||
{file = "oracledb-2.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5bc03520b8bd4dbf2ac4d937d298a85a7208ffbeec738eea92ad7bb00e7134a"},
|
||||
{file = "oracledb-2.1.2-cp39-cp39-win32.whl", hash = "sha256:5d4f6bd1036d7edbb96d8d31f0ca53696a013c00ac82fc19ac0ca374d2265b2c"},
|
||||
{file = "oracledb-2.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:69bde9770392c1c859b1e1d767dbb9ca4c57e3f2946ca90c779d9402a7e96111"},
|
||||
{file = "oracledb-2.1.2.tar.gz", hash = "sha256:3054bcc295d7378834ba7a5aceb865985e954915f9b07a843ea84c3824c6a0b2"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -3390,85 +3366,93 @@ tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "pymongo"
|
||||
version = "4.3.3"
|
||||
version = "4.6.3"
|
||||
description = "Python driver for MongoDB <http://www.mongodb.org>"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "pymongo-4.3.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:74731c9e423c93cbe791f60c27030b6af6a948cef67deca079da6cd1bb583a8e"},
|
||||
{file = "pymongo-4.3.3-cp310-cp310-manylinux1_i686.whl", hash = "sha256:66413c50d510e5bcb0afc79880d1693a2185bcea003600ed898ada31338c004e"},
|
||||
{file = "pymongo-4.3.3-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:9b87b23570565a6ddaa9244d87811c2ee9cffb02a753c8a2da9c077283d85845"},
|
||||
{file = "pymongo-4.3.3-cp310-cp310-manylinux2014_i686.whl", hash = "sha256:695939036a320f4329ccf1627edefbbb67cc7892b8222d297b0dd2313742bfee"},
|
||||
{file = "pymongo-4.3.3-cp310-cp310-manylinux2014_ppc64le.whl", hash = "sha256:ffcc8394123ea8d43fff8e5d000095fe7741ce3f8988366c5c919c4f5eb179d3"},
|
||||
{file = "pymongo-4.3.3-cp310-cp310-manylinux2014_s390x.whl", hash = "sha256:943f208840777f34312c103a2d1caab02d780c4e9be26b3714acf6c4715ba7e1"},
|
||||
{file = "pymongo-4.3.3-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:01f7cbe88d22440b6594c955e37312d932fd632ffed1a86d0c361503ca82cc9d"},
|
||||
{file = "pymongo-4.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdb87309de97c63cb9a69132e1cb16be470e58cffdfbad68fdd1dc292b22a840"},
|
||||
{file = "pymongo-4.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d86c35d94b5499689354ccbc48438a79f449481ee6300f3e905748edceed78e7"},
|
||||
{file = "pymongo-4.3.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a966d5304b7d90c45c404914e06bbf02c5bf7e99685c6c12f0047ef2aa837142"},
|
||||
{file = "pymongo-4.3.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be1d2ce7e269215c3ee9a215e296b7a744aff4f39233486d2c4d77f5f0c561a6"},
|
||||
{file = "pymongo-4.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:55b6163dac53ef1e5d834297810c178050bd0548a4136cd4e0f56402185916ca"},
|
||||
{file = "pymongo-4.3.3-cp310-cp310-win32.whl", hash = "sha256:dc0cff74cd36d7e1edba91baa09622c35a8a57025f2f2b7a41e3f83b1db73186"},
|
||||
{file = "pymongo-4.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:cafa52873ae12baa512a8721afc20de67a36886baae6a5f394ddef0ce9391f91"},
|
||||
{file = "pymongo-4.3.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:599d3f6fbef31933b96e2d906b0f169b3371ff79ea6aaf6ecd76c947a3508a3d"},
|
||||
{file = "pymongo-4.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0640b4e9d008e13956b004d1971a23377b3d45491f87082161c92efb1e6c0d6"},
|
||||
{file = "pymongo-4.3.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:341221e2f2866a5960e6f8610f4cbac0bb13097f3b1a289aa55aba984fc0d969"},
|
||||
{file = "pymongo-4.3.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7fac06a539daef4fcf5d8288d0d21b412f9b750454cd5a3cf90484665db442a"},
|
||||
{file = "pymongo-4.3.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3a51901066696c4af38c6c63a1f0aeffd5e282367ff475de8c191ec9609b56d"},
|
||||
{file = "pymongo-4.3.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3055510fdfdb1775bc8baa359783022f70bb553f2d46e153c094dfcb08578ff"},
|
||||
{file = "pymongo-4.3.3-cp311-cp311-win32.whl", hash = "sha256:524d78673518dcd352a91541ecd2839c65af92dc883321c2109ef6e5cd22ef23"},
|
||||
{file = "pymongo-4.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:b8a03af1ce79b902a43f5f694c4ca8d92c2a4195db0966f08f266549e2fc49bc"},
|
||||
{file = "pymongo-4.3.3-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:39b03045c71f761aee96a12ebfbc2f4be89e724ff6f5e31c2574c1a0e2add8bd"},
|
||||
{file = "pymongo-4.3.3-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:6fcfbf435eebf8a1765c6d1f46821740ebe9f54f815a05c8fc30d789ef43cb12"},
|
||||
{file = "pymongo-4.3.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:7d43ac9c7eeda5100fb0a7152fab7099c9cf9e5abd3bb36928eb98c7d7a339c6"},
|
||||
{file = "pymongo-4.3.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3b93043b14ba7eb08c57afca19751658ece1cfa2f0b7b1fb5c7a41452fbb8482"},
|
||||
{file = "pymongo-4.3.3-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:c09956606c08c4a7c6178a04ba2dd9388fcc5db32002ade9c9bc865ab156ab6d"},
|
||||
{file = "pymongo-4.3.3-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:b0cfe925610f2fd59555bb7fc37bd739e4b197d33f2a8b2fae7b9c0c6640318c"},
|
||||
{file = "pymongo-4.3.3-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:4d00b91c77ceb064c9b0459f0d6ea5bfdbc53ea9e17cf75731e151ef25a830c7"},
|
||||
{file = "pymongo-4.3.3-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:c6258a3663780ae47ba73d43eb63c79c40ffddfb764e09b56df33be2f9479837"},
|
||||
{file = "pymongo-4.3.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c29e758f0e734e1e90357ae01ec9c6daf19ff60a051192fe110d8fb25c62600e"},
|
||||
{file = "pymongo-4.3.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12f3621a46cdc7a9ba8080422262398a91762a581d27e0647746588d3f995c88"},
|
||||
{file = "pymongo-4.3.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:47f7aa217b25833cd6f0e72b0d224be55393c2692b4f5e0561cb3beeb10296e9"},
|
||||
{file = "pymongo-4.3.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c2fdc855149efe7cdcc2a01ca02bfa24761c640203ea94df467f3baf19078be"},
|
||||
{file = "pymongo-4.3.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5effd87c7d363890259eac16c56a4e8da307286012c076223997f8cc4a8c435b"},
|
||||
{file = "pymongo-4.3.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6dd1cf2995fdbd64fc0802313e8323f5fa18994d51af059b5b8862b73b5e53f0"},
|
||||
{file = "pymongo-4.3.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:bb869707d8e30645ed6766e44098600ca6cdf7989c22a3ea2b7966bb1d98d4b2"},
|
||||
{file = "pymongo-4.3.3-cp37-cp37m-win32.whl", hash = "sha256:49210feb0be8051a64d71691f0acbfbedc33e149f0a5d6e271fddf6a12493fed"},
|
||||
{file = "pymongo-4.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:54c377893f2cbbffe39abcff5ff2e917b082c364521fa079305f6f064e1a24a9"},
|
||||
{file = "pymongo-4.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c184ec5be465c0319440734491e1aa4709b5f3ba75fdfc9dbbc2ae715a7f6829"},
|
||||
{file = "pymongo-4.3.3-cp38-cp38-manylinux1_i686.whl", hash = "sha256:dca34367a4e77fcab0693e603a959878eaf2351585e7d752cac544bc6b2dee46"},
|
||||
{file = "pymongo-4.3.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:cd6a4afb20fb3c26a7bfd4611a0bbb24d93cbd746f5eb881f114b5e38fd55501"},
|
||||
{file = "pymongo-4.3.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:0c466710871d0026c190fc4141e810cf9d9affbf4935e1d273fbdc7d7cda6143"},
|
||||
{file = "pymongo-4.3.3-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:d07d06dba5b5f7d80f9cc45501456e440f759fe79f9895922ed486237ac378a8"},
|
||||
{file = "pymongo-4.3.3-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:711bc52cb98e7892c03e9b669bebd89c0a890a90dbc6d5bb2c47f30239bac6e9"},
|
||||
{file = "pymongo-4.3.3-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:34b040e095e1671df0c095ec0b04fc4ebb19c4c160f87c2b55c079b16b1a6b00"},
|
||||
{file = "pymongo-4.3.3-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:4ed00f96e147f40b565fe7530d1da0b0f3ab803d5dd5b683834500fa5d195ec4"},
|
||||
{file = "pymongo-4.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef888f48eb9203ee1e04b9fb27429017b290fb916f1e7826c2f7808c88798394"},
|
||||
{file = "pymongo-4.3.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:316498b642c00401370b2156b5233b256f9b33799e0a8d9d0b8a7da217a20fca"},
|
||||
{file = "pymongo-4.3.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fa7e202feb683dad74f00dea066690448d0cfa310f8a277db06ec8eb466601b5"},
|
||||
{file = "pymongo-4.3.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52896e22115c97f1c829db32aa2760b0d61839cfe08b168c2b1d82f31dbc5f55"},
|
||||
{file = "pymongo-4.3.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c051fe37c96b9878f37fa58906cb53ecd13dcb7341d3a85f1e2e2f6b10782d9"},
|
||||
{file = "pymongo-4.3.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5134d33286c045393c7beb51be29754647cec5ebc051cf82799c5ce9820a2ca2"},
|
||||
{file = "pymongo-4.3.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a9c2885b4a8e6e39db5662d8b02ca6dcec796a45e48c2de12552841f061692ba"},
|
||||
{file = "pymongo-4.3.3-cp38-cp38-win32.whl", hash = "sha256:a6cd6f1db75eb07332bd3710f58f5fce4967eadbf751bad653842750a61bda62"},
|
||||
{file = "pymongo-4.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:d5571b6978750601f783cea07fb6b666837010ca57e5cefa389c1d456f6222e2"},
|
||||
{file = "pymongo-4.3.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:81d1a7303bd02ca1c5be4aacd4db73593f573ba8e0c543c04c6da6275fd7a47e"},
|
||||
{file = "pymongo-4.3.3-cp39-cp39-manylinux1_i686.whl", hash = "sha256:016c412118e1c23fef3a1eada4f83ae6e8844fd91986b2e066fc1b0013cdd9ae"},
|
||||
{file = "pymongo-4.3.3-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:8fd6e191b92a10310f5a6cfe10d6f839d79d192fb02480bda325286bd1c7b385"},
|
||||
{file = "pymongo-4.3.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:e2961b05f9c04a53da8bfc72f1910b6aec7205fcf3ac9c036d24619979bbee4b"},
|
||||
{file = "pymongo-4.3.3-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:b38a96b3eed8edc515b38257f03216f382c4389d022a8834667e2bc63c0c0c31"},
|
||||
{file = "pymongo-4.3.3-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:c1a70c51da9fa95bd75c167edb2eb3f3c4d27bc4ddd29e588f21649d014ec0b7"},
|
||||
{file = "pymongo-4.3.3-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:8a06a0c02f5606330e8f2e2f3b7949877ca7e4024fa2bff5a4506bec66c49ec7"},
|
||||
{file = "pymongo-4.3.3-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:6c2216d8b6a6d019c6f4b1ad55f890e5e77eb089309ffc05b6911c09349e7474"},
|
||||
{file = "pymongo-4.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eac0a143ef4f28f49670bf89cb15847eb80b375d55eba401ca2f777cd425f338"},
|
||||
{file = "pymongo-4.3.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:08fc250b5552ee97ceeae0f52d8b04f360291285fc7437f13daa516ce38fdbc6"},
|
||||
{file = "pymongo-4.3.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704d939656e21b073bfcddd7228b29e0e8a93dd27b54240eaafc0b9a631629a6"},
|
||||
{file = "pymongo-4.3.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1074f1a6f23e28b983c96142f2d45be03ec55d93035b471c26889a7ad2365db3"},
|
||||
{file = "pymongo-4.3.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b16250238de8dafca225647608dddc7bbb5dce3dd53b4d8e63c1cc287394c2f"},
|
||||
{file = "pymongo-4.3.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7761cacb8745093062695b11574effea69db636c2fd0a9269a1f0183712927b4"},
|
||||
{file = "pymongo-4.3.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:fd7bb378d82b88387dc10227cfd964f6273eb083e05299e9b97cbe075da12d11"},
|
||||
{file = "pymongo-4.3.3-cp39-cp39-win32.whl", hash = "sha256:dc24d245026a72d9b4953729d31813edd4bd4e5c13622d96e27c284942d33f24"},
|
||||
{file = "pymongo-4.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:fc28e8d85d392a06434e9a934908d97e2cf453d69488d2bcd0bfb881497fd975"},
|
||||
{file = "pymongo-4.3.3.tar.gz", hash = "sha256:34e95ffb0a68bffbc3b437f2d1f25fc916fef3df5cdeed0992da5f42fae9b807"},
|
||||
{file = "pymongo-4.6.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e344d0afdd7c06c1f1e66a4736593293f432defc2191e6b411fc9c82fa8c5adc"},
|
||||
{file = "pymongo-4.6.3-cp310-cp310-manylinux1_i686.whl", hash = "sha256:731a92dfc4022db763bfa835c6bd160f2d2cba6ada75749c2ed500e13983414b"},
|
||||
{file = "pymongo-4.6.3-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:c4726e36a2f7e92f09f5b8e92ba4db7525daffe31a0dcbcf0533edc0ade8c7d8"},
|
||||
{file = "pymongo-4.6.3-cp310-cp310-manylinux2014_i686.whl", hash = "sha256:00e6cfce111883ca63a3c12878286e0b89871f4b840290e61fb6f88ee0e687be"},
|
||||
{file = "pymongo-4.6.3-cp310-cp310-manylinux2014_ppc64le.whl", hash = "sha256:cc7a26edf79015c58eea46feb5b262cece55bc1d4929a8a9e0cbe7e6d6a9b0eb"},
|
||||
{file = "pymongo-4.6.3-cp310-cp310-manylinux2014_s390x.whl", hash = "sha256:4955be64d943b30f2a7ff98d818ca530f7cb37450bc6b32c37e0e74821907ef8"},
|
||||
{file = "pymongo-4.6.3-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:af039afc6d787502c02089759778b550cb2f25dbe2780f5b050a2e37031c3fbf"},
|
||||
{file = "pymongo-4.6.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ccc15a7c7a99aed7d0831eaf78a607f1db0c7a255f96e3d18984231acd72f70c"},
|
||||
{file = "pymongo-4.6.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8e97c138d811e9367723fcd07c4402a9211caae20479fdd6301d57762778a69f"},
|
||||
{file = "pymongo-4.6.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ebcc145c74d06296ce0cad35992185064e5cb2aadef719586778c144f0cd4d37"},
|
||||
{file = "pymongo-4.6.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:664c64b6bdb31aceb80f0556951e5e2bf50d359270732268b4e7af00a1cf5d6c"},
|
||||
{file = "pymongo-4.6.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4056bc421d4df2c61db4e584415f2b0f1eebb92cbf9222f7f38303467c37117"},
|
||||
{file = "pymongo-4.6.3-cp310-cp310-win32.whl", hash = "sha256:cdbea2aac1a4caa66ee912af3601557d2bda2f9f69feec83601c78c7e53ece64"},
|
||||
{file = "pymongo-4.6.3-cp310-cp310-win_amd64.whl", hash = "sha256:6cec7279e5a1b74b257d0270a8c97943d745811066630a6bc6beb413c68c6a33"},
|
||||
{file = "pymongo-4.6.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:138b9fa18d40401c217bc038a48bcde4160b02d36d8632015b1804971a2eaa2f"},
|
||||
{file = "pymongo-4.6.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60931b0e07448afe8866ffff764cd5bf4b1a855dc84c7dcb3974c6aa6a377a59"},
|
||||
{file = "pymongo-4.6.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9b35f8bded43ff91475305445fedf0613f880ff7e25c75ae1028e1260a9b7a86"},
|
||||
{file = "pymongo-4.6.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:872bad5c83f7eec9da11e1fef5f858c6a4c79fe4a83c7780e7b0fe95d560ae3f"},
|
||||
{file = "pymongo-4.6.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2ad3e5bfcd345c0bfe9af69a82d720860b5b043c1657ffb513c18a0dee19c19"},
|
||||
{file = "pymongo-4.6.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e208f2ab7b495eff8fd175022abfb0abce6307ac5aee3f4de51fc1a459b71c9"},
|
||||
{file = "pymongo-4.6.3-cp311-cp311-win32.whl", hash = "sha256:4670edbb5ddd71a4d555668ef99b032a5f81b59e4145d66123aa0d831eac7883"},
|
||||
{file = "pymongo-4.6.3-cp311-cp311-win_amd64.whl", hash = "sha256:1c2761302b6cbfd12e239ce1b8061d4cf424a361d199dcb32da534985cae9350"},
|
||||
{file = "pymongo-4.6.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:722f2b709b63311c0efda4fa4c603661faa4bec6bad24a6cc41a3bc6d841bf09"},
|
||||
{file = "pymongo-4.6.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:994386a4d6ad39e18bcede6dc8d1d693ec3ed897b88f86b1841fbc37227406da"},
|
||||
{file = "pymongo-4.6.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:391aea047bba928006114282f175bc8d09c53fe1b7d8920bf888325e229302fe"},
|
||||
{file = "pymongo-4.6.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4330c022024e7994b630199cdae909123e4b0e9cf15335de71b146c0f6a2435"},
|
||||
{file = "pymongo-4.6.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01277a7e183c59081368e4efbde2b8f577014431b257959ca98d3a4e8682dd51"},
|
||||
{file = "pymongo-4.6.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d30d5d7963453b478016bf7b0d87d7089ca24d93dbdecfbc9aa32f1b4772160a"},
|
||||
{file = "pymongo-4.6.3-cp312-cp312-win32.whl", hash = "sha256:a023804a3ac0f85d4510265b60978522368b5815772262e61e3a2222a8b315c9"},
|
||||
{file = "pymongo-4.6.3-cp312-cp312-win_amd64.whl", hash = "sha256:2a6ae9a600bbc2dbff719c98bf5da584fb8a4f2bb23729a09be2e9c3dbc61c8a"},
|
||||
{file = "pymongo-4.6.3-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:3b909e5b1864de01510079b39bbdc480720c37747be5552b354bc73f02c24a3c"},
|
||||
{file = "pymongo-4.6.3-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:48c60bd32ec141c0d45d8471179430003d9fb4490da181b8165fb1dce9cc255c"},
|
||||
{file = "pymongo-4.6.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:36d7049fc183fe4edda3eae7f66ea14c660921429e082fe90b4b7f4dc6664a70"},
|
||||
{file = "pymongo-4.6.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:18e5c161b18660f1c9d1f78236de45520a436be65e42b7bb51f25f74ad22bdde"},
|
||||
{file = "pymongo-4.6.3-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:e458e6fc2b7dd40d15cda04898bd2d8c9ff7ae086c516bc261628d54eb4e3158"},
|
||||
{file = "pymongo-4.6.3-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:e420e74c6db4594a6d09f39b58c0772679006cb0b4fc40901ba608794d87dad2"},
|
||||
{file = "pymongo-4.6.3-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:9c9340c7161e112e36ebb97fbba1cdbe7db3dfacb694d2918b1f155a01f3d859"},
|
||||
{file = "pymongo-4.6.3-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:26d036e0f5de09d0b21d0fc30314fcf2ae6359e4d43ae109aa6cf27b4ce02d30"},
|
||||
{file = "pymongo-4.6.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7cf28d9c90e40d4e385b858e4095739829f466f23e08674085161d86bb4bb10"},
|
||||
{file = "pymongo-4.6.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9066dff9dc0a182478ca5885d0b8a2b820b462e19459ada109df7a3ced31b272"},
|
||||
{file = "pymongo-4.6.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1e1586ebdebe0447a24842480defac17c496430a218486c96e2da3f164c0f05"},
|
||||
{file = "pymongo-4.6.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b3853fb66bf34ce1b6e573e1bbb3cb28763be9d1f57758535757faf1ab2f24a"},
|
||||
{file = "pymongo-4.6.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:462684a6f5ce6f2661c30eab4d1d459231e0eed280f338e716e31a24fc09ccb3"},
|
||||
{file = "pymongo-4.6.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a4ea44e5a913bdb7c9abd34c69e9fcfac10dfaf49765463e0dc1ea922dd2a9d"},
|
||||
{file = "pymongo-4.6.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:098d420a8214ad25f872de7e8b309441995d12ece0376218a04d9ed5d2222cf3"},
|
||||
{file = "pymongo-4.6.3-cp37-cp37m-win32.whl", hash = "sha256:7330245253fbe2e09845069d2f4d35dd27f63e377034c94cb0ddac18bc8b0d82"},
|
||||
{file = "pymongo-4.6.3-cp37-cp37m-win_amd64.whl", hash = "sha256:151361c101600a85cb1c1e0db4e4b28318b521fcafa9b62d389f7342faaaee80"},
|
||||
{file = "pymongo-4.6.3-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:4d167d546352869125dc86f6fda6dffc627d8a9c8963eaee665825f2520d542b"},
|
||||
{file = "pymongo-4.6.3-cp38-cp38-manylinux1_i686.whl", hash = "sha256:eaf3d594ebfd5e1f3503d81e06a5d78e33cda27418b36c2491c3d4ad4fca5972"},
|
||||
{file = "pymongo-4.6.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7ee79e02a7c5ed34706ecb5dad19e6c7d267cf86d28c075ef3127c58f3081279"},
|
||||
{file = "pymongo-4.6.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:af5c5112db04cf62a5d9d224a24f289aaecb47d152c08a457cca81cee061d5bd"},
|
||||
{file = "pymongo-4.6.3-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:6b5aec78aa4840e8d6c3881900259892ab5733a366696ca10d99d68c3d73eaaf"},
|
||||
{file = "pymongo-4.6.3-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:9757602fb45c8ecc1883fe6db7c59c19d87eb3c645ec9342d28a6026837da931"},
|
||||
{file = "pymongo-4.6.3-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:dde9fb6e105ce054339256a8b7a9775212ebb29596ef4e402d7bbc63b354d202"},
|
||||
{file = "pymongo-4.6.3-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:7df8b166d3db6cfead4cf55b481408d8f0935d8bd8d6dbf64507c49ef82c7200"},
|
||||
{file = "pymongo-4.6.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53451190b8628e1ce7d1fe105dc376c3f10705127bd3b51fe3e107b9ff1851e6"},
|
||||
{file = "pymongo-4.6.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:75107a386d4ccf5291e75cce8ca3898430e7907f4cc1208a17c9efad33a1ea84"},
|
||||
{file = "pymongo-4.6.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4a0660ce32d8459b7f12dc3ca0141528fead62d3cce31b548f96f30902074cc0"},
|
||||
{file = "pymongo-4.6.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa310096450e9c461b7dfd66cbc1c41771fe36c06200440bb3e062b1d4a06b6e"},
|
||||
{file = "pymongo-4.6.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5f465cca9b178e7bb782f952dd58e9e92f8ba056e585959465f2bb50feddef5f"},
|
||||
{file = "pymongo-4.6.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c67c19f653053ef2ebd7f1837c2978400058d6d7f66ec5760373a21eaf660158"},
|
||||
{file = "pymongo-4.6.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:c701de8e483fb5e53874aab642235361aac6de698146b02c644389eaa8c137b6"},
|
||||
{file = "pymongo-4.6.3-cp38-cp38-win32.whl", hash = "sha256:90525454546536544307e6da9c81f331a71a1b144e2d038fec587cc9f9250285"},
|
||||
{file = "pymongo-4.6.3-cp38-cp38-win_amd64.whl", hash = "sha256:3e1ba5a037c526a3f4060c28f8d45d71ed9626e2bf954b0cd9a8dcc3b45172ee"},
|
||||
{file = "pymongo-4.6.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:14a82593528cddc93cfea5ee78fac95ae763a3a4e124ca79ee0b24fbbc6da1c9"},
|
||||
{file = "pymongo-4.6.3-cp39-cp39-manylinux1_i686.whl", hash = "sha256:cd6c15242d9306ff1748681c3235284cbe9f807aeaa86cd17d85e72af626e9a7"},
|
||||
{file = "pymongo-4.6.3-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:6de33f1b2eed91b802ec7abeb92ffb981d052f3604b45588309aae9e0f6e3c02"},
|
||||
{file = "pymongo-4.6.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:0182899aafe830f25cf96c5976d724efeaaf7b6646c15424ad8dd25422b2efe1"},
|
||||
{file = "pymongo-4.6.3-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:8d0ea740a2faa56f930dc82c5976d96c017ece26b29a1cddafb58721c7aab960"},
|
||||
{file = "pymongo-4.6.3-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:5c8a4982f5eb767c6fbfb8fb378683d09bcab7c3251ba64357eef600d43f6c23"},
|
||||
{file = "pymongo-4.6.3-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:becfa816545a48c8e740ac2fd624c1c121e1362072d68ffcf37a6b1be8ea187e"},
|
||||
{file = "pymongo-4.6.3-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:ff7d1f449fcad23d9bc8e8dc2b9972be38bcd76d99ea5f7d29b2efa929c2a7ff"},
|
||||
{file = "pymongo-4.6.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e097f877de4d6af13a33ef938bf2a2350f424be5deabf8b857da95f5b080487a"},
|
||||
{file = "pymongo-4.6.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:705a9bfd619301ee7e985d6f91f68b15dfcb2f6f36b8cc225cc82d4260d2bce5"},
|
||||
{file = "pymongo-4.6.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2ef1b4992ee1cb8bb16745e70afa0c02c5360220a7a8bb4775888721f052d0a6"},
|
||||
{file = "pymongo-4.6.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3d10bdd46cbc35a2109737d36ffbef32e7420569a87904738ad444ccb7ac2c5"},
|
||||
{file = "pymongo-4.6.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:17c1c143ba77d6e21fc8b48e93f0a5ed982a23447434e9ee4fbb6d633402506b"},
|
||||
{file = "pymongo-4.6.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e51e30d67b468a2a634ade928b30cb3e420127f148a9aec60de33f39087bdc4"},
|
||||
{file = "pymongo-4.6.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:bec8e4e88984be157408f1923d25869e1b575c07711cdbdde596f66931800934"},
|
||||
{file = "pymongo-4.6.3-cp39-cp39-win32.whl", hash = "sha256:98877a9c4ad42df8253a12d8d17a3265781d1feb5c91c767bd153f88feb0b670"},
|
||||
{file = "pymongo-4.6.3-cp39-cp39-win_amd64.whl", hash = "sha256:6d5b35da9e16cda630baed790ffc3d0d01029d269523a7cec34d2ec7e6823e75"},
|
||||
{file = "pymongo-4.6.3.tar.gz", hash = "sha256:400074090b9a631f120b42c61b222fd743490c133a5d2f99c0208cefcccc964e"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -3476,10 +3460,11 @@ dnspython = ">=1.16.0,<3.0.0"
|
||||
|
||||
[package.extras]
|
||||
aws = ["pymongo-auth-aws (<2.0.0)"]
|
||||
encryption = ["pymongo-auth-aws (<2.0.0)", "pymongocrypt (>=1.3.0,<2.0.0)"]
|
||||
gssapi = ["pykerberos"]
|
||||
ocsp = ["certifi", "pyopenssl (>=17.2.0)", "requests (<3.0.0)", "service-identity (>=18.1.0)"]
|
||||
encryption = ["certifi", "pymongo[aws]", "pymongocrypt (>=1.6.0,<2.0.0)"]
|
||||
gssapi = ["pykerberos", "winkerberos (>=0.5.0)"]
|
||||
ocsp = ["certifi", "cryptography (>=2.5)", "pyopenssl (>=17.2.0)", "requests (<3.0.0)", "service-identity (>=18.1.0)"]
|
||||
snappy = ["python-snappy"]
|
||||
test = ["pytest (>=7)"]
|
||||
zstd = ["zstandard"]
|
||||
|
||||
[[package]]
|
||||
@@ -3840,7 +3825,6 @@ files = [
|
||||
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
|
||||
@@ -3875,23 +3859,6 @@ files = [
|
||||
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "qds-sdk"
|
||||
version = "1.16.1"
|
||||
description = "Python SDK for coding to the Qubole Data Service API"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
files = [
|
||||
{file = "qds_sdk-1.16.1.tar.gz", hash = "sha256:28850682afcf3ab0f2a74a9fd442715519db3ee2ba91c7ecb0b1a56773748ffd"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
boto = ">=2.45.0"
|
||||
inflection = "0.3.1"
|
||||
requests = ">=1.0.3"
|
||||
six = ">=1.2.0"
|
||||
urllib3 = ">=1.0.2"
|
||||
|
||||
[[package]]
|
||||
name = "rdflib"
|
||||
version = "6.3.2"
|
||||
@@ -4163,32 +4130,33 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"]
|
||||
|
||||
[[package]]
|
||||
name = "rq"
|
||||
version = "1.9.0"
|
||||
version = "1.16.1"
|
||||
description = "RQ is a simple, lightweight, library for creating background jobs, and processing them."
|
||||
optional = false
|
||||
python-versions = ">=3.5"
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "rq-1.9.0-py2.py3-none-any.whl", hash = "sha256:7af1e9706dbe6f1eac16dffacd8271ec27c1369950941f14dab6bb08a62979d7"},
|
||||
{file = "rq-1.9.0.tar.gz", hash = "sha256:bdfef943de838955e474cfd0e25b9b8c53ed4b9c361fe4bb11cf56d17a87acc5"},
|
||||
{file = "rq-1.16.1-py3-none-any.whl", hash = "sha256:273de33f10bb9f18cd1e8ccc0a4e8dba2b8eb86a6ab2a91ae674f99bd68025f1"},
|
||||
{file = "rq-1.16.1.tar.gz", hash = "sha256:d9a6314bc759a743b4a5d89aa467eaa3a31dbbc0a34bcd0ee82e8852d9ec166d"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
click = ">=5.0.0"
|
||||
redis = ">=3.5.0"
|
||||
click = ">=5"
|
||||
redis = ">=3.5"
|
||||
|
||||
[[package]]
|
||||
name = "rq-scheduler"
|
||||
version = "0.11.0"
|
||||
version = "0.13.1"
|
||||
description = "Provides job scheduling capabilities to RQ (Redis Queue)"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
files = [
|
||||
{file = "rq-scheduler-0.11.0.tar.gz", hash = "sha256:db79bb56cdbc4f7ffdd8bd659e389e91aa0db9c1abf002dc46f5dd6f0dbd2910"},
|
||||
{file = "rq_scheduler-0.11.0-py2.py3-none-any.whl", hash = "sha256:da94e9b6badf112995ff38fe16192e4f4c43c412b3c9614684ed8c8f7ca517d2"},
|
||||
{file = "rq-scheduler-0.13.1.tar.gz", hash = "sha256:89d6a18f215536362b22c0548db7dbb8678bc520c18dc18a82fd0bb2b91695ce"},
|
||||
{file = "rq_scheduler-0.13.1-py2.py3-none-any.whl", hash = "sha256:c2b19c3aedfc7de4d405183c98aa327506e423bf4cdc556af55aaab9bbe5d1a1"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
croniter = ">=0.3.9"
|
||||
crontab = ">=0.23.0"
|
||||
freezegun = "*"
|
||||
python-dateutil = "*"
|
||||
rq = ">=0.13"
|
||||
|
||||
@@ -4654,19 +4622,18 @@ url = ["furl (>=0.4.1)"]
|
||||
|
||||
[[package]]
|
||||
name = "sqlparse"
|
||||
version = "0.4.4"
|
||||
version = "0.5.0"
|
||||
description = "A non-validating SQL parser."
|
||||
optional = false
|
||||
python-versions = ">=3.5"
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "sqlparse-0.4.4-py3-none-any.whl", hash = "sha256:5430a4fe2ac7d0f93e66f1efc6e1338a41884b7ddf2a350cedd20ccc4d9d28f3"},
|
||||
{file = "sqlparse-0.4.4.tar.gz", hash = "sha256:d446183e84b8349fa3061f0fe7f06ca94ba65b426946ffebe6e3e8295332420c"},
|
||||
{file = "sqlparse-0.5.0-py3-none-any.whl", hash = "sha256:c204494cd97479d0e39f28c93d46c0b2d5959c7b9ab904762ea6c7af211c8663"},
|
||||
{file = "sqlparse-0.5.0.tar.gz", hash = "sha256:714d0a4932c059d16189f58ef5411ec2287a4360f17cdd0edd2d09d4c5087c93"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
dev = ["build", "flake8"]
|
||||
dev = ["build", "hatch"]
|
||||
doc = ["sphinx"]
|
||||
test = ["pytest", "pytest-cov"]
|
||||
|
||||
[[package]]
|
||||
name = "sshtunnel"
|
||||
@@ -5333,4 +5300,4 @@ testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
|
||||
[metadata]
|
||||
lock-version = "2.0"
|
||||
python-versions = ">=3.8,<3.11"
|
||||
content-hash = "db3de74fd61befac80d3a2d3351ebffb719aae21686e2f22a5c07b01fea590ce"
|
||||
content-hash = "e7985ee5c3ca3a4389b4e85fda033a9b3b867dbbe4b4a7fca8ea5c35fc401148"
|
||||
|
||||
@@ -12,7 +12,7 @@ force-exclude = '''
|
||||
|
||||
[tool.poetry]
|
||||
name = "redash"
|
||||
version = "24.04.0-dev"
|
||||
version = "24.05.0-dev"
|
||||
description = "Make Your Company Data Driven. Connect to any data source, easily visualize, dashboard and share your data."
|
||||
authors = ["Arik Fraimovich <arik@redash.io>"]
|
||||
# to be added to/removed from the mailing list, please reach out to Arik via the above email or Discord
|
||||
@@ -43,7 +43,7 @@ flask-wtf = "1.1.1"
|
||||
funcy = "1.13"
|
||||
gevent = "23.9.1"
|
||||
greenlet = "2.0.2"
|
||||
gunicorn = "20.0.4"
|
||||
gunicorn = "22.0.0"
|
||||
httplib2 = "0.19.0"
|
||||
itsdangerous = "2.1.2"
|
||||
jinja2 = "3.1.3"
|
||||
@@ -66,14 +66,14 @@ redis = "4.6.0"
|
||||
regex = "2023.8.8"
|
||||
requests = "2.31.0"
|
||||
restrictedpython = "6.2"
|
||||
rq = "1.9.0"
|
||||
rq-scheduler = "0.11.0"
|
||||
rq = "1.16.1"
|
||||
rq-scheduler = "0.13.1"
|
||||
semver = "2.8.1"
|
||||
sentry-sdk = "1.28.1"
|
||||
sqlalchemy = "1.3.24"
|
||||
sqlalchemy-searchable = "1.2.0"
|
||||
sqlalchemy-utils = "0.34.2"
|
||||
sqlparse = "0.4.4"
|
||||
sqlparse = "0.5.0"
|
||||
sshtunnel = "0.1.5"
|
||||
statsd = "3.3.0"
|
||||
supervisor = "4.1.0"
|
||||
@@ -110,7 +110,7 @@ nzalchemy = "^11.0.2"
nzpy = ">=1.15"
oauth2client = "4.1.3"
openpyxl = "3.0.7"
oracledb = "2.0.1"
oracledb = "2.1.2"
pandas = "1.3.4"
phoenixdb = "0.7"
pinotdb = ">=0.4.5"
@@ -121,12 +121,11 @@ pydruid = "0.5.7"
pyexasol = "0.12.0"
pyhive = "0.6.1"
pyignite = "0.6.1"
pymongo = { version = "4.3.3", extras = ["srv", "tls"] }
pymongo = { version = "4.6.3", extras = ["srv", "tls"] }
pymssql = "2.2.8"
pyodbc = "4.0.28"
python-arango = "6.1.0"
python-rapidjson = "1.1.0"
qds-sdk = ">=1.9.6"
requests-aws-sign = "0.1.5"
sasl = ">=0.1.3"
simple-salesforce = "0.74.3"

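The dependency bumps above (gunicorn, rq, rq-scheduler, sqlparse, pymongo, oracledb) can be sanity-checked against a running environment; the following is an illustrative snippet, not part of this diff, using the standard importlib.metadata API.

# Illustrative only -- not part of this changeset. Assumes Python 3.8+ and that
# the listed distributions are installed in the current environment.
from importlib.metadata import PackageNotFoundError, version

expected = {
    "gunicorn": "22.0.0",
    "rq": "1.16.1",
    "rq-scheduler": "0.13.1",
    "sqlparse": "0.5.0",
    "pymongo": "4.6.3",
    "oracledb": "2.1.2",
}

for package, pinned in expected.items():
    try:
        installed = version(package)
    except PackageNotFoundError:
        installed = "missing"
    print(f"{package}: pinned {pinned}, installed {installed}")
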
@@ -14,7 +14,7 @@ from redash.app import create_app # noqa
from redash.destinations import import_destinations
from redash.query_runner import import_query_runners

__version__ = "24.04.0-dev"
__version__ = "24.05.0-dev"


if os.environ.get("REMOTE_DEBUG"):

@@ -36,14 +36,10 @@ def create_app():
    from .metrics import request as request_metrics
    from .models import db, users
    from .utils import sentry
    from .version_check import reset_new_version_status

    sentry.init()
    app = Redash()

    # Check and update the cached version for use by the client
    reset_new_version_status()

    security.init_app(app)
    request_metrics.init_app(app)
    db.init_app(app)

@@ -26,13 +26,13 @@ class Slack(BaseDestination):
|
||||
fields = [
|
||||
{
|
||||
"title": "Query",
|
||||
"type": "mrkdwn",
|
||||
"value": "{host}/queries/{query_id}".format(host=host, query_id=query.id),
|
||||
"short": True,
|
||||
},
|
||||
{
|
||||
"title": "Alert",
|
||||
"type": "mrkdwn",
|
||||
"value": "{host}/alerts/{alert_id}".format(host=host, alert_id=alert.id),
|
||||
"short": True,
|
||||
},
|
||||
]
|
||||
if alert.custom_body:
|
||||
|
||||
@@ -236,11 +236,11 @@ api.add_org_resource(
|
||||
)
|
||||
api.add_org_resource(
|
||||
QueryResultResource,
|
||||
"/api/query_results/<query_result_id>.<filetype>",
|
||||
"/api/query_results/<query_result_id>",
|
||||
"/api/query_results/<result_id>.<filetype>",
|
||||
"/api/query_results/<result_id>",
|
||||
"/api/queries/<query_id>/results",
|
||||
"/api/queries/<query_id>/results.<filetype>",
|
||||
"/api/queries/<query_id>/results/<query_result_id>.<filetype>",
|
||||
"/api/queries/<query_id>/results/<result_id>.<filetype>",
|
||||
endpoint="query_result",
|
||||
)
|
||||
api.add_org_resource(
|
||||
|
||||
@@ -15,7 +15,6 @@ from redash.authentication.account import (
|
||||
)
|
||||
from redash.handlers import routes
|
||||
from redash.handlers.base import json_response, org_scoped_rule
|
||||
from redash.version_check import get_latest_version
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -256,15 +255,11 @@ def number_format_config():
|
||||
|
||||
def client_config():
|
||||
if not current_user.is_api_user() and current_user.is_authenticated:
|
||||
client_config = {
|
||||
"newVersionAvailable": bool(get_latest_version()),
|
||||
client_config_inner = {
|
||||
"version": __version__,
|
||||
}
|
||||
else:
|
||||
client_config = {}
|
||||
|
||||
if current_user.has_permission("admin") and current_org.get_setting("beacon_consent") is None:
|
||||
client_config["showBeaconConsentMessage"] = True
|
||||
client_config_inner = {}
|
||||
|
||||
defaults = {
|
||||
"allowScriptsInUserInput": settings.ALLOW_SCRIPTS_IN_USER_INPUT,
|
||||
@@ -284,12 +279,12 @@ def client_config():
|
||||
"tableCellMaxJSONSize": settings.TABLE_CELL_MAX_JSON_SIZE,
|
||||
}
|
||||
|
||||
client_config.update(defaults)
|
||||
client_config.update({"basePath": base_href()})
|
||||
client_config.update(date_time_format_config())
|
||||
client_config.update(number_format_config())
|
||||
client_config_inner.update(defaults)
|
||||
client_config_inner.update({"basePath": base_href()})
|
||||
client_config_inner.update(date_time_format_config())
|
||||
client_config_inner.update(number_format_config())
|
||||
|
||||
return client_config
|
||||
return client_config_inner
|
||||
|
||||
|
||||
def messages():
|
||||
|
||||
@@ -5,6 +5,7 @@ import regex
from flask import make_response, request
from flask_login import current_user
from flask_restful import abort
from rq.job import JobStatus

from redash import models, settings
from redash.handlers.base import BaseResource, get_object_or_404, record_event
@@ -38,7 +39,7 @@ from redash.utils import (


def error_response(message, http_status=400):
    return {"job": {"status": 4, "error": message}}, http_status
    return {"job": {"status": JobStatus.FAILED, "error": message}}, http_status


error_messages = {
@@ -225,7 +226,7 @@ class QueryResultResource(BaseResource):
|
||||
headers["Access-Control-Allow-Credentials"] = str(settings.ACCESS_CONTROL_ALLOW_CREDENTIALS).lower()
|
||||
|
||||
@require_any_of_permission(("view_query", "execute_query"))
|
||||
def options(self, query_id=None, query_result_id=None, filetype="json"):
|
||||
def options(self, query_id=None, result_id=None, filetype="json"):
|
||||
headers = {}
|
||||
self.add_cors_headers(headers)
|
||||
|
||||
@@ -285,12 +286,12 @@ class QueryResultResource(BaseResource):
|
||||
return error_messages["no_permission"]
|
||||
|
||||
@require_any_of_permission(("view_query", "execute_query"))
|
||||
def get(self, query_id=None, query_result_id=None, filetype="json"):
|
||||
def get(self, query_id=None, result_id=None, filetype="json"):
|
||||
"""
|
||||
Retrieve query results.
|
||||
|
||||
:param number query_id: The ID of the query whose results should be fetched
|
||||
:param number query_result_id: the ID of the query result to fetch
|
||||
:param number result_id: the ID of the query result to fetch
|
||||
:param string filetype: Format to return. One of 'json', 'xlsx', or 'csv'. Defaults to 'json'.
|
||||
|
||||
:<json number id: Query result ID
|
||||
@@ -305,13 +306,13 @@ class QueryResultResource(BaseResource):
|
||||
# This method handles two cases: retrieving result by id & retrieving result by query id.
|
||||
# They need to be split, as they have different logic (for example, retrieving by query id
|
||||
# should check for query parameters and shouldn't cache the result).
|
||||
should_cache = query_result_id is not None
|
||||
should_cache = result_id is not None
|
||||
|
||||
query_result = None
|
||||
query = None
|
||||
|
||||
if query_result_id:
|
||||
query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query_result_id, self.current_org)
|
||||
if result_id:
|
||||
query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, result_id, self.current_org)
|
||||
|
||||
if query_id is not None:
|
||||
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
|
||||
@@ -346,7 +347,7 @@ class QueryResultResource(BaseResource):
|
||||
event["object_id"] = query_id
|
||||
else:
|
||||
event["object_type"] = "query_result"
|
||||
event["object_id"] = query_result_id
|
||||
event["object_id"] = result_id
|
||||
|
||||
self.record_event(event)
|
||||
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
from flask import g, redirect, render_template, request, url_for
|
||||
from flask_login import login_user
|
||||
from wtforms import BooleanField, Form, PasswordField, StringField, validators
|
||||
from wtforms import Form, PasswordField, StringField, validators
|
||||
from wtforms.fields.html5 import EmailField
|
||||
|
||||
from redash import settings
|
||||
from redash.authentication.org_resolving import current_org
|
||||
from redash.handlers.base import routes
|
||||
from redash.models import Group, Organization, User, db
|
||||
from redash.tasks.general import subscribe
|
||||
|
||||
|
||||
class SetupForm(Form):
|
||||
@@ -15,8 +14,6 @@ class SetupForm(Form):
|
||||
email = EmailField("Email Address", validators=[validators.Email()])
|
||||
password = PasswordField("Password", validators=[validators.Length(6)])
|
||||
org_name = StringField("Organization Name", validators=[validators.InputRequired()])
|
||||
security_notifications = BooleanField()
|
||||
newsletter = BooleanField()
|
||||
|
||||
|
||||
def create_org(org_name, user_name, email, password):
|
||||
@@ -57,8 +54,6 @@ def setup():
|
||||
return redirect("/")
|
||||
|
||||
form = SetupForm(request.form)
|
||||
form.newsletter.data = True
|
||||
form.security_notifications.data = True
|
||||
|
||||
if request.method == "POST" and form.validate():
|
||||
default_org, user = create_org(form.org_name.data, form.name.data, form.email.data, form.password.data)
|
||||
@@ -66,10 +61,6 @@ def setup():
|
||||
g.org = default_org
|
||||
login_user(user)
|
||||
|
||||
# signup to newsletter if needed
|
||||
if form.newsletter.data or form.security_notifications:
|
||||
subscribe.delay(form.data)
|
||||
|
||||
return redirect(url_for("redash.index", org_slug=None))
|
||||
|
||||
return render_template("setup.html", form=form)
|
||||
|
||||
@@ -42,7 +42,6 @@ class WidgetListResource(BaseResource):
|
||||
|
||||
widget = models.Widget(**widget_properties)
|
||||
models.db.session.add(widget)
|
||||
models.db.session.commit()
|
||||
|
||||
models.db.session.commit()
|
||||
return serialize_widget(widget)
|
||||
|
||||
@@ -227,7 +227,16 @@ class DataSource(BelongsToOrgMixin, db.Model):
|
||||
|
||||
def _sort_schema(self, schema):
|
||||
return [
|
||||
{"name": i["name"], "columns": sorted(i["columns"], key=lambda x: x["name"] if isinstance(x, dict) else x)}
|
||||
{
|
||||
"name": i["name"],
|
||||
"description": i.get("description"),
|
||||
"columns": sorted(
|
||||
i["columns"],
|
||||
key=lambda col: (
|
||||
("partition" in col["type"], col.get("idx", 0), col["name"]) if isinstance(col, dict) else col
|
||||
),
|
||||
),
|
||||
}
|
||||
for i in sorted(schema, key=lambda x: x["name"])
|
||||
]
|
||||
|
||||
|
||||
@@ -63,5 +63,8 @@ class AmazonElasticsearchService(ElasticSearch2):
|
||||
|
||||
self.auth = AWSV4Sign(cred, region, "es")
|
||||
|
||||
def get_auth(self):
|
||||
return self.auth
|
||||
|
||||
|
||||
register(AmazonElasticsearchService)
|
||||
|
||||
@@ -21,7 +21,9 @@ OPTIONAL_CREDENTIALS = parse_boolean(os.environ.get("ATHENA_OPTIONAL_CREDENTIALS
|
||||
|
||||
try:
|
||||
import boto3
|
||||
import pandas as pd
|
||||
import pyathena
|
||||
from pyathena.pandas_cursor import PandasCursor
|
||||
|
||||
enabled = True
|
||||
except ImportError:
|
||||
@@ -188,10 +190,35 @@ class Athena(BaseQueryRunner):
|
||||
logger.warning("Glue table doesn't have StorageDescriptor: %s", table_name)
|
||||
continue
|
||||
if table_name not in schema:
|
||||
column = [columns["Name"] for columns in table["StorageDescriptor"]["Columns"]]
|
||||
schema[table_name] = {"name": table_name, "columns": column}
|
||||
for partition in table.get("PartitionKeys", []):
|
||||
schema[table_name]["columns"].append(partition["Name"])
|
||||
columns = []
|
||||
for cols in table["StorageDescriptor"]["Columns"]:
|
||||
c = {
|
||||
"name": cols["Name"],
|
||||
}
|
||||
if "Type" in cols:
|
||||
c["type"] = cols["Type"]
|
||||
if "Comment" in cols:
|
||||
c["comment"] = cols["Comment"]
|
||||
columns.append(c)
|
||||
|
||||
schema[table_name] = {
|
||||
"name": table_name,
|
||||
"columns": columns,
|
||||
"description": table.get("Description"),
|
||||
}
|
||||
for idx, partition in enumerate(table.get("PartitionKeys", [])):
|
||||
schema[table_name]["columns"].append(
|
||||
{
|
||||
"name": partition["Name"],
|
||||
"type": "partition",
|
||||
"idx": idx,
|
||||
}
|
||||
)
|
||||
if "Type" in partition:
|
||||
_type = partition["Type"]
|
||||
c["type"] = f"partition ({_type})"
|
||||
if "Comment" in partition:
|
||||
c["comment"] = partition["Comment"]
|
||||
return list(schema.values())
|
||||
|
||||
def get_schema(self, get_stats=False):
|
||||
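The reworked Glue handling above now emits per-column dictionaries (name, type, optional comment) and appends partition keys with a dedicated type and index, plus a table-level description. As a rough illustration only (hypothetical table and column names, mirroring the shapes asserted in the test changes further down), a partitioned table now serializes along these lines:

# Illustrative shape only -- hypothetical names, not part of this diff.
schema_entry = {
    "name": "sales_db.orders",
    "description": None,  # taken from the Glue table's Description when present
    "columns": [
        {"name": "order_id", "type": "bigint"},
        {"name": "total", "type": "double", "comment": "order total in USD"},
        {"name": "dt", "type": "partition", "idx": 0},  # partition keys are appended last
    ],
}
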
@@ -225,6 +252,7 @@ class Athena(BaseQueryRunner):
|
||||
kms_key=self.configuration.get("kms_key", None),
|
||||
work_group=self.configuration.get("work_group", "primary"),
|
||||
formatter=SimpleFormatter(),
|
||||
cursor_class=PandasCursor,
|
||||
**self._get_iam_credentials(user=user),
|
||||
).cursor()
|
||||
|
||||
@@ -232,7 +260,8 @@ class Athena(BaseQueryRunner):
|
||||
cursor.execute(query)
|
||||
column_tuples = [(i[0], _TYPE_MAPPINGS.get(i[1], None)) for i in cursor.description]
|
||||
columns = self.fetch_columns(column_tuples)
|
||||
rows = [dict(zip(([c["name"] for c in columns]), r)) for i, r in enumerate(cursor.fetchall())]
|
||||
df = cursor.as_pandas().replace({pd.NA: None})
|
||||
rows = df.to_dict(orient="records")
|
||||
qbytes = None
|
||||
athena_query_id = None
|
||||
try:
|
||||
|
||||
@@ -129,6 +129,8 @@ class BaseElasticSearch(BaseQueryRunner):
|
||||
for index_name in mappings_data:
|
||||
index_mappings = mappings_data[index_name]
|
||||
for m in index_mappings.get("mappings", {}):
|
||||
if not isinstance(index_mappings["mappings"][m], dict):
|
||||
continue
|
||||
if "properties" not in index_mappings["mappings"][m]:
|
||||
continue
|
||||
for property_name in index_mappings["mappings"][m]["properties"]:
|
||||
|
||||
@@ -45,7 +45,7 @@ class ElasticSearch2(BaseHTTPQueryRunner):
|
||||
self.syntax = "json"
|
||||
|
||||
def get_response(self, url, auth=None, http_method="get", **kwargs):
|
||||
url = "{}{}".format(self.configuration["url"], url)
|
||||
url = "{}{}".format(self.configuration["server"], url)
|
||||
headers = kwargs.pop("headers", {})
|
||||
headers["Accept"] = "application/json"
|
||||
return super().get_response(url, auth, http_method, headers=headers, **kwargs)
|
||||
|
||||
@@ -69,7 +69,7 @@ def datetime_parser(dct):
|
||||
return bson_object_hook(dct, json_options=opts)
|
||||
|
||||
|
||||
def parse_query_json(query):
|
||||
def parse_query_json(query: str):
|
||||
query_data = json_loads(query, object_hook=datetime_parser)
|
||||
return query_data
|
||||
|
||||
@@ -82,26 +82,40 @@ def _get_column_by_name(columns, column_name):
|
||||
return None
|
||||
|
||||
|
||||
def _parse_dict(dic):
|
||||
def _parse_dict(dic: dict, flatten: bool = False) -> dict:
|
||||
res = {}
|
||||
for key, value in dic.items():
|
||||
if isinstance(value, dict):
|
||||
for tmp_key, tmp_value in _parse_dict(value).items():
|
||||
new_key = "{}.{}".format(key, tmp_key)
|
||||
res[new_key] = tmp_value
|
||||
|
||||
def _flatten(x, name=""):
|
||||
if isinstance(x, dict):
|
||||
for k, v in x.items():
|
||||
_flatten(v, "{}.{}".format(name, k))
|
||||
elif isinstance(x, list):
|
||||
for idx, item in enumerate(x):
|
||||
_flatten(item, "{}.{}".format(name, idx))
|
||||
else:
|
||||
res[key] = value
|
||||
res[name[1:]] = x
|
||||
|
||||
if flatten:
|
||||
_flatten(dic)
|
||||
else:
|
||||
for key, value in dic.items():
|
||||
if isinstance(value, dict):
|
||||
for tmp_key, tmp_value in _parse_dict(value).items():
|
||||
new_key = "{}.{}".format(key, tmp_key)
|
||||
res[new_key] = tmp_value
|
||||
else:
|
||||
res[key] = value
|
||||
return res
|
||||
|
||||
|
||||
def parse_results(results):
|
||||
def parse_results(results: list, flatten: bool = False) -> list:
|
||||
rows = []
|
||||
columns = []
|
||||
|
||||
for row in results:
|
||||
parsed_row = {}
|
||||
|
||||
parsed_row = _parse_dict(row)
|
||||
parsed_row = _parse_dict(row, flatten)
|
||||
for column_name, value in parsed_row.items():
|
||||
columns.append(
|
||||
{
|
||||
@@ -140,6 +154,14 @@ class MongoDB(BaseQueryRunner):
|
||||
],
|
||||
"title": "Replica Set Read Preference",
|
||||
},
|
||||
"flatten": {
|
||||
"type": "string",
|
||||
"extendedEnum": [
|
||||
{"value": "False", "name": "False"},
|
||||
{"value": "True", "name": "True"},
|
||||
],
|
||||
"title": "Flatten Results",
|
||||
},
|
||||
},
|
||||
"secret": ["password"],
|
||||
"required": ["connectionString", "dbName"],
|
||||
@@ -160,6 +182,9 @@ class MongoDB(BaseQueryRunner):
|
||||
True if "replicaSetName" in self.configuration and self.configuration["replicaSetName"] else False
|
||||
)
|
||||
|
||||
self.flatten = self.configuration.get("flatten", "False").upper() in ["TRUE", "YES", "ON", "1", "Y", "T"]
|
||||
logger.debug("flatten: {}".format(self.flatten))
|
||||
|
||||
@classmethod
|
||||
def custom_json_encoder(cls, dec, o):
|
||||
if isinstance(o, ObjectId):
|
||||
@@ -278,8 +303,10 @@ class MongoDB(BaseQueryRunner):
|
||||
if "$sort" in step:
|
||||
sort_list = []
|
||||
for sort_item in step["$sort"]:
|
||||
sort_list.append((sort_item["name"], sort_item["direction"]))
|
||||
|
||||
if isinstance(sort_item, dict):
|
||||
sort_list.append((sort_item["name"], sort_item.get("direction", 1)))
|
||||
elif isinstance(sort_item, list):
|
||||
sort_list.append(tuple(sort_item))
|
||||
step["$sort"] = SON(sort_list)
|
||||
|
||||
if "fields" in query_data:
|
||||
@@ -289,7 +316,10 @@ class MongoDB(BaseQueryRunner):
|
||||
if "sort" in query_data and query_data["sort"]:
|
||||
s = []
|
||||
for field_data in query_data["sort"]:
|
||||
s.append((field_data["name"], field_data["direction"]))
|
||||
if isinstance(field_data, dict):
|
||||
s.append((field_data["name"], field_data.get("direction", 1)))
|
||||
elif isinstance(field_data, list):
|
||||
s.append(tuple(field_data))
|
||||
|
||||
columns = []
|
||||
rows = []
|
||||
@@ -330,7 +360,7 @@ class MongoDB(BaseQueryRunner):
|
||||
|
||||
rows.append({"count": cursor})
|
||||
else:
|
||||
rows, columns = parse_results(cursor)
|
||||
rows, columns = parse_results(cursor, flatten=self.flatten)
|
||||
|
||||
if f:
|
||||
ordered_columns = []
|
||||
@@ -340,6 +370,7 @@ class MongoDB(BaseQueryRunner):
|
||||
ordered_columns.append(column)
|
||||
|
||||
columns = ordered_columns
|
||||
logger.debug("columns: {}".format(columns))
|
||||
|
||||
if query_data.get("sortColumns"):
|
||||
reverse = query_data["sortColumns"] == "desc"
|
||||
|
||||
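The new MongoDB "flatten" option controls how nested documents, including arrays, are turned into dotted column names before being passed to parse_results. A minimal standalone sketch of that flattening behavior, written independently of Redash's own _parse_dict and offered only as an illustration:

# Minimal sketch of dotted-path flattening, mirroring what the new "flatten"
# option enables (arrays are expanded with numeric suffixes).
def flatten_doc(value, prefix=""):
    flat = {}
    if isinstance(value, dict):
        for key, item in value.items():
            flat.update(flatten_doc(item, f"{prefix}.{key}" if prefix else key))
    elif isinstance(value, list):
        for idx, item in enumerate(value):
            flat.update(flatten_doc(item, f"{prefix}.{idx}"))
    else:
        flat[prefix] = value
    return flat

doc = {"column": 2, "nested": {"d": {"e": 3}, "f": {"h": {"i": ["j", "k"]}}}}
print(flatten_doc(doc))
# {'column': 2, 'nested.d.e': 3, 'nested.f.h.i.0': 'j', 'nested.f.h.i.1': 'k'}
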
@@ -48,7 +48,7 @@ class SQLServerODBC(BaseSQLQueryRunner):
                "verify_ssl": {
                    "type": "boolean",
                    "title": "Verify SSL certificate",
                    "default": True,
                    "default": False,
                },
            },
            "order": [
@@ -120,14 +120,29 @@ class SQLServerODBC(BaseSQLQueryRunner):
        db = self.configuration["db"]
        port = self.configuration.get("port", 1433)

        connection_string_fmt = "DRIVER={{ODBC Driver 17 for SQL Server}};SERVER={},{};DATABASE={};UID={};PWD={}"
        connection_string = connection_string_fmt.format(server, port, db, user, password)
        connection_params = {
            "Driver": "{ODBC Driver 18 for SQL Server}",
            "Server": server,
            "Port": port,
            "Database": db,
            "Uid": user,
            "Pwd": password,
        }

        if self.configuration.get("use_ssl", False):
            connection_string += ";Encrypt=YES"
            connection_params["Encrypt"] = "YES"

            if not self.configuration.get("verify_ssl"):
                connection_string += ";TrustServerCertificate=YES"
                connection_params["TrustServerCertificate"] = "YES"
            else:
                connection_params["TrustServerCertificate"] = "NO"
        else:
            connection_params["Encrypt"] = "NO"

        def fn(k):
            return "{}={}".format(k, connection_params[k])

        connection_string = ";".join(list(map(fn, connection_params)))

        connection = pyodbc.connect(connection_string)
        cursor = connection.cursor()

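For illustration only (hypothetical server and credentials, not part of this diff), the parameter dict above joins into a standard ODBC connection string, with Encrypt and TrustServerCertificate driven by the use_ssl / verify_ssl settings:

# Illustrative only -- hypothetical values.
connection_params = {
    "Driver": "{ODBC Driver 18 for SQL Server}",
    "Server": "sql.example.internal",
    "Port": 1433,
    "Database": "analytics",
    "Uid": "redash_reader",
    "Pwd": "s3cret",
    "Encrypt": "YES",
    "TrustServerCertificate": "NO",
}

connection_string = ";".join(f"{key}={value}" for key, value in connection_params.items())
print(connection_string)
# Driver={ODBC Driver 18 for SQL Server};Server=sql.example.internal;Port=1433;Database=analytics;...
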
@@ -108,6 +108,8 @@ def build_schema(query_result, schema):
|
||||
column = row["column_name"]
|
||||
if row.get("data_type") is not None:
|
||||
column = {"name": row["column_name"], "type": row["data_type"]}
|
||||
if "column_comment" in row:
|
||||
column["comment"] = row["column_comment"]
|
||||
|
||||
schema[table_name]["columns"].append(column)
|
||||
|
||||
@@ -222,7 +224,9 @@ class PostgreSQL(BaseSQLQueryRunner):
|
||||
SELECT s.nspname as table_schema,
|
||||
c.relname as table_name,
|
||||
a.attname as column_name,
|
||||
null as data_type
|
||||
null as data_type,
|
||||
null as column_comment,
|
||||
null as idx
|
||||
FROM pg_class c
|
||||
JOIN pg_namespace s
|
||||
ON c.relnamespace = s.oid
|
||||
@@ -238,8 +242,16 @@ class PostgreSQL(BaseSQLQueryRunner):
|
||||
SELECT table_schema,
|
||||
table_name,
|
||||
column_name,
|
||||
data_type
|
||||
FROM information_schema.columns
|
||||
data_type,
|
||||
pgd.description,
|
||||
isc.ordinal_position
|
||||
FROM information_schema.columns as isc
|
||||
LEFT JOIN pg_catalog.pg_statio_all_tables as st
|
||||
ON isc.table_schema = st.schemaname
|
||||
AND isc.table_name = st.relname
|
||||
LEFT JOIN pg_catalog.pg_description pgd
|
||||
ON pgd.objoid=st.relid
|
||||
AND pgd.objsubid=isc.ordinal_position
|
||||
WHERE table_schema NOT IN ('pg_catalog', 'information_schema')
|
||||
"""
|
||||
|
||||
|
||||
@@ -1,169 +0,0 @@
|
||||
import logging
|
||||
import time
|
||||
from io import StringIO
|
||||
|
||||
import requests
|
||||
|
||||
from redash.query_runner import (
|
||||
TYPE_STRING,
|
||||
BaseQueryRunner,
|
||||
JobTimeoutException,
|
||||
register,
|
||||
)
|
||||
|
||||
try:
|
||||
import qds_sdk # noqa: F401
|
||||
from qds_sdk.commands import (
|
||||
Command,
|
||||
HiveCommand,
|
||||
PrestoCommand,
|
||||
SqlCommand,
|
||||
)
|
||||
from qds_sdk.qubole import Qubole as qbol
|
||||
|
||||
enabled = True
|
||||
except ImportError:
|
||||
enabled = False
|
||||
|
||||
|
||||
class Qubole(BaseQueryRunner):
|
||||
should_annotate_query = False
|
||||
|
||||
@classmethod
|
||||
def configuration_schema(cls):
|
||||
return {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"query_type": {
|
||||
"type": "string",
|
||||
"title": "Query Type (quantum / presto / hive)",
|
||||
"default": "hive",
|
||||
},
|
||||
"endpoint": {
|
||||
"type": "string",
|
||||
"title": "API Endpoint",
|
||||
"default": "https://api.qubole.com",
|
||||
},
|
||||
"token": {"type": "string", "title": "Auth Token"},
|
||||
"cluster": {
|
||||
"type": "string",
|
||||
"title": "Cluster Label",
|
||||
"default": "default",
|
||||
},
|
||||
},
|
||||
"order": ["query_type", "endpoint", "token", "cluster"],
|
||||
"required": ["endpoint", "token"],
|
||||
"secret": ["token"],
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def type(cls):
|
||||
return "qubole"
|
||||
|
||||
@classmethod
|
||||
def name(cls):
|
||||
return "Qubole"
|
||||
|
||||
@classmethod
|
||||
def enabled(cls):
|
||||
return enabled
|
||||
|
||||
def test_connection(self):
|
||||
headers = self._get_header()
|
||||
r = requests.head("%s/api/latest/users" % self.configuration.get("endpoint"), headers=headers)
|
||||
r.status_code == 200
|
||||
|
||||
def run_query(self, query, user):
|
||||
qbol.configure(
|
||||
api_token=self.configuration.get("token"),
|
||||
api_url="%s/api" % self.configuration.get("endpoint"),
|
||||
)
|
||||
|
||||
try:
|
||||
query_type = self.configuration.get("query_type", "hive")
|
||||
|
||||
if query_type == "quantum":
|
||||
cmd = SqlCommand.create(query=query)
|
||||
elif query_type == "hive":
|
||||
cmd = HiveCommand.create(query=query, label=self.configuration.get("cluster"))
|
||||
elif query_type == "presto":
|
||||
cmd = PrestoCommand.create(query=query, label=self.configuration.get("cluster"))
|
||||
else:
|
||||
raise Exception(
|
||||
"Invalid Query Type:%s.\
|
||||
It must be : hive / presto / quantum."
|
||||
% self.configuration.get("query_type")
|
||||
)
|
||||
|
||||
logging.info("Qubole command created with Id: %s and Status: %s", cmd.id, cmd.status)
|
||||
|
||||
while not Command.is_done(cmd.status):
|
||||
time.sleep(qbol.poll_interval)
|
||||
cmd = Command.find(cmd.id)
|
||||
logging.info("Qubole command Id: %s and Status: %s", cmd.id, cmd.status)
|
||||
|
||||
rows = []
|
||||
columns = []
|
||||
error = None
|
||||
|
||||
if cmd.status == "done":
|
||||
fp = StringIO()
|
||||
cmd.get_results(
|
||||
fp=fp,
|
||||
inline=True,
|
||||
delim="\t",
|
||||
fetch=False,
|
||||
qlog=None,
|
||||
arguments=["true"],
|
||||
)
|
||||
|
||||
results = fp.getvalue()
|
||||
fp.close()
|
||||
|
||||
data = results.split("\r\n")
|
||||
columns = self.fetch_columns([(i, TYPE_STRING) for i in data.pop(0).split("\t")])
|
||||
rows = [dict(zip((column["name"] for column in columns), row.split("\t"))) for row in data]
|
||||
|
||||
data = {"columns": columns, "rows": rows}
|
||||
except (KeyboardInterrupt, JobTimeoutException):
|
||||
logging.info("Sending KILL signal to Qubole Command Id: %s", cmd.id)
|
||||
cmd.cancel()
|
||||
raise
|
||||
|
||||
return data, error
|
||||
|
||||
def get_schema(self, get_stats=False):
|
||||
schemas = {}
|
||||
try:
|
||||
headers = self._get_header()
|
||||
content = requests.get(
|
||||
"%s/api/latest/hive?describe=true&per_page=10000" % self.configuration.get("endpoint"),
|
||||
headers=headers,
|
||||
)
|
||||
data = content.json()
|
||||
|
||||
for schema in data["schemas"]:
|
||||
tables = data["schemas"][schema]
|
||||
for table in tables:
|
||||
table_name = list(table.keys())[0]
|
||||
columns = [f["name"] for f in table[table_name]["columns"]]
|
||||
|
||||
if schema != "default":
|
||||
table_name = "{}.{}".format(schema, table_name)
|
||||
|
||||
schemas[table_name] = {"name": table_name, "columns": columns}
|
||||
|
||||
except Exception as e:
|
||||
logging.error("Failed to get schema information from Qubole. Error {}".format(str(e)))
|
||||
|
||||
return list(schemas.values())
|
||||
|
||||
def _get_header(self):
|
||||
return {
|
||||
"Content-type": "application/json",
|
||||
"Accept": "application/json",
|
||||
"X-AUTH-TOKEN": self.configuration.get("token"),
|
||||
}
|
||||
|
||||
|
||||
register(Qubole)
|
||||
@@ -1,3 +1,4 @@
|
||||
import datetime
|
||||
import decimal
|
||||
import hashlib
|
||||
import logging
|
||||
@@ -108,6 +109,8 @@ def flatten(value):
|
||||
return json_dumps(value)
|
||||
elif isinstance(value, decimal.Decimal):
|
||||
return float(value)
|
||||
elif isinstance(value, datetime.timedelta):
|
||||
return str(value)
|
||||
else:
|
||||
return value
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import functools
|
||||
|
||||
from flask import request, session
|
||||
from flask import session
|
||||
from flask_login import current_user
|
||||
from flask_talisman import talisman
|
||||
from flask_wtf.csrf import CSRFProtect, generate_csrf
|
||||
@@ -35,17 +35,6 @@ def init_app(app):
|
||||
|
||||
@app.before_request
|
||||
def check_csrf():
|
||||
# BEGIN workaround until https://github.com/lepture/flask-wtf/pull/419 is merged
|
||||
if request.blueprint in csrf._exempt_blueprints:
|
||||
return
|
||||
|
||||
view = app.view_functions.get(request.endpoint)
|
||||
dest = f"{view.__module__}.{view.__name__}"
|
||||
|
||||
if dest in csrf._exempt_views:
|
||||
return
|
||||
# END workaround
|
||||
|
||||
if not current_user.is_authenticated or "user_id" in session:
|
||||
csrf.protect()
|
||||
|
||||
|
||||
@@ -3,9 +3,11 @@ This will eventually replace all the `to_dict` methods of the different model
|
||||
classes we have. This will ensure cleaner code and better
|
||||
separation of concerns.
|
||||
"""
|
||||
|
||||
from flask_login import current_user
|
||||
from funcy import project
|
||||
from rq.job import JobStatus
|
||||
from rq.results import Result
|
||||
from rq.timeouts import JobTimeoutException
|
||||
|
||||
from redash import models
|
||||
@@ -270,35 +272,19 @@ class DashboardSerializer(Serializer):


def serialize_job(job):
    # TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this
    STATUSES = {
        JobStatus.QUEUED: 1,
        JobStatus.STARTED: 2,
        JobStatus.FINISHED: 3,
        JobStatus.FAILED: 4,
    }

    job_status = job.get_status()
    if job.is_started:
        updated_at = job.started_at or 0
    else:
        updated_at = 0

    status = STATUSES[job_status]
    result = query_result_id = None

    if job.is_cancelled:
        error = "Query cancelled by user."
        status = 4
    elif isinstance(job.result, Exception):
        error = str(job.result)
        status = 4
    elif isinstance(job.result, dict) and "error" in job.result:
        error = job.result["error"]
        status = 4
    else:
        error = ""
        result = query_result_id = job.result
    status = job.get_status()
    error = result_id = None
    job_result = job.latest_result()
    if job_result:
        if job_result.type == Result.Type.SUCCESSFUL:
            result_id = job_result.return_value
        else:
            error = job_result.exc_string

    return {
        "job": {
@@ -306,7 +292,6 @@ def serialize_job(job):
            "updated_at": updated_at,
            "status": status,
            "error": error,
            "result": result,
            "query_result_id": query_result_id,
            "result_id": result_id,
        }
    }

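With this change the serialized job exposes rq's string statuses ("queued", "started", "finished", "failed", "stopped", "canceled") instead of the old 1-4 numeric codes, and result_id replaces query_result_id. A rough sketch of the resulting payload, values hypothetical and limited to keys visible in the hunk above:

# Rough payload sketch -- illustrative values only, not part of this diff.
example_job_payload = {
    "job": {
        "updated_at": 0,
        "status": "finished",  # rq JobStatus string, replacing the old numeric codes
        "error": None,
        "result": 42,
        "result_id": 42,       # renamed from "query_result_id"
    }
}
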
@@ -312,7 +312,6 @@ default_query_runners = [
|
||||
"redash.query_runner.salesforce",
|
||||
"redash.query_runner.query_results",
|
||||
"redash.query_runner.prometheus",
|
||||
"redash.query_runner.qubole",
|
||||
"redash.query_runner.db2",
|
||||
"redash.query_runner.druid",
|
||||
"redash.query_runner.kylin",
|
||||
@@ -413,7 +412,6 @@ PAGE_SIZE_OPTIONS = list(
|
||||
TABLE_CELL_MAX_JSON_SIZE = int(os.environ.get("REDASH_TABLE_CELL_MAX_JSON_SIZE", 50000))
|
||||
|
||||
# Features:
|
||||
VERSION_CHECK = parse_boolean(os.environ.get("REDASH_VERSION_CHECK", "true"))
|
||||
FEATURE_DISABLE_REFRESH_QUERIES = parse_boolean(os.environ.get("REDASH_FEATURE_DISABLE_REFRESH_QUERIES", "false"))
|
||||
FEATURE_SHOW_QUERY_RESULTS_COUNT = parse_boolean(os.environ.get("REDASH_FEATURE_SHOW_QUERY_RESULTS_COUNT", "true"))
|
||||
FEATURE_ALLOW_CUSTOM_JS_VISUALIZATIONS = parse_boolean(
|
||||
|
||||
@@ -45,7 +45,6 @@ HIDE_PLOTLY_MODE_BAR = parse_boolean(os.environ.get("HIDE_PLOTLY_MODE_BAR", "fal
|
||||
DISABLE_PUBLIC_URLS = parse_boolean(os.environ.get("REDASH_DISABLE_PUBLIC_URLS", "false"))
|
||||
|
||||
settings = {
|
||||
"beacon_consent": None,
|
||||
"auth_password_login_enabled": PASSWORD_LOGIN_ENABLED,
|
||||
"auth_saml_enabled": SAML_LOGIN_ENABLED,
|
||||
"auth_saml_type": SAML_LOGIN_TYPE,
|
||||
|
||||
@@ -7,7 +7,6 @@ from redash.tasks.general import (
|
||||
record_event,
|
||||
send_mail,
|
||||
sync_user_details,
|
||||
version_check,
|
||||
)
|
||||
from redash.tasks.queries import (
|
||||
cleanup_query_results,
|
||||
|
||||
@@ -5,7 +5,6 @@ from redash import mail, models, settings
|
||||
from redash.models import users
|
||||
from redash.query_runner import NotSupported
|
||||
from redash.tasks.worker import Queue
|
||||
from redash.version_check import run_version_check
|
||||
from redash.worker import get_job_logger, job
|
||||
|
||||
logger = get_job_logger(__name__)
|
||||
@@ -30,27 +29,6 @@ def record_event(raw_event):
|
||||
logger.exception("Failed posting to %s", hook)
|
||||
|
||||
|
||||
def version_check():
|
||||
run_version_check()
|
||||
|
||||
|
||||
@job("default")
|
||||
def subscribe(form):
|
||||
logger.info(
|
||||
"Subscribing to: [security notifications=%s], [newsletter=%s]",
|
||||
form["security_notifications"],
|
||||
form["newsletter"],
|
||||
)
|
||||
data = {
|
||||
"admin_name": form["name"],
|
||||
"admin_email": form["email"],
|
||||
"org_name": form["org_name"],
|
||||
"security_notifications": form["security_notifications"],
|
||||
"newsletter": form["newsletter"],
|
||||
}
|
||||
requests.post("https://beacon.redash.io/subscribe", json=data)
|
||||
|
||||
|
||||
@job("emails")
|
||||
def send_mail(to, subject, html, text):
|
||||
try:
|
||||
|
||||
@@ -43,24 +43,24 @@ def enqueue_query(query, data_source, user_id, is_api_key=False, scheduled_query
|
||||
if job_id:
|
||||
logger.info("[%s] Found existing job: %s", query_hash, job_id)
|
||||
job_complete = None
|
||||
job_cancelled = None
|
||||
|
||||
try:
|
||||
job = Job.fetch(job_id)
|
||||
job_exists = True
|
||||
status = job.get_status()
|
||||
job_complete = status in [JobStatus.FINISHED, JobStatus.FAILED]
|
||||
job_cancelled = job.is_cancelled
|
||||
|
||||
job_complete = status in [
|
||||
JobStatus.FINISHED,
|
||||
JobStatus.FAILED,
|
||||
JobStatus.STOPPED,
|
||||
JobStatus.CANCELED,
|
||||
]
|
||||
if job_complete:
|
||||
message = "job found is complete (%s)" % status
|
||||
elif job_cancelled:
|
||||
message = "job found has ben cancelled"
|
||||
except NoSuchJobError:
|
||||
message = "job found has expired"
|
||||
job_exists = False
|
||||
|
||||
lock_is_irrelevant = job_complete or job_cancelled or not job_exists
|
||||
lock_is_irrelevant = job_complete or not job_exists
|
||||
|
||||
if lock_is_irrelevant:
|
||||
logger.info("[%s] %s, removing lock", query_hash, message)
|
||||
|
||||
@@ -8,7 +8,7 @@ from rq_scheduler import Scheduler
|
||||
|
||||
from redash import rq_redis_connection, settings
|
||||
from redash.tasks.failure_report import send_aggregated_errors
|
||||
from redash.tasks.general import sync_user_details, version_check
|
||||
from redash.tasks.general import sync_user_details
|
||||
from redash.tasks.queries import (
|
||||
cleanup_query_results,
|
||||
empty_schedules,
|
||||
@@ -79,9 +79,6 @@ def periodic_job_definitions():
|
||||
},
|
||||
]
|
||||
|
||||
if settings.VERSION_CHECK:
|
||||
jobs.append({"func": version_check, "interval": timedelta(days=1)})
|
||||
|
||||
if settings.QUERY_RESULTS_CLEANUP_ENABLED:
|
||||
jobs.append({"func": cleanup_query_results, "interval": timedelta(minutes=5)})
|
||||
|
||||
|
||||
@@ -65,10 +65,7 @@ class StatsdRecordingWorker(BaseWorker):
|
||||
super().execute_job(job, queue)
|
||||
finally:
|
||||
statsd_client.decr("rq.jobs.running.{}".format(queue.name))
|
||||
if job.get_status() == JobStatus.FINISHED:
|
||||
statsd_client.incr("rq.jobs.finished.{}".format(queue.name))
|
||||
else:
|
||||
statsd_client.incr("rq.jobs.failed.{}".format(queue.name))
|
||||
statsd_client.incr("rq.jobs.{}.{}".format(job.get_status(), queue.name))
|
||||
|
||||
|
||||
class HardLimitingWorker(BaseWorker):
|
||||
@@ -154,7 +151,7 @@ class HardLimitingWorker(BaseWorker):
|
||||
job_status = job.get_status()
|
||||
if job_status is None: # Job completed and its ttl has expired
|
||||
return
|
||||
if job_status not in [JobStatus.FINISHED, JobStatus.FAILED]:
|
||||
if job_status not in [JobStatus.FINISHED, JobStatus.FAILED, JobStatus.STOPPED, JobStatus.CANCELED]:
|
||||
if not job.ended_at:
|
||||
job.ended_at = utcnow()
|
||||
|
||||
|
||||
@@ -42,20 +42,6 @@
|
||||
{{ render_field(form.email) }}
|
||||
{{ render_field(form.password) }}
|
||||
|
||||
<div class="checkbox">
|
||||
<label>
|
||||
{{ form.security_notifications() }}
|
||||
Subscribe to Security Notifications
|
||||
</label>
|
||||
</div>
|
||||
|
||||
<div class="checkbox">
|
||||
<label>
|
||||
{{ form.newsletter() }}
|
||||
Subscribe to newsletter (version updates, no more than once a month)
|
||||
</label>
|
||||
</div>
|
||||
|
||||
<h4 class="m-t-25">General</h4>
|
||||
|
||||
{{ render_field(form.org_name, help_block="Used in email notifications and the UI.") }}
|
||||
|
||||
@@ -1,103 +0,0 @@
|
||||
import logging
|
||||
|
||||
import requests
|
||||
import semver
|
||||
|
||||
from redash import __version__ as current_version
|
||||
from redash import redis_connection
|
||||
from redash.models import Organization, db
|
||||
|
||||
REDIS_KEY = "new_version_available"
|
||||
|
||||
|
||||
def usage_data():
|
||||
counts_query = """
|
||||
SELECT 'users_count' as name, count(0) as value
|
||||
FROM users
|
||||
WHERE disabled_at is null
|
||||
|
||||
UNION ALL
|
||||
|
||||
SELECT 'queries_count' as name, count(0) as value
|
||||
FROM queries
|
||||
WHERE is_archived is false
|
||||
|
||||
UNION ALL
|
||||
|
||||
SELECT 'alerts_count' as name, count(0) as value
|
||||
FROM alerts
|
||||
|
||||
UNION ALL
|
||||
|
||||
SELECT 'dashboards_count' as name, count(0) as value
|
||||
FROM dashboards
|
||||
WHERE is_archived is false
|
||||
|
||||
UNION ALL
|
||||
|
||||
SELECT 'widgets_count' as name, count(0) as value
|
||||
FROM widgets
|
||||
WHERE visualization_id is not null
|
||||
|
||||
UNION ALL
|
||||
|
||||
SELECT 'textbox_count' as name, count(0) as value
|
||||
FROM widgets
|
||||
WHERE visualization_id is null
|
||||
"""
|
||||
|
||||
data_sources_query = "SELECT type, count(0) FROM data_sources GROUP by 1"
|
||||
visualizations_query = "SELECT type, count(0) FROM visualizations GROUP by 1"
|
||||
destinations_query = "SELECT type, count(0) FROM notification_destinations GROUP by 1"
|
||||
|
||||
data = {name: value for (name, value) in db.session.execute(counts_query)}
|
||||
data["data_sources"] = {name: value for (name, value) in db.session.execute(data_sources_query)}
|
||||
data["visualization_types"] = {name: value for (name, value) in db.session.execute(visualizations_query)}
|
||||
data["destination_types"] = {name: value for (name, value) in db.session.execute(destinations_query)}
|
||||
|
||||
return data
|
||||
|
||||
|
||||
def run_version_check():
|
||||
logging.info("Performing version check.")
|
||||
logging.info("Current version: %s", current_version)
|
||||
|
||||
data = {"current_version": current_version}
|
||||
|
||||
if Organization.query.first().get_setting("beacon_consent"):
|
||||
data["usage"] = usage_data()
|
||||
|
||||
try:
|
||||
response = requests.post(
|
||||
"https://version.redash.io/api/report?channel=stable",
|
||||
json=data,
|
||||
timeout=3.0,
|
||||
)
|
||||
latest_version = response.json()["release"]["version"]
|
||||
|
||||
_compare_and_update(latest_version)
|
||||
except requests.RequestException:
|
||||
logging.exception("Failed checking for new version.")
|
||||
except (ValueError, KeyError):
|
||||
logging.exception("Failed checking for new version (probably bad/non-JSON response).")
|
||||
|
||||
|
||||
def reset_new_version_status():
|
||||
latest_version = get_latest_version()
|
||||
if latest_version:
|
||||
_compare_and_update(latest_version)
|
||||
|
||||
|
||||
def get_latest_version():
|
||||
return redis_connection.get(REDIS_KEY)
|
||||
|
||||
|
||||
def _compare_and_update(latest_version):
|
||||
# TODO: support alpha channel (allow setting which channel to check & parse build number)
|
||||
is_newer = semver.compare(current_version, latest_version) == -1
|
||||
logging.info("Latest version: %s (newer: %s)", latest_version, is_newer)
|
||||
|
||||
if is_newer:
|
||||
redis_connection.set(REDIS_KEY, latest_version)
|
||||
else:
|
||||
redis_connection.delete(REDIS_KEY)
|
||||
@@ -5,6 +5,7 @@ from unittest import mock
|
||||
from redash.destinations.asana import Asana
|
||||
from redash.destinations.datadog import Datadog
|
||||
from redash.destinations.discord import Discord
|
||||
from redash.destinations.slack import Slack
|
||||
from redash.destinations.webex import Webex
|
||||
from redash.models import Alert, NotificationDestination
|
||||
from tests import BaseTestCase
|
||||
@@ -201,6 +202,59 @@ def test_asana_notify_calls_requests_post():
|
||||
assert mock_response.status_code == 204
|
||||
|
||||
|
||||
def test_slack_notify_calls_requests_post():
|
||||
alert = mock.Mock(spec_set=["id", "name", "custom_subject", "custom_body", "render_template"])
|
||||
alert.id = 1
|
||||
alert.name = "Test Alert"
|
||||
alert.custom_subject = "Test custom subject"
|
||||
alert.custom_body = "Test custom body"
|
||||
|
||||
alert.render_template = mock.Mock(return_value={"Rendered": "template"})
|
||||
query = mock.Mock()
|
||||
query.id = 1
|
||||
|
||||
user = mock.Mock()
|
||||
app = mock.Mock()
|
||||
host = "https://localhost:5000"
|
||||
options = {"url": "https://slack.com/api/api.test"}
|
||||
metadata = {"Scheduled": False}
|
||||
|
||||
new_state = Alert.TRIGGERED_STATE
|
||||
destination = Slack(options)
|
||||
|
||||
with mock.patch("redash.destinations.slack.requests.post") as mock_post:
|
||||
mock_response = mock.Mock()
|
||||
mock_response.status_code = 204
|
||||
mock_post.return_value = mock_response
|
||||
|
||||
destination.notify(alert, query, user, new_state, app, host, metadata, options)
|
||||
|
||||
query_link = f"{host}/queries/{query.id}"
|
||||
alert_link = f"{host}/alerts/{alert.id}"
|
||||
|
||||
expected_payload = {
|
||||
"attachments": [
|
||||
{
|
||||
"text": "Test custom subject",
|
||||
"color": "#c0392b",
|
||||
"fields": [
|
||||
{"title": "Query", "type": "mrkdwn", "value": query_link},
|
||||
{"title": "Alert", "type": "mrkdwn", "value": alert_link},
|
||||
{"title": "Description", "value": "Test custom body"},
|
||||
],
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
mock_post.assert_called_once_with(
|
||||
"https://slack.com/api/api.test",
|
||||
data=json.dumps(expected_payload).encode(),
|
||||
timeout=5.0,
|
||||
)
|
||||
|
||||
assert mock_response.status_code == 204
|
||||
|
||||
|
||||
def test_webex_notify_calls_requests_post():
|
||||
alert = mock.Mock(spec_set=["id", "name", "custom_subject", "custom_body", "render_template"])
|
||||
alert.id = 1
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
from rq.job import JobStatus
|
||||
|
||||
from redash.handlers.query_results import error_messages, run_query
|
||||
from redash.models import db
|
||||
from tests import BaseTestCase
|
||||
@@ -434,8 +436,6 @@ class TestQueryResultExcelResponse(BaseTestCase):
|
||||
|
||||
class TestJobResource(BaseTestCase):
|
||||
def test_cancels_queued_queries(self):
|
||||
QUEUED = 1
|
||||
FAILED = 4
|
||||
|
||||
query = self.factory.create_query()
|
||||
job_id = self.make_request(
|
||||
@@ -447,10 +447,9 @@ class TestJobResource(BaseTestCase):
|
||||
]["id"]
|
||||
|
||||
status = self.make_request("get", f"/api/jobs/{job_id}").json["job"]["status"]
|
||||
self.assertEqual(status, QUEUED)
|
||||
self.assertEqual(status, JobStatus.QUEUED)
|
||||
|
||||
self.make_request("delete", f"/api/jobs/{job_id}")
|
||||
|
||||
job = self.make_request("get", f"/api/jobs/{job_id}").json["job"]
|
||||
self.assertEqual(job["status"], FAILED)
|
||||
self.assertTrue("cancelled" in job["error"])
|
||||
self.assertEqual(job["status"], JobStatus.CANCELED)
|
||||
|
||||
@@ -8,7 +8,7 @@ from tests import BaseTestCase
|
||||
|
||||
class DataSourceTest(BaseTestCase):
|
||||
def test_get_schema(self):
|
||||
return_value = [{"name": "table", "columns": []}]
|
||||
return_value = [{"name": "table", "columns": [], "description": None}]
|
||||
|
||||
with mock.patch("redash.query_runner.pg.PostgreSQL.get_schema") as patched_get_schema:
|
||||
patched_get_schema.return_value = return_value
|
||||
@@ -18,7 +18,7 @@ class DataSourceTest(BaseTestCase):
|
||||
self.assertEqual(return_value, schema)
|
||||
|
||||
def test_get_schema_uses_cache(self):
|
||||
return_value = [{"name": "table", "columns": []}]
|
||||
return_value = [{"name": "table", "columns": [], "description": None}]
|
||||
with mock.patch("redash.query_runner.pg.PostgreSQL.get_schema") as patched_get_schema:
|
||||
patched_get_schema.return_value = return_value
|
||||
|
||||
@@ -29,12 +29,12 @@ class DataSourceTest(BaseTestCase):
|
||||
self.assertEqual(patched_get_schema.call_count, 1)
|
||||
|
||||
def test_get_schema_skips_cache_with_refresh_true(self):
|
||||
return_value = [{"name": "table", "columns": []}]
|
||||
return_value = [{"name": "table", "columns": [], "description": None}]
|
||||
with mock.patch("redash.query_runner.pg.PostgreSQL.get_schema") as patched_get_schema:
|
||||
patched_get_schema.return_value = return_value
|
||||
|
||||
self.factory.data_source.get_schema()
|
||||
new_return_value = [{"name": "new_table", "columns": []}]
|
||||
new_return_value = [{"name": "new_table", "columns": [], "description": None}]
|
||||
patched_get_schema.return_value = new_return_value
|
||||
schema = self.factory.data_source.get_schema(refresh=True)
|
||||
|
||||
@@ -43,10 +43,11 @@ class DataSourceTest(BaseTestCase):
|
||||
|
||||
def test_schema_sorter(self):
|
||||
input_data = [
|
||||
{"name": "zoo", "columns": ["is_zebra", "is_snake", "is_cow"]},
|
||||
{"name": "zoo", "columns": ["is_zebra", "is_snake", "is_cow"], "description": None},
|
||||
{
|
||||
"name": "all_terain_vehicle",
|
||||
"columns": ["has_wheels", "has_engine", "has_all_wheel_drive"],
|
||||
"description": None,
|
||||
},
|
||||
]
|
||||
|
||||
@@ -54,8 +55,9 @@ class DataSourceTest(BaseTestCase):
|
||||
{
|
||||
"name": "all_terain_vehicle",
|
||||
"columns": ["has_all_wheel_drive", "has_engine", "has_wheels"],
|
||||
"description": None,
|
||||
},
|
||||
{"name": "zoo", "columns": ["is_cow", "is_snake", "is_zebra"]},
|
||||
{"name": "zoo", "columns": ["is_cow", "is_snake", "is_zebra"], "description": None},
|
||||
]
|
||||
|
||||
real_output = self.factory.data_source._sort_schema(input_data)
|
||||
@@ -64,10 +66,11 @@ class DataSourceTest(BaseTestCase):
|
||||
|
||||
def test_model_uses_schema_sorter(self):
|
||||
orig_schema = [
|
||||
{"name": "zoo", "columns": ["is_zebra", "is_snake", "is_cow"]},
|
||||
{"name": "zoo", "columns": ["is_zebra", "is_snake", "is_cow"], "description": None},
|
||||
{
|
||||
"name": "all_terain_vehicle",
|
||||
"columns": ["has_wheels", "has_engine", "has_all_wheel_drive"],
|
||||
"description": None,
|
||||
},
|
||||
]
|
||||
|
||||
@@ -75,8 +78,9 @@ class DataSourceTest(BaseTestCase):
|
||||
{
|
||||
"name": "all_terain_vehicle",
|
||||
"columns": ["has_all_wheel_drive", "has_engine", "has_wheels"],
|
||||
"description": None,
|
||||
},
|
||||
{"name": "zoo", "columns": ["is_cow", "is_snake", "is_zebra"]},
|
||||
{"name": "zoo", "columns": ["is_cow", "is_snake", "is_zebra"], "description": None},
|
||||
]
|
||||
|
||||
with mock.patch("redash.query_runner.pg.PostgreSQL.get_schema") as patched_get_schema:
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
"""
|
||||
Some test cases around the Glue catalog.
|
||||
"""
|
||||
|
||||
from unittest import TestCase
|
||||
|
||||
import botocore
|
||||
@@ -74,7 +75,9 @@ class TestGlueSchema(TestCase):
|
||||
{"DatabaseName": "test1"},
|
||||
)
|
||||
with self.stubber:
|
||||
assert query_runner.get_schema() == [{"columns": ["row_id"], "name": "test1.jdbc_table"}]
|
||||
assert query_runner.get_schema() == [
|
||||
{"columns": [{"name": "row_id", "type": "int"}], "name": "test1.jdbc_table", "description": None}
|
||||
]
|
||||
|
||||
def test_partitioned_table(self):
|
||||
"""
|
||||
@@ -123,7 +126,16 @@ class TestGlueSchema(TestCase):
|
||||
{"DatabaseName": "test1"},
|
||||
)
|
||||
with self.stubber:
|
||||
assert query_runner.get_schema() == [{"columns": ["sk", "category"], "name": "test1.partitioned_table"}]
|
||||
assert query_runner.get_schema() == [
|
||||
{
|
||||
"columns": [
|
||||
{"name": "sk", "type": "partition (int)"},
|
||||
{"name": "category", "type": "partition", "idx": 0},
|
||||
],
|
||||
"name": "test1.partitioned_table",
|
||||
"description": None,
|
||||
}
|
||||
]
|
||||
|
||||
def test_view(self):
|
||||
query_runner = Athena({"glue": True, "region": "mars-east-1"})
|
||||
@@ -155,7 +167,9 @@ class TestGlueSchema(TestCase):
|
||||
{"DatabaseName": "test1"},
|
||||
)
|
||||
with self.stubber:
|
||||
assert query_runner.get_schema() == [{"columns": ["sk"], "name": "test1.view"}]
|
||||
assert query_runner.get_schema() == [
|
||||
{"columns": [{"name": "sk", "type": "int"}], "name": "test1.view", "description": None}
|
||||
]
|
||||
|
||||
def test_dodgy_table_does_not_break_schema_listing(self):
|
||||
"""
|
||||
@@ -195,7 +209,9 @@ class TestGlueSchema(TestCase):
|
||||
{"DatabaseName": "test1"},
|
||||
)
|
||||
with self.stubber:
|
||||
assert query_runner.get_schema() == [{"columns": ["region"], "name": "test1.csv"}]
|
||||
assert query_runner.get_schema() == [
|
||||
{"columns": [{"name": "region", "type": "string"}], "name": "test1.csv", "description": None}
|
||||
]
|
||||
|
||||
def test_no_storage_descriptor_table(self):
|
||||
"""
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
"""
|
||||
Some test cases for JSON api runner
|
||||
"""
|
||||
|
||||
from unittest import TestCase
|
||||
from urllib.parse import urlencode, urljoin
|
||||
|
||||
|
||||
@@ -141,7 +141,13 @@ class TestMongoResults(TestCase):
|
||||
"column": 2,
|
||||
"column2": "test",
|
||||
"column3": "hello",
|
||||
"nested": {"a": 2, "b": "str2", "c": "c", "d": {"e": 3}},
|
||||
"nested": {
|
||||
"a": 2,
|
||||
"b": "str2",
|
||||
"c": "c",
|
||||
"d": {"e": 3},
|
||||
"f": {"h": {"i": ["j", "k", "l"]}},
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
@@ -158,6 +164,7 @@ class TestMongoResults(TestCase):
|
||||
"nested.b": "str2",
|
||||
"nested.c": "c",
|
||||
"nested.d.e": 3,
|
||||
"nested.f.h.i": ["j", "k", "l"],
|
||||
},
|
||||
)
|
||||
|
||||
@@ -167,3 +174,50 @@ class TestMongoResults(TestCase):
|
||||
self.assertIsNotNone(_get_column_by_name(columns, "nested.a"))
|
||||
self.assertIsNotNone(_get_column_by_name(columns, "nested.b"))
|
||||
self.assertIsNotNone(_get_column_by_name(columns, "nested.c"))
|
||||
self.assertIsNotNone(_get_column_by_name(columns, "nested.d.e"))
|
||||
self.assertIsNotNone(_get_column_by_name(columns, "nested.f.h.i"))
|
||||
|
||||
def test_parses_flatten_nested_results(self):
|
||||
raw_results = [
|
||||
{
|
||||
"column": 2,
|
||||
"column2": "test",
|
||||
"column3": "hello",
|
||||
"nested": {
|
||||
"a": 2,
|
||||
"b": "str2",
|
||||
"c": "c",
|
||||
"d": {"e": 3},
|
||||
"f": {"h": {"i": ["j", "k", "l"]}},
|
||||
},
|
||||
}
|
||||
]
|
||||
|
||||
rows, columns = parse_results(raw_results, flatten=True)
|
||||
print(rows)
|
||||
self.assertDictEqual(
|
||||
rows[0],
|
||||
{
|
||||
"column": 2,
|
||||
"column2": "test",
|
||||
"column3": "hello",
|
||||
"nested.a": 2,
|
||||
"nested.b": "str2",
|
||||
"nested.c": "c",
|
||||
"nested.d.e": 3,
|
||||
"nested.f.h.i.0": "j",
|
||||
"nested.f.h.i.1": "k",
|
||||
"nested.f.h.i.2": "l",
|
||||
},
|
||||
)
|
||||
|
||||
self.assertIsNotNone(_get_column_by_name(columns, "column"))
|
||||
self.assertIsNotNone(_get_column_by_name(columns, "column2"))
|
||||
self.assertIsNotNone(_get_column_by_name(columns, "column3"))
|
||||
self.assertIsNotNone(_get_column_by_name(columns, "nested.a"))
|
||||
self.assertIsNotNone(_get_column_by_name(columns, "nested.b"))
|
||||
self.assertIsNotNone(_get_column_by_name(columns, "nested.c"))
|
||||
self.assertIsNotNone(_get_column_by_name(columns, "nested.d.e"))
|
||||
self.assertIsNotNone(_get_column_by_name(columns, "nested.f.h.i.0"))
|
||||
self.assertIsNotNone(_get_column_by_name(columns, "nested.f.h.i.1"))
|
||||
self.assertIsNotNone(_get_column_by_name(columns, "nested.f.h.i.2"))
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import datetime
|
||||
import decimal
|
||||
import sqlite3
|
||||
from unittest import TestCase
|
||||
@@ -108,11 +109,11 @@ class TestCreateTable(TestCase):
|
||||
create_table(connection, table_name, results)
|
||||
connection.execute("SELECT 1 FROM query_123")
|
||||
|
||||
def test_creates_table_with_decimal_in_column_value(self):
|
||||
def test_creates_table_with_decimal_and_timedelta_in_column_value(self):
|
||||
connection = sqlite3.connect(":memory:")
|
||||
results = {
|
||||
"columns": [{"name": "test1"}, {"name": "test2"}],
|
||||
"rows": [{"test1": 1, "test2": decimal.Decimal(2)}],
|
||||
"columns": [{"name": "test1"}, {"name": "test2"}, {"name": "test3"}],
|
||||
"rows": [{"test1": 1, "test2": decimal.Decimal(2), "test3": datetime.timedelta(seconds=3)}],
|
||||
}
|
||||
table_name = "query_123"
|
||||
create_table(connection, table_name, results)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
"""
|
||||
Some test cases for Trino.
|
||||
"""
|
||||
|
||||
from unittest import TestCase
|
||||
from unittest.mock import patch
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
from mock import Mock, patch
|
||||
from rq import Connection
|
||||
from rq.exceptions import NoSuchJobError
|
||||
from rq.job import JobStatus
|
||||
|
||||
from redash import models, rq_redis_connection
|
||||
from redash.query_runner.pg import PostgreSQL
|
||||
@@ -21,7 +22,7 @@ def fetch_job(*args, **kwargs):
|
||||
|
||||
result = Mock()
|
||||
result.id = job_id
|
||||
result.is_cancelled = False
|
||||
result.get_status = lambda: JobStatus.STARTED
|
||||
|
||||
return result
|
||||
|
||||
@@ -107,7 +108,7 @@ class TestEnqueueTask(BaseTestCase):
|
||||
# "cancel" the previous job
|
||||
def cancel_job(*args, **kwargs):
|
||||
job = fetch_job(*args, **kwargs)
|
||||
job.is_cancelled = True
|
||||
job.get_status = lambda: JobStatus.CANCELED
|
||||
return job
|
||||
|
||||
my_fetch_job.side_effect = cancel_job
|
||||
|
||||
@@ -28,19 +28,19 @@
|
||||
"series": [
|
||||
{
|
||||
"visible": true,
|
||||
"values": [10, 60, 100, 30],
|
||||
"labels": ["Slice 0", "Slice 0", "Slice 0", "Slice 0"],
|
||||
"values": [200],
|
||||
"labels": ["Slice 0"],
|
||||
"type": "pie",
|
||||
"hole": 0.4,
|
||||
"marker": {
|
||||
"colors": ["#356AFF", "#E92828", "#3BD973", "#604FE9"]
|
||||
"colors": ["#356AFF"]
|
||||
},
|
||||
"hoverinfo": "text+label",
|
||||
"hover": [],
|
||||
"text": ["15% (30)", "15% (30)", "15% (30)", "15% (30)"],
|
||||
"text": ["100% (200)"],
|
||||
"textinfo": "percent",
|
||||
"textposition": "inside",
|
||||
"textfont": { "color": ["#ffffff", "#ffffff", "#333333", "#ffffff"] },
|
||||
"textfont": { "color": ["#ffffff"] },
|
||||
"name": "a",
|
||||
"direction": "counterclockwise",
|
||||
"domain": { "x": [0, 0.98], "y": [0, 0.9] }
|
||||
|
||||
@@ -91,27 +91,39 @@ function prepareSeries(series: any, options: any, additionalOptions: any) {
|
||||
};
|
||||
|
||||
const sourceData = new Map();
|
||||
const xValues: any = [];
|
||||
const yValues: any = [];
|
||||
|
||||
//we hold the labels and values in a dictionary so that we can aggregate multiple values for a single label
|
||||
//once we reach the end of the data, we'll convert the dictionary to separate arrays for labels and values
|
||||
const labelsValuesDict: { [key: string]: any } = {};
|
||||
|
||||
const yErrorValues: any = [];
|
||||
each(data, row => {
|
||||
const x = normalizeValue(row.x, options.xAxis.type); // number/datetime/category
|
||||
const y = cleanYValue(row.y, seriesYAxis === "y2" ? options.yAxis[1].type : options.yAxis[0].type); // depends on series type!
|
||||
const yError = cleanNumber(row.yError); // always number
|
||||
const size = cleanNumber(row.size); // always number
|
||||
if (x in labelsValuesDict){
|
||||
labelsValuesDict[x] += y;
|
||||
}
|
||||
else{
|
||||
labelsValuesDict[x] = y;
|
||||
}
|
||||
const aggregatedY = labelsValuesDict[x];
|
||||
|
||||
sourceData.set(x, {
|
||||
x,
|
||||
y,
|
||||
y: aggregatedY,
|
||||
yError,
|
||||
size,
|
||||
yPercent: null, // will be updated later
|
||||
row,
|
||||
});
|
||||
xValues.push(x);
|
||||
yValues.push(y);
|
||||
yErrorValues.push(yError);
|
||||
});
|
||||
|
||||
const xValues = Object.keys(labelsValuesDict);
|
||||
const yValues = Object.values(labelsValuesDict);
|
||||
|
||||
const plotlySeries = {
|
||||
visible: true,
|
||||
hoverinfo: hoverInfoPattern,
|
||||
|
||||
@@ -41,8 +41,10 @@ function prepareSeries(series: any, options: any, additionalOptions: any) {
|
||||
const xPosition = (index % cellsInRow) * cellWidth;
|
||||
const yPosition = Math.floor(index / cellsInRow) * cellHeight;
|
||||
|
||||
const labels: any = [];
|
||||
const values: any = [];
|
||||
//we hold the labels and values in a dictionary so that we can aggregate multiple values for a single label
|
||||
//once we reach the end of the data, we'll convert the dictionary to separate arrays for labels and values
|
||||
const labelsValuesDict: { [key: string]: any } = {};
|
||||
|
||||
const sourceData = new Map();
|
||||
const seriesTotal = reduce(
|
||||
series.data,
|
||||
@@ -55,19 +57,29 @@ function prepareSeries(series: any, options: any, additionalOptions: any) {
|
||||
each(series.data, row => {
|
||||
const x = hasX ? normalizeValue(row.x, options.xAxis.type) : `Slice ${index}`;
|
||||
const y = cleanNumber(row.y);
|
||||
labels.push(x);
|
||||
values.push(y);
|
||||
|
||||
if (x in labelsValuesDict){
|
||||
labelsValuesDict[x] += y;
|
||||
}
|
||||
else{
|
||||
labelsValuesDict[x] = y;
|
||||
}
|
||||
const aggregatedY = labelsValuesDict[x];
|
||||
|
||||
sourceData.set(x, {
|
||||
x,
|
||||
y,
|
||||
yPercent: (y / seriesTotal) * 100,
|
||||
y: aggregatedY,
|
||||
yPercent: (aggregatedY / seriesTotal) * 100,
|
||||
row,
|
||||
});
|
||||
});
|
||||
|
||||
const markerColors = map(series.data, row => getValueColor(row.x));
|
||||
const markerColors = map(Array.from(sourceData.values()), data => getValueColor(data.row.x));
|
||||
const textColors = map(markerColors, c => chooseTextColorForBackground(c));
|
||||
|
||||
const labels = Object.keys(labelsValuesDict);
|
||||
const values = Object.values(labelsValuesDict);
|
||||
|
||||
return {
|
||||
visible: true,
|
||||
values,
|
||||
|
||||
@@ -14425,9 +14425,9 @@ tar-stream@^2.1.4:
    readable-stream "^3.1.1"

tar@^6.0.2:
  version "6.1.15"
  resolved "https://registry.yarnpkg.com/tar/-/tar-6.1.15.tgz#c9738b0b98845a3b344d334b8fa3041aaba53a69"
  integrity sha512-/zKt9UyngnxIT/EAGYuxaMYgOIJiP81ab9ZfkILq4oNLPFX50qyYmu7jRj9qeXoxmJHjGlbH0+cm2uy1WCs10A==
  version "6.2.1"
  resolved "https://registry.yarnpkg.com/tar/-/tar-6.2.1.tgz#717549c541bc3c2af15751bea94b1dd068d4b03a"
  integrity sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==
  dependencies:
    chownr "^2.0.0"
    fs-minipass "^2.0.0"