mirror of https://github.com/getredash/redash.git
synced 2025-12-25 10:00:45 -05:00

Compare commits: 24.03.0-dev ... 24.01.0-dev (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | 542c1c324c |  |
@@ -1,4 +1,4 @@
FROM cypress/browsers:node18.12.0-chrome106-ff106
FROM cypress/browsers:node16.18.0-chrome90-ff88
ENV APP /usr/src/app
WORKDIR $APP

@@ -1,3 +1,4 @@
version: '2.2'
services:
redash:
build: ../

@@ -1,3 +1,4 @@
version: "2.2"
x-redash-service: &redash-service
build:
context: ../
.github/workflows/ci.yml (132 changed lines)
@@ -3,22 +3,16 @@ on:
push:
branches:
- master
pull_request_target:
branches:
- master
pull_request:
env:
NODE_VERSION: 18
NODE_VERSION: 16.20.1
jobs:
backend-lint:
runs-on: ubuntu-22.04
steps:
- if: github.event.pull_request.mergeable == 'false'
name: Exit if PR is not mergeable
run: exit 1
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
fetch-depth: 1
ref: ${{ github.event.pull_request.head.sha }}
- uses: actions/setup-python@v4
with:
python-version: '3.8'

@@ -30,18 +24,14 @@ jobs:
runs-on: ubuntu-22.04
needs: backend-lint
env:
COMPOSE_FILE: .ci/compose.ci.yaml
COMPOSE_FILE: .ci/docker-compose.ci.yml
COMPOSE_PROJECT_NAME: redash
COMPOSE_DOCKER_CLI_BUILD: 1
DOCKER_BUILDKIT: 1
steps:
- if: github.event.pull_request.mergeable == 'false'
name: Exit if PR is not mergeable
run: exit 1
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
fetch-depth: 1
ref: ${{ github.event.pull_request.head.sha }}
- name: Build Docker Images
run: |
set -x

@@ -75,13 +65,9 @@ jobs:
frontend-lint:
runs-on: ubuntu-22.04
steps:
- if: github.event.pull_request.mergeable == 'false'
name: Exit if PR is not mergeable
run: exit 1
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
fetch-depth: 1
ref: ${{ github.event.pull_request.head.sha }}
- uses: actions/setup-node@v3
with:
node-version: ${{ env.NODE_VERSION }}

@@ -102,13 +88,9 @@ jobs:
runs-on: ubuntu-22.04
needs: frontend-lint
steps:
- if: github.event.pull_request.mergeable == 'false'
name: Exit if PR is not mergeable
run: exit 1
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
fetch-depth: 1
ref: ${{ github.event.pull_request.head.sha }}
- uses: actions/setup-node@v3
with:
node-version: ${{ env.NODE_VERSION }}

@@ -127,21 +109,17 @@ jobs:
runs-on: ubuntu-22.04
needs: frontend-lint
env:
COMPOSE_FILE: .ci/compose.cypress.yaml
COMPOSE_FILE: .ci/docker-compose.cypress.yml
COMPOSE_PROJECT_NAME: cypress
PERCY_TOKEN_ENCODED: ZGRiY2ZmZDQ0OTdjMzM5ZWE0ZGQzNTZiOWNkMDRjOTk4Zjg0ZjMxMWRmMDZiM2RjOTYxNDZhOGExMjI4ZDE3MA==
CYPRESS_PROJECT_ID_ENCODED: OTI0Y2th
CYPRESS_RECORD_KEY_ENCODED: YzA1OTIxMTUtYTA1Yy00NzQ2LWEyMDMtZmZjMDgwZGI2ODgx
CYPRESS_INSTALL_BINARY: 0
PUPPETEER_SKIP_CHROMIUM_DOWNLOAD: 1
PERCY_TOKEN: ${{ secrets.PERCY_TOKEN }}
CYPRESS_PROJECT_ID: ${{ secrets.CYPRESS_PROJECT_ID }}
CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }}
steps:
- if: github.event.pull_request.mergeable == 'false'
name: Exit if PR is not mergeable
run: exit 1
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
fetch-depth: 1
ref: ${{ github.event.pull_request.head.sha }}
- uses: actions/setup-node@v3
with:
node-version: ${{ env.NODE_VERSION }}

@@ -172,3 +150,89 @@ jobs:
with:
name: coverage
path: coverage

build-skip-check:
runs-on: ubuntu-22.04
outputs:
skip: ${{ steps.skip-check.outputs.skip }}
steps:
- name: Skip?
id: skip-check
run: |
if [[ "${{ vars.DOCKER_USER }}" == '' ]]; then
echo 'Docker user is empty. Skipping build+push'
echo skip=true >> "$GITHUB_OUTPUT"
elif [[ "${{ secrets.DOCKER_PASS }}" == '' ]]; then
echo 'Docker password is empty. Skipping build+push'
echo skip=true >> "$GITHUB_OUTPUT"
elif [[ "${{ github.ref_name }}" != 'master' ]]; then
echo 'Ref name is not `master`. Skipping build+push'
echo skip=true >> "$GITHUB_OUTPUT"
else
echo 'Docker user and password are set and branch is `master`.'
echo 'Building + pushing `preview` image.'
echo skip=false >> "$GITHUB_OUTPUT"
fi

build-docker-image:
runs-on: ubuntu-22.04
needs:
- backend-unit-tests
- frontend-unit-tests
- frontend-e2e-tests
- build-skip-check
if: needs.build-skip-check.outputs.skip == 'false'
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 1
- uses: actions/setup-node@v3
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'yarn'
- name: Install Dependencies
run: |
npm install --global --force yarn@1.22.19
yarn cache clean && yarn --frozen-lockfile --network-concurrency 1

- name: Set up QEMU
timeout-minutes: 1
uses: docker/setup-qemu-action@v2.2.0

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2

- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ vars.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASS }}

- name: Bump version
id: version
run: |
set -x
.ci/update_version
VERSION=$(jq -r .version package.json)
VERSION_TAG="${VERSION}.b${GITHUB_RUN_ID}.${GITHUB_RUN_NUMBER}"
echo "VERSION_TAG=$VERSION_TAG" >> "$GITHUB_OUTPUT"

- name: Build and push preview image to Docker Hub
uses: docker/build-push-action@v4
with:
push: true
tags: |
redash/redash:preview
redash/preview:${{ steps.version.outputs.VERSION_TAG }}
context: .
build-args: |
test_all_deps=true
cache-from: type=gha
cache-to: type=gha,mode=max
platforms: linux/amd64
env:
DOCKER_CONTENT_TRUST: true

- name: "Failure: output container logs to console"
if: failure()
run: docker compose logs
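The "Bump version" step in the workflow above composes the preview image tag from whatever version .ci/update_version leaves in package.json plus the workflow run identifiers. A minimal sketch of that string composition in Python, with illustrative (not real) run values:

# Sketch of the VERSION_TAG composition; the run id/number are hypothetical.
import json

with open("package.json") as fp:
    version = json.load(fp)["version"]   # whatever version package.json holds at this point

github_run_id = "7434657468"      # hypothetical $GITHUB_RUN_ID
github_run_number = "512"         # hypothetical $GITHUB_RUN_NUMBER

version_tag = f"{version}.b{github_run_id}.{github_run_number}"
print(version_tag)                # the image is then pushed as redash/preview:<version_tag>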
.github/workflows/periodic-snapshot.yml (2 changed lines)
@@ -13,8 +13,6 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
ssh-key: ${{secrets.ACTION_PUSH_KEY}}
- run: |
date="$(date +%y.%m).0-dev"
gawk -i inplace -F: -v q=\" -v tag=$date '/^ "version": / { print $1 FS, q tag q ","; next} { print }' package.json
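The gawk one-liner above rewrites the "version" field of package.json to the current YY.MM.0-dev snapshot tag. A rough Python equivalent of that in-place edit, for illustration only (it assumes the field is indented with two spaces, as in the repository's package.json):

# Rough equivalent of the gawk in-place edit above, not the command the workflow runs.
import datetime
import re

tag = datetime.date.today().strftime("%y.%m") + ".0-dev"   # e.g. "24.01.0-dev"

with open("package.json") as fp:
    text = fp.read()

# Replace the value of the top-level "version" field, keeping the trailing comma.
text = re.sub(r'^(  "version": ).*$', rf'\1"{tag}",', text, count=1, flags=re.M)

with open("package.json", "w") as fp:
    fp.write(text)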
.github/workflows/preview-image.yml (87 changed lines)
@@ -1,87 +0,0 @@
name: Preview Image
on:
push:
tags:
- '*-dev'

env:
NODE_VERSION: 18

jobs:
build-skip-check:
runs-on: ubuntu-22.04
outputs:
skip: ${{ steps.skip-check.outputs.skip }}
steps:
- name: Skip?
id: skip-check
run: |
if [[ "${{ vars.DOCKER_USER }}" == '' ]]; then
echo 'Docker user is empty. Skipping build+push'
echo skip=true >> "$GITHUB_OUTPUT"
elif [[ "${{ secrets.DOCKER_PASS }}" == '' ]]; then
echo 'Docker password is empty. Skipping build+push'
echo skip=true >> "$GITHUB_OUTPUT"
else
echo 'Docker user and password are set and branch is `master`.'
echo 'Building + pushing `preview` image.'
echo skip=false >> "$GITHUB_OUTPUT"
fi

build-docker-image:
runs-on: ubuntu-22.04
needs:
- build-skip-check
if: needs.build-skip-check.outputs.skip == 'false'
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 1
ref: ${{ github.event.push.after }}

- uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'yarn'

- name: Install Dependencies
run: |
npm install --global --force yarn@1.22.19
yarn cache clean && yarn --frozen-lockfile --network-concurrency 1

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ vars.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASS }}

- name: Set version
id: version
run: |
set -x
.ci/update_version
VERSION_TAG=$(jq -r .version package.json)
echo "VERSION_TAG=$VERSION_TAG" >> "$GITHUB_OUTPUT"

- name: Build and push preview image to Docker Hub
uses: docker/build-push-action@v4
with:
push: true
tags: |
redash/redash:preview
redash/preview:${{ steps.version.outputs.VERSION_TAG }}
context: .
build-args: |
test_all_deps=true
cache-from: type=gha
cache-to: type=gha,mode=max
platforms: linux/amd64
env:
DOCKER_CONTENT_TRUST: true

- name: "Failure: output container logs to console"
if: failure()
run: docker compose logs
@@ -1,4 +1,4 @@
FROM node:18-bookworm as frontend-builder
FROM node:16.20.1-bookworm as frontend-builder

RUN npm install --global --force yarn@1.22.19

@@ -14,7 +14,6 @@ USER redash
WORKDIR /frontend
COPY --chown=redash package.json yarn.lock .yarnrc /frontend/
COPY --chown=redash viz-lib /frontend/viz-lib
COPY --chown=redash scripts /frontend/scripts

# Controls whether to instrument code for coverage information
ARG code_coverage
@@ -118,9 +118,28 @@ class ShareDashboardDialog extends React.Component {
/>
</Form.Item>
{dashboard.public_url && (
<Form.Item label="Secret address" {...this.formItemProps}>
<InputWithCopy value={dashboard.public_url} data-test="SecretAddress" />
</Form.Item>
<>
<Form.Item>
<Alert
message={
<div>
Custom rule for hiding filter components when sharing links:
<br />
You can hide filter components by appending `&hide_filter={"{{"} component_name{"}}"}` to the
sharing URL.
<br />
Example: http://{"{{"}ip{"}}"}:{"{{"}port{"}}"}/public/dashboards/{"{{"}id{"}}"}
?p_country=ghana&p_site=10&hide_filter=country
</div>
}
type="warning"
/>
</Form.Item>

<Form.Item label="Secret address" {...this.formItemProps}>
<InputWithCopy value={dashboard.public_url} data-test="SecretAddress" />
</Form.Item>
</>
)}
</Form>
</Modal>
@@ -1,5 +1,6 @@
/* eslint-disable import/no-extraneous-dependencies, no-console */
const { find } = require("lodash");
const atob = require("atob");
const { execSync } = require("child_process");
const { get, post } = require("request").defaults({ jar: true });
const { seedData } = require("./seed-data");

@@ -59,11 +60,23 @@ function stopServer() {

function runCypressCI() {
const {
PERCY_TOKEN_ENCODED,
CYPRESS_PROJECT_ID_ENCODED,
CYPRESS_RECORD_KEY_ENCODED,
GITHUB_REPOSITORY,
CYPRESS_OPTIONS, // eslint-disable-line no-unused-vars
} = process.env;

if (GITHUB_REPOSITORY === "getredash/redash") {
if (PERCY_TOKEN_ENCODED) {
process.env.PERCY_TOKEN = atob(`${PERCY_TOKEN_ENCODED}`);
}
if (CYPRESS_PROJECT_ID_ENCODED) {
process.env.CYPRESS_PROJECT_ID = atob(`${CYPRESS_PROJECT_ID_ENCODED}`);
}
if (CYPRESS_RECORD_KEY_ENCODED) {
process.env.CYPRESS_RECORD_KEY = atob(`${CYPRESS_RECORD_KEY_ENCODED}`);
}
process.env.CYPRESS_OPTIONS = "--record";
}
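The atob() calls above simply base64-decode the *_ENCODED variables that the CI workflow defines and export the plain values for Cypress and Percy. A small Python sketch of the same decoding step (variable names mirror the workflow; the encoded value shown is illustrative):

# Equivalent of the atob() decoding above; the default value is illustrative only.
import base64
import os

encoded = os.environ.get("CYPRESS_PROJECT_ID_ENCODED", "aGVsbG8=")  # decodes to "hello"
if encoded:
    os.environ["CYPRESS_PROJECT_ID"] = base64.b64decode(encoded).decode("utf-8")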
@@ -1,5 +1,6 @@
# This configuration file is for the **development** setup.
# For a production example please refer to getredash/setup repository on GitHub.
version: "2.2"
x-redash-service: &redash-service
build:
context: .
@@ -7,7 +7,7 @@ Create Date: 2020-12-23 21:35:32.766354
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision = '0ec979123ba4'

@@ -18,7 +18,7 @@ depends_on = None

def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('dashboards', sa.Column('options', JSON(astext_type=sa.Text()), server_default='{}', nullable=False))
op.add_column('dashboards', sa.Column('options', postgresql.JSON(astext_type=sa.Text()), server_default='{}', nullable=False))
# ### end Alembic commands ###

@@ -10,7 +10,8 @@ import json
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table
from redash.models import MutableDict

from redash.models import MutableDict, PseudoJSON

# revision identifiers, used by Alembic.

@@ -40,7 +41,7 @@ def upgrade():
"queries",
sa.Column(
"schedule",
sa.Text(),
MutableDict.as_mutable(PseudoJSON),
nullable=False,
server_default=json.dumps({}),
),

@@ -50,7 +51,7 @@ def upgrade():
queries = table(
"queries",
sa.Column("id", sa.Integer, primary_key=True),
sa.Column("schedule", sa.Text()),
sa.Column("schedule", MutableDict.as_mutable(PseudoJSON)),
sa.Column("old_schedule", sa.String(length=10)),
)

@@ -84,7 +85,7 @@ def downgrade():
"queries",
sa.Column(
"old_schedule",
sa.Text(),
MutableDict.as_mutable(PseudoJSON),
nullable=False,
server_default=json.dumps({}),
),

@@ -92,8 +93,8 @@ def downgrade():

queries = table(
"queries",
sa.Column("schedule", sa.Text()),
sa.Column("old_schedule", sa.Text()),
sa.Column("schedule", MutableDict.as_mutable(PseudoJSON)),
sa.Column("old_schedule", MutableDict.as_mutable(PseudoJSON)),
)

op.execute(queries.update().values({"old_schedule": queries.c.schedule}))

@@ -105,7 +106,7 @@ def downgrade():
"queries",
sa.Column("id", sa.Integer, primary_key=True),
sa.Column("schedule", sa.String(length=10)),
sa.Column("old_schedule", sa.Text()),
sa.Column("old_schedule", MutableDict.as_mutable(PseudoJSON)),
)

conn = op.get_bind()
@@ -1,135 +0,0 @@
"""change type of json fields from varchar to json

Revision ID: 7205816877ec
Revises: 7ce5925f832b
Create Date: 2024-01-03 13:55:18.885021

"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSONB, JSON


# revision identifiers, used by Alembic.
revision = '7205816877ec'
down_revision = '7ce5925f832b'
branch_labels = None
depends_on = None


def upgrade():
connection = op.get_bind()
op.alter_column('queries', 'options',
existing_type=sa.Text(),
type_=JSONB(astext_type=sa.Text()),
nullable=True,
postgresql_using='options::jsonb',
server_default=sa.text("'{}'::jsonb"))
op.alter_column('queries', 'schedule',
existing_type=sa.Text(),
type_=JSONB(astext_type=sa.Text()),
nullable=True,
postgresql_using='schedule::jsonb',
server_default=sa.text("'{}'::jsonb"))
op.alter_column('events', 'additional_properties',
existing_type=sa.Text(),
type_=JSONB(astext_type=sa.Text()),
nullable=True,
postgresql_using='additional_properties::jsonb',
server_default=sa.text("'{}'::jsonb"))
op.alter_column('organizations', 'settings',
existing_type=sa.Text(),
type_=JSONB(astext_type=sa.Text()),
nullable=True,
postgresql_using='settings::jsonb',
server_default=sa.text("'{}'::jsonb"))
op.alter_column('alerts', 'options',
existing_type=JSON(astext_type=sa.Text()),
type_=JSONB(astext_type=sa.Text()),
nullable=True,
postgresql_using='options::jsonb',
server_default=sa.text("'{}'::jsonb"))
op.alter_column('dashboards', 'options',
existing_type=JSON(astext_type=sa.Text()),
type_=JSONB(astext_type=sa.Text()),
postgresql_using='options::jsonb',
server_default=sa.text("'{}'::jsonb"))
op.alter_column('dashboards', 'layout',
existing_type=sa.Text(),
type_=JSONB(astext_type=sa.Text()),
postgresql_using='layout::jsonb',
server_default=sa.text("'{}'::jsonb"))
op.alter_column('changes', 'change',
existing_type=JSON(astext_type=sa.Text()),
type_=JSONB(astext_type=sa.Text()),
postgresql_using='change::jsonb',
server_default=sa.text("'{}'::jsonb"))
op.alter_column('visualizations', 'options',
existing_type=sa.Text(),
type_=JSONB(astext_type=sa.Text()),
postgresql_using='options::jsonb',
server_default=sa.text("'{}'::jsonb"))
op.alter_column('widgets', 'options',
existing_type=sa.Text(),
type_=JSONB(astext_type=sa.Text()),
postgresql_using='options::jsonb',
server_default=sa.text("'{}'::jsonb"))


def downgrade():
connection = op.get_bind()
op.alter_column('queries', 'options',
existing_type=JSONB(astext_type=sa.Text()),
type_=sa.Text(),
postgresql_using='options::text',
existing_nullable=True,
server_default=sa.text("'{}'::text"))
op.alter_column('queries', 'schedule',
existing_type=JSONB(astext_type=sa.Text()),
type_=sa.Text(),
postgresql_using='schedule::text',
existing_nullable=True,
server_default=sa.text("'{}'::text"))
op.alter_column('events', 'additional_properties',
existing_type=JSONB(astext_type=sa.Text()),
type_=sa.Text(),
postgresql_using='additional_properties::text',
existing_nullable=True,
server_default=sa.text("'{}'::text"))
op.alter_column('organizations', 'settings',
existing_type=JSONB(astext_type=sa.Text()),
type_=sa.Text(),
postgresql_using='settings::text',
existing_nullable=True,
server_default=sa.text("'{}'::text"))
op.alter_column('alerts', 'options',
existing_type=JSONB(astext_type=sa.Text()),
type_=JSON(astext_type=sa.Text()),
postgresql_using='options::json',
existing_nullable=True,
server_default=sa.text("'{}'::json"))
op.alter_column('dashboards', 'options',
existing_type=JSONB(astext_type=sa.Text()),
type_=JSON(astext_type=sa.Text()),
postgresql_using='options::json',
server_default=sa.text("'{}'::json"))
op.alter_column('dashboards', 'layout',
existing_type=JSONB(astext_type=sa.Text()),
type_=sa.Text(),
postgresql_using='layout::text',
server_default=sa.text("'{}'::text"))
op.alter_column('changes', 'change',
existing_type=JSONB(astext_type=sa.Text()),
type_=JSON(astext_type=sa.Text()),
postgresql_using='change::json',
server_default=sa.text("'{}'::json"))
op.alter_column('visualizations', 'options',
type_=sa.Text(),
existing_type=JSONB(astext_type=sa.Text()),
postgresql_using='options::text',
server_default=sa.text("'{}'::text"))
op.alter_column('widgets', 'options',
type_=sa.Text(),
existing_type=JSONB(astext_type=sa.Text()),
postgresql_using='options::text',
server_default=sa.text("'{}'::text"))
@@ -7,9 +7,10 @@ Create Date: 2019-01-17 13:22:21.729334
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table

from redash.models import MutableDict
from redash.models import MutableDict, PseudoJSON

# revision identifiers, used by Alembic.
revision = "73beceabb948"

@@ -42,7 +43,7 @@ def upgrade():
queries = table(
"queries",
sa.Column("id", sa.Integer, primary_key=True),
sa.Column("schedule", sa.Text()),
sa.Column("schedule", MutableDict.as_mutable(PseudoJSON)),
)

conn = op.get_bind()

@@ -6,7 +6,7 @@ Create Date: 2018-01-31 15:20:30.396533

"""

import json
import simplejson
from alembic import op
import sqlalchemy as sa

@@ -27,7 +27,7 @@ def upgrade():
dashboard_result = db.session.execute("SELECT id, layout FROM dashboards")
for dashboard in dashboard_result:
print("  Updating dashboard: {}".format(dashboard["id"]))
layout = json.loads(dashboard["layout"])
layout = simplejson.loads(dashboard["layout"])

print("  Building widgets map:")
widgets = {}

@@ -53,7 +53,7 @@ def upgrade():
if widget is None:
continue

options = json.loads(widget["options"]) or {}
options = simplejson.loads(widget["options"]) or {}
options["position"] = {
"row": row_index,
"col": column_index * column_size,

@@ -62,7 +62,7 @@ def upgrade():

db.session.execute(
"UPDATE widgets SET options=:options WHERE id=:id",
{"options": json.dumps(options), "id": widget_id},
{"options": simplejson.dumps(options), "id": widget_id},
)

dashboard_result.close()
@@ -7,7 +7,7 @@ Create Date: 2019-01-31 09:21:31.517265
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import BYTEA
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table
from sqlalchemy_utils.types.encrypted.encrypted_type import FernetEngine

@@ -15,8 +15,10 @@ from redash import settings
from redash.utils.configuration import ConfigurationContainer
from redash.models.types import (
EncryptedConfiguration,
Configuration,
MutableDict,
MutableList,
PseudoJSON,
)

# revision identifiers, used by Alembic.

@@ -29,7 +31,7 @@ depends_on = None
def upgrade():
op.add_column(
"data_sources",
sa.Column("encrypted_options", BYTEA(), nullable=True),
sa.Column("encrypted_options", postgresql.BYTEA(), nullable=True),
)

# copy values

@@ -44,14 +46,7 @@ def upgrade():
)
),
),
sa.Column(
"options",
ConfigurationContainer.as_mutable(
EncryptedConfiguration(
sa.Text, settings.DATASOURCE_SECRET_KEY, FernetEngine
)
),
),
sa.Column("options", ConfigurationContainer.as_mutable(Configuration)),
)

conn = op.get_bind()

@@ -9,7 +9,7 @@ import re
from funcy import flatten, compact
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.dialects import postgresql
from redash import models

# revision identifiers, used by Alembic.

@@ -21,10 +21,10 @@ depends_on = None

def upgrade():
op.add_column(
"dashboards", sa.Column("tags", ARRAY(sa.Unicode()), nullable=True)
"dashboards", sa.Column("tags", postgresql.ARRAY(sa.Unicode()), nullable=True)
)
op.add_column(
"queries", sa.Column("tags", ARRAY(sa.Unicode()), nullable=True)
"queries", sa.Column("tags", postgresql.ARRAY(sa.Unicode()), nullable=True)
)
@@ -7,14 +7,17 @@ Create Date: 2020-12-14 21:42:48.661684
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import BYTEA
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table
from sqlalchemy_utils.types.encrypted.encrypted_type import FernetEngine

from redash import settings
from redash.utils.configuration import ConfigurationContainer
from redash.models.base import key_type
from redash.models.types import EncryptedConfiguration
from redash.models.types import (
EncryptedConfiguration,
Configuration,
)

# revision identifiers, used by Alembic.

@@ -27,7 +30,7 @@ depends_on = None
def upgrade():
op.add_column(
"notification_destinations",
sa.Column("encrypted_options", BYTEA(), nullable=True)
sa.Column("encrypted_options", postgresql.BYTEA(), nullable=True)
)

# copy values

@@ -42,14 +45,7 @@ def upgrade():
)
),
),
sa.Column(
"options",
ConfigurationContainer.as_mutable(
EncryptedConfiguration(
sa.Text, settings.DATASOURCE_SECRET_KEY, FernetEngine
)
),
),
sa.Column("options", ConfigurationContainer.as_mutable(Configuration)),
)

conn = op.get_bind()

@@ -7,7 +7,7 @@ Create Date: 2018-11-08 16:12:17.023569
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision = "e7f8a917aa8e"

@@ -21,7 +21,7 @@ def upgrade():
"users",
sa.Column(
"details",
JSON(astext_type=sa.Text()),
postgresql.JSON(astext_type=sa.Text()),
server_default="{}",
nullable=True,
),

@@ -7,7 +7,7 @@ Create Date: 2022-01-31 15:24:16.507888
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSON, JSONB
from sqlalchemy.dialects import postgresql

from redash.models import db

@@ -23,8 +23,8 @@ def upgrade():

### commands auto generated by Alembic - please adjust! ###
op.alter_column('users', 'details',
existing_type=JSON(astext_type=sa.Text()),
type_=JSONB(astext_type=sa.Text()),
existing_type=postgresql.JSON(astext_type=sa.Text()),
type_=postgresql.JSONB(astext_type=sa.Text()),
existing_nullable=True,
existing_server_default=sa.text("'{}'::jsonb"))
### end Alembic commands ###

@@ -52,8 +52,8 @@ def downgrade():
connection.execute(update_query)
db.session.commit()
op.alter_column('users', 'details',
existing_type=JSONB(astext_type=sa.Text()),
type_=JSON(astext_type=sa.Text()),
existing_type=postgresql.JSONB(astext_type=sa.Text()),
type_=postgresql.JSON(astext_type=sa.Text()),
existing_nullable=True,
existing_server_default=sa.text("'{}'::json"))
@@ -6,7 +6,7 @@
command = "cd ../ && yarn cache clean && yarn --frozen-lockfile --network-concurrency 1 && yarn build && cd ./client"

[build.environment]
NODE_VERSION = "18"
NODE_VERSION = "16.20.1"
NETLIFY_USE_YARN = "true"
YARN_VERSION = "1.22.19"
CYPRESS_INSTALL_BINARY = "0"
package.json (18 changed lines)
@@ -1,19 +1,20 @@
{
"name": "redash-client",
"version": "24.03.0-dev",
"version": "24.01.0-dev",
"description": "The frontend part of Redash.",
"main": "index.js",
"scripts": {
"start": "npm-run-all --parallel watch:viz webpack-dev-server",
"clean": "rm -rf ./client/dist/",
"build:viz": "(cd viz-lib && yarn build:babel)",
"build": "yarn clean && yarn build:viz && NODE_OPTIONS=--openssl-legacy-provider NODE_ENV=production webpack",
"watch:app": "NODE_OPTIONS=--openssl-legacy-provider webpack watch --progress",
"build": "yarn clean && yarn build:viz && NODE_ENV=production webpack",
"build:old-node-version": "yarn clean && NODE_ENV=production node --max-old-space-size=4096 node_modules/.bin/webpack",
"watch:app": "webpack watch --progress",
"watch:viz": "(cd viz-lib && yarn watch:babel)",
"watch": "npm-run-all --parallel watch:*",
"webpack-dev-server": "webpack-dev-server",
"analyze": "yarn clean && BUNDLE_ANALYZER=on NODE_OPTIONS=--openssl-legacy-provider webpack",
"analyze:build": "yarn clean && NODE_ENV=production BUNDLE_ANALYZER=on NODE_OPTIONS=--openssl-legacy-provider webpack",
"analyze": "yarn clean && BUNDLE_ANALYZER=on webpack",
"analyze:build": "yarn clean && NODE_ENV=production BUNDLE_ANALYZER=on webpack",
"lint": "yarn lint:base --ext .js --ext .jsx --ext .ts --ext .tsx ./client",
"lint:fix": "yarn lint:base --fix --ext .js --ext .jsx --ext .ts --ext .tsx ./client",
"lint:base": "eslint --config ./client/.eslintrc.js --ignore-path ./client/.eslintignore",

@@ -33,8 +34,7 @@
"url": "git+https://github.com/getredash/redash.git"
},
"engines": {
"node": ">16.0 <21.0",
"npm": "please-use-yarn",
"node": ">14.16.0 <17.0.0",
"yarn": "^1.22.10"
},
"author": "Redash Contributors",

@@ -178,10 +178,6 @@
"viz-lib/**"
]
},
"browser": {
"fs": false,
"path": false
},
"//": "browserslist set to 'Async functions' compatibility",
"browserslist": [
"Edge >= 15",
poetry.lock (985 changed lines, generated)
File diff suppressed because it is too large.
@@ -12,7 +12,7 @@ force-exclude = '''

[tool.poetry]
name = "redash"
version = "24.03.0-dev"
version = "24.01.0-dev"
description = "Make Your Company Data Driven. Connect to any data source, easily visualize, dashboard and share your data."
authors = ["Arik Fraimovich <arik@redash.io>"]
# to be added to/removed from the mailing list, please reach out to Arik via the above email or Discord

@@ -46,7 +46,7 @@ greenlet = "2.0.2"
gunicorn = "20.0.4"
httplib2 = "0.19.0"
itsdangerous = "2.1.2"
jinja2 = "3.1.3"
jinja2 = "3.1.2"
jsonschema = "3.1.1"
markupsafe = "2.1.1"
maxminddb-geolite2 = "2018.703"

@@ -70,6 +70,7 @@ rq = "1.9.0"
rq-scheduler = "0.11.0"
semver = "2.8.1"
sentry-sdk = "1.28.1"
simplejson = "3.16.0"
sqlalchemy = "1.3.24"
sqlalchemy-searchable = "1.2.0"
sqlalchemy-utils = "0.34.2"

@@ -110,7 +111,7 @@ nzalchemy = "^11.0.2"
nzpy = ">=1.15"
oauth2client = "4.1.3"
openpyxl = "3.0.7"
oracledb = "2.0.1"
oracledb = "1.4.0"
pandas = "1.3.4"
phoenixdb = "0.7"
pinotdb = ">=0.4.5"

@@ -168,7 +169,7 @@ build-backend = "poetry.core.masonry.api"
[tool.ruff]
exclude = [".git", "viz-lib", "node_modules", "migrations"]
ignore = ["E501"]
select = ["C9", "E", "F", "W", "I001", "UP004"]
select = ["C9", "E", "F", "W", "I001"]

[tool.ruff.mccabe]
max-complexity = 15
@@ -14,7 +14,7 @@ from redash.app import create_app  # noqa
from redash.destinations import import_destinations
from redash.query_runner import import_query_runners

__version__ = "24.03.0-dev"
__version__ = "24.01.0-dev"

if os.environ.get("REMOTE_DEBUG"):

@@ -1,8 +1,8 @@
import json
import logging

import jwt
import requests
import simplejson

logger = logging.getLogger("jwt_auth")

@@ -25,7 +25,7 @@ def get_public_key_from_net(url):
if "keys" in data:
public_keys = []
for key_dict in data["keys"]:
public_key = jwt.algorithms.RSAAlgorithm.from_jwk(json.dumps(key_dict))
public_key = jwt.algorithms.RSAAlgorithm.from_jwk(simplejson.dumps(key_dict))
public_keys.append(public_key)

get_public_keys.key_cache[url] = public_keys
@@ -1,6 +1,5 @@
import json

import click
import simplejson
from flask import current_app
from flask.cli import FlaskGroup, run_command, with_appcontext
from rq import Connection

@@ -54,7 +53,7 @@ def version():
@manager.command()
def status():
with Connection(rq_redis_connection):
print(json.dumps(get_status(), indent=2))
print(simplejson.dumps(get_status(), indent=2))

@manager.command()

@@ -5,7 +5,7 @@ logger = logging.getLogger(__name__)
__all__ = ["BaseDestination", "register", "get_destination", "import_destinations"]

class BaseDestination:
class BaseDestination(object):
deprecated = False

def __init__(self, configuration):

@@ -42,8 +42,8 @@ class Discord(BaseDestination):
"inline": True,
},
]
if alert.custom_body:
fields.append({"name": "Description", "value": alert.custom_body})
if alert.options.get("custom_body"):
fields.append({"name": "Description", "value": alert.options["custom_body"]})
if new_state == Alert.TRIGGERED_STATE:
if alert.options.get("custom_subject"):
text = alert.options["custom_subject"]

@@ -50,7 +50,7 @@ class Slack(BaseDestination):
payload = {"attachments": [{"text": text, "color": color, "fields": fields}]}

try:
resp = requests.post(options.get("url"), data=json_dumps(payload).encode("utf-8"), timeout=5.0)
resp = requests.post(options.get("url"), data=json_dumps(payload), timeout=5.0)
logging.warning(resp.text)
if resp.status_code != 200:
logging.error("Slack send ERROR. status_code => {status}".format(status=resp.status_code))
@@ -5,7 +5,7 @@ from flask import Blueprint, current_app, request
from flask_login import current_user, login_required
from flask_restful import Resource, abort
from sqlalchemy import cast
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy_utils.functions import sort_query

@@ -114,7 +114,7 @@ def json_response(response):
def filter_by_tags(result_set, column):
if request.args.getlist("tags"):
tags = request.args.getlist("tags")
result_set = result_set.filter(cast(column, ARRAY(db.Text)).contains(tags))
result_set = result_set.filter(cast(column, postgresql.ARRAY(db.Text)).contains(tags))
return result_set

@@ -96,7 +96,7 @@ class DashboardListResource(BaseResource):
org=self.current_org,
user=self.current_user,
is_draft=True,
layout=[],
layout="[]",
)
models.db.session.add(dashboard)
models.db.session.commit()

@@ -7,6 +7,7 @@ from redash.permissions import (
require_permission,
)
from redash.serializers import serialize_visualization
from redash.utils import json_dumps

class VisualizationListResource(BaseResource):

@@ -17,6 +18,7 @@ class VisualizationListResource(BaseResource):
query = get_object_or_404(models.Query.get_by_id_and_org, kwargs.pop("query_id"), self.current_org)
require_object_modify_permission(query, self.current_user)

kwargs["options"] = json_dumps(kwargs["options"])
kwargs["query_rel"] = query

vis = models.Visualization(**kwargs)

@@ -32,6 +34,8 @@ class VisualizationResource(BaseResource):
require_object_modify_permission(vis.query_rel, self.current_user)

kwargs = request.get_json(force=True)
if "options" in kwargs:
kwargs["options"] = json_dumps(kwargs["options"])

kwargs.pop("id", None)
kwargs.pop("query_id", None)
@@ -1,6 +1,6 @@
import json
import os

import simplejson
from flask import url_for

WEBPACK_MANIFEST_PATH = os.path.join(os.path.dirname(__file__), "../../client/dist/", "asset-manifest.json")

@@ -15,7 +15,7 @@ def configure_webpack(app):
if assets is None or app.debug:
try:
with open(WEBPACK_MANIFEST_PATH) as fp:
assets = json.load(fp)
assets = simplejson.load(fp)
except IOError:
app.logger.exception("Unable to load webpack manifest")
assets = {}

@@ -9,6 +9,7 @@ from redash.permissions import (
view_only,
)
from redash.serializers import serialize_widget
from redash.utils import json_dumps

class WidgetListResource(BaseResource):

@@ -29,6 +30,7 @@ class WidgetListResource(BaseResource):
dashboard = models.Dashboard.get_by_id_and_org(widget_properties.get("dashboard_id"), self.current_org)
require_object_modify_permission(dashboard, self.current_user)

widget_properties["options"] = json_dumps(widget_properties["options"])
widget_properties.pop("id", None)

visualization_id = widget_properties.pop("visualization_id")

@@ -63,7 +65,7 @@ class WidgetResource(BaseResource):
require_object_modify_permission(widget.dashboard, self.current_user)
widget_properties = request.get_json(force=True)
widget.text = widget_properties["text"]
widget.options = widget_properties["options"]
widget.options = json_dumps(widget_properties["options"])
models.db.session.commit()
return serialize_widget(widget)
@@ -6,7 +6,7 @@ import time

import pytz
from sqlalchemy import UniqueConstraint, and_, cast, distinct, func, or_
from sqlalchemy.dialects.postgresql import ARRAY, DOUBLE_PRECISION, JSONB
from sqlalchemy.dialects import postgresql
from sqlalchemy.event import listens_for
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import (

@@ -40,17 +40,14 @@ from redash.models.base import (
from redash.models.changes import Change, ChangeTrackingMixin  # noqa
from redash.models.mixins import BelongsToOrgMixin, TimestampMixin
from redash.models.organizations import Organization
from redash.models.parameterized_query import (
InvalidParameterError,
ParameterizedQuery,
QueryDetachedFromDataSourceError,
)
from redash.models.parameterized_query import ParameterizedQuery
from redash.models.types import (
Configuration,
EncryptedConfiguration,
JSONText,
MutableDict,
MutableList,
json_cast_property,
PseudoJSON,
pseudo_json_cast_property,
)
from redash.models.users import (  # noqa
AccessPermission,

@@ -83,7 +80,7 @@ from redash.utils.configuration import ConfigurationContainer
logger = logging.getLogger(__name__)

class ScheduledQueriesExecutions:
class ScheduledQueriesExecutions(object):
KEY_NAME = "sq:executed_at"

def __init__(self):

@@ -126,10 +123,7 @@ class DataSource(BelongsToOrgMixin, db.Model):

data_source_groups = db.relationship("DataSourceGroup", back_populates="data_source", cascade="all")
__tablename__ = "data_sources"
__table_args__ = (
db.Index("data_sources_org_id_name", "org_id", "name"),
{"extend_existing": True},
)
__table_args__ = (db.Index("data_sources_org_id_name", "org_id", "name"),)

def __eq__(self, other):
return self.id == other.id

@@ -303,11 +297,34 @@ class DataSourceGroup(db.Model):
view_only = Column(db.Boolean, default=False)

__tablename__ = "data_source_groups"
__table_args__ = ({"extend_existing": True},)

DESERIALIZED_DATA_ATTR = "_deserialized_data"

class DBPersistence(object):
@property
def data(self):
if self._data is None:
return None

if not hasattr(self, DESERIALIZED_DATA_ATTR):
setattr(self, DESERIALIZED_DATA_ATTR, json_loads(self._data))

return self._deserialized_data

@data.setter
def data(self, data):
if hasattr(self, DESERIALIZED_DATA_ATTR):
delattr(self, DESERIALIZED_DATA_ATTR)
self._data = data

QueryResultPersistence = settings.dynamic_settings.QueryResultPersistence or DBPersistence

@generic_repr("id", "org_id", "data_source_id", "query_hash", "runtime", "retrieved_at")
class QueryResult(db.Model, BelongsToOrgMixin):
class QueryResult(db.Model, QueryResultPersistence, BelongsToOrgMixin):
id = primary_key("QueryResult")
org_id = Column(key_type("Organization"), db.ForeignKey("organizations.id"))
org = db.relationship(Organization)

@@ -315,8 +332,8 @@ class QueryResult(db.Model, BelongsToOrgMixin):
data_source = db.relationship(DataSource, backref=backref("query_results"))
query_hash = Column(db.String(32), index=True)
query_text = Column("query", db.Text)
data = Column(JSONText, nullable=True)
runtime = Column(DOUBLE_PRECISION)
_data = Column("data", db.Text)
runtime = Column(postgresql.DOUBLE_PRECISION)
retrieved_at = Column(db.DateTime(True))

__tablename__ = "query_results"

@@ -457,11 +474,11 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
last_modified_by = db.relationship(User, backref="modified_queries", foreign_keys=[last_modified_by_id])
is_archived = Column(db.Boolean, default=False, index=True)
is_draft = Column(db.Boolean, default=True, index=True)
schedule = Column(MutableDict.as_mutable(JSONB), nullable=True)
interval = json_cast_property(db.Integer, "schedule", "interval", default=0)
schedule = Column(MutableDict.as_mutable(PseudoJSON), nullable=True)
interval = pseudo_json_cast_property(db.Integer, "schedule", "interval", default=0)
schedule_failures = Column(db.Integer, default=0)
visualizations = db.relationship("Visualization", cascade="all, delete-orphan")
options = Column(MutableDict.as_mutable(JSONB), default={})
options = Column(MutableDict.as_mutable(PseudoJSON), default={})
search_vector = Column(
TSVectorType(
"id",

@@ -472,7 +489,7 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
),
nullable=True,
)
tags = Column("tags", MutableList.as_mutable(ARRAY(db.Unicode)), nullable=True)
tags = Column("tags", MutableList.as_mutable(postgresql.ARRAY(db.Unicode)), nullable=True)

query_class = SearchBaseQuery
__tablename__ = "queries"

@@ -508,7 +525,7 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
name="Table",
description="",
type="TABLE",
options={},
options="{}",
)
)
return query

@@ -574,7 +591,7 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
@classmethod
def past_scheduled_queries(cls):
now = utils.utcnow()
queries = Query.query.filter(func.jsonb_typeof(Query.schedule) != "null").order_by(Query.id)
queries = Query.query.filter(Query.schedule.isnot(None)).order_by(Query.id)
return [
query
for query in queries

@@ -586,7 +603,7 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
def outdated_queries(cls):
queries = (
Query.query.options(joinedload(Query.latest_query_data).load_only("retrieved_at"))
.filter(func.jsonb_typeof(Query.schedule) != "null")
.filter(Query.schedule.isnot(None))
.order_by(Query.id)
.all()
)

@@ -814,20 +831,7 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
def update_query_hash(self):
should_apply_auto_limit = self.options.get("apply_auto_limit", False) if self.options else False
query_runner = self.data_source.query_runner if self.data_source else BaseQueryRunner({})
query_text = self.query_text

parameters_dict = {p["name"]: p.get("value") for p in self.parameters} if self.options else {}
if any(parameters_dict):
try:
query_text = self.parameterized.apply(parameters_dict).query
except InvalidParameterError as e:
logging.info(f"Unable to update hash for query {self.id} because of invalid parameters: {str(e)}")
except QueryDetachedFromDataSourceError as e:
logging.info(
f"Unable to update hash for query {self.id} because of dropdown query {e.query_id} is unattached from datasource"
)

self.query_hash = query_runner.gen_query_hash(query_text, should_apply_auto_limit)
self.query_hash = query_runner.gen_query_hash(self.query_text, should_apply_auto_limit)

@listens_for(Query, "before_insert")

@@ -932,7 +936,7 @@ class Alert(TimestampMixin, BelongsToOrgMixin, db.Model):
query_rel = db.relationship(Query, backref=backref("alerts", cascade="all"))
user_id = Column(key_type("User"), db.ForeignKey("users.id"))
user = db.relationship(User, backref="alerts")
options = Column(MutableDict.as_mutable(JSONB), nullable=True)
options = Column(MutableDict.as_mutable(PseudoJSON))
state = Column(db.String(255), default=UNKNOWN_STATE)
subscriptions = db.relationship("AlertSubscription", cascade="all, delete-orphan")
last_triggered_at = Column(db.DateTime(True), nullable=True)

@@ -1043,13 +1047,13 @@ class Dashboard(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model
user_id = Column(key_type("User"), db.ForeignKey("users.id"))
user = db.relationship(User)
# layout is no longer used, but kept so we know how to render old dashboards.
layout = Column(MutableList.as_mutable(JSONB), default=[])
layout = Column(db.Text)
dashboard_filters_enabled = Column(db.Boolean, default=False)
is_archived = Column(db.Boolean, default=False, index=True)
is_draft = Column(db.Boolean, default=True, index=True)
widgets = db.relationship("Widget", backref="dashboard", lazy="dynamic")
tags = Column("tags", MutableList.as_mutable(ARRAY(db.Unicode)), nullable=True)
options = Column(MutableDict.as_mutable(JSONB), default={})
tags = Column("tags", MutableList.as_mutable(postgresql.ARRAY(db.Unicode)), nullable=True)
options = Column(MutableDict.as_mutable(postgresql.JSON), server_default="{}", default={})

__tablename__ = "dashboards"
__mapper_args__ = {"version_id_col": version}

@@ -1162,7 +1166,7 @@ class Visualization(TimestampMixin, BelongsToOrgMixin, db.Model):
query_rel = db.relationship(Query, back_populates="visualizations")
name = Column(db.String(255))
description = Column(db.String(4096), nullable=True)
options = Column(MutableDict.as_mutable(JSONB), nullable=True)
options = Column(db.Text)

__tablename__ = "visualizations"

@@ -1189,7 +1193,7 @@ class Widget(TimestampMixin, BelongsToOrgMixin, db.Model):
visualization = db.relationship(Visualization, backref=backref("widgets", cascade="delete"))
text = Column(db.Text, nullable=True)
width = Column(db.Integer)
options = Column(MutableDict.as_mutable(JSONB), default={})
options = Column(db.Text)
dashboard_id = Column(key_type("Dashboard"), db.ForeignKey("dashboards.id"), index=True)

__tablename__ = "widgets"

@@ -1221,7 +1225,7 @@ class Event(db.Model):
action = Column(db.String(255))
object_type = Column(db.String(255))
object_id = Column(db.String(255), nullable=True)
additional_properties = Column(MutableDict.as_mutable(JSONB), nullable=True, default={})
additional_properties = Column(MutableDict.as_mutable(PseudoJSON), nullable=True, default={})
created_at = Column(db.DateTime(True), default=db.func.now())

__tablename__ = "events"
@@ -1,13 +1,13 @@
import functools

from flask_sqlalchemy import BaseQuery, SQLAlchemy
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm import object_session
from sqlalchemy.pool import NullPool
from sqlalchemy_searchable import SearchQueryMixin, make_searchable, vectorizer

from redash import settings
from redash.utils import json_dumps, json_loads
from redash.utils import json_dumps

class RedashSQLAlchemy(SQLAlchemy):

@@ -28,10 +28,7 @@ class RedashSQLAlchemy(SQLAlchemy):
return options

db = RedashSQLAlchemy(
session_options={"expire_on_commit": False},
engine_options={"json_serializer": json_dumps, "json_deserializer": json_loads},
)
db = RedashSQLAlchemy(session_options={"expire_on_commit": False})
# Make sure the SQLAlchemy mappers are all properly configured first.
# This is required by SQLAlchemy-Searchable as it adds DDL listeners
# on the configuration phase of models.

@@ -53,7 +50,7 @@ def integer_vectorizer(column):
return db.func.cast(column, db.Text)

@vectorizer(UUID)
@vectorizer(postgresql.UUID)
def uuid_vectorizer(column):
return db.func.cast(column, db.Text)

@@ -71,7 +68,7 @@ def gfk_type(cls):
return cls

class GFKBase:
class GFKBase(object):
"""
Compatibility with 'generic foreign key' approach Peewee used.
"""
@@ -1,8 +1,8 @@
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.inspection import inspect
from sqlalchemy_utils.models import generic_repr

from .base import Column, GFKBase, db, key_type, primary_key
from .types import PseudoJSON

@generic_repr("id", "object_type", "object_id", "created_at")

@@ -13,7 +13,7 @@ class Change(GFKBase, db.Model):
object_version = Column(db.Integer, default=0)
user_id = Column(key_type("User"), db.ForeignKey("users.id"))
user = db.relationship("User", backref="changes")
change = Column(JSONB)
change = Column(PseudoJSON)
created_at = Column(db.DateTime(True), default=db.func.now())

__tablename__ = "changes"

@@ -45,7 +45,7 @@ class Change(GFKBase, db.Model):
)

class ChangeTrackingMixin:
class ChangeTrackingMixin(object):
skipped_fields = ("id", "created_at", "updated_at", "version")
_clean_values = None

@@ -3,7 +3,7 @@ from sqlalchemy.event import listens_for
from .base import Column, db

class TimestampMixin:
class TimestampMixin(object):
updated_at = Column(db.DateTime(True), default=db.func.now(), nullable=False)
created_at = Column(db.DateTime(True), default=db.func.now(), nullable=False)

@@ -17,7 +17,7 @@ def timestamp_before_update(mapper, connection, target):
target.updated_at = db.func.now()

class BelongsToOrgMixin:
class BelongsToOrgMixin(object):
@classmethod
def get_by_id_and_org(cls, object_id, org, org_cls=None):
query = cls.query.filter(cls.id == object_id)
@@ -1,4 +1,3 @@
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm.attributes import flag_modified
from sqlalchemy_utils.models import generic_repr

@@ -6,7 +5,7 @@ from redash.settings.organization import settings as org_settings

from .base import Column, db, primary_key
from .mixins import TimestampMixin
from .types import MutableDict
from .types import MutableDict, PseudoJSON
from .users import Group, User

@@ -18,7 +17,7 @@ class Organization(TimestampMixin, db.Model):
id = primary_key("Organization")
name = Column(db.String(255))
slug = Column(db.String(255), unique=True)
settings = Column(MutableDict.as_mutable(JSONB), default={})
settings = Column(MutableDict.as_mutable(PseudoJSON))
groups = db.relationship("Group", lazy="dynamic")
events = db.relationship("Event", lazy="dynamic", order_by="desc(Event.created_at)")

@@ -103,7 +103,7 @@ def _is_value_within_options(value, dropdown_options, allow_list=False):
return str(value) in dropdown_options

class ParameterizedQuery:
class ParameterizedQuery(object):
def __init__(self, template, schema=None, org=None):
self.schema = schema or []
self.org = org
@@ -1,12 +1,25 @@
from sqlalchemy import cast
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.ext.indexable import index_property
from sqlalchemy.ext.mutable import Mutable
from sqlalchemy.types import TypeDecorator
from sqlalchemy_utils import EncryptedType

from redash.models.base import db
from redash.utils import json_dumps, json_loads
from redash.utils.configuration import ConfigurationContainer

from .base import db

class Configuration(TypeDecorator):
impl = db.Text

def process_bind_param(self, value, dialect):
return value.to_json()

def process_result_value(self, value, dialect):
return ConfigurationContainer.from_json(value)

class EncryptedConfiguration(EncryptedType):
def process_bind_param(self, value, dialect):

@@ -18,8 +31,8 @@ class EncryptedConfiguration(EncryptedType):
)

# Utilized for cases when JSON size is bigger than JSONB (255MB) or JSON (10MB) limit
class JSONText(TypeDecorator):
# XXX replace PseudoJSON and MutableDict with real JSON field
class PseudoJSON(TypeDecorator):
impl = db.Text

def process_bind_param(self, value, dialect):

@@ -94,3 +107,19 @@ class json_cast_property(index_property):
def expr(self, model):
expr = super(json_cast_property, self).expr(model)
return expr.astext.cast(self.cast_type)

class pseudo_json_cast_property(index_property):
"""
A SQLAlchemy index property that is able to cast the
entity attribute as the specified cast type. Useful
for PseudoJSON colums for easier querying/filtering.
"""

def __init__(self, cast_type, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cast_type = cast_type

def expr(self, model):
expr = cast(getattr(model, self.attr_name), JSON)[self.index]
return expr.astext.cast(self.cast_type)
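The PseudoJSON column type reintroduced above stores JSON-serialized text in a plain Text column; its bind/result hooks are not fully shown in this hunk. A minimal sketch of such a TypeDecorator, written as an assumption rather than the exact Redash implementation:

# Minimal sketch of a PseudoJSON-style TypeDecorator (assumed shape; the real
# class lives in redash/models/types.py and may differ in detail).
import json
from sqlalchemy.types import Text, TypeDecorator

class PseudoJSONSketch(TypeDecorator):
    """Store a dict as JSON text in a plain TEXT column."""
    impl = Text

    def process_bind_param(self, value, dialect):
        # Serialize on the way into the database.
        return json.dumps(value) if value is not None else None

    def process_result_value(self, value, dialect):
        # Deserialize on the way out.
        return json.loads(value) if value is not None else None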
@@ -8,7 +8,7 @@ from operator import or_
from flask import current_app, request_started, url_for
from flask_login import AnonymousUserMixin, UserMixin, current_user
from passlib.apps import custom_app_context as pwd_context
from sqlalchemy.dialects.postgresql import ARRAY, JSONB
from sqlalchemy.dialects import postgresql
from sqlalchemy_utils import EmailType
from sqlalchemy_utils.models import generic_repr

@@ -60,7 +60,7 @@ def init_app(app):
request_started.connect(update_user_active_at, app)

class PermissionsCheckMixin:
class PermissionsCheckMixin(object):
def has_permission(self, permission):
return self.has_permissions((permission,))

@@ -84,14 +84,14 @@ class User(TimestampMixin, db.Model, BelongsToOrgMixin, UserMixin, PermissionsCh
password_hash = Column(db.String(128), nullable=True)
group_ids = Column(
"groups",
MutableList.as_mutable(ARRAY(key_type("Group"))),
MutableList.as_mutable(postgresql.ARRAY(key_type("Group"))),
nullable=True,
)
api_key = Column(db.String(40), default=lambda: generate_token(40), unique=True)

disabled_at = Column(db.DateTime(True), default=None, nullable=True)
details = Column(
MutableDict.as_mutable(JSONB),
MutableDict.as_mutable(postgresql.JSONB),
nullable=True,
server_default="{}",
default={},

@@ -267,7 +267,7 @@ class Group(db.Model, BelongsToOrgMixin):
org = db.relationship("Organization", back_populates="groups")
type = Column(db.String(255), default=REGULAR_GROUP)
name = Column(db.String(100))
permissions = Column(ARRAY(db.String(255)), default=DEFAULT_PERMISSIONS)
permissions = Column(postgresql.ARRAY(db.String(255)), default=DEFAULT_PERMISSIONS)
created_at = Column(db.DateTime(True), default=db.func.now())

__tablename__ = "groups"

@@ -54,7 +54,7 @@ def require_access(obj, user, need_view_only):
abort(403)

class require_permissions:
class require_permissions(object):
def __init__(self, permissions, allow_one=False):
self.permissions = permissions
self.allow_one = allow_one
@@ -9,6 +9,7 @@ from rq.timeouts import JobTimeoutException
from sshtunnel import open_tunnel

from redash import settings, utils
from redash.utils import json_loads
from redash.utils.requests_session import (
UnacceptableAddressException,
requests_or_advocate,
@@ -113,13 +114,12 @@ class NotSupported(Exception):
pass

class BaseQueryRunner:
class BaseQueryRunner(object):
deprecated = False
should_annotate_query = True
noop_query = None
limit_query = " LIMIT 1000"
limit_keywords = ["LIMIT", "OFFSET"]
limit_after_select = False

def __init__(self, configuration):
self.syntax = "sql"
@@ -243,7 +243,7 @@ class BaseQueryRunner:

if error is not None:
raise Exception("Failed running query [%s]." % query)
return results["rows"]
return json_loads(results)["rows"]

@classmethod
def to_dict(cls):
@@ -302,19 +302,10 @@ class BaseSQLQueryRunner(BaseQueryRunner):
parsed_query = sqlparse.parse(query)[0]
limit_tokens = sqlparse.parse(self.limit_query)[0].tokens
length = len(parsed_query.tokens)
if not self.limit_after_select:
if parsed_query.tokens[length - 1].ttype == sqlparse.tokens.Punctuation:
parsed_query.tokens[length - 1 : length - 1] = limit_tokens
else:
parsed_query.tokens += limit_tokens
if parsed_query.tokens[length - 1].ttype == sqlparse.tokens.Punctuation:
parsed_query.tokens[length - 1 : length - 1] = limit_tokens
else:
for i in range(length - 1, -1, -1):
if parsed_query[i].value.upper() == "SELECT":
index = parsed_query.token_index(parsed_query[i + 1])
parsed_query = sqlparse.sql.Statement(
parsed_query.tokens[:index] + limit_tokens + parsed_query.tokens[index:]
)
break
parsed_query.tokens += limit_tokens
return str(parsed_query)

def apply_auto_limit(self, query_text, should_apply_auto_limit):
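A standalone sketch of the auto-limit idea in the hunk above, assuming only sqlparse: splice LIMIT tokens in front of a trailing semicolon, otherwise append them at the end. Keyword variants (e.g. TOP after SELECT for SQL Server) are deliberately omitted; the function name is illustrative.

import sqlparse


def add_limit_1000(query: str) -> str:
    parsed = sqlparse.parse(query)[0]
    limit_tokens = sqlparse.parse(" LIMIT 1000")[0].tokens
    if parsed.tokens[-1].ttype == sqlparse.tokens.Punctuation:
        # Keep the trailing ";" last by inserting the LIMIT tokens before it.
        parsed.tokens[-1:-1] = limit_tokens
    else:
        parsed.tokens += limit_tokens
    return str(parsed)


# add_limit_1000("SELECT * FROM events;") -> "SELECT * FROM events LIMIT 1000;"
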
@@ -7,6 +7,7 @@ from redash.query_runner import (
BaseQueryRunner,
register,
)
from redash.utils import json_dumps

logger = logging.getLogger(__name__)

@@ -80,11 +81,12 @@ class Arango(BaseQueryRunner):
"rows": result,
}

json_data = json_dumps(data, ignore_nan=True)
error = None
except Exception:
raise

return data, error
return json_data, error

register(Arango)

@@ -12,6 +12,7 @@ from redash.query_runner import (
register,
)
from redash.settings import parse_boolean
from redash.utils import json_dumps, json_loads

logger = logging.getLogger(__name__)
ANNOTATE_QUERY = parse_boolean(os.environ.get("ATHENA_ANNOTATE_QUERY", "true"))
@@ -46,7 +47,7 @@ _TYPE_MAPPINGS = {
}

class SimpleFormatter:
class SimpleFormatter(object):
def format(self, operation, parameters=None):
return operation

@@ -209,6 +210,7 @@ class Athena(BaseQueryRunner):
if error is not None:
self._handle_run_query_error(error)

results = json_loads(results)
for row in results["rows"]:
table_name = "{0}.{1}".format(row["table_schema"], row["table_name"])
if table_name not in schema:
@@ -255,13 +257,14 @@ class Athena(BaseQueryRunner):
},
}

json_data = json_dumps(data, ignore_nan=True)
error = None
except Exception:
if cursor.query_id:
cursor.cancel()
raise

return data, error
return json_data, error

register(Athena)
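The same change repeats across the query runners in the rest of this compare: on the 24.01 side, run_query returns a JSON string built with json_dumps(data) plus an error, instead of the raw dict used on the 24.03 side. A hedged, self-contained illustration of that contract (the runner and its values are made up for the example):

from redash.query_runner import TYPE_STRING, BaseQueryRunner
from redash.utils import json_dumps


class ExampleRunner(BaseQueryRunner):
    noop_query = "SELECT 1"

    def run_query(self, query, user):
        try:
            columns = [{"name": "value", "friendly_name": "value", "type": TYPE_STRING}]
            rows = [{"value": query.strip()}]
            data = {"columns": columns, "rows": rows}
            json_data = json_dumps(data)  # serialize before handing back to the caller
            error = None
        except Exception as e:
            json_data = None
            error = str(e)
        return json_data, error
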
@@ -13,7 +13,7 @@ from redash.query_runner import (
|
||||
JobTimeoutException,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_loads
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -157,16 +157,17 @@ class AxibaseTSD(BaseQueryRunner):
|
||||
columns, rows = generate_rows_and_columns(data)
|
||||
|
||||
data = {"columns": columns, "rows": rows}
|
||||
json_data = json_dumps(data)
|
||||
error = None
|
||||
|
||||
except SQLException as e:
|
||||
data = None
|
||||
json_data = None
|
||||
error = e.content
|
||||
except (KeyboardInterrupt, InterruptException, JobTimeoutException):
|
||||
sql.cancel_query(query_id)
|
||||
raise
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
def get_schema(self, get_stats=False):
|
||||
connection = atsd_client.connect_url(
|
||||
|
||||
@@ -8,7 +8,7 @@ from redash.query_runner import (
|
||||
BaseQueryRunner,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_loads
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
try:
|
||||
from azure.kusto.data.exceptions import KustoServiceError
|
||||
@@ -124,15 +124,16 @@ class AzureKusto(BaseQueryRunner):
|
||||
|
||||
error = None
|
||||
data = {"columns": columns, "rows": rows}
|
||||
json_data = json_dumps(data)
|
||||
|
||||
except KustoServiceError as err:
|
||||
data = None
|
||||
json_data = None
|
||||
try:
|
||||
error = err.args[1][0]["error"]["@message"]
|
||||
except (IndexError, KeyError):
|
||||
error = err.args[1]
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
def get_schema(self, get_stats=False):
|
||||
query = ".show database schema as json"
|
||||
@@ -142,6 +143,8 @@ class AzureKusto(BaseQueryRunner):
|
||||
if error is not None:
|
||||
self._handle_run_query_error(error)
|
||||
|
||||
results = json_loads(results)
|
||||
|
||||
schema_as_json = json_loads(results["rows"][0]["DatabaseSchema"])
|
||||
tables_list = schema_as_json["Databases"][self.configuration["database"]]["Tables"].values()
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ from redash.query_runner import (
|
||||
JobTimeoutException,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_loads
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -318,6 +318,7 @@ class BigQuery(BaseQueryRunner):
|
||||
if error is not None:
|
||||
self._handle_run_query_error(error)
|
||||
|
||||
results = json_loads(results)
|
||||
for row in results["rows"]:
|
||||
table_name = "{0}.{1}".format(row["table_schema"], row["table_name"])
|
||||
if table_name not in schema:
|
||||
@@ -345,8 +346,9 @@ class BigQuery(BaseQueryRunner):
|
||||
data = self._get_query_result(jobs, query)
|
||||
error = None
|
||||
|
||||
json_data = json_dumps(data, ignore_nan=True)
|
||||
except apiclient.errors.HttpError as e:
|
||||
data = None
|
||||
json_data = None
|
||||
if e.resp.status in [400, 404]:
|
||||
error = json_loads(e.content)["error"]["message"]
|
||||
else:
|
||||
@@ -361,7 +363,7 @@ class BigQuery(BaseQueryRunner):
|
||||
|
||||
raise
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
|
||||
register(BigQuery)
|
||||
|
||||
@@ -5,6 +5,7 @@ from base64 import b64decode
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
from redash.query_runner import BaseQueryRunner, register
|
||||
from redash.utils import JSONEncoder, json_dumps, json_loads
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -26,6 +27,13 @@ def generate_ssl_options_dict(protocol, cert_path=None):
|
||||
return ssl_options
|
||||
|
||||
|
||||
class CassandraJSONEncoder(JSONEncoder):
|
||||
def default(self, o):
|
||||
if isinstance(o, sortedset):
|
||||
return list(o)
|
||||
return super(CassandraJSONEncoder, self).default(o)
|
||||
|
||||
|
||||
class Cassandra(BaseQueryRunner):
|
||||
noop_query = "SELECT dateof(now()) FROM system.local"
|
||||
|
||||
@@ -33,12 +41,6 @@ class Cassandra(BaseQueryRunner):
|
||||
def enabled(cls):
|
||||
return enabled
|
||||
|
||||
@classmethod
|
||||
def custom_json_encoder(cls, dec, o):
|
||||
if isinstance(o, sortedset):
|
||||
return list(o)
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def configuration_schema(cls):
|
||||
return {
|
||||
@@ -84,6 +86,7 @@ class Cassandra(BaseQueryRunner):
|
||||
select release_version from system.local;
|
||||
"""
|
||||
results, error = self.run_query(query, None)
|
||||
results = json_loads(results)
|
||||
release_version = results["rows"][0]["release_version"]
|
||||
|
||||
query = """
|
||||
@@ -104,6 +107,7 @@ class Cassandra(BaseQueryRunner):
|
||||
)
|
||||
|
||||
results, error = self.run_query(query, None)
|
||||
results = json_loads(results)
|
||||
|
||||
schema = {}
|
||||
for row in results["rows"]:
|
||||
@@ -151,8 +155,9 @@ class Cassandra(BaseQueryRunner):
|
||||
rows = [dict(zip(column_names, row)) for row in result]
|
||||
|
||||
data = {"columns": columns, "rows": rows}
|
||||
json_data = json_dumps(data, cls=CassandraJSONEncoder)
|
||||
|
||||
return data, None
|
||||
return json_data, None
|
||||
|
||||
def _generate_cert_file(self):
|
||||
cert_encoded_bytes = self.configuration.get("sslCertificateFile", None)
|
||||
|
||||
@@ -15,6 +15,7 @@ from redash.query_runner import (
|
||||
register,
|
||||
split_sql_statements,
|
||||
)
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -84,6 +85,8 @@ class ClickHouse(BaseSQLQueryRunner):
|
||||
if error is not None:
|
||||
self._handle_run_query_error(error)
|
||||
|
||||
results = json_loads(results)
|
||||
|
||||
for row in results["rows"]:
|
||||
table_name = "{}.{}".format(row["database"], row["table"])
|
||||
|
||||
@@ -197,24 +200,25 @@ class ClickHouse(BaseSQLQueryRunner):
|
||||
queries = split_multi_query(query)
|
||||
|
||||
if not queries:
|
||||
data = None
|
||||
json_data = None
|
||||
error = "Query is empty"
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
try:
|
||||
# If just one query was given no session is needed
|
||||
if len(queries) == 1:
|
||||
data = self._clickhouse_query(queries[0])
|
||||
results = self._clickhouse_query(queries[0])
|
||||
else:
|
||||
# If more than one query was given, a session is needed. Parameter session_check must be false
|
||||
# for the first query
|
||||
session_id = "redash_{}".format(uuid4().hex)
|
||||
|
||||
data = self._clickhouse_query(queries[0], session_id, session_check=False)
|
||||
results = self._clickhouse_query(queries[0], session_id, session_check=False)
|
||||
|
||||
for query in queries[1:]:
|
||||
data = self._clickhouse_query(query, session_id, session_check=True)
|
||||
results = self._clickhouse_query(query, session_id, session_check=True)
|
||||
|
||||
data = json_dumps(results)
|
||||
error = None
|
||||
except Exception as e:
|
||||
data = None
|
||||
|
||||
@@ -3,7 +3,7 @@ import datetime
|
||||
import yaml
|
||||
|
||||
from redash.query_runner import BaseQueryRunner, register
|
||||
from redash.utils import parse_human_time
|
||||
from redash.utils import json_dumps, parse_human_time
|
||||
|
||||
try:
|
||||
import boto3
|
||||
@@ -121,7 +121,7 @@ class CloudWatch(BaseQueryRunner):
|
||||
|
||||
rows, columns = parse_response(results)
|
||||
|
||||
return {"rows": rows, "columns": columns}, None
|
||||
return json_dumps({"rows": rows, "columns": columns}), None
|
||||
|
||||
|
||||
register(CloudWatch)
|
||||
|
||||
@@ -4,7 +4,7 @@ import time
|
||||
import yaml
|
||||
|
||||
from redash.query_runner import BaseQueryRunner, register
|
||||
from redash.utils import parse_human_time
|
||||
from redash.utils import json_dumps, parse_human_time
|
||||
|
||||
try:
|
||||
import boto3
|
||||
@@ -146,7 +146,7 @@ class CloudWatchInsights(BaseQueryRunner):
|
||||
time.sleep(POLL_INTERVAL)
|
||||
elapsed += POLL_INTERVAL
|
||||
|
||||
return data, None
|
||||
return json_dumps(data), None
|
||||
|
||||
|
||||
register(CloudWatchInsights)
|
||||
|
||||
@@ -9,6 +9,7 @@ import logging
|
||||
from os import environ
|
||||
|
||||
from redash.query_runner import BaseQueryRunner
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
from . import register
|
||||
|
||||
@@ -114,7 +115,7 @@ class CorporateMemoryQueryRunner(BaseQueryRunner):
|
||||
logger.info("results are: {}".format(results))
|
||||
# Not sure why we do not use the json package here but all other
|
||||
# query runner do it the same way :-)
|
||||
sparql_results = results
|
||||
sparql_results = json_loads(results)
|
||||
# transform all bindings to redash rows
|
||||
rows = []
|
||||
for sparql_row in sparql_results["results"]["bindings"]:
|
||||
@@ -132,7 +133,7 @@ class CorporateMemoryQueryRunner(BaseQueryRunner):
|
||||
columns.append({"name": var, "friendly_name": var, "type": "string"})
|
||||
# Not sure why we do not use the json package here but all other
|
||||
# query runner do it the same way :-)
|
||||
return {"columns": columns, "rows": rows}
|
||||
return json_dumps({"columns": columns, "rows": rows})
|
||||
|
||||
@classmethod
|
||||
def name(cls):
|
||||
|
||||
@@ -10,6 +10,7 @@ from redash.query_runner import (
|
||||
BaseQueryRunner,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
try:
|
||||
@@ -154,7 +155,7 @@ class Couchbase(BaseQueryRunner):
|
||||
rows, columns = parse_results(result.json()["results"])
|
||||
data = {"columns": columns, "rows": rows}
|
||||
|
||||
return data, None
|
||||
return json_dumps(data), None
|
||||
|
||||
@classmethod
|
||||
def name(cls):
|
||||
|
||||
@@ -4,6 +4,7 @@ import logging
|
||||
import yaml
|
||||
|
||||
from redash.query_runner import BaseQueryRunner, NotSupported, register
|
||||
from redash.utils import json_dumps
|
||||
from redash.utils.requests_session import (
|
||||
UnacceptableAddressException,
|
||||
requests_or_advocate,
|
||||
@@ -95,18 +96,19 @@ class CSV(BaseQueryRunner):
|
||||
break
|
||||
data["rows"] = df[labels].replace({np.nan: None}).to_dict(orient="records")
|
||||
|
||||
json_data = json_dumps(data)
|
||||
error = None
|
||||
except KeyboardInterrupt:
|
||||
error = "Query cancelled by user."
|
||||
data = None
|
||||
json_data = None
|
||||
except UnacceptableAddressException:
|
||||
error = "Can't query private addresses."
|
||||
data = None
|
||||
json_data = None
|
||||
except Exception as e:
|
||||
error = "Error reading {0}. {1}".format(path, str(e))
|
||||
data = None
|
||||
json_data = None
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
def get_schema(self):
|
||||
raise NotSupported()
|
||||
|
||||
@@ -16,6 +16,7 @@ from redash.query_runner import (
|
||||
BaseQueryRunner,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
|
||||
class Databend(BaseQueryRunner):
|
||||
@@ -84,10 +85,11 @@ class Databend(BaseQueryRunner):
|
||||
|
||||
data = {"columns": columns, "rows": rows}
|
||||
error = None
|
||||
json_data = json_dumps(data)
|
||||
finally:
|
||||
connection.close()
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
def get_schema(self, get_stats=False):
|
||||
query = """
|
||||
@@ -104,6 +106,7 @@ class Databend(BaseQueryRunner):
|
||||
self._handle_run_query_error(error)
|
||||
|
||||
schema = {}
|
||||
results = json_loads(results)
|
||||
|
||||
for row in results["rows"]:
|
||||
table_name = "{}.{}".format(row["table_schema"], row["table_name"])
|
||||
@@ -130,6 +133,7 @@ class Databend(BaseQueryRunner):
|
||||
self._handle_run_query_error(error)
|
||||
|
||||
schema = {}
|
||||
results = json_loads(results)
|
||||
|
||||
for row in results["rows"]:
|
||||
table_name = "{}.{}".format(row["table_schema"], row["table_name"])
|
||||
|
||||
@@ -16,6 +16,7 @@ from redash.query_runner import (
|
||||
split_sql_statements,
|
||||
)
|
||||
from redash.settings import cast_int_or_default
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
try:
|
||||
import pyodbc
|
||||
@@ -114,13 +115,16 @@ class Databricks(BaseSQLQueryRunner):
|
||||
logger.warning("Truncated result set.")
|
||||
statsd_client.incr("redash.query_runner.databricks.truncated")
|
||||
data["truncated"] = True
|
||||
json_data = json_dumps(data)
|
||||
error = None
|
||||
else:
|
||||
error = None
|
||||
data = {
|
||||
"columns": [{"name": "result", "type": TYPE_STRING}],
|
||||
"rows": [{"result": "No data was returned."}],
|
||||
}
|
||||
json_data = json_dumps(
|
||||
{
|
||||
"columns": [{"name": "result", "type": TYPE_STRING}],
|
||||
"rows": [{"result": "No data was returned."}],
|
||||
}
|
||||
)
|
||||
|
||||
cursor.close()
|
||||
except pyodbc.Error as e:
|
||||
@@ -128,9 +132,9 @@ class Databricks(BaseSQLQueryRunner):
|
||||
error = str(e.args[1])
|
||||
else:
|
||||
error = str(e)
|
||||
data = None
|
||||
json_data = None
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
def get_schema(self):
|
||||
raise NotSupported()
|
||||
@@ -142,6 +146,8 @@ class Databricks(BaseSQLQueryRunner):
|
||||
if error is not None:
|
||||
self._handle_run_query_error(error)
|
||||
|
||||
results = json_loads(results)
|
||||
|
||||
first_column_name = results["columns"][0]["name"]
|
||||
return [row[first_column_name] for row in results["rows"]]
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ from redash.query_runner import (
|
||||
JobTimeoutException,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -77,6 +78,8 @@ class DB2(BaseSQLQueryRunner):
|
||||
if error is not None:
|
||||
self._handle_run_query_error(error)
|
||||
|
||||
results = json_loads(results)
|
||||
|
||||
for row in results["rows"]:
|
||||
if row["TABLE_SCHEMA"] != "public":
|
||||
table_name = "{}.{}".format(row["TABLE_SCHEMA"], row["TABLE_NAME"])
|
||||
@@ -127,22 +130,23 @@ class DB2(BaseSQLQueryRunner):
|
||||
|
||||
data = {"columns": columns, "rows": rows}
|
||||
error = None
|
||||
json_data = json_dumps(data)
|
||||
else:
|
||||
error = "Query completed but it returned no data."
|
||||
data = None
|
||||
json_data = None
|
||||
except (select.error, OSError):
|
||||
error = "Query interrupted. Please retry."
|
||||
data = None
|
||||
json_data = None
|
||||
except ibm_db_dbi.DatabaseError as e:
|
||||
error = str(e)
|
||||
data = None
|
||||
json_data = None
|
||||
except (KeyboardInterrupt, InterruptException, JobTimeoutException):
|
||||
connection.cancel()
|
||||
raise
|
||||
finally:
|
||||
connection.close()
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
|
||||
register(DB2)
|
||||
|
||||
@@ -8,6 +8,7 @@ except ImportError:
|
||||
enabled = False
|
||||
|
||||
from redash.query_runner import BaseQueryRunner, register
|
||||
from redash.utils import json_dumps
|
||||
|
||||
|
||||
def reduce_item(reduced_item, key, value):
|
||||
@@ -80,7 +81,7 @@ class Dgraph(BaseQueryRunner):
|
||||
client_stub.close()
|
||||
|
||||
def run_query(self, query, user):
|
||||
data = None
|
||||
json_data = None
|
||||
error = None
|
||||
|
||||
try:
|
||||
@@ -108,10 +109,12 @@ class Dgraph(BaseQueryRunner):
|
||||
|
||||
# finally, assemble both the columns and data
|
||||
data = {"columns": columns, "rows": processed_data}
|
||||
|
||||
json_data = json_dumps(data)
|
||||
except Exception as e:
|
||||
error = e
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
def get_schema(self, get_stats=False):
|
||||
"""Queries Dgraph for all the predicates, their types, their tokenizers, etc.
|
||||
|
||||
@@ -13,6 +13,7 @@ from redash.query_runner import (
|
||||
guess_type,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -97,7 +98,9 @@ class Drill(BaseHTTPQueryRunner):
|
||||
if error is not None:
|
||||
return None, error
|
||||
|
||||
return parse_response(response.json()), None
|
||||
results = parse_response(response.json())
|
||||
|
||||
return json_dumps(results), None
|
||||
|
||||
def get_schema(self, get_stats=False):
|
||||
query = """
|
||||
@@ -129,6 +132,8 @@ class Drill(BaseHTTPQueryRunner):
|
||||
if error is not None:
|
||||
self._handle_run_query_error(error)
|
||||
|
||||
results = json_loads(results)
|
||||
|
||||
schema = {}
|
||||
|
||||
for row in results["rows"]:
|
||||
|
||||
@@ -12,6 +12,7 @@ from redash.query_runner import (
|
||||
BaseQueryRunner,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
TYPES_MAP = {1: TYPE_STRING, 2: TYPE_INTEGER, 3: TYPE_BOOLEAN}
|
||||
|
||||
@@ -58,10 +59,12 @@ class Druid(BaseQueryRunner):
|
||||
|
||||
data = {"columns": columns, "rows": rows}
|
||||
error = None
|
||||
json_data = json_dumps(data)
|
||||
print(json_data)
|
||||
finally:
|
||||
connection.close()
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
def get_schema(self, get_stats=False):
|
||||
query = """
|
||||
@@ -78,6 +81,7 @@ class Druid(BaseQueryRunner):
|
||||
self._handle_run_query_error(error)
|
||||
|
||||
schema = {}
|
||||
results = json_loads(results)
|
||||
|
||||
for row in results["rows"]:
|
||||
table_name = "{}.{}".format(row["TABLE_SCHEMA"], row["TABLE_NAME"])
|
||||
|
||||
@@ -19,6 +19,7 @@ try:
|
||||
except ImportError:
|
||||
enabled = False
|
||||
|
||||
from redash.utils import json_dumps
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -105,17 +106,18 @@ class e6data(BaseQueryRunner):
|
||||
columns.append({"name": column_name, "type": column_type})
|
||||
rows = [dict(zip([c["name"] for c in columns], r)) for r in results]
|
||||
data = {"columns": columns, "rows": rows}
|
||||
json_data = json_dumps(data)
|
||||
error = None
|
||||
|
||||
except Exception as error:
|
||||
logger.debug(error)
|
||||
data = None
|
||||
json_data = None
|
||||
finally:
|
||||
if cursor is not None:
|
||||
cursor.clear()
|
||||
cursor.close()
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
def test_connection(self):
|
||||
self.noop_query = "SELECT 1"
|
||||
|
||||
@@ -16,7 +16,7 @@ from redash.query_runner import (
|
||||
JobTimeoutException,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_loads
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
try:
|
||||
import http.client as http_client
|
||||
@@ -406,18 +406,18 @@ class Kibana(BaseElasticSearch):
|
||||
# TODO: Handle complete ElasticSearch queries (JSON based sent over HTTP POST)
|
||||
raise Exception("Advanced queries are not supported")
|
||||
|
||||
data = {"columns": result_columns, "rows": result_rows}
|
||||
json_data = json_dumps({"columns": result_columns, "rows": result_rows})
|
||||
except requests.HTTPError as e:
|
||||
logger.exception(e)
|
||||
r = e.response
|
||||
error = "Failed to execute query. Return Code: {0} Reason: {1}".format(r.status_code, r.text)
|
||||
data = None
|
||||
json_data = None
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.exception(e)
|
||||
error = "Connection refused"
|
||||
data = None
|
||||
json_data = None
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
|
||||
class ElasticSearch(BaseElasticSearch):
|
||||
@@ -460,20 +460,20 @@ class ElasticSearch(BaseElasticSearch):
|
||||
result_rows = []
|
||||
self._parse_results(mappings, result_fields, r.json(), result_columns, result_rows)
|
||||
|
||||
data = {"columns": result_columns, "rows": result_rows}
|
||||
json_data = json_dumps({"columns": result_columns, "rows": result_rows})
|
||||
except (KeyboardInterrupt, JobTimeoutException) as e:
|
||||
logger.exception(e)
|
||||
raise
|
||||
except requests.HTTPError as e:
|
||||
logger.exception(e)
|
||||
error = "Failed to execute query. Return Code: {0} Reason: {1}".format(r.status_code, r.text)
|
||||
data = None
|
||||
json_data = None
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.exception(e)
|
||||
error = "Connection refused"
|
||||
data = None
|
||||
json_data = None
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
|
||||
register(Kibana)
|
||||
|
||||
@@ -10,6 +10,7 @@ from redash.query_runner import (
|
||||
BaseHTTPQueryRunner,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -61,9 +62,11 @@ class ElasticSearch2(BaseHTTPQueryRunner):
|
||||
query_results = response.json()
|
||||
data = self._parse_results(result_fields, query_results)
|
||||
error = None
|
||||
return data, error
|
||||
json_data = json_dumps(data)
|
||||
return json_data, error
|
||||
|
||||
def _build_query(self, query: str) -> Tuple[dict, str, Optional[list]]:
|
||||
query = json_loads(query)
|
||||
index_name = query.pop("index", "")
|
||||
result_fields = query.pop("result_fields", None)
|
||||
url = "/{}/_search".format(index_name)
|
||||
|
||||
@@ -9,6 +9,7 @@ from redash.query_runner import (
|
||||
BaseQueryRunner,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps
|
||||
|
||||
|
||||
def _exasol_type_mapper(val, data_type):
|
||||
@@ -108,13 +109,14 @@ class Exasol(BaseQueryRunner):
|
||||
|
||||
rows = [dict(zip(cnames, row)) for row in statement]
|
||||
data = {"columns": columns, "rows": rows}
|
||||
json_data = json_dumps(data)
|
||||
finally:
|
||||
if statement is not None:
|
||||
statement.close()
|
||||
|
||||
connection.close()
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
def get_schema(self, get_stats=False):
|
||||
query = """
|
||||
|
||||
@@ -3,6 +3,7 @@ import logging
|
||||
import yaml
|
||||
|
||||
from redash.query_runner import BaseQueryRunner, NotSupported, register
|
||||
from redash.utils import json_dumps
|
||||
from redash.utils.requests_session import (
|
||||
UnacceptableAddressException,
|
||||
requests_or_advocate,
|
||||
@@ -93,18 +94,19 @@ class Excel(BaseQueryRunner):
|
||||
break
|
||||
data["rows"] = df[labels].replace({np.nan: None}).to_dict(orient="records")
|
||||
|
||||
json_data = json_dumps(data)
|
||||
error = None
|
||||
except KeyboardInterrupt:
|
||||
error = "Query cancelled by user."
|
||||
data = None
|
||||
json_data = None
|
||||
except UnacceptableAddressException:
|
||||
error = "Can't query private addresses."
|
||||
data = None
|
||||
json_data = None
|
||||
except Exception as e:
|
||||
error = "Error reading {0}. {1}".format(path, str(e))
|
||||
data = None
|
||||
json_data = None
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
def get_schema(self):
|
||||
raise NotSupported()
|
||||
|
||||
@@ -12,7 +12,7 @@ from redash.query_runner import (
|
||||
BaseSQLQueryRunner,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_loads
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -180,14 +180,15 @@ class GoogleAnalytics(BaseSQLQueryRunner):
|
||||
response = api.get(**params).execute()
|
||||
data = parse_ga_response(response)
|
||||
error = None
|
||||
json_data = json_dumps(data)
|
||||
except HttpError as e:
|
||||
# Make sure we return a more readable error to the end user
|
||||
error = e._get_reason()
|
||||
data = None
|
||||
json_data = None
|
||||
else:
|
||||
error = "Wrong query format."
|
||||
data = None
|
||||
return data, error
|
||||
json_data = None
|
||||
return json_data, error
|
||||
|
||||
|
||||
register(GoogleAnalytics)
|
||||
|
||||
@@ -13,7 +13,7 @@ from redash.query_runner import (
|
||||
BaseQueryRunner,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_loads
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -160,8 +160,9 @@ class GoogleAnalytics4(BaseQueryRunner):
|
||||
data = parse_ga_response(raw_result)
|
||||
|
||||
error = None
|
||||
json_data = json_dumps(data)
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
def test_connection(self):
|
||||
try:
|
||||
|
||||
@@ -11,7 +11,7 @@ from redash.query_runner import (
|
||||
BaseSQLQueryRunner,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_loads
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -151,14 +151,15 @@ class GoogleSearchConsole(BaseSQLQueryRunner):
|
||||
response = api.searchanalytics().query(siteUrl=site_url, body=params).execute()
|
||||
data = parse_ga_response(response, params["dimensions"])
|
||||
error = None
|
||||
json_data = json_dumps(data)
|
||||
except HttpError as e:
|
||||
# Make sure we return a more readable error to the end user
|
||||
error = e._get_reason()
|
||||
data = None
|
||||
json_data = None
|
||||
else:
|
||||
error = "Wrong query format."
|
||||
data = None
|
||||
return data, error
|
||||
json_data = None
|
||||
return json_data, error
|
||||
|
||||
|
||||
register(GoogleSearchConsole)
|
||||
|
||||
@@ -16,7 +16,7 @@ from redash.query_runner import (
|
||||
guess_type,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_loads
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -257,7 +257,7 @@ class GoogleSpreadsheet(BaseQueryRunner):
|
||||
|
||||
data = parse_spreadsheet(SpreadsheetWrapper(spreadsheet), worksheet_num_or_title)
|
||||
|
||||
return data, None
|
||||
return json_dumps(data), None
|
||||
except gspread.SpreadsheetNotFound:
|
||||
return (
|
||||
None,
|
||||
|
||||
@@ -10,6 +10,7 @@ from redash.query_runner import (
|
||||
BaseQueryRunner,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -34,7 +35,8 @@ def _transform_result(response):
|
||||
}
|
||||
)
|
||||
|
||||
return {"columns": columns, "rows": rows}
|
||||
data = {"columns": columns, "rows": rows}
|
||||
return json_dumps(data)
|
||||
|
||||
|
||||
class Graphite(BaseQueryRunner):
|
||||
|
||||
@@ -12,6 +12,7 @@ from redash.query_runner import (
|
||||
JobTimeoutException,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -138,6 +139,7 @@ class Hive(BaseSQLQueryRunner):
|
||||
rows = [dict(zip(column_names, row)) for row in cursor]
|
||||
|
||||
data = {"columns": columns, "rows": rows}
|
||||
json_data = json_dumps(data)
|
||||
error = None
|
||||
except (KeyboardInterrupt, JobTimeoutException):
|
||||
if connection:
|
||||
@@ -148,12 +150,12 @@ class Hive(BaseSQLQueryRunner):
|
||||
error = e.args[0].status.errorMessage
|
||||
except AttributeError:
|
||||
error = str(e)
|
||||
data = None
|
||||
json_data = None
|
||||
finally:
|
||||
if connection:
|
||||
connection.close()
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
|
||||
class HiveHttp(Hive):
|
||||
|
||||
@@ -12,6 +12,7 @@ from redash.query_runner import (
|
||||
JobTimeoutException,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
ignite_available = importlib.util.find_spec("pyignite") is not None
|
||||
gridgain_available = importlib.util.find_spec("pygridgain") is not None
|
||||
@@ -80,6 +81,8 @@ class Ignite(BaseSQLQueryRunner):
|
||||
if error is not None:
|
||||
raise Exception("Failed getting schema.")
|
||||
|
||||
results = json_loads(results)
|
||||
|
||||
for row in results["rows"]:
|
||||
if row["SCHEMA_NAME"] != self.configuration.get("schema", "PUBLIC"):
|
||||
table_name = "{}.{}".format(row["SCHEMA_NAME"], row["TABLE_NAME"])
|
||||
@@ -157,8 +160,8 @@ class Ignite(BaseSQLQueryRunner):
|
||||
)
|
||||
logger.debug("Ignite running query: %s", query)
|
||||
|
||||
result = self._parse_results(cursor)
|
||||
data = {"columns": result[0], "rows": result[1]}
|
||||
data = self._parse_results(cursor)
|
||||
json_data = json_dumps({"columns": data[0], "rows": data[1]})
|
||||
error = None
|
||||
|
||||
except (KeyboardInterrupt, JobTimeoutException):
|
||||
@@ -168,7 +171,7 @@ class Ignite(BaseSQLQueryRunner):
|
||||
if connection:
|
||||
connection.close()
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
|
||||
register(Ignite)
|
||||
|
||||
@@ -10,6 +10,7 @@ from redash.query_runner import (
|
||||
JobTimeoutException,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -119,13 +120,14 @@ class Impala(BaseSQLQueryRunner):
|
||||
rows = [dict(zip(column_names, row)) for row in cursor]
|
||||
|
||||
data = {"columns": columns, "rows": rows}
|
||||
json_data = json_dumps(data)
|
||||
error = None
|
||||
cursor.close()
|
||||
except DatabaseError as e:
|
||||
data = None
|
||||
json_data = None
|
||||
error = str(e)
|
||||
except RPCError as e:
|
||||
data = None
|
||||
json_data = None
|
||||
error = "Metastore Error [%s]" % str(e)
|
||||
except (KeyboardInterrupt, JobTimeoutException):
|
||||
connection.cancel()
|
||||
@@ -134,7 +136,7 @@ class Impala(BaseSQLQueryRunner):
|
||||
if connection:
|
||||
connection.close()
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
|
||||
register(Impala)
|
||||
|
||||
@@ -7,6 +7,7 @@ from redash.query_runner import (
|
||||
BaseQueryRunner,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -63,7 +64,7 @@ def _transform_result(results):
|
||||
else:
|
||||
result_columns = [{"name": c, "type": TYPE_STRING} for c in column_names]
|
||||
|
||||
return {"columns": result_columns, "rows": result_rows}
|
||||
return json_dumps({"columns": result_columns, "rows": result_rows})
|
||||
|
||||
|
||||
class InfluxDB(BaseQueryRunner):
|
||||
|
||||
@@ -13,6 +13,7 @@ from redash.query_runner import (
|
||||
BaseQueryRunner,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps
|
||||
|
||||
try:
|
||||
from influxdb_client import InfluxDBClient
|
||||
@@ -187,7 +188,7 @@ class InfluxDBv2(BaseQueryRunner):
|
||||
2. element: An error message, if an error occured. None, if no
|
||||
error occurred.
|
||||
"""
|
||||
data = None
|
||||
json_data = None
|
||||
error = None
|
||||
|
||||
try:
|
||||
@@ -203,12 +204,14 @@ class InfluxDBv2(BaseQueryRunner):
|
||||
tables = client.query_api().query(query)
|
||||
|
||||
data = self._get_data_from_tables(tables)
|
||||
|
||||
json_data = json_dumps(data)
|
||||
except Exception as ex:
|
||||
error = str(ex)
|
||||
finally:
|
||||
self._cleanup_cert_files(influx_kwargs)
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
|
||||
register(InfluxDBv2)
|
||||
|
||||
@@ -2,11 +2,11 @@ import re
|
||||
from collections import OrderedDict
|
||||
|
||||
from redash.query_runner import TYPE_STRING, BaseHTTPQueryRunner, register
|
||||
from redash.utils import json_loads
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
|
||||
# TODO: make this more general and move into __init__.py
|
||||
class ResultSet:
|
||||
class ResultSet(object):
|
||||
def __init__(self):
|
||||
self.columns = OrderedDict()
|
||||
self.rows = []
|
||||
@@ -26,7 +26,7 @@ class ResultSet:
|
||||
}
|
||||
|
||||
def to_json(self):
|
||||
return {"rows": self.rows, "columns": list(self.columns.values())}
|
||||
return json_dumps({"rows": self.rows, "columns": list(self.columns.values())})
|
||||
|
||||
def merge(self, set):
|
||||
self.rows = self.rows + set.rows
|
||||
|
||||
@@ -14,6 +14,7 @@ from redash.query_runner import (
|
||||
BaseHTTPQueryRunner,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps
|
||||
|
||||
|
||||
class QueryParseError(Exception):
|
||||
@@ -157,10 +158,11 @@ class JSON(BaseHTTPQueryRunner):
|
||||
def run_query(self, query, user):
|
||||
query = parse_query(query)
|
||||
|
||||
data, error = self._run_json_query(query)
|
||||
results, error = self._run_json_query(query)
|
||||
if error is not None:
|
||||
return None, error
|
||||
|
||||
data = json_dumps(results)
|
||||
if data:
|
||||
return data, None
|
||||
return None, "Got empty response from '{}'.".format(query["url"])
|
||||
|
||||
@@ -15,6 +15,7 @@ from redash.query_runner import (
|
||||
BaseQueryRunner,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -101,7 +102,7 @@ class Kylin(BaseQueryRunner):
|
||||
columns = self.get_columns(data["columnMetas"])
|
||||
rows = self.get_rows(columns, data["results"])
|
||||
|
||||
return {"columns": columns, "rows": rows}, None
|
||||
return json_dumps({"columns": columns, "rows": rows}), None
|
||||
|
||||
def get_schema(self, get_stats=False):
|
||||
url = self.configuration["url"]
|
||||
|
||||
@@ -10,6 +10,7 @@ from redash.query_runner import (
|
||||
JobTimeoutException,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -129,6 +130,7 @@ class MemSQL(BaseSQLQueryRunner):
|
||||
columns.append({"name": column, "friendly_name": column, "type": TYPE_STRING})
|
||||
|
||||
data = {"columns": columns, "rows": rows}
|
||||
json_data = json_dumps(data)
|
||||
error = None
|
||||
except (KeyboardInterrupt, JobTimeoutException):
|
||||
cursor.close()
|
||||
@@ -137,7 +139,7 @@ class MemSQL(BaseSQLQueryRunner):
|
||||
if cursor:
|
||||
cursor.close()
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
|
||||
register(MemSQL)
|
||||
|
||||
@@ -13,7 +13,7 @@ from redash.query_runner import (
BaseQueryRunner,
register,
)
from redash.utils import json_loads, parse_human_time
from redash.utils import JSONEncoder, json_dumps, json_loads, parse_human_time

logger = logging.getLogger(__name__)

@@ -42,6 +42,17 @@ TYPES_MAP = {
}

class MongoDBJSONEncoder(JSONEncoder):
def default(self, o):
if isinstance(o, ObjectId):
return str(o)
elif isinstance(o, Timestamp):
return super(MongoDBJSONEncoder, self).default(o.as_datetime())
elif isinstance(o, Decimal128):
return o.to_decimal()
return super(MongoDBJSONEncoder, self).default(o)

date_regex = re.compile(r'ISODate\("(.*)"\)', re.IGNORECASE)

@@ -160,16 +171,6 @@ class MongoDB(BaseQueryRunner):
True if "replicaSetName" in self.configuration and self.configuration["replicaSetName"] else False
)

@classmethod
def custom_json_encoder(cls, dec, o):
if isinstance(o, ObjectId):
return str(o)
elif isinstance(o, Timestamp):
return dec.default(o.as_datetime())
elif isinstance(o, Decimal128):
return o.to_decimal()
return None

def _get_db(self):
kwargs = {}
if self.is_replica_set:
@@ -347,8 +348,9 @@ class MongoDB(BaseQueryRunner):

data = {"columns": columns, "rows": rows}
error = None
json_data = json_dumps(data, cls=MongoDBJSONEncoder)

return data, error
return json_data, error

register(MongoDB)
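A small usage sketch of the encoder approach above: BSON-specific values are converted by the encoder's default() hook when the result dict is serialized with json_dumps(..., cls=...). The encoder class and sample rows here are illustrative, not Redash code; ObjectId comes from the bson package shipped with pymongo.

from bson.objectid import ObjectId

from redash.utils import JSONEncoder, json_dumps


class SketchMongoEncoder(JSONEncoder):
    def default(self, o):
        if isinstance(o, ObjectId):
            return str(o)  # ObjectIds become their hex string form
        return super().default(o)


rows = [{"_id": ObjectId(), "status": "ok"}]
payload = json_dumps(
    {"columns": [{"name": "_id"}, {"name": "status"}], "rows": rows},
    cls=SketchMongoEncoder,
)
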
@@ -8,6 +8,7 @@ from redash.query_runner import (
|
||||
JobTimeoutException,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -34,10 +35,6 @@ class SqlServer(BaseSQLQueryRunner):
|
||||
should_annotate_query = False
|
||||
noop_query = "SELECT 1"
|
||||
|
||||
limit_query = " TOP 1000"
|
||||
limit_keywords = ["TOP"]
|
||||
limit_after_select = True
|
||||
|
||||
@classmethod
|
||||
def configuration_schema(cls):
|
||||
return {
|
||||
@@ -90,6 +87,8 @@ class SqlServer(BaseSQLQueryRunner):
|
||||
if error is not None:
|
||||
self._handle_run_query_error(error)
|
||||
|
||||
results = json_loads(results)
|
||||
|
||||
for row in results["rows"]:
|
||||
if row["table_schema"] != self.configuration["db"]:
|
||||
table_name = "{}.{}".format(row["table_schema"], row["table_name"])
|
||||
@@ -141,10 +140,11 @@ class SqlServer(BaseSQLQueryRunner):
|
||||
rows = [dict(zip((column["name"] for column in columns), row)) for row in data]
|
||||
|
||||
data = {"columns": columns, "rows": rows}
|
||||
json_data = json_dumps(data)
|
||||
error = None
|
||||
else:
|
||||
error = "No data was returned."
|
||||
data = None
|
||||
json_data = None
|
||||
|
||||
cursor.close()
|
||||
connection.commit()
|
||||
@@ -155,7 +155,7 @@ class SqlServer(BaseSQLQueryRunner):
|
||||
except IndexError:
|
||||
# Connection errors are `args[0][1]`
|
||||
error = e.args[0][1]
|
||||
data = None
|
||||
json_data = None
|
||||
except (KeyboardInterrupt, JobTimeoutException):
|
||||
connection.cancel()
|
||||
raise
|
||||
@@ -163,7 +163,7 @@ class SqlServer(BaseSQLQueryRunner):
|
||||
if connection:
|
||||
connection.close()
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
|
||||
register(SqlServer)
|
||||
|
||||
@@ -6,6 +6,7 @@ from redash.query_runner import (
|
||||
register,
|
||||
)
|
||||
from redash.query_runner.mssql import types_map
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -21,10 +22,6 @@ class SQLServerODBC(BaseSQLQueryRunner):
|
||||
should_annotate_query = False
|
||||
noop_query = "SELECT 1"
|
||||
|
||||
limit_query = " TOP 1000"
|
||||
limit_keywords = ["TOP"]
|
||||
limit_after_select = True
|
||||
|
||||
@classmethod
|
||||
def configuration_schema(cls):
|
||||
return {
|
||||
@@ -97,6 +94,8 @@ class SQLServerODBC(BaseSQLQueryRunner):
|
||||
if error is not None:
|
||||
self._handle_run_query_error(error)
|
||||
|
||||
results = json_loads(results)
|
||||
|
||||
for row in results["rows"]:
|
||||
if row["table_schema"] != self.configuration["db"]:
|
||||
table_name = "{}.{}".format(row["table_schema"], row["table_name"])
|
||||
@@ -140,10 +139,11 @@ class SQLServerODBC(BaseSQLQueryRunner):
|
||||
rows = [dict(zip((column["name"] for column in columns), row)) for row in data]
|
||||
|
||||
data = {"columns": columns, "rows": rows}
|
||||
json_data = json_dumps(data)
|
||||
error = None
|
||||
else:
|
||||
error = "No data was returned."
|
||||
data = None
|
||||
json_data = None
|
||||
|
||||
cursor.close()
|
||||
except pyodbc.Error as e:
|
||||
@@ -153,7 +153,7 @@ class SQLServerODBC(BaseSQLQueryRunner):
|
||||
except IndexError:
|
||||
# Connection errors are `args[0][1]`
|
||||
error = e.args[0][1]
|
||||
data = None
|
||||
json_data = None
|
||||
except (KeyboardInterrupt, JobTimeoutException):
|
||||
connection.cancel()
|
||||
raise
|
||||
@@ -161,7 +161,7 @@ class SQLServerODBC(BaseSQLQueryRunner):
|
||||
if connection:
|
||||
connection.close()
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
|
||||
register(SQLServerODBC)
|
||||
|
||||
@@ -14,6 +14,7 @@ from redash.query_runner import (
|
||||
register,
|
||||
)
|
||||
from redash.settings import parse_boolean
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
try:
|
||||
import MySQLdb
|
||||
@@ -43,7 +44,7 @@ types_map = {
|
||||
}
|
||||
|
||||
|
||||
class Result:
|
||||
class Result(object):
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
@@ -160,6 +161,8 @@ class Mysql(BaseSQLQueryRunner):
|
||||
if error is not None:
|
||||
self._handle_run_query_error(error)
|
||||
|
||||
results = json_loads(results)
|
||||
|
||||
for row in results["rows"]:
|
||||
if row["table_schema"] != self.configuration["db"]:
|
||||
table_name = "{}.{}".format(row["table_schema"], row["table_name"])
|
||||
@@ -191,7 +194,7 @@ class Mysql(BaseSQLQueryRunner):
|
||||
t.join()
|
||||
raise
|
||||
|
||||
return r.data, r.error
|
||||
return r.json_data, r.error
|
||||
|
||||
def _run_query(self, query, user, connection, r, ev):
|
||||
try:
|
||||
@@ -213,17 +216,17 @@ class Mysql(BaseSQLQueryRunner):
|
||||
rows = [dict(zip((column["name"] for column in columns), row)) for row in data]
|
||||
|
||||
data = {"columns": columns, "rows": rows}
|
||||
r.data = data
|
||||
r.json_data = json_dumps(data)
|
||||
r.error = None
|
||||
else:
|
||||
r.data = None
|
||||
r.json_data = None
|
||||
r.error = "No data was returned."
|
||||
|
||||
cursor.close()
|
||||
except MySQLdb.Error as e:
|
||||
if cursor:
|
||||
cursor.close()
|
||||
r.data = None
|
||||
r.json_data = None
|
||||
r.error = e.args[1]
|
||||
finally:
|
||||
ev.set()
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import json
|
||||
import logging
|
||||
import traceback
|
||||
|
||||
@@ -149,7 +150,7 @@ class Netezza(BaseSQLQueryRunner):
|
||||
return typ
|
||||
|
||||
def run_query(self, query, user):
|
||||
data, error = None, None
|
||||
json_data, error = None, None
|
||||
try:
|
||||
with self.connection.cursor() as cursor:
|
||||
cursor.execute(query)
|
||||
@@ -164,10 +165,10 @@ class Netezza(BaseSQLQueryRunner):
|
||||
)
|
||||
rows = [dict(zip((column["name"] for column in columns), row)) for row in cursor]
|
||||
|
||||
data = {"columns": columns, "rows": rows}
|
||||
json_data = json.dumps({"columns": columns, "rows": rows})
|
||||
except Exception:
|
||||
error = traceback.format_exc()
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
|
||||
register(Netezza)
|
||||
|
||||
@@ -10,6 +10,7 @@ from redash.query_runner import (
|
||||
JobTimeoutException,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
try:
|
||||
import oracledb
|
||||
@@ -97,6 +98,8 @@ class Oracle(BaseSQLQueryRunner):
|
||||
if error is not None:
|
||||
self._handle_run_query_error(error)
|
||||
|
||||
results = json_loads(results)
|
||||
|
||||
for row in results["rows"]:
|
||||
if row["OWNER"] is not None:
|
||||
table_name = "{}.{}".format(row["OWNER"], row["TABLE_NAME"])
|
||||
@@ -165,17 +168,19 @@ class Oracle(BaseSQLQueryRunner):
|
||||
rows = [dict(zip((c["name"] for c in columns), row)) for row in cursor]
|
||||
data = {"columns": columns, "rows": rows}
|
||||
error = None
|
||||
json_data = json_dumps(data)
|
||||
else:
|
||||
columns = [{"name": "Row(s) Affected", "type": "TYPE_INTEGER"}]
|
||||
rows = [{"Row(s) Affected": rows_count}]
|
||||
data = {"columns": columns, "rows": rows}
|
||||
json_data = json_dumps(data)
|
||||
connection.commit()
|
||||
except oracledb.DatabaseError as err:
|
||||
(err_args,) = err.args
|
||||
line_number = query.count("\n", 0, err_args.offset) + 1
|
||||
column_number = err_args.offset - query.rfind("\n", 0, err_args.offset) - 1
|
||||
error = "Query failed at line {}, column {}: {}".format(str(line_number), str(column_number), str(err))
|
||||
data = None
|
||||
json_data = None
|
||||
except (KeyboardInterrupt, JobTimeoutException):
|
||||
connection.cancel()
|
||||
raise
|
||||
@@ -183,7 +188,7 @@ class Oracle(BaseSQLQueryRunner):
|
||||
os.environ.pop("NLS_LANG", None)
|
||||
connection.close()
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
|
||||
register(Oracle)
|
||||
|
||||
@@ -20,6 +20,7 @@ from redash.query_runner import (
JobTimeoutException,
register,
)
from redash.utils import JSONEncoder, json_dumps, json_loads

logger = logging.getLogger(__name__)

@@ -55,6 +56,20 @@ types_map = {
}

class PostgreSQLJSONEncoder(JSONEncoder):
def default(self, o):
if isinstance(o, Range):
# From: https://github.com/psycopg/psycopg2/pull/779
if o._bounds is None:
return ""

items = [o._bounds[0], str(o._lower), ", ", str(o._upper), o._bounds[1]]

return "".join(items)

return super(PostgreSQLJSONEncoder, self).default(o)

def _wait(conn, timeout=None):
while 1:
try:
@@ -183,24 +198,14 @@ class PostgreSQL(BaseSQLQueryRunner):
def type(cls):
return "pg"

@classmethod
def custom_json_encoder(cls, dec, o):
if isinstance(o, Range):
# From: https://github.com/psycopg/psycopg2/pull/779
if o._bounds is None:
return ""

items = [o._bounds[0], str(o._lower), ", ", str(o._upper), o._bounds[1]]

return "".join(items)
return None

def _get_definitions(self, schema, query):
results, error = self.run_query(query, None)

if error is not None:
self._handle_run_query_error(error)

results = json_loads(results)

build_schema(results, schema)

def _get_tables(self, schema):
@@ -277,15 +282,16 @@ class PostgreSQL(BaseSQLQueryRunner):

data = {"columns": columns, "rows": rows}
error = None
json_data = json_dumps(data, ignore_nan=True, cls=PostgreSQLJSONEncoder)
else:
error = "Query completed but it returned no data."
data = None
json_data = None
except (select.error, OSError):
error = "Query interrupted. Please retry."
data = None
json_data = None
except psycopg2.DatabaseError as e:
error = str(e)
data = None
json_data = None
except (KeyboardInterrupt, InterruptException, JobTimeoutException):
connection.cancel()
raise
@@ -293,7 +299,7 @@ class PostgreSQL(BaseSQLQueryRunner):
connection.close()
_cleanup_ssl_certs(self.ssl_config)

return data, error
return json_data, error

class Redshift(PostgreSQL):
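A hedged illustration of what the Range handling above produces: a psycopg2 range value is flattened to a "<lower>, <upper>" string wrapped in its bounds characters, e.g. "[1, 10)". The helper name is made up; like the upstream code it relies on psycopg2's private Range attributes.

from psycopg2.extras import NumericRange


def range_to_text(r):
    # Mirrors the encoder logic shown in the diff above.
    if r._bounds is None:
        return ""
    return "".join([r._bounds[0], str(r._lower), ", ", str(r._upper), r._bounds[1]])


print(range_to_text(NumericRange(1, 10, "[)")))  # -> "[1, 10)"
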
@@ -9,6 +9,7 @@ from redash.query_runner import (
|
||||
BaseQueryRunner,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -81,6 +82,8 @@ class Phoenix(BaseQueryRunner):
|
||||
if error is not None:
|
||||
self._handle_run_query_error(error)
|
||||
|
||||
results = json_loads(results)
|
||||
|
||||
for row in results["rows"]:
|
||||
table_name = "{}.{}".format(row["TABLE_SCHEM"], row["TABLE_NAME"])
|
||||
|
||||
@@ -102,16 +105,17 @@ class Phoenix(BaseQueryRunner):
|
||||
columns = self.fetch_columns(column_tuples)
|
||||
rows = [dict(zip(([column["name"] for column in columns]), r)) for i, r in enumerate(cursor.fetchall())]
|
||||
data = {"columns": columns, "rows": rows}
|
||||
json_data = json_dumps(data)
|
||||
error = None
|
||||
cursor.close()
|
||||
except Error as e:
|
||||
data = None
|
||||
json_data = None
|
||||
error = "code: {}, sql state:{}, message: {}".format(e.code, e.sqlstate, str(e))
|
||||
finally:
|
||||
if connection:
|
||||
connection.close()
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
|
||||
register(Phoenix)
|
||||
|
||||
@@ -19,6 +19,7 @@ from redash.query_runner import (
|
||||
BaseQueryRunner,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -91,11 +92,12 @@ class Pinot(BaseQueryRunner):
|
||||
|
||||
data = {"columns": columns, "rows": rows}
|
||||
error = None
|
||||
json_data = json_dumps(data)
|
||||
logger.debug("Pinot execute query [%s]", query)
|
||||
finally:
|
||||
connection.close()
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
def get_schema(self, get_stats=False):
|
||||
schema = {}
|
||||
|
||||
@@ -11,6 +11,7 @@ from redash.query_runner import (
|
||||
JobTimeoutException,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps, json_loads
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -88,6 +89,8 @@ class Presto(BaseQueryRunner):
|
||||
if error is not None:
|
||||
self._handle_run_query_error(error)
|
||||
|
||||
results = json_loads(results)
|
||||
|
||||
for row in results["rows"]:
|
||||
table_name = "{}.{}".format(row["table_schema"], row["table_name"])
|
||||
|
||||
@@ -117,9 +120,10 @@ class Presto(BaseQueryRunner):
|
||||
columns = self.fetch_columns(column_tuples)
|
||||
rows = [dict(zip(([column["name"] for column in columns]), r)) for i, r in enumerate(cursor.fetchall())]
|
||||
data = {"columns": columns, "rows": rows}
|
||||
json_data = json_dumps(data)
|
||||
error = None
|
||||
except DatabaseError as db:
|
||||
data = None
|
||||
json_data = None
|
||||
default_message = "Unspecified DatabaseError: {0}".format(str(db))
|
||||
if isinstance(db.args[0], dict):
|
||||
message = db.args[0].get("failureInfo", {"message", None}).get("message")
|
||||
@@ -130,7 +134,7 @@ class Presto(BaseQueryRunner):
|
||||
cursor.cancel()
|
||||
raise
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
|
||||
register(Presto)
|
||||
|
||||
@@ -14,6 +14,7 @@ from redash.query_runner import (
|
||||
BaseQueryRunner,
|
||||
register,
|
||||
)
|
||||
from redash.utils import json_dumps
|
||||
|
||||
|
||||
def get_instant_rows(metrics_data):
|
||||
@@ -246,7 +247,7 @@ class Prometheus(BaseQueryRunner):
|
||||
else:
|
||||
rows = get_instant_rows(metrics)
|
||||
|
||||
data = {"rows": rows, "columns": columns}
|
||||
json_data = json_dumps({"rows": rows, "columns": columns})
|
||||
|
||||
except requests.RequestException as e:
|
||||
return None, str(e)
|
||||
@@ -255,7 +256,7 @@ class Prometheus(BaseQueryRunner):
|
||||
finally:
|
||||
self._cleanup_cert_files(promehteus_kwargs)
|
||||
|
||||
return data, error
|
||||
return json_data, error
|
||||
|
||||
|
||||
register(Prometheus)
|
||||
|
@@ -23,6 +23,7 @@ from redash.query_runner import (
    BaseQueryRunner,
    register,
)
from redash.utils import json_dumps, json_loads
from redash.utils.pandas import pandas_installed

if pandas_installed:
@@ -38,7 +39,7 @@ else:
logger = logging.getLogger(__name__)


class CustomPrint:
class CustomPrint(object):
    """CustomPrint redirect "print" calls to be sent as "log" on the result object."""

    def __init__(self):
@@ -227,7 +228,7 @@ class Python(BaseQueryRunner):
            raise Exception(error)

        # TODO: allow avoiding the JSON dumps/loads in same process
        query_result = data
        query_result = json_loads(data)

        if result_type == "dataframe" and pandas_installed:
            return pd.DataFrame(query_result["rows"])
@@ -356,14 +357,15 @@ class Python(BaseQueryRunner):

            exec(code, restricted_globals, self._script_locals)

            data = self._script_locals["result"]
            self.validate_result(data)
            data["log"] = self._custom_print.lines
            result = self._script_locals["result"]
            self.validate_result(result)
            result["log"] = self._custom_print.lines
            json_data = json_dumps(result)
        except Exception as e:
            error = str(type(e)) + " " + str(e)
            data = None
            json_data = None

        return data, error
        return json_data, error


register(Python)

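The Python runner hunks show both ends of the serialization round trip that the "TODO: allow avoiding the JSON dumps/loads in same process" comment complains about: run_query json_dumps the sandboxed script's result dict, and the result-loading path immediately json_loads it back, optionally converting to a pandas DataFrame. A simplified sketch of that round trip; the function names and the in-process pairing are illustrative, and pandas is treated as optional in line with the pandas_installed flag in the diff:

import json

def serialize_script_result(result: dict, log_lines: list) -> str:
    # run_query side: attach captured print output and serialize the dict.
    result["log"] = log_lines
    return json.dumps(result)

def load_query_result(serialized: str, result_type: str = "dict"):
    # Consumer side: decode the string that run_query just produced,
    # which is the redundant dumps/loads round trip the TODO refers to.
    query_result = json.loads(serialized)
    if result_type == "dataframe":
        import pandas as pd  # only needed when a DataFrame is requested
        return pd.DataFrame(query_result["rows"])
    return query_result

if __name__ == "__main__":
    payload = serialize_script_result({"columns": [{"name": "x"}], "rows": [{"x": 1}]}, ["hello"])
    print(load_query_result(payload))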
@@ -10,6 +10,7 @@ from redash.query_runner import (
    JobTimeoutException,
    register,
)
from redash.utils import json_dumps

try:
    import qds_sdk  # noqa: F401
@@ -124,13 +125,13 @@ class Qubole(BaseQueryRunner):
                columns = self.fetch_columns([(i, TYPE_STRING) for i in data.pop(0).split("\t")])
                rows = [dict(zip((column["name"] for column in columns), row.split("\t"))) for row in data]

            data = {"columns": columns, "rows": rows}
            json_data = json_dumps({"columns": columns, "rows": rows})
        except (KeyboardInterrupt, JobTimeoutException):
            logging.info("Sending KILL signal to Qubole Command Id: %s", cmd.id)
            cmd.cancel()
            raise

        return data, error
        return json_data, error

    def get_schema(self, get_stats=False):
        schemas = {}

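Aside from the json_dumps change, the Qubole hunk is a reminder of how this runner shapes its output: the command returns tab-separated text, the first line is treated as the header, and the remaining lines are zipped into row dicts keyed by column name. A small standalone sketch of that parsing step; parse_tsv_results is an invented name, and every column is typed as string here only because that is what the TYPE_STRING mapping in the diff shows (the real fetch_columns helper does more, such as de-duplicating column names):

import json

TYPE_STRING = "string"

def parse_tsv_results(raw: str) -> str:
    # First line = column names; remaining lines become row dicts keyed by column name.
    lines = raw.splitlines()
    columns = [{"name": name, "friendly_name": name, "type": TYPE_STRING} for name in lines.pop(0).split("\t")]
    rows = [dict(zip((c["name"] for c in columns), line.split("\t"))) for line in lines]
    return json.dumps({"columns": columns, "rows": rows})

if __name__ == "__main__":
    print(parse_tsv_results("id\tname\n1\tada\n2\tgrace"))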
@@ -13,7 +13,7 @@ from redash.query_runner import (
    guess_type,
    register,
)
from redash.utils import json_dumps
from redash.utils import json_dumps, json_loads

logger = logging.getLogger(__name__)

@@ -76,6 +76,8 @@ def get_query_results(user, query_id, bring_from_cache, params=None):
        results, error = query.data_source.query_runner.run_query(query_text, user)
        if error:
            raise Exception("Failed loading results for query id {}.".format(query.id))
        else:
            results = json_loads(results)

    return results

@@ -192,15 +194,16 @@ class Results(BaseQueryRunner):

                data = {"columns": columns, "rows": rows}
                error = None
                json_data = json_dumps(data)
            else:
                error = "Query completed but it returned no data."
                data = None
                json_data = None
        except (KeyboardInterrupt, JobTimeoutException):
            connection.cancel()
            raise
        finally:
            connection.close()
        return data, error
        return json_data, error


register(Results)

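The query_results hunks show the consumer side of the restored contract inside the "Query Results" data source: get_query_results runs the referenced query through its own runner and, since that runner now returns a JSON string, decodes it with json_loads before the rows are loaded into the in-memory SQLite database this runner queries. A hedged sketch of that step; the fake_runner callable and the simplified error handling stand in for the real data-source plumbing:

import json

def get_query_results(run_query, query_text: str) -> dict:
    # run_query stands in for query.data_source.query_runner.run_query(query_text, user).
    results, error = run_query(query_text)
    if error:
        raise Exception("Failed loading results for query.")
    return json.loads(results)  # decode the serialized payload before reuse

if __name__ == "__main__":
    def fake_runner(_query):
        return json.dumps({"columns": [{"name": "n"}], "rows": [{"n": 42}]}), None

    print(get_query_results(fake_runner, "SELECT 42 AS n"))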
@@ -8,6 +8,7 @@ from redash.query_runner import (
    BaseSQLQueryRunner,
    register,
)
from redash.utils import json_dumps


def _get_type(value):
@@ -24,7 +25,7 @@ def _get_type(value):

# The following is here, because Rockset's PyPi package is Python 3 only.
# Should be removed once we move to Python 3.
class RocksetAPI:
class RocksetAPI(object):
    def __init__(self, api_key, api_server, vi_id):
        self.api_key = api_key
        self.api_server = api_server
@@ -120,7 +121,7 @@ class Rockset(BaseSQLQueryRunner):
        columns = []
        for k in rows[0]:
            columns.append({"name": k, "friendly_name": k, "type": _get_type(rows[0][k])})
        data = {"columns": columns, "rows": rows}
        data = json_dumps({"columns": columns, "rows": rows})
        return data, None

@@ -12,6 +12,7 @@ from redash.query_runner import (
    BaseQueryRunner,
    register,
)
from redash.utils import json_dumps

logger = logging.getLogger(__name__)

@@ -165,10 +166,11 @@ class Salesforce(BaseQueryRunner):
            columns = self.fetch_columns(cols)
            error = None
            data = {"columns": columns, "rows": rows}
            json_data = json_dumps(data)
        except SalesforceError as err:
            error = err.content
            data = None
        return data, error
            json_data = None
        return json_data, error

    def get_schema(self, get_stats=False):
        sf = self._get_sf()

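The Salesforce hunk illustrates the error path under the JSON-string contract: on SalesforceError the runner keeps the exception's content as the error and returns None in place of the serialized payload, so callers always get a (json_data, error) pair with exactly one meaningful half. A small sketch of that shape, with a stand-in exception class instead of the SalesforceError caught in the diff:

import json

class FakeSalesforceError(Exception):
    # Stand-in for the SalesforceError in the diff, assumed to carry a .content payload.
    def __init__(self, content):
        super().__init__(str(content))
        self.content = content

def run_query(fetch):
    try:
        columns, rows = fetch()
        json_data = json.dumps({"columns": columns, "rows": rows})
        error = None
    except FakeSalesforceError as err:
        error = err.content
        json_data = None
    return json_data, error

def failing_fetch():
    raise FakeSalesforceError("INVALID_FIELD: bad column")

if __name__ == "__main__":
    print(run_query(lambda: ([{"name": "Id"}], [{"Id": "001"}])))
    print(run_query(failing_fetch))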
@@ -17,6 +17,7 @@ from redash.query_runner import (
    BaseSQLQueryRunner,
    register,
)
from redash.utils import json_dumps

TYPES_MAP = {
    0: TYPE_INTEGER,
@@ -134,11 +135,12 @@ class Snowflake(BaseSQLQueryRunner):

            data = self._parse_results(cursor)
            error = None
            json_data = json_dumps(data)
        finally:
            cursor.close()
            connection.close()

        return data, error
        return json_data, error

    def _run_query_without_warehouse(self, query):
        connection = self._get_connection()

Some files were not shown because too many files have changed in this diff.