Add more flake8 tests and fail build if any test fails (#4055)

* Add more flake8 tests and fail build if any test fails

Run all flake8 E9xx + F63x + F7xx + F82x tests.

* long = long in Python 2
This commit is contained in:
Christian Clauss
2019-08-18 10:27:44 +02:00
committed by Arik Fraimovich
parent a1f11cb8d9
commit d38ca803c5
13 changed files with 50 additions and 20 deletions

View File

@@ -1,7 +1,9 @@
#!/bin/sh
+set -o errexit # fail the build if any task fails
flake8 --version ; pip --version
# stop the build if there are Python syntax errors or undefined names
-flake8 . --count --select=E901,E999,F821,F822,F823 --show-source --statistics
+flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics

View File

@@ -113,7 +113,7 @@ def sp_initiated(org_slug=None):
redirect_url = None
# Select the IdP URL to send the AuthN request to
for key, value in info['headers']:
-if key is 'Location':
+if key == 'Location':
redirect_url = value
response = redirect(redirect_url, code=302)

View File

@@ -6,13 +6,15 @@ from redash.permissions import require_access, view_only
from funcy import distinct
from dateutil.parser import parse
from six import string_types, text_type
def _pluck_name_and_value(default_column, row):
row = {k.lower(): v for k, v in row.items()}
name_column = "name" if "name" in row.keys() else default_column.lower()
value_column = "value" if "value" in row.keys() else default_column.lower()
-return {"name": row[name_column], "value": unicode(row[value_column])}
+return {"name": row[name_column], "value": text_type(row[value_column])}
def _load_result(query_id, org):
@@ -107,8 +109,8 @@ def _is_date_range(obj):
def _is_value_within_options(value, dropdown_options, allow_list=False):
if isinstance(value, list):
-return allow_list and set(map(unicode, value)).issubset(set(dropdown_options))
-return unicode(value) in dropdown_options
+return allow_list and set(map(text_type, value)).issubset(set(dropdown_options))
+return text_type(value) in dropdown_options
class ParameterizedQuery(object):
@@ -142,11 +144,11 @@ class ParameterizedQuery(object):
query_id = definition.get('queryId')
allow_multiple_values = isinstance(definition.get('multiValuesOptions'), dict)
-if isinstance(enum_options, basestring):
+if isinstance(enum_options, string_types):
enum_options = enum_options.split('\n')
validators = {
-"text": lambda value: isinstance(value, basestring),
+"text": lambda value: isinstance(value, string_types),
"number": _is_number,
"enum": lambda value: _is_value_within_options(value,
enum_options,

View File

@@ -3,6 +3,8 @@ import logging
from dateutil import parser
import requests
from six import text_type
from redash import settings
from redash.utils import json_loads
@@ -299,7 +301,7 @@ def guess_type_from_string(string_value):
except (ValueError, OverflowError):
pass
-if unicode(string_value).lower() in ('true', 'false'):
+if text_type(string_value).lower() in ('true', 'false'):
return TYPE_BOOLEAN
try:

View File

@@ -2,9 +2,11 @@ import datetime
import logging
from dateutil.parser import parse
from six import text_type
from redash.query_runner import *
from redash.utils import JSONEncoder, json_dumps, json_loads, parse_human_time
from redash.utils.compat import long
import json
logger = logging.getLogger(__name__)
@@ -17,7 +19,7 @@ except ImportError as e:
TYPES_MAP = {
str: TYPE_STRING,
-unicode: TYPE_STRING,
+text_type: TYPE_STRING,
int: TYPE_INTEGER,
long: TYPE_INTEGER,
float: TYPE_FLOAT,

View File

@@ -4,6 +4,8 @@ import re
from dateutil import parser
from six import text_type
from redash.query_runner import (
BaseHTTPQueryRunner, register,
TYPE_DATETIME, TYPE_INTEGER, TYPE_FLOAT, TYPE_BOOLEAN,
@@ -26,12 +28,12 @@ def convert_type(string_value, actual_type):
return float(string_value)
if actual_type == TYPE_BOOLEAN:
-return unicode(string_value).lower() == 'true'
+return text_type(string_value).lower() == 'true'
if actual_type == TYPE_DATETIME:
return parser.parse(string_value)
-return unicode(string_value)
+return text_type(string_value)
# Parse Drill API response and translate it to accepted format

View File

@@ -4,9 +4,11 @@ import urllib
import requests
from requests.auth import HTTPBasicAuth
from six import string_types, text_type
from redash.query_runner import *
from redash.utils import json_dumps, json_loads
from redash.utils.compat import long
try:
import http.client as http_client
@@ -35,7 +37,7 @@ ELASTICSEARCH_BUILTIN_FIELDS_MAPPING = {
PYTHON_TYPES_MAPPING = {
str: TYPE_STRING,
-unicode: TYPE_STRING,
+text_type: TYPE_STRING,
bool: TYPE_BOOLEAN,
int: TYPE_INTEGER,
long: TYPE_INTEGER,
@@ -345,7 +347,7 @@ class Kibana(BaseElasticSearch):
result_columns = []
result_rows = []
-if isinstance(query_data, str) or isinstance(query_data, unicode):
+if isinstance(query_data, string_types):
_from = 0
while True:
query_size = size if limit >= (_from + size) else (limit - _from)

View File

@@ -5,7 +5,9 @@ import ipaddress
import datetime
from urlparse import urlparse
from funcy import compact, project
from six import text_type
from redash.utils import json_dumps
from redash.utils.compat import long
from redash.query_runner import (BaseHTTPQueryRunner, register,
TYPE_BOOLEAN, TYPE_DATETIME, TYPE_FLOAT,
TYPE_INTEGER, TYPE_STRING)
@@ -25,19 +27,19 @@ def parse_query(query):
return params
except ValueError as e:
logging.exception(e)
-error = unicode(e)
+error = text_type(e)
raise QueryParseError(error)
def is_private_address(url):
hostname = urlparse(url).hostname
ip_address = socket.gethostbyname(hostname)
-return ipaddress.ip_address(unicode(ip_address)).is_private
+return ipaddress.ip_address(text_type(ip_address)).is_private
TYPES_MAP = {
str: TYPE_STRING,
-unicode: TYPE_STRING,
+text_type: TYPE_STRING,
int: TYPE_INTEGER,
long: TYPE_INTEGER,
float: TYPE_FLOAT,

View File

@@ -3,9 +3,11 @@ import logging
import re
from dateutil.parser import parse
from six import string_types, text_type
from redash.query_runner import *
from redash.utils import JSONEncoder, json_dumps, json_loads, parse_human_time
from redash.utils.compat import long
logger = logging.getLogger(__name__)
@@ -24,7 +26,7 @@ except ImportError:
TYPES_MAP = {
str: TYPE_STRING,
-unicode: TYPE_STRING,
+text_type: TYPE_STRING,
int: TYPE_INTEGER,
long: TYPE_INTEGER,
float: TYPE_FLOAT,
@@ -56,7 +58,7 @@ def parse_oids(oids):
def datetime_parser(dct):
for k, v in dct.iteritems():
-if isinstance(v, basestring):
+if isinstance(v, string_types):
m = date_regex.findall(v)
if len(m) > 0:
dct[k] = parse(m[0], yearfirst=True)

View File

@@ -21,6 +21,11 @@ from sqlalchemy.orm.query import Query
from .human_time import parse_human_time
try:
buffer
except NameError:
buffer = bytes
COMMENTS_REGEX = re.compile("/\*.*?\*/")
WRITER_ENCODING = os.environ.get('REDASH_CSV_WRITER_ENCODING', 'utf-8')
WRITER_ERRORS = os.environ.get('REDASH_CSV_WRITER_ERRORS', 'strict')

4
redash/utils/compat.py Normal file
View File

@@ -0,0 +1,4 @@
try:
long = long
except NameError:
long = int

View File

@@ -26,7 +26,7 @@ pytz==2016.7
PyYAML==3.12
redis==3.2.1
requests==2.21.0
-six==1.11.0
+six==1.12.0
SQLAlchemy==1.2.12
# We can't upgrade SQLAlchemy-Searchable version as newer versions require PostgreSQL > 9.6, but we target older versions at the moment.
SQLAlchemy-Searchable==0.10.6
@@ -56,7 +56,7 @@ user-agents==1.1.0
python-geoip-geolite2==2015.303
chromelogger==0.4.3
pypd==1.1.0
-disposable-email-domains
+disposable-email-domains>=0.0.52
gevent==1.4.0
# Install the dependencies of the bin/bundle-extensions script here.
# It has its own requirements file to simplify the frontend client build process

View File

@@ -4,6 +4,11 @@ from unittest import TestCase
from redash.utils import (build_url, collect_parameters_from_request,
filter_none, json_dumps, generate_token)
try:
buffer
except NameError:
buffer = bytes
DummyRequest = namedtuple('DummyRequest', ['host', 'scheme'])