# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# This module is used for common utilities related to parsing test files
import collections
import logging
import re
from collections import defaultdict
from os.path import isfile, isdir
from tests.common.test_dimensions import TableFormatInfo

logging.basicConfig(level=logging.INFO, format='%(threadName)s: %(message)s')
LOG = logging.getLogger('impala_test_suite')

# constants
SECTION_DELIMITER = "===="
SUBSECTION_DELIMITER = "----"

# The QueryTestSectionReader provides utility functions that help to parse content
# from a query test file
class QueryTestSectionReader(object):
  @staticmethod
  def build_query(query_section_text, table_format_info, scale_factor):
    """
    Build a well formed query.

    Given the various test parameters, construct the query that will be executed. This
    does more work than replace_table_suffix because it needs to properly replace the
    database name based on the given scale factor
    """
    query_section_text = remove_comments(query_section_text)
    return query_section_text.rstrip(';')
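  # Illustrative example (hypothetical query text): calling
  #   build_query('# count rows\nselect count(*) from alltypes;', None, '')
  # strips the comment line via remove_comments() and the trailing semicolon,
  # returning 'select count(*) from alltypes'. Note that table_format_info and
  # scale_factor are accepted but not used by the current implementation.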

  @staticmethod
  def get_db_name(table_format, scale_factor=''):
    """
    Get the database name to use.

    Database names are dependent on the scale factor, file format, compression type
    and compression codec. This method returns the appropriate database name to the
    caller based on the table format information provided.
    """
    if table_format.file_format == 'text' and table_format.compression_codec == 'none':
      suffix = ''
    elif table_format.compression_codec == 'none':
      suffix = '_%s' % (table_format.file_format)
    elif table_format.compression_type == 'record':
      suffix = '_%s_record_%s' % (table_format.file_format,
                                  table_format.compression_codec)
    else:
      suffix = '_%s_%s' % (table_format.file_format, table_format.compression_codec)
    dataset = table_format.dataset.replace('-', '')
    return dataset + scale_factor + suffix
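  # Illustrative examples of the naming scheme above (abbreviations such as 'seq' and
  # 'snap' are assumed values for file_format/compression_codec):
  #   text/none               -> '<dataset><scale_factor>'            (no suffix)
  #   seq/none                -> '<dataset><scale_factor>_seq'
  #   seq/snap, type='record' -> '<dataset><scale_factor>_seq_record_snap'
  #   seq/snap, type='block'  -> '<dataset><scale_factor>_seq_snap'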


def remove_comments(section_text):
  return '\n'.join([l for l in section_text.split('\n') if not l.strip().startswith('#')])

def parse_query_test_file(file_name):
  """
  Reads the specified query test file

  Returns the result as a list of dictionaries. Each dictionary in the list corresponds
  to a test case and each key in the dictionary maps to a section in that test case.
  """
  # Update the valid section names as we support other test types
  # (ex. planner, data error)
  VALID_SECTION_NAMES = ['QUERY', 'RESULTS', 'TYPES', 'SETUP']
  return parse_test_file(file_name, VALID_SECTION_NAMES)
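# Illustrative example of the returned structure (section text is hypothetical):
#   parse_query_test_file('example.test') might return
#   [{'QUERY': 'select count(*) from alltypes', 'RESULTS': '7300', 'TYPES': 'bigint'}]
# where each key holds the raw text of the corresponding section.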

def parse_table_constraints(constraints_file):
  """Reads a table constraints file, if one exists"""
  schema_include = defaultdict(list)
  schema_exclude = defaultdict(list)
  if not isfile(constraints_file):
    LOG.info('No schema constraints file found')
  else:
    with open(constraints_file, 'rb') as constraints_file:
      for line in constraints_file.readlines():
        line = line.strip()
        if not line or line.startswith('#'):
          continue
        # Format: table_name:<name>, constraint_type:<type>, table_format:<t1>,<t2>,...
        table_name, constraint_type, table_formats =\
            [value.split(':')[1].strip() for value in line.split(',', 2)]
        if constraint_type == 'restrict_to':
          schema_include[table_name.lower()] +=\
              map(parse_table_format_constraint, table_formats.split(','))
        elif constraint_type == 'exclude':
          schema_exclude[table_name.lower()] +=\
              map(parse_table_format_constraint, table_formats.split(','))
        else:
          raise ValueError, 'Unknown constraint type: %s' % constraint_type
  return schema_include, schema_exclude
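# Illustrative constraints file line (the table format string is an assumed example):
#   table_name:alltypes, constraint_type:restrict_to, table_format:text/none/none
# would yield schema_include == {'alltypes': ['text/none/none']} and an empty
# schema_exclude. A constraint_type of 'exclude' populates schema_exclude instead.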

def parse_table_format_constraint(table_format_constraint):
  # TODO: Expand how we parse table format constraints to support syntax such as
  # a table format string with a wildcard character. Right now we don't do anything.
  return table_format_constraint

def parse_test_file(test_file_name, valid_section_names, skip_unknown_sections=True):
  """
  Parses an Impala test file

  Test files have the format:
  ==== <- Section
  ---- [Name] <- Named subsection
  // some text
  ---- [Name2] <- Named subsection
  ...
  ====

  The valid section names are passed in to this function.
  """
  with open(test_file_name, 'rb') as test_file:
    return parse_test_file_text(test_file.read(), valid_section_names,
                                skip_unknown_sections)
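# Illustrative minimal test file accepted by parse_test_file (contents assumed):
#   ====
#   ---- QUERY
#   select 1
#   ---- RESULTS
#   1
#   ====
# Parsing it with valid_section_names=['QUERY', 'RESULTS'] returns a single-element
# list whose dictionary maps 'QUERY' to 'select 1' and 'RESULTS' to '1'.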

def parse_test_file_text(text, valid_section_names, skip_unknown_sections=True):
  sections = list()
  section_start_regex = re.compile(r'(?m)^%s' % SECTION_DELIMITER)
  match = section_start_regex.search(text)
  if match is not None:
    # Assume anything before the first section (==== tag) is a header and ignore it
    text = text[match.start():]

  # Split the test file up into sections. For each section, parse all subsections.
  for section in section_start_regex.split(text):
    parsed_sections = collections.defaultdict(str)
    for sub_section in re.split(r'(?m)^%s' % SUBSECTION_DELIMITER, section[1:]):
      # Skip empty subsections
      if not sub_section:
        continue

      lines = sub_section.split('\n')
      subsection_name = lines[0].strip()
      subsection_comment = None

      subsection_info = [s.strip() for s in subsection_name.split(':')]
      if len(subsection_info) == 2:
        subsection_name, subsection_comment = subsection_info

      if subsection_name not in valid_section_names:
        if skip_unknown_sections or not subsection_name:
          print sub_section
          print 'Unknown section %s' % subsection_name
          continue
        else:
          raise RuntimeError, 'Unknown subsection: %s' % subsection_name

      if subsection_name == 'QUERY' and subsection_comment:
        parsed_sections['QUERY_NAME'] = subsection_comment

      if subsection_name == 'RESULTS' and subsection_comment:
        parsed_sections['VERIFIER'] = subsection_comment

      parsed_sections[subsection_name] = '\n'.join([line for line in lines[1:-1]])

    if parsed_sections:
      sections.append(parsed_sections)
  return sections
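# Illustrative subsection annotations (names are hypothetical): a header such as
#   ---- QUERY: my_named_query
# stores 'my_named_query' under the extra key 'QUERY_NAME', and
#   ---- RESULTS: MY_VERIFIER
# stores 'MY_VERIFIER' under 'VERIFIER'; the annotation is split off the section name
# at the ':' before the name is validated.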


def write_test_file(test_file_name, test_file_sections):
  """
  Given a list of test file sections, write out the corresponding test file

  This is useful when updating the results of a test.
  """
  with open(test_file_name, 'w') as test_file:
    test_file_text = list()
    for test_case in test_file_sections:
      test_file_text.append(SECTION_DELIMITER)
      for section_name, section_value in test_case.items():
        # Have to special case query name and verifier because they have annotations
        # in the headers
        if section_name in ['QUERY_NAME', 'VERIFIER']:
          continue

        # TODO: We need a more generic way of persisting the old test file.
        # Special casing will blow up.
        full_section_name = section_name
        if section_name == 'QUERY' and test_case.get('QUERY_NAME'):
          full_section_name = '%s: %s' % (section_name, test_case['QUERY_NAME'])
        if section_name == 'RESULTS' and test_case.get('VERIFIER'):
          full_section_name = '%s: %s' % (section_name, test_case['VERIFIER'])
        test_file_text.append("%s %s" % (SUBSECTION_DELIMITER, full_section_name))
        test_file_text.append(test_case[section_name])
      test_file_text.append(SECTION_DELIMITER)
    test_file.write('\n'.join(test_file_text))
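# Illustrative round trip (file name and edit are hypothetical):
#   sections = parse_query_test_file('example.test')
#   sections[0]['RESULTS'] = '2'  # update an expected result
#   write_test_file('example.test', sections)
# Note that subsections within each test case are written back in dictionary iteration
# order, which may differ from the order they appeared in the original file.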