mirror of
https://github.com/apache/impala.git
synced 2026-01-05 12:01:11 -05:00
Previously Impala disallowed LOAD DATA and INSERT on S3. This patch functionally enables LOAD DATA and INSERT on S3 without making major changes for the sake of improving performance over S3. This patch also enables both INSERT and LOAD DATA between file systems. S3 does not support the rename operation, so the staged files in S3 are copied instead of renamed, which contributes to the slow performance on S3. The FinalizeSuccessfulInsert() function now does not make any underlying assumptions about the filesystem it is on and works across all supported filesystems. This is done by adding a full URI field to the base directory for a partition in the TInsertPartitionStatus. Also, the HdfsOp class now does not assume a single filesystem and gets connections to the filesystems based on the URI of the file it is operating on. Added a python S3 client called 'boto3' to access S3 from the python tests. A new class called S3Client is introduced which creates wrappers around the boto3 functions that have the same function signatures as PyWebHdfsClient, by deriving from a base abstract class BaseFileSystem, so that they can be used interchangeably through a 'generic_client'. test_load.py is refactored to use this generic client. The ImpalaTestSuite setup creates a client according to the TARGET_FILESYSTEM environment variable and assigns it to the 'generic_client'. P.S: Currently, the test_load.py runs 4x slower on S3 than on HDFS. Performance needs to be improved in future patches. INSERT performance is slower than on HDFS too. This is mainly because of an extra copy that happens between staging and the final location of a file. However, larger INSERTs come closer to HDFS performance than smaller inserts. ACLs are not taken care of for S3 in this patch. It is something that still needs to be discussed before implementing. 
Change-Id: I94e15ad67752dce21c9b7c1dced6e114905a942d Reviewed-on: http://gerrit.cloudera.org:8080/2574 Reviewed-by: Sailesh Mukil <sailesh@cloudera.com> Tested-by: Internal Jenkins
179 lines
7.0 KiB
Python
179 lines
7.0 KiB
Python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
|
|
# General Impala query tests
|
|
#
|
|
import copy
|
|
import logging
|
|
import os
|
|
import pytest
|
|
from tests.common.impala_test_suite import ImpalaTestSuite
|
|
from tests.common.test_vector import *
|
|
from tests.common.test_dimensions import create_uncompressed_text_dimension
|
|
from tests.common.test_dimensions import create_exec_option_dimension
|
|
from tests.common.skip import SkipIfS3
|
|
|
|
class TestQueries(ImpalaTestSuite):
  """Core functional query tests, run across the standard table-format matrix."""

  @classmethod
  def add_test_dimensions(cls):
    super(TestQueries, cls).add_test_dimensions()
    # In 'core' exploration, keep runtime down by only exercising parquet.
    if cls.exploration_strategy() == 'core':
      cls.TestMatrix.add_constraint(
          lambda v: v.get_value('table_format').file_format == 'parquet')

    # Manually adding a test dimension here to test the small query opt
    # in exhaustive.
    # TODO Cleanup required, allow adding values to dimensions without having to
    # manually explode them
    if cls.exploration_strategy() == 'exhaustive':
      dim = cls.TestMatrix.dimensions["exec_option"]
      new_value = []
      # Duplicate every existing exec_option value with the small-query
      # optimization threshold enabled, then fold the copies back in.
      for v in dim:
        new_value.append(TestVector.Value(v.name, copy.copy(v.value)))
        new_value[-1].value["exec_single_node_rows_threshold"] = 100
      dim.extend(new_value)
      cls.TestMatrix.add_dimension(dim)

  @classmethod
  def get_workload(cls):
    return 'functional-query'

  def test_hdfs_scan_node(self, vector):
    self.run_test_case('QueryTest/hdfs-scan-node', vector)

  def test_analytic_fns(self, vector):
    # TODO: Enable some of these tests for Avro if possible
    # Don't attempt to evaluate timestamp expressions with Avro tables which doesn't
    # support a timestamp type yet
    table_format = vector.get_value('table_format')
    if table_format.file_format == 'avro':
      pytest.skip()
    if table_format.file_format == 'hbase':
      pytest.xfail("A lot of queries check for NULLs, which hbase does not recognize")
    self.run_test_case('QueryTest/analytic-fns', vector)

  def test_file_partitions(self, vector):
    self.run_test_case('QueryTest/hdfs-partitions', vector)

  def test_limit(self, vector):
    if vector.get_value('table_format').file_format == 'hbase':
      pytest.xfail("IMPALA-283 - select count(*) produces inconsistent results")
    self.run_test_case('QueryTest/limit', vector)

  def test_top_n(self, vector):
    if vector.get_value('table_format').file_format == 'hbase':
      pytest.xfail(reason="IMPALA-283 - select count(*) produces inconsistent results")
    # QueryTest/top-n is also run in test_sort with disable_outermost_topn = 1
    self.run_test_case('QueryTest/top-n', vector)

  def test_union(self, vector):
    self.run_test_case('QueryTest/union', vector)

  def test_sort(self, vector):
    if vector.get_value('table_format').file_format == 'hbase':
      pytest.xfail(reason="IMPALA-283 - select count(*) produces inconsistent results")
    vector.get_value('exec_option')['disable_outermost_topn'] = 1
    self.run_test_case('QueryTest/sort', vector)
    # We can get the sort tests for free from the top-n file
    self.run_test_case('QueryTest/top-n', vector)

  def test_inline_view(self, vector):
    if vector.get_value('table_format').file_format == 'hbase':
      pytest.xfail("jointbl does not have columns with unique values, "
                   "hbase collapses them")
    self.run_test_case('QueryTest/inline-view', vector)

  def test_inline_view_limit(self, vector):
    self.run_test_case('QueryTest/inline-view-limit', vector)

  def test_subquery(self, vector):
    self.run_test_case('QueryTest/subquery', vector)

  def test_subplans(self, vector):
    # The unconditional xfail below disables this test until nested types
    # functionality lands; the code after it is kept for re-enabling later.
    pytest.xfail("Disabled due to missing nested types functionality.")
    if vector.get_value('table_format').file_format != 'parquet':
      pytest.xfail("Nested TPCH only available in parquet.")
    # Fixed: the test-file name was garbled as 'subplannull_data' (fused with
    # test_null_data's file); this test exercises the subplans file.
    self.run_test_case('QueryTest/subplans', vector)

  def test_empty(self, vector):
    self.run_test_case('QueryTest/empty', vector)

  def test_views(self, vector):
    if vector.get_value('table_format').file_format == "hbase":
      pytest.xfail("TODO: Enable views tests for hbase")
    self.run_test_case('QueryTest/views', vector)

  def test_with_clause(self, vector):
    if vector.get_value('table_format').file_format == "hbase":
      pytest.xfail("TODO: Enable with clause tests for hbase")
    self.run_test_case('QueryTest/with-clause', vector)

  def test_misc(self, vector):
    table_format = vector.get_value('table_format')
    if table_format.file_format in ['hbase', 'rc', 'parquet']:
      msg = ("Failing on rc/snap/block despite resolution of IMP-624,IMP-503. "
             "Failing on parquet because tables do not exist")
      pytest.xfail(msg)
    self.run_test_case('QueryTest/misc', vector)

  def test_null_data(self, vector):
    if vector.get_value('table_format').file_format == 'hbase':
      pytest.xfail("null data does not appear to work in hbase")
    self.run_test_case('QueryTest/null_data', vector)
|
# Tests in this class are only run against text/none either because that's the only
|
|
# format that is supported, or the tests don't exercise the file format.
|
|
class TestQueriesTextTables(ImpalaTestSuite):
  """Queries that either require text/none tables or don't exercise the file
  format at all, so a single uncompressed-text dimension is sufficient."""

  @classmethod
  def get_workload(cls):
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestQueriesTextTables, cls).add_test_dimensions()
    # Collapse the table-format dimension down to uncompressed text only.
    cls.TestMatrix.add_dimension(
        create_uncompressed_text_dimension(cls.get_workload()))

  def test_overflow(self, vector):
    self.run_test_case('QueryTest/overflow', vector)

  def test_data_source_tables(self, vector):
    self.run_test_case('QueryTest/data-source-tables', vector)

  def test_distinct_estimate(self, vector):
    # Results vary slightly depending on how the values get split up across
    # nodes, so pin execution to a single node (text-only via the class
    # dimension above).
    exec_options = vector.get_value('exec_option')
    exec_options['num_nodes'] = 1
    self.run_test_case('QueryTest/distinct-estimate', vector)

  def test_mixed_format(self, vector):
    self.run_test_case('QueryTest/mixed-format', vector)

  def test_values(self, vector):
    self.run_test_case('QueryTest/values', vector)
|
# Tests in this class are only run against Parquet because the tests don't exercise the
|
|
# file format.
|
|
class TestQueriesParquetTables(ImpalaTestSuite):
  """Tests that don't exercise the file format; restricted to Parquet only."""

  @classmethod
  def get_workload(cls):
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestQueriesParquetTables, cls).add_test_dimensions()
    # File format is irrelevant to these tests, so run them on Parquet alone.
    cls.TestMatrix.add_constraint(
        lambda v: v.get_value('table_format').file_format == 'parquet')

  @pytest.mark.execute_serially
  def test_very_large_strings(self, vector):
    """Regression test for IMPALA-1619. Doesn't need to be run on all file formats.
    Executes serially to avoid large random spikes in mem usage."""
    self.run_test_case('QueryTest/large_strings', vector)

  def test_single_node_large_sorts(self, vector):
    # These sorts are expensive; restrict them to exhaustive runs.
    if self.exploration_strategy() != 'exhaustive':
      pytest.skip("only run large sorts on exhaustive")

    exec_options = vector.get_value('exec_option')
    exec_options['disable_outermost_topn'] = 1
    exec_options['num_nodes'] = 1
    self.run_test_case('QueryTest/single-node-large-sorts', vector)