mirror of
https://github.com/apache/impala.git
synced 2026-01-22 18:02:34 -05:00
Previously Impala disallowed LOAD DATA and INSERT on S3. This patch functionally enables LOAD DATA and INSERT on S3 without making major changes for the sake of improving performance over S3. This patch also enables both INSERT and LOAD DATA between file systems. S3 does not support the rename operation, so the staged files in S3 are copied instead of renamed, which contributes to the slow performance on S3. The FinalizeSuccessfulInsert() function now does not make any underlying assumptions of the filesystem it is on and works across all supported filesystems. This is done by adding a full URI field to the base directory for a partition in the TInsertPartitionStatus. Also, the HdfsOp class now does not assume a single filesystem and gets connections to the filesystems based on the URI of the file it is operating on. Added a python S3 client called 'boto3' to access S3 from the python tests. A new class called S3Client is introduced which creates wrappers around the boto3 functions and has the same function signatures as PyWebHdfsClient by deriving from a base abstract class BaseFileSystem so that they can be used interchangeably through a 'generic_client'. test_load.py is refactored to use this generic client. The ImpalaTestSuite setup creates a client according to the TARGET_FILESYSTEM environment variable and assigns it to the 'generic_client'. P.S: Currently, the test_load.py runs 4x slower on S3 than on HDFS. Performance needs to be improved in future patches. INSERT performance is slower than on HDFS too. This is mainly because of an extra copy that happens between staging and the final location of a file. However, larger INSERTs come closer to HDFS performance than smaller inserts. ACLs are not taken care of for S3 in this patch. It is something that still needs to be discussed before implementing. 
Change-Id: I94e15ad67752dce21c9b7c1dced6e114905a942d Reviewed-on: http://gerrit.cloudera.org:8080/2574 Reviewed-by: Sailesh Mukil <sailesh@cloudera.com> Tested-by: Internal Jenkins
134 lines
6.3 KiB
Python
134 lines
6.3 KiB
Python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
|
|
# Functional tests running EXPLAIN statements.
|
|
#
|
|
import logging
|
|
import pytest
|
|
import re
|
|
from tests.common.test_vector import *
|
|
from tests.common.impala_test_suite import *
|
|
from tests.common.skip import SkipIfS3, SkipIfLocal
|
|
from tests.util.filesystem_utils import WAREHOUSE
|
|
|
|
# Tests the different explain levels [0-3] on a few queries.
|
|
# TODO: Clean up this test to use an explain level test dimension and appropriate
|
|
# result sub-sections for the expected explain plans.
|
|
class TestExplain(ImpalaTestSuite):
  """Tests the different explain levels [0-3] on a few queries and validates the
  cardinality estimates reported for partitioned tables.

  TODO: Clean up this test to use an explain level test dimension and appropriate
  result sub-sections for the expected explain plans.
  """

  # Value for the num_scanner_threads query option to ensure that the memory estimates of
  # scan nodes are consistent even when run on machines with different numbers of cores.
  NUM_SCANNER_THREADS = 1

  @classmethod
  def get_workload(cls):
    # Fixed: classmethod first parameter renamed from 'self' to 'cls'.
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestExplain, cls).add_test_dimensions()
    # Constrain to uncompressed text with default batch size, codegen enabled and
    # more than one node, so the expected explain plans are stable across vectors.
    cls.TestMatrix.add_constraint(lambda v:\
        v.get_value('table_format').file_format == 'text' and\
        v.get_value('table_format').compression_codec == 'none' and\
        v.get_value('exec_option')['batch_size'] == 0 and\
        v.get_value('exec_option')['disable_codegen'] == False and\
        v.get_value('exec_option')['num_nodes'] != 1)

  @pytest.mark.xfail(run=False, reason="Expected per-host mem requirements inconsistent")
  def test_explain_level0(self, vector):
    vector.get_value('exec_option')['num_scanner_threads'] = self.NUM_SCANNER_THREADS
    vector.get_value('exec_option')['explain_level'] = 0
    self.run_test_case('QueryTest/explain-level0', vector)

  @pytest.mark.xfail(run=False, reason="Expected per-host mem requirements inconsistent")
  def test_explain_level1(self, vector):
    vector.get_value('exec_option')['num_scanner_threads'] = self.NUM_SCANNER_THREADS
    vector.get_value('exec_option')['explain_level'] = 1
    self.run_test_case('QueryTest/explain-level1', vector)

  @pytest.mark.xfail(run=False, reason="The test for missing table stats fails for avro")
  def test_explain_level2(self, vector):
    vector.get_value('exec_option')['num_scanner_threads'] = self.NUM_SCANNER_THREADS
    vector.get_value('exec_option')['explain_level'] = 2
    self.run_test_case('QueryTest/explain-level2', vector)

  @pytest.mark.xfail(run=False, reason="The test for missing table stats fails for avro")
  def test_explain_level3(self, vector):
    vector.get_value('exec_option')['num_scanner_threads'] = self.NUM_SCANNER_THREADS
    vector.get_value('exec_option')['explain_level'] = 3
    self.run_test_case('QueryTest/explain-level3', vector)

  def test_explain_validate_cardinality_estimates(self, vector):
    # Tests that the cardinality estimates are correct for partitioned tables.
    # TODO: Cardinality estimation tests should eventually be part of the planner tests.
    # TODO: Remove this test
    db_name = 'functional'
    tbl_name = 'alltypes'

    def check_cardinality(query_result, expected_cardinality):
      """Asserts that every cardinality annotation found in the explain output
      equals expected_cardinality (a string). Lines without an annotation are
      skipped, so output with no matching lines passes vacuously."""
      # Raw string so that \d reaches the regex engine uninterpreted by Python.
      regex = re.compile(r'tuple-ids=\d+ row-size=\d+B cardinality=(\d+)')
      for res in query_result:
        m = regex.match(res.strip())
        if m:
          assert len(m.groups()) == 1
          # Fixed stale comment: the expected cardinality is caller-supplied,
          # not always zero.
          assert m.groups()[0] == expected_cardinality

    # All partitions are filtered out, cardinality should be 0.
    result = self.execute_query("explain select * from %s.%s where year = 1900" % (
        db_name, tbl_name), query_options={'explain_level':3})
    check_cardinality(result.data, '0')

    # Half of the partitions are filtered out, cardinality should be 3650.
    result = self.execute_query("explain select * from %s.%s where year = 2010" % (
        db_name, tbl_name), query_options={'explain_level':3})
    check_cardinality(result.data, '3650')

    # None of the partitions are filtered out, cardinality should be 7300.
    result = self.execute_query("explain select * from %s.%s" % (db_name, tbl_name),
        query_options={'explain_level':3})
    check_cardinality(result.data, '7300')
|
|
|
|
class TestExplainEmptyPartition(ImpalaTestSuite):
  """Regression tests for explain-plan warnings about missing statistics when a
  table contains empty partitions (IMPALA-1708, IMPALA-1530)."""

  # Scratch database created before and dropped after each test method;
  # named after the JIRA this test was written for.
  TEST_DB_NAME = "imp_1708"

  def setup_method(self, method):
    # Drop any leftover database from a previous run, then recreate it under the
    # test warehouse so that table locations are predictable.
    self.cleanup_db(self.TEST_DB_NAME)
    self.execute_query("create database if not exists {0} location '{1}/{0}.db'"
                       .format(self.TEST_DB_NAME, WAREHOUSE))

  def teardown_method(self, method):
    # Remove the scratch database and everything in it.
    self.cleanup_db(self.TEST_DB_NAME)

  @SkipIfLocal.hdfs_client
  def test_non_empty_partition_0_rows(self):
    """Regression test for IMPALA-1708: if a partition has 0 rows but > 0 files after
    COMPUTE STATS, don't warn the user about missing stats. The files are probably
    corrupted, or used for something else."""
    # Level 3 explain output includes the stats warnings this test asserts on.
    self.client.execute("SET EXPLAIN_LEVEL=3")
    self.client.execute("CREATE TABLE %s.empty_partition (col int) "
        "partitioned by (p int)" % self.TEST_DB_NAME)
    self.client.execute(
        "ALTER TABLE %s.empty_partition ADD PARTITION (p=NULL)" % self.TEST_DB_NAME)
    # Put an empty file in the partition so we have > 0 files, but 0 rows
    self.filesystem_client.create_file(
        "test-warehouse/%s.db/empty_partition/p=__HIVE_DEFAULT_PARTITION__/empty" %
        self.TEST_DB_NAME, "")
    # Pick up the new file, then compute stats so the partition records 0 rows.
    self.client.execute("REFRESH %s.empty_partition" % self.TEST_DB_NAME)
    self.client.execute("COMPUTE STATS %s.empty_partition" % self.TEST_DB_NAME)
    # Sanity check: the NULL partition shows 0 rows / 1 file in SHOW PARTITIONS.
    assert "NULL\t0\t1" in str(
        self.client.execute("SHOW PARTITIONS %s.empty_partition" % self.TEST_DB_NAME))
    # The 0-row partition has stats, so no missing-stats warning should appear.
    assert "missing relevant table and/or column statistics" not in str(
        self.client.execute(
            "EXPLAIN SELECT * FROM %s.empty_partition" % self.TEST_DB_NAME))

    # Now add a partition with some data (so it gets selected into the scan), to check
    # that its lack of stats is correctly identified
    self.client.execute(
        "ALTER TABLE %s.empty_partition ADD PARTITION (p=1)" % self.TEST_DB_NAME)
    self.filesystem_client.create_file("test-warehouse/%s.db/empty_partition/p=1/rows" %
        self.TEST_DB_NAME, "1")
    self.client.execute("REFRESH %s.empty_partition" % self.TEST_DB_NAME)
    explain_result = str(
        self.client.execute("EXPLAIN SELECT * FROM %s.empty_partition" % self.TEST_DB_NAME))
    assert "missing relevant table and/or column statistics" in explain_result
    # Also test IMPALA-1530 - adding the number of partitions missing stats
    assert "1 partition(s) missing stats" in explain_result