Mirror of https://github.com/apache/impala.git, synced 2025-12-25 02:03:09 -05:00
IMPALA-3491: Use unique database fixture in test_partitioning.py

Testing: Ran the test locally in a loop on exhaustive. Did a private
debug/exhaustive/hdfs test run.

Change-Id: Ib1b33d9977a98894288662a711805e9a54329ec8
Reviewed-on: http://gerrit.cloudera.org:8080/4316
Reviewed-by: Alex Behm <alex.behm@cloudera.com>
Tested-by: Internal Jenkins
committed by Internal Jenkins
parent f0ffbca2c3
commit 8c37bf3543
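The fix replaces the hardcoded hdfs_partitioning and bool_partitions databases, and the hand-rolled setup_class/teardown_class cleanup, with pytest's unique_database fixture. As a rough sketch of what such a fixture provides (the real one lives in Impala's test framework; the naming scheme and the request.instance.client handle below are illustrative assumptions, not the actual implementation):

import pytest

@pytest.fixture
def unique_database(request):
  # Derive a per-test database name so repeated or concurrent runs never
  # collide on a shared name like 'hdfs_partitioning'.
  db_name = "test_%s_db" % request.node.name.lower()
  client = request.instance.client  # assumed handle to an Impala client
  client.execute("DROP DATABASE IF EXISTS %s CASCADE" % db_name)
  client.execute("CREATE DATABASE %s" % db_name)
  yield db_name
  # Teardown runs even when the test fails, so a broken run cannot leave
  # stale databases behind for the next one.
  client.execute("DROP DATABASE IF EXISTS %s CASCADE" % db_name)

Each test then receives the database name as an argument, which the diff below threads through use_db and the $DATABASE placeholder.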
@@ -12,7 +12,7 @@ PARTITIONED BY (
   float_col FLOAT,
   double_col DOUBLE,
   string_col STRING
-) LOCATION '/test-warehouse/all_insert_partition_col_types'
+) LOCATION '/test-warehouse/$DATABASE.db/all_insert_partition_col_types'
 ====
 ---- QUERY
 DESCRIBE all_insert_partition_col_types
@@ -83,7 +83,7 @@ string_col in ('1', '2', '3')
 ---- RESULTS: VERIFY_IS_SUBSET
 '01:EXCHANGE [UNPARTITIONED]'
 '|'
-'00:SCAN HDFS [hdfs_partitioning.all_insert_partition_col_types]'
+'00:SCAN HDFS [$DATABASE.all_insert_partition_col_types]'
 ' partitions=3/11 files=3 size=6B'
 ====
 ---- QUERY
@@ -93,7 +93,7 @@ WHERE tinyint_col < 7
 ---- RESULTS: VERIFY_IS_SUBSET
 '01:EXCHANGE [UNPARTITIONED]'
 '|'
-'00:SCAN HDFS [hdfs_partitioning.all_insert_partition_col_types]'
+'00:SCAN HDFS [$DATABASE.all_insert_partition_col_types]'
 ' partitions=7/11 files=7 size=14B'
 ====
 ---- QUERY
@@ -103,7 +103,7 @@ WHERE smallint_col < 6
 ---- RESULTS: VERIFY_IS_SUBSET
 '01:EXCHANGE [UNPARTITIONED]'
 '|'
-'00:SCAN HDFS [hdfs_partitioning.all_insert_partition_col_types]'
+'00:SCAN HDFS [$DATABASE.all_insert_partition_col_types]'
 ' partitions=6/11 files=6 size=12B'
 ====
 ---- QUERY
@@ -113,7 +113,7 @@ WHERE int_col < 5
 ---- RESULTS: VERIFY_IS_SUBSET
 '01:EXCHANGE [UNPARTITIONED]'
 '|'
-'00:SCAN HDFS [hdfs_partitioning.all_insert_partition_col_types]'
+'00:SCAN HDFS [$DATABASE.all_insert_partition_col_types]'
 ' partitions=5/11 files=5 size=10B'
 ====
 ---- QUERY
@@ -123,7 +123,7 @@ WHERE bigint_col < 40
 ---- RESULTS: VERIFY_IS_SUBSET
 '01:EXCHANGE [UNPARTITIONED]'
 '|'
-'00:SCAN HDFS [hdfs_partitioning.all_insert_partition_col_types]'
+'00:SCAN HDFS [$DATABASE.all_insert_partition_col_types]'
 ' partitions=4/11 files=4 size=8B'
 ====
 ---- QUERY
@@ -133,7 +133,7 @@ WHERE string_col in ('1', '2', '3')
 ---- RESULTS: VERIFY_IS_SUBSET
 '01:EXCHANGE [UNPARTITIONED]'
 '|'
-'00:SCAN HDFS [hdfs_partitioning.all_insert_partition_col_types]'
+'00:SCAN HDFS [$DATABASE.all_insert_partition_col_types]'
 ' partitions=3/11 files=3 size=6B'
 ====
 ---- QUERY
@@ -143,7 +143,7 @@ WHERE double_col = 1.1
 ---- RESULTS: VERIFY_IS_SUBSET
 '01:EXCHANGE [UNPARTITIONED]'
 '|'
-'00:SCAN HDFS [hdfs_partitioning.all_insert_partition_col_types]'
+'00:SCAN HDFS [$DATABASE.all_insert_partition_col_types]'
 ' partitions=10/11 files=10 size=20B'
 ====
 ---- QUERY
@@ -153,7 +153,7 @@ WHERE float_col = 2
 ---- RESULTS: VERIFY_IS_SUBSET
 '01:EXCHANGE [UNPARTITIONED]'
 '|'
-'00:SCAN HDFS [hdfs_partitioning.all_insert_partition_col_types]'
+'00:SCAN HDFS [$DATABASE.all_insert_partition_col_types]'
 ' partitions=1/11 files=1 size=3B'
 ====
 ---- QUERY
@@ -193,13 +193,13 @@ STRING, STRING, STRING
 ALTER TABLE all_partition_col_types
 ADD PARTITION (bool_col=FALSE, tinyint_col=1, smallint_col=1, int_col=1,
 bigint_col=10, float_col=0, double_col=1.1, string_col='1')
-LOCATION '/test-warehouse/all_insert_partition_col_types/tinyint_col=1/smallint_col=1/int_col=1/bigint_col=10/float_col=0/double_col=1.1/string_col=1/'
+LOCATION '/test-warehouse/$DATABASE.db/all_insert_partition_col_types/tinyint_col=1/smallint_col=1/int_col=1/bigint_col=10/float_col=0/double_col=1.1/string_col=1/'
 ====
 ---- QUERY
 ALTER TABLE all_partition_col_types
 ADD PARTITION (bool_col=TRUE, tinyint_col=2, smallint_col=2, int_col=2,
 bigint_col=20, float_col=0, double_col=1.1, string_col='2')
-LOCATION '/test-warehouse/all_insert_partition_col_types/tinyint_col=2/smallint_col=2/int_col=2/bigint_col=20/float_col=0/double_col=1.1/string_col=2/'
+LOCATION '/test-warehouse/$DATABASE.db/all_insert_partition_col_types/tinyint_col=2/smallint_col=2/int_col=2/bigint_col=20/float_col=0/double_col=1.1/string_col=2/'
 ====
 ---- QUERY
 EXPLAIN
@@ -208,7 +208,7 @@ WHERE bool_col=false
 ---- RESULTS: VERIFY_IS_SUBSET
 '01:EXCHANGE [UNPARTITIONED]'
 '|'
-'00:SCAN HDFS [hdfs_partitioning.all_partition_col_types]'
+'00:SCAN HDFS [$DATABASE.all_partition_col_types]'
 ' partitions=1/2 files=1 size=2B'
 ====
 ---- QUERY
@@ -237,7 +237,7 @@ WHERE decimal_col = 4.34
 ---- RESULTS: VERIFY_IS_SUBSET
 '01:EXCHANGE [UNPARTITIONED]'
 '|'
-'00:SCAN HDFS [hdfs_partitioning.test_dec_partition]'
+'00:SCAN HDFS [$DATABASE.test_dec_partition]'
 ' partitions=1/1 files=1 size=9B'
 ====
 ---- QUERY
@@ -247,7 +247,7 @@ WHERE decimal_col = 04.340
 ---- RESULTS: VERIFY_IS_SUBSET
 '01:EXCHANGE [UNPARTITIONED]'
 '|'
-'00:SCAN HDFS [hdfs_partitioning.test_dec_partition]'
+'00:SCAN HDFS [$DATABASE.test_dec_partition]'
 ' partitions=1/1 files=1 size=9B'
 ====
 ---- QUERY
@@ -257,7 +257,7 @@ WHERE decimal_col = 4.35
 ---- RESULTS: VERIFY_IS_SUBSET
 '01:EXCHANGE [UNPARTITIONED]'
 '|'
-'00:SCAN HDFS [hdfs_partitioning.test_dec_partition]'
+'00:SCAN HDFS [$DATABASE.test_dec_partition]'
 ' partitions=0/1 files=0 size=0B'
 ====
 ---- QUERY
@@ -280,6 +280,6 @@ WHERE decimal_col = 8.68
 ---- RESULTS: VERIFY_IS_SUBSET
 '01:EXCHANGE [UNPARTITIONED]'
 '|'
-'00:SCAN HDFS [hdfs_partitioning.test_dec_partition]'
+'00:SCAN HDFS [$DATABASE.test_dec_partition]'
 ' partitions=1/3 files=1 size=18B'
 ====
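The hunks above rewrite partition-col-types.test so that table LOCATIONs and expected scan targets reference the $DATABASE placeholder instead of the fixed hdfs_partitioning database; the test runner substitutes the placeholder with the database passed via use_db. A minimal sketch of that substitution, assuming a plain textual replace (the helper name is hypothetical):

def expand_database_placeholder(section_text, db_name):
  # run_test_case performs an equivalent substitution when it loads a
  # .test file with use_db set; this stand-in just does a string replace.
  return section_text.replace("$DATABASE", db_name)

plan_line = "'00:SCAN HDFS [$DATABASE.all_insert_partition_col_types]'"
print(expand_database_placeholder(plan_line, "test_partition_col_types_db"))
# '00:SCAN HDFS [test_partition_col_types_db.all_insert_partition_col_types]'

The remaining hunks update test_partitioning.py to use the fixture.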
@@ -16,7 +16,6 @@
 # under the License.

 import pytest
-from subprocess import call

 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite
@@ -25,8 +24,6 @@ from tests.common.test_dimensions import create_single_exec_option_dimension

 # Tests to validate HDFS partitioning.
 class TestPartitioning(ImpalaTestSuite):
-  TEST_DBS = ['hdfs_partitioning', 'bool_partitions']
-
   @classmethod
   def get_workload(self):
     return 'functional-query'
@@ -41,71 +38,53 @@ class TestPartitioning(ImpalaTestSuite):
         v.get_value('table_format').file_format == 'text' and\
         v.get_value('table_format').compression_codec == 'none')

-  @classmethod
-  def setup_class(cls):
-    super(TestPartitioning, cls).setup_class()
-    map(cls.cleanup_db, cls.TEST_DBS)
-    cls.filesystem_client.delete_file_dir("test-warehouse/all_insert_partition_col_types/",\
-        recursive=True)
-
-  @classmethod
-  def teardown_class(cls):
-    map(cls.cleanup_db, cls.TEST_DBS)
-    super(TestPartitioning, cls).teardown_class()
-
   @SkipIfLocal.root_path
   @pytest.mark.execute_serially
-  def test_partition_col_types(self, vector):
-    self.execute_query("create database hdfs_partitioning");
+  def test_partition_col_types(self, vector, unique_database):
     self.run_test_case('QueryTest/partition-col-types', vector,
-        use_db='hdfs_partitioning')
+        use_db=unique_database)

   # Missing Coverage: Impala deals with boolean partitions created by Hive on a non-hdfs
   # filesystem.
   @SkipIfS3.hive
   @SkipIfIsilon.hive
   @SkipIfLocal.hive
   @pytest.mark.execute_serially
-  def test_boolean_partitions(self, vector):
+  def test_boolean_partitions(self, vector, unique_database):
     # This test takes about a minute to complete due to the Hive commands that are
     # executed. To cut down on runtime, limit the test to exhaustive exploration
     # strategy.
     if self.exploration_strategy() != 'exhaustive': pytest.skip()

-    db_name = 'bool_partitions'
-    tbl_name = 'tbl'
-    self.execute_query("create database " + db_name)
-    self.execute_query("use " + db_name)
+    full_name = unique_database + ".bool_test"

-    self.execute_query("create table %s (i int) partitioned by (b boolean)" % tbl_name)
+    self.execute_query("create table %s (i int) partitioned by (b boolean)" % full_name)

     # Insert some data using Hive. Due to HIVE-6590, Hive may create multiple
     # partitions, mapping to the same boolean literal value.
     # For example, Hive may create partitions: /b=FALSE and /b=false, etc
-    call(["hive", "-e", "INSERT OVERWRITE TABLE %s.%s PARTITION(b=false) SELECT 1 from "\
-        "functional.alltypes limit 1" % (db_name, tbl_name)])
-    call(["hive", "-e", "INSERT OVERWRITE TABLE %s.%s PARTITION(b=FALSE) SELECT 2 from "\
-        "functional.alltypes limit 1" % (db_name, tbl_name)])
-    call(["hive", "-e", "INSERT OVERWRITE TABLE %s.%s PARTITION(b=true) SELECT 10 from "\
-        "functional.alltypes limit 1" % (db_name, tbl_name)])
+    self.run_stmt_in_hive("INSERT INTO TABLE %s PARTITION(b=false) SELECT 1 from "\
+        "functional.alltypes limit 1" % full_name)
+    self.run_stmt_in_hive("INSERT INTO TABLE %s PARTITION(b=FALSE) SELECT 2 from "\
+        "functional.alltypes limit 1" % full_name)
+    self.run_stmt_in_hive("INSERT INTO TABLE %s PARTITION(b=true) SELECT 10 from "\
+        "functional.alltypes limit 1" % full_name)

     # Update the Impala metadata
-    self.execute_query("invalidate metadata " + tbl_name)
+    self.execute_query("invalidate metadata " + full_name)

     # List the partitions. Show table stats returns 1 row for each partition + 1 summary
     # row
-    result = self.execute_query("show table stats %s" % tbl_name)
+    result = self.execute_query("show table stats %s" % full_name)
     assert len(result.data) == 3 + 1

     # Verify Impala properly merges the results of the bad Hive metadata.
-    assert '13' == self.execute_scalar("select sum(i) from %s" % tbl_name);
-    assert '10' == self.execute_scalar("select sum(i) from %s where b=true" % tbl_name)
-    assert '3' == self.execute_scalar("select sum(i) from %s where b=false" % tbl_name)
+    assert '13' == self.execute_scalar("select sum(i) from %s" % full_name);
+    assert '10' == self.execute_scalar("select sum(i) from %s where b=true" % full_name)
+    assert '3' == self.execute_scalar("select sum(i) from %s where b=false" % full_name)

     # INSERT into a boolean column is disabled in Impala due to this Hive bug.
     try:
-      self.execute_query("insert into %s partition(bool_col=true) select 1" % tbl_name)
+      self.execute_query("insert into %s partition(bool_col=true) select 1" % full_name)
     except ImpalaBeeswaxException, e:
       assert 'AnalysisException: INSERT into table with BOOLEAN partition column (%s) '\
-          'is not supported: %s.%s' % ('b', db_name, tbl_name) in str(e)
+          'is not supported: %s' % ('b', full_name) in str(e)
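
For context on the assertions above: per HIVE-6590, the three Hive INSERTs can land in distinct partition directories for the same logical boolean value (e.g. /b=false and /b=FALSE), and Impala is expected to merge them. A self-contained sketch of that merge using the values the test inserts (the dictionary layout is illustrative, not Impala's metadata format):

# Directory -> values written by the three Hive INSERTs in the test.
partition_dirs = {
  "b=false": [1],
  "b=FALSE": [2],
  "b=true": [10],
}

def logical_value(dir_name):
  # Impala folds the case of the boolean literal, so b=false and b=FALSE
  # collapse into a single logical partition for query evaluation.
  return dir_name.split("=", 1)[1].lower()

merged = {}
for dir_name, values in partition_dirs.items():
  merged.setdefault(logical_value(dir_name), []).extend(values)

assert sum(merged["false"]) == 3                    # sum(i) where b=false
assert sum(merged["true"]) == 10                    # sum(i) where b=true
assert sum(sum(v) for v in merged.values()) == 13   # sum(i)

This is also why the test expects 'show table stats' to report three partition rows plus one summary row, even though only two logical boolean values exist.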