mirror of
https://github.com/apache/impala.git
synced 2026-01-10 09:00:16 -05:00
Previously Impala disallowed LOAD DATA and INSERT on S3. This patch functionally enables LOAD DATA and INSERT on S3 without making major changes for the sake of improving performance over S3. This patch also enables both INSERT and LOAD DATA between file systems. S3 does not support the rename operation, so the staged files in S3 are copied instead of renamed, which contributes to the slow performance on S3. The FinalizeSuccessfulInsert() function now does not make any underlying assumptions of the filesystem it is on and works across all supported filesystems. This is done by adding a full URI field to the base directory for a partition in the TInsertPartitionStatus. Also, the HdfsOp class now does not assume a single filesystem and gets connections to the filesystems based on the URI of the file it is operating on. Added a python S3 client called 'boto3' to access S3 from the python tests. A new class called S3Client is introduced which creates wrappers around the boto3 functions and has the same function signatures as PyWebHdfsClient by deriving from a base abstract class BaseFileSystem so that they can be used interchangeably through a 'generic_client'. test_load.py is refactored to use this generic client. The ImpalaTestSuite setup creates a client according to the TARGET_FILESYSTEM environment variable and assigns it to the 'generic_client'. P.S: Currently, the test_load.py runs 4x slower on S3 than on HDFS. Performance needs to be improved in future patches. INSERT performance is slower than on HDFS too. This is mainly because of an extra copy that happens between staging and the final location of a file. However, larger INSERTs come closer to HDFS performance than smaller inserts. ACLs are not taken care of for S3 in this patch. It is something that still needs to be discussed before implementing. 
Change-Id: I94e15ad67752dce21c9b7c1dced6e114905a942d Reviewed-on: http://gerrit.cloudera.org:8080/2574 Reviewed-by: Sailesh Mukil <sailesh@cloudera.com> Tested-by: Internal Jenkins
112 lines
4.5 KiB
Python
112 lines
4.5 KiB
Python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
# you may not use this file except in compliance with the License.
|
|
# You may obtain a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
# See the License for the specific language governing permissions and
|
|
# limitations under the License.
|
|
import logging
|
|
import pytest
|
|
import shlex
|
|
import time
|
|
from tests.common.test_result_verifier import *
|
|
from subprocess import call
|
|
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
|
|
from tests.common.test_vector import *
|
|
from tests.common.test_dimensions import ALL_NODES_ONLY
|
|
from tests.common.impala_test_suite import *
|
|
from tests.common.skip import SkipIfS3, SkipIfIsilon, SkipIfLocal
|
|
|
|
# Tests to validate HDFS partitioning.
|
|
class TestPartitioning(ImpalaTestSuite):
  """Tests to validate HDFS partitioning.

  Covers partition-column type handling and Impala's behavior when Hive
  creates multiple partition directories mapping to the same boolean value
  (HIVE-6590).
  """

  # Databases created by these tests; dropped both before and after the run so
  # a failed previous run cannot poison this one.
  TEST_DBS = ['hdfs_partitioning', 'bool_partitions']

  @classmethod
  def get_workload(cls):
    # All queries in this suite run against the functional-query workload.
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestPartitioning, cls).add_test_dimensions()
    cls.TestMatrix.add_dimension(create_single_exec_option_dimension())

    # There is no reason to run these tests using all dimensions; restrict to
    # uncompressed text tables.
    cls.TestMatrix.add_constraint(lambda v:
        v.get_value('table_format').file_format == 'text' and
        v.get_value('table_format').compression_codec == 'none')

  @classmethod
  def setup_class(cls):
    super(TestPartitioning, cls).setup_class()
    # Use an explicit loop rather than map(): under Python 3 map() is lazy and
    # the cleanup calls would silently never execute.
    for db in cls.TEST_DBS:
      cls.cleanup_db(db)
    # Remove any leftover data from a previous run of the partition-col-types
    # test case.
    cls.filesystem_client.delete_file_dir(
        "test-warehouse/all_insert_partition_col_types/", recursive=True)

  @classmethod
  def teardown_class(cls):
    for db in cls.TEST_DBS:
      cls.cleanup_db(db)
    super(TestPartitioning, cls).teardown_class()

  @SkipIfLocal.root_path
  @pytest.mark.execute_serially
  def test_partition_col_types(self, vector):
    """Runs the partition-col-types test cases in a dedicated database."""
    self.execute_query("create database hdfs_partitioning")
    self.run_test_case('QueryTest/partition-col-types', vector,
        use_db='hdfs_partitioning')

  # Missing Coverage: Impala deals with boolean partitions created by Hive on a
  # non-hdfs filesystem.
  @SkipIfS3.hive
  @SkipIfIsilon.hive
  @SkipIfLocal.hive
  @pytest.mark.execute_serially
  def test_boolean_partitions(self, vector):
    """Verifies Impala merges Hive's duplicate boolean partition dirs and
    rejects INSERT into a BOOLEAN partition column."""
    # This test takes about a minute to complete due to the Hive commands that
    # are executed. To cut down on runtime, limit the test to exhaustive
    # exploration strategy.
    if self.exploration_strategy() != 'exhaustive': pytest.skip()

    db_name = 'bool_partitions'
    tbl_name = 'tbl'
    self.execute_query("create database " + db_name)
    self.execute_query("use " + db_name)

    self.execute_query("create table %s (i int) partitioned by (b boolean)" % tbl_name)

    # Insert some data using Hive. Due to HIVE-6590, Hive may create multiple
    # partitions, mapping to the same boolean literal value.
    # For example, Hive may create partitions: /b=FALSE and /b=false, etc
    call(["hive", "-e", "INSERT OVERWRITE TABLE %s.%s PARTITION(b=false) SELECT 1 from "
        "functional.alltypes limit 1" % (db_name, tbl_name)])
    call(["hive", "-e", "INSERT OVERWRITE TABLE %s.%s PARTITION(b=FALSE) SELECT 2 from "
        "functional.alltypes limit 1" % (db_name, tbl_name)])
    call(["hive", "-e", "INSERT OVERWRITE TABLE %s.%s PARTITION(b=true) SELECT 10 from "
        "functional.alltypes limit 1" % (db_name, tbl_name)])

    # Update the Impala metadata
    self.execute_query("invalidate metadata " + tbl_name)

    # List the partitions. Show table stats returns 1 row for each partition +
    # 1 summary row.
    result = self.execute_query("show table stats %s" % tbl_name)
    assert len(result.data) == 3 + 1

    # Verify Impala properly merges the results of the bad Hive metadata.
    assert '13' == self.execute_scalar("select sum(i) from %s" % tbl_name)
    assert '10' == self.execute_scalar("select sum(i) from %s where b=true" % tbl_name)
    assert '3' == self.execute_scalar("select sum(i) from %s where b=false" % tbl_name)

    # INSERT into a boolean column is disabled in Impala due to this Hive bug.
    # NOTE(review): the query names partition column 'bool_col' while the table's
    # partition column (and the asserted error message) is 'b' — confirm this is
    # the intended query.
    try:
      self.execute_query("insert into %s partition(bool_col=true) select 1" % tbl_name)
      # Previously the test silently passed if the INSERT unexpectedly
      # succeeded; make that an explicit failure.
      pytest.fail("INSERT into a BOOLEAN partition column should have failed")
    except ImpalaBeeswaxException as e:
      assert 'AnalysisException: INSERT into table with BOOLEAN partition column (%s) '\
          'is not supported: %s.%s' % ('b', db_name, tbl_name) in str(e)