IMPALA-13908: Remove reference to ImpalaBeeswaxException

This patch replaces references to ImpalaBeeswaxException with
IMPALA_CONNECTION_EXCEPTION wherever possible.
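
A condensed sketch of the typical change (not an exact hunk from the patch;
"query" and "expected_error" stand in for test-specific values): the
Beeswax-specific import and except clause are swapped for
IMPALA_CONNECTION_EXCEPTION from tests/common/impala_connection.py, the
protocol-agnostic connection layer used by the tests:

  -from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
  +from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
   ...
       try:
         self.client.execute(query)
         assert False, "query was expected to fail"
  -    except ImpalaBeeswaxException as e:
  +    except IMPALA_CONNECTION_EXCEPTION as e:
         assert expected_error in str(e)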

Fix some easy flake8 issues caught through this command:
git show HEAD --name-only | grep '^tests.*py' \
  | xargs -I {} impala-flake8 {} \
  | grep -e U100 -e E111 -e E301 -e E302 -e E303 -e F...
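
For reference, U100 flags unused function arguments, E111 flags indentation
that is not a multiple of the expected width, E301-E303 cover blank-line
conventions, and the F-codes come from pyflakes (unused imports, comparisons
against literals with "is", and similar). Two representative fixes, both
taken from hunks below:

  # U100: drop a test fixture argument the test body never uses.
  -  def test_remove_data_and_refresh(self, vector, unique_database):
  +  def test_remove_data_and_refresh(self, unique_database):

  # F-code: compare string values with ==, not identity.
  -    assert error is "", "Couldn't get status of Safe mode. Error: %s" % (error)
  +    assert error == "", "Couldn't get status of Safe mode. Error: %s" % (error)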

Testing:
- Pass exhaustive tests.

Change-Id: I676a9954404613a1cc35ebbc9ffa73e8132f436a
Reviewed-on: http://gerrit.cloudera.org:8080/22701
Reviewed-by: Jason Fehr <jfehr@cloudera.com>
Tested-by: Impala Public Jenkins <impala-public-jenkins@cloudera.com>
Author: Riza Suminto
Date: 2025-03-26 13:18:19 -07:00
Committed by: Impala Public Jenkins
Parent: 00dc79adf6
Commit: 4617c2370f

27 changed files with 137 additions and 126 deletions

View File

@@ -23,9 +23,9 @@ import threading
 from multiprocessing.pool import ThreadPool
 from multiprocessing import TimeoutError
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.impala_connection import ERROR, FINISHED
+from tests.common.impala_connection import (
+  ERROR, FINISHED, IMPALA_CONNECTION_EXCEPTION)
 from tests.util.shell_util import dump_server_stacktraces
@@ -125,7 +125,7 @@ class TestConcurrentDdls(CustomClusterTestSuite):
       is_finished = tls.client.wait_for_finished_timeout(handle, timeout=60)
       assert is_finished, "Query timeout(60s): " + query
       tls.client.close_query(handle)
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       # Could raise exception when running with INVALIDATE METADATA
       assert TestConcurrentDdls.is_acceptable_error(str(e), sync_ddl), str(e)
       self.execute_query_expect_success(tls.client, "invalidate metadata")

View File

@@ -30,8 +30,8 @@ import time
 from random import randint
 from RuntimeProfile.ttypes import TRuntimeProfileFormat
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
-from tests.common.impala_connection import ERROR, FINISHED, RUNNING
+from tests.common.impala_connection import (
+  ERROR, FINISHED, IMPALA_CONNECTION_EXCEPTION, RUNNING)
 from tests.common.impala_test_suite import ImpalaTestSuite, LOG
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.errors import Timeout
@@ -407,7 +407,7 @@ class TestQueryRetries(CustomClusterTestSuite):
     try:
       self.client.fetch(self._shuffle_heavy_query, handle)
       assert False
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       assert "Admission for query exceeded timeout 60000ms in pool default-pool." \
           in str(e)
       assert "Queued reason: Waiting for executors to start. Only DDL queries and " \
@@ -472,7 +472,7 @@ class TestQueryRetries(CustomClusterTestSuite):
     try:
       self.client.fetch(self._shuffle_heavy_query, handle)
       assert False
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       assert "Max retry limit was hit. Query was retried 1 time(s)." in str(e)
     # Assert that the killed impalad shows up in the list of blacklisted executors from
@@ -643,7 +643,7 @@ class TestQueryRetries(CustomClusterTestSuite):
     try:
       self.client.fetch(query, handle)
       assert False, "fetch should fail"
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       assert "Failed due to unreachable impalad" in str(e)
       assert "Skipping retry of query_id=%s because the client has already " \
           "fetched some rows" % self.client.handle_id(handle) in str(e)

View File

@@ -18,9 +18,9 @@
 from __future__ import absolute_import, division, print_function
 import pytest
 import time
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import SkipIf, SkipIfBuildType
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
+from tests.common.skip import SkipIfBuildType
 # The BE krpc port of the impalad these tests simulate rpc errors at.
 KRPC_PORT = 27002
@@ -41,6 +41,7 @@ def _get_fail_action(rpc, error=None, port=KRPC_PORT, p=0.1):
     action += "@" + error
   return _get_rpc_debug_action(rpc, action, port=port)
 @SkipIfBuildType.not_dev_build
 class TestRPCException(CustomClusterTestSuite):
   """Tests Impala exception handling in TransmitData() RPC to make sure no
@@ -93,7 +94,7 @@ class TestRPCException(CustomClusterTestSuite):
     try:
       result = self.client.execute(self.TEST_QUERY)
       assert result.data == self.EXPECTED_RESULT, "Query returned unexpected results."
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       if exception_string is None:
         raise e
       assert exception_string in str(e), "Query failed with unexpected exception."
@@ -149,7 +150,7 @@ class TestRPCException(CustomClusterTestSuite):
     try:
       self.client.execute(self.TEST_QUERY)
       assert False, "query was expected to fail"
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       assert "Debug Action: QUERY_STATE_INIT:FAIL" in str(e)
     # If we successfully cancelled all Exec() rpcs and returned to the client as soon as
@@ -178,7 +179,7 @@ class TestRPCException(CustomClusterTestSuite):
     try:
       self.client.execute(self.TEST_QUERY)
       assert False, "query was expected to fail"
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       assert "Debug Action: CONSTRUCT_QUERY_STATE_REPORT:FAIL" in str(e)
     # If we successfully cancelled all Exec() rpcs and returned to the client as soon as

View File

@@ -18,9 +18,9 @@
 from __future__ import absolute_import, division, print_function
 from builtins import range
 import pytest
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.impala_cluster import ImpalaCluster
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
 from tests.common.skip import SkipIfBuildType, SkipIfFS
 from tests.common.test_result_verifier import error_msg_startswith
 from tests.verifiers.metric_verifier import MetricVerifier
@@ -33,6 +33,7 @@ def _get_rpc_fail_action(port):
   return "IMPALA_SERVICE_POOL:127.0.0.1:{port}:ExecQueryFInstances:FAIL" \
       .format(port=port)
 @SkipIfBuildType.not_dev_build
 class TestRPCTimeout(CustomClusterTestSuite):
   """Tests for every Impala RPC timeout handling, query should not hang and
@@ -58,7 +59,7 @@ class TestRPCTimeout(CustomClusterTestSuite):
     try:
       self.execute_query(query, query_options)
       assert expected_exception is None
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       if expected_exception is not None:
         assert expected_exception in str(e)
     verifiers = [MetricVerifier(i.service)
@@ -68,13 +69,13 @@ class TestRPCTimeout(CustomClusterTestSuite):
       v.wait_for_metric("impala-server.num-fragments-in-flight", 0)
       v.verify_num_unused_buffers()
-  def execute_query_then_cancel(self, query, vector, repeat = 1):
+  def execute_query_then_cancel(self, query, vector, repeat=1):
     for _ in range(repeat):
       handle = self.execute_query_async(query, vector.get_value('exec_option'))
       self.client.fetch(query, handle)
       try:
         self.client.cancel(handle)
-      except ImpalaBeeswaxException:
+      except IMPALA_CONNECTION_EXCEPTION:
        pass
       finally:
         self.client.close_query(handle)
@@ -98,7 +99,7 @@ class TestRPCTimeout(CustomClusterTestSuite):
   @CustomClusterTestSuite.with_args("--backend_client_rpc_timeout_ms=1000"
       " --debug_actions=EXEC_QUERY_FINSTANCES_DELAY:SLEEP@1000"
      " --datastream_sender_timeout_ms=30000")
-  def test_execqueryfinstances_race(self, vector):
+  def test_execqueryfinstances_race(self):
     """ Test for IMPALA-7464, where the rpc times out while the rpc handler continues to
     run simultaneously."""
     self.execute_query_verify_metrics(self.TEST_QUERY)
@@ -107,9 +108,9 @@ class TestRPCTimeout(CustomClusterTestSuite):
   @CustomClusterTestSuite.with_args("--backend_client_rpc_timeout_ms=1000"
       " --debug_actions=EXEC_QUERY_FINSTANCES_DELAY:SLEEP@3000"
       " --datastream_sender_timeout_ms=30000")
-  def test_execqueryfinstances_timeout(self, vector):
+  def test_execqueryfinstances_timeout(self):
     for i in range(3):
-      ex= self.execute_query_expect_failure(self.client, self.TEST_QUERY)
+      ex = self.execute_query_expect_failure(self.client, self.TEST_QUERY)
       assert "Exec() rpc failed: Timed out" in str(ex)
     verifiers = [MetricVerifier(i.service) for i in
                  ImpalaCluster.get_e2e_test_cluster().impalads]
@@ -129,22 +130,23 @@ class TestRPCTimeout(CustomClusterTestSuite):
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args("--backend_client_rpc_timeout_ms=1000"
       " --debug_actions=PUBLISH_FILTER_DELAY:SLEEP@3000")
-  def test_publishfilter_timeout(self, vector):
+  def test_publishfilter_timeout(self):
     self.execute_runtime_filter_query()
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args("--backend_client_rpc_timeout_ms=1000"
       " --debug_actions=UPDATE_FILTER_DELAY:SLEEP@3000")
-  def test_updatefilter_timeout(self, vector):
+  def test_updatefilter_timeout(self):
     self.execute_runtime_filter_query()
   all_rpcs = ["EXEC_QUERY_FINSTANCES", "CANCEL_QUERY_FINSTANCES", "PUBLISH_FILTER",
               "UPDATE_FILTER", "TRANSMIT_DATA", "END_DATA_STREAM", "REMOTE_SHUTDOWN"]
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args("--backend_client_rpc_timeout_ms=1000"
       " --datastream_sender_timeout_ms=30000 --debug_actions=%s" %
       "|".join(["%s_DELAY:JITTER@3000@0.1" % rpc for rpc in all_rpcs]))
-  def test_random_rpc_timeout(self, vector):
+  def test_random_rpc_timeout(self):
     self.execute_query_verify_metrics(self.TEST_QUERY, None, 10)
   # Inject jitter into the RPC handler of ReportExecStatus() to trigger RPC timeout.
@@ -153,7 +155,7 @@ class TestRPCTimeout(CustomClusterTestSuite):
   @CustomClusterTestSuite.with_args("--status_report_interval_ms=100"
       " --backend_client_rpc_timeout_ms=100"
       " --debug_actions=REPORT_EXEC_STATUS_DELAY:JITTER@110@0.7")
-  def test_reportexecstatus_jitter(self, vector):
+  def test_reportexecstatus_jitter(self):
     LONG_RUNNING_QUERY = "with v as (select t1.ss_hdemo_sk as xk " +\
         "from tpcds_parquet.store_sales t1, tpcds_parquet.store_sales t2 " +\
         "where t1.ss_hdemo_sk = t2.ss_hdemo_sk) " +\
@@ -168,7 +170,7 @@ class TestRPCTimeout(CustomClusterTestSuite):
       " --control_service_queue_mem_limit=1"
       " --control_service_queue_mem_limit_floor_bytes=1"
       " --control_service_num_svc_threads=1")
-  def test_reportexecstatus_retry(self, vector):
+  def test_reportexecstatus_retry(self):
     self.execute_query_verify_metrics(self.TEST_QUERY, None, 10)
   # Inject artificial failure during thrift profile serialization / deserialization.
@@ -201,7 +203,7 @@ class TestRPCTimeout(CustomClusterTestSuite):
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args("--status_report_interval_ms=100000 "
       "--status_report_max_retry_s=1 --abort_on_config_error=false")
-  def test_unresponsive_backend(self, unique_database):
+  def test_unresponsive_backend(self):
     """Test the UnresponsiveBackendThread by setting a status report retry time that is
     much lower than the status report interval, ensuring that the coordinator will
     conclude that the backend is unresponsive."""
@@ -212,11 +214,11 @@ class TestRPCTimeout(CustomClusterTestSuite):
   @SkipIfBuildType.not_dev_build
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args(
-    impalad_args="--backend_client_rpc_timeout_ms=1000 --debug_actions=" +
-      _get_rpc_fail_action(FAILED_KRPC_PORT),
+    impalad_args=("--backend_client_rpc_timeout_ms=1000 --debug_actions="
+      + _get_rpc_fail_action(FAILED_KRPC_PORT)),
     statestored_args="--statestore_heartbeat_frequency_ms=1000 \
       --statestore_max_missed_heartbeats=2")
-  def test_miss_complete_cb(self, unique_database):
+  def test_miss_complete_cb(self):
     """Test verify cancellation should not be blocked if the callback of ExecComplate
     are missing."""
@@ -232,6 +234,7 @@ class TestRPCTimeout(CustomClusterTestSuite):
     assert error_msg_startswith(str(ex), "Exec() rpc failed: Remote error: "
         "Runtime error: Debug Action: IMPALA_SERVICE_POOL:FAIL")
 class TestCatalogRPCTimeout(CustomClusterTestSuite):
   """"Tests RPC timeout and retry handling for catalogd operations."""
@@ -258,7 +261,7 @@ class TestCatalogRPCTimeout(CustomClusterTestSuite):
     fail with an RPC timeout exception."""
     try:
       self.execute_query("refresh functional.alltypes")
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       assert "RPC recv timed out" in str(e)
   @pytest.mark.execute_serially

View File

@@ -17,8 +17,8 @@
 from __future__ import absolute_import, division, print_function
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
 from tests.common.test_dimensions import create_single_exec_option_dimension
 from tests.common.test_result_verifier import error_msg_startswith
@@ -34,21 +34,21 @@ class TestSysDb(CustomClusterTestSuite):
     cls.ImpalaTestMatrix.add_dimension(create_single_exec_option_dimension())
   @CustomClusterTestSuite.with_args()
-  def test_query_log_table_create_sys_db_blocked(self, vector):
+  def test_query_log_table_create_sys_db_blocked(self):
     """Asserts that the sys db cannot be created."""
     try:
       self.client.execute("create database {0}".format(self.SYS_DB_NAME))
       assert False, "database '{0}' should have failed to create but was created" \
           .format(self.SYS_DB_NAME)
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       assert "Invalid db name: {0}. It has been blacklisted using --blacklisted_dbs" \
           .format(self.SYS_DB_NAME) in str(e), "database '{0}' failed to create but " \
          "for the wrong reason".format(self.SYS_DB_NAME)
   @CustomClusterTestSuite.with_args(impalad_args="--enable_workload_mgmt",
                                     catalogd_args="--enable_workload_mgmt")
-  def test_query_log_table_create_table_sys_db_blocked(self, vector):
+  def test_query_log_table_create_table_sys_db_blocked(self):
     """Asserts that no other tables can be created in the sys db."""
     table_name = "{0}.should_not_create".format(self.SYS_DB_NAME)
@@ -57,7 +57,7 @@ class TestSysDb(CustomClusterTestSuite):
       self.client.execute("create table {0} (id STRING)".format(table_name))
       assert False, "table '{0}' should have failed to create but was created" \
          .format(table_name)
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       expected_error = "IllegalStateException: Can't create blacklisted table: {0}" \
          .format(table_name)
       assert error_msg_startswith(str(e), expected_error), \

View File

@@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_litera
 import pytest
 import subprocess
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import SkipIf, SkipIfFS
 from tests.common.test_dimensions import create_exec_option_dimension
@@ -41,7 +41,6 @@ class TestDataErrors(ImpalaTestSuite):
     cls.ImpalaTestMatrix.add_dimension(
         create_exec_option_dimension(batch_sizes=cls.BATCH_SIZES))
   @classmethod
   def get_workload(self):
     return 'functional-query'
@@ -65,7 +64,7 @@ class TestHdfsFileOpenFailErrors(ImpalaTestSuite):
     assert not self.filesystem_client.exists(absolute_location)
     try:
       self.client.execute(select_stmt)
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       assert "Failed to open HDFS file" in str(e)
     self.client.execute(drop_stmt)
@@ -86,13 +85,13 @@ class TestHdfsUnknownErrors(ImpalaTestSuite):
     output, error = subprocess.Popen(
         ['hdfs', 'dfsadmin', '-safemode', 'get'],
         stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
-    assert error is "", "Couldn't get status of Safe mode. Error: %s" % (error)
+    assert error == "", "Couldn't get status of Safe mode. Error: %s" % (error)
     assert "Safe mode is OFF" in output
     # Turn safe mode on.
     output, error = subprocess.Popen(
         ['hdfs', 'dfsadmin', '-safemode', 'enter'],
         stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
-    assert error is "", "Couldn't turn Safe mode ON. Error: %s" % (error)
+    assert error == "", "Couldn't turn Safe mode ON. Error: %s" % (error)
     assert "Safe mode is ON" in output
     # We shouldn't be able to write to HDFS when it's in safe mode.
@@ -107,7 +106,7 @@ class TestHdfsUnknownErrors(ImpalaTestSuite):
     output, error = subprocess.Popen(
         ['hdfs', 'dfsadmin', '-safemode', 'leave'],
         stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
-    assert error is "", "Couldn't turn Safe mode OFF. Error: %s" % (error)
+    assert error == "", "Couldn't turn Safe mode OFF. Error: %s" % (error)
     assert "Safe mode is OFF" in output

View File

@@ -24,8 +24,8 @@ import pytest
 import re
 from time import sleep
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_cluster import ImpalaCluster
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
 from tests.common.impala_test_suite import ImpalaTestSuite, LOG
 from tests.common.skip import SkipIf, SkipIfFS
 from tests.common.test_dimensions import create_exec_option_dimension
@@ -186,7 +186,7 @@ class TestFailpoints(ImpalaTestSuite):
       try:
         self.execute_query(query,
             query_options={'debug_action': debug_action})
-      except ImpalaBeeswaxException as e:
+      except IMPALA_CONNECTION_EXCEPTION as e:
         assert 'Debug Action: FIS_FAIL_THREAD_CREATION:FAIL@0.5' \
             in str(e), str(e)
         break
@@ -195,7 +195,7 @@ class TestFailpoints(ImpalaTestSuite):
     try:
       self.execute_query(query, vector.get_value('exec_option'))
       assert 'Expected Failure'
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       LOG.debug(e)
       # IMPALA-5197: None of the debug actions should trigger corrupted file message
      assert 'Corrupt Parquet file' not in str(e)

View File

@@ -25,10 +25,10 @@ import time
 from copy import deepcopy
 from tests.metadata.test_ddl_base import TestDdlBase
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.environ import (HIVE_MAJOR_VERSION)
 from tests.common.file_utils import create_table_from_orc
-from tests.common.impala_connection import FINISHED, INITIALIZED, PENDING, RUNNING
+from tests.common.impala_connection import (
+  FINISHED, INITIALIZED, IMPALA_CONNECTION_EXCEPTION, PENDING, RUNNING)
 from tests.common.impala_test_suite import LOG
 from tests.common.parametrize import UniqueDatabase
 from tests.common.skip import (
@@ -588,7 +588,7 @@ class TestDdlStatements(TestDdlBase):
       result = self.execute_query_expect_success(
           client, "describe formatted %s" % view_name)
       exp_line = [line for line in result.data if 'View Expanded' in line][0]
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       # In non-SYNC_DDL tests, it's OK to get a "missing view" type error
       # until the metadata propagates.
       exp_line = "Exception: %s" % e

View File

@@ -16,7 +16,7 @@ import pytest
 import requests
 import time
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
 from tests.common.impala_test_suite import ImpalaTestSuite, LOG
 from tests.common.test_dimensions import create_uncompressed_text_dimension
 from tests.common.skip import SkipIfLocal, SkipIfFS
@@ -217,7 +217,7 @@ class TestRecursiveListing(ImpalaTestSuite):
     try:
       self.client.fetch(refresh_stmt, handle)
       assert not refresh_should_fail, "REFRESH should fail"
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       assert refresh_should_fail, "unexpected exception " + str(e)
     finally:
       requests.get(self.reset_log_level_url)

View File

@@ -14,7 +14,7 @@
 from __future__ import absolute_import, division, print_function
 from subprocess import check_call
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.test_dimensions import create_single_exec_option_dimension
 from tests.common.test_dimensions import create_uncompressed_text_dimension
@@ -42,7 +42,7 @@ class TestRefreshPartition(ImpalaTestSuite):
     cls.ImpalaTestMatrix.add_dimension(
         create_uncompressed_text_dimension(cls.get_workload()))
-  def test_refresh_invalid_partition(self, vector, unique_database):
+  def test_refresh_invalid_partition(self, unique_database):
     """
     Trying to refresh a partition that does not exist does not modify anything
     either in impala or hive.
@@ -59,7 +59,7 @@ class TestRefreshPartition(ImpalaTestSuite):
     assert [('333', '5309')] == self.get_impala_partition_info(table_name, 'y', 'z')
     assert ['y=333/z=5309'] == self.hive_partition_names(table_name)
-  def test_remove_data_and_refresh(self, vector, unique_database):
+  def test_remove_data_and_refresh(self, unique_database):
     """
     Data removed through hive is visible in impala after refresh of partition.
     """
@@ -83,14 +83,14 @@ class TestRefreshPartition(ImpalaTestSuite):
     # produce an error, it should be the expected error.
     try:
       self.client.execute("select * from %s" % table_name)
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       assert expected_error in str(e)
     self.client.execute('refresh %s partition (y=333, z=5309)' % table_name)
     result = self.client.execute("select count(*) from %s" % table_name)
     assert result.data == [str('0')]
-  def test_add_delete_data_to_hdfs_and_refresh(self, vector, unique_database):
+  def test_add_delete_data_to_hdfs_and_refresh(self, unique_database):
     """
     Data added/deleted directly in HDFS is visible in impala after refresh of
     partition.
@@ -126,7 +126,7 @@ class TestRefreshPartition(ImpalaTestSuite):
     result = self.client.execute("select count(*) from %s" % table_name)
     assert result.data == [str(0)]
-  def test_confirm_individual_refresh(self, vector, unique_database):
+  def test_confirm_individual_refresh(self, unique_database):
     """
     Data added directly to HDFS is only visible for the partition refreshed
     """

View File

@@ -18,12 +18,13 @@
 from __future__ import absolute_import, division, print_function
 from subprocess import check_call
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import SkipIfFS
 from tests.common.test_dimensions import create_single_exec_option_dimension
 from tests.util.filesystem_utils import get_fs_path
 class TestRewrittenFile(ImpalaTestSuite):
   """Tests that we gracefully handle when a file in HDFS is rewritten outside of Impala
   without issuing "invalidate metadata"."""
@@ -69,7 +70,7 @@ class TestRewrittenFile(ImpalaTestSuite):
     try:
       result = self.client.execute("select * from %s.%s" % (db_name, table_name))
       assert False, "Query was expected to fail"
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       assert expected_error in str(e)
     # Refresh the table and make sure we get results
@@ -78,7 +79,7 @@ class TestRewrittenFile(ImpalaTestSuite):
     assert result.data == [str(expected_new_count)]
   @SkipIfFS.read_past_eof
-  def test_new_file_shorter(self, vector, unique_database):
+  def test_new_file_shorter(self, unique_database):
     """Rewrites an existing file with a new shorter file."""
     # Full error is something like:
     # Metadata for file '...' appears stale. Try running "refresh
@@ -91,7 +92,7 @@ class TestRewrittenFile(ImpalaTestSuite):
     self.__overwrite_file_and_query(unique_database, table_name,
         self.LONG_FILE, self.SHORT_FILE, 'appears stale.', self.SHORT_FILE_NUM_ROWS)
-  def test_new_file_longer(self, vector, unique_database):
+  def test_new_file_longer(self, unique_database):
     """Rewrites an existing file with a new longer file."""
     # Full error is something like:
     # "File '..' has an invalid Parquet version number: ff4C
@@ -103,7 +104,7 @@ class TestRewrittenFile(ImpalaTestSuite):
         self.SHORT_FILE, self.LONG_FILE, 'invalid Parquet version number',
         self.LONG_FILE_NUM_ROWS)
-  def test_delete_file(self, vector, unique_database):
+  def test_delete_file(self, unique_database):
     """Deletes an existing file without refreshing metadata."""
     table_name = "delete_file"
     table_location = self.__get_test_table_location(unique_database)
@@ -120,7 +121,7 @@ class TestRewrittenFile(ImpalaTestSuite):
     try:
       result = self.client.execute("select * from %s.%s" % (unique_database, table_name))
       assert False, "Query was expected to fail"
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       assert 'No such file or directory' in str(e)
     # Refresh the table and make sure we get results

View File

@@ -16,7 +16,7 @@
 # under the License.
 from __future__ import absolute_import, division, print_function
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
 from tests.common.impala_test_suite import ImpalaTestSuite
@@ -86,7 +86,7 @@ class TestBeeswax(ImpalaTestSuite):
     try:
       fn()
       assert False, "Expected invalid handle"
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       assert "Query id" in str(e) and "not found" in str(e), str(e)
   def _assert_profile_access_denied(self, fn):
@@ -95,5 +95,5 @@ class TestBeeswax(ImpalaTestSuite):
     try:
       fn()
       assert False, "Expected invalid handle"
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       assert "is not authorized to access the runtime profile" in str(e), str(e)

View File

@@ -24,7 +24,7 @@ import decimal
 import math
 import random
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.test_dimensions import (
     add_mandatory_exec_option,
@@ -224,7 +224,7 @@ class TestDecimalFuzz(ImpalaTestSuite):
     try:
       result = self.execute_scalar(query, query_options)
-    except ImpalaBeeswaxException:
+    except IMPALA_CONNECTION_EXCEPTION:
       result = None
     if result is not None:
       result = decimal.Decimal(result)
@@ -301,7 +301,7 @@ class TestDecimalFuzz(ImpalaTestSuite):
     try:
       result = self.execute_scalar(query, query_options)
       assert int(result) == expected_result
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       if "You need to wrap the arguments in a CAST" not in str(e):
         # Sometimes the decimal inputs are incompatible with each other, so it's ok
         # to ignore this error.

View File

@@ -18,16 +18,15 @@
 # Targeted tests for decimal type.
 from __future__ import absolute_import, division, print_function
-from copy import copy
 import pytest
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.test_dimensions import (create_exec_option_dimension_from_dict,
     create_client_protocol_dimension, hs2_parquet_constraint)
-from tests.common.test_vector import ImpalaTestDimension
 from tests.util.filesystem_utils import IS_S3
 class TestDecimalQueries(ImpalaTestSuite):
   @classmethod
   def get_workload(cls):
@@ -58,6 +57,7 @@ class TestDecimalQueries(ImpalaTestSuite):
   def test_queries(self, vector):
     self.run_test_case('QueryTest/decimal', vector)
 # Tests involving DECIMAL typed expressions. The results depend on whether DECIMAL
 # version 1 or version 2 are enabled, so the .test file itself toggles the DECIMAL_V2
 # query option.
@@ -75,6 +75,7 @@ class TestDecimalExprs(ImpalaTestSuite):
   def test_exprs(self, vector):
     self.run_test_case('QueryTest/decimal-exprs', vector)
 # TODO: when we have a good way to produce Avro decimal data (e.g. upgrade Hive), we can
 # run Avro through the same tests as above instead of using avro_decimal_tbl.
 class TestAvroDecimalQueries(ImpalaTestSuite):
@@ -173,7 +174,7 @@ class TestDecimalOverflowExprs(ImpalaTestSuite):
     try:
       self.execute_query_using_client(self.client, query_1, vector)
       assert False, "Query was expected to fail"
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       assert "Decimal expression overflowed" in str(e)
     result = self.execute_query_expect_success(self.client,
@@ -188,7 +189,7 @@ class TestDecimalOverflowExprs(ImpalaTestSuite):
     try:
       self.execute_query_using_client(self.client, query_1, vector)
       assert False, "Query was expected to fail"
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       assert "Decimal expression overflowed" in str(e)
     result = self.execute_query_expect_success(self.client,
@@ -216,7 +217,7 @@ class TestDecimalOverflowExprs(ImpalaTestSuite):
     try:
       self.execute_query_using_client(self.client, query_2, vector)
       assert False, "Query was expected to fail"
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION as e:
       assert "Decimal expression overflowed" in str(e)
     result = self.execute_query_expect_success(self.client,

View File

@@ -19,13 +19,14 @@
 # two types of failures - cancellation of the query and a failure test hook.
 #
 from __future__ import absolute_import, division, print_function
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.test_dimensions import (
     create_single_exec_option_dimension,
     create_uncompressed_text_dimension)
 from time import sleep
 # Test injecting error logs in prepare phase and status::OK(). This tests one of race
 # conditions in error reporting (IMPALA-3385).
 class TestErrorLogs(ImpalaTestSuite):
@@ -67,5 +68,5 @@ class TestErrorLogs(ImpalaTestSuite):
       assert cancel_result.status_code == 0,\
           'Unexpected status code from cancel request: %s' % cancel_result
       # As long as impala did not crash we are good.
-    except ImpalaBeeswaxException:
+    except IMPALA_CONNECTION_EXCEPTION:
       return

View File

@@ -19,8 +19,7 @@
 from __future__ import absolute_import, division, print_function
 import pytest
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
-from tests.common.impala_cluster import ImpalaCluster
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import SkipIfLocal
 from tests.common.test_vector import ImpalaTestDimension
@@ -30,6 +29,7 @@ from tests.util.filesystem_utils import FILESYSTEM_PREFIX
 # Modifications to test
 MODIFICATION_TYPES=["delete_files", "delete_directory", "move_file", "append"]
 @SkipIfLocal.hdfs_client
 class TestHdfsFileMods(ImpalaTestSuite):
   """
@@ -53,7 +53,7 @@ class TestHdfsFileMods(ImpalaTestSuite):
   def get_workload(cls):
     return 'functional-query'
-  def setup_ext_table(self, vector, unique_database, new_table_location):
+  def setup_ext_table(self, unique_database, new_table_location):
     # Use HDFS commands to clone the table's files at the hdfs level
     old_table_location = "{0}/test-warehouse/tinytable".format(FILESYSTEM_PREFIX)
     call(["hdfs", "dfs", "-mkdir", new_table_location])
@@ -71,7 +71,7 @@ class TestHdfsFileMods(ImpalaTestSuite):
     new_table_location = "{0}/test-warehouse/{1}".format(FILESYSTEM_PREFIX,\
         unique_database)
-    self.setup_ext_table(vector, unique_database, new_table_location)
+    self.setup_ext_table(unique_database, new_table_location)
     # Query the table. If file handle caching is enabled, this will fill the cache.
     count_query = "select count(*) from {0}.t1".format(unique_database)
@@ -99,12 +99,12 @@ class TestHdfsFileMods(ImpalaTestSuite):
           new_table_location + "/data.csv"])
       call(["rm", local_tmp_location])
     else:
-      assert(false)
+      assert False
     # The query might fail, but nothing should crash.
     try:
       self.execute_query(count_query)
-    except ImpalaBeeswaxException as e:
+    except IMPALA_CONNECTION_EXCEPTION:
       pass
     # Invalidate metadata
# Invalidate metadata # Invalidate metadata

View File

@@ -35,7 +35,7 @@ from avro.datafile import DataFileReader
from avro.io import DatumReader from avro.io import DatumReader
import json import json
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
from tests.common.iceberg_test_suite import IcebergTestSuite from tests.common.iceberg_test_suite import IcebergTestSuite
from tests.common.skip import SkipIf, SkipIfFS, SkipIfDockerizedCluster from tests.common.skip import SkipIf, SkipIfFS, SkipIfDockerizedCluster
from tests.common.test_dimensions import add_exec_option_dimension from tests.common.test_dimensions import add_exec_option_dimension
@@ -144,7 +144,7 @@ class TestIcebergTable(IcebergTestSuite):
self.run_test_case('QueryTest/iceberg-truncate', vector, use_db=unique_database) self.run_test_case('QueryTest/iceberg-truncate', vector, use_db=unique_database)
@SkipIf.not_dfs @SkipIf.not_dfs
def test_drop_incomplete_table(self, vector, unique_database): def test_drop_incomplete_table(self, unique_database):
"""Test DROP TABLE when the underlying directory is deleted. In that case table """Test DROP TABLE when the underlying directory is deleted. In that case table
loading fails, but we should be still able to drop the table from Impala.""" loading fails, but we should be still able to drop the table from Impala."""
tbl_name = unique_database + ".synchronized_iceberg_tbl" tbl_name = unique_database + ".synchronized_iceberg_tbl"
@@ -226,7 +226,7 @@ class TestIcebergTable(IcebergTestSuite):
try: try:
self.client.wait_for_finished_timeout(handle, 30) self.client.wait_for_finished_timeout(handle, 30)
assert False assert False
except ImpalaBeeswaxException as e: except IMPALA_CONNECTION_EXCEPTION as e:
assert "Found conflicting files" in str(e) assert "Found conflicting files" in str(e)
# Test INSERT INTO during INSERT OVERWRITE, the exception closes the query handle. # Test INSERT INTO during INSERT OVERWRITE, the exception closes the query handle.
@@ -236,7 +236,7 @@ class TestIcebergTable(IcebergTestSuite):
try: try:
self.client.wait_for_finished_timeout(handle, 30) self.client.wait_for_finished_timeout(handle, 30)
assert False assert False
except ImpalaBeeswaxException as e: except IMPALA_CONNECTION_EXCEPTION as e:
assert "Found conflicting files" in str(e) assert "Found conflicting files" in str(e)
def test_ctas(self, vector, unique_database): def test_ctas(self, vector, unique_database):
@@ -440,7 +440,7 @@ class TestIcebergTable(IcebergTestSuite):
try: try:
self.rollback_to_ts(client, tbl_name, ts) self.rollback_to_ts(client, tbl_name, ts)
assert False, "Query should have failed" assert False, "Query should have failed"
except ImpalaBeeswaxException as e: except IMPALA_CONNECTION_EXCEPTION as e:
result = re.search(r".*no valid snapshot older than: (\d+)", str(e)) result = re.search(r".*no valid snapshot older than: (\d+)", str(e))
time_str = result.group(1) time_str = result.group(1)
snapshot_ts = int(time_str) snapshot_ts = int(time_str)
@@ -566,7 +566,6 @@ class TestIcebergTable(IcebergTestSuite):
tbl_name, snapshot_id)) tbl_name, snapshot_id))
assert " Iceberg snapshot id: {0}".format(snapshot_id) in data.data assert " Iceberg snapshot id: {0}".format(snapshot_id) in data.data
def impala_now(): def impala_now():
now_data = impalad_client.execute("select now()") now_data = impalad_client.execute("select now()")
return now_data.data[0] return now_data.data[0]
@@ -748,7 +747,7 @@ class TestIcebergTable(IcebergTestSuite):
self.run_test_case('QueryTest/iceberg-time-travel', vector, use_db=unique_database) self.run_test_case('QueryTest/iceberg-time-travel', vector, use_db=unique_database)
@SkipIf.not_dfs @SkipIf.not_dfs
def test_strings_utf8(self, vector, unique_database): def test_strings_utf8(self, unique_database):
# Create table # Create table
table_name = "ice_str_utf8" table_name = "ice_str_utf8"
qualified_table_name = "%s.%s" % (unique_database, table_name) qualified_table_name = "%s.%s" % (unique_database, table_name)
@@ -860,7 +859,7 @@ class TestIcebergTable(IcebergTestSuite):
return current_partition_spec return current_partition_spec
@SkipIf.not_dfs @SkipIf.not_dfs
def test_partition_spec_update_v1(self, vector, unique_database): def test_partition_spec_update_v1(self, unique_database):
# Create table # Create table
table_name = "ice_part" table_name = "ice_part"
qualified_table_name = "%s.%s" % (unique_database, table_name) qualified_table_name = "%s.%s" % (unique_database, table_name)
@@ -903,7 +902,7 @@ class TestIcebergTable(IcebergTestSuite):
assert old_truncate_s['transform'] == 'void' assert old_truncate_s['transform'] == 'void'
@SkipIf.not_dfs @SkipIf.not_dfs
def test_partition_spec_update_v2(self, vector, unique_database): def test_partition_spec_update_v2(self, unique_database):
# Create table # Create table
table_name = "ice_part" table_name = "ice_part"
qualified_table_name = "%s.%s" % (unique_database, table_name) qualified_table_name = "%s.%s" % (unique_database, table_name)
@@ -959,11 +958,11 @@ class TestIcebergTable(IcebergTestSuite):
assert truncate_s['field-id'] == 1004 assert truncate_s['field-id'] == 1004
@SkipIf.not_dfs @SkipIf.not_dfs
def test_writing_metrics_to_metadata_v1(self, vector, unique_database): def test_writing_metrics_to_metadata_v1(self, unique_database):
self._test_writing_metrics_to_metadata_impl(unique_database, 'ice_stats_v1', '1') self._test_writing_metrics_to_metadata_impl(unique_database, 'ice_stats_v1', '1')
@SkipIf.not_dfs @SkipIf.not_dfs
def test_writing_metrics_to_metadata_v2(self, vector, unique_database): def test_writing_metrics_to_metadata_v2(self, unique_database):
self._test_writing_metrics_to_metadata_impl(unique_database, 'ice_stats_v2', '2') self._test_writing_metrics_to_metadata_impl(unique_database, 'ice_stats_v2', '2')
def _test_writing_metrics_to_metadata_impl(self, unique_database, table_name, version): def _test_writing_metrics_to_metadata_impl(self, unique_database, table_name, version):
@@ -1111,7 +1110,7 @@ class TestIcebergTable(IcebergTestSuite):
use_db=unique_database) use_db=unique_database)
@pytest.mark.execute_serially @pytest.mark.execute_serially
def test_table_load_time_for_many_files(self, vector, unique_database): def test_table_load_time_for_many_files(self, unique_database):
if self.exploration_strategy() != 'exhaustive': if self.exploration_strategy() != 'exhaustive':
pytest.skip('runs only in exhaustive') pytest.skip('runs only in exhaustive')
tbl_name = unique_database + ".iceberg_many_files" tbl_name = unique_database + ".iceberg_many_files"
@@ -1129,7 +1128,7 @@ class TestIcebergTable(IcebergTestSuite):
time_limit = 20
assert elapsed_time < time_limit
-def test_consistent_scheduling(self, vector, unique_database):
+def test_consistent_scheduling(self, unique_database):
"""IMPALA-10914: This test verifies that Impala schedules scan ranges consistently for
Iceberg tables."""
def collect_split_stats(profile):
@@ -1152,7 +1151,7 @@ class TestIcebergTable(IcebergTestSuite):
split_stats = collect_split_stats(profile)
assert ref_split_stats == split_stats
-def test_scheduling_partitioned_tables(self, vector, unique_database):
+def test_scheduling_partitioned_tables(self, unique_database):
"""IMPALA-12765: Balance consecutive partitions better for Iceberg tables"""
# We are setting the replica_preference query option in this test, so let's create a
# local impala client.
@@ -1369,7 +1368,7 @@ class TestIcebergTable(IcebergTestSuite):
assert parquet_column_name_type_list == iceberg_column_name_type_list
@SkipIfFS.hive
-def test_hive_external_forbidden(self, vector, unique_database):
+def test_hive_external_forbidden(self, unique_database):
tbl_name = unique_database + ".hive_ext"
error_msg = ("cannot be loaded because it is an EXTERNAL table in the HiveCatalog "
"that points to another table. Query the original table instead.")
@@ -1475,7 +1474,7 @@ class TestIcebergTable(IcebergTestSuite):
self.run_test_case('QueryTest/iceberg-drop-partition', vector,
use_db=unique_database)
-def test_rollback_after_drop_partition(self, vector, unique_database):
+def test_rollback_after_drop_partition(self, unique_database):
table_name = "iceberg_drop_partition_rollback"
qualified_table_name = "{}.{}".format(unique_database, table_name)
create_table_stmt = """CREATE TABLE {}(identity_int int, unpartitioned_int int)
@@ -1704,7 +1703,7 @@ class TestIcebergV2Table(IcebergTestSuite):
unique_database)
@SkipIfFS.hive
-def test_delete_hive_read(self, vector, unique_database):
+def test_delete_hive_read(self, unique_database):
ice_delete = unique_database + ".ice_delete"
self.execute_query("""CREATE TABLE {} (i int, s string)
STORED BY ICEBERG

@@ -18,11 +18,12 @@
from __future__ import absolute_import, division, print_function
import pytest
import time
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.impala_cluster import ImpalaCluster
from tests.verifiers.metric_verifier import MetricVerifier
# TODO: Debug actions leak into other tests in the same suite (if not explicitly
# unset). Ensure they get unset between tests.
class TestFragmentLifecycleWithDebugActions(ImpalaTestSuite):
@@ -30,6 +31,7 @@ class TestFragmentLifecycleWithDebugActions(ImpalaTestSuite):
fragments"""
IN_FLIGHT_FRAGMENTS = "impala-server.num-fragments-in-flight"
+
@classmethod
def get_workload(self):
return 'functional'
@@ -43,7 +45,7 @@ class TestFragmentLifecycleWithDebugActions(ImpalaTestSuite):
try:
self.client.execute("SELECT COUNT(*) FROM functional.alltypes")
assert "Query should have thrown an error"
-except ImpalaBeeswaxException:
+except IMPALA_CONNECTION_EXCEPTION:
pass
for v in verifiers:
@@ -63,7 +65,7 @@ class TestFragmentLifecycleWithDebugActions(ImpalaTestSuite):
self.client.execute("SELECT COUNT(*) FROM functional.alltypes a JOIN [SHUFFLE] \
functional.alltypes b on a.id = b.id")
assert "Query should have thrown an error"
-except ImpalaBeeswaxException:
+except IMPALA_CONNECTION_EXCEPTION:
pass
for v in verifiers:
@@ -74,6 +76,7 @@ class TestFragmentLifecycleWithDebugActions(ImpalaTestSuite):
# TODO: Fix when we have cancellable RPCs.
v.wait_for_metric(self.IN_FLIGHT_FRAGMENTS, 0, timeout=125)
+
class TestFragmentLifecycle(ImpalaTestSuite):
def test_finst_cancel_when_query_complete(self):
"""Regression test for IMPALA-4295: if a query returns all its rows before all its
@@ -94,4 +97,5 @@ class TestFragmentLifecycle(ImpalaTestSuite):
# Query typically completes in < 2s, but if cross join is fully evaluated, will take >
# 10 minutes. Pick 2 minutes as a reasonable midpoint to avoid false negatives.
-assert end - now < 120, "Query took too long to complete: " + duration + "s"
+duration = end - now
+assert duration < 120, "Query took too long to complete: " + duration + "s"

@@ -18,10 +18,11 @@
# Validates limit on scan nodes
from __future__ import absolute_import, division, print_function
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.test_vector import ImpalaTestDimension
class TestLimit(ImpalaTestSuite):
LIMIT_VALUES = [1, 2, 3, 4, 5, 10, 100, 5000]
LIMIT_VALUES_CORE = [1, 5, 10, 5000]
@@ -77,7 +78,7 @@ class TestLimitBase(ImpalaTestSuite):
assert should_succeed, 'Query was expected to fail'
assert len(result.data) == expected_rows,\
'Wrong number of rows returned %d' % len(result.data)
-except ImpalaBeeswaxException as e:
+except IMPALA_CONNECTION_EXCEPTION as e:
assert not should_succeed, 'Query was not expected to fail: %s' % e
if (expected_error not in str(e)):
print(str(e))

@@ -21,10 +21,10 @@ from builtins import range
import pytest
from copy import copy
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.test_dimensions import (create_avro_snappy_dimension,
create_parquet_dimension)
from tests.common.impala_cluster import ImpalaCluster
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import (
SkipIfNotHdfsMinicluster,
@@ -122,7 +122,7 @@ class TestLowMemoryLimits(ImpalaTestSuite):
exec_options['default_spillable_buffer_size'] = "256k"
try:
self.run_test_case(tpch_query, new_vector)
-except ImpalaBeeswaxException as e:
+except IMPALA_CONNECTION_EXCEPTION as e:
if not expects_error: raise
found_expected_error = False
for error_msg in MEM_LIMIT_ERROR_MSGS:

@@ -19,7 +19,7 @@ from __future__ import absolute_import, division, print_function
import os
from copy import deepcopy
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfFS, SkipIfHive2, SkipIfNotHdfsMinicluster
from tests.common.test_dimensions import (
@@ -963,7 +963,7 @@ class TestMaxNestingDepth(ImpalaTestSuite):
try:
self.client.execute("explain select 1 from %s.above_max_depth" % unique_database)
assert False, "Expected table loading to fail."
-except ImpalaBeeswaxException as e:
+except IMPALA_CONNECTION_EXCEPTION as e:
assert "Type exceeds the maximum nesting depth" in str(e)

@@ -18,8 +18,8 @@
from __future__ import absolute_import, division, print_function
from collections import defaultdict
from datetime import datetime
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.impala_cluster import ImpalaCluster
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfFS, SkipIfLocal, SkipIfNotHdfsMinicluster
from tests.common.test_vector import HS2
@@ -1076,7 +1076,7 @@ class TestQueryStates(ImpalaTestSuite):
try:
self.client.fetch(query, handle)
assert False
-except ImpalaBeeswaxException:
+except IMPALA_CONNECTION_EXCEPTION:
pass
profile = self.client.get_runtime_profile(handle)

@@ -18,11 +18,12 @@
from __future__ import absolute_import, division, print_function
import pytest
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfFS, SkipIfLocal
from tests.common.test_dimensions import create_single_exec_option_dimension
# Tests to validate HDFS partitioning.
class TestPartitioning(ImpalaTestSuite):
@classmethod
@@ -47,7 +48,7 @@ class TestPartitioning(ImpalaTestSuite):
# Missing Coverage: Impala deals with boolean partitions created by Hive on a non-hdfs
# filesystem.
@SkipIfFS.hive
-def test_boolean_partitions(self, vector, unique_database):
+def test_boolean_partitions(self, unique_database):
# This test takes about a minute to complete due to the Hive commands that are
# executed. To cut down on runtime, limit the test to exhaustive exploration
# strategy.
@@ -86,7 +87,7 @@ class TestPartitioning(ImpalaTestSuite):
# INSERT into a boolean column is disabled in Impala due to this Hive bug.
try:
self.execute_query("insert into %s partition(bool_col=true) select 1" % full_name)
-except ImpalaBeeswaxException as e:
+except IMPALA_CONNECTION_EXCEPTION as e:
assert 'AnalysisException: INSERT into table with BOOLEAN partition column (%s) '\
'is not supported: %s' % ('b', full_name) in str(e)

@@ -23,14 +23,14 @@ import re
import sys
from copy import copy
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIfEC
from tests.common.test_dimensions import (
ImpalaTestDimension,
create_single_exec_option_dimension,
create_uncompressed_text_dimension)
class TestQueryMemLimit(ImpalaTestSuite):
"""Test class to do functional validation of per query memory limits.
@@ -113,7 +113,7 @@ class TestQueryMemLimit(ImpalaTestSuite):
try:
self.execute_query(query, exec_options, table_format=table_format)
assert should_succeed, "Query was expected to fail"
-except ImpalaBeeswaxException as e:
+except IMPALA_CONNECTION_EXCEPTION as e:
assert not should_succeed, "Query should not have failed: %s" % e

@@ -11,11 +11,12 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.test_dimensions import create_single_exec_option_dimension
from tests.common.test_dimensions import create_uncompressed_text_dimension
class TestScratchLimit(ImpalaTestSuite):
"""
This class tests the functionality of setting the scratch limit as a query option
@@ -87,7 +88,7 @@ class TestScratchLimit(ImpalaTestSuite):
try:
self.execute_query(self.spilling_sort_query, exec_option)
assert False, "Query was expected to fail"
-except ImpalaBeeswaxException as e:
+except IMPALA_CONNECTION_EXCEPTION as e:
assert expected_error % scratch_limit_in_bytes in str(e)
def test_with_zero_scratch_limit(self, vector):

@@ -23,8 +23,8 @@ import pytest
import tempfile
from subprocess import call, check_call
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.impala_cluster import ImpalaCluster
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfLocal
from tests.common.test_dimensions import (
@@ -390,13 +390,13 @@ class TestUdfExecution(TestUdfBase):
try:
self.run_test_case('QueryTest/udf-mem-limit', vector, use_db=unique_database)
assert False, "Query was expected to fail"
-except ImpalaBeeswaxException as e:
+except IMPALA_CONNECTION_EXCEPTION as e:
self._check_mem_limit_exception(e)
try:
self.run_test_case('QueryTest/uda-mem-limit', vector, use_db=unique_database)
assert False, "Query was expected to fail"
-except ImpalaBeeswaxException as e:
+except IMPALA_CONNECTION_EXCEPTION as e:
self._check_mem_limit_exception(e)
# It takes a long time for Impala to free up memory after this test, especially if
@@ -527,7 +527,7 @@ class TestUdfTargeted(TestUdfBase):
self.execute_query_using_client(
client, "select `{0}`.`pi_missing_jar`()".format(unique_database), vector)
assert False, "Query expected to fail"
-except ImpalaBeeswaxException as e:
+except IMPALA_CONNECTION_EXCEPTION as e:
assert "Failed to get file info" in str(e)
def test_libs_with_same_filenames(self, vector, unique_database):

@@ -18,8 +18,7 @@
from __future__ import absolute_import, division, print_function
import threading
from time import sleep
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
-from tests.common.impala_connection import create_connection
+from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION, create_connection
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.test_result_verifier import error_msg_startswith
@@ -154,7 +153,7 @@ def __run_cancel_query_and_validate_state(client, query, exec_option,
if not use_kill_query_statement:
try:
client.close_query(handle)
-except ImpalaBeeswaxException as e:
+except IMPALA_CONNECTION_EXCEPTION as e:
close_error = e
# Before accessing fetch_results_error we need to join the fetch thread
@@ -202,5 +201,5 @@ def __fetch_results(query, handle):
try:
new_client = ImpalaTestSuite.create_impala_client()
new_client.fetch(query, handle)
-except ImpalaBeeswaxException as e:
+except IMPALA_CONNECTION_EXCEPTION as e:
threading.current_thread().fetch_results_error = e
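
The change applied across these files follows one pattern: negative tests now catch the connection-agnostic IMPALA_CONNECTION_EXCEPTION from tests.common.impala_connection instead of the Beeswax-specific ImpalaBeeswaxException, so the same test body works whether the suite runs against a Beeswax or an HS2 client. A minimal sketch of that pattern follows; the test class, query, and expected error text are illustrative only and are not part of this patch.

from tests.common.impala_connection import IMPALA_CONNECTION_EXCEPTION
from tests.common.impala_test_suite import ImpalaTestSuite


class TestExpectedFailure(ImpalaTestSuite):
  """Hypothetical negative test showing the exception-handling pattern."""

  def test_query_fails(self):
    try:
      # Query a table that does not exist; the statement is only illustrative.
      self.client.execute("select * from no_such_db.no_such_table")
      assert False, "Query was expected to fail"
    except IMPALA_CONNECTION_EXCEPTION as e:
      # The shared exception type covers errors raised by both the Beeswax and
      # HS2 test clients, so the assertion no longer depends on
      # ImpalaBeeswaxException.
      assert "Could not resolve table reference" in str(e)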