Mirror of https://github.com/apache/impala.git (synced 2025-12-19 18:12:08 -05:00)
To remove the dependency on Python 2, existing scripts need to use python3
rather than python. These commands find those locations (for impala-python
and regular python):

git grep impala-python | grep -v impala-python3 | grep -v impala-python-common | grep -v init-impala-python
git grep bin/python | grep -v python3

This removes or switches most of these locations by various means:
1. If a python file has a #!/bin/env impala-python (or python) but doesn't
   have a main function, it removes the hash-bang and makes sure that the
   file is not executable (a hypothetical sketch follows this message).
2. Most scripts can simply switch from impala-python to impala-python3
   (or python to python3) with minimal changes.
3. The cm-api pypi package (which doesn't support Python 3) has been
   replaced by the cm-client pypi package and interfaces have changed.
   Rather than migrating the code (which hasn't been used in years), this
   deletes the old code and stops installing cm-api into the virtualenv.
   The code can be restored and revamped if there is any interest in
   interacting with CM clusters.
4. This switches tests/comparison over to impala-python3, but this code has
   bit-rotted. Some pieces can be run manually, but it can't be fully
   verified with Python 3. It shouldn't hold back the migration on its own.
5. This also replaces locations of impala-python in comments /
   documentation / READMEs.
6. kazoo (used for interacting with HBase) needed to be upgraded to a
   version that supports Python 3. The newest version of kazoo requires
   upgrades of other component versions, so this uses kazoo 2.8.0 to avoid
   needing other upgrades.

The two remaining uses of impala-python are:
- bin/cmake_aux/create_virtualenv.sh
- bin/impala-env-versioned-python
These will be removed separately when we drop Python 2 support completely.
In particular, these are useful for testing impala-shell with Python 2
until we stop supporting Python 2 for impala-shell.

The docker-based tests still use /usr/bin/python, but this can be switched
over independently (and doesn't impact impala-python).

Testing:
- Ran core job
- Ran build + dataload on Centos 7, Redhat 8
- Manual testing of individual scripts (except some bitrotted areas like
  the random query generator)

Change-Id: If209b761290bc7e7c716c312ea757da3e3bca6dc
Reviewed-on: http://gerrit.cloudera.org:8080/23468
Reviewed-by: Michael Smith <michael.smith@cloudera.com>
Tested-by: Michael Smith <michael.smith@cloudera.com>
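Step 1 above is a mechanical transformation; a minimal sketch of it, with
hypothetical naming and not the actual script used by this change, might
look like:

# Hypothetical sketch (not part of this change): remove the hash-bang from
# Python files that have no main entry point, per step 1 above.
import os
import stat
import sys


def strip_hashbang(path):
  with open(path) as f:
    lines = f.readlines()
  # Leave files alone if they have no hash-bang or look directly runnable.
  if not lines or not lines[0].startswith("#!"):
    return
  if any("__main__" in line for line in lines):
    return
  with open(path, "w") as f:
    f.writelines(lines[1:])
  # Clear the executable bits so the file can no longer be invoked directly.
  mode = os.stat(path).st_mode
  os.chmod(path, mode & ~(stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))


if __name__ == "__main__":
  for path in sys.argv[1:]:
    strip_hashbang(path)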
233 lines
9.3 KiB
Python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import, division, print_function
from builtins import range
import logging
import pytest
from time import sleep, time

from tests.util.auto_scaler import AutoScaler
from tests.util.concurrent_workload import ConcurrentWorkload
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite

LOG = logging.getLogger("test_auto_scaling")
TOTAL_BACKENDS_METRIC_NAME = "cluster-membership.backends.total"


class TestAutoScaling(CustomClusterTestSuite):
  """This class contains tests that exercise the logic related to scaling clusters up
  and down by adding and removing groups of executors."""

  @classmethod
  def setup_class(cls):
    if cls.exploration_strategy() != 'exhaustive':
      pytest.skip('runs only in exhaustive')
    super(TestAutoScaling, cls).setup_class()

  INITIAL_STARTUP_TIME_S = 10
  STATE_CHANGE_TIMEOUT_S = 60
  # This query will scan two partitions (month = 1, 2) and thus will have 1 fragment
  # instance per executor on groups of size 2. Each partition has 2 rows, so it performs
  # two comparisons and should take around 2 seconds to complete.
  QUERY = """select * from functional_parquet.alltypestiny where month < 3
      and id + random() < sleep(1000)"""

  def _get_total_admitted_queries(self):
    admitted_queries = self.impalad_test_service.get_total_admitted_queries(
        "default-pool")
    LOG.info("Current total admitted queries: %s", admitted_queries)
    return admitted_queries

  def _get_num_backends(self):
    metric_val = self.impalad_test_service.get_metric_value(TOTAL_BACKENDS_METRIC_NAME)
    LOG.info("Getting metric %s : %s", TOTAL_BACKENDS_METRIC_NAME, metric_val)
    return metric_val

  def _get_num_running_queries(self):
    running_queries = self.impalad_test_service.get_num_running_queries("default-pool")
    LOG.info("Current running queries: %s", running_queries)
    return running_queries

  def test_single_workload(self):
    """This test exercises the auto-scaling logic in the admission controller. It spins
    up a base cluster (coordinator, catalog, statestore), runs a workload to initiate a
    scale-up event as the queries start queuing, then stops the workload and observes
    that the cluster gets shut down."""
    GROUP_SIZE = 2
    EXECUTOR_SLOTS = 3
    auto_scaler = AutoScaler(executor_slots=EXECUTOR_SLOTS, group_size=GROUP_SIZE)
    workload = None
    try:
      auto_scaler.start()
      sleep(self.INITIAL_STARTUP_TIME_S)

      workload = ConcurrentWorkload(self.QUERY, num_streams=5)
      LOG.info("Starting workload")
      workload.start()

      # Wait for workers to spin up
      cluster_size = GROUP_SIZE + 1  # +1 to include coordinator.
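      # Polling idiom used throughout these tests: sleep() returns None (falsy), so
      # each generator element re-evaluates the condition and, while it is still
      # false, sleeps for one second. any() short-circuits as soon as the condition
      # holds; if the timeout expires first, the assert fires.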
      assert any(self._get_num_backends() >= cluster_size or sleep(1)
                 for _ in range(self.STATE_CHANGE_TIMEOUT_S)), \
          "Number of backends did not increase within %s s" % self.STATE_CHANGE_TIMEOUT_S
      assert self.impalad_test_service.get_metric_value(
          "cluster-membership.executor-groups.total-healthy") >= 1

      # Wait until we have admitted at least 10 queries
      assert any(self._get_total_admitted_queries() >= 10 or sleep(1)
                 for _ in range(self.STATE_CHANGE_TIMEOUT_S)), \
          "Did not admit enough queries within %s s" % self.STATE_CHANGE_TIMEOUT_S
      # Wait for the second executor group to start
      cluster_size = (2 * GROUP_SIZE) + 1
      assert any(self._get_num_backends() >= cluster_size or sleep(1)
                 for _ in range(self.STATE_CHANGE_TIMEOUT_S)), \
          "Number of backends did not reach %s within %s s" % (
              cluster_size, self.STATE_CHANGE_TIMEOUT_S)
      assert self.impalad_test_service.get_metric_value(
          "cluster-membership.executor-groups.total-healthy") >= 2

      LOG.info("Stopping workload")
      workload.stop()

      # Wait for workers to spin down
      self.impalad_test_service.wait_for_metric_value(
          TOTAL_BACKENDS_METRIC_NAME, 1,
          timeout=self.STATE_CHANGE_TIMEOUT_S, interval=1)
      assert self.impalad_test_service.get_metric_value(
          "cluster-membership.executor-groups.total") == 0

    finally:
      if workload:
        workload.stop()
      LOG.info("Stopping auto scaler")
      auto_scaler.stop()

  def test_single_group_maxed_out(self):
    """This test starts an auto scaler and limits it to a single executor group. It then
    makes sure that the query throughput does not exceed the expected limit."""
    GROUP_SIZE = 2
    EXECUTOR_SLOTS = 3
    auto_scaler = AutoScaler(executor_slots=EXECUTOR_SLOTS, group_size=GROUP_SIZE,
                             max_groups=1, coordinator_slots=EXECUTOR_SLOTS)
    workload = None
    try:
      auto_scaler.start()
      sleep(self.INITIAL_STARTUP_TIME_S)

      workload = ConcurrentWorkload(self.QUERY, num_streams=5)
      LOG.info("Starting workload")
      workload.start()

      # Wait for workers to spin up
      cluster_size = GROUP_SIZE + 1  # +1 to include coordinator.
      self.impalad_test_service.wait_for_metric_value(
          TOTAL_BACKENDS_METRIC_NAME, cluster_size,
          timeout=self.STATE_CHANGE_TIMEOUT_S, interval=1)

      # Wait until we have admitted at least 10 queries
      assert any(self._get_total_admitted_queries() >= 10 or sleep(1)
                 for _ in range(self.STATE_CHANGE_TIMEOUT_S)), \
          "Did not admit enough queries within %s s" % self.STATE_CHANGE_TIMEOUT_S

      # Sample the number of running queries for a while
      SAMPLE_NUM_RUNNING_S = 30
      end_time = time() + SAMPLE_NUM_RUNNING_S
      num_running = []
      while time() < end_time:
        num_running.append(self._get_num_running_queries())
        sleep(1)

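      # With five concurrent streams but only EXECUTOR_SLOTS slots in the single
      # group, the group should saturate: staying below EXECUTOR_SLOTS would mean
      # under-utilization, and exceeding it would mean over-admission.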
      # Must reach EXECUTOR_SLOTS but not exceed it
      assert max(num_running) == EXECUTOR_SLOTS, \
          "Unexpected number of running queries: %s" % num_running

      # Check that only a single group started
      assert self.impalad_test_service.get_metric_value(
          "cluster-membership.executor-groups.total-healthy") == 1

      LOG.info("Stopping workload")
      workload.stop()

      # Wait for workers to spin down
      self.impalad_test_service.wait_for_metric_value(
          TOTAL_BACKENDS_METRIC_NAME, 1,
          timeout=self.STATE_CHANGE_TIMEOUT_S, interval=1)
      assert self.impalad_test_service.get_metric_value(
          "cluster-membership.executor-groups.total") == 0

    finally:
      if workload:
        workload.stop()
      LOG.info("Stopping auto scaler")
      auto_scaler.stop()

  def test_sequential_startup(self):
    """This test starts an executor group sequentially and observes that no queries are
    admitted until the group has been fully started."""
    # Larger group size so it takes a while to start up
    GROUP_SIZE = 4
    EXECUTOR_SLOTS = 3
    auto_scaler = AutoScaler(executor_slots=EXECUTOR_SLOTS, group_size=GROUP_SIZE,
                             start_batch_size=1, max_groups=1)
    workload = None
    try:
      auto_scaler.start()
      sleep(self.INITIAL_STARTUP_TIME_S)

      workload = ConcurrentWorkload(self.QUERY, num_streams=5)
      LOG.info("Starting workload")
      workload.start()

      # Wait for the first executor to start up
      self.impalad_test_service.wait_for_metric_value(
          "cluster-membership.executor-groups.total", 1,
          timeout=self.STATE_CHANGE_TIMEOUT_S, interval=1)

      # Wait for the remaining executors to start up and make sure that no queries
      # are admitted during startup
      end_time = time() + self.STATE_CHANGE_TIMEOUT_S
      startup_complete = False
      cluster_size = GROUP_SIZE + 1  # +1 to include coordinator.
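      # Invariant: as long as fewer than cluster_size backends are up, nothing may
      # have been admitted; conversely, the first admitted query implies the group
      # is complete, at which point the loop can exit.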
      while time() < end_time:
        num_admitted = self._get_total_admitted_queries()
        num_backends = self._get_num_backends()
        if num_backends < cluster_size:
          assert num_admitted == 0, "%s/%s backends started but %s queries have " \
              "already been admitted." % (num_backends, cluster_size, num_admitted)
        if num_admitted > 0:
          assert num_backends == cluster_size
          startup_complete = True
          break
        sleep(1)

      assert startup_complete, "Did not start up in %s s" % self.STATE_CHANGE_TIMEOUT_S

      LOG.info("Stopping workload")
      workload.stop()

      # Wait for workers to spin down
      self.impalad_test_service.wait_for_metric_value(
          TOTAL_BACKENDS_METRIC_NAME, 1,
          timeout=self.STATE_CHANGE_TIMEOUT_S, interval=1)
      assert self.impalad_test_service.get_metric_value(
          "cluster-membership.executor-groups.total") == 0

    finally:
      if workload:
        workload.stop()
      LOG.info("Stopping auto scaler")
      auto_scaler.stop()