mirror of
https://github.com/apache/impala.git
To remove the dependency on Python 2, existing scripts need to use python3
rather than python. These commands find those locations (for impala-python
and regular python):

  git grep impala-python | grep -v impala-python3 | grep -v impala-python-common | grep -v init-impala-python
  git grep bin/python | grep -v python3

This removes or switches most of these locations by various means:

1. If a Python file has a #!/bin/env impala-python (or python) shebang but
   doesn't have a main function, the shebang is removed and the file is made
   non-executable (sketched below).
2. Most scripts can simply switch from impala-python to impala-python3 (or
   python to python3) with minimal changes.
3. The cm-api pypi package (which doesn't support Python 3) has been replaced
   by the cm-client pypi package, and the interfaces have changed. Rather
   than migrating the code (which hasn't been used in years), this deletes
   the old code and stops installing cm-api into the virtualenv. The code
   can be restored and revamped if there is any interest in interacting with
   CM clusters.
4. This switches tests/comparison over to impala-python3, but that code has
   bit-rotted. Some pieces can be run manually, but it can't be fully
   verified with Python 3. It shouldn't hold back the migration on its own.
5. This also replaces mentions of impala-python in comments, documentation,
   and READMEs.
6. kazoo (used for interacting with HBase) needed to be upgraded to a
   version that supports Python 3. The newest version of kazoo requires
   upgrades of other component versions, so this uses kazoo 2.8.0 to avoid
   needing other upgrades.

The two remaining uses of impala-python are:
 - bin/cmake_aux/create_virtualenv.sh
 - bin/impala-env-versioned-python
These will be removed separately when we drop Python 2 support completely.
In particular, they are useful for testing impala-shell with Python 2 until
we stop supporting Python 2 for impala-shell.

The docker-based tests still use /usr/bin/python, but this can be switched
over independently (and doesn't impact impala-python).

Testing:
 - Ran core job
 - Ran build + dataload on Centos 7, Redhat 8
 - Manual testing of individual scripts (except some bitrotted areas like
   the random query generator)

Change-Id: If209b761290bc7e7c716c312ea757da3e3bca6dc
Reviewed-on: http://gerrit.cloudera.org:8080/23468
Reviewed-by: Michael Smith <michael.smith@cloudera.com>
Tested-by: Michael Smith <michael.smith@cloudera.com>
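As an illustration of step 1 above, a minimal, hypothetical sketch (not the
actual script used for this change) of how files with a python shebang but no
entry point could be found and made non-executable:

  #!/usr/bin/env python3
  # Hypothetical helper illustrating step 1: walk a tree looking for Python
  # files whose first line is a python/impala-python shebang but which never
  # check __main__, then clear their executable bits.
  import os
  import stat
  import sys

  def shebang_only_files(root):
    for dirpath, _, filenames in os.walk(root):
      for name in filenames:
        if not name.endswith('.py'):
          continue
        path = os.path.join(dirpath, name)
        with open(path) as f:
          text = f.read()
        first_line = text.split('\n', 1)[0]
        if first_line.startswith('#!') and 'python' in first_line \
            and '__main__' not in text:
          yield path

  def drop_executable_bit(path):
    mode = os.stat(path).st_mode
    os.chmod(path, mode & ~(stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))

  if __name__ == '__main__':
    for path in shebang_only_files(sys.argv[1]):
      drop_executable_bit(path)
      print(path)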
113 lines
4.5 KiB
Python
#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import, division, print_function
import os
import sys

kerberize = os.environ.get('IMPALA_KERBERIZE') == 'true'
hive_major_version = int(os.environ['IMPALA_HIVE_VERSION'][0])


def _get_system_ram_mb():
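  """Return the total system RAM in MB, as read from /proc/meminfo."""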
  lines = open("/proc/meminfo").readlines()
  memtotal_line = [l for l in lines if l.startswith('MemTotal')][0]
  mem_kb = int(memtotal_line.split()[1])
  return mem_kb // 1024


def _get_yarn_nm_ram_mb():
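  """Pick a memory limit in MB for the YARN NodeManager based on available RAM."""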
  sys_ram = _get_system_ram_mb()
  available_ram_gb = int(os.getenv("IMPALA_CLUSTER_MAX_MEM_GB", str(sys_ram // 1024)))
  # Fit into the following envelope:
  # - need 4GB at a bare minimum
  # - leave at least 20G for other services
  # - don't need more than 48G
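  # (e.g. with 64GB of system RAM: min(max(64 * 1024 - 20 * 1024, 4096), 48 * 1024)
  #  = 45056MB)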
  ret = min(max(available_ram_gb * 1024 - 20 * 1024, 4096), 48 * 1024)
  print("Configuring Yarn NM to use {0}MB RAM".format(ret), file=sys.stderr)
  return ret


CONFIG = {
  # Host/port configs
  'yarn.resourcemanager.webapp.address': '${EXTERNAL_LISTEN_HOST}:${YARN_WEBUI_PORT}',
  'yarn.nodemanager.address': '${INTERNAL_LISTEN_HOST}:${NODEMANAGER_PORT}',
  'yarn.nodemanager.localizer.address': '${INTERNAL_LISTEN_HOST}:${NODEMANAGER_LOCALIZER_PORT}',
  'yarn.nodemanager.webapp.address': '${INTERNAL_LISTEN_HOST}:${NODEMANAGER_WEBUI_PORT}',

  # Directories
  'yarn.nodemanager.local-dirs': '${NODE_DIR}/var/lib/hadoop-yarn/cache/${USER}/nm-local-dir',
  'yarn.nodemanager.log-dirs': '${NODE_DIR}/var/log/hadoop-yarn/containers',

  # Set to a large enough value that the logs of all the containers ever created in
  # a Jenkins run will be retained.
  'yarn.nodemanager.log.retain-seconds': 86400,

  # Enable the MR shuffle service, which is also used by Tez.
  'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
  'yarn.nodemanager.aux-services.mapreduce_shuffle.class': 'org.apache.hadoop.mapred.ShuffleHandler',

  # Disable vmem checking, since vmem is essentially free, and tasks
  # fail with vmem limit errors otherwise.
  'yarn.nodemanager.vmem-check-enabled': 'false',

  # Limit memory used by the NM based on the memory available on the machine
  # (see _get_yarn_nm_ram_mb() above), to speed up data-loading.
  'yarn.nodemanager.resource.memory-mb': _get_yarn_nm_ram_mb(),

  # Allow YARN to run with as little as 3GB of disk free; otherwise it hangs completely.
  # We avoid disabling YARN disk monitoring entirely, because multiple jobs could then
  # use up all the disk in a scenario where they would otherwise complete sequentially.
  'yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage': 99,

  # Increase the YARN MapReduce AM container resources to 2GB to avoid dataload failures.
  'yarn.app.mapreduce.am.resource.mb': 2048,

  # Increase the YARN minimum container size to 2GB to avoid dataload failures.
  'yarn.scheduler.minimum-allocation-mb': 2048
}

app_classpath = [
  # Default classpath as provided by Hadoop: these environment variables are not
  # expanded by our config templating, but rather evaluated and expanded by
  # YARN itself, in a context where the various _HOMEs have been defined.
  '$HADOOP_CONF_DIR',
  '$HADOOP_COMMON_HOME/share/hadoop/common/*',
  '$HADOOP_COMMON_HOME/share/hadoop/common/lib/*',
  '$HADOOP_HDFS_HOME/share/hadoop/hdfs/*',
  '$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*',
  '$HADOOP_YARN_HOME/share/hadoop/yarn/*',
  '$HADOOP_YARN_HOME/share/hadoop/yarn/lib/*']

# Hive 3 needs Tez on the classpath.
if hive_major_version == 3:
  app_classpath += [
      '${TEZ_HOME}/*',
      '${TEZ_HOME}/lib/*']

CONFIG['yarn.application.classpath'] = ",".join(app_classpath)

if kerberize:
  CONFIG.update({
    'yarn.resourcemanager.keytab': '${KRB5_KTNAME}',
    'yarn.resourcemanager.principal': '${MINIKDC_PRINC_USER}',
    'yarn.nodemanager.keytab': '${KRB5_KTNAME}',
    'yarn.nodemanager.principal': '${MINIKDC_PRINC_USER}',
  })
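
# ---------------------------------------------------------------------------
# Illustration only (not part of the original template): a minimal sketch of
# how a CONFIG dict like the one above could be rendered as a Hadoop-style
# yarn-site.xml. The real rendering (and the expansion of the ${...}
# placeholders) is handled by Impala's config templating; the __main__ guard
# keeps this demo from running when the file is imported as a template.
if __name__ == '__main__':
  def _to_hadoop_xml(config):
    parts = ['<?xml version="1.0"?>', '<configuration>']
    for name, value in sorted(config.items()):
      parts.append('  <property>')
      parts.append('    <name>{0}</name>'.format(name))
      parts.append('    <value>{0}</value>'.format(value))
      parts.append('  </property>')
    parts.append('</configuration>')
    return '\n'.join(parts)

  print(_to_hadoop_xml(CONFIG))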