Mirror of https://github.com/apache/impala.git, synced 2025-12-19 18:12:08 -05:00
All logs, test results and SQL files generated during data loading and testing are now
consolidated under a single new directory $IMPALA_HOME/logs. The goal is to simplify
archiving in Jenkins runs and debugging. The new structure is as follows:

  $IMPALA_HOME/logs/cluster              - logs of Hadoop components and Impala
  $IMPALA_HOME/logs/data_loading         - logs and SQL files produced in data loading
  $IMPALA_HOME/logs/fe_tests             - logs and test output of Frontend unit tests
  $IMPALA_HOME/logs/be_tests             - logs and test output of Backend unit tests
  $IMPALA_HOME/logs/ee_tests             - logs and test output of end-to-end tests
  $IMPALA_HOME/logs/custom_cluster_tests - logs and test output of custom cluster tests

I tested this change with a full data load which was successful.

Change-Id: Ief1f58f3320ec39d31b3c6bc6ef87f58ff7dfdfa
Reviewed-on: http://gerrit.cloudera.org:8080/2456
Reviewed-by: Alex Behm <alex.behm@cloudera.com>
Tested-by: Internal Jenkins
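As a rough sketch only (not part of the commit), the consolidated layout above could be prepared along these lines; the IMPALA_CLUSTER_LOGS_DIR variable that run-hbase.sh reads below is assumed to point at the logs/cluster subdirectory:

  # Hypothetical illustration of the log layout described in the commit message.
  export IMPALA_CLUSTER_LOGS_DIR="${IMPALA_HOME}/logs/cluster"
  for d in cluster data_loading fe_tests be_tests ee_tests custom_cluster_tests; do
    mkdir -p "${IMPALA_HOME}/logs/${d}"
  done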
91 lines, 3.1 KiB, Bash, Executable File
#!/bin/bash
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
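# Fail on any error, on use of an unset variable, or on a failure inside a
# pipeline; the ERR trap prints the script name, the failing line number, and
# the text of that line (looked up with awk).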
set -euo pipefail
trap 'echo Error in $0 at line $LINENO: $(cd "'$PWD'" && awk "NR == $LINENO" $0)' ERR
CLUSTER_BIN=${IMPALA_HOME}/testdata/bin
HBASE_JAAS_CLIENT=${HBASE_CONF_DIR}/hbase-jaas-client.conf
HBASE_JAAS_SERVER=${HBASE_CONF_DIR}/hbase-jaas-server.conf
HBASE_LOGDIR=${IMPALA_CLUSTER_LOGS_DIR}/hbase

# Kill and clean data for a clean start.
${CLUSTER_BIN}/kill-hbase.sh > /dev/null 2>&1
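# The generated hbase-env.sh below points HBase logs and PID files at
# ${HBASE_LOGDIR}, i.e. the consolidated cluster log directory described in the
# commit message above.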
# Gives HBase startup the proper environment
cat > ${HBASE_CONF_DIR}/hbase-env.sh <<EOF
#
# This file is auto-generated by run-hbase.sh. Do not edit.
#
export JAVA_HOME=${JAVA_HOME}
export HBASE_LOG_DIR=${HBASE_LOGDIR}
export HBASE_PID_DIR=${HBASE_LOGDIR}
EOF
# Put zookeeper things in the logs/cluster/zoo directory.
# (See hbase.zookeeper.property.dataDir in hbase-site.xml)
rm -rf ${IMPALA_CLUSTER_LOGS_DIR}/zoo
mkdir -p ${IMPALA_CLUSTER_LOGS_DIR}/zoo
mkdir -p ${HBASE_LOGDIR}
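# Kerberos-only setup: source the minikdc environment, check for the JAAS
# configs (presumably generated by "create-test-configuration.sh -k"), and wire
# them into the HBase daemons via hbase-env.sh.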
if ${CLUSTER_DIR}/admin is_kerberized; then
  #
  # Making a kerberized cluster... set some more environment
  # variables and other magic.
  #
  . ${MINIKDC_ENV}

  if [ ! -f "${HBASE_JAAS_CLIENT}" ]; then
    echo "Can't find ${HBASE_JAAS_CLIENT}"
    exit 1
  fi

  if [ ! -f "${HBASE_JAAS_SERVER}" ]; then
    echo "Can't find ${HBASE_JAAS_SERVER}"
    exit 1
  fi

  # Catch the case where the /hbase directory is not owned by the
  # hbase user. This can happen when the cluster was formed without
  # kerberos and then remade with "create-test-configuration.sh -k".
  if HBASE_LS_OUTPUT=`hadoop fs -ls -d /hbase 2>&1`; then
    if echo ${HBASE_LS_OUTPUT} | tail -n 1 | grep -q -v " hbase "; then
      # /hbase not owned by 'hbase'. Failure.
      echo "The HDFS /hbase directory is not owned by \"hbase\"."
      echo "This can happen if the cluster was created without kerberos,"
      echo "and then switched to kerberos without a reformat."
    fi
  fi

  # These ultimately become args to java when it starts up hbase
  K1="-Djava.security.krb5.conf=${KRB5_CONFIG}"
  K2="${JAVA_KRB5_DEBUG}"
  K3="-Djava.security.auth.login.config=${HBASE_JAAS_CLIENT}"
  K4="-Djava.security.auth.login.config=${HBASE_JAAS_SERVER}"
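  # The client JAAS config (K3) goes into the generic HBASE_OPTS, while the
  # server JAAS config (K4) goes into the ZooKeeper, master, and regionserver
  # daemon opts below.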
  # Add some kerberos things...
  cat >> ${HBASE_CONF_DIR}/hbase-env.sh <<EOF
export HBASE_OPTS="${K1} ${K2} ${K3}"
export HBASE_MANAGES_ZK=true
export HBASE_ZOOKEEPER_OPTS="${K1} ${K2} ${K4}"
export HBASE_MASTER_OPTS="${K1} ${K2} ${K4}"
export HBASE_REGIONSERVER_OPTS="${K1} ${K2} ${K4}"
EOF
fi
# To work around HBase bug (HBASE-4467), unset $HADOOP_HOME before calling hbase
HADOOP_HOME=
# Start HBase and 3 regionservers.
$HBASE_HOME/bin/start-hbase.sh 2>&1 | tee ${HBASE_LOGDIR}/hbase-startup.out
# TODO: Remove once the race between master and RS has been resolved.
# Note wait-for-hbase-master.py requires having org.apache.zookeeper.ZooKeeperMain on the
# classpath. ZooKeeper has conflicts with JARs added as part of set-classpath.sh, so
# generate a valid classpath using the 'hadoop classpath' command.
export CLASSPATH=`hadoop classpath`
${CLUSTER_BIN}/wait-for-hbase-master.py
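# Once the master is up, bring up three regionservers on the local host.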
$HBASE_HOME/bin/local-regionservers.sh start 1 2 3 2>&1 | \
  tee ${HBASE_LOGDIR}/hbase-rs-startup.out