Mirror of https://github.com/apache/impala.git, synced 2026-01-07 09:02:19 -05:00
The original error reporting relied on $0 being accessible from the current working dir, which failed if a script changed the working dir and $0 was relative. This updates the error reporting command to cd back to the original dir before accessing $0.

Change-Id: I2185af66e35e29b41dbe1bb08de24200bacea8a1
Reviewed-on: http://gerrit.cloudera.org:8080/1666
Reviewed-by: Casey Ching <casey@cloudera.com>
Tested-by: Internal Jenkins
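A minimal standalone sketch of the pattern this commit describes (a toy script, not the Impala file itself; the cd /tmp and false commands are only there to simulate a failure after a directory change): because $PWD is expanded when the trap is installed, while $0 and $LINENO are expanded only when it fires, the handler can still read a relative $0 after the script has changed directories.

#!/bin/bash
# Illustrative sketch only; run it as ./sketch.sh from its own directory.
set -euo pipefail

# The outer single quotes defer $0, $LINENO and the command substitution until the
# trap fires; the embedded "'$PWD'" is expanded right now, baking the original
# working directory into the handler.
trap 'echo Error in $0 at line $LINENO: $(cd "'$PWD'" && awk "NR == $LINENO" $0)' ERR

cd /tmp   # simulate a script that changes its working directory
false     # any failing command now reports the file, line number, and source line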
42 lines
1.3 KiB
Bash
Executable File
#!/bin/bash
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#

set -euo pipefail
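# On any error, report which script failed and at which line, and echo the offending
# source line. $PWD is expanded when the trap is installed, so a relative $0 can still
# be resolved even if the script later changes its working directory.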
trap 'echo Error in $0 at line $LINENO: $(cd "'$PWD'" && awk "NR == $LINENO" $0)' ERR

# Create a cache pool and encryption keys for tests

PREVIOUS_PRINCIPAL=""  # Kerberos principal to switch back to at the end, if any
CACHEADMIN_ARGS=""     # Ownership args for 'hdfs cacheadmin -addPool' on kerberized clusters

# If we're kerberized, we need to become hdfs for this:
if ${CLUSTER_DIR}/admin is_kerberized; then
  PREVIOUS_PRINCIPAL=`klist | grep ^Default | awk '{print $3}'`
  PREVIOUS_USER=`echo ${PREVIOUS_PRINCIPAL} | awk -F/ '{print $1}'`
  CACHEADMIN_ARGS="-group supergroup -owner ${PREVIOUS_USER}"
  kinit -k -t ${KRB5_KTNAME} ${MINIKDC_PRINC_HDFS}
fi

if [[ $TARGET_FILESYSTEM == hdfs ]]; then  # Otherwise assume KMS isn't setup.
  # Create encryption keys for HDFS encryption tests. Keys are stored by the KMS.
  EXISTING_KEYS=$(hadoop key list)
  for KEY in testkey{1,2}; do
    if grep $KEY <<< $EXISTING_KEYS &>/dev/null; then
      hadoop key delete $KEY -f
    fi
    hadoop key create $KEY
  done
fi

# Create test cache pool
if hdfs cacheadmin -listPools testPool | grep testPool &>/dev/null; then
  hdfs cacheadmin -removePool testPool
fi
hdfs cacheadmin -addPool testPool ${CACHEADMIN_ARGS}

# Back to ourselves:
if [ "${PREVIOUS_PRINCIPAL}" != "" ]; then
  kinit -k -t ${KRB5_KTNAME} ${PREVIOUS_PRINCIPAL}
fi