IMPALA-1598: Adding Error Codes to Log Messages
This patch introduces the concept of error codes for errors that are
recorded in Impala and presented to the client. These error codes are
used to aggregate and group incoming error / warning messages, reducing
the flood of output in the shell and increasing the usefulness of the
messages. By splitting the message string from the implementation, the
string can be edited independently of the code, which paves the way for
internationalization.
Error messages are defined as a combination of an enum value and a
string. Both are defined in the ErrorCodes.thrift file, which is
generated automatically by the script in
common/thrift/generate_error_codes.py. The goal of the script is to
provide a central, understandable repository of error messages. Adding
new messages to this file requires rebuilding the Thrift part. The
proxy class ErrorMsg is responsible for representing an error and
capturing the parameters that are used to format the error message
string.
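As a rough illustration of the intended call sites, here is a minimal
sketch built on the constructors and methods this patch adds. The
CheckSplit() function and its logic are hypothetical; only the Status /
ErrorMsg API and the PARQUET_MULTIPLE_BLOCKS code are taken from the
patch itself:

    #include <string>
    #include "common/status.h"    // Status with TErrorCode-based constructors
    #include "util/error-util.h"  // ErrorMsg

    using namespace impala;

    // Hypothetical caller; the API calls mirror the ones in this patch.
    Status CheckSplit(const std::string& filename, bool split_across_blocks) {
      if (split_across_blocks) {
        // Specific, client-visible error: enum code plus a format parameter.
        Status status(TErrorCode::PARQUET_MULTIPLE_BLOCKS, filename);
        // Optional detail string; internal-only, never sent to clients.
        status.AddDetail("split detected while issuing initial ranges");
        return status;
      }
      // The plain-string constructor attaches an ErrorMsg of type GENERAL.
      if (filename.empty()) return Status("no filename given");
      return Status::OK;
    }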
When error messages are recorded, the following algorithm is used (a
sketch of the bookkeeping follows the example below):
- If an error message is of type GENERAL, do not aggregate it; simply
  add it to the total number of messages.
- If an error message is of a specific type, record the first
  occurrence as a sample and increment a count for every further
  occurrence.
- The coordinator merges all error messages except those of type
  GENERAL and displays a count.
For example, in the case of a Parquet file spanning multiple HDFS
blocks, the output will look like:
Parquet files should not be split into multiple hdfs-blocks.
file=hdfs://localhost:20500/fid.parq (1 of 321 similar)
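The exact bookkeeping structures are not visible in this excerpt; as an
illustration of the recording rule above, one could imagine something
like the following (all names here are invented for the sketch):

    #include <map>
    #include <string>
    #include <vector>

    struct ErrorEntry {
      std::string sample;  // first message seen for this error code
      int count;           // number of occurrences of this code
      ErrorEntry() : count(0) {}
    };

    struct ErrorLog {
      std::vector<std::string> general;   // GENERAL messages, kept verbatim
      std::map<int, ErrorEntry> by_code;  // specific codes, aggregated
    };

    void RecordError(ErrorLog* log, int code, const std::string& msg) {
      const int GENERAL = 1;  // stand-in for TErrorCode::GENERAL
      if (code == GENERAL) {
        log->general.push_back(msg);  // never aggregated
        return;
      }
      ErrorEntry& entry = log->by_code[code];
      if (entry.count == 0) entry.sample = msg;  // first occurrence is the sample
      ++entry.count;  // drives the "(1 of N similar)" suffix
    }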
All messages are always logged to VLOG. In the coordinator, error
messages are merged across all backends to retain readability in the
case of large clusters.
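Continuing the sketch above, merging one backend's log into the
coordinator's log could look like this (again an invented illustration,
not code from this patch):

    void MergeErrorLog(ErrorLog* coordinator, const ErrorLog& backend) {
      // GENERAL messages pass through unmerged.
      coordinator->general.insert(coordinator->general.end(),
                                  backend.general.begin(),
                                  backend.general.end());
      // Specific codes: keep one sample message, sum the counts.
      for (std::map<int, ErrorEntry>::const_iterator it =
               backend.by_code.begin();
           it != backend.by_code.end(); ++it) {
        ErrorEntry& entry = coordinator->by_code[it->first];
        if (entry.count == 0) entry.sample = it->second.sample;
        entry.count += it->second.count;
      }
    }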
The current version of this patch adds these new error codes to some of
the most important error messages as a reference implementation.
Change-Id: I1f1811631836d2dd6048035ad33f7194fb71d6b8
Reviewed-on: http://gerrit.cloudera.org:8080/39
Reviewed-by: Martin Grund <mgrund@cloudera.com>
Tested-by: Internal Jenkins
committed by Internal Jenkins
parent 296d1bba2f
commit b582cdc22b

.gitignore (vendored): 3 lines changed
@@ -36,3 +36,6 @@ tests/test-hive-udfs/target/
 cdh-*-hdfs-data/
 avro_schemas/
 cluster_logs/
+
+# This file is auto-generated in the build process
+common/thrift/ErrorCodes.thrift

@@ -50,6 +50,8 @@ set(SRC_FILES
 DataSinks_types.cpp
 Descriptors_constants.cpp
 Descriptors_types.cpp
+ErrorCodes_types.cpp
+ErrorCodes_constants.cpp
 ExecStats_constants.cpp
 ExecStats_types.cpp
 Exprs_constants.cpp
@@ -121,7 +121,7 @@ void TestBoostIntHash(int batch, void* d) {
 for (int j = 0; j < rows; ++j) {
 size_t h = HashUtil::FNV_SEED;
 for (int k = 0; k < cols; ++k) {
-size_t hash_value = hash<int32_t>().operator()(values[k]);
+size_t hash_value = boost::hash<int32_t>().operator()(values[k]);
 hash_combine(h, hash_value);
 }
 data->results[j] = h;
@@ -210,15 +210,15 @@ void TestBoostMixedHash(int batch, void* d) {
 for (int j = 0; j < rows; ++j) {
 size_t h = HashUtil::FNV_SEED;

-size_t hash_value = hash<int8_t>().operator()(*reinterpret_cast<int8_t*>(values));
+size_t hash_value = boost::hash<int8_t>().operator()(*reinterpret_cast<int8_t*>(values));
 hash_combine(h, hash_value);
 values += sizeof(int8_t);

-hash_value = hash<int32_t>().operator()(*reinterpret_cast<int32_t*>(values));
+hash_value = boost::hash<int32_t>().operator()(*reinterpret_cast<int32_t*>(values));
 hash_combine(h, hash_value);
 values += sizeof(int32_t);

-hash_value = hash<int64_t>().operator()(*reinterpret_cast<int64_t*>(values));
+hash_value = boost::hash<int64_t>().operator()(*reinterpret_cast<int64_t*>(values));
 hash_combine(h, hash_value);
 values += sizeof(int64_t);

@@ -380,7 +380,7 @@ int main(int argc, char **argv) {

 status = codegen->FinalizeModule();
 if (!status.ok()) {
-cout << "Could not compile module: " << status.GetErrorMsg();
+cout << "Could not compile module: " << status.GetDetail();
 return -1;
 }
@@ -59,7 +59,7 @@ class CatalogServiceThriftIf : public CatalogServiceIf {
 virtual void ExecDdl(TDdlExecResponse& resp, const TDdlExecRequest& req) {
 VLOG_RPC << "ExecDdl(): request=" << ThriftDebugString(req);
 Status status = catalog_server_->catalog()->ExecDdl(req, &resp);
-if (!status.ok()) LOG(ERROR) << status.GetErrorMsg();
+if (!status.ok()) LOG(ERROR) << status.GetDetail();
 TStatus thrift_status;
 status.ToThrift(&thrift_status);
 resp.result.__set_status(thrift_status);
@@ -71,7 +71,7 @@ class CatalogServiceThriftIf : public CatalogServiceIf {
 const TResetMetadataRequest& req) {
 VLOG_RPC << "ResetMetadata(): request=" << ThriftDebugString(req);
 Status status = catalog_server_->catalog()->ResetMetadata(req, &resp);
-if (!status.ok()) LOG(ERROR) << status.GetErrorMsg();
+if (!status.ok()) LOG(ERROR) << status.GetDetail();
 TStatus thrift_status;
 status.ToThrift(&thrift_status);
 resp.result.__set_status(thrift_status);
@@ -84,7 +84,7 @@ class CatalogServiceThriftIf : public CatalogServiceIf {
 const TUpdateCatalogRequest& req) {
 VLOG_RPC << "UpdateCatalog(): request=" << ThriftDebugString(req);
 Status status = catalog_server_->catalog()->UpdateCatalog(req, &resp);
-if (!status.ok()) LOG(ERROR) << status.GetErrorMsg();
+if (!status.ok()) LOG(ERROR) << status.GetDetail();
 TStatus thrift_status;
 status.ToThrift(&thrift_status);
 resp.result.__set_status(thrift_status);
@@ -97,7 +97,7 @@ class CatalogServiceThriftIf : public CatalogServiceIf {
 const TGetFunctionsRequest& req) {
 VLOG_RPC << "GetFunctions(): request=" << ThriftDebugString(req);
 Status status = catalog_server_->catalog()->GetFunctions(req, &resp);
-if (!status.ok()) LOG(ERROR) << status.GetErrorMsg();
+if (!status.ok()) LOG(ERROR) << status.GetDetail();
 TStatus thrift_status;
 status.ToThrift(&thrift_status);
 resp.__set_status(thrift_status);
@@ -110,7 +110,7 @@ class CatalogServiceThriftIf : public CatalogServiceIf {
 VLOG_RPC << "GetCatalogObject(): request=" << ThriftDebugString(req);
 Status status = catalog_server_->catalog()->GetCatalogObject(req.object_desc,
 &resp.catalog_object);
-if (!status.ok()) LOG(ERROR) << status.GetErrorMsg();
+if (!status.ok()) LOG(ERROR) << status.GetDetail();
 VLOG_RPC << "GetCatalogObject(): response=" << ThriftDebugString(resp);
 }

@@ -121,7 +121,7 @@ class CatalogServiceThriftIf : public CatalogServiceIf {
 const TPrioritizeLoadRequest& req) {
 VLOG_RPC << "PrioritizeLoad(): request=" << ThriftDebugString(req);
 Status status = catalog_server_->catalog()->PrioritizeLoad(req);
-if (!status.ok()) LOG(ERROR) << status.GetErrorMsg();
+if (!status.ok()) LOG(ERROR) << status.GetDetail();
 TStatus thrift_status;
 status.ToThrift(&thrift_status);
 resp.__set_status(thrift_status);
@@ -132,7 +132,7 @@ class CatalogServiceThriftIf : public CatalogServiceIf {
 const TSentryAdminCheckRequest& req) {
 VLOG_RPC << "SentryAdminCheck(): request=" << ThriftDebugString(req);
 Status status = catalog_server_->catalog()->SentryAdminCheck(req);
-if (!status.ok()) LOG(ERROR) << status.GetErrorMsg();
+if (!status.ok()) LOG(ERROR) << status.GetDetail();
 TStatus thrift_status;
 status.ToThrift(&thrift_status);
 resp.__set_status(thrift_status);
@@ -176,7 +176,7 @@ Status CatalogServer::Start() {
 bind<void>(mem_fn(&CatalogServer::UpdateCatalogTopicCallback), this, _1, _2);
 Status status = statestore_subscriber_->AddTopic(IMPALA_CATALOG_TOPIC, false, cb);
 if (!status.ok()) {
-status.AddErrorMsg("CatalogService failed to start");
+status.AddDetail("CatalogService failed to start");
 return status;
 }
 RETURN_IF_ERROR(statestore_subscriber_->Start());
@@ -268,7 +268,7 @@ void CatalogServer::GatherCatalogUpdatesThread() {
 long current_catalog_version;
 Status status = catalog_->GetCatalogVersion(&current_catalog_version);
 if (!status.ok()) {
-LOG(ERROR) << status.GetErrorMsg();
+LOG(ERROR) << status.GetDetail();
 } else if (current_catalog_version != last_sent_catalog_version_) {
 // If there has been a change since the last time the catalog was queried,
 // call into the Catalog to find out what has changed.
@@ -276,7 +276,7 @@ void CatalogServer::GatherCatalogUpdatesThread() {
 status = catalog_->GetAllCatalogObjects(last_sent_catalog_version_,
 &catalog_objects);
 if (!status.ok()) {
-LOG(ERROR) << status.GetErrorMsg();
+LOG(ERROR) << status.GetDetail();
 } else {
 // Use the catalog objects to build a topic update list.
 BuildTopicUpdates(catalog_objects.objects);
@@ -318,7 +318,7 @@ void CatalogServer::BuildTopicUpdates(const vector<TCatalogObject>& catalog_obje
 item.key = entry_key;
 Status status = thrift_serializer_.Serialize(&catalog_object, &item.value);
 if (!status.ok()) {
-LOG(ERROR) << "Error serializing topic value: " << status.GetErrorMsg();
+LOG(ERROR) << "Error serializing topic value: " << status.GetDetail();
 pending_topic_updates_.pop_back();
 }
 }
@@ -340,7 +340,7 @@ void CatalogServer::CatalogUrlCallback(const Webserver::ArgumentMap& args,
 TGetDbsResult get_dbs_result;
 Status status = catalog_->GetDbNames(NULL, &get_dbs_result);
 if (!status.ok()) {
-Value error(status.GetErrorMsg().c_str(), document->GetAllocator());
+Value error(status.GetDetail().c_str(), document->GetAllocator());
 document->AddMember("error", error, document->GetAllocator());
 return;
 }
@@ -353,7 +353,7 @@ void CatalogServer::CatalogUrlCallback(const Webserver::ArgumentMap& args,
 TGetTablesResult get_table_results;
 Status status = catalog_->GetTableNames(db, NULL, &get_table_results);
 if (!status.ok()) {
-Value error(status.GetErrorMsg().c_str(), document->GetAllocator());
+Value error(status.GetDetail().c_str(), document->GetAllocator());
 database.AddMember("error", error, document->GetAllocator());
 continue;
 }
@@ -394,7 +394,7 @@ void CatalogServer::CatalogObjectsUrlCallback(const Webserver::ArgumentMap& args
 Value debug_string(ThriftDebugString(result).c_str(), document->GetAllocator());
 document->AddMember("thrift_string", debug_string, document->GetAllocator());
 } else {
-Value error(status.GetErrorMsg().c_str(), document->GetAllocator());
+Value error(status.GetDetail().c_str(), document->GetAllocator());
 document->AddMember("error", error, document->GetAllocator());
 }
 } else {
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+#include <boost/foreach.hpp>
 #include <boost/algorithm/string/join.hpp>

 #include "common/status.h"
@@ -27,110 +28,165 @@ namespace impala {
 // glog functions which also rely on static initializations.
 // TODO: is there a more controlled way to do this.
 const Status Status::OK;
-const Status Status::CANCELLED(TStatusCode::CANCELLED, "Cancelled", true);
+const Status Status::CANCELLED(ErrorMsg::Init(TErrorCode::CANCELLED, "Cancelled"));

 const Status Status::MEM_LIMIT_EXCEEDED(
-TStatusCode::MEM_LIMIT_EXCEEDED, "Memory limit exceeded", true);
-const Status Status::DEPRECATED_RPC(TStatusCode::NOT_IMPLEMENTED_ERROR,
-"Deprecated RPC; please update your client", true);
+ErrorMsg::Init(TErrorCode::MEM_LIMIT_EXCEEDED, "Memory limit exceeded"));

-Status::ErrorDetail::ErrorDetail(const TStatus& status)
-: error_code(status.status_code),
-error_msgs(status.error_msgs) {
-DCHECK_NE(error_code, TStatusCode::OK);
+const Status Status::DEPRECATED_RPC(ErrorMsg::Init(TErrorCode::NOT_IMPLEMENTED_ERROR,
+"Deprecated RPC; please update your client"));
+
+Status::Status(TErrorCode::type code)
+: msg_(new ErrorMsg(code)) {
+VLOG(1) << msg_->msg() << endl << GetStackTrace();
 }

-Status::Status(const string& error_msg, bool quiet)
-: error_detail_(new ErrorDetail(TStatusCode::INTERNAL_ERROR, error_msg)) {
-if (!quiet) VLOG(1) << error_msg << endl << GetStackTrace();
+Status::Status(TErrorCode::type code, const ArgType& arg0)
+: msg_(new ErrorMsg(code, arg0)) {
+VLOG(1) << msg_->msg() << endl << GetStackTrace();
 }

+Status::Status(TErrorCode::type code, const ArgType& arg0, const ArgType& arg1)
+: msg_(new ErrorMsg(code, arg0, arg1)) {
+VLOG(1) << msg_->msg() << endl << GetStackTrace();
+}
+
+Status::Status(TErrorCode::type code, const ArgType& arg0, const ArgType& arg1,
+const ArgType& arg2)
+: msg_(new ErrorMsg(code, arg0, arg1, arg2)) {
+VLOG(1) << msg_->msg() << endl << GetStackTrace();
+}
+Status::Status(TErrorCode::type code, const ArgType& arg0, const ArgType& arg1,
+const ArgType& arg2, const ArgType& arg3)
+: msg_(new ErrorMsg(code, arg0, arg1, arg2, arg3)) {
+VLOG(1) << msg_->msg() << endl << GetStackTrace();
+}
+
+Status::Status(TErrorCode::type code, const ArgType& arg0, const ArgType& arg1,
+const ArgType& arg2, const ArgType& arg3, const ArgType& arg4)
+: msg_(new ErrorMsg(code, arg0, arg1, arg2, arg3, arg4)) {
+VLOG(1) << msg_->msg() << endl << GetStackTrace();
+}
+
+Status::Status(TErrorCode::type code, const ArgType& arg0, const ArgType& arg1,
+const ArgType& arg2, const ArgType& arg3, const ArgType& arg4,
+const ArgType& arg5)
+: msg_(new ErrorMsg(code, arg0, arg1, arg2, arg3, arg4, arg5)) {
+VLOG(1) << msg_->msg() << endl << GetStackTrace();
+}
+
+Status::Status(TErrorCode::type code, const ArgType& arg0, const ArgType& arg1,
+const ArgType& arg2, const ArgType& arg3, const ArgType& arg4,
+const ArgType& arg5, const ArgType& arg6)
+: msg_(new ErrorMsg(code, arg0, arg1, arg2, arg3, arg4, arg5, arg6)) {
+VLOG(1) << msg_->msg() << endl << GetStackTrace();
+}
+
+Status::Status(TErrorCode::type code, const ArgType& arg0, const ArgType& arg1,
+const ArgType& arg2, const ArgType& arg3, const ArgType& arg4,
+const ArgType& arg5, const ArgType& arg6, const ArgType& arg7)
+: msg_(new ErrorMsg(code, arg0, arg1, arg2, arg3, arg4, arg5, arg6,
+arg7)) {
+VLOG(1) << msg_->msg() << endl << GetStackTrace();
+}
+
+Status::Status(TErrorCode::type code, const ArgType& arg0, const ArgType& arg1,
+const ArgType& arg2, const ArgType& arg3, const ArgType& arg4,
+const ArgType& arg5, const ArgType& arg6, const ArgType& arg7,
+const ArgType& arg8)
+: msg_(new ErrorMsg(code, arg0, arg1, arg2, arg3, arg4, arg5, arg6,
+arg7, arg8)) {
+VLOG(1) << msg_->msg() << endl << GetStackTrace();
+}
+
+Status::Status(TErrorCode::type code, const ArgType& arg0, const ArgType& arg1,
+const ArgType& arg2, const ArgType& arg3, const ArgType& arg4,
+const ArgType& arg5, const ArgType& arg6, const ArgType& arg7,
+const ArgType& arg8, const ArgType& arg9)
+: msg_(new ErrorMsg(code, arg0, arg1, arg2, arg3, arg4, arg5, arg6,
+arg7, arg8, arg9)) {
+VLOG(1) << msg_->msg() << endl << GetStackTrace();
+}
+
+Status::Status(const string& error_msg)
+: msg_(new ErrorMsg(TErrorCode::GENERAL, error_msg)) {
+VLOG(1) << error_msg << endl << GetStackTrace();
+}
+
+Status::Status(const ErrorMsg& message)
+: msg_(new ErrorMsg(message)) { }

 Status::Status(const TStatus& status)
-: error_detail_(
-status.status_code == TStatusCode::OK
-? NULL
-: new ErrorDetail(status)) {
-}
+: msg_(status.status_code == TErrorCode::OK
+? NULL : new ErrorMsg(status.status_code, status.error_msgs)) { }

 Status& Status::operator=(const TStatus& status) {
-delete error_detail_;
-if (status.status_code == TStatusCode::OK) {
-error_detail_ = NULL;
+delete msg_;
+if (status.status_code == TErrorCode::OK) {
+msg_ = NULL;
 } else {
-error_detail_ = new ErrorDetail(status);
+msg_ = new ErrorMsg(status.status_code, status.error_msgs);
 }
 return *this;
 }

 Status::Status(const apache::hive::service::cli::thrift::TStatus& hs2_status)
-: error_detail_(
+: msg_(
 hs2_status.statusCode
 == apache::hive::service::cli::thrift::TStatusCode::SUCCESS_STATUS ? NULL
-: new ErrorDetail(
-static_cast<TStatusCode::type>(hs2_status.statusCode),
+: new ErrorMsg(
+static_cast<TErrorCode::type>(hs2_status.statusCode),
 hs2_status.errorMessage)) {
 }

 Status& Status::operator=(
 const apache::hive::service::cli::thrift::TStatus& hs2_status) {
-delete error_detail_;
+delete msg_;
 if (hs2_status.statusCode
 == apache::hive::service::cli::thrift::TStatusCode::SUCCESS_STATUS) {
-error_detail_ = NULL;
+msg_ = NULL;
 } else {
-error_detail_ = new ErrorDetail(
-static_cast<TStatusCode::type>(hs2_status.statusCode), hs2_status.errorMessage);
+msg_ = new ErrorMsg(
+static_cast<TErrorCode::type>(hs2_status.statusCode), hs2_status.errorMessage);
 }
 return *this;
 }

-void Status::AddErrorMsg(TStatusCode::type code, const std::string& msg) {
-if (error_detail_ == NULL) {
-error_detail_ = new ErrorDetail(code, msg);
-} else {
-error_detail_->error_msgs.push_back(msg);
-}
+void Status::AddDetail(const std::string& msg) {
+DCHECK_NOTNULL(msg_);
+msg_->AddDetail(msg);
+VLOG(2) << msg;
 }

-void Status::AddErrorMsg(const std::string& msg) {
-AddErrorMsg(TStatusCode::INTERNAL_ERROR, msg);
-}
-
-void Status::AddError(const Status& status) {
+void Status::MergeStatus(const Status& status) {
 if (status.ok()) return;
-AddErrorMsg(status.code(), status.GetErrorMsg());
-}
-
-void Status::GetErrorMsgs(vector<string>* msgs) const {
-msgs->clear();
-if (error_detail_ != NULL) {
-*msgs = error_detail_->error_msgs;
+if (msg_ == NULL) {
+msg_ = new ErrorMsg(*status.msg_);
 } else {
+msg_->AddDetail(status.msg().msg());
+BOOST_FOREACH(const string& s, status.msg_->details()) {
+msg_->AddDetail(s);
+}
 }
 }

-void Status::GetErrorMsg(string* msg) const {
-msg->clear();
-if (error_detail_ != NULL) {
-*msg = join(error_detail_->error_msgs, "\n");
-}
-}
-
-string Status::GetErrorMsg() const {
-string msg;
-GetErrorMsg(&msg);
-return msg;
+const string Status::GetDetail() const {
+return msg_ != NULL ? msg_->GetFullMessageDetails() : "";
 }

 void Status::ToThrift(TStatus* status) const {
 status->error_msgs.clear();
-if (error_detail_ == NULL) {
-status->status_code = TStatusCode::OK;
+if (msg_ == NULL) {
+status->status_code = TErrorCode::OK;
 } else {
-status->status_code = error_detail_->error_code;
-for (int i = 0; i < error_detail_->error_msgs.size(); ++i) {
-status->error_msgs.push_back(error_detail_->error_msgs[i]);
+status->status_code = msg_->error();
+status->error_msgs.push_back(msg_->msg());
+BOOST_FOREACH(const string& s, msg_->details()) {
+status->error_msgs.push_back(s);
 }
-status->__isset.error_msgs = !error_detail_->error_msgs.empty();
+status->__isset.error_msgs = true;
 }
 }
@@ -19,24 +19,60 @@
 #include <string>
 #include <vector>

 #include <boost/lexical_cast.hpp>

 #include "common/logging.h"
 #include "common/compiler-util.h"
 #include "gen-cpp/Status_types.h" // for TStatus
+#include "gen-cpp/ErrorCodes_types.h" // for TErrorCode
 #include "gen-cpp/TCLIService_types.h" // for HS2 TStatus
+#include "util/error-util.h" // for ErrorMessage
+
+#define STATUS_API_VERSION 2

 namespace impala {

 // Status is used as a function return type to indicate success, failure or cancellation
 // of the function. In case of successful completion, it only occupies sizeof(void*)
-// statically allocated memory. In the error case, it records a stack of error messages.
+// statically allocated memory and therefore no more members should be added to this
+// class.
 //
-// example:
+// A Status may either be OK (represented by the singleton Status::OK), or it may
+// represent an error condition. In the latter case, a Status has both an error code,
+// which belongs to the TErrorCode enum, and an error string, which may be presented to
+// clients or logged to disk.
+//
+// An error Status may also have one or more optional 'detail' strings which provide
+// further context. These strings are intended for internal consumption only - and
+// therefore will not be sent to clients.
+//
+// The state associated with an error Status is encapsulated in an ErrorMsg instance to
+// ensure that the size of Status is kept very small, as it is passed around on the stack
+// as a return value. See ErrorMsg for more details on how error strings are constructed.
+//
+// Example Usage:
+// Status fnB(int x) {
+//
 // // Status as return value
 // Status status = fnA(x);
 // if (!status.ok()) {
-// status.AddErrorMsg("fnA(x) went wrong");
+// status.AddDetail("fnA(x) went wrong");
 // return status;
 // }
 //
+// int r = Read(fid);
+// // Attaches an ErrorMsg with type GENERAL to the status
+// if (r == -1) return Status("String Constructor");
+//
+// int x = MoreRead(x);
+// if (x == 4711) {
+// // Specific error messages with one param
+// Status s = Status(ERROR_4711_HAS_NO_BLOCKS, x);
+// // Optional detail
+// s.AddDetail("rotation-disk-broken due to weather");
+// }
+//
 // return Status::OK;
 // }
 //
 // TODO: macros:
@@ -45,7 +81,9 @@ namespace impala {

 class Status {
 public:
-Status(): error_detail_(NULL) {}
+typedef strings::internal::SubstituteArg ArgType;
+
+Status(): msg_(NULL) {}

 static const Status OK;
 static const Status CANCELLED;
@@ -54,91 +92,122 @@ class Status {

 // copy c'tor makes copy of error detail so Status can be returned by value
 Status(const Status& status)
-: error_detail_(
-status.error_detail_ != NULL
-? new ErrorDetail(*status.error_detail_)
-: NULL) {
-}
+: msg_(status.msg_ != NULL ? new ErrorMsg(*status.msg_) : NULL) { }

-// c'tor for error case - is this useful for anything other than CANCELLED?
-Status(TStatusCode::type code)
-: error_detail_(new ErrorDetail(code)) {
-}
+// Status using only the error code as a parameter. This can be used for error messages
+// that don't take format parameters.
+Status(TErrorCode::type code);

-// c'tor for error case
-Status(TStatusCode::type code, const std::string& error_msg, bool quiet=false)
-: error_detail_(new ErrorDetail(code, error_msg)) {
-if (!quiet) VLOG(2) << error_msg;
-}
+// These constructors are used if the caller wants to indicate a non-successful
+// execution and supply a client-facing error message. This is the preferred way of
+// instantiating a non-successful Status.
+Status(TErrorCode::type error, const ArgType& arg0);
+Status(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1);
+Status(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
+const ArgType& arg2);
+Status(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
+const ArgType& arg2, const ArgType& arg3);
+Status(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
+const ArgType& arg2, const ArgType& arg3, const ArgType& arg4);
+Status(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
+const ArgType& arg2, const ArgType& arg3, const ArgType& arg4,
+const ArgType& arg5);
+Status(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
+const ArgType& arg2, const ArgType& arg3, const ArgType& arg4,
+const ArgType& arg5, const ArgType& arg6);
+Status(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
+const ArgType& arg2, const ArgType& arg3, const ArgType& arg4,
+const ArgType& arg5, const ArgType& arg6, const ArgType& arg7);
+Status(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
+const ArgType& arg2, const ArgType& arg3, const ArgType& arg4,
+const ArgType& arg5, const ArgType& arg6, const ArgType& arg7,
+const ArgType& arg8);
+Status(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
+const ArgType& arg2, const ArgType& arg3, const ArgType& arg4,
+const ArgType& arg5, const ArgType& arg6, const ArgType& arg7,
+const ArgType& arg8, const ArgType& arg9);

-// c'tor for internal error
-Status(const std::string& error_msg, bool quiet=false);
+// Used when the ErrorMsg is created as an intermediate value that is either passed to
+// the Status or to the RuntimeState.
+Status(const ErrorMsg& e);
+
+// This constructor creates a Status with a default error code of GENERAL and is not
+// intended for statuses that might be client-visible.
+// TODO: deprecate
+Status(const std::string& error_msg);

 ~Status() {
-if (error_detail_ != NULL) delete error_detail_;
+if (msg_ != NULL) delete msg_;
 }

 // same as copy c'tor
 Status& operator=(const Status& status) {
-delete error_detail_;
-if (LIKELY(status.error_detail_ == NULL)) {
-error_detail_ = NULL;
+delete msg_;
+if (LIKELY(status.msg_ == NULL)) {
+msg_ = NULL;
 } else {
-error_detail_ = new ErrorDetail(*status.error_detail_);
+msg_ = new ErrorMsg(*status.msg_);
 }
 return *this;
 }

 // "Copy" c'tor from TStatus.
+// Retains the TErrorCode value and the message
 Status(const TStatus& status);

 // same as previous c'tor
+// Retains the TErrorCode value and the message
 Status& operator=(const TStatus& status);

 // "Copy" c'tor from HS2 TStatus.
+// Retains the TErrorCode value and the message
 Status(const apache::hive::service::cli::thrift::TStatus& hs2_status);

 // same as previous c'tor
+// Retains the TErrorCode value and the message
 Status& operator=(const apache::hive::service::cli::thrift::TStatus& hs2_status);

 // assign from stringstream
 Status& operator=(const std::stringstream& stream);

-bool ok() const { return error_detail_ == NULL; }
+bool ok() const { return msg_ == NULL; }

 bool IsCancelled() const {
-return error_detail_ != NULL
-&& error_detail_->error_code == TStatusCode::CANCELLED;
+return msg_ != NULL && msg_->error() == TErrorCode::CANCELLED;
 }

 bool IsMemLimitExceeded() const {
-return error_detail_ != NULL
-&& error_detail_->error_code == TStatusCode::MEM_LIMIT_EXCEEDED;
+return msg_ != NULL
+&& msg_->error() == TErrorCode::MEM_LIMIT_EXCEEDED;
 }

 bool IsRecoverableError() const {
-return error_detail_ != NULL
-&& error_detail_->error_code == TStatusCode::RECOVERABLE_ERROR;
+return msg_ != NULL
+&& msg_->error() == TErrorCode::RECOVERABLE_ERROR;
 }

-// Add an error message and set the code if no code has been set yet.
-// If a code has already been set, 'code' is ignored.
-void AddErrorMsg(TStatusCode::type code, const std::string& msg);
+// Returns the error message associated with a non-successful status.
+const ErrorMsg& msg() const {
+DCHECK_NOTNULL(msg_);
+return *msg_;
+}

-// Add an error message and set the code to INTERNAL_ERROR if no code has been
-// set yet. If a code has already been set, it is left unchanged.
-void AddErrorMsg(const std::string& msg);
+// Sets the ErrorMessage on the detail of the status. Calling this method is only valid
+// if an error was reported.
+// TODO: deprecate, error should be immutable
+void SetErrorMsg(const ErrorMsg& m) {
+DCHECK_NOTNULL(msg_);
+delete msg_;
+msg_ = new ErrorMsg(m);
+}
+
+// Add a detail string. Calling this method is only defined on a non-OK message
+void AddDetail(const std::string& msg);

 // Does nothing if status.ok().
-// Otherwise: if 'this' is an error status, adds the error msg from 'status;
+// Otherwise: if 'this' is an error status, adds the error msg from 'status';
 // otherwise assigns 'status'.
-void AddError(const Status& status);
+void MergeStatus(const Status& status);

-// Return all accumulated error msgs.
-void GetErrorMsgs(std::vector<std::string>* msgs) const;
-
-// Convert into TStatus. Call this if 'status_container' contains an optional
-// TStatus field named 'status'. This also sets __isset.status.
+// Convert into TStatus. Call this if 'status_container' contains an optional TStatus
+// field named 'status'. This also sets status_container->__isset.status.
 template <typename T> void SetTStatus(T* status_container) const {
 ToThrift(&status_container->status);
 status_container->__isset.status = true;
@@ -147,28 +216,20 @@ class Status {
 // Convert into TStatus.
 void ToThrift(TStatus* status) const;

-// Return all accumulated error msgs in a single string.
-void GetErrorMsg(std::string* msg) const;
+// Returns the formatted message of the error message and the individual details of the
+// additional messages as a single string. This should only be called internally and not
+// to report an error back to the client.
+const std::string GetDetail() const;

-std::string GetErrorMsg() const;
-
-TStatusCode::type code() const {
-return error_detail_ == NULL ? TStatusCode::OK : error_detail_->error_code;
+TErrorCode::type code() const {
+return msg_ == NULL ? TErrorCode::OK : msg_->error();
 }

 private:
-struct ErrorDetail {
-TStatusCode::type error_code; // anything other than OK
-std::vector<std::string> error_msgs;
-
-ErrorDetail(const TStatus& status);
-ErrorDetail(TStatusCode::type code)
-: error_code(code) {}
-ErrorDetail(TStatusCode::type code, const std::string& msg)
-: error_code(code), error_msgs(1, msg) {}
-};
-
-ErrorDetail* error_detail_;
+// Status uses a naked pointer to ensure the size of an instance on the stack is only
+// the sizeof(ErrorMsg*). Every Status owns its ErrorMsg instance.
+ErrorMsg* msg_;
 };

 // some generally useful macros
@@ -182,9 +243,7 @@ class Status {
 do { \
 Status __status__ = (stmt); \
 if (UNLIKELY(!__status__.ok())) { \
-std::string msg; \
-__status__.GetErrorMsg(&msg); \
-EXIT_WITH_ERROR(msg); \
+EXIT_WITH_ERROR(__status__.GetDetail()); \
 } \
 } while (false)
@@ -509,7 +509,7 @@ llvm::Function* AggregationNode::CodegenUpdateSlot(
 Function* agg_expr_fn;
 Status status = input_expr->GetCodegendComputeFn(state, &agg_expr_fn);
 if (!status.ok()) {
-VLOG_QUERY << "Could not codegen UpdateSlot(): " << status.GetErrorMsg();
+VLOG_QUERY << "Could not codegen UpdateSlot(): " << status.GetDetail();
 return NULL;
 }
 DCHECK(agg_expr_fn != NULL);
@@ -107,7 +107,7 @@ Status BaseSequenceScanner::ProcessSplit() {
 Status status = ReadFileHeader();
 if (!status.ok()) {
 if (state_->abort_on_error()) return status;
-state_->LogError(status);
+state_->LogError(status.msg());
 // We need to complete the ranges for this file.
 CloseFileRanges(stream_->filename());
 return Status::OK;
@@ -144,15 +144,12 @@ Status BaseSequenceScanner::ProcessSplit() {
 if (status.IsCancelled() || status.IsMemLimitExceeded()) return status;

 // Log error from file format parsing.
-stringstream ss;
-ss << "Problem parsing file " << stream_->filename() << " at ";
-if (stream_->eof()) {
-ss << "end of file";
-} else {
-ss << "offset " << stream_->file_offset();
-}
-ss << ": " << status.GetErrorMsg();
-state_->LogError(ss.str());
+state_->LogError(ErrorMsg(TErrorCode::SEQUENCE_SCANNER_PARSE_ERROR,
+stream_->filename(), stream_->file_offset(),
+(stream_->eof() ? "(EOF)" : "")));
+
+// Make sure errors specified in the status are logged as well
+state_->LogError(status.msg());

 // If abort on error then return, otherwise try to recover.
 if (state_->abort_on_error()) return status;
@@ -350,7 +350,7 @@ void DataSourceScanNode::Close(RuntimeState* state) {
 params.__set_scan_handle(scan_handle_);
 TCloseResult result;
 Status status = data_source_executor_->Close(params, &result);
-state->LogError(status); // logs the error if status != OK
+if (!status.ok()) state->LogError(status.msg());
 ExecNode::Close(state);
 }
@@ -371,7 +371,9 @@ void ExecNode::InitRuntimeProfile(const string& name) {
 Status ExecNode::ExecDebugAction(TExecNodePhase::type phase, RuntimeState* state) {
 DCHECK(phase != TExecNodePhase::INVALID);
 if (debug_phase_ != phase) return Status::OK;
-if (debug_action_ == TDebugAction::FAIL) return Status(TStatusCode::INTERNAL_ERROR);
+if (debug_action_ == TDebugAction::FAIL) {
+return Status(TErrorCode::INTERNAL_ERROR, "Debug Action: FAIL");
+}
 if (debug_action_ == TDebugAction::WAIT) {
 while (!state->is_cancelled()) {
 sleep(1);
@@ -447,7 +449,7 @@ Function* ExecNode::CodegenEvalConjuncts(
 Status status =
 conjunct_ctxs[i]->root()->GetCodegendComputeFn(state, &conjunct_fns[i]);
 if (!status.ok()) {
-VLOG_QUERY << "Could not codegen EvalConjuncts: " << status.GetErrorMsg();
+VLOG_QUERY << "Could not codegen EvalConjuncts: " << status.GetDetail();
 return NULL;
 }
 }
@@ -115,11 +115,11 @@ Status ExternalDataSourceExecutor::Close(const TCloseParams& params,
 JNIEnv* env = getJNIEnv();
 if (executor_ != NULL) {
 env->DeleteGlobalRef(executor_);
-status.AddError(JniUtil::GetJniExceptionMsg(env)); // no-op if Status == OK
+status.MergeStatus(JniUtil::GetJniExceptionMsg(env)); // no-op if Status == OK
 }
 if (executor_class_ != NULL) {
 env->DeleteGlobalRef(executor_class_);
-status.AddError(JniUtil::GetJniExceptionMsg(env));
+status.MergeStatus(JniUtil::GetJniExceptionMsg(env));
 }
 is_initialized_ = false;
 return status;
@@ -512,7 +512,7 @@ Function* HashTableCtx::CodegenEvalRow(RuntimeState* state, bool build) {
 Function* expr_fn;
 Status status = ctxs[i]->root()->GetCodegendComputeFn(state, &expr_fn);
 if (!status.ok()) {
-VLOG_QUERY << "Problem with CodegenEvalRow: " << status.GetErrorMsg();
+VLOG_QUERY << "Problem with CodegenEvalRow: " << status.GetDetail();
 fn->eraseFromParent(); // deletes function
 return NULL;
 }
@@ -800,7 +800,7 @@ Function* HashTableCtx::CodegenEquals(RuntimeState* state) {
 Function* expr_fn;
 Status status = build_expr_ctxs_[i]->root()->GetCodegendComputeFn(state, &expr_fn);
 if (!status.ok()) {
-VLOG_QUERY << "Problem with CodegenEquals: " << status.GetErrorMsg();
+VLOG_QUERY << "Problem with CodegenEquals: " << status.GetDetail();
 fn->eraseFromParent(); // deletes function
 return NULL;
 }
@@ -140,7 +140,7 @@ void HBaseScanNode::WriteTextSlot(
 << ":" << qualifier << ": "
 << "'" << string(reinterpret_cast<char*>(value), value_length) << "' TO "
 << slot->type();
-state->LogError(ss.str());
+state->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str()));
 }
 }
 }
@@ -244,7 +244,7 @@ Status HBaseScanNode::GetNext(RuntimeState* state, RowBatch* row_batch, bool* eo
 int key_length;
 hbase_scanner_->GetRowKey(env, &key, &key_length);
 ss << "row key: " << string(reinterpret_cast<const char*>(key), key_length);
-state->LogError(ss.str());
+state->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str()));
 }
 if (state->abort_on_error()) {
 state->ReportFileErrors(table_name_, 1);
@@ -274,8 +274,8 @@ void HBaseTableWriter::Close(RuntimeState* state) {
 Status status = CleanUpJni();
 if (!status.ok()) {
 stringstream ss;
-ss << "HBaseTableWriter::Close ran into an issue: " << status.GetErrorMsg();
-state->LogError(ss.str());
+ss << "HBaseTableWriter::Close ran into an issue: " << status.GetDetail();
+state->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str()));
 }
 }
@@ -239,7 +239,7 @@ Status HdfsAvroScanner::ResolveSchemas(const avro_schema_t& table_schema,
 stringstream ss;
 ss << "The table has " << num_cols << " non-partition columns "
 << "but the table's Avro schema has " << num_table_fields << " fields.";
-state_->LogError(ss.str());
+state_->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str()));
 }
 if (num_table_fields <= max_materialized_col_idx) {
 return Status("Cannot read column that doesn't appear in table schema");
@@ -706,7 +706,7 @@ Function* HdfsAvroScanner::CodegenMaterializeTuple(HdfsScanNode* node,
 if (error != 0) {
 stringstream ss;
 ss << "Failed to parse table schema: " << avro_strerror();
-node->runtime_state()->LogError(ss.str());
+node->runtime_state()->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str()));
 return NULL;
 }
 int num_fields = avro_schema_record_size(table_schema.get());
@@ -70,7 +70,7 @@ Status HdfsLzoTextScanner::IssueInitialRanges(HdfsScanNode* scan_node,
 stringstream ss;
 ss << "Error loading impala-lzo library. Check that the impala-lzo library "
 << "is at version " << IMPALA_BUILD_VERSION;
-library_load_status_.AddErrorMsg(ss.str());
+library_load_status_.AddDetail(ss.str());
 return library_load_status_;
 }
 } else {
@@ -21,6 +21,7 @@
 #include <gutil/strings/substitute.h>

 #include "common/object-pool.h"
+#include "common/logging.h"
 #include "exec/hdfs-scan-node.h"
 #include "exec/scanner-context.inline.h"
 #include "exec/read-write-util.h"
@@ -37,6 +38,7 @@
 #include "util/bit-util.h"
 #include "util/decompress.h"
 #include "util/debug-util.h"
+#include "util/error-util.h"
 #include "util/dict-encoding.h"
 #include "util/rle-encoding.h"
 #include "util/runtime-profile.h"
@@ -62,6 +64,20 @@ const int MAX_PAGE_HEADER_SIZE = 8 * 1024 * 1024;
 // upper bound.
 const int MAX_DICT_HEADER_SIZE = 100;

+#define LOG_OR_ABORT(error_msg, runtime_state) \
+if (runtime_state->abort_on_error()) { \
+return Status(error_msg); \
+} else { \
+runtime_state->LogError(error_msg); \
+return Status::OK; \
+}
+
+#define LOG_OR_RETURN_ON_ERROR(error_msg, runtime_state) \
+if (runtime_state->abort_on_error()) { \
+return Status(error_msg.msg()); \
+} \
+runtime_state->LogError(error_msg);
+
 Status HdfsParquetScanner::IssueInitialRanges(HdfsScanNode* scan_node,
 const std::vector<HdfsFileDesc*>& files) {
 vector<DiskIoMgr::ScanRange*> footer_ranges;
@@ -76,10 +92,8 @@ Status HdfsParquetScanner::IssueInitialRanges(HdfsScanNode* scan_node,
 if (split->offset() != 0) {
 // We are expecting each file to be one hdfs block (so all the scan range offsets
 // should be 0). This is not incorrect but we will issue a warning.
-stringstream ss;
-ss << "Parquet file should not be split into multiple hdfs-blocks."
-<< " file=" << files[i]->filename;
-scan_node->runtime_state()->LogError(ss.str());
+scan_node->runtime_state()->LogError(
+ErrorMsg(TErrorCode::PARQUET_MULTIPLE_BLOCKS, files[i]->filename));
 // We assign the entire file to one scan range, so mark all but one split
 // (i.e. the first split) as complete
 scan_node->RangeComplete(THdfsFileFormat::PARQUET, THdfsCompression::NONE);
@@ -536,16 +550,10 @@ Status HdfsParquetScanner::BaseColumnReader::ReadDataPage() {
 RETURN_IF_ERROR(stream_->GetBuffer(true, &buffer, &buffer_size));
 if (buffer_size == 0) {
 DCHECK(stream_->eosr());
-stringstream ss;
-ss << "Column metadata states there are " << metadata_->num_values
-<< " values, but only read " << num_values_read_ << " values from column "
-<< (slot_desc()->col_pos() - parent_->scan_node_->num_partition_keys());
-if (parent_->scan_node_->runtime_state()->abort_on_error()) {
-return Status(ss.str());
-} else {
-parent_->scan_node_->runtime_state()->LogError(ss.str());
-return Status::OK;
-}
+ErrorMsg msg(TErrorCode::PARQUET_COLUMN_METADATA_INVALID,
+metadata_->num_values, num_values_read_,
+slot_desc()->col_pos() - parent_->scan_node_->num_partition_keys());
+LOG_OR_ABORT(msg, parent_->scan_node_->runtime_state());
 }

 // We don't know the actual header size until the thrift object is deserialized. Loop
@@ -554,7 +562,7 @@ Status HdfsParquetScanner::BaseColumnReader::ReadDataPage() {
 while (true) {
 header_size = buffer_size;
 status = DeserializeThriftMsg(
-buffer, &header_size, true, &current_page_header_, true);
+buffer, &header_size, true, &current_page_header_);
 if (status.ok()) break;

 if (buffer_size >= MAX_PAGE_HEADER_SIZE) {
@@ -562,7 +570,7 @@ Status HdfsParquetScanner::BaseColumnReader::ReadDataPage() {
 ss << "ParquetScanner: could not read data page because page header exceeded "
 << "maximum size of "
 << PrettyPrinter::Print(MAX_PAGE_HEADER_SIZE, TUnit::BYTES);
-status.AddErrorMsg(ss.str());
+status.AddDetail(ss.str());
 return status;
 }

@@ -578,14 +586,9 @@ Status HdfsParquetScanner::BaseColumnReader::ReadDataPage() {
 DCHECK(status.ok());

 if (buffer_size == new_buffer_size) {
-DCHECK(new_buffer_size != 0);
-string msg = "ParquetScanner: reached EOF while deserializing data page header.";
-if (parent_->scan_node_->runtime_state()->abort_on_error()) {
-return Status(msg);
-} else {
-parent_->scan_node_->runtime_state()->LogError(msg);
-return Status::OK;
-}
+DCHECK_NE(new_buffer_size, 0);
+ErrorMsg msg(TErrorCode::PARQUET_HEADER_EOF);
+LOG_OR_ABORT(msg, parent_->scan_node_->runtime_state());
 }
 DCHECK_GT(new_buffer_size, buffer_size);
 buffer_size = new_buffer_size;
@@ -827,7 +830,7 @@ Status HdfsParquetScanner::AssembleRows(int row_group_idx) {
 // For correctly formed files, this should be the first column we
 // are reading.
 DCHECK(c == 0 || !parse_status_.ok())
-<< "c=" << c << " " << parse_status_.GetErrorMsg();;
+<< "c=" << c << " " << parse_status_.GetDetail();;
 COUNTER_ADD(scan_node_->rows_read_counter(), i);
 RETURN_IF_ERROR(CommitRows(num_to_commit));

@@ -837,16 +840,12 @@ Status HdfsParquetScanner::AssembleRows(int row_group_idx) {
 rows_read += i;
 if (rows_read != expected_rows_in_group) {
 HdfsParquetScanner::BaseColumnReader* reader = column_readers_[c];
-DCHECK(reader->stream_ != NULL);
-stringstream ss;
-ss << "Metadata states that in group " << reader->stream_->filename()
-<< "[" << row_group_idx << "] there are " << expected_rows_in_group
-<< " rows, but only " << rows_read << " rows were read.";
-if (scan_node_->runtime_state()->abort_on_error()) {
-return Status(ss.str());
-} else {
-scan_node_->runtime_state()->LogError(ss.str());
-}
+DCHECK_NOTNULL(reader->stream_);
+
+ErrorMsg msg(TErrorCode::PARQUET_GROUP_ROW_COUNT_ERROR,
+reader->stream_->filename(), row_group_idx,
+expected_rows_in_group, rows_read);
+LOG_OR_RETURN_ON_ERROR(msg, scan_node_->runtime_state());
 }
 return parse_status_;
 }
@@ -901,15 +900,10 @@ Status HdfsParquetScanner::AssembleRows(int row_group_idx) {
 // in the file.
 HdfsParquetScanner::BaseColumnReader* reader = column_readers_[0];
 DCHECK(reader->stream_ != NULL);
-stringstream ss;
-ss << "Metadata states that in group " << reader->stream_->filename() << "["
-<< row_group_idx << "] there are " << expected_rows_in_group << " rows, but"
-<< " there is at least one more row in the file.";
-if (scan_node_->runtime_state()->abort_on_error()) {
-return Status(ss.str());
-} else {
-scan_node_->runtime_state()->LogError(ss.str());
-}
+ErrorMsg msg(TErrorCode::PARQUET_GROUP_ROW_COUNT_OVERFLOW,
+reader->stream_->filename(), row_group_idx,
+expected_rows_in_group);
+LOG_OR_RETURN_ON_ERROR(msg, scan_node_->runtime_state());
 }
 }

@@ -1005,7 +999,7 @@ Status HdfsParquetScanner::ProcessFooter(bool* eosr) {
 return Status(Substitute("File $0 has invalid file metadata at file offset $1. "
 "Error = $2.", stream_->filename(),
 metadata_size + sizeof(PARQUET_VERSION_NUMBER) + sizeof(uint32_t),
-status.GetErrorMsg()));
+status.GetDetail()));
 }

 RETURN_IF_ERROR(ValidateFileMetadata());
@@ -1189,7 +1183,7 @@ Status HdfsParquetScanner::CreateSchemaTree(
 int* col_idx, HdfsParquetScanner::SchemaNode* node) const {
 if (*idx >= schema.size()) {
 return Status(Substitute("File $0 corrupt: could not reconstruct schema tree from "
-"flattened schema in file metadata"), stream_->filename());
+"flattened schema in file metadata", stream_->filename()));
 }
 node->element = &schema[*idx];
 ++(*idx);
@@ -1390,42 +1384,32 @@ Status HdfsParquetScanner::ValidateColumn(

 // The other decimal metadata should be there but we don't need it.
 if (!schema_element.__isset.precision) {
-stringstream ss;
-ss << "File '" << metadata_range_->file() << "' column '" << schema_element.name
-<< "' does not have the precision set.";
-if (state_->abort_on_error()) return Status(ss.str());
-state_->LogError(ss.str());
+ErrorMsg msg(TErrorCode::PARQUET_MISSING_PRECISION,
+metadata_range_->file(), schema_element.name);
+LOG_OR_RETURN_ON_ERROR(msg, state_);
 } else {
 if (schema_element.precision != slot_desc->type().precision) {
 // TODO: we could allow a mismatch and do a conversion at this step.
-stringstream ss;
-ss << "File '" << metadata_range_->file() << "' column '" << schema_element.name
-<< "' has a precision that does not match the table metadata precision."
-<< " File metadata precision: " << schema_element.precision
-<< " Table metadata precision: " << slot_desc->type().precision;
-if (state_->abort_on_error()) return Status(ss.str());
-state_->LogError(ss.str());
+ErrorMsg msg(TErrorCode::PARQUET_WRONG_PRECISION,
+metadata_range_->file(), schema_element.name,
+schema_element.precision, slot_desc->type().precision);
+LOG_OR_RETURN_ON_ERROR(msg, state_);
 }
 }

 if (!is_converted_type_decimal) {
 // TODO: is this validation useful? It is not required at all to read the data and
 // might only serve to reject otherwise perfectly readable files.
-stringstream ss;
-ss << "File '" << metadata_range_->file() << "' column '" << schema_element.name
-<< "' does not have converted type set to DECIMAL.";
-if (state_->abort_on_error()) return Status(ss.str());
-state_->LogError(ss.str());
+ErrorMsg msg(TErrorCode::PARQUET_BAD_CONVERTED_TYPE,
+metadata_range_->file(), schema_element.name);
+LOG_OR_RETURN_ON_ERROR(msg, state_);
 }
 } else if (schema_element.__isset.scale || schema_element.__isset.precision ||
 is_converted_type_decimal) {
-stringstream ss;
-ss << "File '" << metadata_range_->file() << "' column '" << schema_element.name
-<< "' contains decimal data but the table metadata has type " << slot_desc->type();
-if (state_->abort_on_error()) return Status(ss.str());
-state_->LogError(ss.str());
+ErrorMsg msg(TErrorCode::PARQUET_INCOMPATIBLE_DECIMAL,
+metadata_range_->file(), schema_element.name, slot_desc->type().DebugString());
+LOG_OR_RETURN_ON_ERROR(msg, state_);
 }

 return Status::OK;
 }
@@ -527,7 +527,7 @@ Status HdfsRCFileScanner::ProcessRange() {
 if (state_->LogHasSpace()) {
 stringstream ss;
 ss << "file: " << stream_->filename();
-state_->LogError(ss.str());
+state_->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str()));
 }
 if (state_->abort_on_error()) {
 state_->ReportFileErrors(stream_->filename(), 1);
@@ -386,9 +386,7 @@ Status HdfsScanNode::Prepare(RuntimeState* state) {
 if (expected_local && (*scan_range_params_)[i].volume_id == -1) {
 if (!unknown_disk_id_warned_) {
 AddRuntimeExecOption("Missing Volume Id");
-runtime_state()->LogError(
-"Unknown disk id. This will negatively affect performance. "
-"Check your hdfs settings to enable block location metadata.");
+runtime_state()->LogError(ErrorMsg(TErrorCode::HDFS_SCAN_NODE_UNKNOWN_DISK));
 unknown_disk_id_warned_ = true;
 }
 ++num_ranges_missing_volume_id;
@@ -465,8 +465,8 @@ Function* HdfsScanner::CodegenWriteCompleteTuple(
 conjunct_ctxs[conjunct_idx]->root()->GetCodegendComputeFn(state, &conjunct_fn);
 if (!status.ok()) {
 stringstream ss;
-ss << "Failed to codegen conjunct: " << status.GetErrorMsg();
-state->LogError(ss.str());
+ss << "Failed to codegen conjunct: " << status.GetDetail();
+state->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str()));
 fn->eraseFromParent();
 return NULL;
 }
@@ -558,7 +558,7 @@ bool HdfsScanner::ReportTupleParseError(FieldLocation* fields, uint8_t* errors,
 stringstream ss;
 ss << "file: " << stream_->filename() << endl << "record: ";
 LogRowParseError(row_idx, &ss);
-state_->LogError(ss.str());
+state_->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str()));
 }

 ++num_errors_in_file_;
@@ -586,7 +586,10 @@ void HdfsScanner::ReportColumnParseError(const SlotDescriptor* desc,
 << desc->col_pos() - scan_node_->num_partition_keys()
 << " TO " << desc->type()
 << " (Data is: " << string(data,len) << ")";
-if (state_->LogHasSpace()) state_->LogError(ss.str());
+if (state_->LogHasSpace()) {
+state_->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str()));
+}

 if (state_->abort_on_error() && parse_status_.ok()) parse_status_ = Status(ss.str());
 }
 }
@@ -185,7 +185,7 @@ Status HdfsSequenceScanner::ProcessBlockCompressedScanRange() {
 ss << "Expecting sync indicator (-1) at file offset "
 << (stream_->file_offset() - sizeof(int)) << ". "
 << "Sync indicator found " << sync_indicator << ".";
-state_->LogError(ss.str());
+state_->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str()));
 }
 return Status("Bad sync hash");
 }
@@ -452,7 +452,7 @@ Status HdfsSequenceScanner::ReadCompressedBlock() {
 stringstream ss;
 ss << "Bad compressed block record count: "
 << num_buffered_records_in_compressed_block_;
-state_->LogError(ss.str());
+state_->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str()));
 }
 return Status("bad record count");
 }
@@ -589,8 +589,9 @@ void HdfsTableSink::ClosePartitionFile(RuntimeState* state, OutputPartition* par
 int hdfs_ret = hdfsCloseFile(hdfs_connection_, partition->tmp_hdfs_file);
 VLOG_FILE << "hdfsCloseFile() file=" << partition->current_file_name;
 if (hdfs_ret != 0) {
-state->LogError(GetHdfsErrorMsg("Failed to close HDFS file: ",
-partition->current_file_name));
+state->LogError(ErrorMsg(TErrorCode::GENERAL,
+GetHdfsErrorMsg("Failed to close HDFS file: ",
+partition->current_file_name)));
 }
 partition->tmp_hdfs_file = NULL;
 ImpaladMetrics::NUM_FILES_OPEN_FOR_INSERT->Increment(-1);
@@ -99,7 +99,7 @@ Status HdfsTextScanner::IssueInitialRanges(HdfsScanNode* scan_node,
 ss << "For better performance, snappy, gzip and bzip-compressed files "
 << "should not be split into multiple hdfs-blocks. file="
 << files[i]->filename << " offset " << split->offset();
-scan_node->runtime_state()->LogError(ss.str());
+scan_node->runtime_state()->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str()));
 warning_written = true;
 }
 // We assign the entire file to one scan range, so mark all but one split
@@ -269,8 +269,10 @@ Status HdfsTextScanner::FinishScanRange() {
 if (!status.ok()) {
 stringstream ss;
 ss << "Read failed while trying to finish scan range: " << stream_->filename()
-<< ":" << stream_->file_offset() << endl << status.GetErrorMsg();
-if (state_->LogHasSpace()) state_->LogError(ss.str());
+<< ":" << stream_->file_offset() << endl << status.GetDetail();
+if (state_->LogHasSpace()) {
+state_->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str()));
+}
 if (state_->abort_on_error()) return Status(ss.str());
 } else if (!partial_tuple_empty_ || !boundary_column_.Empty() ||
 !boundary_row_.Empty()) {
@@ -500,7 +502,9 @@ Status HdfsTextScanner::FillByteBufferGzip(bool* eosr) {
 stringstream ss;
 ss << "Unexpected end of gzip stream before end of file: ";
 ss << stream_->filename();
-if (state_->LogHasSpace()) state_->LogError(ss.str());
+if (state_->LogHasSpace()) {
+state_->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str()));
+}
 if (state_->abort_on_error()) parse_status_ = Status(ss.str());
 RETURN_IF_ERROR(parse_status_);
 }
@@ -670,7 +674,7 @@ int HdfsTextScanner::WriteFields(MemPool* pool, TupleRow* tuple_row,
 stringstream ss;
 ss << "file: " << stream_->filename() << endl << "record: ";
 LogRowParseError(0, &ss);
-state_->LogError(ss.str());
+state_->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str()));
 }
 if (state_->abort_on_error()) parse_status_ = Status(state_->ErrorLog());
 if (!parse_status_.ok()) return 0;
@@ -24,6 +24,7 @@
 #include "runtime/runtime-state.h"
 #include "runtime/string-value.inline.h"
 #include "util/debug-util.h"
+#include "util/error-util.h"
 #include "util/impalad-metrics.h"

 using namespace impala;

@@ -284,8 +285,8 @@ Function* OldHashTable::CodegenEvalTupleRow(RuntimeState* state, bool build) {
     Status status = ctxs[i]->root()->GetCodegendComputeFn(state, &expr_fn);
     if (!status.ok()) {
       stringstream ss;
-      ss << "Problem with codegen: " << status.GetErrorMsg();
-      state->LogError(ss.str());
+      ss << "Problem with codegen: " << status.GetDetail();
+      state->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str()));
       fn->eraseFromParent(); // deletes function
       return NULL;
     }

@@ -593,8 +594,8 @@ Function* OldHashTable::CodegenEquals(RuntimeState* state) {
     Status status = build_expr_ctxs_[i]->root()->GetCodegendComputeFn(state, &expr_fn);
     if (!status.ok()) {
       stringstream ss;
-      ss << "Problem with codegen: " << status.GetErrorMsg();
-      state->LogError(ss.str());
+      ss << "Problem with codegen: " << status.GetDetail();
+      state->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str()));
       fn->eraseFromParent(); // deletes function
       return NULL;
     }

@@ -708,10 +708,8 @@ void PartitionedAggregationNode::DebugString(int indentation_level,
 Status PartitionedAggregationNode::CreateHashPartitions(int level) {
   if (level >= MAX_PARTITION_DEPTH) {
     Status status = Status::MEM_LIMIT_EXCEEDED;
-    status.AddErrorMsg(Substitute("Cannot perform aggregation at hash aggregation node"
-        " with id $0. The input data was partitioned the maximum number of $1 times."
-        " This could mean there is significant skew in the data or the memory limit is"
-        " set too low.", id_, MAX_PARTITION_DEPTH));
+    status.SetErrorMsg(ErrorMsg(TErrorCode::PARTITIONED_AGG_MAX_PARTITION_DEPTH,
+        id_, MAX_PARTITION_DEPTH));
     state_->SetMemLimitExceeded();
     return status;
   }

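Specific codes such as PARTITIONED_AGG_MAX_PARTITION_DEPTH carry positional parameters ($0, $1, ...) that are substituted into a message template when the text is rendered. A sketch of that substitution step, with a made-up template string (the real templates are emitted by common/thrift/generate_error_codes.py):

#include <string>

// Hypothetical template; the real wording comes from the generated code.
static const char* kMaxPartitionDepth =
    "Node $0 repartitioned its input the maximum number of $1 times.";

// Minimal stand-in for strings::Substitute handling two parameters.
std::string SubstituteTwo(const std::string& tmpl, const std::string& a0,
    const std::string& a1) {
  std::string out;
  for (size_t i = 0; i < tmpl.size(); ++i) {
    if (tmpl[i] == '$' && i + 1 < tmpl.size() &&
        (tmpl[i + 1] == '0' || tmpl[i + 1] == '1')) {
      out += (tmpl[i + 1] == '0') ? a0 : a1;
      ++i;  // skip the digit
    } else {
      out += tmpl[i];
    }
  }
  return out;
}
// SubstituteTwo(kMaxPartitionDepth, "3", "16") yields
// "Node 3 repartitioned its input the maximum number of 16 times."
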
@@ -806,7 +804,7 @@ Status PartitionedAggregationNode::NextPartition() {
       "more rows than the input";
   if (num_input_rows == largest_partition) {
     Status status = Status::MEM_LIMIT_EXCEEDED;
-    status.AddErrorMsg(Substitute("Cannot perform aggregation at node with id $0. "
+    status.AddDetail(Substitute("Cannot perform aggregation at node with id $0. "
         "Repartitioning did not reduce the size of a spilled partition. "
         "Repartitioning level $1. Number of rows $2.",
         id_, partition->level + 1, num_input_rows));

@@ -869,8 +867,8 @@ Status PartitionedAggregationNode::SpillPartition(Partition* curr_partition,
     }
     if (!got_buffer) {
       Status status = Status::MEM_LIMIT_EXCEEDED;
-      status.AddErrorMsg("Not enough memory to get the minimum required buffers for "
-          "aggregation.");
+      status.AddDetail("Not enough memory to get the minimum required buffers for "
+          "aggregation.");
       return status;
     }
   }

@@ -1052,7 +1050,7 @@ llvm::Function* PartitionedAggregationNode::CodegenUpdateSlot(
   Function* agg_expr_fn;
   Status status = input_expr->GetCodegendComputeFn(state_, &agg_expr_fn);
   if (!status.ok()) {
-    VLOG_QUERY << "Could not codegen UpdateSlot(): " << status.GetErrorMsg();
+    VLOG_QUERY << "Could not codegen UpdateSlot(): " << status.GetDetail();
     return NULL;
   }
   DCHECK(agg_expr_fn != NULL);

@@ -493,10 +493,9 @@ Status PartitionedHashJoinNode::ConstructBuildSide(RuntimeState* state) {
 Status PartitionedHashJoinNode::ProcessBuildInput(RuntimeState* state, int level) {
   if (level >= MAX_PARTITION_DEPTH) {
     Status status = Status::MEM_LIMIT_EXCEEDED;
-    status.AddErrorMsg(Substitute("Cannot perform join at hash join node with id $0."
-        " The input data was partitioned the maximum number of $1 times."
-        " This could mean there is significant skew in the data or the memory limit is"
-        " set too low.", id_, MAX_PARTITION_DEPTH));
+    status.SetErrorMsg(ErrorMsg(
+        TErrorCode::PARTITIONED_HASH_JOIN_MAX_PARTITION_DEPTH,
+        id_, MAX_PARTITION_DEPTH));
     state->SetMemLimitExceeded();
     return status;
   }

@@ -688,7 +687,7 @@ Status PartitionedHashJoinNode::PrepareNextPartition(RuntimeState* state) {
       "more rows than the input";
   if (num_input_rows == largest_partition) {
     Status status = Status::MEM_LIMIT_EXCEEDED;
-    status.AddErrorMsg(Substitute("Cannot perform hash join at node with id $0. "
+    status.AddDetail(Substitute("Cannot perform hash join at node with id $0. "
         "Repartitioning did not reduce the size of a spilled partition. "
         "Repartitioning level $1. Number of rows $2.",
         id_, input_partition_->level_ + 1, num_input_rows));

@@ -1210,8 +1209,8 @@ Status PartitionedHashJoinNode::ReserveTupleStreamBlocks() {
     }
     if (!got_buffer) {
       Status status = Status::MEM_LIMIT_EXCEEDED;
-      status.AddErrorMsg("Not enough memory to get the minimum required buffers for "
-          "join.");
+      status.AddDetail("Not enough memory to get the minimum required buffers for "
+          "join.");
       return status;
     }
   }

@@ -220,7 +220,7 @@ class ExprTest : public testing::Test {
       ASSERT_FALSE(status.ok()) << "Expected error\nstmt: " << stmt;
       return;
     }
-    ASSERT_TRUE(status.ok()) << "stmt: " << stmt << "\nerror: " << status.GetErrorMsg();
+    ASSERT_TRUE(status.ok()) << "stmt: " << stmt << "\nerror: " << status.GetDetail();
     string result_row;
     ASSERT_TRUE(executor_->FetchResult(&result_row).ok()) << expr;
     EXPECT_EQ(TypeToOdbcString(expr_type.type), result_types[0].type) << expr;

@@ -139,7 +139,7 @@ Status Expr::CreateExprTree(ObjectPool* pool, const TExpr& texpr, ExprContext**
         "Expression tree only partially reconstructed. Not all thrift nodes were used.");
   }
   if (!status.ok()) {
-    LOG(ERROR) << "Could not construct expr tree.\n" << status.GetErrorMsg() << "\n"
+    LOG(ERROR) << "Could not construct expr tree.\n" << status.GetDetail() << "\n"
                << apache::thrift::ThriftDebugString(texpr);
   }
   return status;

@@ -131,7 +131,7 @@ AnyVal* HiveUdfCall::Evaluate(ExprContext* ctx, TupleRow* row) {
     if (!jni_ctx->warning_logged) {
       stringstream ss;
       ss << "Hive UDF path=" << fn_.hdfs_location << " class=" << fn_.scalar_fn.symbol
-         << " failed due to: " << status.GetErrorMsg();
+         << " failed due to: " << status.GetDetail();
       fn_ctx->AddWarning(ss.str().c_str());
       jni_ctx->warning_logged = true;
     }

@@ -241,7 +241,7 @@ void HiveUdfCall::Close(RuntimeState* state, ExprContext* ctx,
     env->DeleteGlobalRef(jni_ctx->executor);
     // Clear any exceptions. Not much we can do about them here.
     Status status = JniUtil::GetJniExceptionMsg(env);
-    if (!status.ok()) VLOG_QUERY << status.GetErrorMsg();
+    if (!status.ok()) VLOG_QUERY << status.GetDetail();
   }
   delete[] jni_ctx->input_values_buffer;
   delete[] jni_ctx->input_nulls_buffer;

@@ -101,14 +101,13 @@ Status ScalarFnCall::Prepare(RuntimeState* state, const RowDescriptor& desc,
   if (!status.ok()) {
     if (fn_.binary_type == TFunctionBinaryType::BUILTIN) {
       // Builtins symbols should exist unless there is a version mismatch.
-      status.AddErrorMsg(Substitute(
-          "Builtin '$0' with symbol '$1' does not exist. Verify that all your impalads "
-          "are the same version.", fn_.name.function_name, fn_.scalar_fn.symbol));
+      status.SetErrorMsg(ErrorMsg(TErrorCode::MISSING_BUILTIN,
+          fn_.name.function_name, fn_.scalar_fn.symbol));
       return status;
     } else {
       DCHECK_EQ(fn_.binary_type, TFunctionBinaryType::NATIVE);
       return Status(Substitute("Problem loading UDF '$0':\n$1",
-          fn_.name.function_name, status.GetErrorMsg()));
+          fn_.name.function_name, status.GetDetail()));
+      return status;
     }
   }

@@ -397,11 +396,8 @@ Status ScalarFnCall::GetUdf(RuntimeState* state, llvm::Function** udf) {
       fn_.hdfs_location, fn_.scalar_fn.symbol, &fn_ptr, &cache_entry_);
   if (!status.ok() && fn_.binary_type == TFunctionBinaryType::BUILTIN) {
     // Builtins symbols should exist unless there is a version mismatch.
-    stringstream ss;
-    ss << "Builtin '" << fn_.name.function_name << "' with symbol '"
-       << fn_.scalar_fn.symbol << "' does not exist. "
-       << "Verify that all your impalads are the same version.";
-    status.AddErrorMsg(ss.str());
+    status.AddDetail(ErrorMsg(TErrorCode::MISSING_BUILTIN,
+        fn_.name.function_name, fn_.scalar_fn.symbol).msg());
   }
   RETURN_IF_ERROR(status);
   DCHECK(fn_ptr != NULL);

@@ -266,7 +266,7 @@ Status ResourceBroker::RegisterWithLlama() {
     // Cycle through the list of Llama addresses for Llama failover.
     llama_addr_idx = (llama_addr_idx + 1) % llama_addresses_.size();
     LOG(INFO) << "Failed to connect to Llama at " << llama_address << "." << endl
-              << "Error: " << client_status.GetErrorMsg() << endl
+              << "Error: " << client_status.GetDetail() << endl
               << "Retrying to connect to Llama at "
               << llama_addresses_[llama_addr_idx] << " in "
               << FLAGS_llama_registration_wait_secs << "s.";

@@ -128,7 +128,7 @@ CreateDeserializeProtocol(
 // set to the actual length of the header.
 template <class T>
 Status DeserializeThriftMsg(const uint8_t* buf, uint32_t* len, bool compact,
-    T* deserialized_msg, bool quiet = false) {
+    T* deserialized_msg) {
   // Deserialize msg bytes into c++ thrift msg using memory
   // transport. TMemoryBuffer is not const-safe, although we use it in
   // a const-safe way, so we have to explicitly cast away the const.

@@ -141,10 +141,10 @@ Status DeserializeThriftMsg(const uint8_t* buf, uint32_t* len, bool compact,
   } catch (std::exception& e) {
     std::stringstream msg;
     msg << "couldn't deserialize thrift msg:\n" << e.what();
-    return Status(msg.str(), quiet);
+    return Status(msg.str());
   } catch (...) {
     // TODO: Find the right exception for 0 bytes
-    return Status("Unknown exception", quiet);
+    return Status("Unknown exception");
   }
   uint32_t bytes_left = tmem_transport->available_read();
   *len = *len - bytes_left;

@@ -152,14 +152,13 @@ Status DeserializeThriftMsg(const uint8_t* buf, uint32_t* len, bool compact,
 }

 template <class T>
-Status DeserializeThriftMsg(JNIEnv* env, jbyteArray serialized_msg, T* deserialized_msg,
-    bool quiet = false) {
+Status DeserializeThriftMsg(JNIEnv* env, jbyteArray serialized_msg, T* deserialized_msg) {
   jboolean is_copy = false;
   uint32_t buf_size = env->GetArrayLength(serialized_msg);
   jbyte* buf = env->GetByteArrayElements(serialized_msg, &is_copy);

   RETURN_IF_ERROR(DeserializeThriftMsg(
-      reinterpret_cast<uint8_t*>(buf), &buf_size, false, deserialized_msg, quiet));
+      reinterpret_cast<uint8_t*>(buf), &buf_size, false, deserialized_msg));

   // Return buffer back. JNI_ABORT indicates to not copy contents back to java
   // side.

@@ -361,7 +361,7 @@ void BufferedBlockMgr::Cancel() {
 Status BufferedBlockMgr::MemLimitTooLowError(Client* client) {
   // TODO: what to print here. We can't know the value of the entire query here.
   Status status = Status::MEM_LIMIT_EXCEEDED;
-  status.AddErrorMsg(Substitute("The memory limit is set too low initialize the"
+  status.AddDetail(Substitute("The memory limit is set too low initialize the"
       " spilling operator. The minimum required memory to spill this operator is $0.",
       PrettyPrinter::Print(client->num_reserved_buffers_ * max_block_size(),
           TUnit::BYTES)));

@@ -701,7 +701,7 @@ void BufferedBlockMgr::WriteComplete(Block* block, const Status& write_status) {
   if (is_cancelled_.Read() == 1) return;
   // Check for an error. Set cancelled and wake up waiting threads if an error occurred.
   if (!write_status.ok()) {
-    block->client_->state_->LogError(write_status);
+    block->client_->state_->LogError(write_status.msg());
     is_cancelled_.Swap(1);
     if (block->client_local_) {
       block->write_complete_cv_.notify_one();

@@ -859,7 +859,7 @@ Status BufferedBlockMgr::FindBufferForBlock(Block* block, bool* in_mem) {
       VLOG_QUERY << ss.str();
     }
     Status status = Status::MEM_LIMIT_EXCEEDED;
-    status.AddErrorMsg("Query did not have enough memory to get the minimum required "
+    status.AddDetail("Query did not have enough memory to get the minimum required "
         "buffers in the block manager.");
     return status;
   }

@@ -300,7 +300,7 @@ class SimpleTupleStreamTest : public testing::Test {
   void TestValues(int num_batches, RowDescriptor* desc, bool gen_null) {
     BufferedTupleStream stream(runtime_state_.get(), *desc, block_mgr_.get(), client_);
     Status status = stream.Init();
-    ASSERT_TRUE(status.ok()) << status.GetErrorMsg();
+    ASSERT_TRUE(status.ok()) << status.GetDetail();
     status = stream.UnpinStream();
     ASSERT_TRUE(status.ok());

@@ -50,6 +50,7 @@
 #include "exec/scan-node.h"
 #include "util/container-util.h"
 #include "util/debug-util.h"
+#include "util/error-util.h"
 #include "util/hdfs-bulk-ops.h"
 #include "util/hdfs-util.h"
 #include "util/llama-util.h"

@@ -125,7 +126,7 @@ class Coordinator::BackendExecState {
   bool done; // if true, execution terminated; do not cancel in that case
   bool profile_created; // true after the first call to profile->Update()
   RuntimeProfile* profile; // owned by obj_pool()
-  std::vector<std::string> error_log; // errors reported by this backend
+  ErrorLogMap error_log; // errors reported by this backend

   // Total scan ranges complete across all scan nodes
   int64_t total_ranges_complete;

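The ErrorLogMap that replaces the flat vector is not defined in this diff; judging from the accesses elsewhere in the patch (v.second.messages, v.second.count) it maps an error code to a count plus sample messages. A sketch under that assumption (the authoritative definition lives in util/error-util.h and may differ):

#include <map>
#include <string>
#include <vector>

// Assumed shape of the map's value type, inferred from its uses in this patch.
struct ErrorLogEntry {
  int count;                          // occurrences observed so far
  std::vector<std::string> messages;  // sample message(s) for this code
  ErrorLogEntry() : count(0) {}
};

// Keyed by the TErrorCode value (int here to keep the sketch standalone).
typedef std::map<int, ErrorLogEntry> ErrorLogMap;
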
@@ -1173,7 +1174,7 @@ void Coordinator::CancelRemoteFragments() {
       VLOG_RPC << "Retrying CancelPlanFragment: " << e.what();
       Status status = backend_client.Reopen();
       if (!status.ok()) {
-        exec_state->status.AddError(status);
+        exec_state->status.MergeStatus(status);
         continue;
       }
       backend_client->CancelPlanFragment(res, params);

@@ -1184,11 +1185,11 @@ void Coordinator::CancelRemoteFragments() {
         << " instance_id=" << exec_state->fragment_instance_id
         << " failed: " << e.what();
     // make a note of the error status, but keep on cancelling the other fragments
-    exec_state->status.AddErrorMsg(msg.str());
+    exec_state->status.AddDetail(msg.str());
     continue;
   }
-  if (res.status.status_code != TStatusCode::OK) {
-    exec_state->status.AddErrorMsg(algorithm::join(res.status.error_msgs, "; "));
+  if (res.status.status_code != TErrorCode::OK) {
+    exec_state->status.AddDetail(algorithm::join(res.status.error_msgs, "; "));
   }
 }

@@ -1201,7 +1202,7 @@ Status Coordinator::UpdateFragmentExecStatus(const TReportExecStatusParams& para
       << " status=" << params.status.status_code
       << " done=" << (params.done ? "true" : "false");
   if (params.backend_num >= backend_exec_states_.size()) {
-    return Status(TStatusCode::INTERNAL_ERROR, "unknown backend number");
+    return Status(TErrorCode::INTERNAL_ERROR, "unknown backend number");
   }
   BackendExecState* exec_state = backend_exec_states_[params.backend_num];

@@ -1241,11 +1242,13 @@ Status Coordinator::UpdateFragmentExecStatus(const TReportExecStatusParams& para
     }
     exec_state->profile_created = true;

+    // Log messages aggregated by type
     if (params.__isset.error_log && params.error_log.size() > 0) {
-      exec_state->error_log.insert(exec_state->error_log.end(), params.error_log.begin(),
-          params.error_log.end());
+      // Append the log messages from each update with the global state of the query
+      // execution
+      MergeErrorMaps(&exec_state->error_log, params.error_log);
       VLOG_FILE << "instance_id=" << exec_state->fragment_instance_id
-                << " error log: " << join(exec_state->error_log, "\n");
+                << " error log: " << PrintErrorMapToString(exec_state->error_log);
     }
     progress_.Update(exec_state->UpdateNumScanRangesCompleted());
   }

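MergeErrorMaps is what keeps this aggregation cheap across status reports: entries for the same code collapse into a count while GENERAL messages are carried over individually. A sketch of plausible semantics, reusing the ErrorLogEntry/ErrorLogMap sketch above (assumed behavior, not the actual implementation):

static const int kGeneralCode = 1;  // stand-in for TErrorCode::GENERAL

void MergeErrorMapsSketch(ErrorLogMap* dst, const ErrorLogMap& src) {
  for (ErrorLogMap::const_iterator it = src.begin(); it != src.end(); ++it) {
    ErrorLogEntry& entry = (*dst)[it->first];
    if (it->first == kGeneralCode) {
      // GENERAL errors are never aggregated: keep every message.
      entry.messages.insert(entry.messages.end(),
          it->second.messages.begin(), it->second.messages.end());
    } else if (entry.messages.empty()) {
      // First sighting of this code: keep its sample message.
      entry.messages = it->second.messages;
    }
    entry.count += it->second.count;
  }
}
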
@@ -1516,20 +1519,21 @@ void Coordinator::ReportQuerySummary() {
 }

 string Coordinator::GetErrorLog() {
-  stringstream ss;
-  lock_guard<mutex> l(lock_);
-  if (executor_.get() != NULL && executor_->runtime_state() != NULL &&
-      !executor_->runtime_state()->ErrorLogIsEmpty()) {
-    ss << executor_->runtime_state()->ErrorLog() << "\n";
-  }
+  ErrorLogMap merged;
+  {
+    lock_guard<mutex> l(lock_);
+    if (executor_.get() != NULL && executor_->runtime_state() != NULL &&
+        !executor_->runtime_state()->ErrorLogIsEmpty()) {
+      MergeErrorMaps(&merged, executor_->runtime_state()->error_log());
+    }
+  }
   for (int i = 0; i < backend_exec_states_.size(); ++i) {
     lock_guard<mutex> l(backend_exec_states_[i]->lock);
     if (backend_exec_states_[i]->error_log.size() > 0) {
-      ss << "Backend " << i << ":"
-         << join(backend_exec_states_[i]->error_log, "\n") << "\n";
+      MergeErrorMaps(&merged, backend_exec_states_[i]->error_log);
     }
   }
-  return ss.str();
+  return PrintErrorMapToString(merged);
 }

 void Coordinator::SetExecPlanFragmentParams(

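PrintErrorMapToString then renders the merged map as the single error log returned to the client. A hypothetical rendering, again built on the sketch types above; the real output format is whatever util/error-util.cc defines:

#include <sstream>

std::string PrintErrorMapToStringSketch(const ErrorLogMap& errors) {
  std::stringstream out;
  for (ErrorLogMap::const_iterator it = errors.begin(); it != errors.end(); ++it) {
    const ErrorLogEntry& e = it->second;
    for (size_t i = 0; i < e.messages.size(); ++i) {
      out << e.messages[i];
      // Hypothetical annotation for aggregated codes with multiple hits.
      if (e.count > 1) out << " (" << e.count << " occurrences)";
      out << "\n";
    }
  }
  return out.str();
}
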
@@ -159,7 +159,8 @@ class Coordinator {
   // Must only be called after Wait()
   bool PrepareCatalogUpdate(TUpdateCatalogRequest* catalog_update);

-  // Return error log for coord and all the fragments
+  // Return error log for coord and all the fragments. The error messages from the
+  // individual backends are merged into a single output to retain readability.
   std::string GetErrorLog();

   const ProgressUpdater& progress() { return progress_; }

@@ -214,7 +214,7 @@ void DataStreamSender::Channel::TransmitDataHelper(const TRowBatch* batch) {
     }
   }

-  if (res.status.status_code != TStatusCode::OK) {
+  if (res.status.status_code != TErrorCode::OK) {
     rpc_status_ = res.status;
   } else {
     num_data_bytes_sent_ += RowBatch::GetBatchSize(*batch);

@@ -275,7 +275,7 @@ Status DataStreamSender::Channel::SendCurrentBatch() {
 Status DataStreamSender::Channel::GetSendStatus() {
   WaitForRpc();
   if (!rpc_status_.ok()) {
-    LOG(ERROR) << "channel send status: " << rpc_status_.GetErrorMsg();
+    LOG(ERROR) << "channel send status: " << rpc_status_.GetDetail();
   }
   return rpc_status_;
 }

@@ -325,7 +325,8 @@ Status DataStreamSender::Channel::CloseInternal() {
 }

 void DataStreamSender::Channel::Close(RuntimeState* state) {
-  state->LogError(CloseInternal());
+  Status s = CloseInternal();
+  if (!s.ok()) state->LogError(s.msg());
   rpc_thread_.DrainAndShutdown();
   batch_.reset();
 }

@@ -178,7 +178,7 @@ string DiskIoMgr::RequestContext::DebugString() const {
   if (state_ == RequestContext::Cancelled) ss << "Cancelled";
   if (state_ == RequestContext::Active) ss << "Active";
   if (state_ != RequestContext::Inactive) {
-    ss << " status_=" << (status_.ok() ? "OK" : status_.GetErrorMsg())
+    ss << " status_=" << (status_.ok() ? "OK" : status_.GetDetail())
        << " #ready_buffers=" << num_ready_buffers_
        << " #used_buffers=" << num_used_buffers_
        << " #num_buffers_in_reader=" << num_buffers_in_reader_

@@ -41,7 +41,7 @@ class DiskIoMgrTest : public testing::Test {
   void WriteValidateCallback(int num_writes, DiskIoMgr::WriteRange** written_range,
       DiskIoMgr* io_mgr, DiskIoMgr::RequestContext* reader, int32_t* data,
       Status expected_status, const Status& status) {
-    if (expected_status.code() == TStatusCode::CANCELLED) {
+    if (expected_status.code() == TErrorCode::CANCELLED) {
       EXPECT_TRUE(status.ok() || status.IsCancelled());
     } else {
       EXPECT_TRUE(status.code() == expected_status.code());

@@ -242,7 +242,7 @@ TEST_F(DiskIoMgrTest, InvalidWrite) {
   DiskIoMgr::WriteRange::WriteDoneCallback callback =
       bind(mem_fn(&DiskIoMgrTest::WriteValidateCallback), this, 2,
           new_range, (DiskIoMgr*)NULL, (DiskIoMgr::RequestContext*)NULL,
-          data, Status(TStatusCode::RUNTIME_ERROR), _1);
+          data, Status(TErrorCode::RUNTIME_ERROR, "Test Failure"), _1);
   *new_range = pool_->Add(new DiskIoMgr::WriteRange(tmp_file, rand(), 0, callback));

   (*new_range)->SetData(reinterpret_cast<uint8_t*>(data), sizeof(int32_t));

@@ -260,7 +260,7 @@ TEST_F(DiskIoMgrTest, InvalidWrite) {
   new_range = pool_->Add(new DiskIoMgr::WriteRange*);
   callback = bind(mem_fn(&DiskIoMgrTest::WriteValidateCallback), this, 2,
       new_range, (DiskIoMgr*)NULL, (DiskIoMgr::RequestContext*)NULL,
-      data, Status(TStatusCode::RUNTIME_ERROR), _1);
+      data, Status(TErrorCode::RUNTIME_ERROR, "Test Failure"), _1);

   *new_range = pool_->Add(new DiskIoMgr::WriteRange(tmp_file, -1, 0, callback));
   (*new_range)->SetData(reinterpret_cast<uint8_t*>(data), sizeof(int32_t));

@@ -1037,16 +1037,16 @@ void DiskIoMgr::Write(RequestContext* writer_context, WriteRange* write_range) {
   FILE* file_handle = fopen(write_range->file(), "rb+");
   Status ret_status;
   if (file_handle == NULL) {
-    ret_status = Status(TStatusCode::RUNTIME_ERROR,
+    ret_status = Status(ErrorMsg(TErrorCode::RUNTIME_ERROR,
         Substitute("fopen($0, \"rb+\") failed with errno=$1 description=$2",
-            write_range->file_, errno, GetStrErrMsg()));
+            write_range->file_, errno, GetStrErrMsg())));
   } else {
     ret_status = WriteRangeHelper(file_handle, write_range);

     int success = fclose(file_handle);
     if (ret_status.ok() && success != 0) {
-      ret_status = Status(TStatusCode::RUNTIME_ERROR, Substitute("fclose($0) failed",
-          write_range->file_));
+      ret_status = Status(ErrorMsg(TErrorCode::RUNTIME_ERROR, Substitute("fclose($0) failed",
+          write_range->file_)));
     }
   }

@@ -1061,24 +1061,24 @@ Status DiskIoMgr::WriteRangeHelper(FILE* file_handle, WriteRange* write_range) {
     success = posix_fallocate(file_desc, write_range->offset(), write_range->len_);
   }
   if (success != 0) {
-    return Status(TStatusCode::RUNTIME_ERROR,
+    return Status(ErrorMsg(TErrorCode::RUNTIME_ERROR,
         Substitute("posix_fallocate($0, $1, $2) failed for file $3"
             " with returnval=$4 description=$5", file_desc, write_range->offset(),
-            write_range->len_, write_range->file_, success, GetStrErrMsg()));
+            write_range->len_, write_range->file_, success, GetStrErrMsg())));
   }
   // Seek to the correct offset and perform the write.
   success = fseek(file_handle, write_range->offset(), SEEK_SET);
   if (success != 0) {
-    return Status(TStatusCode::RUNTIME_ERROR,
+    return Status(ErrorMsg(TErrorCode::RUNTIME_ERROR,
         Substitute("fseek($0, $1, SEEK_SET) failed with errno=$2 description=$3",
-            write_range->file_, write_range->offset(), errno, GetStrErrMsg()));
+            write_range->file_, write_range->offset(), errno, GetStrErrMsg())));
   }

   int64_t bytes_written = fwrite(write_range->data_, 1, write_range->len_, file_handle);
   if (bytes_written < write_range->len_) {
-    return Status(TStatusCode::RUNTIME_ERROR,
+    return Status(ErrorMsg(TErrorCode::RUNTIME_ERROR,
         Substitute("fwrite(buffer, 1, $0, $1) failed with errno=$2 description=$3",
-            write_range->len_, write_range->file_, errno, GetStrErrMsg()));
+            write_range->len_, write_range->file_, errno, GetStrErrMsg())));
   }
   if (ImpaladMetrics::IO_MGR_BYTES_WRITTEN != NULL) {
     ImpaladMetrics::IO_MGR_BYTES_WRITTEN->Increment(write_range->len_);

@@ -356,7 +356,7 @@ Status ExecEnv::StartServices() {
   if (statestore_subscriber_.get() != NULL) {
     Status status = statestore_subscriber_->Start();
     if (!status.ok()) {
-      status.AddErrorMsg("State Store Subscriber did not start up.");
+      status.AddDetail("State Store Subscriber did not start up.");
       return status;
     }
   }

@@ -51,12 +51,16 @@ void HBaseTable::Close(RuntimeState* state) {

   JNIEnv* env = getJNIEnv();
   if (env == NULL) {
-    state->LogError("HBaseTable::Close(): Error creating JNIEnv");
+    state->LogError(ErrorMsg(
+        TErrorCode::GENERAL, "HBaseTable::Close(): Error creating JNIEnv"));
   } else {
     env->CallObjectMethod(htable_, htable_close_id_);
-    state->LogError(JniUtil::GetJniExceptionMsg(env, "HBaseTable::Close(): "));
+    Status s = JniUtil::GetJniExceptionMsg(env, "HBaseTable::Close(): ");
+    if (!s.ok()) state->LogError(s.msg());
     env->DeleteGlobalRef(htable_);
-    state->LogError(JniUtil::GetJniExceptionMsg(env, "HBaseTable::Close(): "));
+    s = JniUtil::GetJniExceptionMsg(env, "HBaseTable::Close(): ");
+    if (!s.ok()) state->LogError(s.msg());
   }

   htable_ = NULL;

@@ -127,11 +127,11 @@ LibCache::LibCacheEntry::~LibCacheEntry() {
 }

 Status LibCache::GetSoFunctionPtr(const string& hdfs_lib_file, const string& symbol,
-    void** fn_ptr, LibCacheEntry** ent, bool quiet) {
+    void** fn_ptr, LibCacheEntry** ent) {
   if (hdfs_lib_file.empty()) {
     // Just loading a function ptr in the current process. No need to take any locks.
     DCHECK(current_process_handle_ != NULL);
-    RETURN_IF_ERROR(DynamicLookup(current_process_handle_, symbol.c_str(), fn_ptr, quiet));
+    RETURN_IF_ERROR(DynamicLookup(current_process_handle_, symbol.c_str(), fn_ptr));
     return Status::OK;
   }

@@ -153,7 +153,7 @@ Status LibCache::GetSoFunctionPtr(const string& hdfs_lib_file, const string& sym
     *fn_ptr = it->second;
   } else {
     RETURN_IF_ERROR(
-        DynamicLookup(entry->shared_object_handle, symbol.c_str(), fn_ptr, quiet));
+        DynamicLookup(entry->shared_object_handle, symbol.c_str(), fn_ptr));
     entry->symbol_cache[symbol] = *fn_ptr;
   }

@@ -189,10 +189,10 @@ Status LibCache::GetLocalLibPath(const string& hdfs_lib_file, LibType type,
 }

 Status LibCache::CheckSymbolExists(const string& hdfs_lib_file, LibType type,
-    const string& symbol, bool quiet) {
+    const string& symbol) {
   if (type == TYPE_SO) {
     void* dummy_ptr = NULL;
-    return GetSoFunctionPtr(hdfs_lib_file, symbol, &dummy_ptr, NULL, quiet);
+    return GetSoFunctionPtr(hdfs_lib_file, symbol, &dummy_ptr, NULL);
   } else if (type == TYPE_IR) {
     unique_lock<mutex> lock;
     LibCacheEntry* entry = NULL;

@@ -203,7 +203,7 @@ Status LibCache::CheckSymbolExists(const string& hdfs_lib_file, LibType type,
       stringstream ss;
       ss << "Symbol '" << symbol << "' does not exist in module: " << hdfs_lib_file
          << " (local path: " << entry->local_path << ")";
-      return Status(ss.str(), quiet);
+      return Status(ss.str());
     }
     return Status::OK;
   } else if (type == TYPE_JAR) {

@@ -75,9 +75,8 @@ class LibCache {
       std::string* local_path);

   // Returns status.ok() if the symbol exists in 'hdfs_lib_file', non-ok otherwise.
-  // If 'quiet' is true, the error status for non-Java unfound symbols will not be logged.
   Status CheckSymbolExists(const std::string& hdfs_lib_file, LibType type,
-      const std::string& symbol, bool quiet = false);
+      const std::string& symbol);

   // Returns a pointer to the function for the given library and symbol.
   // If 'hdfs_lib_file' is empty, the symbol is looked up in the impalad process.

@@ -89,10 +88,8 @@ class LibCache {
   // entry is non-null and *entry is non-null, *entry will be reused (i.e., the use count
   // is not increased). The caller must call DecrementUseCount(*entry) when it is done
   // using fn_ptr and it is no longer valid to use fn_ptr.
-  //
-  // If 'quiet' is true, returned error statuses will not be logged.
   Status GetSoFunctionPtr(const std::string& hdfs_lib_file, const std::string& symbol,
-      void** fn_ptr, LibCacheEntry** entry, bool quiet = false);
+      void** fn_ptr, LibCacheEntry** entry);

   // Marks the entry for 'hdfs_lib_file' as needing to be refreshed if the file in HDFS is
   // newer than the local cached copied. The refresh will occur the next time the entry is

@@ -309,7 +309,7 @@ bool MemTracker::ExpandRmReservation(int64_t bytes) {
   if (!status.ok()) {
     LOG(INFO) << "Failed to expand memory limit by "
               << PrettyPrinter::Print(bytes, TUnit::BYTES) << ": "
-              << status.GetErrorMsg();
+              << status.GetDetail();
     return false;
   }

@@ -148,9 +148,7 @@ Status PlanFragmentExecutor::Prepare(const TExecPlanFragmentParams& request) {
   // have a reservation larger than the hard limit. Clamp reservation bytes limit to the
   // hard limit (if it exists).
   if (rm_reservation_size_bytes > bytes_limit && bytes_limit != -1) {
-    runtime_state_->LogError(Substitute("Reserved resource size ($0) is larger than "
-        "query mem limit ($1), and will be restricted to $1. Configure the reservation "
-        "size by setting RM_INITIAL_MEM.",
+    runtime_state_->LogError(ErrorMsg(TErrorCode::FRAGMENT_EXECUTOR,
         PrettyPrinter::PrintBytes(rm_reservation_size_bytes),
         PrettyPrinter::PrintBytes(bytes_limit)));
     rm_reservation_size_bytes = bytes_limit;

@@ -275,8 +273,8 @@ void PlanFragmentExecutor::OptimizeLlvmModule() {
   status = codegen->FinalizeModule();
   if (!status.ok()) {
     stringstream ss;
-    ss << "Error with codegen for this query: " << status.GetErrorMsg();
-    runtime_state_->LogError(ss.str());
+    ss << "Error with codegen for this query: " << status.GetDetail();
+    runtime_state_->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str()));
   }
 }

@@ -321,7 +319,7 @@ Status PlanFragmentExecutor::Open() {
     // Log error message in addition to returning in Status. Queries that do not
     // fetch results (e.g. insert) may not receive the message directly and can
     // only retrieve the log.
-    runtime_state_->LogError(status.GetErrorMsg());
+    runtime_state_->LogError(status.msg());
   }
   UpdateStatus(status);
   return status;

@@ -76,7 +76,7 @@ RowBatch::RowBatch(const RowDescriptor& row_desc, const TRowBatch& input_batch,
   scoped_ptr<Codec> decompressor;
   Status status = Codec::CreateDecompressor(NULL, false, input_batch.compression_type,
       &decompressor);
-  DCHECK(status.ok()) << status.GetErrorMsg();
+  DCHECK(status.ok()) << status.GetDetail();

   int64_t uncompressed_size = input_batch.uncompressed_size;
   DCHECK_NE(uncompressed_size, -1) << "RowBatch decompression failed";

@@ -187,7 +187,7 @@ int RowBatch::Serialize(TRowBatch* output_batch) {
   scoped_ptr<Codec> compressor;
   Status status = Codec::CreateCompressor(NULL, false, THdfsCompression::LZ4,
       &compressor);
-  DCHECK(status.ok()) << status.GetErrorMsg();
+  DCHECK(status.ok()) << status.GetDetail();

   int64_t compressed_size = compressor->MaxOutputLen(size);
   if (compression_scratch_.size() < compressed_size) {

@@ -17,6 +17,7 @@

 #include "common/logging.h"
+#include <boost/algorithm/string/join.hpp>
 #include <gutil/strings/substitute.h>

 #include "codegen/llvm-codegen.h"
 #include "common/object-pool.h"

@@ -65,7 +66,6 @@ namespace impala {
 RuntimeState::RuntimeState(const TPlanFragmentInstanceCtx& fragment_instance_ctx,
     const string& cgroup, ExecEnv* exec_env)
   : obj_pool_(new ObjectPool()),
-    unreported_error_idx_(0),
     fragment_instance_ctx_(fragment_instance_ctx),
     now_(new TimestampValue(fragment_instance_ctx_.query_ctx.now_string.c_str(),
         fragment_instance_ctx_.query_ctx.now_string.size())),

@@ -76,12 +76,11 @@ RuntimeState::RuntimeState(const TPlanFragmentInstanceCtx& fragment_instance_ctx
     query_resource_mgr_(NULL),
     root_node_id_(-1) {
   Status status = Init(exec_env);
-  DCHECK(status.ok()) << status.GetErrorMsg();
+  DCHECK(status.ok()) << status.GetDetail();
 }

 RuntimeState::RuntimeState(const TQueryCtx& query_ctx)
   : obj_pool_(new ObjectPool()),
-    unreported_error_idx_(0),
     now_(new TimestampValue(query_ctx.now_string.c_str(),
         query_ctx.now_string.size())),
     exec_env_(ExecEnv::GetInstance()),

@@ -125,6 +124,10 @@ Status RuntimeState::Init(ExecEnv* exec_env) {
   exec_env_ = exec_env;
   TQueryOptions& query_options =
       fragment_instance_ctx_.query_ctx.request.query_options;
+
+  // max_errors does not indicate how many errors in total have been recorded, but rather
+  // how many are distinct. It is defined as the sum of the number of generic errors and
+  // the number of distinct other errors.
   if (query_options.max_errors <= 0) {
     // TODO: fix linker error and uncomment this
     //query_options_.max_errors = FLAGS_max_errors;

@@ -201,7 +204,7 @@ bool RuntimeState::ErrorLogIsEmpty() {

 string RuntimeState::ErrorLog() {
   ScopedSpinLock l(&error_log_lock_);
-  return join(error_log_, "\n");
+  return PrintErrorMapToString(error_log_);
 }

 string RuntimeState::FileErrors() {

@@ -220,32 +223,26 @@ void RuntimeState::ReportFileErrors(const std::string& file_name, int num_errors
   file_errors_.push_back(make_pair(file_name, num_errors));
 }

-bool RuntimeState::LogError(const string& error) {
+bool RuntimeState::LogError(const ErrorMsg& message) {
   ScopedSpinLock l(&error_log_lock_);
-  if (error_log_.size() < query_options().max_errors) {
-    VLOG_QUERY << "Error from query " << query_id() << ": " << error;
-    error_log_.push_back(error);
+  // All errors go to the log; unreported_error_count_ is counted independently of the
+  // size of the error_log to account for errors that were already reported to the
+  // coordinator.
+  VLOG_QUERY << "Error from query " << query_id() << ": " << message.msg();
+  if (ErrorCount(error_log_) < query_options().max_errors) {
+    AppendError(&error_log_, message);
     return true;
   }
   return false;
 }

-void RuntimeState::LogError(const Status& status) {
-  if (status.ok()) return;
-  // Don't log cancelled or mem limit exceeded to the log.
-  // For cancelled, the error message is not useful ("Cancelled") and can happen due to
-  // a limit clause.
-  // For mem limit exceeded, the query will report it via SetMemLimitExceeded which
-  // makes the status error message redundant.
-  if (status.IsCancelled() || status.IsMemLimitExceeded()) return;
-  LogError(status.GetErrorMsg());
-}
-
-void RuntimeState::GetUnreportedErrors(vector<string>* new_errors) {
+void RuntimeState::GetUnreportedErrors(ErrorLogMap* new_errors) {
   ScopedSpinLock l(&error_log_lock_);
-  if (unreported_error_idx_ < error_log_.size()) {
-    new_errors->assign(error_log_.begin() + unreported_error_idx_, error_log_.end());
-    unreported_error_idx_ = error_log_.size();
-  }
+  *new_errors = error_log_;
+  // Reset the map, but keep all already reported keys so that we do not
+  // report the same errors multiple times.
+  BOOST_FOREACH(ErrorLogMap::value_type& v, error_log_) {
+    v.second.messages.clear();
+    v.second.count = 0;
+  }
 }

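The two helpers used above are also not shown in this diff. Judging from the call sites, ErrorCount treats each GENERAL message as distinct while every other code counts once, and AppendError stores a GENERAL message verbatim but only bumps the count for an already-seen specific code. A sketch under those assumptions, using the same ErrorLogMap sketch as before:

size_t ErrorCountSketch(const ErrorLogMap& log) {
  size_t count = log.size();  // each distinct code counts once...
  ErrorLogMap::const_iterator it = log.find(kGeneralCode);
  if (it != log.end() && !it->second.messages.empty()) {
    count += it->second.messages.size() - 1;  // ...but GENERAL counts per message
  }
  return count;
}

void AppendErrorSketch(ErrorLogMap* log, int code, const std::string& msg) {
  ErrorLogEntry& entry = (*log)[code];
  if (code == kGeneralCode || entry.messages.empty()) {
    entry.messages.push_back(msg);  // keep GENERAL verbatim, else first sample
  }
  ++entry.count;
}
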
@@ -277,11 +274,12 @@ Status RuntimeState::SetMemLimitExceeded(MemTracker* tracker,
   } else {
     ss << query_mem_tracker_->LogUsage();
   }
-  LogError(ss.str());
+  LogError(ErrorMsg(TErrorCode::GENERAL, ss.str()));
   // Add warning about missing stats.
   if (query_ctx().__isset.tables_missing_stats
       && !query_ctx().tables_missing_stats.empty()) {
-    LogError(GetTablesMissingStatsWarning(query_ctx().tables_missing_stats));
+    LogError(ErrorMsg(TErrorCode::GENERAL,
+        GetTablesMissingStatsWarning(query_ctx().tables_missing_stats)));
   }
   DCHECK(query_status_.IsMemLimitExceeded());
   return query_status_;

@@ -118,7 +118,7 @@ class RuntimeState {
   }
   const TimestampValue* now() const { return now_.get(); }
   void set_now(const TimestampValue* now);
-  const std::vector<std::string>& error_log() const { return error_log_; }
+  const ErrorLogMap& error_log() const { return error_log_; }
   const std::vector<std::pair<std::string, int> >& file_errors() const {
     return file_errors_;
   }

@@ -201,12 +201,11 @@ class RuntimeState {
     return query_status_;
   };

-  // Appends error to the error_log_ if there is space. Returns true if there was space
-  // and the error was logged.
-  bool LogError(const std::string& error);
-
-  // If !status.ok(), appends the error to the error_log_
-  void LogError(const Status& status);
+  // Log an error that will be sent back to the coordinator based on an instance of the
+  // ErrorMsg class. The runtime state aggregates log messages based on type with one
+  // exception: messages with the GENERAL type are not aggregated but are kept
+  // individually.
+  bool LogError(const ErrorMsg& msg);

   // Returns true if the error log has not reached max_errors_.
   bool LogHasSpace() {

@@ -226,9 +225,9 @@ class RuntimeState {
   // Returns the error log lines as a string joined with '\n'.
   std::string ErrorLog();

-  // Append all error_log_[unreported_error_idx_+] to new_errors and set
-  // unreported_error_idx_ to errors_log_.size()
-  void GetUnreportedErrors(std::vector<std::string>* new_errors);
+  // Append all accumulated errors since the last call to this function to new_errors to
+  // be sent back to the coordinator
+  void GetUnreportedErrors(ErrorLogMap* new_errors);

   // Returns a string representation of the file_errors_.
   std::string FileErrors();

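Taken together, the reporting cycle looks roughly like the following usage sketch: each report drains what accumulated since the last call, and the coordinator folds it into its own map, so a given message travels at most once (simplified; the real GetUnreportedErrors keeps reported keys around to suppress duplicates):

void ReportCycleSketch(ErrorLogMap* fragment_log, ErrorLogMap* coordinator_log) {
  // 1. Backend side: copy out everything unreported, then reset counts and
  //    messages while keeping the keys (mirrors GetUnreportedErrors()).
  ErrorLogMap unreported = *fragment_log;
  for (ErrorLogMap::iterator it = fragment_log->begin();
       it != fragment_log->end(); ++it) {
    it->second.messages.clear();
    it->second.count = 0;
  }
  // 2. Coordinator side: fold the update into the query-wide map.
  MergeErrorMapsSketch(coordinator_log, unreported);
}
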
@@ -287,10 +286,7 @@ class RuntimeState {
   SpinLock error_log_lock_;

   // Logs error messages.
-  std::vector<std::string> error_log_;
-
-  // error_log_[unreported_error_idx_+] has been not reported to the coordinator.
-  int unreported_error_idx_;
+  ErrorLogMap error_log_;

   // Lock protecting file_errors_
   SpinLock file_errors_lock_;

@@ -387,7 +383,7 @@ class RuntimeState {

 #define RETURN_IF_CANCELLED(state) \
   do { \
-    if (UNLIKELY((state)->is_cancelled())) return Status(TStatusCode::CANCELLED); \
+    if (UNLIKELY((state)->is_cancelled())) return Status::CANCELLED; \
   } while (false)

 }

@@ -401,9 +401,9 @@ Status Sorter::Run::AddBatch(RowBatch* batch, int start_index, int* num_processe
       new_tuple->MaterializeExprs<has_var_len_data>(input_row, *sort_tuple_desc_,
           sorter_->sort_tuple_slot_expr_ctxs_, NULL, &var_values, &total_var_len);
       if (total_var_len > sorter_->block_mgr_->max_block_size()) {
-        return Status(TStatusCode::INTERNAL_ERROR, Substitute(
+        return Status(ErrorMsg(TErrorCode::INTERNAL_ERROR, Substitute(
             "Variable length data in a single tuple larger than block size $0 > $1",
-            total_var_len, sorter_->block_mgr_->max_block_size()));
+            total_var_len, sorter_->block_mgr_->max_block_size())));
       }
     } else {
       memcpy(new_tuple, input_row->GetTuple(0), sort_tuple_size_);

@@ -203,7 +203,7 @@ Status AdmissionController::Init(StatestoreSubscriber* subscriber) {
       bind<void>(mem_fn(&AdmissionController::UpdatePoolStats), this, _1, _2);
   Status status = subscriber->AddTopic(IMPALA_REQUEST_QUEUE_TOPIC, true, cb);
   if (!status.ok()) {
-    status.AddErrorMsg("AdmissionController failed to register request queue topic");
+    status.AddDetail("AdmissionController failed to register request queue topic");
   }
   return status;
 }

@@ -227,15 +227,14 @@ Status AdmissionController::CanAdmitRequest(const string& pool_name,
   // (b) Request will go over the mem limit
   // (c) This is not admitting from the queue and there are already queued requests
   if (max_requests >= 0 && total_stats.num_running >= max_requests) {
-    return Status(Substitute(QUEUED_NUM_RUNNING, total_stats.num_running, max_requests),
-        true);
+    return Status(Substitute(QUEUED_NUM_RUNNING, total_stats.num_running, max_requests));
   } else if (mem_limit >= 0 && cluster_estimated_memory >= mem_limit) {
     return Status(Substitute(QUEUED_MEM_LIMIT,
         PrettyPrinter::Print(query_total_estimated_mem, TUnit::BYTES),
         PrettyPrinter::Print(current_cluster_estimate_mem, TUnit::BYTES),
-        PrettyPrinter::Print(mem_limit, TUnit::BYTES)), true);
+        PrettyPrinter::Print(mem_limit, TUnit::BYTES)));
   } else if (!admit_from_queue && total_stats.num_queued > 0) {
-    return Status(Substitute(QUEUED_QUEUE_NOT_EMPTY, total_stats.num_queued), true);
+    return Status(Substitute(QUEUED_QUEUE_NOT_EMPTY, total_stats.num_queued));
   }
   return Status::OK;
 }

@@ -376,7 +375,7 @@ Status AdmissionController::AdmitQuery(QuerySchedule* schedule) {
       --total_stats->num_queued;
       if (pool_metrics != NULL) pool_metrics->local_timed_out->Increment(1L);
       return Status(Substitute(STATUS_TIME_OUT, queue_wait_timeout_ms,
-          admitStatus.GetErrorMsg()));
+          admitStatus.GetDetail()));
     }
     // The dequeue thread updates the stats (to avoid a race condition) so we do
     // not change them here.

@@ -579,7 +578,7 @@ void AdmissionController::AddPoolUpdates(vector<TTopicDelta>* topic_updates) {
     topic_item.key = MakePoolTopicKey(pool_name, backend_id_);
     Status status = thrift_serializer_.Serialize(&pool_stats, &topic_item.value);
     if (!status.ok()) {
-      LOG(WARNING) << "Failed to serialize query pool stats: " << status.GetErrorMsg();
+      LOG(WARNING) << "Failed to serialize query pool stats: " << status.GetDetail();
      topic_updates->pop_back();
     }
     PoolMetrics* pool_metrics = GetPoolMetrics(pool_name);

@@ -657,7 +656,7 @@ void AdmissionController::DequeueLoop() {
           schedule, true);
       if (!admitStatus.ok()) {
         VLOG_RPC << "Could not dequeue query id=" << queue_node->schedule.query_id()
-                 << " reason: " << admitStatus.GetErrorMsg();
+                 << " reason: " << admitStatus.GetDetail();
         break;
       }
       queue.Dequeue();

@@ -154,7 +154,7 @@ static void ResolveSymbolLookup(const TSymbolLookupParams params,
         params.location, type, &dummy_local_path);
     if (!status.ok()) {
       result->__set_result_code(TSymbolLookupResultCode::BINARY_NOT_FOUND);
-      result->__set_error_msg(status.GetErrorMsg());
+      result->__set_error_msg(status.GetDetail());
       return;
     }
   }

@@ -163,7 +163,7 @@ static void ResolveSymbolLookup(const TSymbolLookupParams params,
   // Set 'quiet' to true so we don't flood the log with unfound builtin symbols on
   // startup.
   Status status =
-      LibCache::instance()->CheckSymbolExists(params.location, type, params.symbol, true);
+      LibCache::instance()->CheckSymbolExists(params.location, type, params.symbol);
   if (status.ok()) {
     result->__set_result_code(TSymbolLookupResultCode::SYMBOL_FOUND);
     result->__set_symbol(params.symbol);

@@ -180,7 +180,7 @@ static void ResolveSymbolLookup(const TSymbolLookupParams params,
     stringstream ss;
     ss << "Could not find symbol '" << params.symbol << "' in: " << params.location;
     result->__set_error_msg(ss.str());
-    VLOG(1) << ss.str() << endl << status.GetErrorMsg();
+    VLOG(1) << ss.str() << endl << status.GetDetail();
     return;
   }

@@ -287,10 +287,10 @@ Java_com_cloudera_impala_service_FeSupport_NativePrioritizeLoad(
   TPrioritizeLoadResponse result;
   Status status = catalog_op_executor.PrioritizeLoad(request, &result);
   if (!status.ok()) {
-    LOG(ERROR) << status.GetErrorMsg();
+    LOG(ERROR) << status.GetDetail();
     // Create a new Status, copy in this error, then update the result.
     Status catalog_service_status(result.status);
-    catalog_service_status.AddError(status);
+    catalog_service_status.MergeStatus(status);
     status.ToThrift(&result.status);
   }

@@ -64,7 +64,7 @@ void FragmentMgr::FragmentExecState::ReportStatusCb(
   if (!coord_status.ok()) {
     stringstream s;
     s << "couldn't get a client for " << coord_address();
-    UpdateStatus(Status(TStatusCode::INTERNAL_ERROR, s.str()));
+    UpdateStatus(Status(ErrorMsg(TErrorCode::INTERNAL_ERROR, s.str())));
     return;
   }

@@ -120,7 +120,7 @@ void FragmentMgr::FragmentExecState::ReportStatusCb(
     stringstream msg;
     msg << "ReportExecStatus() to " << coord_address() << " failed:\n" << e.what();
     VLOG_QUERY << msg.str();
-    rpc_status = Status(TStatusCode::INTERNAL_ERROR, msg.str());
+    rpc_status = Status(ErrorMsg(TErrorCode::INTERNAL_ERROR, msg.str()));
   }

   if (!rpc_status.ok()) {

@@ -118,8 +118,8 @@ void FragmentMgr::CancelPlanFragment(TCancelPlanFragmentResult& return_val,
   shared_ptr<FragmentExecState> exec_state =
       GetFragmentExecState(params.fragment_instance_id);
   if (exec_state.get() == NULL) {
-    Status status(TStatusCode::INTERNAL_ERROR, Substitute("Unknown fragment id: $0",
-        lexical_cast<string>(params.fragment_instance_id)));
+    Status status(ErrorMsg(TErrorCode::INTERNAL_ERROR, Substitute("Unknown fragment id: $0",
+        lexical_cast<string>(params.fragment_instance_id))));
     status.SetTStatus(&return_val);
     return;
   }

@@ -220,7 +220,7 @@ Status Frontend::LoadData(const TLoadDataReq& request, TLoadDataResp* response)
 }

 bool Frontend::IsAuthorizationError(const Status& status) {
-  return !status.ok() && status.GetErrorMsg().find("AuthorizationException") == 0;
+  return !status.ok() && status.GetDetail().find("AuthorizationException") == 0;
 }

 Status Frontend::SetCatalogInitialized() {

@@ -75,7 +75,7 @@ using namespace beeswax;
   do { \
     Status __status__ = (stmt); \
    if (UNLIKELY(!__status__.ok())) { \
-      RaiseBeeswaxException(__status__.GetErrorMsg(), ex_type); \
+      RaiseBeeswaxException(__status__.GetDetail(), ex_type); \
     } \
   } while (false)

@@ -193,7 +193,7 @@ void ImpalaServer::query(QueryHandle& query_handle, const Query& query) {
   Status status = SetQueryInflight(session, exec_state);
   if (!status.ok()) {
     UnregisterQuery(exec_state->query_id(), false, &status);
-    RaiseBeeswaxException(status.GetErrorMsg(), SQLSTATE_GENERAL_ERROR);
+    RaiseBeeswaxException(status.GetDetail(), SQLSTATE_GENERAL_ERROR);
   }
   TUniqueIdToQueryHandle(exec_state->query_id(), &query_handle);
 }

@@ -231,14 +231,14 @@ void ImpalaServer::executeAndWait(QueryHandle& query_handle, const Query& query,
   Status status = SetQueryInflight(session, exec_state);
   if (!status.ok()) {
     UnregisterQuery(exec_state->query_id(), false, &status);
-    RaiseBeeswaxException(status.GetErrorMsg(), SQLSTATE_GENERAL_ERROR);
+    RaiseBeeswaxException(status.GetDetail(), SQLSTATE_GENERAL_ERROR);
   }
   // block until results are ready
   exec_state->Wait();
   status = exec_state->query_status();
   if (!status.ok()) {
     UnregisterQuery(exec_state->query_id(), false, &status);
-    RaiseBeeswaxException(status.GetErrorMsg(), SQLSTATE_GENERAL_ERROR);
+    RaiseBeeswaxException(status.GetDetail(), SQLSTATE_GENERAL_ERROR);
   }

   exec_state->UpdateQueryState(QueryState::FINISHED);

@@ -289,7 +289,7 @@ void ImpalaServer::fetch(Results& query_results, const QueryHandle& query_handle
            << " has_more=" << (query_results.has_more ? "true" : "false");
   if (!status.ok()) {
     UnregisterQuery(query_id, false, &status);
-    RaiseBeeswaxException(status.GetErrorMsg(), SQLSTATE_GENERAL_ERROR);
+    RaiseBeeswaxException(status.GetDetail(), SQLSTATE_GENERAL_ERROR);
   }
 }

@@ -401,7 +401,7 @@ void ImpalaServer::get_log(string& log, const LogContextId& context) {
   stringstream error_log_ss;
   // If the query status is !ok, include the status error message at the top of the log.
   if (!exec_state->query_status().ok()) {
-    error_log_ss << exec_state->query_status().GetErrorMsg() << "\n";
+    error_log_ss << exec_state->query_status().GetDetail() << "\n";
   }

   // Add warnings from analysis

@@ -440,7 +440,7 @@ void ImpalaServer::Cancel(impala::TStatus& tstatus,
   TUniqueId query_id;
   QueryHandleToTUniqueId(query_handle, &query_id);
   RAISE_IF_ERROR(CancelInternal(query_id, true), SQLSTATE_GENERAL_ERROR);
-  tstatus.status_code = TStatusCode::OK;
+  tstatus.status_code = TErrorCode::OK;
 }

 void ImpalaServer::CloseInsert(TInsertResult& insert_result,

@@ -454,7 +454,7 @@ void ImpalaServer::CloseInsert(TInsertResult& insert_result,

   Status status = CloseInsertInternal(query_id, &insert_result);
   if (!status.ok()) {
-    RaiseBeeswaxException(status.GetErrorMsg(), SQLSTATE_GENERAL_ERROR);
+    RaiseBeeswaxException(status.GetDetail(), SQLSTATE_GENERAL_ERROR);
   }
 }

@@ -472,7 +472,7 @@ void ImpalaServer::GetRuntimeProfile(string& profile_output, const QueryHandle&
   stringstream ss;
   Status status = GetRuntimeProfileStr(query_id, false, &ss);
   if (!status.ok()) {
-    ss << "GetRuntimeProfile error: " << status.GetErrorMsg();
+    ss << "GetRuntimeProfile error: " << status.GetDetail();
     RaiseBeeswaxException(ss.str(), SQLSTATE_GENERAL_ERROR);
   }
   profile_output = ss.str();

@@ -487,7 +487,7 @@ void ImpalaServer::GetExecSummary(impala::TExecSummary& result,
   QueryHandleToTUniqueId(handle, &query_id);
   VLOG_RPC << "GetExecSummary(): query_id=" << PrintId(query_id);
   Status status = GetExecSummary(query_id, &result);
-  if (!status.ok()) RaiseBeeswaxException(status.GetErrorMsg(), SQLSTATE_GENERAL_ERROR);
+  if (!status.ok()) RaiseBeeswaxException(status.GetDetail(), SQLSTATE_GENERAL_ERROR);
 }

 void ImpalaServer::PingImpalaService(TPingImpalaServiceResp& return_val) {

@@ -65,7 +65,7 @@ const TProtocolVersion::type MAX_SUPPORTED_HS2_VERSION =
 #define HS2_RETURN_IF_ERROR(return_val, status, error_state) \
   do { \
     if (UNLIKELY(!status.ok())) { \
-      HS2_RETURN_ERROR(return_val, status.GetErrorMsg(), error_state); \
+      HS2_RETURN_ERROR(return_val, status.GetDetail(), error_state); \
       return; \
     } \
   } while (false)

@@ -402,7 +402,7 @@ void ImpalaServer::ExecuteMetadataOp(const THandleIdentifier& session_handle,
       THandleIdentifierToTUniqueId(session_handle, &session_id, &secret);
   if (!unique_id_status.ok()) {
     status->__set_statusCode(thrift::TStatusCode::ERROR_STATUS);
-    status->__set_errorMessage(unique_id_status.GetErrorMsg());
+    status->__set_errorMessage(unique_id_status.GetDetail());
     status->__set_sqlState(SQLSTATE_GENERAL_ERROR);
     return;
   }

@@ -411,7 +411,7 @@ void ImpalaServer::ExecuteMetadataOp(const THandleIdentifier& session_handle,
   Status get_session_status = scoped_session.WithSession(session_id, &session);
   if (!get_session_status.ok()) {
     status->__set_statusCode(thrift::TStatusCode::ERROR_STATUS);
-    status->__set_errorMessage(get_session_status.GetErrorMsg());
+    status->__set_errorMessage(get_session_status.GetDetail());
     // TODO: (here and elsewhere) - differentiate between invalid session ID and timeout
     // when setting the error code.
     status->__set_sqlState(SQLSTATE_GENERAL_ERROR);

@@ -442,7 +442,7 @@ void ImpalaServer::ExecuteMetadataOp(const THandleIdentifier& session_handle,
   Status register_status = RegisterQuery(session, exec_state);
   if (!register_status.ok()) {
     status->__set_statusCode(thrift::TStatusCode::ERROR_STATUS);
-    status->__set_errorMessage(register_status.GetErrorMsg());
+    status->__set_errorMessage(register_status.GetDetail());
     status->__set_sqlState(SQLSTATE_GENERAL_ERROR);
     return;
   }

@@ -451,7 +451,7 @@ void ImpalaServer::ExecuteMetadataOp(const THandleIdentifier& session_handle,
   if (!exec_status.ok()) {
     UnregisterQuery(exec_state->query_id(), false, &exec_status);
     status->__set_statusCode(thrift::TStatusCode::ERROR_STATUS);
-    status->__set_errorMessage(exec_status.GetErrorMsg());
+    status->__set_errorMessage(exec_status.GetDetail());
     status->__set_sqlState(SQLSTATE_GENERAL_ERROR);
     return;
   }

@@ -462,7 +462,7 @@ void ImpalaServer::ExecuteMetadataOp(const THandleIdentifier& session_handle,
   if (!inflight_status.ok()) {
     UnregisterQuery(exec_state->query_id(), false, &inflight_status);
     status->__set_statusCode(thrift::TStatusCode::ERROR_STATUS);
-    status->__set_errorMessage(inflight_status.GetErrorMsg());
+    status->__set_errorMessage(inflight_status.GetDetail());
     status->__set_sqlState(SQLSTATE_GENERAL_ERROR);
     return;
   }

@@ -728,7 +728,7 @@ void ImpalaServer::ExecuteStatement(TExecuteStatementResp& return_val,
         *exec_state->result_metadata()), cache_num_rows);
     if (!status.ok()) {
       UnregisterQuery(exec_state->query_id(), false, &status);
-      HS2_RETURN_ERROR(return_val, status.GetErrorMsg(), SQLSTATE_GENERAL_ERROR);
+      HS2_RETURN_ERROR(return_val, status.GetDetail(), SQLSTATE_GENERAL_ERROR);
     }
   }
   exec_state->UpdateQueryState(QueryState::RUNNING);

@@ -739,7 +739,7 @@ void ImpalaServer::ExecuteStatement(TExecuteStatementResp& return_val,
   status = SetQueryInflight(session, exec_state);
   if (!status.ok()) {
     UnregisterQuery(exec_state->query_id(), false, &status);
-    HS2_RETURN_ERROR(return_val, status.GetErrorMsg(), SQLSTATE_GENERAL_ERROR);
+    HS2_RETURN_ERROR(return_val, status.GetDetail(), SQLSTATE_GENERAL_ERROR);
   }
   return_val.__isset.operationHandle = true;
   return_val.operationHandle.__set_operationType(TOperationType::EXECUTE_STATEMENT);

@@ -1039,7 +1039,7 @@ void ImpalaServer::FetchResults(TFetchResultsResp& return_val,
     } else {
       UnregisterQuery(query_id, false, &status);
     }
-    HS2_RETURN_ERROR(return_val, status.GetErrorMsg(), SQLSTATE_GENERAL_ERROR);
+    HS2_RETURN_ERROR(return_val, status.GetDetail(), SQLSTATE_GENERAL_ERROR);
   }
   return_val.status.__set_statusCode(thrift::TStatusCode::SUCCESS_STATUS);
 }

@@ -140,14 +140,14 @@ void ImpalaServer::CancelQueryUrlCallback(const Webserver::ArgumentMap& args,
TUniqueId unique_id;
Status status = ParseQueryId(args, &unique_id);
if (!status.ok()) {
Value error(status.GetErrorMsg().c_str(), document->GetAllocator());
Value error(status.GetDetail().c_str(), document->GetAllocator());
document->AddMember("error", error, document->GetAllocator());
return;
}
Status cause("Cancelled from Impala's debug web interface");
status = UnregisterQuery(unique_id, true, &cause);
if (!status.ok()) {
Value error(status.GetErrorMsg().c_str(), document->GetAllocator());
Value error(status.GetDetail().c_str(), document->GetAllocator());
document->AddMember("error", error, document->GetAllocator());
return;
}
@@ -160,7 +160,7 @@ void ImpalaServer::QueryProfileUrlCallback(const Webserver::ArgumentMap& args,
TUniqueId unique_id;
Status parse_status = ParseQueryId(args, &unique_id);
if (!parse_status.ok()) {
Value error(parse_status.GetErrorMsg().c_str(), document->GetAllocator());
Value error(parse_status.GetDetail().c_str(), document->GetAllocator());
document->AddMember("error", error, document->GetAllocator());
return;
}
@@ -168,7 +168,7 @@ void ImpalaServer::QueryProfileUrlCallback(const Webserver::ArgumentMap& args,
stringstream ss;
Status status = GetRuntimeProfileStr(unique_id, false, &ss);
if (!status.ok()) {
Value error(status.GetErrorMsg().c_str(), document->GetAllocator());
Value error(status.GetDetail().c_str(), document->GetAllocator());
document->AddMember("error", error, document->GetAllocator());
return;
}
@@ -186,11 +186,11 @@ void ImpalaServer::QueryProfileEncodedUrlCallback(const Webserver::ArgumentMap&
stringstream ss;
Status status = ParseQueryId(args, &unique_id);
if (!status.ok()) {
ss << status.GetErrorMsg();
ss << status.GetDetail();
} else {
Status status = GetRuntimeProfileStr(unique_id, true, &ss);
if (!status.ok()) {
ss.str(Substitute("Could not obtain runtime profile: $0", status.GetErrorMsg()));
ss.str(Substitute("Could not obtain runtime profile: $0", status.GetDetail()));
}
}

@@ -364,7 +364,7 @@ void ImpalaServer::CatalogUrlCallback(const Webserver::ArgumentMap& args,
TGetDbsResult get_dbs_result;
Status status = exec_env_->frontend()->GetDbNames(NULL, NULL, &get_dbs_result);
if (!status.ok()) {
Value error(status.GetErrorMsg().c_str(), document->GetAllocator());
Value error(status.GetDetail().c_str(), document->GetAllocator());
document->AddMember("error", error, document->GetAllocator());
return;
}
@@ -379,7 +379,7 @@ void ImpalaServer::CatalogUrlCallback(const Webserver::ArgumentMap& args,
Status status =
exec_env_->frontend()->GetTableNames(db, NULL, NULL, &get_table_results);
if (!status.ok()) {
Value error(status.GetErrorMsg().c_str(), document->GetAllocator());
Value error(status.GetDetail().c_str(), document->GetAllocator());
database.AddMember("error", error, document->GetAllocator());
continue;
}
@@ -419,7 +419,7 @@ void ImpalaServer::CatalogObjectsUrlCallback(const Webserver::ArgumentMap& args,
Value debug_string(ThriftDebugString(result).c_str(), document->GetAllocator());
document->AddMember("thrift_string", debug_string, document->GetAllocator());
} else {
Value error(status.GetErrorMsg().c_str(), document->GetAllocator());
Value error(status.GetDetail().c_str(), document->GetAllocator());
document->AddMember("error", error, document->GetAllocator());
}
} else {
@@ -552,7 +552,7 @@ void ImpalaServer::QuerySummaryCallback(bool include_json_plan, bool include_sum
Status status = ParseQueryId(args, &query_id);
if (!status.ok()) {
// Redact the error message, it may contain part or all of the query.
Value json_error(RedactCopy(status.GetErrorMsg()).c_str(), document->GetAllocator());
Value json_error(RedactCopy(status.GetDetail()).c_str(), document->GetAllocator());
document->AddMember("error", json_error, document->GetAllocator());
return;
}
@@ -626,7 +626,7 @@ void ImpalaServer::QuerySummaryCallback(bool include_json_plan, bool include_sum

// Redact the error in case the query is contained in the error message.
Value json_status(query_status.ok() ? "OK" :
RedactCopy(query_status.GetErrorMsg()).c_str(), document->GetAllocator());
RedactCopy(query_status.GetDetail()).c_str(), document->GetAllocator());
document->AddMember("status", json_status, document->GetAllocator());
Value json_id(PrintId(query_id).c_str(), document->GetAllocator());
document->AddMember("query_id", json_id, document->GetAllocator());

@@ -218,7 +218,7 @@ ImpalaServer::ImpalaServer(ExecEnv* exec_env)

Status status = exec_env_->frontend()->ValidateSettings();
if (!status.ok()) {
LOG(ERROR) << status.GetErrorMsg();
LOG(ERROR) << status.GetDetail();
if (FLAGS_abort_on_config_error) {
LOG(ERROR) << "Aborting Impala Server startup due to improper configuration";
exit(1);
@@ -227,7 +227,7 @@ ImpalaServer::ImpalaServer(ExecEnv* exec_env)

status = TmpFileMgr::Init();
if (!status.ok()) {
LOG(ERROR) << status.GetErrorMsg();
LOG(ERROR) << status.GetDetail();
if (FLAGS_abort_on_config_error) {
LOG(ERROR) << "Aborting Impala Server startup due to improperly "
<< "configured scratch directories.";
@@ -330,7 +330,7 @@ Status ImpalaServer::LogLineageRecord(const TExecRequest& request) {
if (!request.query_exec_request.__isset.lineage_graph) return Status::OK;
Status status = lineage_logger_->AppendEntry(request.query_exec_request.lineage_graph);
if (!status.ok()) {
LOG(ERROR) << "Unable to record query lineage record: " << status.GetErrorMsg();
LOG(ERROR) << "Unable to record query lineage record: " << status.GetDetail();
if (FLAGS_abort_on_failed_lineage_event) {
LOG(ERROR) << "Shutting down Impala Server due to abort_on_failed_lineage_event=true";
exit(1);
@@ -376,7 +376,7 @@ Status ImpalaServer::LogAuditRecord(const ImpalaServer::QueryExecState& exec_sta
writer.String("authorization_failure");
writer.Bool(Frontend::IsAuthorizationError(exec_state.query_status()));
writer.String("status");
writer.String(exec_state.query_status().GetErrorMsg().c_str());
writer.String(exec_state.query_status().GetDetail().c_str());
writer.String("user");
writer.String(exec_state.effective_user().c_str());
writer.String("impersonator");
@@ -422,7 +422,7 @@ Status ImpalaServer::LogAuditRecord(const ImpalaServer::QueryExecState& exec_sta
writer.EndObject();
Status status = audit_event_logger_->AppendEntry(buffer.GetString());
if (!status.ok()) {
LOG(ERROR) << "Unable to record audit event record: " << status.GetErrorMsg();
LOG(ERROR) << "Unable to record audit event record: " << status.GetDetail();
if (FLAGS_abort_on_failed_audit_event) {
LOG(ERROR) << "Shutting down Impala Server due to abort_on_failed_audit_event=true";
exit(1);
@@ -540,7 +540,7 @@ void ImpalaServer::AuditEventLoggerFlushThread() {
sleep(5);
Status status = audit_event_logger_->Flush();
if (!status.ok()) {
LOG(ERROR) << "Error flushing audit event log: " << status.GetErrorMsg();
LOG(ERROR) << "Error flushing audit event log: " << status.GetDetail();
if (FLAGS_abort_on_failed_audit_event) {
LOG(ERROR) << "Shutting down Impala Server due to "
<< "abort_on_failed_audit_event=true";
@@ -555,7 +555,7 @@ void ImpalaServer::LineageLoggerFlushThread() {
sleep(5);
Status status = lineage_logger_->Flush();
if (!status.ok()) {
LOG(ERROR) << "Error flushing lineage event log: " << status.GetErrorMsg();
LOG(ERROR) << "Error flushing lineage event log: " << status.GetDetail();
if (FLAGS_abort_on_failed_lineage_event) {
LOG(ERROR) << "Shutting down Impala Server due to "
<< "abort_on_failed_lineage_event=true";
@@ -577,7 +577,7 @@ void ImpalaServer::ArchiveQuery(const QueryExecState& query) {
if (!status.ok()) {
LOG_EVERY_N(WARNING, 1000) << "Could not write to profile log file ("
<< google::COUNTER << " attempts failed): "
<< status.GetErrorMsg();
<< status.GetDetail();
LOG_EVERY_N(WARNING, 1000)
<< "Disable query logging with --log_query_to_file=false";
}
@@ -684,7 +684,7 @@ Status ImpalaServer::ExecuteInternal(
if (result.stmt_type == TStmtType::DDL) {
Status status = UpdateCatalogMetrics();
if (!status.ok()) {
VLOG_QUERY << "Couldn't update catalog metrics: " << status.GetErrorMsg();
VLOG_QUERY << "Couldn't update catalog metrics: " << status.GetDetail();
}
}

@@ -732,7 +732,7 @@ Status ImpalaServer::RegisterQuery(shared_ptr<SessionState> session_state,
// (query_id is globally unique)
stringstream ss;
ss << "query id " << PrintId(query_id) << " already exists";
return Status(TStatusCode::INTERNAL_ERROR, ss.str());
return Status(ErrorMsg(TErrorCode::INTERNAL_ERROR, ss.str()));
}
query_exec_state_map_.insert(make_pair(query_id, exec_state));
}
@@ -894,7 +894,7 @@ Status ImpalaServer::CloseSessionInternal(const TUniqueId& session_id,
session_state->inflight_queries.end());
}
// Unregister all open queries from this session.
Status status("Session closed", true);
Status status("Session closed");
BOOST_FOREACH(const TUniqueId& query_id, inflight_queries) {
UnregisterQuery(query_id, false, &status);
}
@@ -942,7 +942,7 @@ void ImpalaServer::ReportExecStatus(
// (which we have occasionally seen). Consider keeping query exec states around for a
// little longer (until all reports have been received).
if (exec_state.get() == NULL) {
return_val.status.__set_status_code(TStatusCode::INTERNAL_ERROR);
return_val.status.__set_status_code(TErrorCode::INTERNAL_ERROR);
const string& err = Substitute("ReportExecStatus(): Received report for unknown "
"query ID (probably closed or cancelled). (query_id: $0, backend: $1, instance:"
" $2 done: $3)", PrintId(params.query_id), params.backend_num,
@@ -986,7 +986,7 @@ void ImpalaServer::InitializeConfigVariables() {
if (!status.ok()) {
// Log error and exit if the default query options are invalid.
LOG(ERROR) << "Invalid default query options. Please check -default_query_options.\n"
<< status.GetErrorMsg();
<< status.GetDetail();
exit(1);
}
LOG(INFO) << "Default query options:" << ThriftDebugString(default_query_options_);
@@ -1033,7 +1033,7 @@ void ImpalaServer::CancelFromThreadPool(uint32_t thread_id,
&cancellation_work.cause());
if (!status.ok()) {
VLOG_QUERY << "Query cancellation (" << cancellation_work.query_id()
<< ") did not succeed: " << status.GetErrorMsg();
<< ") did not succeed: " << status.GetDetail();
}
}
}
@@ -1095,7 +1095,7 @@ void ImpalaServer::CatalogUpdateCallback(
Status status = DeserializeThriftMsg(reinterpret_cast<const uint8_t*>(
item.value.data()), &len, FLAGS_compact_catalog_topic, &catalog_object);
if (!status.ok()) {
LOG(ERROR) << "Error deserializing item: " << status.GetErrorMsg();
LOG(ERROR) << "Error deserializing item: " << status.GetDetail();
continue;
}
if (catalog_object.type == TCatalogObjectType::CATALOG) {
@@ -1130,7 +1130,7 @@ void ImpalaServer::CatalogUpdateCallback(
Status status = TCatalogObjectFromEntryKey(key, &catalog_object);
if (!status.ok()) {
LOG(ERROR) << "Error parsing catalog topic entry deletion key: " << key << " "
<< "Error: " << status.GetErrorMsg();
<< "Error: " << status.GetDetail();
continue;
}
update_req.removed_objects.push_back(catalog_object);
@@ -1158,7 +1158,7 @@ void ImpalaServer::CatalogUpdateCallback(
Status s = exec_env_->frontend()->UpdateCatalogCache(update_req, &resp);
if (!s.ok()) {
LOG(ERROR) << "There was an error processing the impalad catalog update. Requesting"
<< " a full topic update to recover: " << s.GetErrorMsg();
<< " a full topic update to recover: " << s.GetDetail();
subscriber_topic_updates->push_back(TTopicDelta());
TTopicDelta& update = subscriber_topic_updates->back();
update.topic_name = CatalogServer::IMPALA_CATALOG_TOPIC;
@@ -1218,7 +1218,7 @@ Status ImpalaServer::ProcessCatalogUpdateResult(
// Apply the changes to the local catalog cache.
TUpdateCatalogCacheResponse resp;
Status status = exec_env_->frontend()->UpdateCatalogCache(update_req, &resp);
if (!status.ok()) LOG(ERROR) << status.GetErrorMsg();
if (!status.ok()) LOG(ERROR) << status.GetDetail();
RETURN_IF_ERROR(status);
if (!wait_for_all_subscribers) return Status::OK;
}
@@ -1445,7 +1445,7 @@ void ImpalaServer::ConnectionEnd(
Status status = CloseSessionInternal(session_id, true);
if (!status.ok()) {
LOG(WARNING) << "Error closing session " << session_id << ": "
<< status.GetErrorMsg();
<< status.GetDetail();
}
}
connection_to_sessions_map_.erase(it);

@@ -77,7 +77,7 @@ int main(int argc, char** argv) {
Status status = exec_env.StartServices();
if (!status.ok()) {
LOG(ERROR) << "Impalad services did not start correctly, exiting. Error: "
<< status.GetErrorMsg();
<< status.GetDetail();
ShutdownLogging();
exit(1);
}

@@ -493,7 +493,7 @@ void ImpalaServer::QueryExecState::Done() {
Status status = exec_env_->scheduler()->Release(schedule_.get());
if (!status.ok()) {
LOG(WARNING) << "Failed to release resources of query " << schedule_->query_id()
<< " because of error: " << status.GetErrorMsg();
<< " because of error: " << status.GetDetail();
}
}

@@ -591,15 +591,15 @@ Status ImpalaServer::QueryExecState::FetchRows(const int32_t max_rows,
Status ImpalaServer::QueryExecState::RestartFetch() {
// No result caching for this query. Restart is invalid.
if (result_cache_max_size_ <= 0) {
return Status(TStatusCode::RECOVERABLE_ERROR,
"Restarting of fetch requires enabling of query result caching.");
return Status(ErrorMsg(TErrorCode::RECOVERABLE_ERROR,
"Restarting of fetch requires enabling of query result caching."));
}
// The cache overflowed on a previous fetch.
if (result_cache_.get() == NULL) {
stringstream ss;
ss << "The query result cache exceeded its limit of " << result_cache_max_size_
<< " rows. Restarting the fetch is not possible.";
return Status(TStatusCode::RECOVERABLE_ERROR, ss.str());
return Status(ErrorMsg(TErrorCode::RECOVERABLE_ERROR, ss.str()));
}
// Reset fetch state to start over.
eos_ = false;
@@ -617,7 +617,7 @@ Status ImpalaServer::QueryExecState::UpdateQueryStatus(const Status& status) {
if (!status.ok() && query_status_.ok()) {
query_state_ = QueryState::EXCEPTION;
query_status_ = status;
summary_profile_.AddInfoString("Query Status", query_status_.GetErrorMsg());
summary_profile_.AddInfoString("Query Status", query_status_.GetDetail());
}

return status;
@@ -828,7 +828,7 @@ Status ImpalaServer::QueryExecState::UpdateCatalog() {
}

Status status(resp.result.status);
if (!status.ok()) LOG(ERROR) << "ERROR Finalizing DML: " << status.GetErrorMsg();
if (!status.ok()) LOG(ERROR) << "ERROR Finalizing DML: " << status.GetDetail();
RETURN_IF_ERROR(status);
RETURN_IF_ERROR(parent_server_->ProcessCatalogUpdateResult(resp.result,
exec_request_.query_options.sync_ddl));

@@ -192,7 +192,7 @@ void QueryResourceMgr::AcquireVcoreResources(
if (!status.ok()) {
VLOG_QUERY << "Could not expand CPU resources for query " << PrintId(query_id_)
<< ", reservation: " << PrintId(reservation_id_) << ". Error was: "
<< status.GetErrorMsg();
<< status.GetDetail();
// Sleep to avoid flooding the resource broker, particularly if requests are being
// rejected quickly (and therefore we stay oversubscribed)
// TODO: configurable timeout

@@ -152,7 +152,7 @@ SimpleScheduler::SimpleScheduler(const vector<TNetworkAddress>& backends,
Status status = HostnameToIpAddrs(backends[i].hostname, &ipaddrs);
if (!status.ok()) {
VLOG(1) << "Failed to resolve " << backends[i].hostname << ": "
<< status.GetErrorMsg();
<< status.GetDetail();
continue;
}

@@ -192,7 +192,7 @@ Status SimpleScheduler::Init() {
bind<void>(mem_fn(&SimpleScheduler::UpdateMembership), this, _1, _2);
Status status = statestore_subscriber_->AddTopic(IMPALA_MEMBERSHIP_TOPIC, true, cb);
if (!status.ok()) {
status.AddErrorMsg("SimpleScheduler failed to register membership topic");
status.AddDetail("SimpleScheduler failed to register membership topic");
return status;
}
if (!FLAGS_disable_admission_control) {
@@ -214,8 +214,8 @@ Status SimpleScheduler::Init() {
const string& hostname = backend_descriptor_.address.hostname;
Status status = HostnameToIpAddrs(hostname, &ipaddrs);
if (!status.ok()) {
VLOG(1) << "Failed to resolve " << hostname << ": " << status.GetErrorMsg();
status.AddErrorMsg("SimpleScheduler failed to start");
VLOG(1) << "Failed to resolve " << hostname << ": " << status.GetDetail();
status.AddDetail("SimpleScheduler failed to start");
return status;
}
// Find a non-localhost address for this host; if one can't be
@@ -346,7 +346,7 @@ void SimpleScheduler::UpdateMembership(
Status status = thrift_serializer_.Serialize(&backend_descriptor_, &item.value);
if (!status.ok()) {
LOG(WARNING) << "Failed to serialize Impala backend address for statestore topic: "
<< status.GetErrorMsg();
<< status.GetDetail();
subscriber_topic_updates->pop_back();
}
} else if (is_offline &&
@@ -829,7 +829,7 @@ Status SimpleScheduler::GetRequestPool(const string& user,
const string& configured_pool = query_options.request_pool;
RETURN_IF_ERROR(request_pool_service_->ResolveRequestPool(configured_pool, user,
&resolve_pool_result));
if (resolve_pool_result.status.status_code != TStatusCode::OK) {
if (resolve_pool_result.status.status_code != TErrorCode::OK) {
return Status(join(resolve_pool_result.status.error_msgs, "; "));
}
if (resolve_pool_result.resolved_pool.empty()) {
@@ -882,7 +882,7 @@ Status SimpleScheduler::Schedule(Coordinator* coord, QuerySchedule* schedule) {
const TQueryCtx& query_ctx = schedule->request().query_ctx;
if(query_ctx.__isset.tables_missing_stats &&
!query_ctx.tables_missing_stats.empty()) {
status.AddErrorMsg(GetTablesMissingStatsWarning(query_ctx.tables_missing_stats));
status.AddDetail(GetTablesMissingStatsWarning(query_ctx.tables_missing_stats));
}
return status;
}
@@ -905,7 +905,7 @@ Status SimpleScheduler::Release(QuerySchedule* schedule) {
// Remove the reservation from the active-resource maps even if there was an error
// releasing the reservation because the query running in the reservation is done.
RemoveFromActiveResourceMaps(*schedule->reservation());
if (response.status.status_code != TStatusCode::OK) {
if (response.status.status_code != TErrorCode::OK) {
return Status(join(response.status.error_msgs, ", "));
}
}

@@ -206,7 +206,7 @@ Status StatestoreSubscriber::Start() {
is_registered_ = true;
LOG(INFO) << "statestore registration successful";
} else {
LOG(INFO) << "statestore registration unsuccessful: " << status.GetErrorMsg();
LOG(INFO) << "statestore registration unsuccessful: " << status.GetDetail();
}
}

@@ -249,7 +249,7 @@ void StatestoreSubscriber::RecoveryModeChecker() {
} else {
// Don't exit recovery mode, continue
LOG(WARNING) << "Failed to re-register with statestore: "
<< status.GetErrorMsg();
<< status.GetDetail();
SleepForMs(SLEEP_INTERVAL_MS);
}
last_recovery_duration_metric_->set_value(
@@ -292,7 +292,7 @@ void StatestoreSubscriber::Heartbeat(const TUniqueId& registration_id) {
heartbeat_interval_timer_.Reset() / (1000.0 * 1000.0 * 1000.0));
failure_detector_->UpdateHeartbeat(STATESTORE_ID, true);
} else {
VLOG_RPC << "Heartbeat: " << status.GetErrorMsg();
VLOG_RPC << "Heartbeat: " << status.GetDetail();
}
}


@@ -664,7 +664,7 @@ void Statestore::DoSubscriberUpdate(bool is_heartbeat, int thread_id,
if (it == subscribers_.end()) return;
if (!status.ok()) {
LOG(INFO) << "Unable to send " << hb_type << " message to subscriber "
<< update.second << ", received error " << status.GetErrorMsg();
<< update.second << ", received error " << status.GetDetail();
}

const string& registration_id = PrintId(subscriber->registration_id());

@@ -19,6 +19,10 @@
#include <assert.h>
#include <gutil/port.h> // for aligned_malloc

#ifndef IMPALA_UDF_SDK_BUILD
#include "util/error-util.h"
#endif

// Be careful what this includes since this needs to be linked into the UDF's
// binary. For example, it would be unfortunate if they had a random dependency
// on libhdfs.
@@ -350,8 +354,12 @@ bool FunctionContext::AddWarning(const char* warning_msg) {
// TODO: somehow print the full error log in the shell? This is a problem for any
// function using LogError() during close.
LOG(WARNING) << ss.str();
#endif
return impl_->state_->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str()));
#else
// In case of the SDK build, we simply forward this call to a dummy method
return impl_->state_->LogError(ss.str());
#endif

} else {
cerr << ss.str() << endl;
return true;

@@ -117,3 +117,5 @@ ADD_BE_TEST(pretty-printer-test)
ADD_BE_TEST(redactor-config-parser-test)
ADD_BE_TEST(redactor-test)
ADD_BE_TEST(redactor-unconfigured-test)
ADD_BE_TEST(error-util-test)
target_link_libraries(error-util-test Util)

@@ -311,10 +311,8 @@ static Status SnappyBlockDecompress(int64_t input_len, const uint8_t* input,
if (uncompressed_block_len > Codec::MAX_BLOCK_SIZE) {
if (uncompressed_total_len == 0) {
// TODO: is this check really robust?
stringstream ss;
ss << "Decompressor: block size is too big. Data is likely corrupt. "
<< "Size: " << uncompressed_block_len;
return Status(ss.str());
return Status(TErrorCode::SNAPPY_DECOMPRESS_INVALID_BLOCK_SIZE,
uncompressed_block_len);
}
break;
}
@@ -332,8 +330,7 @@ static Status SnappyBlockDecompress(int64_t input_len, const uint8_t* input,

if (compressed_len == 0 || compressed_len > input_len) {
if (uncompressed_total_len == 0) {
return Status(
"Decompressor: invalid compressed length. Data is likely corrupt.");
return Status(TErrorCode::SNAPPY_DECOMPRESS_INVALID_COMPRESSED_LENGTH);
}
input_len = 0;
break;
@@ -344,7 +341,7 @@ static Status SnappyBlockDecompress(int64_t input_len, const uint8_t* input,
if (!snappy::GetUncompressedLength(reinterpret_cast<const char*>(input),
input_len, &uncompressed_len)) {
if (uncompressed_total_len == 0) {
return Status("Snappy: GetUncompressedLength failed");
return Status(TErrorCode::SNAPPY_DECOMPRESS_UNCOMPRESSED_LENGTH_FAILED);
}
input_len = 0;
break;
@@ -355,7 +352,7 @@ static Status SnappyBlockDecompress(int64_t input_len, const uint8_t* input,
// Decompress this snappy block
if (!snappy::RawUncompress(reinterpret_cast<const char*>(input),
compressed_len, output)) {
return Status("SnappyBlock: RawUncompress failed");
return Status(TErrorCode::SNAPPY_DECOMPRESS_RAW_UNCOMPRESS_FAILED);
}
output += uncompressed_len;
}
@@ -370,7 +367,7 @@ static Status SnappyBlockDecompress(int64_t input_len, const uint8_t* input,
if (size_only) {
*output_len = uncompressed_total_len;
} else if (*output_len != uncompressed_total_len) {
return Status("Snappy: Decompressed size is not correct.");
return Status(TErrorCode::SNAPPY_DECOMPRESS_DECOMPRESS_SIZE_INCORRECT);
}
return Status::OK;
}

@@ -23,13 +23,13 @@ using namespace std;

namespace impala {

Status DynamicLookup(void* handle, const char* symbol, void** fn_ptr, bool quiet) {
Status DynamicLookup(void* handle, const char* symbol, void** fn_ptr) {
*(void **) (fn_ptr) = dlsym(handle, symbol);
char* error = dlerror();
if (error != NULL) {
stringstream ss;
ss << "Unable to find " << symbol << "\ndlerror: " << error;
return Status(ss.str(), quiet);
return Status(ss.str());
}
return Status::OK;
}

@@ -23,8 +23,7 @@ namespace impala {
// handle -- handle to the library. NULL if loading from the current process.
// symbol -- symbol to lookup.
// fn_ptr -- pointer to return address of function.
// quiet -- if true, returned error status won't be logged
Status DynamicLookup(void* handle, const char* symbol, void** fn_ptr, bool quiet = false);
Status DynamicLookup(void* handle, const char* symbol, void** fn_ptr);

// Open a dynamically loaded library.
// library -- name of the library. The default paths will be searched.

97
be/src/util/error-util-test.cc
Normal file
@@ -0,0 +1,97 @@
// Copyright 2015 Cloudera Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <gtest/gtest.h>
#include <gutil/strings/substitute.h>

#include "error-util.h"
#include "gen-cpp/Status_types.h"
#include "gen-cpp/ErrorCodes_types.h"

namespace impala {

TEST(ErrorMsg, GenericFormatting) {
ErrorMsg msg(TErrorCode::GENERAL, "This is a test");
ASSERT_EQ("This is a test", msg.msg());

msg.AddDetail("Detail come here.");
msg.AddDetail("Or here.");
ASSERT_EQ("This is a test\nDetail come here.\nOr here.\n",
msg.GetFullMessageDetails());

msg = ErrorMsg(TErrorCode::MISSING_BUILTIN, "fun", "sym");
ASSERT_EQ("Builtin 'fun' with symbol 'sym' does not exist. Verify that "
"all your impalads are the same version.", msg.msg());
}

TEST(ErrorMsg, MergeMap) {
ErrorLogMap left, right;
left[TErrorCode::GENERAL].messages.push_back("1");

right[TErrorCode::GENERAL].messages.push_back("2");
right[TErrorCode::PARQUET_MULTIPLE_BLOCKS].messages.push_back("p");
right[TErrorCode::PARQUET_MULTIPLE_BLOCKS].count = 3;

MergeErrorMaps(&left, right);
ASSERT_EQ(2, left.size());
ASSERT_EQ(2, left[TErrorCode::GENERAL].messages.size());

right = ErrorLogMap();
right[TErrorCode::PARQUET_MULTIPLE_BLOCKS].messages.push_back("p");
right[TErrorCode::PARQUET_MULTIPLE_BLOCKS].count = 3;

MergeErrorMaps(&left, right);
ASSERT_EQ(2, left.size());
ASSERT_EQ(2, left[TErrorCode::GENERAL].messages.size());
ASSERT_EQ(6, left[TErrorCode::PARQUET_MULTIPLE_BLOCKS].count);
}

TEST(ErrorMsg, CountErrors) {
ErrorLogMap m;
ASSERT_EQ(0, ErrorCount(m));
m[TErrorCode::PARQUET_MULTIPLE_BLOCKS].messages.push_back("p");
m[TErrorCode::PARQUET_MULTIPLE_BLOCKS].count = 999;
ASSERT_EQ(1, ErrorCount(m));
m[TErrorCode::GENERAL].messages.push_back("1");
m[TErrorCode::GENERAL].messages.push_back("2");
ASSERT_EQ(3, ErrorCount(m));
}

TEST(ErrorMsg, AppendError) {
ErrorLogMap m;
ASSERT_EQ(0, ErrorCount(m));
AppendError(&m, ErrorMsg(TErrorCode::GENERAL, "1"));
AppendError(&m, ErrorMsg(TErrorCode::GENERAL, "2"));
ASSERT_EQ(2, ErrorCount(m));
AppendError(&m, ErrorMsg(TErrorCode::PARQUET_MULTIPLE_BLOCKS, "p1"));
ASSERT_EQ(3, ErrorCount(m));
AppendError(&m, ErrorMsg(TErrorCode::PARQUET_MULTIPLE_BLOCKS, "p2"));
ASSERT_EQ(3, ErrorCount(m));
}

TEST(ErrorMsg, PrintMap) {
ErrorLogMap left;
left[TErrorCode::GENERAL].messages.push_back("1");
left[TErrorCode::GENERAL].messages.push_back("2");
left[TErrorCode::PARQUET_MULTIPLE_BLOCKS].messages.push_back("p");
left[TErrorCode::PARQUET_MULTIPLE_BLOCKS].count = 999;
ASSERT_EQ("1\n2\np (1 of 999 similar)\n", PrintErrorMapToString(left));
}

}

int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
@@ -14,11 +14,16 @@

#include "util/error-util.h"

#include <boost/foreach.hpp>

#include <errno.h>
#include <string.h>
#include <sstream>

using namespace std;
using std::string;
using std::stringstream;
using std::vector;
using std::ostream;

namespace impala {

@@ -45,4 +50,149 @@ string GetTablesMissingStatsWarning(const vector<TTableName>& tables_missing_sta
return ss.str();
}

ErrorMsg::ErrorMsg(TErrorCode::type error)
: error_(error),
message_(strings::Substitute(g_ErrorCodes_constants.TErrorMessage[error_])) {}

ErrorMsg::ErrorMsg(TErrorCode::type error, const ArgType& arg0)
: error_(error),
message_(strings::Substitute(g_ErrorCodes_constants.TErrorMessage[error_],
arg0)) {}

ErrorMsg::ErrorMsg(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1)
: error_(error),
message_(strings::Substitute(g_ErrorCodes_constants.TErrorMessage[error_],
arg0, arg1)) {}

ErrorMsg::ErrorMsg(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
const ArgType& arg2)
: error_(error),
message_(strings::Substitute(g_ErrorCodes_constants.TErrorMessage[error_],
arg0, arg1, arg2)) {}

ErrorMsg::ErrorMsg(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
const ArgType& arg2, const ArgType& arg3)
: error_(error),
message_(strings::Substitute(g_ErrorCodes_constants.TErrorMessage[error_],
arg0, arg1, arg2, arg3)) {}

ErrorMsg::ErrorMsg(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
const ArgType& arg2, const ArgType& arg3, const ArgType& arg4)
: error_(error),
message_(strings::Substitute(g_ErrorCodes_constants.TErrorMessage[error_],
arg0, arg1, arg2, arg3, arg4)) {}

ErrorMsg::ErrorMsg(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
const ArgType& arg2, const ArgType& arg3, const ArgType& arg4,
const ArgType& arg5)
: error_(error),
message_(strings::Substitute(g_ErrorCodes_constants.TErrorMessage[error_],
arg0, arg1, arg2, arg3, arg4, arg5)) {}

ErrorMsg::ErrorMsg(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
const ArgType& arg2, const ArgType& arg3, const ArgType& arg4,
const ArgType& arg5, const ArgType& arg6)
: error_(error),
message_(strings::Substitute(g_ErrorCodes_constants.TErrorMessage[error_],
arg0, arg1, arg2, arg3, arg4, arg5, arg6)) {}

ErrorMsg::ErrorMsg(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
const ArgType& arg2, const ArgType& arg3, const ArgType& arg4,
const ArgType& arg5, const ArgType& arg6, const ArgType& arg7)
: error_(error),
message_(strings::Substitute(g_ErrorCodes_constants.TErrorMessage[error_],
arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)) {}

ErrorMsg::ErrorMsg(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
const ArgType& arg2, const ArgType& arg3, const ArgType& arg4,
const ArgType& arg5, const ArgType& arg6, const ArgType& arg7,
const ArgType& arg8)
: error_(error),
message_(strings::Substitute(g_ErrorCodes_constants.TErrorMessage[error_],
arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8)) {}

ErrorMsg::ErrorMsg(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
const ArgType& arg2, const ArgType& arg3, const ArgType& arg4,
const ArgType& arg5, const ArgType& arg6, const ArgType& arg7,
const ArgType& arg8, const ArgType& arg9)
: error_(error),
message_(strings::Substitute(g_ErrorCodes_constants.TErrorMessage[error_],
arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9)) {}

ErrorMsg ErrorMsg::Init(TErrorCode::type error, const ArgType& arg0,
const ArgType& arg1, const ArgType& arg2, const ArgType& arg3,
const ArgType& arg4, const ArgType& arg5, const ArgType& arg6, const ArgType& arg7,
const ArgType& arg8, const ArgType& arg9) {

ErrorCodesConstants error_strings;
ErrorMsg m;
m.error_ = error;
m.message_ = strings::Substitute(error_strings.TErrorMessage[m.error_],
arg0, arg1, arg2, arg3, arg4, arg5,
arg6, arg7, arg8, arg9);
return m;
}

void PrintErrorMap(ostream* stream, const ErrorLogMap& errors) {
BOOST_FOREACH(const ErrorLogMap::value_type& v, errors) {
if (v.first == TErrorCode::GENERAL) {
BOOST_FOREACH(const string& s, v.second.messages) {
*stream << s << "\n";
}
} else {
*stream << v.second.messages.front();
if (v.second.count < 2) {
*stream << "\n";
} else {
*stream << " (1 of " << v.second.count << " similar)\n";
}
}
}
}

string PrintErrorMapToString(const ErrorLogMap& errors) {
stringstream stream;
PrintErrorMap(&stream, errors);
return stream.str();
}

void MergeErrorMaps(ErrorLogMap* left, const ErrorLogMap& right) {
BOOST_FOREACH(const ErrorLogMap::value_type& v, right) {
// Append generic messages; for specific codes, add a new entry or increment the count
if (v.first == TErrorCode::GENERAL) {
(*left)[v.first].messages.insert(
(*left)[v.first].messages.end(), v.second.messages.begin(),
v.second.messages.end());
} else {
if ((*left).count(v.first) > 0) {
(*left)[v.first].count += v.second.count;
} else {
(*left)[v.first].messages.push_back(v.second.messages.front());
(*left)[v.first].count = v.second.count;
}
}
}
}

void AppendError(ErrorLogMap* map, const ErrorMsg& e) {
if (e.error() == TErrorCode::GENERAL) {
(*map)[e.error()].messages.push_back(e.msg());
} else {
ErrorLogMap::iterator it = map->find(e.error());
if (it != map->end()) {
++(it->second.count);
} else {
(*map)[e.error()].messages.push_back(e.msg());
(*map)[e.error()].count = 1;
}
}
}

size_t ErrorCount(const ErrorLogMap& errors) {
ErrorLogMap::const_iterator cit = errors.find(TErrorCode::GENERAL);
size_t general_errors = cit != errors.end() ?
errors.find(TErrorCode::GENERAL)->second.messages.size() - 1 : 0;
return errors.size() + general_errors;
}
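(As a worked reading of this count, compare the CountErrors test above: two GENERAL messages plus one specific code give a map of size 2 and general_errors == 1, so ErrorCount() returns 3.)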

}

@@ -16,10 +16,17 @@
#ifndef IMPALA_UTIL_ERROR_UTIL_H
#define IMPALA_UTIL_ERROR_UTIL_H


#include <string>
#include <vector>
#include <boost/cstdint.hpp>
#include <boost/lexical_cast.hpp>

#include "gen-cpp/CatalogObjects_types.h"
#include "gen-cpp/ErrorCodes_types.h"
#include "gen-cpp/ErrorCodes_constants.h"
#include "gen-cpp/ImpalaInternalService_types.h"
#include "gutil/strings/substitute.h"

namespace impala {

@@ -32,6 +39,133 @@ std::string GetStrErrMsg();
// table and/or column statistics.
std::string GetTablesMissingStatsWarning(
const std::vector<TTableName>& tables_missing_stats);


// Class that holds a formatted error message and potentially a set of detail
// messages. Error messages are intended to be user facing. Error details can be attached
// as strings to the message. These details should only be accessed internally.
class ErrorMsg {
public:
typedef strings::internal::SubstituteArg ArgType;

// Trivial constructor
ErrorMsg() : error_(TErrorCode::OK) {}

// Below is a set of overloaded constructors taking every possible number of arguments
// that can be passed to Substitute. The reason is to avoid forcing the compiler to
// put all arguments for Substitute() on the stack whenever this is called and thus
// pollute the instruction cache.
ErrorMsg(TErrorCode::type error);
ErrorMsg(TErrorCode::type error, const ArgType& arg0);
ErrorMsg(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1);
ErrorMsg(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
const ArgType& arg2);
ErrorMsg(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
const ArgType& arg2, const ArgType& arg3);
ErrorMsg(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
const ArgType& arg2, const ArgType& arg3, const ArgType& arg4);
ErrorMsg(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
const ArgType& arg2, const ArgType& arg3, const ArgType& arg4,
const ArgType& arg5);
ErrorMsg(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
const ArgType& arg2, const ArgType& arg3, const ArgType& arg4,
const ArgType& arg5, const ArgType& arg6);
ErrorMsg(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
const ArgType& arg2, const ArgType& arg3, const ArgType& arg4,
const ArgType& arg5, const ArgType& arg6, const ArgType& arg7);
ErrorMsg(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
const ArgType& arg2, const ArgType& arg3, const ArgType& arg4,
const ArgType& arg5, const ArgType& arg6, const ArgType& arg7,
const ArgType& arg8);
ErrorMsg(TErrorCode::type error, const ArgType& arg0, const ArgType& arg1,
const ArgType& arg2, const ArgType& arg3, const ArgType& arg4,
const ArgType& arg5, const ArgType& arg6, const ArgType& arg7,
const ArgType& arg8, const ArgType& arg9);

ErrorMsg(TErrorCode::type error, const std::vector<string>& detail)
: error_(error), details_(detail) {}

// Static initializer that is needed to avoid issues with static initialization order
// and the point in time when the string list generated via thrift becomes
// available. This method should not be used if no static initialization is needed as
// the cost of this method is proportional to the number of entries in the global error
// message list.
// WARNING: DO NOT CALL THIS METHOD IN A NON-STATIC CONTEXT
static ErrorMsg Init(TErrorCode::type error, const ArgType& arg0 = ArgType::NoArg,
const ArgType& arg1 = ArgType::NoArg,
const ArgType& arg2 = ArgType::NoArg,
const ArgType& arg3 = ArgType::NoArg,
const ArgType& arg4 = ArgType::NoArg,
const ArgType& arg5 = ArgType::NoArg,
const ArgType& arg6 = ArgType::NoArg,
const ArgType& arg7 = ArgType::NoArg,
const ArgType& arg8 = ArgType::NoArg,
const ArgType& arg9 = ArgType::NoArg);

TErrorCode::type error() const { return error_; }

// Add detail string message
void AddDetail(const std::string& d) {
details_.push_back(d);
}

// Set a specific error code
void SetError(TErrorCode::type e) {
error_ = e;
}

// Returns the formatted error string
const std::string& msg() const {
return message_;
}

const std::vector<std::string>& details() const {
return details_;
}

// Produce a string representation of the error message that includes the formatted
// message of the original error and the attached detail strings.
std::string GetFullMessageDetails() const {
std::stringstream ss;
ss << message_ << "\n";
for (size_t i = 0, end = details_.size(); i < end; ++i) {
ss << details_[i] << "\n";
}
return ss.str();
}

private:
TErrorCode::type error_;
std::string message_;
std::vector<std::string> details_;
};
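A minimal usage sketch of the class above, mirroring error-util-test.cc (the detail text here is hypothetical):

// Format a coded message; the MISSING_BUILTIN template takes two arguments.
ErrorMsg msg(TErrorCode::MISSING_BUILTIN, "fun", "sym");
// Attach internal-only detail strings after construction (hypothetical text).
msg.AddDetail("lookup attempted during fragment close");
// msg.msg() returns the formatted, user-facing string;
// GetFullMessageDetails() appends each detail on its own line.
std::string full = msg.GetFullMessageDetails();

Note that in a static context the constructors must be replaced by ErrorMsg::Init(), per the warning above.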

// Tracks log messages per error code
typedef std::map<TErrorCode::type, TErrorLogEntry> ErrorLogMap;

// Merge error maps. Merging of error maps occurs when the errors from multiple backends
// are merged into a single error map. General log messages are simply appended, specific
// errors are deduplicated by either appending a new instance or incrementing the count of
// an existing one.
void MergeErrorMaps(ErrorLogMap* left, const ErrorLogMap& right);

// Append an error to the error map. Performs the aggregation as follows: GENERAL errors
// are appended to the list of GENERAL errors, to keep one item each in the map, while for
// all other error codes only the count is incremented and only the first message is kept
// as a sample.
void AppendError(ErrorLogMap* map, const ErrorMsg& e);

// Helper method to print the contents of an ErrorMap to a stream
void PrintErrorMap(std::ostream* stream, const ErrorLogMap& errors);

// Returns the number of errors within this error map. General errors are counted
// individually, while specific errors are counted once per distinct occurrence.
size_t ErrorCount(const ErrorLogMap& errors);

// Generates a string representation of the error map. Produces the same output as
// PrintErrorMap, but returns a string instead of using a stream.
std::string PrintErrorMapToString(const ErrorLogMap& errors);

}

#endif

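A short sketch of how these helpers compose across backends, following the tests above (map contents illustrative):

ErrorLogMap coord, backend;
AppendError(&coord, ErrorMsg(TErrorCode::GENERAL, "one-off message"));
AppendError(&backend, ErrorMsg(TErrorCode::PARQUET_MULTIPLE_BLOCKS, "file=f.parq"));
AppendError(&backend, ErrorMsg(TErrorCode::PARQUET_MULTIPLE_BLOCKS, "file=g.parq"));
// The specific code keeps only the first message as a sample; its count is now 2.
MergeErrorMaps(&coord, backend);
// One GENERAL message plus one distinct specific code: ErrorCount(coord) == 2.
// Prints "one-off message" and "file=f.parq (1 of 2 similar)".
std::string report = PrintErrorMapToString(coord);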
@@ -40,8 +40,8 @@ Status FileSystemUtil::CreateDirectories(const vector<string>& directories) {
try {
boost::filesystem::create_directory(directories[i]);
} catch (exception& e) {
return Status(TStatusCode::RUNTIME_ERROR, Substitute(
"Encountered error creating directory $0: $1", directories[i], e.what()));
return Status(ErrorMsg(TErrorCode::RUNTIME_ERROR, Substitute(
"Encountered error creating directory $0: $1", directories[i], e.what())));
}
}

@@ -53,8 +53,8 @@ Status FileSystemUtil::RemovePaths(const vector<string>& directories) {
try {
boost::filesystem::remove_all(directories[i]);
} catch (exception& e) {
return Status(TStatusCode::RUNTIME_ERROR, Substitute(
"Encountered error removing directory $0: $1", directories[i], e.what()));
return Status(ErrorMsg(TErrorCode::RUNTIME_ERROR, Substitute(
"Encountered error removing directory $0: $1", directories[i], e.what())));
}
}

@@ -65,16 +65,16 @@ Status FileSystemUtil::CreateFile(const string& file_path) {
int fd = creat(file_path.c_str(), S_IRUSR | S_IWUSR);

if (fd < 0) {
return Status(TStatusCode::RUNTIME_ERROR,
return Status(ErrorMsg(TErrorCode::RUNTIME_ERROR,
Substitute("Create file $0 failed with errno=$1 description=$2",
file_path.c_str(), errno, GetStrErrMsg()));
file_path.c_str(), errno, GetStrErrMsg())));
}

int success = close(fd);
if (success < 0) {
return Status(TStatusCode::RUNTIME_ERROR,
return Status(ErrorMsg(TErrorCode::RUNTIME_ERROR,
Substitute("Close file $0 failed with errno=$1 description=$2",
file_path.c_str(), errno, GetStrErrMsg()));
file_path.c_str(), errno, GetStrErrMsg())));
}

return Status::OK;
@@ -83,9 +83,9 @@ Status FileSystemUtil::CreateFile(const string& file_path) {
Status FileSystemUtil::ResizeFile(const string& file_path, int64_t trunc_len) {
int success = truncate(file_path.c_str(), trunc_len);
if (success != 0) {
return Status(TStatusCode::RUNTIME_ERROR, Substitute(
return Status(ErrorMsg(TErrorCode::RUNTIME_ERROR, Substitute(
"Truncate file $0 to length $1 failed with errno $2 ($3)",
file_path, trunc_len, errno, GetStrErrMsg()));
file_path, trunc_len, errno, GetStrErrMsg())));
}

return Status::OK;
@@ -94,17 +94,17 @@ Status FileSystemUtil::ResizeFile(const string& file_path, int64_t trunc_len) {
Status FileSystemUtil::VerifyIsDirectory(const string& directory_path) {
try {
if (!boost::filesystem::exists(directory_path)) {
return Status(TStatusCode::RUNTIME_ERROR, Substitute(
"Directory path $0 does not exist", directory_path));
return Status(ErrorMsg(TErrorCode::RUNTIME_ERROR, Substitute(
"Directory path $0 does not exist", directory_path)));
}
} catch (exception& e) {
return Status(TStatusCode::RUNTIME_ERROR, Substitute(
return Status(ErrorMsg(TErrorCode::RUNTIME_ERROR, Substitute(
"Encountered exception while verifying existence of directory path $0: $1",
directory_path, e.what()));
directory_path, e.what())));
}
if (!boost::filesystem::is_directory(directory_path)) {
return Status(TStatusCode::RUNTIME_ERROR, Substitute(
"Path $0 is not a directory", directory_path));
return Status(ErrorMsg(TErrorCode::RUNTIME_ERROR, Substitute(
"Path $0 is not a directory", directory_path)));
}
return Status::OK;
}
@@ -115,9 +115,9 @@ Status FileSystemUtil::GetSpaceAvailable(const string& directory_path,
boost::filesystem::space_info info = boost::filesystem::space(directory_path);
*available_bytes = info.available;
} catch (exception& e) {
return Status(TStatusCode::RUNTIME_ERROR, Substitute(
return Status(ErrorMsg(TErrorCode::RUNTIME_ERROR, Substitute(
"Encountered exception while checking available space for path $0: $1",
directory_path, e.what()));
directory_path, e.what())));
}

return Status::OK;

@@ -29,9 +29,7 @@
if (!status.ok()) { \
(adaptor)->WriteErrorLog(); \
(adaptor)->WriteFileErrors(); \
string error_msg; \
status.GetErrorMsg(&error_msg); \
(env)->ThrowNew((adaptor)->impala_exc_cl(), error_msg.c_str()); \
(env)->ThrowNew((adaptor)->impala_exc_cl(), status.GetDetail().c_str()); \
return; \
} \
} while (false)
@@ -40,19 +38,7 @@
do { \
Status status = (stmt); \
if (!status.ok()) { \
string error_msg; \
status.GetErrorMsg(&error_msg); \
(env)->ThrowNew((impala_exc_cl), error_msg.c_str()); \
return; \
} \
} while (false)

#define SET_TSTATUS_IF_ERROR(stmt, tstatus) \
do { \
Status status = (stmt); \
if (!status.ok()) { \
(tstatus)->status_code = TStatusCode::INTERNAL_ERROR; \
status.GetErrorMsgs(&(tstatus)->error_msgs); \
(env)->ThrowNew((impala_exc_cl), status.GetDetail().c_str()); \
return; \
} \
} while (false)
@@ -61,9 +47,7 @@
do { \
Status status = (stmt); \
if (!status.ok()) { \
string error_msg; \
status.GetErrorMsg(&error_msg); \
(env)->ThrowNew((impala_exc_cl), error_msg.c_str()); \
(env)->ThrowNew((impala_exc_cl), status.GetDetail().c_str()); \
return (ret); \
} \
} while (false)

@@ -630,7 +630,7 @@ void RuntimeProfile::SerializeToArchiveString(stringstream* out) const {
// easy to compress.
scoped_ptr<Codec> compressor;
status = Codec::CreateCompressor(NULL, false, THdfsCompression::DEFAULT, &compressor);
DCHECK(status.ok()) << status.GetErrorMsg();
DCHECK(status.ok()) << status.GetDetail();
if (!status.ok()) return;

vector<uint8_t> compressed_buffer;

@@ -241,7 +241,7 @@ void ThreadMgr::ThreadGroupUrlCallback(const Webserver::ArgumentMap& args,
Status status = GetThreadStats(thread.second.thread_id(), &stats);
if (!status.ok()) {
LOG_EVERY_N(INFO, 100) << "Could not get per-thread statistics: "
<< status.GetErrorMsg();
<< status.GetDetail();
} else {
val.AddMember("user_ns", static_cast<double>(stats.user_ns) / 1e9,
document->GetAllocator());

@@ -95,7 +95,7 @@ then
then
CMAKE_ARGS="${CMAKE_ARGS} -DBUILD_SHARED_LIBS=${BUILD_SHARED_LIBS}"
fi
cmake . ${CMAKE_ARGS}
cmake . ${CMAKE_ARGS} -DCMAKE_EXPORT_COMPILE_COMMANDS=On
fi

if [ $CLEAN -eq 1 ]

@@ -136,11 +136,13 @@ set(PYTHON_ARGS ${THRIFT_INCLUDE_DIR_OPTION} -r --gen py -o ${PYTHON_OUTPUT_DIR}
set (EXT_DATA_SRC_FILES
ExternalDataSource.thrift
Data.thrift
ErrorCodes.thrift
Status.thrift
Types.thrift
)

set (SRC_FILES
ErrorCodes.thrift
beeswax.thrift
CatalogInternalService.thrift
CatalogObjects.thrift
@@ -170,6 +172,9 @@ set (SRC_FILES
${EXT_DATA_SRC_FILES}
)

add_custom_command(OUTPUT ErrorCodes.thrift
COMMAND python generate_error_codes.py
DEPENDS generate_error_codes.py)

# Create a build command for each of the thrift src files and generate
# a list of files they produce

@@ -19,6 +19,7 @@ namespace cpp impala
namespace java com.cloudera.impala.thrift

include "Status.thrift"
include "ErrorCodes.thrift"
include "Types.thrift"
include "Exprs.thrift"
include "CatalogObjects.thrift"
@@ -331,6 +332,16 @@ struct TInsertExecStatus {
2: optional map<string, TInsertPartitionStatus> per_partition_status
}

// Error message exchange format
struct TErrorLogEntry {

// Number of error messages reported using the above identifier
1: i32 count

// Sample messages from the above error code
2: list<string> messages
}

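As a sketch of this exchange format in the thrift-generated C++ (field values illustrative), a backend that hit the same parquet error three times would ship one sample message plus a count:

TErrorLogEntry entry;
entry.count = 3;  // occurrences observed on this backend
entry.messages.push_back(
"Parquet files should not be split into multiple hdfs-blocks. file=...");
// In TReportExecStatusParams below, error_log maps
// TErrorCode::PARQUET_MULTIPLE_BLOCKS to this entry.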
struct TReportExecStatusParams {
1: required ImpalaInternalServiceVersion protocol_version

@@ -361,8 +372,7 @@ struct TReportExecStatusParams {
8: optional TInsertExecStatus insert_exec_status;

// New errors that have not been reported to the coordinator
// optional in V1
9: optional list<string> error_log
9: optional map<ErrorCodes.TErrorCode, TErrorLogEntry> error_log;
}

struct TReportExecStatusResult {

@@ -12,21 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

include "ErrorCodes.thrift"

namespace cpp impala
namespace java com.cloudera.impala.thrift

enum TStatusCode {
OK,
CANCELLED,
ANALYSIS_ERROR,
NOT_IMPLEMENTED_ERROR,
RUNTIME_ERROR,
MEM_LIMIT_EXCEEDED,
INTERNAL_ERROR,
RECOVERABLE_ERROR
}

struct TStatus {
1: required TStatusCode status_code
1: required ErrorCodes.TErrorCode status_code
2: list<string> error_msgs
}
}
180
common/thrift/generate_error_codes.py
Normal file
@@ -0,0 +1,180 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright 2015 Cloudera Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
# For readability purposes we define the error codes and messages at the top of the
|
||||
# file. New codes and messages must be added here. Old error messages MUST NEVER BE
|
||||
# DELETED, but can be renamed. The tuple layout for a new entry is: error code enum name,
|
||||
# numeric error code, format string of the message.
|
||||
#
|
||||
# TODO Add support for SQL Error Codes
|
||||
# https://msdn.microsoft.com/en-us/library/ms714687%28v=vs.85%29.aspx
|
||||
error_codes = (
|
||||
("OK", 1, ""),
|
||||
|
||||
("GENERAL", 2, "$0"),
|
||||
|
||||
("CANCELLED", 3, "$0"),
|
||||
|
||||
("ANALYSIS_ERROR", 4, "$0"),
|
||||
|
||||
("NOT_IMPLEMENTED_ERROR", 5, "$0"),
|
||||
|
||||
("RUNTIME_ERROR", 6, "$0"),
|
||||
|
||||
("MEM_LIMIT_EXCEEDED", 7, "$0"),
|
||||
|
||||
("INTERNAL_ERROR", 8, "$0"),
|
||||
|
||||
("RECOVERABLE_ERROR", 9, "$0"),
|
||||
|
||||
("PARQUET_MULTIPLE_BLOCKS", 10,
|
||||
"Parquet files should not be split into multiple hdfs-blocks. file=$0"),
|
||||
|
||||
("PARQUET_COLUMN_METADATA_INVALID", 11,
|
||||
"Column metadata states there are $0 values, but only read $1 values "
|
||||
"from column $2"),
|
||||
|
||||
("PARQUET_HEADER_PAGE_SIZE_EXCEEDED", 12,
|
||||
"ParquetScanner: could not read data page because page header exceeded "
|
||||
"maximum size of $0"),
|
||||
|
||||
("PARQUET_HEADER_EOF", 13,
|
||||
"ParquetScanner: reached EOF while deserializing data page header."),
|
||||
|
||||
("PARQUET_GROUP_ROW_COUNT_ERROR", 14,
|
||||
"Metadata states that in group $0($1) there are $2 rows, but only $3 "
|
||||
"rows were read."),
|
||||
|
||||
("PARQUET_GROUP_ROW_COUNT_OVERFLOW", 15,
|
||||
"Metadata states that in group $0($1) there are $2 rows, but there is at least one "
|
||||
"more row in the file."),
|
||||
|
||||
("PARQUET_MISSING_PRECISION", 16,
|
||||
"File '$0' column '$1' does not have the decimal precision set."),
|
||||
|
||||
("PARQUET_WRONG_PRECISION", 17,
|
||||
"File '$0' column '$1' has a precision that does not match the table metadata "
|
||||
" precision. File metadata precision: $2, table metadata precision: $3."),
|
||||
|
||||
("PARQUET_BAD_CONVERTED_TYPE", 18,
|
||||
"File '$0' column '$1' does not have converted type set to DECIMAL"),
|
||||
|
||||
("PARQUET_INCOMPATIBLE_DECIMAL", 19,
|
||||
"File '$0' column '$1' contains decimal data but the table metadata has type $2"),
|
||||
|
||||
("SEQUENCE_SCANNER_PARSE_ERROR", 20,
|
||||
"Problem parsing file $0 at $1$2"),
|
||||
|
||||
("SNAPPY_DECOMPRESS_INVALID_BLOCK_SIZE", 21,
|
||||
"Decompressor: block size is too big. Data is likely corrupt. Size: $0"),
|
||||
|
||||
("SNAPPY_DECOMPRESS_INVALID_COMPRESSED_LENGTH", 22,
|
||||
"Decompressor: invalid compressed length. Data is likely corrupt."),
|
||||
|
||||
("SNAPPY_DECOMPRESS_UNCOMPRESSED_LENGTH_FAILED", 23,
|
||||
"Snappy: GetUncompressedLength failed"),
|
||||
|
||||
("SNAPPY_DECOMPRESS_RAW_UNCOMPRESS_FAILED", 24,
|
||||
"SnappyBlock: RawUncompress failed"),
|
||||
|
||||
("SNAPPY_DECOMPRESS_DECOMPRESS_SIZE_INCORRECT", 25,
|
||||
"Snappy: Decompressed size is not correct."),
|
||||
|
||||
("HDFS_SCAN_NODE_UNKNOWN_DISK", 26, "Unknown disk id. "
|
||||
"This will negatively affect performance. "
|
||||
"Check your hdfs settings to enable block location metadata."),
|
||||
|
||||
("FRAGMENT_EXECUTOR", 27, "Reserved resource size ($0) is larger than "
|
||||
"query mem limit ($1), and will be restricted to $1. Configure the reservation "
|
||||
"size by setting RM_INITIAL_MEM."),
|
||||
|
||||
("PARTITIONED_HASH_JOIN_MAX_PARTITION_DEPTH", 28,
|
||||
"Cannot perform join at hash join node with id $0."
|
||||
" The input data was partitioned the maximum number of $1 times."
|
||||
" This could mean there is significant skew in the data or the memory limit is"
|
||||
" set too low."),
|
||||
|
||||
("PARTITIONED_AGG_MAX_PARTITION_DEPTH", 29,
|
||||
"Cannot perform aggregation at hash aggregation node with id $0."
|
||||
" The input data was partitioned the maximum number of $1 times."
|
||||
" This could mean there is significant skew in the data or the memory limit is"
|
||||
" set too low."),
|
||||
|
||||
("MISSING_BUILTIN", 30, "Builtin '$0' with symbol '$1' does not exist. "
|
||||
"Verify that all your impalads are the same version."),
|
||||
)
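
Per the layout comment at the top of the file (enum constant, numeric code, format string), extending the table means appending one tuple here and rebuilding the generated thrift. The entry below is purely hypothetical, shown only to illustrate the shape; it is not added by this patch:

# ("SOME_NEW_ERROR", 31,  # 31 would be the next free numeric code
#  "Something went wrong while processing $0: $1"),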

import sys
import os

# Verifies the uniqueness of the error constants and numeric error codes.
def check_duplicates(codes):
  constants = {}
  num_codes = {}
  for row in codes:
    if row[0] in constants:
      print("Constant %s already used, please check definition of '%s'!" %
          (row[0], constants[row[0]]))
      exit(1)
    if row[1] in num_codes:
      print("Numeric error code %d already used, please check definition of '%s'!" %
          (row[1], num_codes[row[1]]))
      exit(1)
    constants[row[0]] = row[2]
    num_codes[row[1]] = row[2]

preamble = """
// Copyright 2015 Cloudera Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//
// THIS FILE IS AUTO GENERATED BY generate_error_codes.py DO NOT MODIFY
// IT BY HAND.
//

namespace cpp impala
namespace java com.cloudera.impala.thrift

"""
# The script will always generate the file; CMake will take care of running it only if
# necessary.
target_file = "ErrorCodes.thrift"

# Check uniqueness of error constants and numeric codes
check_duplicates(error_codes)

with open(target_file, "w+") as fid:
  fid.write(preamble)
  fid.write("""\nenum TErrorCode {\n""")
  fid.write(",\n".join(map(lambda x: "  %s" % x[0], error_codes)))
  fid.write("\n}")
  fid.write("\n")
  fid.write("const list<string> TErrorMessage = [\n")
  fid.write(",\n".join(map(lambda x: "  // %s\n  \"%s\"" % (x[0], x[2]), error_codes)))
  fid.write("\n]")

print("%s created." % target_file)
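
Given the write calls above, the generated ErrorCodes.thrift should consist of the license preamble followed by an enum and a parallel message table along these lines (an abridged reconstruction, not copied from an actual build):

enum TErrorCode {
  OK,
  GENERAL,
  CANCELLED,
  ...
}
const list<string> TErrorMessage = [
  // OK
  "",
  // GENERAL
  "$0",
  ...
]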

@@ -26,7 +26,7 @@ import com.cloudera.impala.extdatasource.thrift.TRowBatch;
import com.cloudera.impala.extdatasource.v1.ExternalDataSource;
import com.cloudera.impala.thrift.TColumnData;
import com.cloudera.impala.thrift.TStatus;
import com.cloudera.impala.thrift.TStatusCode;
import com.cloudera.impala.thrift.TErrorCode;
import com.google.common.collect.Lists;

/**
@@ -34,7 +34,7 @@ import com.google.common.collect.Lists;
 */
public class EchoDataSource implements ExternalDataSource {
  private static final TStatus STATUS_OK =
      new TStatus(TStatusCode.OK, Lists.<String>newArrayList());
      new TStatus(TErrorCode.OK, Lists.<String>newArrayList());

  private String initString_;

@@ -39,8 +39,8 @@ import com.cloudera.impala.thrift.TColumnType;
import com.cloudera.impala.thrift.TPrimitiveType;
import com.cloudera.impala.thrift.TScalarType;
import com.cloudera.impala.thrift.TStatus;
import com.cloudera.impala.thrift.TStatusCode;
import com.cloudera.impala.thrift.TErrorCode;
import com.cloudera.impala.thrift.TTypeNodeType;
import com.google.common.base.Preconditions;
import com.google.common.collect.Iterables;

@@ -61,7 +61,7 @@ public class AllTypesDataSource implements ExternalDataSource {
  private static final int BATCH_SIZE_INCREMENT = 100;

  private static final TStatus STATUS_OK =
      new TStatus(TStatusCode.OK, Lists.<String>newArrayList());
      new TStatus(TErrorCode.OK, Lists.<String>newArrayList());

  private int currRow_;
  private boolean eos_;

@@ -22,8 +22,8 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import com.cloudera.impala.common.ImpalaException;
import com.cloudera.impala.common.JniUtil;
import com.cloudera.impala.thrift.TCatalogObjectType;
import com.cloudera.impala.thrift.TErrorCode;
import com.cloudera.impala.thrift.TStatus;
import com.cloudera.impala.thrift.TStatusCode;
import com.cloudera.impala.thrift.TTable;
import com.cloudera.impala.thrift.TTableDescriptor;
import com.google.common.base.Joiner;

@@ -84,7 +84,7 @@ public class IncompleteTable extends Table {
    TTable table = new TTable(db_.getName(), name_);
    table.setId(id_.asInt());
    if (cause_ != null) {
      table.setLoad_status(new TStatus(TStatusCode.INTERNAL_ERROR,
      table.setLoad_status(new TStatus(TErrorCode.INTERNAL_ERROR,
          Lists.newArrayList(JniUtil.throwableToString(cause_),
              JniUtil.throwableToStackTrace(cause_))));
    }

@@ -39,8 +39,8 @@ import com.cloudera.impala.extdatasource.thrift.TOpenResult;
import com.cloudera.impala.extdatasource.thrift.TPrepareParams;
import com.cloudera.impala.extdatasource.thrift.TPrepareResult;
import com.cloudera.impala.extdatasource.v1.ExternalDataSource;
import com.cloudera.impala.thrift.TErrorCode;
import com.cloudera.impala.thrift.TStatus;
import com.cloudera.impala.thrift.TStatusCode;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;

@@ -158,7 +158,7 @@ public class ExternalDataSourceExecutor {
          jarPath_, className_, apiVersion_.name(), opName,
          exceptionMessage);
      LOG.error(errorMessage, e); // Logs the stack
      return new TStatus(TStatusCode.RUNTIME_ERROR, Lists.newArrayList(errorMessage));
      return new TStatus(TErrorCode.RUNTIME_ERROR, Lists.newArrayList(errorMessage));
    }

  public TPrepareResult prepare(TPrepareParams params) {

@@ -42,6 +42,7 @@ import com.cloudera.impala.service.FeSupport;
import com.cloudera.impala.thrift.TCacheJarResult;
import com.cloudera.impala.thrift.TColumnValue;
import com.cloudera.impala.thrift.TDataSourceScanNode;
import com.cloudera.impala.thrift.TErrorCode;
import com.cloudera.impala.thrift.TExplainLevel;
import com.cloudera.impala.thrift.TNetworkAddress;
import com.cloudera.impala.thrift.TPlanNode;
@@ -51,7 +52,6 @@ import com.cloudera.impala.thrift.TScanRange;
import com.cloudera.impala.thrift.TScanRangeLocation;
import com.cloudera.impala.thrift.TScanRangeLocations;
import com.cloudera.impala.thrift.TStatus;
import com.cloudera.impala.thrift.TStatusCode;
import com.google.common.base.Joiner;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;

@@ -158,7 +158,7 @@ public class DataSourceScanNode extends ScanNode {
    String hdfsLocation = table_.getDataSource().getHdfs_location();
    TCacheJarResult cacheResult = FeSupport.CacheJar(hdfsLocation);
    TStatus cacheJarStatus = cacheResult.getStatus();
    if (cacheJarStatus.getStatus_code() != TStatusCode.OK) {
    if (cacheJarStatus.getStatus_code() != TErrorCode.OK) {
      throw new InternalException(String.format(
          "Unable to cache data source library at location '%s'. Check that the file " +
          "exists and is readable. Message: %s",

@@ -184,7 +184,7 @@ public class DataSourceScanNode extends ScanNode {
          "Error calling prepare() on data source %s",
          DataSource.debugString(table_.getDataSource())), e);
    }
    if (prepareStatus.getStatus_code() != TStatusCode.OK) {
    if (prepareStatus.getStatus_code() != TErrorCode.OK) {
      throw new InternalException(String.format(
          "Data source %s returned an error from prepare(): %s",
          DataSource.debugString(table_.getDataSource()),

@@ -112,6 +112,7 @@ import com.cloudera.impala.thrift.TDropDbParams;
import com.cloudera.impala.thrift.TDropFunctionParams;
import com.cloudera.impala.thrift.TDropStatsParams;
import com.cloudera.impala.thrift.TDropTableOrViewParams;
import com.cloudera.impala.thrift.TErrorCode;
import com.cloudera.impala.thrift.TGrantRevokePrivParams;
import com.cloudera.impala.thrift.TGrantRevokeRoleParams;
import com.cloudera.impala.thrift.THdfsCachingOp;
@@ -125,7 +126,6 @@ import com.cloudera.impala.thrift.TResultRow;
import com.cloudera.impala.thrift.TResultSet;
import com.cloudera.impala.thrift.TResultSetMetadata;
import com.cloudera.impala.thrift.TStatus;
import com.cloudera.impala.thrift.TStatusCode;
import com.cloudera.impala.thrift.TTable;
import com.cloudera.impala.thrift.TTableName;
import com.cloudera.impala.thrift.TTableStats;

@@ -254,7 +254,7 @@ public class CatalogOpExecutor {
    // At this point, the operation is considered successful. If any errors occurred
    // during execution, this function will throw an exception and the CatalogServer
    // will handle setting a bad status code.
    response.getResult().setStatus(new TStatus(TStatusCode.OK, new ArrayList<String>()));
    response.getResult().setStatus(new TStatus(TErrorCode.OK, new ArrayList<String>()));
    return response;
  }

@@ -2361,7 +2361,7 @@ public class CatalogOpExecutor {
      resp.result.setVersion(catalog_.getCatalogVersion());
    }
    resp.getResult().setStatus(
        new TStatus(TStatusCode.OK, new ArrayList<String>()));
        new TStatus(TErrorCode.OK, new ArrayList<String>()));
    return resp;
  }

@@ -2489,7 +2489,7 @@ public class CatalogOpExecutor {
    response.setResult(new TCatalogUpdateResult());
    response.getResult().setCatalog_service_id(JniCatalog.getServiceId());
    response.getResult().setStatus(
        new TStatus(TStatusCode.OK, new ArrayList<String>()));
        new TStatus(TErrorCode.OK, new ArrayList<String>()));
    // Perform an incremental refresh to load new/modified partitions and files.
    Table refreshedTbl = catalog_.reloadTable(tblName.toThrift());
    response.getResult().setUpdated_catalog_object(TableToTCatalogObject(refreshedTbl));

@@ -98,6 +98,7 @@ import com.cloudera.impala.thrift.TDdlExecRequest;
import com.cloudera.impala.thrift.TDdlType;
import com.cloudera.impala.thrift.TDescribeTableOutputStyle;
import com.cloudera.impala.thrift.TDescribeTableResult;
import com.cloudera.impala.thrift.TErrorCode;
import com.cloudera.impala.thrift.TExecRequest;
import com.cloudera.impala.thrift.TExplainLevel;
import com.cloudera.impala.thrift.TExplainResult;
@@ -116,7 +117,6 @@ import com.cloudera.impala.thrift.TResultRow;
import com.cloudera.impala.thrift.TResultSet;
import com.cloudera.impala.thrift.TResultSetMetadata;
import com.cloudera.impala.thrift.TStatus;
import com.cloudera.impala.thrift.TStatusCode;
import com.cloudera.impala.thrift.TStmtType;
import com.cloudera.impala.thrift.TTableName;
import com.cloudera.impala.thrift.TUpdateCatalogCacheRequest;

@@ -710,7 +710,7 @@ public class Frontend {
    LOG.info(String.format("Requesting prioritized load of table(s): %s",
        Joiner.on(", ").join(missingTbls)));
    TStatus status = FeSupport.PrioritizeLoad(missingTbls);
    if (status.getStatus_code() != TStatusCode.OK) {
    if (status.getStatus_code() != TErrorCode.OK) {
      throw new InternalException("Error requesting prioritized load: " +
          Joiner.on("\n").join(status.getError_msgs()));
    }

@@ -39,12 +39,12 @@ import com.cloudera.impala.common.ByteUnits;
import com.cloudera.impala.common.ImpalaException;
import com.cloudera.impala.common.InternalException;
import com.cloudera.impala.common.JniUtil;
import com.cloudera.impala.thrift.TErrorCode;
import com.cloudera.impala.thrift.TPoolConfigParams;
import com.cloudera.impala.thrift.TPoolConfigResult;
import com.cloudera.impala.thrift.TResolveRequestPoolParams;
import com.cloudera.impala.thrift.TResolveRequestPoolResult;
import com.cloudera.impala.thrift.TStatus;
import com.cloudera.impala.thrift.TStatusCode;
import com.cloudera.impala.util.FileWatchService.FileChangeListener;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;

@@ -296,16 +296,16 @@ public class RequestPoolService {
      if (errorMessage == null) {
        // This occurs when assignToPool returns null (not an error), i.e. if the pool
        // cannot be resolved according to the policy.
        result.setStatus(new TStatus(TStatusCode.OK, Lists.<String>newArrayList()));
        result.setStatus(new TStatus(TErrorCode.OK, Lists.<String>newArrayList()));
      } else {
        // If Yarn throws an exception, return an error status.
        result.setStatus(
            new TStatus(TStatusCode.INTERNAL_ERROR, Lists.newArrayList(errorMessage)));
            new TStatus(TErrorCode.INTERNAL_ERROR, Lists.newArrayList(errorMessage)));
      }
    } else {
      result.setResolved_pool(pool);
      result.setHas_access(hasAccess(pool, user));
      result.setStatus(new TStatus(TStatusCode.OK, Lists.<String>newArrayList()));
      result.setStatus(new TStatus(TErrorCode.OK, Lists.<String>newArrayList()));
    }
    return result;
  }

@@ -27,10 +27,10 @@ import org.junit.Test;
import org.junit.rules.TemporaryFolder;

import com.cloudera.impala.common.ByteUnits;
import com.cloudera.impala.thrift.TErrorCode;
import com.cloudera.impala.thrift.TPoolConfigResult;
import com.cloudera.impala.thrift.TResolveRequestPoolParams;
import com.cloudera.impala.thrift.TResolveRequestPoolResult;
import com.cloudera.impala.thrift.TStatusCode;
import com.google.common.collect.Iterables;
import com.google.common.io.Files;

@@ -129,12 +129,12 @@ public class TestRequestPoolService {
    createPoolService(ALLOCATION_FILE, LLAMA_CONFIG_FILE);
    TResolveRequestPoolResult result = poolService_.resolveRequestPool(
        new TResolveRequestPoolParams("userA@abc.com", "root.queueA"));
    Assert.assertEquals(TStatusCode.OK, result.getStatus().getStatus_code());
    Assert.assertEquals(TErrorCode.OK, result.getStatus().getStatus_code());
    Assert.assertEquals("root.queueA", result.getResolved_pool());

    result = poolService_.resolveRequestPool(
        new TResolveRequestPoolParams("userA/a.qualified.domain@abc.com", "root.queueA"));
    Assert.assertEquals(TStatusCode.OK, result.getStatus().getStatus_code());
    Assert.assertEquals(TErrorCode.OK, result.getStatus().getStatus_code());
    Assert.assertEquals("root.queueA", result.getResolved_pool());
  }

@@ -146,7 +146,7 @@ public class TestRequestPoolService {
        new TResolveRequestPoolParams("userA", "root.NOT_A_POOL"));
    Assert.assertEquals(false, result.isSetResolved_pool());
    Assert.assertEquals(false, result.isSetHas_access());
    Assert.assertEquals(TStatusCode.INTERNAL_ERROR, result.getStatus().getStatus_code());
    Assert.assertEquals(TErrorCode.INTERNAL_ERROR, result.getStatus().getStatus_code());

    String expectedMessage = "Failed to resolve user 'userA' to a pool while " +
        "evaluating the 'primaryGroup' or 'secondaryGroup' queue placement rules because " +
@@ -20,7 +20,8 @@ from beeswaxd import BeeswaxService
from beeswaxd.BeeswaxService import QueryState
from ExecStats.ttypes import TExecStats
from ImpalaService import ImpalaService
from Status.ttypes import TStatus, TStatusCode
from ErrorCodes.ttypes import TErrorCode
from Status.ttypes import TStatus
from thrift.protocol import TBinaryProtocol
from thrift_sasl import TSaslClientTransport
from thrift.transport.TSocket import TSocket

@@ -392,7 +393,7 @@ class ImpalaClient(object):
    # the TStatus return value. For now, just print any error(s) that were encountered
    # and validate the result of the operation was a success.
    if ret is not None and isinstance(ret, TStatus):
      if ret.status_code != TStatusCode.OK:
      if ret.status_code != TErrorCode.OK:
        if ret.error_msgs:
          raise RPCException('RPC Error: %s' % '\n'.join(ret.error_msgs))
        status = RpcStatus.ERROR

@@ -24,7 +24,8 @@ from tests.common.impala_cluster import ImpalaCluster

from CatalogService import CatalogService
from CatalogService.CatalogService import TGetFunctionsRequest, TGetFunctionsResponse
from Status.ttypes import TStatus, TStatusCode
from ErrorCodes.ttypes import TErrorCode
from Status.ttypes import TStatus
from thrift.transport.TSocket import TSocket
from thrift.protocol import TBinaryProtocol
from thrift.transport.TTransport import TBufferedTransport, TTransportException

@@ -72,7 +73,7 @@ class TestCatalogServiceClient(ImpalaTestSuite):
    request = TGetFunctionsRequest()
    request.db_name = self.TEST_DB
    response = catalog_client.GetFunctions(request)
    assert response.status.status_code == TStatusCode.OK
    assert response.status.status_code == TErrorCode.OK
    assert len(response.functions) == 0

    # Add a function and make sure it shows up.

@@ -94,7 +95,7 @@ class TestCatalogServiceClient(ImpalaTestSuite):
        "LOCATION '/test-warehouse/libTestUdfs.so' SYMBOL='Fn'" % self.TEST_DB)
    response = catalog_client.GetFunctions(request)
    LOG.debug(response)
    assert response.status.status_code == TStatusCode.OK
    assert response.status.status_code == TErrorCode.OK
    assert len(response.functions) == 2

    functions = [fn for fn in response.functions]

@@ -111,7 +112,7 @@ class TestCatalogServiceClient(ImpalaTestSuite):
        "CATION '/test-warehouse/libTestUdas.so' UPDATE_FN='TwoArgUpdate'" % self.TEST_DB)
    response = catalog_client.GetFunctions(request)
    LOG.debug(response)
    assert response.status.status_code == TStatusCode.OK
    assert response.status.status_code == TErrorCode.OK
    assert len(response.functions) == 3
    functions = [fn for fn in response.functions if fn.aggregate_fn is not None]
    # Should be only 1 aggregate function

@@ -121,11 +122,11 @@ class TestCatalogServiceClient(ImpalaTestSuite):
    request.db_name = self.TEST_DB + "_does_not_exist"
    response = catalog_client.GetFunctions(request)
    LOG.debug(response)
    assert response.status.status_code == TStatusCode.INTERNAL_ERROR
    assert response.status.status_code == TErrorCode.GENERAL
    assert 'Database does not exist: ' in str(response.status)

    request = TGetFunctionsRequest()
    response = catalog_client.GetFunctions(request)
    LOG.debug(response)
    assert response.status.status_code == TStatusCode.INTERNAL_ERROR
    assert response.status.status_code == TErrorCode.GENERAL
    assert 'Database name must be set' in str(response.status)
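
The last two assertions reflect that errors without a dedicated code now surface as TErrorCode.GENERAL, with the human-readable text carried in the status rather than in the code itself. A hypothetical helper in the same vein as these tests, assuming the generated ErrorCodes and Status modules are importable as they are above:

from ErrorCodes.ttypes import TErrorCode
from Status.ttypes import TStatus

def is_general_error(status):
  # GENERAL is the catch-all code; the message text lives in status.error_msgs.
  return isinstance(status, TStatus) and status.status_code == TErrorCode.GENERAL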