Mirror of https://github.com/apache/impala.git, synced 2025-12-21 10:58:31 -05:00
IMPALA-8234: Fix ordering of Thrift enum, fix enum values, add warning
IMPALA-7694 added a field in the middle of the Metrics.TUnit enum, which broke
backwards compatibility with profiles that had been written by older versions of
Impala. This change fixes the ordering by moving the field to the end of the enum.
Additionally, it adds a warning to the top of all Thrift files that are part of
the binary profile format, and a note of caution to the main definition in
RuntimeProfile.thrift. This change also fixes the order of all enums in our
Thrift files to make errors like this less likely in the future.

Change-Id: If215f16a636008757ceb439edbd6900a1be88c59
Reviewed-on: http://gerrit.cloudera.org:8080/12543
Reviewed-by: Impala Public Jenkins <impala-public-jenkins@cloudera.com>
Tested-by: Impala Public Jenkins <impala-public-jenkins@cloudera.com>
committed by Impala Public Jenkins
parent f9ae897c4b
commit 91d8a8f628
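For context, here is a minimal sketch of the compatibility rule the commit message
describes; the enum below is hypothetical and not taken from this diff. Thrift
serializes an enum field as the value's integer, so old profile data is decoded by
number, not by name:

// Hypothetical enum for illustration only (not from this commit). A profile
// written against version 1 stores BYTES as the integer 1.
enum TExampleUnit {   // version 1
  UNIT = 0
  BYTES = 1
}

// Safe change: append at the end. Old data that stored 1 still decodes as BYTES.
enum TExampleUnit {   // version 2, backwards compatible
  UNIT = 0
  BYTES = 1
  TIME_NS = 2
}

// Unsafe change (the IMPALA-7694 mistake, shown with implicit values): inserting
// a member in the middle renumbers everything after it, so old data that stored
// 1 (BYTES) now decodes as TIME_NS.
enum TExampleUnit {   // version 2, breaks old profiles
  UNIT,     // = 0
  TIME_NS,  // = 1, takes over BYTES' old integer
  BYTES,    // = 2
}

Writing the integers explicitly, as the diff below does throughout, makes any such
renumbering visible in review.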
@@ -19,8 +19,8 @@ namespace cpp impala
 namespace java org.apache.impala.thrift

 enum TReservedWordsVersion {
-  IMPALA_2_11
-  IMPALA_3_0
+  IMPALA_2_11 = 0
+  IMPALA_3_0 = 1
 }

 // Used to pass gflags from backend to frontend, JniCatalog and JniFrontend
@@ -29,82 +29,82 @@ include "hive_metastore.thrift"
 enum TCatalogObjectType {
   // UNKNOWN is used to indicate an error condition when converting
   // strings to their matching TCatalogObjectType.
-  UNKNOWN,
-  CATALOG,
-  DATABASE,
-  TABLE,
-  VIEW,
-  FUNCTION,
-  DATA_SOURCE,
-  PRINCIPAL,
-  PRIVILEGE,
-  HDFS_CACHE_POOL,
+  UNKNOWN = 0
+  CATALOG = 1
+  DATABASE = 2
+  TABLE = 3
+  VIEW = 4
+  FUNCTION = 5
+  DATA_SOURCE = 6
+  PRINCIPAL = 7
+  PRIVILEGE = 8
+  HDFS_CACHE_POOL = 9
 }

 enum TTableType {
-  HDFS_TABLE,
-  HBASE_TABLE,
-  VIEW,
-  DATA_SOURCE_TABLE,
-  KUDU_TABLE,
+  HDFS_TABLE = 0
+  HBASE_TABLE = 1
+  VIEW = 2
+  DATA_SOURCE_TABLE = 3
+  KUDU_TABLE = 4
 }

 // TODO: Separate the storage engines (e.g. Kudu) from the file formats.
 // TODO: Make the names consistent with the file format keywords specified in
 // the parser.
 enum THdfsFileFormat {
-  TEXT,
-  RC_FILE,
-  SEQUENCE_FILE,
-  AVRO,
-  PARQUET,
-  KUDU,
-  ORC
+  TEXT = 0
+  RC_FILE = 1
+  SEQUENCE_FILE = 2
+  AVRO = 3
+  PARQUET = 4
+  KUDU = 5
+  ORC = 6
 }

 // TODO: Since compression is also enabled for Kudu columns, we should
 // rename this enum to not be Hdfs specific.
 enum THdfsCompression {
-  NONE,
-  DEFAULT,
-  GZIP,
-  DEFLATE,
-  BZIP2,
-  SNAPPY,
-  SNAPPY_BLOCKED,
-  LZO,
-  LZ4,
-  ZLIB,
-  ZSTD
+  NONE = 0
+  DEFAULT = 1
+  GZIP = 2
+  DEFLATE = 3
+  BZIP2 = 4
+  SNAPPY = 5
+  SNAPPY_BLOCKED = 6
+  LZO = 7
+  LZ4 = 8
+  ZLIB = 9
+  ZSTD = 10
 }

 enum TColumnEncoding {
-  AUTO,
-  PLAIN,
-  PREFIX,
-  GROUP_VARINT,
-  RLE,
-  DICTIONARY,
-  BIT_SHUFFLE
+  AUTO = 0
+  PLAIN = 1
+  PREFIX = 2
+  GROUP_VARINT = 3
+  RLE = 4
+  DICTIONARY = 5
+  BIT_SHUFFLE = 6
 }

 enum THdfsSeqCompressionMode {
-  RECORD,
-  BLOCK
+  RECORD = 0
+  BLOCK = 1
 }

 // The table property type.
 enum TTablePropertyType {
-  TBL_PROPERTY,
-  SERDE_PROPERTY
+  TBL_PROPERTY = 0
+  SERDE_PROPERTY = 1
 }

 // The access level that is available to Impala on the Catalog object.
 enum TAccessLevel {
-  NONE,
-  READ_WRITE,
-  READ_ONLY,
-  WRITE_ONLY,
+  NONE = 0
+  READ_WRITE = 1
+  READ_ONLY = 2
+  WRITE_ONLY = 3
 }

 // Mapping from names defined by Avro to values in the THdfsCompression enum.
@@ -485,8 +485,8 @@ struct TDatabase {
 // Represents a principal type that maps to Sentry principal type.
 // https://github.com/apache/sentry/blob/3d062f39ce6a047138660a7b3d0024bde916c5b4/sentry-service/sentry-service-api/src/gen/thrift/gen-javabean/org/apache/sentry/api/service/thrift/TSentryPrincipalType.java
 enum TPrincipalType {
-  ROLE,
-  USER
+  ROLE = 0
+  USER = 1
 }

 // Represents a principal in an authorization policy.
@@ -508,23 +508,23 @@ struct TPrincipal {

 // The scope a TPrivilege applies to.
 enum TPrivilegeScope {
-  SERVER,
-  URI,
-  DATABASE,
-  TABLE,
-  COLUMN,
+  SERVER = 0
+  URI = 1
+  DATABASE = 2
+  TABLE = 3
+  COLUMN = 4
 }

 // The privilege level allowed.
 enum TPrivilegeLevel {
-  ALL,
-  INSERT,
-  SELECT,
-  REFRESH,
-  CREATE,
-  ALTER,
-  DROP,
-  OWNER
+  ALL = 0
+  INSERT = 1
+  SELECT = 2
+  REFRESH = 3
+  CREATE = 4
+  ALTER = 5
+  DROP = 6
+  OWNER = 7
 }

 // Represents a privilege in an authorization policy. Privileges contain the level
@@ -25,23 +25,23 @@ include "Descriptors.thrift"
 include "Partitions.thrift"

 enum TDataSinkType {
-  DATA_STREAM_SINK,
-  TABLE_SINK,
-  JOIN_BUILD_SINK,
-  PLAN_ROOT_SINK
+  DATA_STREAM_SINK = 0
+  TABLE_SINK = 1
+  JOIN_BUILD_SINK = 2
+  PLAN_ROOT_SINK = 3
 }

 enum TSinkAction {
-  INSERT,
-  UPDATE,
-  UPSERT,
-  DELETE
+  INSERT = 0
+  UPDATE = 1
+  UPSERT = 2
+  DELETE = 3
 }

 enum TTableSinkType {
-  HDFS,
-  HBASE,
-  KUDU
+  HDFS = 0
+  HBASE = 1
+  KUDU = 2
 }

 // Sink which forwards data to a remote plan fragment,
@@ -21,15 +21,19 @@ namespace java org.apache.impala.thrift
 include "Status.thrift"
 include "Types.thrift"

+// NOTE: The definitions in this file are part of the binary format of the Impala query
+// profiles. They should preserve backwards compatibility and as such some rules apply
+// when making changes. Please see RuntimeProfile.thrift for more details.
+
 enum TExecState {
-  REGISTERED = 0,
-  PLANNING = 1,
-  QUEUED = 2,
-  RUNNING = 3,
-  FINISHED = 4,
+  REGISTERED = 0
+  PLANNING = 1
+  QUEUED = 2
+  RUNNING = 3
+  FINISHED = 4

-  CANCELLED = 5,
-  FAILED = 6,
+  CANCELLED = 5
+  FAILED = 6
 }

 // Execution stats for a single plan node.
@@ -21,25 +21,25 @@ namespace java org.apache.impala.thrift
 include "Types.thrift"

 enum TExprNodeType {
-  NULL_LITERAL,
-  BOOL_LITERAL,
-  INT_LITERAL,
-  FLOAT_LITERAL,
-  STRING_LITERAL,
-  DECIMAL_LITERAL,
-  TIMESTAMP_LITERAL,
-  CASE_EXPR,
-  COMPOUND_PRED,
-  IN_PRED,
-  IS_NULL_PRED,
-  LIKE_PRED,
-  SLOT_REF,
-  TUPLE_IS_NULL_PRED,
-  FUNCTION_CALL,
-  AGGREGATE_EXPR,
-  IS_NOT_EMPTY_PRED,
-  KUDU_PARTITION_EXPR,
-  VALID_TUPLE_ID_EXPR
+  NULL_LITERAL = 0
+  BOOL_LITERAL = 1
+  INT_LITERAL = 2
+  FLOAT_LITERAL = 3
+  STRING_LITERAL = 4
+  DECIMAL_LITERAL = 5
+  TIMESTAMP_LITERAL = 6
+  CASE_EXPR = 7
+  COMPOUND_PRED = 8
+  IN_PRED = 9
+  IS_NULL_PRED = 10
+  LIKE_PRED = 11
+  SLOT_REF = 12
+  TUPLE_IS_NULL_PRED = 13
+  FUNCTION_CALL = 14
+  AGGREGATE_EXPR = 15
+  IS_NOT_EMPTY_PRED = 16
+  KUDU_PARTITION_EXPR = 17
+  VALID_TUPLE_ID_EXPR = 18
 }

 struct TBoolLiteral {
@@ -78,16 +78,16 @@ struct TTimestampLiteral {
 // in any messages. This enum is here to provide a single definition that can be shared
 // by the front and backend.
 enum TExtractField {
-  INVALID_FIELD,
-  YEAR,
-  QUARTER,
-  MONTH,
-  DAY,
-  HOUR,
-  MINUTE,
-  SECOND,
-  MILLISECOND,
-  EPOCH
+  INVALID_FIELD = 0
+  YEAR = 1
+  QUARTER = 2
+  MONTH = 3
+  DAY = 4
+  HOUR = 5
+  MINUTE = 6
+  SECOND = 7
+  MILLISECOND = 8
+  EPOCH = 9
 }

 struct TInPredicate {
@@ -53,7 +53,14 @@ struct TRowBatch {

 // Comparison operators used in predicates.
 enum TComparisonOp {
-  LT, LE, EQ, NE, GE, GT, DISTINCT_FROM, NOT_DISTINCT
+  LT = 0
+  LE = 1
+  EQ = 2
+  NE = 3
+  GE = 4
+  GT = 5
+  DISTINCT_FROM = 6
+  NOT_DISTINCT = 7
 }

 // Binary predicates that can be pushed to the external data source and
@@ -154,13 +154,13 @@ struct TGetDataSrcsResult {
 enum TDescribeOutputStyle {
   // The default output style if no options are specified for
   // DESCRIBE DATABASE <db> and DESCRIBE <table>.
-  MINIMAL,
+  MINIMAL = 0

   // Output additional information on the database or table.
   // Set for both DESCRIBE DATABASE FORMATTED|EXTENDED <db>
   // and DESCRIBE FORMATTED|EXTENDED <table> statements.
-  EXTENDED,
-  FORMATTED
+  EXTENDED = 1
+  FORMATTED = 2
 }

 // Arguments to DescribeDb, which returns a list of properties for a given database.
@@ -213,10 +213,10 @@ struct TShowDbsParams {

 // Used by SHOW STATS and SHOW PARTITIONS to control what information is returned.
 enum TShowStatsOp {
-  TABLE_STATS,
-  COLUMN_STATS,
-  PARTITIONS,
-  RANGE_PARTITIONS
+  TABLE_STATS = 0
+  COLUMN_STATS = 1
+  PARTITIONS = 2
+  RANGE_PARTITIONS = 3
 }

 // Parameters for SHOW TABLE/COLUMN STATS and SHOW PARTITIONS commands
@@ -441,21 +441,21 @@ struct TQueryExecRequest {
 }

 enum TCatalogOpType {
-  SHOW_TABLES,
-  SHOW_DBS,
-  SHOW_STATS,
-  USE,
-  DESCRIBE_TABLE,
-  DESCRIBE_DB,
-  SHOW_FUNCTIONS,
-  RESET_METADATA,
-  DDL,
-  SHOW_CREATE_TABLE,
-  SHOW_DATA_SRCS,
-  SHOW_ROLES,
-  SHOW_GRANT_PRINCIPAL,
-  SHOW_FILES,
-  SHOW_CREATE_FUNCTION
+  SHOW_TABLES = 0
+  SHOW_DBS = 1
+  SHOW_STATS = 2
+  USE = 3
+  DESCRIBE_TABLE = 4
+  DESCRIBE_DB = 5
+  SHOW_FUNCTIONS = 6
+  RESET_METADATA = 7
+  DDL = 8
+  SHOW_CREATE_TABLE = 9
+  SHOW_DATA_SRCS = 10
+  SHOW_ROLES = 11
+  SHOW_GRANT_PRINCIPAL = 12
+  SHOW_FILES = 13
+  SHOW_CREATE_FUNCTION = 14
 }

 // TODO: Combine SHOW requests with a single struct that contains a field
@@ -540,7 +540,7 @@ struct TShutdownParams {

 // The type of administrative function to be executed.
 enum TAdminRequestType {
-  SHUTDOWN
+  SHUTDOWN = 0
 }

 // Parameters for administrative function statement. This is essentially a tagged union
@@ -554,13 +554,13 @@ struct TAdminRequest {

 // HiveServer2 Metadata operations (JniFrontend.hiveServer2MetadataOperation)
 enum TMetadataOpcode {
-  GET_TYPE_INFO,
-  GET_CATALOGS,
-  GET_SCHEMAS,
-  GET_TABLES,
-  GET_TABLE_TYPES,
-  GET_COLUMNS,
-  GET_FUNCTIONS
+  GET_TYPE_INFO = 0
+  GET_CATALOGS = 1
+  GET_SCHEMAS = 2
+  GET_TABLES = 3
+  GET_TABLE_TYPES = 4
+  GET_COLUMNS = 5
+  GET_FUNCTIONS = 6
 }

 // Input parameter to JniFrontend.hiveServer2MetadataOperation
@@ -669,9 +669,9 @@ struct TCacheJarResult {
 // A UDF may include optional prepare and close functions in addition the main evaluation
 // function. This enum distinguishes between these when doing a symbol lookup.
 enum TSymbolType {
-  UDF_EVALUATE,
-  UDF_PREPARE,
-  UDF_CLOSE,
+  UDF_EVALUATE = 0
+  UDF_PREPARE = 1
+  UDF_CLOSE = 2
 }

 // Parameters to pass to validate that the binary contains the symbol. If the
@@ -710,9 +710,9 @@ struct TSymbolLookupParams {
 }

 enum TSymbolLookupResultCode {
-  SYMBOL_FOUND,
-  BINARY_NOT_FOUND,
-  SYMBOL_NOT_FOUND,
+  SYMBOL_FOUND = 0
+  BINARY_NOT_FOUND = 1
+  SYMBOL_NOT_FOUND = 2
 }

 struct TSymbolLookupResult {
@@ -43,28 +43,28 @@ const i32 NUM_NODES_ALL_RACKS = -1
 const i32 INVALID_PLAN_NODE_ID = -1

 enum TParquetFallbackSchemaResolution {
-  POSITION,
-  NAME
+  POSITION = 0
+  NAME = 1
 }

 // The order of the enum values needs to be kept in sync with
 // ParquetMetadataUtils::ORDERED_ARRAY_ENCODINGS in parquet-metadata-utils.cc.
 enum TParquetArrayResolution {
-  THREE_LEVEL,
-  TWO_LEVEL,
-  TWO_LEVEL_THEN_THREE_LEVEL
+  THREE_LEVEL = 0
+  TWO_LEVEL = 1
+  TWO_LEVEL_THEN_THREE_LEVEL = 2
 }

 enum TJoinDistributionMode {
-  BROADCAST,
-  SHUFFLE
+  BROADCAST = 0
+  SHUFFLE = 1
 }

 // Consistency level options for Kudu scans.
 enum TKuduReadMode {
-  DEFAULT,
-  READ_LATEST,
-  READ_AT_SNAPSHOT
+  DEFAULT = 0
+  READ_LATEST = 1
+  READ_AT_SNAPSHOT = 2
 }

 // Query options that correspond to ImpalaService.ImpalaQueryOptions, with their
@@ -327,8 +327,8 @@ struct TQueryOptions {

 // Impala currently has two types of sessions: Beeswax and HiveServer2
 enum TSessionType {
-  BEESWAX,
-  HIVESERVER2
+  BEESWAX = 0
+  HIVESERVER2 = 1
 }

 // Per-client session state
@@ -540,7 +540,7 @@ struct TPlanFragmentInstanceCtx {
 // Service Protocol Details

 enum ImpalaInternalServiceVersion {
-  V1
+  V1 = 0
 }

 // The following contains the per-rpc structs for the parameters and the result.
@@ -35,24 +35,24 @@ include "RuntimeProfile.thrift"
 // - TQueryOptionsToMap()
 enum TImpalaQueryOptions {
   // if true, abort execution on the first error
-  ABORT_ON_ERROR,
+  ABORT_ON_ERROR = 0

   // maximum # of errors to be reported; Unspecified or 0 indicates backend default
-  MAX_ERRORS,
+  MAX_ERRORS = 1

   // if true, disable llvm codegen
-  DISABLE_CODEGEN,
+  DISABLE_CODEGEN = 2

   // batch size to be used by backend; Unspecified or a size of 0 indicates backend
   // default
-  BATCH_SIZE,
+  BATCH_SIZE = 3

   // a per-machine approximate limit on the memory consumption of this query;
   // unspecified or a limit of 0 means no limit;
   // otherwise specified either as:
   // a) an int (= number of bytes);
   // b) a float followed by "M" (MB) or "G" (GB)
-  MEM_LIMIT,
+  MEM_LIMIT = 4

   // specifies the degree of parallelism with which to execute the query;
   // 1: single-node execution
@@ -62,20 +62,20 @@ enum TImpalaQueryOptions {
   // more nodes than numNodes with plan fragments for this query, but at most
   // numNodes would be active at any point in time)
   // Constants (NUM_NODES_ALL, NUM_NODES_ALL_RACKS) are defined in JavaConstants.thrift.
-  NUM_NODES,
+  NUM_NODES = 5

   // maximum length of the scan range; only applicable to HDFS scan range; Unspecified or
   // a length of 0 indicates backend default;
-  MAX_SCAN_RANGE_LENGTH,
+  MAX_SCAN_RANGE_LENGTH = 6

-  MAX_IO_BUFFERS, // Removed
+  MAX_IO_BUFFERS = 7 // Removed

   // Number of scanner threads.
-  NUM_SCANNER_THREADS,
+  NUM_SCANNER_THREADS = 8

-  ALLOW_UNSUPPORTED_FORMATS, // Removed
+  ALLOW_UNSUPPORTED_FORMATS = 9 // Removed

-  DEFAULT_ORDER_BY_LIMIT, // Removed
+  DEFAULT_ORDER_BY_LIMIT = 10 // Removed

   // DEBUG ONLY:
   // Accepted formats:
@@ -101,16 +101,16 @@ enum TImpalaQueryOptions {
   //
   // Only a single ExecNode action is allowed, but multiple global actions can be
   // specified. To specify multiple actions, separate them with "|".
-  DEBUG_ACTION,
+  DEBUG_ACTION = 11

-  ABORT_ON_DEFAULT_LIMIT_EXCEEDED, // Removed
+  ABORT_ON_DEFAULT_LIMIT_EXCEEDED = 12 // Removed

   // Compression codec when inserting into tables.
   // Valid values are "snappy", "gzip", "bzip2" and "none"
   // Leave blank to use default.
-  COMPRESSION_CODEC,
+  COMPRESSION_CODEC = 13

-  SEQ_COMPRESSION_MODE, // Removed
+  SEQ_COMPRESSION_MODE = 14 // Removed

   // HBase scan query option. If set and > 0, HBASE_CACHING is the value for
   // "hbase.client.Scan.setCaching()" when querying HBase table. Otherwise, use backend
@@ -118,7 +118,7 @@ enum TImpalaQueryOptions {
   // If the value is too high, then the hbase region server will have a hard time (GC
   // pressure and long response times). If the value is too small, then there will be
   // extra trips to the hbase region server.
-  HBASE_CACHING,
+  HBASE_CACHING = 15

   // HBase scan query option. If set, HBase scan will always set
   // "hbase.client.setCacheBlocks" to CACHE_BLOCKS. Default is false.
@@ -126,134 +126,134 @@ enum TImpalaQueryOptions {
   // avoid polluting the cache in the hbase region server.
   // If the table is small and the table is used several time, set it to true to improve
   // performance.
-  HBASE_CACHE_BLOCKS,
+  HBASE_CACHE_BLOCKS = 16

   // Target file size for inserts into parquet tables. 0 uses the default.
-  PARQUET_FILE_SIZE,
+  PARQUET_FILE_SIZE = 17

   // Level of detail for explain output (NORMAL, VERBOSE).
-  EXPLAIN_LEVEL,
+  EXPLAIN_LEVEL = 18

   // If true, waits for the result of all catalog operations to be processed by all
   // active impalad in the cluster before completing.
-  SYNC_DDL,
+  SYNC_DDL = 19

   // Request pool this request should be submitted to. If not set
   // the pool is determined based on the user.
-  REQUEST_POOL,
+  REQUEST_POOL = 20

-  V_CPU_CORES, // Removed
+  V_CPU_CORES = 21 // Removed

-  RESERVATION_REQUEST_TIMEOUT, // Removed
+  RESERVATION_REQUEST_TIMEOUT = 22 // Removed

   // if true, disables cached reads. This option has no effect if REPLICA_PREFERENCE is
   // configured.
   // TODO: IMPALA-4306: retire at compatibility-breaking version
-  DISABLE_CACHED_READS,
+  DISABLE_CACHED_READS = 23

   // Temporary testing flag
-  DISABLE_OUTERMOST_TOPN,
+  DISABLE_OUTERMOST_TOPN = 24

-  RM_INITIAL_MEM, // Removed
+  RM_INITIAL_MEM = 25 // Removed

   // Time, in s, before a query will be timed out if it is inactive. May not exceed
   // --idle_query_timeout if that flag > 0. If 0, falls back to --idle_query_timeout.
-  QUERY_TIMEOUT_S,
+  QUERY_TIMEOUT_S = 26

   // Test hook for spill to disk operators
-  BUFFER_POOL_LIMIT,
+  BUFFER_POOL_LIMIT = 27

   // Transforms all count(distinct) aggregations into NDV()
-  APPX_COUNT_DISTINCT,
+  APPX_COUNT_DISTINCT = 28

   // If true, allows Impala to internally disable spilling for potentially
   // disastrous query plans. Impala will excercise this option if a query
   // has no plan hints, and at least one table is missing relevant stats.
-  DISABLE_UNSAFE_SPILLS,
+  DISABLE_UNSAFE_SPILLS = 29

   // If the number of rows that are processed for a single query is below the
   // threshold, it will be executed on the coordinator only with codegen disabled
-  EXEC_SINGLE_NODE_ROWS_THRESHOLD,
+  EXEC_SINGLE_NODE_ROWS_THRESHOLD = 30

   // If true, use the table's metadata to produce the partition columns instead of table
   // scans whenever possible. This option is opt-in by default as this optimization may
   // produce different results than the scan based approach in some edge cases.
-  OPTIMIZE_PARTITION_KEY_SCANS,
+  OPTIMIZE_PARTITION_KEY_SCANS = 31

   // Prefered memory distance of replicas. This parameter determines the pool of replicas
   // among which scans will be scheduled in terms of the distance of the replica storage
   // from the impalad.
-  REPLICA_PREFERENCE,
+  REPLICA_PREFERENCE = 32

   // Enables random backend selection during scheduling.
-  SCHEDULE_RANDOM_REPLICA,
+  SCHEDULE_RANDOM_REPLICA = 33

-  SCAN_NODE_CODEGEN_THRESHOLD, // Removed
+  SCAN_NODE_CODEGEN_THRESHOLD = 34 // Removed

   // If true, the planner will not generate plans with streaming preaggregations.
-  DISABLE_STREAMING_PREAGGREGATIONS,
+  DISABLE_STREAMING_PREAGGREGATIONS = 35

-  RUNTIME_FILTER_MODE,
+  RUNTIME_FILTER_MODE = 36

   // Size (in bytes) of a runtime Bloom Filter. Will be rounded up to nearest power of
   // two.
-  RUNTIME_BLOOM_FILTER_SIZE,
+  RUNTIME_BLOOM_FILTER_SIZE = 37

   // Time (in ms) to wait in scans for runtime filters to arrive.
-  RUNTIME_FILTER_WAIT_TIME_MS,
+  RUNTIME_FILTER_WAIT_TIME_MS = 38

   // If true, disable application of runtime filters to individual rows.
-  DISABLE_ROW_RUNTIME_FILTERING,
+  DISABLE_ROW_RUNTIME_FILTERING = 39

   // Maximum number of bloom runtime filters allowed per query.
-  MAX_NUM_RUNTIME_FILTERS,
+  MAX_NUM_RUNTIME_FILTERS = 40

   // If true, use UTF-8 annotation for string columns. Note that char and varchar columns
   // always use the annotation.
-  PARQUET_ANNOTATE_STRINGS_UTF8,
+  PARQUET_ANNOTATE_STRINGS_UTF8 = 41

   // Determines how to resolve Parquet files' schemas in the absence of field IDs (which
   // is always, since fields IDs are NYI). Valid values are "position" and "name".
-  PARQUET_FALLBACK_SCHEMA_RESOLUTION,
+  PARQUET_FALLBACK_SCHEMA_RESOLUTION = 42

   // Multi-threaded execution: degree of parallelism = number of active threads per
   // backend
-  MT_DOP,
+  MT_DOP = 43

   // If true, INSERT writes to S3 go directly to their final location rather than being
   // copied there by the coordinator. We cannot do this for INSERT OVERWRITES because for
   // those queries, the coordinator deletes all files in the final location before copying
   // the files there.
   // TODO: Find a way to get this working for INSERT OVERWRITEs too.
-  S3_SKIP_INSERT_STAGING,
+  S3_SKIP_INSERT_STAGING = 44

   // Maximum runtime bloom filter size, in bytes.
-  RUNTIME_FILTER_MAX_SIZE,
+  RUNTIME_FILTER_MAX_SIZE = 45

   // Minimum runtime bloom filter size, in bytes.
-  RUNTIME_FILTER_MIN_SIZE,
+  RUNTIME_FILTER_MIN_SIZE = 46

   // Prefetching behavior during hash tables' building and probing.
-  PREFETCH_MODE,
+  PREFETCH_MODE = 47

   // Additional strict handling of invalid data parsing and type conversions.
-  STRICT_MODE,
+  STRICT_MODE = 48

   // A limit on the amount of scratch directory space that can be used;
   // Unspecified or a limit of -1 means no limit;
   // Otherwise specified in the same way as MEM_LIMIT.
-  SCRATCH_LIMIT,
+  SCRATCH_LIMIT = 49

   // Indicates whether the FE should rewrite Exprs for optimization purposes.
   // It's sometimes useful to disable rewrites for testing, e.g., expr-test.cc.
-  ENABLE_EXPR_REWRITES,
+  ENABLE_EXPR_REWRITES = 50

   // Indicates whether to use the new decimal semantics, which includes better
   // rounding and output types for multiply / divide
-  DECIMAL_V2,
+  DECIMAL_V2 = 51

   // Indicates whether to use dictionary filtering for Parquet files
-  PARQUET_DICTIONARY_FILTERING,
+  PARQUET_DICTIONARY_FILTERING = 52

   // Policy for resolving nested array fields in Parquet files.
   // An Impala array type can have several different representations in
@@ -261,49 +261,49 @@ enum TImpalaQueryOptions {
   // between the two and three level encodings with index-based field resolution.
   // The ambiguity can manually be resolved using this query option, or by using
   // PARQUET_FALLBACK_SCHEMA_RESOLUTION=name.
-  PARQUET_ARRAY_RESOLUTION,
+  PARQUET_ARRAY_RESOLUTION = 53

   // Indicates whether to read statistics from Parquet files and use them during query
   // processing. This includes skipping data based on the statistics and computing query
   // results like "select min()".
-  PARQUET_READ_STATISTICS,
+  PARQUET_READ_STATISTICS = 54

   // Join distribution mode that is used when the join inputs have an unknown
   // cardinality, e.g., because of missing table statistics.
-  DEFAULT_JOIN_DISTRIBUTION_MODE,
+  DEFAULT_JOIN_DISTRIBUTION_MODE = 55

   // If the number of rows processed per node is below the threshold and disable_codegen
   // is unset, codegen will be automatically be disabled by the planner.
-  DISABLE_CODEGEN_ROWS_THRESHOLD,
+  DISABLE_CODEGEN_ROWS_THRESHOLD = 56

   // The default spillable buffer size, in bytes.
-  DEFAULT_SPILLABLE_BUFFER_SIZE,
+  DEFAULT_SPILLABLE_BUFFER_SIZE = 57

   // The minimum spillable buffer size, in bytes.
-  MIN_SPILLABLE_BUFFER_SIZE,
+  MIN_SPILLABLE_BUFFER_SIZE = 58

   // The maximum row size that memory is reserved for, in bytes.
-  MAX_ROW_SIZE,
+  MAX_ROW_SIZE = 59

   // The time, in seconds, that a session may be idle for before it is closed (and all
   // running queries cancelled) by Impala. If 0, idle sessions never expire.
-  IDLE_SESSION_TIMEOUT,
+  IDLE_SESSION_TIMEOUT = 60

   // Minimum number of bytes that will be scanned in COMPUTE STATS TABLESAMPLE,
   // regardless of the user-supplied sampling percent.
-  COMPUTE_STATS_MIN_SAMPLE_SIZE,
+  COMPUTE_STATS_MIN_SAMPLE_SIZE = 61

   // Time limit, in s, before a query will be timed out after it starts executing. Does
   // not include time spent in planning, scheduling or admission control. A value of 0
   // means no time limit.
-  EXEC_TIME_LIMIT_S,
+  EXEC_TIME_LIMIT_S = 62

   // When a query has both grouping and distinct exprs, impala can optionally include the
   // distinct exprs in the hash exchange of the first aggregation phase to spread the data
   // among more nodes. However, this plan requires another hash exchange on the grouping
   // exprs in the second phase which is not required when omitting the distinct exprs in
   // the first phase. Shuffling by both is better if the grouping exprs have low NDVs.
-  SHUFFLE_DISTINCT_EXPRS,
+  SHUFFLE_DISTINCT_EXPRS = 63

   // This only has an effect if memory-estimate-based admission control is enabled, i.e.
   // max_mem_resources is set for the pool and, *contrary to best practices*, MEM_LIMIT
@@ -311,51 +311,51 @@ enum TImpalaQueryOptions {
   // planner memory estimate) is used for admission control purposes. This provides a
   // workaround if the planner's memory estimate is too high and prevents a runnable
   // query from being admitted. 0 or -1 means this has no effect. Defaults to 0.
-  MAX_MEM_ESTIMATE_FOR_ADMISSION,
+  MAX_MEM_ESTIMATE_FOR_ADMISSION = 64

   // Admission control will reject queries when the number of reserved threads per backend
   // for the query exceeds this number. 0 or -1 means this has no effect.
-  THREAD_RESERVATION_LIMIT,
+  THREAD_RESERVATION_LIMIT = 65

   // Admission control will reject queries when the total number of reserved threads
   // across all backends for the query exceeds this number. 0 or -1 means this has no
   // effect.
-  THREAD_RESERVATION_AGGREGATE_LIMIT,
+  THREAD_RESERVATION_AGGREGATE_LIMIT = 66

   // Overrides the -kudu_read_mode flag to set the consistency level for Kudu scans.
   // Possible values are DEFAULT, READ_LATEST, and READ_AT_SNAPSHOT.
-  KUDU_READ_MODE,
+  KUDU_READ_MODE = 67

   // Allow reading of erasure coded files.
-  ALLOW_ERASURE_CODED_FILES,
+  ALLOW_ERASURE_CODED_FILES = 68

   // The timezone used in UTC<->localtime conversions. The default is the OS's timezone
   // at the coordinator, which can be overridden by environment variable $TZ.
-  TIMEZONE,
+  TIMEZONE = 69

   // Scan bytes limit, after which a query will be terminated with an error.
-  SCAN_BYTES_LIMIT,
+  SCAN_BYTES_LIMIT = 70

   // CPU time limit in seconds, after which a query will be terminated with an error.
   // Note that until IMPALA-7318 is fixed, CPU usage can be very stale and this may not
   // terminate queries soon enough.
-  CPU_LIMIT_S,
+  CPU_LIMIT_S = 71

   // The max number of estimated bytes a TopN operator is allowed to materialize, if the
   // planner thinks a TopN operator will exceed this limit, it falls back to a TotalSort
   // operator which is capable of spilling to disk (unlike the TopN operator which keeps
   // everything in memory). 0 or -1 means this has no effect.
-  TOPN_BYTES_LIMIT,
+  TOPN_BYTES_LIMIT = 72

   // An opaque string, not used by Impala itself, that can be used to identify
   // the client, like a User-Agent in HTTP. Drivers should set this to
   // their version number. May also be used by tests to help identify queries.
-  CLIENT_IDENTIFIER,
+  CLIENT_IDENTIFIER = 73

   // Probability to enable tracing of resource usage consumption on all fragment instance
   // executors of a query. Must be between 0 and 1 inclusive, 0 means no query will be
   // traced, 1 means all queries will be traced.
-  RESOURCE_TRACE_RATIO,
+  RESOURCE_TRACE_RATIO = 74

   // The maximum number of executor candidates to consider when scheduling remote
   // HDFS ranges. When non-zero, the scheduler generates a consistent set of
@@ -366,17 +366,17 @@ enum TImpalaQueryOptions {
   // This increases the efficiency of file-related caches (e.g. the HDFS file handle
   // cache). If set to 0, the number of executor candidates is unlimited, and remote
   // ranges will be scheduled across all executors.
-  NUM_REMOTE_EXECUTOR_CANDIDATES,
+  NUM_REMOTE_EXECUTOR_CANDIDATES = 75

   // A limit on the number of rows produced by the query. The query will be
   // canceled if the query is still executing after this limit is hit. A value
   // of 0 means there is no limit on the number of rows produced.
-  NUM_ROWS_PRODUCED_LIMIT
+  NUM_ROWS_PRODUCED_LIMIT = 76

   // Set when attempting to load a planner testcase. Typically used by developers for
   // debugging a testcase. Should not be set in user clusters. If set, a warning
   // is emitted in the query runtime profile.
-  PLANNER_TESTCASE_MODE
+  PLANNER_TESTCASE_MODE = 77
 }

 // The summary of a DML statement.
|||||||
@@ -29,42 +29,42 @@ const i16 HDFS_DEFAULT_CACHE_REPLICATION_FACTOR = 1
|
|||||||
// Structs used to execute DDL operations using the JniCatalog.
|
// Structs used to execute DDL operations using the JniCatalog.
|
||||||
|
|
||||||
enum TDdlType {
|
enum TDdlType {
|
||||||
ALTER_TABLE,
|
ALTER_TABLE = 0
|
||||||
ALTER_VIEW,
|
ALTER_VIEW = 1
|
||||||
CREATE_DATABASE,
|
CREATE_DATABASE = 2
|
||||||
CREATE_TABLE,
|
CREATE_TABLE = 3
|
||||||
CREATE_TABLE_AS_SELECT,
|
CREATE_TABLE_AS_SELECT = 4
|
||||||
CREATE_TABLE_LIKE,
|
CREATE_TABLE_LIKE = 5
|
||||||
CREATE_VIEW,
|
CREATE_VIEW = 6
|
||||||
CREATE_FUNCTION,
|
CREATE_FUNCTION = 7
|
||||||
COMPUTE_STATS,
|
COMPUTE_STATS = 8
|
||||||
DROP_DATABASE,
|
DROP_DATABASE = 9
|
||||||
DROP_TABLE,
|
DROP_TABLE = 10
|
||||||
DROP_VIEW,
|
DROP_VIEW = 11
|
||||||
DROP_FUNCTION,
|
DROP_FUNCTION = 12
|
||||||
CREATE_DATA_SOURCE,
|
CREATE_DATA_SOURCE = 13
|
||||||
DROP_DATA_SOURCE,
|
DROP_DATA_SOURCE = 14
|
||||||
DROP_STATS,
|
DROP_STATS = 15
|
||||||
CREATE_ROLE,
|
CREATE_ROLE = 16
|
||||||
DROP_ROLE,
|
DROP_ROLE = 17
|
||||||
GRANT_ROLE,
|
GRANT_ROLE = 18
|
||||||
REVOKE_ROLE,
|
REVOKE_ROLE = 19
|
||||||
GRANT_PRIVILEGE,
|
GRANT_PRIVILEGE = 20
|
||||||
REVOKE_PRIVILEGE,
|
REVOKE_PRIVILEGE = 21
|
||||||
TRUNCATE_TABLE,
|
TRUNCATE_TABLE = 22
|
||||||
COMMENT_ON,
|
COMMENT_ON = 23
|
||||||
ALTER_DATABASE,
|
ALTER_DATABASE = 24
|
||||||
COPY_TESTCASE
|
COPY_TESTCASE = 25
|
||||||
}
|
}
|
||||||
|
|
||||||
enum TOwnerType {
|
enum TOwnerType {
|
||||||
USER,
|
USER = 0
|
||||||
ROLE
|
ROLE = 1
|
||||||
}
|
}
|
||||||
|
|
||||||
// Types of ALTER DATABASE commands supported.
|
// Types of ALTER DATABASE commands supported.
|
||||||
enum TAlterDbType {
|
enum TAlterDbType {
|
||||||
SET_OWNER
|
SET_OWNER = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parameters for ALTER DATABASE SET OWNER commands.
|
// Parameters for ALTER DATABASE SET OWNER commands.
|
||||||
@@ -93,24 +93,24 @@ struct TAlterDbParams {
|
|||||||
|
|
||||||
// Types of ALTER TABLE commands supported.
|
// Types of ALTER TABLE commands supported.
|
||||||
enum TAlterTableType {
|
enum TAlterTableType {
|
||||||
ADD_COLUMNS,
|
ADD_COLUMNS = 0
|
||||||
REPLACE_COLUMNS,
|
REPLACE_COLUMNS = 1
|
||||||
ADD_PARTITION,
|
ADD_PARTITION = 2
|
||||||
ADD_DROP_RANGE_PARTITION,
|
ADD_DROP_RANGE_PARTITION = 3
|
||||||
ALTER_COLUMN,
|
ALTER_COLUMN = 4
|
||||||
DROP_COLUMN,
|
DROP_COLUMN = 5
|
||||||
DROP_PARTITION,
|
DROP_PARTITION = 6
|
||||||
RENAME_TABLE,
|
RENAME_TABLE = 7
|
||||||
RENAME_VIEW,
|
RENAME_VIEW = 8
|
||||||
SET_FILE_FORMAT,
|
SET_FILE_FORMAT = 9
|
||||||
SET_LOCATION,
|
SET_LOCATION = 10
|
||||||
SET_TBL_PROPERTIES,
|
SET_TBL_PROPERTIES = 11
|
||||||
// Used internally by COMPUTE STATS and by ALTER TABLE SET COLUMN STATS.
|
// Used internally by COMPUTE STATS and by ALTER TABLE SET COLUMN STATS.
|
||||||
UPDATE_STATS,
|
UPDATE_STATS = 12
|
||||||
SET_CACHED,
|
SET_CACHED = 13
|
||||||
RECOVER_PARTITIONS,
|
RECOVER_PARTITIONS = 14
|
||||||
SET_ROW_FORMAT,
|
SET_ROW_FORMAT = 15
|
||||||
SET_OWNER
|
SET_OWNER = 16
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parameters of CREATE DATABASE commands
|
// Parameters of CREATE DATABASE commands
|
||||||
@@ -249,8 +249,8 @@ struct TAlterTableAddPartitionParams {
 }

 enum TRangePartitionOperationType {
-  ADD,
-  DROP
+  ADD = 0
+  DROP = 1
 }

 // Parameters for ALTER TABLE ADD/DROP RANGE PARTITION command
|||||||
@@ -29,8 +29,8 @@ struct TVertex {
|
|||||||
}
|
}
|
||||||
|
|
||||||
enum TEdgeType {
|
enum TEdgeType {
|
||||||
PROJECTION,
|
PROJECTION = 0
|
||||||
PREDICATE
|
PREDICATE = 1
|
||||||
}
|
}
|
||||||
|
|
||||||
struct TMultiEdge {
|
struct TMultiEdge {
|
||||||
|
|||||||
@@ -22,13 +22,13 @@ namespace java org.apache.impala.thrift
|
|||||||
|
|
||||||
// Convenience type to map between log4j levels and glog severity
|
// Convenience type to map between log4j levels and glog severity
|
||||||
enum TLogLevel {
|
enum TLogLevel {
|
||||||
VLOG_3,
|
VLOG_3 = 0
|
||||||
VLOG_2
|
VLOG_2 = 1
|
||||||
VLOG,
|
VLOG = 2
|
||||||
INFO,
|
INFO = 3
|
||||||
WARN,
|
WARN = 4
|
||||||
ERROR,
|
ERROR = 5
|
||||||
FATAL
|
FATAL = 6
|
||||||
}
|
}
|
||||||
|
|
||||||
// Helper structs for GetJavaLogLevel(), SetJavaLogLevel() methods.
|
// Helper structs for GetJavaLogLevel(), SetJavaLogLevel() methods.
|
||||||
|
|||||||
@@ -18,36 +18,46 @@
|
|||||||
namespace cpp impala
|
namespace cpp impala
|
||||||
namespace java org.apache.impala.thrift
|
namespace java org.apache.impala.thrift
|
||||||
|
|
||||||
|
// NOTE: The definitions in this file are part of the binary format of the Impala query
|
||||||
|
// profiles. They should preserve backwards compatibility and as such some rules apply
|
||||||
|
// when making changes. Please see RuntimeProfile.thrift for more details.
|
||||||
|
|
||||||
|
|
||||||
// Metric and counter data types.
|
// Metric and counter data types.
|
||||||
|
//
|
||||||
|
// WARNING (IMPALA-8236): Adding new values to TUnit and using them in TCounter will break
|
||||||
|
// old decoders of thrift profiles. The workaround is to only use the following units in
|
||||||
|
// anything that is serialised into a TCounter:
|
||||||
|
// UNIT, UNIT_PER_SECOND, CPU_TICKS, BYTES, BYTES_PER_SECOND, TIME_NS, DOUBLE_VALUE
|
||||||
enum TUnit {
|
enum TUnit {
|
||||||
// A dimensionless numerical quantity
|
// A dimensionless numerical quantity
|
||||||
UNIT,
|
UNIT = 0
|
||||||
// Rate of a dimensionless numerical quantity
|
// Rate of a dimensionless numerical quantity
|
||||||
UNIT_PER_SECOND,
|
UNIT_PER_SECOND = 1
|
||||||
CPU_TICKS,
|
CPU_TICKS = 2
|
||||||
BYTES
|
BYTES = 3
|
||||||
BYTES_PER_SECOND,
|
BYTES_PER_SECOND = 4
|
||||||
TIME_NS,
|
TIME_NS = 5
|
||||||
DOUBLE_VALUE,
|
DOUBLE_VALUE = 6
|
||||||
|
// No units at all, may not be a numerical quantity
|
||||||
|
NONE = 7
|
||||||
|
TIME_MS = 8
|
||||||
|
TIME_S = 9
|
||||||
|
TIME_US = 10
|
||||||
// 100th of a percent, used to express ratios etc., range from 0 to 10000, pretty
|
// 100th of a percent, used to express ratios etc., range from 0 to 10000, pretty
|
||||||
// printed as integer percentages from 0 to 100.
|
// printed as integer percentages from 0 to 100.
|
||||||
BASIS_POINTS,
|
BASIS_POINTS = 11
|
||||||
// No units at all, may not be a numerical quantity
|
|
||||||
NONE,
|
|
||||||
TIME_MS,
|
|
||||||
TIME_S,
|
|
||||||
TIME_US
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// The kind of value that a metric represents.
|
// The kind of value that a metric represents.
|
||||||
enum TMetricKind {
|
enum TMetricKind {
|
||||||
// May go up or down over time
|
// May go up or down over time
|
||||||
GAUGE,
|
GAUGE = 0
|
||||||
// A strictly increasing value
|
// A strictly increasing value
|
||||||
COUNTER,
|
COUNTER = 1
|
||||||
// Fixed; will never change
|
// Fixed; will never change
|
||||||
PROPERTY,
|
PROPERTY = 2
|
||||||
STATS,
|
STATS = 3
|
||||||
SET,
|
SET = 4
|
||||||
HISTOGRAM
|
HISTOGRAM = 5
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -21,24 +21,24 @@ namespace java org.apache.impala.thrift
|
|||||||
include "Exprs.thrift"
|
include "Exprs.thrift"
|
||||||
|
|
||||||
enum TPartitionType {
|
enum TPartitionType {
|
||||||
UNPARTITIONED,
|
UNPARTITIONED = 0
|
||||||
|
|
||||||
// round-robin partition
|
// round-robin partition
|
||||||
RANDOM,
|
RANDOM = 1
|
||||||
|
|
||||||
// unordered partition on a set of exprs
|
// unordered partition on a set of exprs
|
||||||
// (partition bounds overlap)
|
// (partition bounds overlap)
|
||||||
HASH_PARTITIONED,
|
HASH_PARTITIONED = 2
|
||||||
|
|
||||||
// ordered partition on a list of exprs
|
// ordered partition on a list of exprs
|
||||||
// (partition bounds don't overlap)
|
// (partition bounds don't overlap)
|
||||||
RANGE_PARTITIONED,
|
RANGE_PARTITIONED = 3
|
||||||
|
|
||||||
// use the partitioning scheme of a Kudu table
|
// use the partitioning scheme of a Kudu table
|
||||||
// TODO: this is a special case now because Kudu supports multilevel partition
|
// TODO: this is a special case now because Kudu supports multilevel partition
|
||||||
// schemes. We should add something like lists of TDataPartitions to reflect that
|
// schemes. We should add something like lists of TDataPartitions to reflect that
|
||||||
// and then this can be removed. (IMPALA-5255)
|
// and then this can be removed. (IMPALA-5255)
|
||||||
KUDU
|
KUDU = 4
|
||||||
}
|
}
|
||||||
|
|
||||||
// Specification of how a single logical data stream is partitioned.
|
// Specification of how a single logical data stream is partitioned.
|
||||||
|
|||||||
@@ -31,61 +31,61 @@ include "Types.thrift"
 include "ExternalDataSource.thrift"
 
 enum TPlanNodeType {
-  HDFS_SCAN_NODE,
-  HBASE_SCAN_NODE,
-  HASH_JOIN_NODE,
-  AGGREGATION_NODE,
-  SORT_NODE,
-  EMPTY_SET_NODE,
-  EXCHANGE_NODE,
-  UNION_NODE,
-  SELECT_NODE,
-  NESTED_LOOP_JOIN_NODE,
-  DATA_SOURCE_NODE,
-  ANALYTIC_EVAL_NODE,
-  SINGULAR_ROW_SRC_NODE,
-  UNNEST_NODE,
-  SUBPLAN_NODE,
-  KUDU_SCAN_NODE,
-  CARDINALITY_CHECK_NODE,
-  MULTI_AGGREGATION_NODE
+  HDFS_SCAN_NODE = 0
+  HBASE_SCAN_NODE = 1
+  HASH_JOIN_NODE = 2
+  AGGREGATION_NODE = 3
+  SORT_NODE = 4
+  EMPTY_SET_NODE = 5
+  EXCHANGE_NODE = 6
+  UNION_NODE = 7
+  SELECT_NODE = 8
+  NESTED_LOOP_JOIN_NODE = 9
+  DATA_SOURCE_NODE = 10
+  ANALYTIC_EVAL_NODE = 11
+  SINGULAR_ROW_SRC_NODE = 12
+  UNNEST_NODE = 13
+  SUBPLAN_NODE = 14
+  KUDU_SCAN_NODE = 15
+  CARDINALITY_CHECK_NODE = 16
+  MULTI_AGGREGATION_NODE = 17
 }
 
 // phases of an execution node
 // must be kept in sync with tests/failure/test_failpoints.py
 enum TExecNodePhase {
-  PREPARE,
-  PREPARE_SCANNER,
-  OPEN,
-  GETNEXT,
-  GETNEXT_SCANNER,
-  CLOSE,
+  PREPARE = 0
+  PREPARE_SCANNER = 1
+  OPEN = 2
+  GETNEXT = 3
+  GETNEXT_SCANNER = 4
+  CLOSE = 5
   // After a scanner thread completes a range with an error but before it propagates the
   // error.
-  SCANNER_ERROR,
-  INVALID
+  SCANNER_ERROR = 6
+  INVALID = 7
 }
 
 // what to do when hitting a debug point (TImpalaQueryOptions.DEBUG_ACTION)
 enum TDebugAction {
-  WAIT,
-  FAIL,
-  INJECT_ERROR_LOG,
-  MEM_LIMIT_EXCEEDED,
+  WAIT = 0
+  FAIL = 1
+  INJECT_ERROR_LOG = 2
+  MEM_LIMIT_EXCEEDED = 3
   // A floating point number in range [0.0, 1.0] that gives the probability of denying
   // each reservation increase request after the initial reservation.
-  SET_DENY_RESERVATION_PROBABILITY,
+  SET_DENY_RESERVATION_PROBABILITY = 4
   // Delay for a short amount of time: 100ms
-  DELAY,
+  DELAY = 5
 }
 
 // Preference for replica selection
 enum TReplicaPreference {
-  CACHE_LOCAL,
-  CACHE_RACK,
-  DISK_LOCAL,
-  DISK_RACK,
-  REMOTE
+  CACHE_LOCAL = 0
+  CACHE_RACK = 1
+  DISK_LOCAL = 2
+  DISK_RACK = 3
+  REMOTE = 4
 }
 
 // Specification of a runtime filter target.
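Of the debug actions above, SET_DENY_RESERVATION_PROBABILITY is the one that carries an argument: it turns buffer-reservation growth into probabilistic fault injection. Conceptually (a hedged sketch, not the backend implementation):

    import random

    deny_probability = 0.5  # the [0.0, 1.0] float supplied with the debug action

    def grant_reservation_increase():
        # After the initial reservation, each increase request is denied with
        # the configured probability, forcing low-memory code paths to run.
        return random.random() >= deny_probability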
@@ -113,8 +113,8 @@ struct TRuntimeFilterTargetDesc
 }
 
 enum TRuntimeFilterType {
-  BLOOM,
-  MIN_MAX
+  BLOOM = 0
+  MIN_MAX = 1
 }
 
 // Specification of a runtime filter.
@@ -323,22 +323,22 @@ struct TEqJoinCondition
 }
 
 enum TJoinOp {
-  INNER_JOIN,
-  LEFT_OUTER_JOIN,
-  LEFT_SEMI_JOIN,
-  LEFT_ANTI_JOIN,
+  INNER_JOIN = 0
+  LEFT_OUTER_JOIN = 1
+  LEFT_SEMI_JOIN = 2
+  LEFT_ANTI_JOIN = 3
 
   // Similar to LEFT_ANTI_JOIN with special handling for NULLs for the join conjuncts
   // on the build side. Those NULLs are considered candidate matches, and therefore could
   // be rejected (ANTI-join), based on the other join conjuncts. This is in contrast
   // to LEFT_ANTI_JOIN where NULLs are not matches and therefore always returned.
-  NULL_AWARE_LEFT_ANTI_JOIN,
+  NULL_AWARE_LEFT_ANTI_JOIN = 4
 
-  RIGHT_OUTER_JOIN,
-  RIGHT_SEMI_JOIN,
-  RIGHT_ANTI_JOIN,
-  FULL_OUTER_JOIN,
-  CROSS_JOIN
+  RIGHT_OUTER_JOIN = 5
+  RIGHT_SEMI_JOIN = 6
+  RIGHT_ANTI_JOIN = 7
+  FULL_OUTER_JOIN = 8
+  CROSS_JOIN = 9
 }
 
 struct THashJoinNode {
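The NULL_AWARE_LEFT_ANTI_JOIN comment above describes exactly the semantics SQL's NOT IN requires: a NULL on the build side is a candidate match for every probe row. A small Python sketch with None standing in for NULL (illustrative data, not Impala code):

    probe = [1, 2, None]
    build = [2, None]

    # LEFT_ANTI_JOIN: NULLs never match, so the NULL probe row always survives.
    non_null_build = [b for b in build if b is not None]
    plain_anti = [p for p in probe if p not in non_null_build]
    print(plain_anti)  # [1, None]

    # NULL_AWARE_LEFT_ANTI_JOIN: a build-side NULL could match any probe row, so
    # with no other conjuncts to rule it out, no probe row can be returned.
    null_aware = [] if any(b is None for b in build) else plain_anti
    print(null_aware)  # []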
@@ -426,13 +426,13 @@ struct TSortInfo
 
 enum TSortType {
   // Sort the entire input.
-  TOTAL,
+  TOTAL = 0
 
   // Return the first N sorted elements.
-  TOPN,
+  TOPN = 1
 
   // Divide the input into batches, each of which is sorted individually.
-  PARTIAL
+  PARTIAL = 2
 }
 
 struct TSortNode {
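The three sort types above differ in how much of the input they promise to order. Sketched in Python over a toy stream of row batches (heapq.nsmallest stands in for a top-N sort):

    import heapq

    batches = [[5, 1, 4], [2, 3]]
    flat = [v for batch in batches for v in batch]

    total = sorted(flat)                    # TOTAL: the entire input, fully ordered
    topn = heapq.nsmallest(2, flat)         # TOPN: only the first N sorted elements
    partial = [sorted(b) for b in batches]  # PARTIAL: each batch ordered on its own

    print(total)    # [1, 2, 3, 4, 5]
    print(topn)     # [1, 2]
    print(partial)  # [[1, 4, 5], [2, 3]]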
@@ -445,21 +445,21 @@ struct TSortNode
 
 enum TAnalyticWindowType {
   // Specifies the window as a logical offset
-  RANGE,
+  RANGE = 0
 
   // Specifies the window in physical units
-  ROWS
+  ROWS = 1
 }
 
 enum TAnalyticWindowBoundaryType {
   // The window starts/ends at the current row.
-  CURRENT_ROW,
+  CURRENT_ROW = 0
 
   // The window starts/ends at an offset preceding current row.
-  PRECEDING,
+  PRECEDING = 1
 
   // The window starts/ends at an offset following current row.
-  FOLLOWING
+  FOLLOWING = 2
 }
 
 struct TAnalyticWindowBoundary {
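RANGE versus ROWS above is the standard SQL window-frame distinction: ROWS counts physical rows, while RANGE covers a logical offset of the ordering value, so peers with equal ordering values are included together. A Python sketch of running sums under a hypothetical ordering column ord_:

    vals = [10, 20, 30, 40]
    ord_ = [1, 2, 2, 5]
    n = len(vals)

    # ROWS BETWEEN 1 PRECEDING AND CURRENT ROW: at most two physical rows.
    rows_sum = [sum(vals[max(0, i - 1):i + 1]) for i in range(n)]

    # RANGE BETWEEN 1 PRECEDING AND CURRENT ROW: every row whose ordering value
    # falls in [ord_[i] - 1, ord_[i]], including peers of the current row.
    range_sum = [sum(v for v, o in zip(vals, ord_) if ord_[i] - 1 <= o <= ord_[i])
                 for i in range(n)]

    print(rows_sum)   # [10, 30, 50, 70]
    print(range_sum)  # [10, 60, 60, 40]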
@@ -22,17 +22,26 @@ include "ExecStats.thrift"
 include "Metrics.thrift"
 include "Types.thrift"
 
+// NOTE: This file and the includes above define the format of Impala query profiles. As
+// newer versions of Impala should be able to read profiles written by older versions,
+// some best practices must be followed when making changes to the structures below:
+//
+// - Only append new values at the end of enums.
+// - Only add new fields at the end of structures, and always make them optional.
+// - Don't remove fields.
+// - Don't change the numbering of fields.
+
 // Represents the different formats a runtime profile can be represented in.
 enum TRuntimeProfileFormat {
   // Pretty printed.
-  STRING,
+  STRING = 0
 
   // The thrift profile, serialized, compressed, and encoded. Used for the query log.
   // See RuntimeProfile::SerializeToArchiveString.
-  BASE64,
+  BASE64 = 1
 
   // TRuntimeProfileTree.
-  THRIFT
+  THRIFT = 2
 }
 
 // Counter data
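All four rules in the NOTE protect the same invariant: an older reader must be able to walk a newer profile, keep the fields it knows, and skip the rest. Thrift's struct encoding allows that because every field travels with its numeric id. A toy Python codec that mimics the behavior (hypothetical field ids, not Impala's actual wire format):

    # Each field travels as an (id, value) pair; readers skip unknown ids.
    def encode(fields):
        return sorted(fields.items())

    def decode(wire, known_ids):
        return {fid: val for fid, val in wire if fid in known_ids}

    # A newer writer appends an optional field under a brand-new id (3)...
    wire = encode({1: 42, 2: 7, 3: 99})

    # ...and an older reader that only knows ids {1, 2} still decodes cleanly.
    print(decode(wire, {1, 2}))  # {1: 42, 2: 7}

Renumbering or reusing an existing id would instead silently change the meaning of previously written data, which is why removal and renumbering are forbidden outright.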
@@ -22,7 +22,7 @@ include "Status.thrift"
 include "Types.thrift"
 
 enum StatestoreServiceVersion {
-  V1
+  V1 = 0
 }
 
 // Structure serialized for the topic AdmissionController::IMPALA_REQUEST_QUEUE_TOPIC.
@@ -15,11 +15,15 @@
 // specific language governing permissions and limitations
 // under the License.
 
-include "ErrorCodes.thrift"
-
 namespace cpp impala
 namespace java org.apache.impala.thrift
 
+include "ErrorCodes.thrift"
+
+// NOTE: The definitions in this file are part of the binary format of the Impala query
+// profiles. They should preserve backwards compatibility and as such some rules apply
+// when making changes. Please see RuntimeProfile.thrift for more details.
+
 struct TStatus {
   1: required ErrorCodes.TErrorCode status_code
   2: list<string> error_msgs
@@ -18,6 +18,10 @@
 namespace cpp impala
 namespace java org.apache.impala.thrift
 
+// NOTE: The definitions in this file are part of the binary format of the Impala query
+// profiles. They should preserve backwards compatibility and as such some rules apply
+// when making changes. Please see RuntimeProfile.thrift for more details.
+
 typedef i64 TTimestamp
 typedef i32 TFragmentIdx
 typedef i32 TPlanNodeId
@@ -30,31 +34,31 @@ typedef i32 TJoinTableId
 // TODO: Consider moving unrelated enums to better locations.
 
 enum TPrimitiveType {
-  INVALID_TYPE,
-  NULL_TYPE,
-  BOOLEAN,
-  TINYINT,
-  SMALLINT,
-  INT,
-  BIGINT,
-  FLOAT,
-  DOUBLE,
-  DATE,
-  DATETIME,
-  TIMESTAMP,
-  STRING,
-  BINARY, // Unsupported
-  DECIMAL,
-  CHAR,
-  VARCHAR,
-  FIXED_UDA_INTERMEDIATE,
+  INVALID_TYPE = 0
+  NULL_TYPE = 1
+  BOOLEAN = 2
+  TINYINT = 3
+  SMALLINT = 4
+  INT = 5
+  BIGINT = 6
+  FLOAT = 7
+  DOUBLE = 8
+  DATE = 9
+  DATETIME = 10
+  TIMESTAMP = 11
+  STRING = 12
+  BINARY = 13
+  DECIMAL = 14
+  CHAR = 15
+  VARCHAR = 16
+  FIXED_UDA_INTERMEDIATE = 17
 }
 
 enum TTypeNodeType {
-  SCALAR,
-  ARRAY,
-  MAP,
-  STRUCT
+  SCALAR = 0
+  ARRAY = 1
+  MAP = 2
+  STRUCT = 3
 }
 
 struct TScalarType {
@@ -96,42 +100,42 @@ struct TColumnType
 }
 
 enum TStmtType {
-  QUERY,
-  DDL, // Data definition, e.g. CREATE TABLE (includes read-only functions e.g. SHOW)
-  DML, // Data modification e.g. INSERT
-  EXPLAIN,
-  TESTCASE, // For generating a testcase for QueryStmts.
-  LOAD, // Statement type for LOAD commands
-  SET,
-  ADMIN_FN // Admin function, e.g. ": shutdown()".
+  QUERY = 0
+  DDL = 1
+  DML = 2
+  EXPLAIN = 3
+  LOAD = 4
+  SET = 5
+  ADMIN_FN = 6
+  TESTCASE = 7
 }
 
 // Level of verboseness for "explain" output.
 enum TExplainLevel {
-  MINIMAL,
-  STANDARD,
-  EXTENDED,
-  VERBOSE
+  MINIMAL = 0
+  STANDARD = 1
+  EXTENDED = 2
+  VERBOSE = 3
 }
 
 enum TRuntimeFilterMode {
   // No filters are computed in the FE or the BE.
-  OFF,
+  OFF = 0
 
   // Only broadcast filters are computed in the BE, and are only published to the local
   // fragment.
-  LOCAL,
+  LOCAL = 1
 
   // All filters are computed in the BE, and are published globally.
-  GLOBAL
+  GLOBAL = 2
 }
 
 enum TPrefetchMode {
   // No prefetching at all.
-  NONE,
+  NONE = 0
 
   // Prefetch the hash table buckets.
-  HT_BUCKET
+  HT_BUCKET = 1
 }
 
 // A TNetworkAddress is the standard host, port representation of a
@@ -149,24 +153,24 @@ struct TUniqueId
 }
 
 enum TFunctionCategory {
-  SCALAR,
-  AGGREGATE,
-  ANALYTIC
+  SCALAR = 0
+  AGGREGATE = 1
+  ANALYTIC = 2
 }
 
 enum TFunctionBinaryType {
   // Impala builtin. We can either run this interpreted or via codegen
   // depending on the query option.
-  BUILTIN,
+  BUILTIN = 0
 
   // Java UDFs, loaded from *.jar
-  JAVA,
+  JAVA = 1
 
   // Native-interface, precompiled UDFs loaded from *.so
-  NATIVE,
+  NATIVE = 2
 
   // Native-interface, precompiled to IR; loaded from *.ll
-  IR,
+  IR = 3
 }
 
 // Represents a fully qualified function name.
@@ -37,12 +37,12 @@ struct Query
 typedef string LogContextId
 
 enum QueryState {
-  CREATED,
-  INITIALIZED,
-  COMPILED,
-  RUNNING,
-  FINISHED,
-  EXCEPTION
+  CREATED = 0
+  INITIALIZED = 1
+  COMPILED = 2
+  RUNNING = 3
+  FINISHED = 4
+  EXCEPTION = 5
 }
 
 struct QueryHandle {
@@ -99,11 +99,11 @@ exception QueryNotFoundException
 // Levels to use when displaying query options from Impala shell. REMOVED options should
 // not be displayed in the shell, but setting them is a warning rather than an error.
 enum TQueryOptionLevel {
-  REGULAR,
-  ADVANCED,
-  DEVELOPMENT,
-  DEPRECATED,
-  REMOVED
+  REGULAR = 0
+  ADVANCED = 1
+  DEVELOPMENT = 2
+  DEPRECATED = 3
+  REMOVED = 4
 }
 
 /** Represents a Hadoop-style configuration variable. */