mirror of https://github.com/apache/impala.git
IMPALA-8234: Fix ordering of Thrift enum, fix enum values, add warning
IMPALA-7694 added a field in the middle of the Metrics.TUnit enum, which broke backwards compatibility with profiles that had been written by older versions of Impala. This change fixes the ordering by moving the field to the end of the enum. Additionally, it adds a warning to the top of all Thrift files that are part of the binary profile format, and a note of caution to the main definition in RuntimeProfile.thrift. This change also fixes the order of all enums in our Thrift files to make errors like this less likely in the future.

Change-Id: If215f16a636008757ceb439edbd6900a1be88c59
Reviewed-on: http://gerrit.cloudera.org:8080/12543
Reviewed-by: Impala Public Jenkins <impala-public-jenkins@cloudera.com>
Tested-by: Impala Public Jenkins <impala-public-jenkins@cloudera.com>
committed by Impala Public Jenkins
parent f9ae897c4b
commit 91d8a8f628
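To make the breakage concrete before the diff: Thrift serializes an enum field as its integer value, so inserting a member mid-enum silently shifts every later member, and an old decoder maps bytes written by a new Impala to the wrong names. The sketch below uses a hypothetical TExampleUnit (suffixed V0/Broken/Fixed so the snippet stays valid Thrift) rather than the real Metrics.TUnit, but it mirrors what IMPALA-7694 did and what this commit undoes:

// Original layout: implicit numbering 0..2. A profile that records
// BYTES serializes the integer 2.
enum TExampleUnitV0 {
  UNIT,           // = 0
  CPU_TICKS,      // = 1
  BYTES           // = 2
}

// Broken: inserting a member mid-enum renumbers BYTES to 3, so an old
// decoder reading a new profile now interprets a stored 2 as
// BASIS_POINTS instead of BYTES.
enum TExampleUnitBroken {
  UNIT,           // = 0
  CPU_TICKS,      // = 1
  BASIS_POINTS,   // = 2 (takes over the value BYTES used to have)
  BYTES           // = 3
}

// Safe: append at the end and pin every value explicitly, as this commit
// does throughout, so implicit renumbering can never happen by accident.
enum TExampleUnitFixed {
  UNIT = 0
  CPU_TICKS = 1
  BYTES = 2
  BASIS_POINTS = 3
}

This is why the comments added by the diff insist on appending at the end and writing explicit values: the serialized integers, not the names, are the compatibility contract.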
@@ -19,8 +19,8 @@ namespace cpp impala
 namespace java org.apache.impala.thrift
 
 enum TReservedWordsVersion {
-  IMPALA_2_11
-  IMPALA_3_0
+  IMPALA_2_11 = 0
+  IMPALA_3_0 = 1
 }
 
 // Used to pass gflags from backend to frontend, JniCatalog and JniFrontend
@@ -29,82 +29,82 @@ include "hive_metastore.thrift"
 enum TCatalogObjectType {
   // UNKNOWN is used to indicate an error condition when converting
   // strings to their matching TCatalogObjectType.
-  UNKNOWN,
-  CATALOG,
-  DATABASE,
-  TABLE,
-  VIEW,
-  FUNCTION,
-  DATA_SOURCE,
-  PRINCIPAL,
-  PRIVILEGE,
-  HDFS_CACHE_POOL,
+  UNKNOWN = 0
+  CATALOG = 1
+  DATABASE = 2
+  TABLE = 3
+  VIEW = 4
+  FUNCTION = 5
+  DATA_SOURCE = 6
+  PRINCIPAL = 7
+  PRIVILEGE = 8
+  HDFS_CACHE_POOL = 9
 }
 
 enum TTableType {
-  HDFS_TABLE,
-  HBASE_TABLE,
-  VIEW,
-  DATA_SOURCE_TABLE,
-  KUDU_TABLE,
+  HDFS_TABLE = 0
+  HBASE_TABLE = 1
+  VIEW = 2
+  DATA_SOURCE_TABLE = 3
+  KUDU_TABLE = 4
 }
 
 // TODO: Separate the storage engines (e.g. Kudu) from the file formats.
 // TODO: Make the names consistent with the file format keywords specified in
 // the parser.
 enum THdfsFileFormat {
-  TEXT,
-  RC_FILE,
-  SEQUENCE_FILE,
-  AVRO,
-  PARQUET,
-  KUDU,
-  ORC
+  TEXT = 0
+  RC_FILE = 1
+  SEQUENCE_FILE = 2
+  AVRO = 3
+  PARQUET = 4
+  KUDU = 5
+  ORC = 6
 }
 
 // TODO: Since compression is also enabled for Kudu columns, we should
 // rename this enum to not be Hdfs specific.
 enum THdfsCompression {
-  NONE,
-  DEFAULT,
-  GZIP,
-  DEFLATE,
-  BZIP2,
-  SNAPPY,
-  SNAPPY_BLOCKED,
-  LZO,
-  LZ4,
-  ZLIB,
-  ZSTD
+  NONE = 0
+  DEFAULT = 1
+  GZIP = 2
+  DEFLATE = 3
+  BZIP2 = 4
+  SNAPPY = 5
+  SNAPPY_BLOCKED = 6
+  LZO = 7
+  LZ4 = 8
+  ZLIB = 9
+  ZSTD = 10
 }
 
 enum TColumnEncoding {
-  AUTO,
-  PLAIN,
-  PREFIX,
-  GROUP_VARINT,
-  RLE,
-  DICTIONARY,
-  BIT_SHUFFLE
+  AUTO = 0
+  PLAIN = 1
+  PREFIX = 2
+  GROUP_VARINT = 3
+  RLE = 4
+  DICTIONARY = 5
+  BIT_SHUFFLE = 6
 }
 
 enum THdfsSeqCompressionMode {
-  RECORD,
-  BLOCK
+  RECORD = 0
+  BLOCK = 1
 }
 
 // The table property type.
 enum TTablePropertyType {
-  TBL_PROPERTY,
-  SERDE_PROPERTY
+  TBL_PROPERTY = 0
+  SERDE_PROPERTY = 1
 }
 
 // The access level that is available to Impala on the Catalog object.
 enum TAccessLevel {
-  NONE,
-  READ_WRITE,
-  READ_ONLY,
-  WRITE_ONLY,
+  NONE = 0
+  READ_WRITE = 1
+  READ_ONLY = 2
+  WRITE_ONLY = 3
 }
 
 // Mapping from names defined by Avro to values in the THdfsCompression enum.
@@ -485,8 +485,8 @@ struct TDatabase {
 // Represents a principal type that maps to Sentry principal type.
 // https://github.com/apache/sentry/blob/3d062f39ce6a047138660a7b3d0024bde916c5b4/sentry-service/sentry-service-api/src/gen/thrift/gen-javabean/org/apache/sentry/api/service/thrift/TSentryPrincipalType.java
 enum TPrincipalType {
-  ROLE,
-  USER
+  ROLE = 0
+  USER = 1
 }
 
 // Represents a principal in an authorization policy.
@@ -508,23 +508,23 @@ struct TPrincipal {
 
 // The scope a TPrivilege applies to.
 enum TPrivilegeScope {
-  SERVER,
-  URI,
-  DATABASE,
-  TABLE,
-  COLUMN,
+  SERVER = 0
+  URI = 1
+  DATABASE = 2
+  TABLE = 3
+  COLUMN = 4
 }
 
 // The privilege level allowed.
 enum TPrivilegeLevel {
-  ALL,
-  INSERT,
-  SELECT,
-  REFRESH,
-  CREATE,
-  ALTER,
-  DROP,
-  OWNER
+  ALL = 0
+  INSERT = 1
+  SELECT = 2
+  REFRESH = 3
+  CREATE = 4
+  ALTER = 5
+  DROP = 6
+  OWNER = 7
 }
 
 // Represents a privilege in an authorization policy. Privileges contain the level
@@ -25,23 +25,23 @@ include "Descriptors.thrift"
 include "Partitions.thrift"
 
 enum TDataSinkType {
-  DATA_STREAM_SINK,
-  TABLE_SINK,
-  JOIN_BUILD_SINK,
-  PLAN_ROOT_SINK
+  DATA_STREAM_SINK = 0
+  TABLE_SINK = 1
+  JOIN_BUILD_SINK = 2
+  PLAN_ROOT_SINK = 3
 }
 
 enum TSinkAction {
-  INSERT,
-  UPDATE,
-  UPSERT,
-  DELETE
+  INSERT = 0
+  UPDATE = 1
+  UPSERT = 2
+  DELETE = 3
 }
 
 enum TTableSinkType {
-  HDFS,
-  HBASE,
-  KUDU
+  HDFS = 0
+  HBASE = 1
+  KUDU = 2
 }
 
 // Sink which forwards data to a remote plan fragment,
@@ -21,15 +21,19 @@ namespace java org.apache.impala.thrift
 include "Status.thrift"
 include "Types.thrift"
 
-enum TExecState {
-  REGISTERED = 0,
-  PLANNING = 1,
-  QUEUED = 2,
-  RUNNING = 3,
-  FINISHED = 4,
+// NOTE: The definitions in this file are part of the binary format of the Impala query
+// profiles. They should preserve backwards compatibility and as such some rules apply
+// when making changes. Please see RuntimeProfile.thrift for more details.
 
-  CANCELLED = 5,
-  FAILED = 6,
+enum TExecState {
+  REGISTERED = 0
+  PLANNING = 1
+  QUEUED = 2
+  RUNNING = 3
+  FINISHED = 4
+
+  CANCELLED = 5
+  FAILED = 6
 }
 
 // Execution stats for a single plan node.
@@ -21,25 +21,25 @@ namespace java org.apache.impala.thrift
 include "Types.thrift"
 
 enum TExprNodeType {
-  NULL_LITERAL,
-  BOOL_LITERAL,
-  INT_LITERAL,
-  FLOAT_LITERAL,
-  STRING_LITERAL,
-  DECIMAL_LITERAL,
-  TIMESTAMP_LITERAL,
-  CASE_EXPR,
-  COMPOUND_PRED,
-  IN_PRED,
-  IS_NULL_PRED,
-  LIKE_PRED,
-  SLOT_REF,
-  TUPLE_IS_NULL_PRED,
-  FUNCTION_CALL,
-  AGGREGATE_EXPR,
-  IS_NOT_EMPTY_PRED,
-  KUDU_PARTITION_EXPR,
-  VALID_TUPLE_ID_EXPR
+  NULL_LITERAL = 0
+  BOOL_LITERAL = 1
+  INT_LITERAL = 2
+  FLOAT_LITERAL = 3
+  STRING_LITERAL = 4
+  DECIMAL_LITERAL = 5
+  TIMESTAMP_LITERAL = 6
+  CASE_EXPR = 7
+  COMPOUND_PRED = 8
+  IN_PRED = 9
+  IS_NULL_PRED = 10
+  LIKE_PRED = 11
+  SLOT_REF = 12
+  TUPLE_IS_NULL_PRED = 13
+  FUNCTION_CALL = 14
+  AGGREGATE_EXPR = 15
+  IS_NOT_EMPTY_PRED = 16
+  KUDU_PARTITION_EXPR = 17
+  VALID_TUPLE_ID_EXPR = 18
 }
 
 struct TBoolLiteral {
@@ -78,16 +78,16 @@ struct TTimestampLiteral {
 // in any messages. This enum is here to provide a single definition that can be shared
 // by the front and backend.
 enum TExtractField {
-  INVALID_FIELD,
-  YEAR,
-  QUARTER,
-  MONTH,
-  DAY,
-  HOUR,
-  MINUTE,
-  SECOND,
-  MILLISECOND,
-  EPOCH
+  INVALID_FIELD = 0
+  YEAR = 1
+  QUARTER = 2
+  MONTH = 3
+  DAY = 4
+  HOUR = 5
+  MINUTE = 6
+  SECOND = 7
+  MILLISECOND = 8
+  EPOCH = 9
 }
 
 struct TInPredicate {
||||
@@ -53,7 +53,14 @@ struct TRowBatch {
|
||||
|
||||
// Comparison operators used in predicates.
|
||||
enum TComparisonOp {
|
||||
LT, LE, EQ, NE, GE, GT, DISTINCT_FROM, NOT_DISTINCT
|
||||
LT = 0
|
||||
LE = 1
|
||||
EQ = 2
|
||||
NE = 3
|
||||
GE = 4
|
||||
GT = 5
|
||||
DISTINCT_FROM = 6
|
||||
NOT_DISTINCT = 7
|
||||
}
|
||||
|
||||
// Binary predicates that can be pushed to the external data source and
|
||||
|
||||
@@ -154,13 +154,13 @@ struct TGetDataSrcsResult {
 enum TDescribeOutputStyle {
   // The default output style if no options are specified for
   // DESCRIBE DATABASE <db> and DESCRIBE <table>.
-  MINIMAL,
+  MINIMAL = 0
 
   // Output additional information on the database or table.
   // Set for both DESCRIBE DATABASE FORMATTED|EXTENDED <db>
   // and DESCRIBE FORMATTED|EXTENDED <table> statements.
-  EXTENDED,
-  FORMATTED
+  EXTENDED = 1
+  FORMATTED = 2
 }
 
 // Arguments to DescribeDb, which returns a list of properties for a given database.
@@ -213,10 +213,10 @@ struct TShowDbsParams {
 
 // Used by SHOW STATS and SHOW PARTITIONS to control what information is returned.
 enum TShowStatsOp {
-  TABLE_STATS,
-  COLUMN_STATS,
-  PARTITIONS,
-  RANGE_PARTITIONS
+  TABLE_STATS = 0
+  COLUMN_STATS = 1
+  PARTITIONS = 2
+  RANGE_PARTITIONS = 3
 }
 
 // Parameters for SHOW TABLE/COLUMN STATS and SHOW PARTITIONS commands
@@ -441,21 +441,21 @@ struct TQueryExecRequest {
 }
 
 enum TCatalogOpType {
-  SHOW_TABLES,
-  SHOW_DBS,
-  SHOW_STATS,
-  USE,
-  DESCRIBE_TABLE,
-  DESCRIBE_DB,
-  SHOW_FUNCTIONS,
-  RESET_METADATA,
-  DDL,
-  SHOW_CREATE_TABLE,
-  SHOW_DATA_SRCS,
-  SHOW_ROLES,
-  SHOW_GRANT_PRINCIPAL,
-  SHOW_FILES,
-  SHOW_CREATE_FUNCTION
+  SHOW_TABLES = 0
+  SHOW_DBS = 1
+  SHOW_STATS = 2
+  USE = 3
+  DESCRIBE_TABLE = 4
+  DESCRIBE_DB = 5
+  SHOW_FUNCTIONS = 6
+  RESET_METADATA = 7
+  DDL = 8
+  SHOW_CREATE_TABLE = 9
+  SHOW_DATA_SRCS = 10
+  SHOW_ROLES = 11
+  SHOW_GRANT_PRINCIPAL = 12
+  SHOW_FILES = 13
+  SHOW_CREATE_FUNCTION = 14
 }
 
 // TODO: Combine SHOW requests with a single struct that contains a field
@@ -540,7 +540,7 @@ struct TShutdownParams {
 
 // The type of administrative function to be executed.
 enum TAdminRequestType {
-  SHUTDOWN
+  SHUTDOWN = 0
 }
 
 // Parameters for administrative function statement. This is essentially a tagged union
@@ -554,13 +554,13 @@ struct TAdminRequest {
 
 // HiveServer2 Metadata operations (JniFrontend.hiveServer2MetadataOperation)
 enum TMetadataOpcode {
-  GET_TYPE_INFO,
-  GET_CATALOGS,
-  GET_SCHEMAS,
-  GET_TABLES,
-  GET_TABLE_TYPES,
-  GET_COLUMNS,
-  GET_FUNCTIONS
+  GET_TYPE_INFO = 0
+  GET_CATALOGS = 1
+  GET_SCHEMAS = 2
+  GET_TABLES = 3
+  GET_TABLE_TYPES = 4
+  GET_COLUMNS = 5
+  GET_FUNCTIONS = 6
 }
 
 // Input parameter to JniFrontend.hiveServer2MetadataOperation
@@ -669,9 +669,9 @@ struct TCacheJarResult {
 // A UDF may include optional prepare and close functions in addition the main evaluation
 // function. This enum distinguishes between these when doing a symbol lookup.
 enum TSymbolType {
-  UDF_EVALUATE,
-  UDF_PREPARE,
-  UDF_CLOSE,
+  UDF_EVALUATE = 0
+  UDF_PREPARE = 1
+  UDF_CLOSE = 2
 }
 
 // Parameters to pass to validate that the binary contains the symbol. If the
@@ -710,9 +710,9 @@ struct TSymbolLookupParams {
 }
 
 enum TSymbolLookupResultCode {
-  SYMBOL_FOUND,
-  BINARY_NOT_FOUND,
-  SYMBOL_NOT_FOUND,
+  SYMBOL_FOUND = 0
+  BINARY_NOT_FOUND = 1
+  SYMBOL_NOT_FOUND = 2
 }
 
 struct TSymbolLookupResult {
@@ -43,28 +43,28 @@ const i32 NUM_NODES_ALL_RACKS = -1
 const i32 INVALID_PLAN_NODE_ID = -1
 
 enum TParquetFallbackSchemaResolution {
-  POSITION,
-  NAME
+  POSITION = 0
+  NAME = 1
 }
 
 // The order of the enum values needs to be kept in sync with
 // ParquetMetadataUtils::ORDERED_ARRAY_ENCODINGS in parquet-metadata-utils.cc.
 enum TParquetArrayResolution {
-  THREE_LEVEL,
-  TWO_LEVEL,
-  TWO_LEVEL_THEN_THREE_LEVEL
+  THREE_LEVEL = 0
+  TWO_LEVEL = 1
+  TWO_LEVEL_THEN_THREE_LEVEL = 2
 }
 
 enum TJoinDistributionMode {
-  BROADCAST,
-  SHUFFLE
+  BROADCAST = 0
+  SHUFFLE = 1
 }
 
 // Consistency level options for Kudu scans.
 enum TKuduReadMode {
-  DEFAULT,
-  READ_LATEST,
-  READ_AT_SNAPSHOT
+  DEFAULT = 0
+  READ_LATEST = 1
+  READ_AT_SNAPSHOT = 2
 }
 
 // Query options that correspond to ImpalaService.ImpalaQueryOptions, with their
@@ -327,8 +327,8 @@ struct TQueryOptions {
 
 // Impala currently has two types of sessions: Beeswax and HiveServer2
 enum TSessionType {
-  BEESWAX,
-  HIVESERVER2
+  BEESWAX = 0
+  HIVESERVER2 = 1
 }
 
 // Per-client session state
@@ -540,7 +540,7 @@ struct TPlanFragmentInstanceCtx {
 // Service Protocol Details
 
 enum ImpalaInternalServiceVersion {
-  V1
+  V1 = 0
 }
 
 // The following contains the per-rpc structs for the parameters and the result.
@@ -35,24 +35,24 @@ include "RuntimeProfile.thrift"
 // - TQueryOptionsToMap()
 enum TImpalaQueryOptions {
   // if true, abort execution on the first error
-  ABORT_ON_ERROR,
+  ABORT_ON_ERROR = 0
 
   // maximum # of errors to be reported; Unspecified or 0 indicates backend default
-  MAX_ERRORS,
+  MAX_ERRORS = 1
 
-  // if true, disable llvm codegen
-  DISABLE_CODEGEN,
+  // if true disable llvm codegen
+  DISABLE_CODEGEN = 2
 
   // batch size to be used by backend; Unspecified or a size of 0 indicates backend
   // default
-  BATCH_SIZE,
+  BATCH_SIZE = 3
 
   // a per-machine approximate limit on the memory consumption of this query;
   // unspecified or a limit of 0 means no limit;
   // otherwise specified either as:
   // a) an int (= number of bytes);
   // b) a float followed by "M" (MB) or "G" (GB)
-  MEM_LIMIT,
+  MEM_LIMIT = 4
 
   // specifies the degree of parallelism with which to execute the query;
   // 1: single-node execution
@@ -62,20 +62,20 @@ enum TImpalaQueryOptions {
   // more nodes than numNodes with plan fragments for this query, but at most
   // numNodes would be active at any point in time)
   // Constants (NUM_NODES_ALL, NUM_NODES_ALL_RACKS) are defined in JavaConstants.thrift.
-  NUM_NODES,
+  NUM_NODES = 5
 
   // maximum length of the scan range; only applicable to HDFS scan range; Unspecified or
   // a length of 0 indicates backend default;
-  MAX_SCAN_RANGE_LENGTH,
+  MAX_SCAN_RANGE_LENGTH = 6
 
-  MAX_IO_BUFFERS, // Removed
+  MAX_IO_BUFFERS = 7 // Removed
 
   // Number of scanner threads.
-  NUM_SCANNER_THREADS,
+  NUM_SCANNER_THREADS = 8
 
-  ALLOW_UNSUPPORTED_FORMATS, // Removed
+  ALLOW_UNSUPPORTED_FORMATS = 9 // Removed
 
-  DEFAULT_ORDER_BY_LIMIT, // Removed
+  DEFAULT_ORDER_BY_LIMIT = 10 // Removed
 
   // DEBUG ONLY:
   // Accepted formats:
@@ -101,16 +101,16 @@ enum TImpalaQueryOptions {
   //
   // Only a single ExecNode action is allowed, but multiple global actions can be
   // specified. To specify multiple actions, separate them with "|".
-  DEBUG_ACTION,
+  DEBUG_ACTION = 11
 
-  ABORT_ON_DEFAULT_LIMIT_EXCEEDED, // Removed
+  ABORT_ON_DEFAULT_LIMIT_EXCEEDED = 12 // Removed
 
   // Compression codec when inserting into tables.
   // Valid values are "snappy", "gzip", "bzip2" and "none"
   // Leave blank to use default.
-  COMPRESSION_CODEC,
+  COMPRESSION_CODEC = 13
 
-  SEQ_COMPRESSION_MODE, // Removed
+  SEQ_COMPRESSION_MODE = 14 // Removed
 
   // HBase scan query option. If set and > 0, HBASE_CACHING is the value for
   // "hbase.client.Scan.setCaching()" when querying HBase table. Otherwise, use backend
@@ -118,7 +118,7 @@ enum TImpalaQueryOptions {
   // If the value is too high, then the hbase region server will have a hard time (GC
   // pressure and long response times). If the value is too small, then there will be
   // extra trips to the hbase region server.
-  HBASE_CACHING,
+  HBASE_CACHING = 15
 
   // HBase scan query option. If set, HBase scan will always set
   // "hbase.client.setCacheBlocks" to CACHE_BLOCKS. Default is false.
@@ -126,134 +126,134 @@ enum TImpalaQueryOptions {
   // avoid polluting the cache in the hbase region server.
   // If the table is small and the table is used several time, set it to true to improve
   // performance.
-  HBASE_CACHE_BLOCKS,
+  HBASE_CACHE_BLOCKS = 16
 
   // Target file size for inserts into parquet tables. 0 uses the default.
-  PARQUET_FILE_SIZE,
+  PARQUET_FILE_SIZE = 17
 
   // Level of detail for explain output (NORMAL, VERBOSE).
-  EXPLAIN_LEVEL,
+  EXPLAIN_LEVEL = 18
 
   // If true, waits for the result of all catalog operations to be processed by all
   // active impalad in the cluster before completing.
-  SYNC_DDL,
+  SYNC_DDL = 19
 
   // Request pool this request should be submitted to. If not set
   // the pool is determined based on the user.
-  REQUEST_POOL,
+  REQUEST_POOL = 20
 
-  V_CPU_CORES, // Removed
+  V_CPU_CORES = 21 // Removed
 
-  RESERVATION_REQUEST_TIMEOUT, // Removed
+  RESERVATION_REQUEST_TIMEOUT = 22 // Removed
 
   // if true, disables cached reads. This option has no effect if REPLICA_PREFERENCE is
   // configured.
   // TODO: IMPALA-4306: retire at compatibility-breaking version
-  DISABLE_CACHED_READS,
+  DISABLE_CACHED_READS = 23
 
   // Temporary testing flag
-  DISABLE_OUTERMOST_TOPN,
+  DISABLE_OUTERMOST_TOPN = 24
 
-  RM_INITIAL_MEM, // Removed
+  RM_INITIAL_MEM = 25 // Removed
 
   // Time, in s, before a query will be timed out if it is inactive. May not exceed
   // --idle_query_timeout if that flag > 0. If 0, falls back to --idle_query_timeout.
-  QUERY_TIMEOUT_S,
+  QUERY_TIMEOUT_S = 26
 
   // Test hook for spill to disk operators
-  BUFFER_POOL_LIMIT,
+  BUFFER_POOL_LIMIT = 27
 
   // Transforms all count(distinct) aggregations into NDV()
-  APPX_COUNT_DISTINCT,
+  APPX_COUNT_DISTINCT = 28
 
   // If true, allows Impala to internally disable spilling for potentially
   // disastrous query plans. Impala will excercise this option if a query
   // has no plan hints, and at least one table is missing relevant stats.
-  DISABLE_UNSAFE_SPILLS,
+  DISABLE_UNSAFE_SPILLS = 29
 
   // If the number of rows that are processed for a single query is below the
   // threshold, it will be executed on the coordinator only with codegen disabled
-  EXEC_SINGLE_NODE_ROWS_THRESHOLD,
+  EXEC_SINGLE_NODE_ROWS_THRESHOLD = 30
 
   // If true, use the table's metadata to produce the partition columns instead of table
   // scans whenever possible. This option is opt-in by default as this optimization may
   // produce different results than the scan based approach in some edge cases.
-  OPTIMIZE_PARTITION_KEY_SCANS,
+  OPTIMIZE_PARTITION_KEY_SCANS = 31
 
   // Prefered memory distance of replicas. This parameter determines the pool of replicas
   // among which scans will be scheduled in terms of the distance of the replica storage
   // from the impalad.
-  REPLICA_PREFERENCE,
+  REPLICA_PREFERENCE = 32
 
   // Enables random backend selection during scheduling.
-  SCHEDULE_RANDOM_REPLICA,
+  SCHEDULE_RANDOM_REPLICA = 33
 
-  SCAN_NODE_CODEGEN_THRESHOLD, // Removed
+  SCAN_NODE_CODEGEN_THRESHOLD = 34 // Removed
 
   // If true, the planner will not generate plans with streaming preaggregations.
-  DISABLE_STREAMING_PREAGGREGATIONS,
+  DISABLE_STREAMING_PREAGGREGATIONS = 35
 
-  RUNTIME_FILTER_MODE,
+  RUNTIME_FILTER_MODE = 36
 
   // Size (in bytes) of a runtime Bloom Filter. Will be rounded up to nearest power of
   // two.
-  RUNTIME_BLOOM_FILTER_SIZE,
+  RUNTIME_BLOOM_FILTER_SIZE = 37
 
   // Time (in ms) to wait in scans for runtime filters to arrive.
-  RUNTIME_FILTER_WAIT_TIME_MS,
+  RUNTIME_FILTER_WAIT_TIME_MS = 38
 
   // If true, disable application of runtime filters to individual rows.
-  DISABLE_ROW_RUNTIME_FILTERING,
+  DISABLE_ROW_RUNTIME_FILTERING = 39
 
   // Maximum number of bloom runtime filters allowed per query.
-  MAX_NUM_RUNTIME_FILTERS,
+  MAX_NUM_RUNTIME_FILTERS = 40
 
   // If true, use UTF-8 annotation for string columns. Note that char and varchar columns
   // always use the annotation.
-  PARQUET_ANNOTATE_STRINGS_UTF8,
+  PARQUET_ANNOTATE_STRINGS_UTF8 = 41
 
   // Determines how to resolve Parquet files' schemas in the absence of field IDs (which
   // is always, since fields IDs are NYI). Valid values are "position" and "name".
-  PARQUET_FALLBACK_SCHEMA_RESOLUTION,
+  PARQUET_FALLBACK_SCHEMA_RESOLUTION = 42
 
   // Multi-threaded execution: degree of parallelism = number of active threads per
   // backend
-  MT_DOP,
+  MT_DOP = 43
 
   // If true, INSERT writes to S3 go directly to their final location rather than being
   // copied there by the coordinator. We cannot do this for INSERT OVERWRITES because for
   // those queries, the coordinator deletes all files in the final location before copying
   // the files there.
   // TODO: Find a way to get this working for INSERT OVERWRITEs too.
-  S3_SKIP_INSERT_STAGING,
+  S3_SKIP_INSERT_STAGING = 44
 
   // Maximum runtime bloom filter size, in bytes.
-  RUNTIME_FILTER_MAX_SIZE,
+  RUNTIME_FILTER_MAX_SIZE = 45
 
   // Minimum runtime bloom filter size, in bytes.
-  RUNTIME_FILTER_MIN_SIZE,
+  RUNTIME_FILTER_MIN_SIZE = 46
 
   // Prefetching behavior during hash tables' building and probing.
-  PREFETCH_MODE,
+  PREFETCH_MODE = 47
 
   // Additional strict handling of invalid data parsing and type conversions.
-  STRICT_MODE,
+  STRICT_MODE = 48
 
   // A limit on the amount of scratch directory space that can be used;
   // Unspecified or a limit of -1 means no limit;
   // Otherwise specified in the same way as MEM_LIMIT.
-  SCRATCH_LIMIT,
+  SCRATCH_LIMIT = 49
 
   // Indicates whether the FE should rewrite Exprs for optimization purposes.
   // It's sometimes useful to disable rewrites for testing, e.g., expr-test.cc.
-  ENABLE_EXPR_REWRITES,
+  ENABLE_EXPR_REWRITES = 50
 
   // Indicates whether to use the new decimal semantics, which includes better
   // rounding and output types for multiply / divide
-  DECIMAL_V2,
+  DECIMAL_V2 = 51
 
   // Indicates whether to use dictionary filtering for Parquet files
-  PARQUET_DICTIONARY_FILTERING,
+  PARQUET_DICTIONARY_FILTERING = 52
 
   // Policy for resolving nested array fields in Parquet files.
   // An Impala array type can have several different representations in
@@ -261,49 +261,49 @@ enum TImpalaQueryOptions {
   // between the two and three level encodings with index-based field resolution.
   // The ambiguity can manually be resolved using this query option, or by using
   // PARQUET_FALLBACK_SCHEMA_RESOLUTION=name.
-  PARQUET_ARRAY_RESOLUTION,
+  PARQUET_ARRAY_RESOLUTION = 53
 
   // Indicates whether to read statistics from Parquet files and use them during query
   // processing. This includes skipping data based on the statistics and computing query
   // results like "select min()".
-  PARQUET_READ_STATISTICS,
+  PARQUET_READ_STATISTICS = 54
 
   // Join distribution mode that is used when the join inputs have an unknown
   // cardinality, e.g., because of missing table statistics.
-  DEFAULT_JOIN_DISTRIBUTION_MODE,
+  DEFAULT_JOIN_DISTRIBUTION_MODE = 55
 
   // If the number of rows processed per node is below the threshold and disable_codegen
   // is unset, codegen will be automatically be disabled by the planner.
-  DISABLE_CODEGEN_ROWS_THRESHOLD,
+  DISABLE_CODEGEN_ROWS_THRESHOLD = 56
 
   // The default spillable buffer size, in bytes.
-  DEFAULT_SPILLABLE_BUFFER_SIZE,
+  DEFAULT_SPILLABLE_BUFFER_SIZE = 57
 
   // The minimum spillable buffer size, in bytes.
-  MIN_SPILLABLE_BUFFER_SIZE,
+  MIN_SPILLABLE_BUFFER_SIZE = 58
 
   // The maximum row size that memory is reserved for, in bytes.
-  MAX_ROW_SIZE,
+  MAX_ROW_SIZE = 59
 
   // The time, in seconds, that a session may be idle for before it is closed (and all
   // running queries cancelled) by Impala. If 0, idle sessions never expire.
-  IDLE_SESSION_TIMEOUT,
+  IDLE_SESSION_TIMEOUT = 60
 
   // Minimum number of bytes that will be scanned in COMPUTE STATS TABLESAMPLE,
   // regardless of the user-supplied sampling percent.
-  COMPUTE_STATS_MIN_SAMPLE_SIZE,
+  COMPUTE_STATS_MIN_SAMPLE_SIZE = 61
 
   // Time limit, in s, before a query will be timed out after it starts executing. Does
   // not include time spent in planning, scheduling or admission control. A value of 0
   // means no time limit.
-  EXEC_TIME_LIMIT_S,
+  EXEC_TIME_LIMIT_S = 62
 
   // When a query has both grouping and distinct exprs, impala can optionally include the
   // distinct exprs in the hash exchange of the first aggregation phase to spread the data
   // among more nodes. However, this plan requires another hash exchange on the grouping
   // exprs in the second phase which is not required when omitting the distinct exprs in
   // the first phase. Shuffling by both is better if the grouping exprs have low NDVs.
-  SHUFFLE_DISTINCT_EXPRS,
+  SHUFFLE_DISTINCT_EXPRS = 63
 
   // This only has an effect if memory-estimate-based admission control is enabled, i.e.
   // max_mem_resources is set for the pool and, *contrary to best practices*, MEM_LIMIT
@@ -311,51 +311,51 @@ enum TImpalaQueryOptions {
   // planner memory estimate) is used for admission control purposes. This provides a
   // workaround if the planner's memory estimate is too high and prevents a runnable
   // query from being admitted. 0 or -1 means this has no effect. Defaults to 0.
-  MAX_MEM_ESTIMATE_FOR_ADMISSION,
+  MAX_MEM_ESTIMATE_FOR_ADMISSION = 64
 
   // Admission control will reject queries when the number of reserved threads per backend
   // for the query exceeds this number. 0 or -1 means this has no effect.
-  THREAD_RESERVATION_LIMIT,
+  THREAD_RESERVATION_LIMIT = 65
 
   // Admission control will reject queries when the total number of reserved threads
   // across all backends for the query exceeds this number. 0 or -1 means this has no
   // effect.
-  THREAD_RESERVATION_AGGREGATE_LIMIT,
+  THREAD_RESERVATION_AGGREGATE_LIMIT = 66
 
   // Overrides the -kudu_read_mode flag to set the consistency level for Kudu scans.
   // Possible values are DEFAULT, READ_LATEST, and READ_AT_SNAPSHOT.
-  KUDU_READ_MODE,
+  KUDU_READ_MODE = 67
 
   // Allow reading of erasure coded files.
-  ALLOW_ERASURE_CODED_FILES,
+  ALLOW_ERASURE_CODED_FILES = 68
 
   // The timezone used in UTC<->localtime conversions. The default is the OS's timezone
   // at the coordinator, which can be overridden by environment variable $TZ.
-  TIMEZONE,
+  TIMEZONE = 69
 
   // Scan bytes limit, after which a query will be terminated with an error.
-  SCAN_BYTES_LIMIT,
+  SCAN_BYTES_LIMIT = 70
 
   // CPU time limit in seconds, after which a query will be terminated with an error.
   // Note that until IMPALA-7318 is fixed, CPU usage can be very stale and this may not
   // terminate queries soon enough.
-  CPU_LIMIT_S,
+  CPU_LIMIT_S = 71
 
   // The max number of estimated bytes a TopN operator is allowed to materialize, if the
   // planner thinks a TopN operator will exceed this limit, it falls back to a TotalSort
   // operator which is capable of spilling to disk (unlike the TopN operator which keeps
   // everything in memory). 0 or -1 means this has no effect.
-  TOPN_BYTES_LIMIT,
+  TOPN_BYTES_LIMIT = 72
 
   // An opaque string, not used by Impala itself, that can be used to identify
   // the client, like a User-Agent in HTTP. Drivers should set this to
   // their version number. May also be used by tests to help identify queries.
-  CLIENT_IDENTIFIER,
+  CLIENT_IDENTIFIER = 73
 
   // Probability to enable tracing of resource usage consumption on all fragment instance
   // executors of a query. Must be between 0 and 1 inclusive, 0 means no query will be
   // traced, 1 means all queries will be traced.
-  RESOURCE_TRACE_RATIO,
+  RESOURCE_TRACE_RATIO = 74
 
   // The maximum number of executor candidates to consider when scheduling remote
   // HDFS ranges. When non-zero, the scheduler generates a consistent set of
@@ -366,17 +366,17 @@ enum TImpalaQueryOptions {
   // This increases the efficiency of file-related caches (e.g. the HDFS file handle
   // cache). If set to 0, the number of executor candidates is unlimited, and remote
   // ranges will be scheduled across all executors.
-  NUM_REMOTE_EXECUTOR_CANDIDATES,
+  NUM_REMOTE_EXECUTOR_CANDIDATES = 75
 
   // A limit on the number of rows produced by the query. The query will be
   // canceled if the query is still executing after this limit is hit. A value
   // of 0 means there is no limit on the number of rows produced.
-  NUM_ROWS_PRODUCED_LIMIT
+  NUM_ROWS_PRODUCED_LIMIT = 76
 
   // Set when attempting to load a planner testcase. Typically used by developers for
   // debugging a testcase. Should not be set in user clusters. If set, a warning
   // is emitted in the query runtime profile.
-  PLANNER_TESTCASE_MODE
+  PLANNER_TESTCASE_MODE = 77
 }
 
 // The summary of a DML statement.
@@ -29,42 +29,42 @@ const i16 HDFS_DEFAULT_CACHE_REPLICATION_FACTOR = 1
 // Structs used to execute DDL operations using the JniCatalog.
 
 enum TDdlType {
-  ALTER_TABLE,
-  ALTER_VIEW,
-  CREATE_DATABASE,
-  CREATE_TABLE,
-  CREATE_TABLE_AS_SELECT,
-  CREATE_TABLE_LIKE,
-  CREATE_VIEW,
-  CREATE_FUNCTION,
-  COMPUTE_STATS,
-  DROP_DATABASE,
-  DROP_TABLE,
-  DROP_VIEW,
-  DROP_FUNCTION,
-  CREATE_DATA_SOURCE,
-  DROP_DATA_SOURCE,
-  DROP_STATS,
-  CREATE_ROLE,
-  DROP_ROLE,
-  GRANT_ROLE,
-  REVOKE_ROLE,
-  GRANT_PRIVILEGE,
-  REVOKE_PRIVILEGE,
-  TRUNCATE_TABLE,
-  COMMENT_ON,
-  ALTER_DATABASE,
-  COPY_TESTCASE
+  ALTER_TABLE = 0
+  ALTER_VIEW = 1
+  CREATE_DATABASE = 2
+  CREATE_TABLE = 3
+  CREATE_TABLE_AS_SELECT = 4
+  CREATE_TABLE_LIKE = 5
+  CREATE_VIEW = 6
+  CREATE_FUNCTION = 7
+  COMPUTE_STATS = 8
+  DROP_DATABASE = 9
+  DROP_TABLE = 10
+  DROP_VIEW = 11
+  DROP_FUNCTION = 12
+  CREATE_DATA_SOURCE = 13
+  DROP_DATA_SOURCE = 14
+  DROP_STATS = 15
+  CREATE_ROLE = 16
+  DROP_ROLE = 17
+  GRANT_ROLE = 18
+  REVOKE_ROLE = 19
+  GRANT_PRIVILEGE = 20
+  REVOKE_PRIVILEGE = 21
+  TRUNCATE_TABLE = 22
+  COMMENT_ON = 23
+  ALTER_DATABASE = 24
+  COPY_TESTCASE = 25
 }
 
 enum TOwnerType {
-  USER,
-  ROLE
+  USER = 0
+  ROLE = 1
 }
 
 // Types of ALTER DATABASE commands supported.
 enum TAlterDbType {
-  SET_OWNER
+  SET_OWNER = 0
 }
 
 // Parameters for ALTER DATABASE SET OWNER commands.
@@ -93,24 +93,24 @@ struct TAlterDbParams {
 
 // Types of ALTER TABLE commands supported.
 enum TAlterTableType {
-  ADD_COLUMNS,
-  REPLACE_COLUMNS,
-  ADD_PARTITION,
-  ADD_DROP_RANGE_PARTITION,
-  ALTER_COLUMN,
-  DROP_COLUMN,
-  DROP_PARTITION,
-  RENAME_TABLE,
-  RENAME_VIEW,
-  SET_FILE_FORMAT,
-  SET_LOCATION,
-  SET_TBL_PROPERTIES,
+  ADD_COLUMNS = 0
+  REPLACE_COLUMNS = 1
+  ADD_PARTITION = 2
+  ADD_DROP_RANGE_PARTITION = 3
+  ALTER_COLUMN = 4
+  DROP_COLUMN = 5
+  DROP_PARTITION = 6
+  RENAME_TABLE = 7
+  RENAME_VIEW = 8
+  SET_FILE_FORMAT = 9
+  SET_LOCATION = 10
+  SET_TBL_PROPERTIES = 11
   // Used internally by COMPUTE STATS and by ALTER TABLE SET COLUMN STATS.
-  UPDATE_STATS,
-  SET_CACHED,
-  RECOVER_PARTITIONS,
-  SET_ROW_FORMAT,
-  SET_OWNER
+  UPDATE_STATS = 12
+  SET_CACHED = 13
+  RECOVER_PARTITIONS = 14
+  SET_ROW_FORMAT = 15
+  SET_OWNER = 16
 }
 
 // Parameters of CREATE DATABASE commands
@@ -249,8 +249,8 @@ struct TAlterTableAddPartitionParams {
 }
 
 enum TRangePartitionOperationType {
-  ADD,
-  DROP
+  ADD = 0
+  DROP = 1
 }
 
 // Parameters for ALTER TABLE ADD/DROP RANGE PARTITION command
@@ -29,8 +29,8 @@ struct TVertex {
 }
 
 enum TEdgeType {
-  PROJECTION,
-  PREDICATE
+  PROJECTION = 0
+  PREDICATE = 1
 }
 
 struct TMultiEdge {
@@ -22,13 +22,13 @@ namespace java org.apache.impala.thrift
 
 // Convenience type to map between log4j levels and glog severity
 enum TLogLevel {
-  VLOG_3,
-  VLOG_2,
-  VLOG,
-  INFO,
-  WARN,
-  ERROR,
-  FATAL
+  VLOG_3 = 0
+  VLOG_2 = 1
+  VLOG = 2
+  INFO = 3
+  WARN = 4
+  ERROR = 5
+  FATAL = 6
 }
 
 // Helper structs for GetJavaLogLevel(), SetJavaLogLevel() methods.
@@ -18,36 +18,46 @@
 namespace cpp impala
 namespace java org.apache.impala.thrift
 
+// NOTE: The definitions in this file are part of the binary format of the Impala query
+// profiles. They should preserve backwards compatibility and as such some rules apply
+// when making changes. Please see RuntimeProfile.thrift for more details.
+
+
 // Metric and counter data types.
+//
+// WARNING (IMPALA-8236): Adding new values to TUnit and using them in TCounter will break
+// old decoders of thrift profiles. The workaround is to only use the following units in
+// anything that is serialised into a TCounter:
+// UNIT, UNIT_PER_SECOND, CPU_TICKS, BYTES, BYTES_PER_SECOND, TIME_NS, DOUBLE_VALUE
 enum TUnit {
   // A dimensionless numerical quantity
-  UNIT,
+  UNIT = 0
   // Rate of a dimensionless numerical quantity
-  UNIT_PER_SECOND,
-  CPU_TICKS,
-  BYTES,
-  BYTES_PER_SECOND,
-  TIME_NS,
-  DOUBLE_VALUE,
+  UNIT_PER_SECOND = 1
+  CPU_TICKS = 2
+  BYTES = 3
+  BYTES_PER_SECOND = 4
+  TIME_NS = 5
+  DOUBLE_VALUE = 6
+  // No units at all, may not be a numerical quantity
+  NONE = 7
+  TIME_MS = 8
+  TIME_S = 9
+  TIME_US = 10
   // 100th of a percent, used to express ratios etc., range from 0 to 10000, pretty
   // printed as integer percentages from 0 to 100.
-  BASIS_POINTS,
-  // No units at all, may not be a numerical quantity
-  NONE,
-  TIME_MS,
-  TIME_S,
-  TIME_US
+  BASIS_POINTS = 11
 }
 
 // The kind of value that a metric represents.
 enum TMetricKind {
   // May go up or down over time
-  GAUGE,
+  GAUGE = 0
   // A strictly increasing value
-  COUNTER,
+  COUNTER = 1
   // Fixed; will never change
-  PROPERTY,
-  STATS,
-  SET,
-  HISTOGRAM
+  PROPERTY = 2
+  STATS = 3
+  SET = 4
+  HISTOGRAM = 5
 }
@@ -21,24 +21,24 @@ namespace java org.apache.impala.thrift
 include "Exprs.thrift"
 
 enum TPartitionType {
-  UNPARTITIONED,
+  UNPARTITIONED = 0
 
   // round-robin partition
-  RANDOM,
+  RANDOM = 1
 
   // unordered partition on a set of exprs
   // (partition bounds overlap)
-  HASH_PARTITIONED,
+  HASH_PARTITIONED = 2
 
   // ordered partition on a list of exprs
   // (partition bounds don't overlap)
-  RANGE_PARTITIONED,
+  RANGE_PARTITIONED = 3
 
   // use the partitioning scheme of a Kudu table
   // TODO: this is a special case now because Kudu supports multilevel partition
   // schemes. We should add something like lists of TDataPartitions to reflect that
   // and then this can be removed. (IMPALA-5255)
-  KUDU
+  KUDU = 4
 }
 
 // Specification of how a single logical data stream is partitioned.
@@ -31,61 +31,61 @@ include "Types.thrift"
 include "ExternalDataSource.thrift"
 
 enum TPlanNodeType {
-  HDFS_SCAN_NODE,
-  HBASE_SCAN_NODE,
-  HASH_JOIN_NODE,
-  AGGREGATION_NODE,
-  SORT_NODE,
-  EMPTY_SET_NODE,
-  EXCHANGE_NODE,
-  UNION_NODE,
-  SELECT_NODE,
-  NESTED_LOOP_JOIN_NODE,
-  DATA_SOURCE_NODE,
-  ANALYTIC_EVAL_NODE,
-  SINGULAR_ROW_SRC_NODE,
-  UNNEST_NODE,
-  SUBPLAN_NODE,
-  KUDU_SCAN_NODE,
-  CARDINALITY_CHECK_NODE,
-  MULTI_AGGREGATION_NODE
+  HDFS_SCAN_NODE = 0
+  HBASE_SCAN_NODE = 1
+  HASH_JOIN_NODE = 2
+  AGGREGATION_NODE = 3
+  SORT_NODE = 4
+  EMPTY_SET_NODE = 5
+  EXCHANGE_NODE = 6
+  UNION_NODE = 7
+  SELECT_NODE = 8
+  NESTED_LOOP_JOIN_NODE = 9
+  DATA_SOURCE_NODE = 10
+  ANALYTIC_EVAL_NODE = 11
+  SINGULAR_ROW_SRC_NODE = 12
+  UNNEST_NODE = 13
+  SUBPLAN_NODE = 14
+  KUDU_SCAN_NODE = 15
+  CARDINALITY_CHECK_NODE = 16
+  MULTI_AGGREGATION_NODE = 17
 }
 
 // phases of an execution node
 // must be kept in sync with tests/failure/test_failpoints.py
 enum TExecNodePhase {
-  PREPARE,
-  PREPARE_SCANNER,
-  OPEN,
-  GETNEXT,
-  GETNEXT_SCANNER,
-  CLOSE,
+  PREPARE = 0
+  PREPARE_SCANNER = 1
+  OPEN = 2
+  GETNEXT = 3
+  GETNEXT_SCANNER = 4
+  CLOSE = 5
   // After a scanner thread completes a range with an error but before it propagates the
   // error.
-  SCANNER_ERROR,
-  INVALID
+  SCANNER_ERROR = 6
+  INVALID = 7
 }
 
 // what to do when hitting a debug point (TImpalaQueryOptions.DEBUG_ACTION)
 enum TDebugAction {
-  WAIT,
-  FAIL,
-  INJECT_ERROR_LOG,
-  MEM_LIMIT_EXCEEDED,
+  WAIT = 0
+  FAIL = 1
+  INJECT_ERROR_LOG = 2
+  MEM_LIMIT_EXCEEDED = 3
   // A floating point number in range [0.0, 1.0] that gives the probability of denying
   // each reservation increase request after the initial reservation.
-  SET_DENY_RESERVATION_PROBABILITY,
+  SET_DENY_RESERVATION_PROBABILITY = 4
   // Delay for a short amount of time: 100ms
-  DELAY,
+  DELAY = 5
 }
 
 // Preference for replica selection
 enum TReplicaPreference {
-  CACHE_LOCAL,
-  CACHE_RACK,
-  DISK_LOCAL,
-  DISK_RACK,
-  REMOTE
+  CACHE_LOCAL = 0
+  CACHE_RACK = 1
+  DISK_LOCAL = 2
+  DISK_RACK = 3
+  REMOTE = 4
 }
 
 // Specification of a runtime filter target.
@@ -113,8 +113,8 @@ struct TRuntimeFilterTargetDesc {
 }
 
 enum TRuntimeFilterType {
-  BLOOM,
-  MIN_MAX
+  BLOOM = 0
+  MIN_MAX = 1
 }
 
 // Specification of a runtime filter.
@@ -323,22 +323,22 @@ struct TEqJoinCondition {
 }
 
 enum TJoinOp {
-  INNER_JOIN,
-  LEFT_OUTER_JOIN,
-  LEFT_SEMI_JOIN,
-  LEFT_ANTI_JOIN,
+  INNER_JOIN = 0
+  LEFT_OUTER_JOIN = 1
+  LEFT_SEMI_JOIN = 2
+  LEFT_ANTI_JOIN = 3
 
   // Similar to LEFT_ANTI_JOIN with special handling for NULLs for the join conjuncts
   // on the build side. Those NULLs are considered candidate matches, and therefore could
   // be rejected (ANTI-join), based on the other join conjuncts. This is in contrast
   // to LEFT_ANTI_JOIN where NULLs are not matches and therefore always returned.
-  NULL_AWARE_LEFT_ANTI_JOIN,
+  NULL_AWARE_LEFT_ANTI_JOIN = 4
 
-  RIGHT_OUTER_JOIN,
-  RIGHT_SEMI_JOIN,
-  RIGHT_ANTI_JOIN,
-  FULL_OUTER_JOIN,
-  CROSS_JOIN
+  RIGHT_OUTER_JOIN = 5
+  RIGHT_SEMI_JOIN = 6
+  RIGHT_ANTI_JOIN = 7
+  FULL_OUTER_JOIN = 8
+  CROSS_JOIN = 9
 }
 
 struct THashJoinNode {
@@ -426,13 +426,13 @@ struct TSortInfo {
 
 enum TSortType {
   // Sort the entire input.
-  TOTAL,
+  TOTAL = 0
 
   // Return the first N sorted elements.
-  TOPN,
+  TOPN = 1
 
   // Divide the input into batches, each of which is sorted individually.
-  PARTIAL
+  PARTIAL = 2
 }
 
 struct TSortNode {
@@ -445,21 +445,21 @@ struct TSortNode {
 
 enum TAnalyticWindowType {
   // Specifies the window as a logical offset
-  RANGE,
+  RANGE = 0
 
   // Specifies the window in physical units
-  ROWS
+  ROWS = 1
 }
 
 enum TAnalyticWindowBoundaryType {
   // The window starts/ends at the current row.
-  CURRENT_ROW,
+  CURRENT_ROW = 0
 
   // The window starts/ends at an offset preceding current row.
-  PRECEDING,
+  PRECEDING = 1
 
   // The window starts/ends at an offset following current row.
-  FOLLOWING
+  FOLLOWING = 2
 }
 
 struct TAnalyticWindowBoundary {
@@ -22,17 +22,26 @@ include "ExecStats.thrift"
 include "Metrics.thrift"
 include "Types.thrift"
 
+// NOTE: This file and the includes above define the format of Impala query profiles. As
+// newer versions of Impala should be able to read profiles written by older versions,
+// some best practices must be followed when making changes to the structures below:
+//
+// - Only append new values at the end of enums.
+// - Only add new fields at the end of structures, and always make them optional.
+// - Don't remove fields.
+// - Don't change the numbering of fields.
+
 // Represents the different formats a runtime profile can be represented in.
 enum TRuntimeProfileFormat {
   // Pretty printed.
-  STRING,
+  STRING = 0
 
   // The thrift profile, serialized, compressed, and encoded. Used for the query log.
   // See RuntimeProfile::SerializeToArchiveString.
-  BASE64,
+  BASE64 = 1
 
   // TRuntimeProfileTree.
-  THRIFT
+  THRIFT = 2
 }
 
 // Counter data
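The struct rules in the note added above rest on the same encoding facts as the enum rules. A hedged sketch with a hypothetical TExampleCounter (not a real Impala type): every Thrift struct field is written with its numeric ID, old readers skip IDs they don't know, and absent optional fields simply stay unset, so appending optional fields at the end is the safe way to evolve a profile struct:

// Hypothetical struct illustrating the rules above; not a real Impala type.
struct TExampleCounter {
  1: required string name
  2: required i64 value
  // Safe change: appended at the end, fresh field ID, marked optional. An
  // old decoder skips field 3; a new decoder reading an old profile sees it
  // as unset. Renumbering or removing fields 1-2 would break both directions.
  3: optional i64 min_value
}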
@@ -22,7 +22,7 @@ include "Status.thrift"
 include "Types.thrift"
 
 enum StatestoreServiceVersion {
-  V1
+  V1 = 0
 }
 
 // Structure serialized for the topic AdmissionController::IMPALA_REQUEST_QUEUE_TOPIC.
@@ -15,11 +15,15 @@
 // specific language governing permissions and limitations
 // under the License.
 
-include "ErrorCodes.thrift"
-
 namespace cpp impala
 namespace java org.apache.impala.thrift
 
+include "ErrorCodes.thrift"
+
+// NOTE: The definitions in this file are part of the binary format of the Impala query
+// profiles. They should preserve backwards compatibility and as such some rules apply
+// when making changes. Please see RuntimeProfile.thrift for more details.
+
 struct TStatus {
   1: required ErrorCodes.TErrorCode status_code
   2: list<string> error_msgs
@@ -18,6 +18,10 @@
 namespace cpp impala
 namespace java org.apache.impala.thrift
 
+// NOTE: The definitions in this file are part of the binary format of the Impala query
+// profiles. They should preserve backwards compatibility and as such some rules apply
+// when making changes. Please see RuntimeProfile.thrift for more details.
+
 typedef i64 TTimestamp
 typedef i32 TFragmentIdx
 typedef i32 TPlanNodeId
|
||||
// TODO: Consider moving unrelated enums to better locations.
|
||||
|
||||
enum TPrimitiveType {
|
||||
INVALID_TYPE,
|
||||
NULL_TYPE,
|
||||
BOOLEAN,
|
||||
TINYINT,
|
||||
SMALLINT,
|
||||
INT,
|
||||
BIGINT,
|
||||
FLOAT,
|
||||
DOUBLE,
|
||||
DATE,
|
||||
DATETIME,
|
||||
TIMESTAMP,
|
||||
STRING,
|
||||
BINARY, // Unsupported
|
||||
DECIMAL,
|
||||
CHAR,
|
||||
VARCHAR,
|
||||
FIXED_UDA_INTERMEDIATE,
|
||||
INVALID_TYPE = 0
|
||||
NULL_TYPE = 1
|
||||
BOOLEAN = 2
|
||||
TINYINT = 3
|
||||
SMALLINT = 4
|
||||
INT = 5
|
||||
BIGINT = 6
|
||||
FLOAT = 7
|
||||
DOUBLE = 8
|
||||
DATE = 9
|
||||
DATETIME = 10
|
||||
TIMESTAMP = 11
|
||||
STRING = 12
|
||||
BINARY = 13
|
||||
DECIMAL = 14
|
||||
CHAR = 15
|
||||
VARCHAR = 16
|
||||
FIXED_UDA_INTERMEDIATE = 17
|
||||
}
|
||||
|
||||
enum TTypeNodeType {
|
||||
SCALAR,
|
||||
ARRAY,
|
||||
MAP,
|
||||
STRUCT
|
||||
SCALAR = 0
|
||||
ARRAY = 1
|
||||
MAP = 2
|
||||
STRUCT = 3
|
||||
}
|
||||
|
||||
struct TScalarType {
|
||||
@@ -96,42 +100,42 @@ struct TColumnType {
|
||||
}
|
||||
|
||||
enum TStmtType {
|
||||
QUERY,
|
||||
DDL, // Data definition, e.g. CREATE TABLE (includes read-only functions e.g. SHOW)
|
||||
DML, // Data modification e.g. INSERT
|
||||
EXPLAIN,
|
||||
TESTCASE, // For generating a testcase for QueryStmts.
|
||||
LOAD, // Statement type for LOAD commands
|
||||
SET,
|
||||
ADMIN_FN // Admin function, e.g. ": shutdown()".
|
||||
QUERY = 0
|
||||
DDL = 1
|
||||
DML = 2
|
||||
EXPLAIN = 3
|
||||
LOAD = 4
|
||||
SET = 5
|
||||
ADMIN_FN = 6
|
||||
TESTCASE = 7
|
||||
}
|
||||
|
||||
// Level of verboseness for "explain" output.
|
||||
enum TExplainLevel {
|
||||
MINIMAL,
|
||||
STANDARD,
|
||||
EXTENDED,
|
||||
VERBOSE
|
||||
MINIMAL = 0
|
||||
STANDARD = 1
|
||||
EXTENDED = 2
|
||||
VERBOSE = 3
|
||||
}
|
||||
|
||||
enum TRuntimeFilterMode {
|
||||
// No filters are computed in the FE or the BE.
|
||||
OFF,
|
||||
OFF = 0
|
||||
|
||||
// Only broadcast filters are computed in the BE, and are only published to the local
|
||||
// fragment.
|
||||
LOCAL,
|
||||
LOCAL = 1
|
||||
|
||||
// All fiters are computed in the BE, and are published globally.
|
||||
GLOBAL
|
||||
GLOBAL = 2
|
||||
}
|
||||
|
||||
enum TPrefetchMode {
|
||||
// No prefetching at all.
|
||||
NONE,
|
||||
NONE = 0
|
||||
|
||||
// Prefetch the hash table buckets.
|
||||
HT_BUCKET
|
||||
HT_BUCKET = 1
|
||||
}
|
||||
|
||||
// A TNetworkAddress is the standard host, port representation of a
|
||||
@@ -149,24 +153,24 @@ struct TUniqueId {
 }
 
 enum TFunctionCategory {
-  SCALAR,
-  AGGREGATE,
-  ANALYTIC
+  SCALAR = 0
+  AGGREGATE = 1
+  ANALYTIC = 2
 }
 
 enum TFunctionBinaryType {
   // Impala builtin. We can either run this interpreted or via codegen
   // depending on the query option.
-  BUILTIN,
+  BUILTIN = 0
 
   // Java UDFs, loaded from *.jar
-  JAVA,
+  JAVA = 1
 
   // Native-interface, precompiled UDFs loaded from *.so
-  NATIVE,
+  NATIVE = 2
 
   // Native-interface, precompiled to IR; loaded from *.ll
-  IR,
+  IR = 3
 }
 
 // Represents a fully qualified function name.
@@ -37,12 +37,12 @@ struct Query {
 typedef string LogContextId
 
 enum QueryState {
-  CREATED,
-  INITIALIZED,
-  COMPILED,
-  RUNNING,
-  FINISHED,
-  EXCEPTION
+  CREATED = 0
+  INITIALIZED = 1
+  COMPILED = 2
+  RUNNING = 3
+  FINISHED = 4
+  EXCEPTION = 5
 }
 
 struct QueryHandle {
|
||||
// Levels to use when displaying query options from Impala shell. REMOVED options should
|
||||
// not be displayed in the shell, but setting them is a warning rather than an error.
|
||||
enum TQueryOptionLevel {
|
||||
REGULAR,
|
||||
ADVANCED,
|
||||
DEVELOPMENT,
|
||||
DEPRECATED,
|
||||
REMOVED
|
||||
REGULAR = 0
|
||||
ADVANCED = 1
|
||||
DEVELOPMENT = 2
|
||||
DEPRECATED = 3
|
||||
REMOVED = 4
|
||||
}
|
||||
|
||||
/** Represents a Hadoop-style configuration variable. */
|
||||
|
||||