====
---- QUERY
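# A bare SET lists query options and their current values.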
set
---- RESULTS: VERIFY_IS_SUBSET
'ABORT_ON_DEFAULT_LIMIT_EXCEEDED','0'
'ABORT_ON_ERROR','0'
'ALLOW_UNSUPPORTED_FORMATS','0'
'BATCH_SIZE','0'
'DEBUG_ACTION',''
'DEFAULT_ORDER_BY_LIMIT','-1'
'DISABLE_CACHED_READS','0'
'DISABLE_CODEGEN','0'
'DISABLE_OUTERMOST_TOPN','0'
'EXPLAIN_LEVEL','1'
'HBASE_CACHE_BLOCKS','0'
'HBASE_CACHING','0'
'MAX_ERRORS','0'
'MAX_IO_BUFFERS','0'
'MAX_SCAN_RANGE_LENGTH','0'
'MEM_LIMIT','0'
'NUM_NODES','0'
'NUM_SCANNER_THREADS','0'
'COMPRESSION_CODEC','NONE'
'PARQUET_FILE_SIZE','0'
'REQUEST_POOL',''
'RESERVATION_REQUEST_TIMEOUT','0'
'RM_INITIAL_MEM','0'
'SYNC_DDL','0'
'V_CPU_CORES','0'
---- TYPES
STRING, STRING
====
---- QUERY
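# Test that SET changes EXPLAIN_LEVEL and that the new value is reported by SET.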
set explain_level=3;
set;
---- RESULTS: VERIFY_IS_SUBSET
'ABORT_ON_DEFAULT_LIMIT_EXCEEDED','0'
'ABORT_ON_ERROR','0'
'ALLOW_UNSUPPORTED_FORMATS','0'
'BATCH_SIZE','0'
'DEBUG_ACTION',''
'DEFAULT_ORDER_BY_LIMIT','-1'
'DISABLE_CACHED_READS','0'
'DISABLE_CODEGEN','0'
'DISABLE_OUTERMOST_TOPN','0'
'EXPLAIN_LEVEL','3'
'HBASE_CACHE_BLOCKS','0'
'HBASE_CACHING','0'
'MAX_ERRORS','0'
'MAX_IO_BUFFERS','0'
'MAX_SCAN_RANGE_LENGTH','0'
'MEM_LIMIT','0'
'NUM_NODES','0'
'NUM_SCANNER_THREADS','0'
'COMPRESSION_CODEC','NONE'
'PARQUET_FILE_SIZE','0'
'REQUEST_POOL',''
'RESERVATION_REQUEST_TIMEOUT','0'
'RM_INITIAL_MEM','0'
'SYNC_DDL','0'
'V_CPU_CORES','0'
---- TYPES
STRING, STRING
====
---- QUERY
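# Test that EXPLAIN_LEVEL also accepts a quoted string value.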
set explain_level='0';
set;
---- RESULTS: VERIFY_IS_SUBSET
'ABORT_ON_DEFAULT_LIMIT_EXCEEDED','0'
'ABORT_ON_ERROR','0'
'ALLOW_UNSUPPORTED_FORMATS','0'
'BATCH_SIZE','0'
'DEBUG_ACTION',''
'DEFAULT_ORDER_BY_LIMIT','-1'
'DISABLE_CACHED_READS','0'
'DISABLE_CODEGEN','0'
'DISABLE_OUTERMOST_TOPN','0'
'EXPLAIN_LEVEL','0'
'HBASE_CACHE_BLOCKS','0'
'HBASE_CACHING','0'
'MAX_ERRORS','0'
'MAX_IO_BUFFERS','0'
'MAX_SCAN_RANGE_LENGTH','0'
'MEM_LIMIT','0'
'NUM_NODES','0'
'NUM_SCANNER_THREADS','0'
'COMPRESSION_CODEC','NONE'
'PARQUET_FILE_SIZE','0'
'REQUEST_POOL',''
'RESERVATION_REQUEST_TIMEOUT','0'
'RM_INITIAL_MEM','0'
'SYNC_DDL','0'
'V_CPU_CORES','0'
---- TYPES
STRING, STRING
====
---- QUERY
# IMPALA-1906: Test that SET changes PARQUET_FILE_SIZE only if it's less than 2GB.
set parquet_file_size='1.5g';
set;
---- RESULTS: VERIFY_IS_SUBSET
'ABORT_ON_DEFAULT_LIMIT_EXCEEDED','0'
'ABORT_ON_ERROR','0'
'ALLOW_UNSUPPORTED_FORMATS','0'
'BATCH_SIZE','0'
'DEBUG_ACTION',''
'DEFAULT_ORDER_BY_LIMIT','-1'
'DISABLE_CACHED_READS','0'
'DISABLE_CODEGEN','0'
'DISABLE_OUTERMOST_TOPN','0'
'EXPLAIN_LEVEL','1'
'HBASE_CACHE_BLOCKS','0'
'HBASE_CACHING','0'
'MAX_ERRORS','0'
'MAX_IO_BUFFERS','0'
'MAX_SCAN_RANGE_LENGTH','0'
'MEM_LIMIT','0'
'NUM_NODES','0'
'NUM_SCANNER_THREADS','0'
'COMPRESSION_CODEC','NONE'
'PARQUET_FILE_SIZE','1610612736'
'REQUEST_POOL',''
'RESERVATION_REQUEST_TIMEOUT','0'
'RM_INITIAL_MEM','0'
'SYNC_DDL','0'
'V_CPU_CORES','0'
---- TYPES
STRING, STRING
====
---- QUERY
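# IMPALA-1906: a PARQUET_FILE_SIZE of 2GB or more is rejected.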
set parquet_file_size='2g'
---- CATCH
The PARQUET_FILE_SIZE query option must be less than 2GB.
====
---- QUERY
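# Test that setting an unknown query option is an error.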
set foo=bar
---- CATCH
Invalid query option: foo
====
---- QUERY
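# Test that a known option rejects an invalid value.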
set parquet_compression_codec=bar
---- CATCH
Invalid compression codec: bar
====
---- QUERY
# Test that SET actually does change the mem_limit.
# First, show mem_limit is not hit.
select 1
---- RESULTS
1
====
---- QUERY
# Set mem_limit really small so that queries will fail.
set mem_limit=1;
select count(string_col) from functional.alltypestiny
---- CATCH
Memory limit exceeded
====
---- QUERY
# Set mem_limit back to unlimited and query should succeed again.
set mem_limit=0;
select count(string_col) from functional.alltypestiny
---- RESULTS
8
---- TYPES
BIGINT
====
---- QUERY
# IMPALA-3334: 'optimize_partition_key_scans' is a boolean query option
set explain_level=0;
set optimize_partition_key_scans=true;
explain select min(month), max(year), ndv(day) from functional.alltypesagg;
---- RESULTS: VERIFY_IS_SUBSET
'01:AGGREGATE [FINALIZE]'
'00:UNION'
' constant-operands=11'
====
---- QUERY
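# Setting the boolean option with 1 behaves the same as true.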
set explain_level=0;
set optimize_partition_key_scans=1;
explain select min(month), max(year), ndv(day) from functional.alltypesagg;
---- RESULTS: VERIFY_IS_SUBSET
'01:AGGREGATE [FINALIZE]'
'00:UNION'
' constant-operands=11'
====
---- QUERY
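# With the optimization disabled, the plan scans the table instead.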
set explain_level=0;
set optimize_partition_key_scans=false;
explain select min(month), max(year), ndv(day) from functional.alltypesagg;
---- RESULTS: VERIFY_IS_SUBSET
'03:AGGREGATE [FINALIZE]'
'02:EXCHANGE [UNPARTITIONED]'
'01:AGGREGATE'
'00:SCAN HDFS [functional.alltypesagg]'
====
---- QUERY
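# Setting the boolean option with 0 behaves the same as false.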
set explain_level=0;
set optimize_partition_key_scans=0;
explain select min(month), max(year), ndv(day) from functional.alltypesagg;
---- RESULTS: VERIFY_IS_SUBSET
'03:AGGREGATE [FINALIZE]'
'02:EXCHANGE [UNPARTITIONED]'
'01:AGGREGATE'
'00:SCAN HDFS [functional.alltypesagg]'
====
---- QUERY
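# With streaming preaggregations enabled, the first aggregation runs in
# streaming mode (01:AGGREGATE [STREAMING]).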
set explain_level=0;
set disable_streaming_preaggregations=false;
explain select count(distinct double_col) from functional.alltypesagg;
---- RESULTS: VERIFY_IS_SUBSET
'06:AGGREGATE [FINALIZE]'
'05:EXCHANGE [UNPARTITIONED]'
'02:AGGREGATE'
'04:AGGREGATE'
'03:EXCHANGE [HASH(double_col)]'
'01:AGGREGATE [STREAMING]'
'00:SCAN HDFS [functional.alltypesagg]'
====
---- QUERY
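# Setting the option with 0 behaves the same as false.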
set explain_level=0;
set disable_streaming_preaggregations=0;
explain select count(distinct double_col) from functional.alltypesagg;
---- RESULTS: VERIFY_IS_SUBSET
'06:AGGREGATE [FINALIZE]'
'05:EXCHANGE [UNPARTITIONED]'
'02:AGGREGATE'
'04:AGGREGATE'
'03:EXCHANGE [HASH(double_col)]'
'01:AGGREGATE [STREAMING]'
'00:SCAN HDFS [functional.alltypesagg]'
====
---- QUERY
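# Disabling streaming preaggregations removes the STREAMING annotation
# from the plan.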
set explain_level=0;
set disable_streaming_preaggregations=true;
explain select count(distinct double_col) from functional.alltypesagg;
---- RESULTS: VERIFY_IS_SUBSET
'06:AGGREGATE [FINALIZE]'
'05:EXCHANGE [UNPARTITIONED]'
'02:AGGREGATE'
'04:AGGREGATE'
'03:EXCHANGE [HASH(double_col)]'
'01:AGGREGATE'
'00:SCAN HDFS [functional.alltypesagg]'
====
---- QUERY
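# Setting the option with 1 behaves the same as true.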
set explain_level=0;
set disable_streaming_preaggregations=1;
explain select count(distinct double_col) from functional.alltypesagg;
---- RESULTS: VERIFY_IS_SUBSET
'06:AGGREGATE [FINALIZE]'
'05:EXCHANGE [UNPARTITIONED]'
'02:AGGREGATE'
'04:AGGREGATE'
'03:EXCHANGE [HASH(double_col)]'
'01:AGGREGATE'
'00:SCAN HDFS [functional.alltypesagg]'
====