For storage systems that support block location information (HDFS,
Ozone), we always retrieve it on the assumption that it can be used
for scheduling, i.e. for local reads. However, it is also common for
Impala not to be co-located with the storage system, even in on-prem
deployments, e.g. when Impala runs in containers; and even if the
containers are co-located with the storage nodes, we don't try to
figure out which container runs on which machine.
In such cases we should not reach out to the storage system to
collect block locations, because doing so can be very expensive for
large tables and provides no benefit. Since there is currently no
easy way to tell whether Impala is co-located with the storage
system, this patch adds configuration options to disable block
location retrieval during table loading.
It can be disabled globally via Hadoop Configuration:
'impala.preload-block-locations-for-scheduling': 'false'
We can also restrict the disabling to specific filesystem schemes, e.g.:
'impala.preload-block-locations-for-scheduling.scheme.hdfs': 'false'
When multiple storage systems are configured with the same scheme, we
can still control block location loading based on authority, e.g.:
'impala.preload-block-locations-for-scheduling.authority.mycluster': 'false'
The latter only disables block location loading for URIs like
'hdfs://mycluster/warehouse/tablespace/...'
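These are plain Hadoop Configuration properties. As a minimal sketch
only (the class name is made up for illustration and the values are
just examples), they could be set programmatically like this, e.g.
when building a Configuration for a test:

  import org.apache.hadoop.conf.Configuration;

  public class PreloadSwitchExample {
    // Builds a Configuration that keeps the global switch enabled but turns
    // block location preloading off for the 'hdfs' scheme and for the
    // 'mycluster' authority.
    public static Configuration exampleConf() {
      Configuration conf = new Configuration();
      conf.setBoolean("impala.preload-block-locations-for-scheduling", true);
      conf.setBoolean(
          "impala.preload-block-locations-for-scheduling.scheme.hdfs", false);
      conf.setBoolean(
          "impala.preload-block-locations-for-scheduling.authority.mycluster",
          false);
      return conf;
    }
  }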
If block location loading is disabled by any of the switches, it cannot
be re-enabled by another, i.e. the most restrictive setting prevails.
E.g.:
disable scheme 'hdfs', enable authority 'mycluster'
==> hdfs://mycluster/ is still disabled
disable globally, enable scheme 'hdfs', enable authority 'mycluster'
==> hdfs://mycluster/ is still disabled, as everything else is.
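Put differently, block locations are preloaded for a location only if
the global, per-scheme and per-authority switches are all enabled; a
single 'false' at any level wins. A rough sketch of that lookup (the
class and method names below are illustrative only, not the actual
FileSystemUtil code):

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;

  public class PreloadPrecedenceSketch {
    private static final String KEY =
        "impala.preload-block-locations-for-scheduling";

    // The most restrictive setting prevails: preloading stays enabled only if
    // the global, scheme and authority switches are all enabled (default true).
    public static boolean preloadBlockLocations(Configuration conf, URI location) {
      boolean enabled = conf.getBoolean(KEY, true);
      if (location.getScheme() != null) {
        enabled &= conf.getBoolean(KEY + ".scheme." + location.getScheme(), true);
      }
      if (location.getAuthority() != null) {
        enabled &=
            conf.getBoolean(KEY + ".authority." + location.getAuthority(), true);
      }
      return enabled;
    }
  }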
Testing:
* added unit tests for FileSystemUtil
* added unit tests for the file metadata loaders
* custom cluster tests with custom Hadoop configuration
Change-Id: I1c7a6a91f657c99792db885991b7677d2c240867
Reviewed-on: http://gerrit.cloudera.org:8080/23175
Reviewed-by: Impala Public Jenkins <impala-public-jenkins@cloudera.com>
Tested-by: Impala Public Jenkins <impala-public-jenkins@cloudera.com>
====
---- QUERY
select * from alltypes where id < 5;
---- RESULTS
0,true,0,0,0,0,0,0,'01/01/09','0',2009-01-01 00:00:00,2009,1
1,false,1,1,1,10,1.100000023841858,10.1,'01/01/09','1',2009-01-01 00:01:00,2009,1
2,true,2,2,2,20,2.200000047683716,20.2,'01/01/09','2',2009-01-01 00:02:00.100000000,2009,1
3,false,3,3,3,30,3.299999952316284,30.3,'01/01/09','3',2009-01-01 00:03:00.300000000,2009,1
4,true,4,4,4,40,4.400000095367432,40.4,'01/01/09','4',2009-01-01 00:04:00.600000000,2009,1
---- TYPES
INT, BOOLEAN, TINYINT, SMALLINT, INT, BIGINT, FLOAT, DOUBLE, STRING, STRING, TIMESTAMP, INT, INT
====
---- QUERY
select count(*) from alltypes;
---- RESULTS
7300
---- TYPES
BIGINT
====
---- QUERY
select count(*) from alltypes where id % 3 = 0;
---- RESULTS
2434
---- TYPES
BIGINT
====
---- QUERY
# 'lineitem_sixblocks' contains a single data file with six HDFS blocks. Without
# block information we schedule the whole data file to a single SCAN operator.
select count(*) from functional_parquet.lineitem_sixblocks where l_orderkey % 2 = 0;
---- RESULTS
19929
---- TYPES
BIGINT
---- RUNTIME_PROFILE
# The following should be in the ExecSummary
row_regex: 00:SCAN [A-Z0-9]+ +1 +1 +.*
====
---- QUERY
# 'iceberg_lineitem_sixblocks' contains a single data file with six HDFS blocks. Without
# block information we schedule the whole data file to a single SCAN operator.
select count(*) from functional_parquet.iceberg_lineitem_sixblocks where l_orderkey % 2 = 0;
---- RESULTS
9805
---- TYPES
BIGINT
---- RUNTIME_PROFILE
# The following should be in the ExecSummary
row_regex: 00:SCAN [A-Z0-9]+ +1 +1 +.*
====