Mirror of https://github.com/apache/impala.git (synced 2025-12-19 18:12:08 -05:00)
IMPALA-13548: Schedule scan ranges oldest to newest for tuple caching

Scheduling does not sort scan ranges by modification time. When a new
file is added to a table, its position in the list of scan ranges is
determined by which partition it belongs to and by its filename, not by
when it was written. A new file that lands early in the list can cause
cascading differences in scheduling. For tuple caching, this means that
adding a single file can change multiple runtime cache keys.

To minimize that disruption, this adds the ability to sort scan ranges
by modification time and schedule them oldest to newest. It is enabled
for scan nodes that feed into tuple cache nodes (similar to
deterministic scan range assignment).

Testing:
 - Modified TestTupleCacheFullCluster::test_scan_range_distributed to
   have stricter checks about how many cache keys change after an
   insert (only one should change).
 - Modified TupleCacheTest#testDeterministicScheduling to verify that
   oldest to newest scheduling is also enabled.

Change-Id: Ia4108c7a00c6acf8bbfc036b2b76e7c02ae44d47
Reviewed-on: http://gerrit.cloudera.org:8080/23228
Reviewed-by: Michael Smith <michael.smith@cloudera.com>
Tested-by: Impala Public Jenkins <impala-public-jenkins@cloudera.com>
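The motivation is easiest to see with a toy model. The sketch below is not
Impala code: FileSplit and AssignRoundRobin are invented stand-ins for the
real scan range and assignment machinery. It shows the property the commit
relies on: under mtime-ascending ordering, a newly added file sorts to the
tail, so every pre-existing scan range keeps its position (and hence its
assignment and derived cache key), whereas name-based ordering would shift
every range after the insertion point.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

struct FileSplit {
  std::string path;
  int64_t mtime;  // file modification time
};

// Round-robin by list position: split i goes to host i % num_hosts.
std::vector<int> AssignRoundRobin(const std::vector<FileSplit>& splits,
    int num_hosts) {
  std::vector<int> hosts(splits.size());
  for (size_t i = 0; i < splits.size(); ++i) {
    hosts[i] = static_cast<int>(i % num_hosts);
  }
  return hosts;
}

int main() {
  // Three existing files; mtimes do not follow filename order.
  std::vector<FileSplit> files = {{"part=1/aaa.parq", 100},
      {"part=1/zzz.parq", 300}, {"part=2/mmm.parq", 200}};
  auto by_mtime = [](const FileSplit& a, const FileSplit& b) {
    return a.mtime < b.mtime;
  };
  std::sort(files.begin(), files.end(), by_mtime);

  // A new file arrives: early by partition/filename, but newest by mtime.
  files.push_back({"part=1/bbb.parq", 400});
  std::sort(files.begin(), files.end(), by_mtime);

  // The pre-existing files keep positions 0..2, so their hosts are
  // unchanged; only the new file at the tail gets a fresh assignment.
  // Sorting by path would instead slot bbb.parq at index 1, shifting
  // zzz.parq and mmm.parq to new hosts.
  std::vector<int> hosts = AssignRoundRobin(files, 3);
  for (size_t i = 0; i < files.size(); ++i) {
    std::cout << files[i].path << " -> host " << hosts[i] << "\n";
  }
  return 0;
}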
Committed by: Impala Public Jenkins
Parent: 57eb5f653b
Commit: e05d92cb3d
@@ -206,22 +206,44 @@ Status Scheduler::ComputeScanRangeAssignment(
     bool node_random_replica = node.__isset.hdfs_scan_node
         && node.hdfs_scan_node.__isset.random_replica
         && node.hdfs_scan_node.random_replica;
+    bool node_schedule_oldest_to_newest = node.__isset.hdfs_scan_node
+        && node.hdfs_scan_node.__isset.schedule_scanranges_oldest_to_newest
+        && node.hdfs_scan_node.schedule_scanranges_oldest_to_newest;
 
     FragmentScanRangeAssignment* assignment =
         &state->GetFragmentScheduleState(fragment.idx)->scan_range_assignment;
 
-    const vector<TScanRangeLocationList>* locations = nullptr;
+    const vector<TScanRangeLocationList>* locations = &entry.second.concrete_ranges;
     vector<TScanRangeLocationList> expanded_locations;
-    if (entry.second.split_specs.empty()) {
-      // directly use the concrete ranges.
-      locations = &entry.second.concrete_ranges;
-    } else {
-      // union concrete ranges and expanded specs.
+    // Copy the ranges to a separate vector if:
+    // 1. There are split specs to union with the concrete ranges
+    // 2. We're scheduling oldest to newest and need to sort the ranges without
+    //    changing the original vector
+    if (!entry.second.split_specs.empty() || node_schedule_oldest_to_newest) {
+      locations = &expanded_locations;
       expanded_locations.insert(expanded_locations.end(),
           entry.second.concrete_ranges.begin(), entry.second.concrete_ranges.end());
-      RETURN_IF_ERROR(
-          GenerateScanRanges(entry.second.split_specs, &expanded_locations));
-      locations = &expanded_locations;
+      // union concrete ranges and expanded specs
+      if (!entry.second.split_specs.empty()) {
+        RETURN_IF_ERROR(
+            GenerateScanRanges(entry.second.split_specs, &expanded_locations));
+      }
     }
+    if (node_schedule_oldest_to_newest) {
+      DCHECK_GE(expanded_locations.size(),
+          entry.second.concrete_ranges.size() + entry.second.split_specs.size());
+      // This only makes sense for HDFS scan nodes
+      DCHECK(node.__isset.hdfs_scan_node);
+      // Sort the scan ranges by modification time ascending
+      std::sort(expanded_locations.begin(), expanded_locations.end(),
+          [](const TScanRangeLocationList& scanRange1,
+              const TScanRangeLocationList& scanRange2) {
+            DCHECK(scanRange1.scan_range.__isset.hdfs_file_split);
+            const THdfsFileSplit& split1 = scanRange1.scan_range.hdfs_file_split;
+            DCHECK(scanRange2.scan_range.__isset.hdfs_file_split);
+            const THdfsFileSplit& split2 = scanRange2.scan_range.hdfs_file_split;
+            return split1.mtime < split2.mtime;
+          });
+    }
     DCHECK(locations != nullptr);
     RETURN_IF_ERROR(
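For reference, here is a self-contained version of the comparator logic the
hunk adds. The Thrift-generated types (TScanRangeLocationList, TScanRange,
THdfsFileSplit) are replaced with stripped-down stand-ins carrying only the
fields the comparator reads, and the __isset machinery is approximated by a
plain bool, so this is a sketch of the technique rather than the production
code.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct THdfsFileSplit {
  int64_t mtime = 0;  // file modification time of the underlying HDFS file
};

struct TScanRange {
  bool hdfs_file_split_isset = false;  // stand-in for __isset.hdfs_file_split
  THdfsFileSplit hdfs_file_split;
};

struct TScanRangeLocationList {
  TScanRange scan_range;
};

// Sort scan ranges by modification time ascending, oldest first.
void SortOldestToNewest(std::vector<TScanRangeLocationList>* locations) {
  std::sort(locations->begin(), locations->end(),
      [](const TScanRangeLocationList& a, const TScanRangeLocationList& b) {
        // Only HDFS scan ranges carry an mtime; mirrors the DCHECKs above.
        assert(a.scan_range.hdfs_file_split_isset);
        assert(b.scan_range.hdfs_file_split_isset);
        return a.scan_range.hdfs_file_split.mtime
            < b.scan_range.hdfs_file_split.mtime;
      });
}

int main() {
  std::vector<TScanRangeLocationList> ranges(3);
  ranges[0].scan_range = {true, {300}};
  ranges[1].scan_range = {true, {100}};
  ranges[2].scan_range = {true, {200}};
  SortOldestToNewest(&ranges);
  // Now ordered 100, 200, 300: the oldest file is scheduled first.
  return 0;
}

One design note: std::sort makes no stability guarantee, so ranges sharing
an identical mtime may be reordered relative to the input. That is
presumably acceptable here because the comparator's input order is itself
deterministic, but it is worth keeping in mind when reasoning about
cache-key stability for files written with the same mtime.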