This is similar to the single-node execution optimisation, but applies to slightly larger queries that should run in a distributed manner but won't benefit from codegen.

This adds a new query option disable_codegen_rows_threshold that defaults to 50,000. If fewer than this number of rows are processed by a plan node per impalad, the cost of codegen almost certainly outweighs the benefit.

Using rows processed as a threshold is justified by a simple model that assumes the cost of codegen and the cost of execution per row for the same operation are proportional. E.g. if x is the complexity of the operation, n is the number of rows processed, C is a constant factor giving the cost of codegen, and Ec/Ei are constant factors giving the cost of codegen'd and interpreted execution, then the cost of the codegen'd operator is C * x + Ec * x * n and the cost of the interpreted operator is Ei * x * n. Rearranging shows that interpretation is cheaper if n < C / (Ei - Ec), i.e. (at least in this simplified model) it makes sense to choose between interpretation and codegen based on a constant row-count threshold. The model also implies that it is somewhat safer to choose codegen, because the additional cost of codegen is O(1) while the additional cost of interpretation is O(n).

I ran some experiments with TPC-H Q1, varying the input table size, to determine the cut-over point at which codegen becomes beneficial. The cut-over was around 150k rows per node for both text and parquet. At 50k rows per node, disabling codegen was very beneficial - around 0.12s versus 0.24s. To be somewhat conservative I set the default threshold to 50k rows. On more complex queries, e.g. TPC-H Q10, the cut-over tends to be higher because some plan nodes process many fewer than the maximum number of rows.

Also fix a couple of minor issues in the frontend: the numNodes_ calculation could return 0 for Kudu, and the single-node optimization didn't correctly handle a scan node with conjuncts, a limit, and missing stats (it considered the estimate still valid).

Testing: Updated e2e tests that set disable_codegen to also set disable_codegen_rows_threshold to 0, so that those tests still run both with and without codegen. Added an e2e test to make sure that the optimisation is applied in the backend. Added planner tests for various cases where codegen should and shouldn't be disabled.

Perf: Added a targeted perf test for a join+agg over a small input, which benefits from this change.

Change-Id: I273bcee58641f5b97de52c0b2caab043c914b32e
Reviewed-on: http://gerrit.cloudera.org:8080/7153
Reviewed-by: Tim Armstrong <tarmstrong@cloudera.com>
Tested-by: Impala Public Jenkins
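To make the arithmetic concrete, here is a minimal sketch of that cost model in Python. The constants C, Ec and Ei below are made-up placeholders chosen only so that the cut-over lands at 50,000 rows; they are not measured Impala values.

# Minimal sketch of the cost model from the commit message above.
# The constants are illustrative placeholders, not measured values.
C = 50_000.0   # one-time cost of codegen (per unit of operator complexity)
Ec = 0.5       # per-row cost of codegen'd execution
Ei = 1.5       # per-row cost of interpreted execution

def cost_codegen(x, n):
    """Total cost of a codegen'd operator: one-time compile plus per-row work."""
    return C * x + Ec * x * n

def cost_interpreted(x, n):
    """Total cost of an interpreted operator: per-row work only."""
    return Ei * x * n

# Interpretation wins when Ei*x*n < C*x + Ec*x*n, i.e. n < C / (Ei - Ec).
# The operator complexity x cancels out, so the cut-over is a row-count
# threshold that does not depend on the operator.
threshold = C / (Ei - Ec)

for x in (1.0, 4.0):
    for n in (10_000, 100_000):
        better = "interpret" if cost_interpreted(x, n) < cost_codegen(x, n) else "codegen"
        assert better == ("interpret" if n < threshold else "codegen")
print(f"cut-over at n = {threshold:.0f} rows per node (for these constants)")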
====
---- QUERY
create table alltypes like functional_parquet.alltypes;
insert into alltypes partition(year, month)
select * from functional_parquet.alltypes where year = 2009;
====
---- QUERY
# No stats are available.
explain select id from alltypes;
---- RESULTS: VERIFY_IS_SUBSET
' stats-rows=unavailable extrapolated-rows=unavailable'
' table stats: rows=unavailable size=unavailable'
' column stats: unavailable'
' mem-estimate=16.00MB mem-reservation=0B'
' tuple-ids=0 row-size=4B cardinality=unavailable'
---- TYPES
STRING
====
---- QUERY
compute stats alltypes
---- RESULTS
'Updated 12 partition(s) and 11 column(s).'
---- TYPES
STRING
====
---- QUERY
# Stats are available now.
explain select id from alltypes;
---- RESULTS: VERIFY_IS_EQUAL
'Per-Host Resource Reservation: Memory=0B'
'Per-Host Resource Estimates: Memory=16.00MB'
'Codegen disabled by planner'
''
'F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1'
'PLAN-ROOT SINK'
'| mem-estimate=0B mem-reservation=0B'
'|'
'00:SCAN HDFS [$DATABASE.alltypes]'
row_regex:.*partitions=12/12 files=12 size=.*
' stats-rows=3650 extrapolated-rows=3650'
row_regex:.*table stats: rows=3650 size=.*
' column stats: all'
' mem-estimate=16.00MB mem-reservation=0B'
' tuple-ids=0 row-size=4B cardinality=3650'
---- TYPES
STRING
====
---- QUERY
# Select a subset of partitions.
explain select id from alltypes where month in (1, 2, 3);
---- RESULTS: VERIFY_IS_EQUAL
'Per-Host Resource Reservation: Memory=0B'
'Per-Host Resource Estimates: Memory=16.00MB'
'Codegen disabled by planner'
''
'F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1'
'PLAN-ROOT SINK'
'| mem-estimate=0B mem-reservation=0B'
'|'
'00:SCAN HDFS [$DATABASE.alltypes]'
row_regex:.*partitions=3/12 files=3 size=.*
' stats-rows=900 extrapolated-rows=904'
row_regex:.*table stats: rows=3650 size=.*
' column stats: all'
' mem-estimate=16.00MB mem-reservation=0B'
' tuple-ids=0 row-size=4B cardinality=904'
---- TYPES
STRING
====
---- QUERY
# Double the data in existing partitions.
insert into alltypes partition(year, month)
select * from functional_parquet.alltypes where year = 2009;
explain select id from alltypes;
---- RESULTS: VERIFY_IS_EQUAL
'Per-Host Resource Reservation: Memory=0B'
'Per-Host Resource Estimates: Memory=16.00MB'
''
'F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1'
'PLAN-ROOT SINK'
'| mem-estimate=0B mem-reservation=0B'
'|'
'00:SCAN HDFS [$DATABASE.alltypes]'
row_regex:.*partitions=12/12 files=24 size=.*
' stats-rows=3650 extrapolated-rows=7300'
row_regex:.*table stats: rows=3650 size=.*
' column stats: all'
' mem-estimate=16.00MB mem-reservation=0B'
' tuple-ids=0 row-size=4B cardinality=7300'
---- TYPES
STRING
====
---- QUERY
# Create new partitions and extrapolate their row count.
insert into alltypes partition(year, month)
select * from functional_parquet.alltypes where year = 2010;
explain select id from alltypes where year = 2010;
---- RESULTS: VERIFY_IS_EQUAL
'Per-Host Resource Reservation: Memory=0B'
'Per-Host Resource Estimates: Memory=16.00MB'
'Codegen disabled by planner'
''
'F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1'
'PLAN-ROOT SINK'
'| mem-estimate=0B mem-reservation=0B'
'|'
'00:SCAN HDFS [$DATABASE.alltypes]'
row_regex:.*partitions=12/24 files=12 size=.*
' stats-rows=unavailable extrapolated-rows=3651'
row_regex:.*table stats: rows=3650 size=.*
' column stats: all'
' mem-estimate=16.00MB mem-reservation=0B'
' tuple-ids=0 row-size=4B cardinality=3651'
---- TYPES
STRING
====
---- QUERY
# Compute stats and run the same query again.
compute stats alltypes;
explain select id from alltypes where year = 2010;
---- RESULTS: VERIFY_IS_EQUAL
'Per-Host Resource Reservation: Memory=0B'
'Per-Host Resource Estimates: Memory=16.00MB'
'Codegen disabled by planner'
''
'F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1'
'PLAN-ROOT SINK'
'| mem-estimate=0B mem-reservation=0B'
'|'
'00:SCAN HDFS [$DATABASE.alltypes]'
row_regex:.*partitions=12/24 files=12 size=.*
' stats-rows=3650 extrapolated-rows=3651'
row_regex:.*table stats: rows=10950 size=.*
' column stats: all'
' mem-estimate=16.00MB mem-reservation=0B'
' tuple-ids=0 row-size=4B cardinality=3651'
---- TYPES
STRING
====
---- QUERY
# Test that dropping stats resets everything.
drop stats alltypes;
explain select id from alltypes;
---- RESULTS: VERIFY_IS_SUBSET
' stats-rows=unavailable extrapolated-rows=unavailable'
' table stats: rows=unavailable size=unavailable'
' column stats: unavailable'
' mem-estimate=16.00MB mem-reservation=0B'
' tuple-ids=0 row-size=4B cardinality=unavailable'
---- TYPES
STRING
====
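A rough way to read the extrapolated-rows values in the expected output above: they are consistent with scaling the row count from the last COMPUTE STATS by the growth in scanned file bytes. The sketch below only illustrates that arithmetic; the file sizes are hypothetical and this is not Impala's actual implementation.

# Illustrative arithmetic only: assumes the extrapolated row count scales the
# COMPUTE STATS row count by the ratio of bytes scanned now to bytes present
# when stats were computed. File sizes are hypothetical.
def extrapolate(stats_rows, stats_bytes, scanned_bytes):
    return round(stats_rows * scanned_bytes / stats_bytes)

stats_rows = 3650          # rows when COMPUTE STATS ran (12 files)
stats_bytes = 12 * 20_000  # hypothetical: ~20 KB per file at stats time

# After doubling the data, all 24 files are scanned: roughly twice the bytes.
print(extrapolate(stats_rows, stats_bytes, 24 * 20_000))  # -> 7300

# Scanning only the 12 new files gives roughly the original row count again;
# small per-file size differences explain values like 3651 instead of 3650.
print(extrapolate(stats_rows, stats_bytes, 12 * 20_006))  # -> 3651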