Files
impala/testdata/workloads/functional-query/queries/QueryTest/explain-level2.test
Joe McDonnell 825900fa6c IMPALA-13057: Incorporate tuple/slot information into tuple cache key
The tuple cache keys currently do not include information about
the tuples or slots, as that information is stored outside
the PlanNode Thrift structures. The tuple/slot information is
critical to determining which columns are referenced and what
data layout the result tuple has. This adds code to incorporate
the TupleDescriptors and SlotDescriptors into the cache key.
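
As a rough illustration (a made-up sketch, not the actual Impala
implementation; DescriptorKeySketch and its slot-layout strings are
invented for this example), folding each tuple's slot layout into a
digest gives queries that reference different columns, or that lay
out their result tuples differently, different keys:

  import java.nio.charset.StandardCharsets;
  import java.security.MessageDigest;
  import java.security.NoSuchAlgorithmException;

  // Hypothetical sketch: fold each tuple's slot layout (column, type,
  // offset) into a digest so that queries referencing different
  // columns or producing differently laid out result tuples get
  // different keys.
  public class DescriptorKeySketch {
    public static String keyFor(String[] slotLayouts)
        throws NoSuchAlgorithmException {
      MessageDigest md = MessageDigest.getInstance("MD5");
      for (String layout : slotLayouts) {
        md.update(layout.getBytes(StandardCharsets.UTF_8));
      }
      StringBuilder hex = new StringBuilder();
      for (byte b : md.digest()) {
        hex.append(String.format("%02x", b));
      }
      return hex.toString();
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
      // The same scan shape with a different referenced column yields
      // a different key.
      System.out.println(keyFor(new String[] {"l_orderkey:BIGINT@0"}));
      System.out.println(keyFor(new String[] {"l_partkey:BIGINT@0"}));
    }
  }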

Since the tuple and slot ids are indices into a global structure
(the descriptor table), they hinder cache key matches across
different queries. If a query has an extra filter, it can shift
all the slot ids. If the query has an extra join, it can
shift all the tuple ids. To eliminate this effect, this adds the
ability to translate tuple and slot ids from global indices to
local indices. The translation only contains information from the
subtree below that point, so it is not influenced by unrelated
parts of the query.
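
A minimal sketch of the idea (LocalIdMap is a hypothetical
stand-in, not the real TupleCacheInfo code): assign each global id
a local index in the order it is first registered, so the numbering
depends only on the subtree being serialized:

  import java.util.LinkedHashMap;
  import java.util.Map;

  // Hypothetical sketch: each global id gets a local index in the
  // order it is first registered, so the local numbering depends only
  // on the subtree being serialized, not on unrelated parts of the
  // query.
  public class LocalIdMap {
    private final Map<Integer, Integer> globalToLocal = new LinkedHashMap<>();

    // Register a global id; repeated registration returns the same index.
    public int register(int globalId) {
      Integer local = globalToLocal.get(globalId);
      if (local == null) {
        local = globalToLocal.size();
        globalToLocal.put(globalId, local);
      }
      return local;
    }

    // Translate a previously registered global id to its local index.
    public int translate(int globalId) {
      Integer local = globalToLocal.get(globalId);
      if (local == null) {
        throw new IllegalStateException("Unregistered id: " + globalId);
      }
      return local;
    }

    public static void main(String[] args) {
      LocalIdMap tupleIds = new LocalIdMap();
      // Global tuple ids 5 and 7 become local 0 and 1 no matter how
      // many other tuples exist elsewhere in the query.
      System.out.println(tupleIds.register(5));   // 0
      System.out.println(tupleIds.register(7));   // 1
      System.out.println(tupleIds.translate(7));  // 1
    }
  }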

When the code registers a tuple with the TupleCacheInfo, it also
registers a translation from the global index to a local index.
Any code that puts SlotIds or TupleIds into a Thrift data structure
can use the translateTupleId() and translateSlotId() functions to
get the local index. These are exposed on ThriftSerializationCtx
by functions of the same name, but those functions apply the
translation only when serializing for the tuple cache.
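
For example, a serialization context along these lines could apply
the translation only in the tuple cache case; the method names
mirror the ones above, but the fields and signatures are
illustrative assumptions, not Impala's actual API:

  import java.util.Map;

  // Hypothetical serialization context: ids are translated only when
  // serializing for the tuple cache; otherwise they pass through
  // unchanged.
  public class SerializationCtxSketch {
    // Global-id -> local-index maps; both are null when this context
    // is not building a tuple cache key.
    private final Map<Integer, Integer> tupleIdMap;
    private final Map<Integer, Integer> slotIdMap;

    public SerializationCtxSketch(Map<Integer, Integer> tupleIdMap,
        Map<Integer, Integer> slotIdMap) {
      this.tupleIdMap = tupleIdMap;
      this.slotIdMap = slotIdMap;
    }

    public boolean isTupleCache() { return tupleIdMap != null; }

    // Identity for normal serialization, subtree-local index when
    // serializing for the tuple cache key.
    public int translateTupleId(int globalTupleId) {
      return isTupleCache() ? tupleIdMap.get(globalTupleId) : globalTupleId;
    }

    public int translateSlotId(int globalSlotId) {
      return isTupleCache() ? slotIdMap.get(globalSlotId) : globalSlotId;
    }
  }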

This passes the ThriftSerializationCtx into Exprs that have
TupleIds or SlotIds and applies the translation. It also passes
the ThriftSerializationCtx into PlanNode::toThrift(), which is
used to translate TupleIds in HdfsScanNode.
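
Building on the SerializationCtxSketch above, an expression that
carries a SlotId would conceptually translate it while serializing;
TSlotRefSketch and SlotRefSketch are placeholders for the generated
Thrift struct and the planner Expr:

  // Hypothetical Thrift struct standing in for a generated Thrift class.
  class TSlotRefSketch {
    int slotId;
  }

  // Hypothetical Expr holding a SlotId. When serializing for the
  // tuple cache, the id written into the Thrift struct is the
  // subtree-local index instead of the global one.
  class SlotRefSketch {
    private final int globalSlotId;

    SlotRefSketch(int globalSlotId) { this.globalSlotId = globalSlotId; }

    TSlotRefSketch toThrift(SerializationCtxSketch ctx) {
      TSlotRefSketch msg = new TSlotRefSketch();
      msg.slotId = ctx.translateSlotId(globalSlotId);
      return msg;
    }
  }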

This also adds a way to register a table with the tuple cache and
incorporate information about it into the cache key. This allows us
to mask out additional fields in PlanNode and enable a test case
that relies on matching across different table aliases.
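
As a sketch of the intent (registerTable() is hypothetical, not
the real API), the key can hash the table's stable identity while
leaving out query-specific naming such as the alias, so queries
that differ only in their aliases can still match:

  import java.nio.charset.StandardCharsets;
  import java.security.MessageDigest;

  // Hypothetical sketch: hash the table's stable identity (database
  // and table name) into the cache key while skipping query-specific
  // naming such as the alias, so "tpch.orders o1" and "tpch.orders o2"
  // contribute identical bytes to the key.
  public class TableKeySketch {
    public static void registerTable(MessageDigest key, String dbName,
        String tableName, String alias) {
      key.update((dbName + "." + tableName).getBytes(StandardCharsets.UTF_8));
      // 'alias' is deliberately not hashed; it can differ between
      // otherwise identical queries and would block cache key matches.
    }
  }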

Testing:
 - This fixes some commented-out test cases in TupleCacheTest
   (specifically telling columns apart)
 - This adds new test cases that match due to id translation
   (extra filters, extra joins)
 - This adds a unit test for the id translation to
   TupleCacheInfoTest

Change-Id: I7f5278e9dbb976cbebdc6a21a6e66bc90ce06c6c
Reviewed-on: http://gerrit.cloudera.org:8080/21398
Reviewed-by: Joe McDonnell <joemcdonnell@cloudera.com>
Tested-by: Impala Public Jenkins <impala-public-jenkins@cloudera.com>
2024-05-31 00:12:03 +00:00

====
---- QUERY
# Explain a simple hash join query.
explain
select *
from tpch.lineitem join tpch.orders on l_orderkey = o_orderkey;
---- RESULTS: VERIFY_IS_EQUAL
row_regex:.*Max Per-Host Resource Reservation: Memory=[0-9.]*MB Threads=[0-9]*.*
row_regex:.*Per-Host Resource Estimates: Memory=[0-9.]*MB.*
'Analyzed query: SELECT * FROM tpch.lineitem INNER JOIN tpch.orders ON l_orderkey'
'= o_orderkey'
''
'F02:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1'
row_regex:.*Per-Host Resources: mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1
'PLAN-ROOT SINK'
'| output exprs: tpch.lineitem.l_orderkey, tpch.lineitem.l_partkey, tpch.lineitem.l_suppkey, tpch.lineitem.l_linenumber, tpch.lineitem.l_quantity, tpch.lineitem.l_extendedprice, tpch.lineitem.l_discount, tpch.lineitem.l_tax, tpch.lineitem.l_returnflag, tpch.lineitem.l_linestatus, tpch.lineitem.l_shipdate, tpch.lineitem.l_commitdate, tpch.lineitem.l_receiptdate, tpch.lineitem.l_shipinstruct, tpch.lineitem.l_shipmode, tpch.lineitem.l_comment, tpch.orders.o_orderkey, tpch.orders.o_custkey, tpch.orders.o_orderstatus, tpch.orders.o_totalprice, tpch.orders.o_orderdate, tpch.orders.o_orderpriority, tpch.orders.o_clerk, tpch.orders.o_shippriority, tpch.orders.o_comment'
row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B spill-buffer=[0-9.]*MB thread-reservation=0
'|'
'04:EXCHANGE [UNPARTITIONED]'
row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
'| tuple-ids=0,1 row-size=402B cardinality=5.76M'
'| in pipelines: 00(GETNEXT)'
'|'
'F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3'
row_regex:.*Per-Host Resources: mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=.*
'02:HASH JOIN [INNER JOIN, BROADCAST]'
'| hash predicates: l_orderkey = o_orderkey'
'| fk/pk conjuncts: l_orderkey = o_orderkey'
'| runtime filters: RF000[bloom] <- o_orderkey'
row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B spill-buffer=[0-9.]*MB thread-reservation=0.*
'| tuple-ids=0,1 row-size=402B cardinality=5.76M'
'| in pipelines: 00(GETNEXT), 01(OPEN)'
'|'
'|--03:EXCHANGE [BROADCAST]'
row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
'| | tuple-ids=1 row-size=171B cardinality=1.50M'
'| | in pipelines: 01(GETNEXT)'
'| |'
row_regex:.*F01:PLAN FRAGMENT \[RANDOM\] hosts=[1-2] instances=[1-2]
row_regex:.*Per-Host Resources: mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=.*
'| 01:SCAN $FILESYSTEM_NAME [tpch.orders, RANDOM]'
row_regex:.*partitions=1/1 files=1 size=.*
'| stored statistics:'
row_regex:.*table: rows=[0-9.]*[A-Z]* size=.*
'| columns: all'
row_regex:.*extrapolated-rows=disabled max-scan-range-rows=[0-9]*.*
row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1.*
'| tuple-ids=1 row-size=171B cardinality=1.50M'
'| in pipelines: 01(GETNEXT)'
'|'
'00:SCAN $FILESYSTEM_NAME [tpch.lineitem, RANDOM]'
row_regex:.*partitions=1/1 files=1 size=.*
' runtime filters: RF000[bloom] -> l_orderkey'
' stored statistics:'
row_regex:.*table: rows=[0-9.]*[A-Z]* size=.*
' columns: all'
row_regex:.*extrapolated-rows=disabled max-scan-range-rows=[0-9]*.*
row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1.*
' tuple-ids=0 row-size=231B cardinality=6.00M'
' in pipelines: 00(GETNEXT)'
====
---- QUERY
# Tests the warning about missing table stats in the explain header.
explain select count(t1.int_col), avg(t2.float_col), sum(t3.bigint_col)
from functional_avro.alltypes t1
inner join functional_parquet.alltypessmall t2 on (t1.id = t2.id)
left outer join functional_avro.alltypes t3 on (t2.id = t3.id)
where t1.month = 1 and t2.year = 2009 and t3.bool_col = false
---- RESULTS: VERIFY_IS_SUBSET
'Per-Host Resource Estimates: Memory=55MB'
'WARNING: The following tables are missing relevant table and/or column statistics.'
'functional_avro.alltypes, functional_parquet.alltypessmall'
====
---- QUERY
# Tests the warning about missing table stats in the explain header.
# Disable the estimation of cardinality for an HDFS table without stats.
set DISABLE_HDFS_NUM_ROWS_ESTIMATE=1;
explain select count(t1.int_col), avg(t2.float_col), sum(t3.bigint_col)
from functional_avro.alltypes t1
inner join functional_parquet.alltypessmall t2 on (t1.id = t2.id)
left outer join functional_avro.alltypes t3 on (t2.id = t3.id)
where t1.month = 1 and t2.year = 2009 and t3.bool_col = false
---- RESULTS: VERIFY_IS_SUBSET
'Per-Host Resource Estimates: Memory=4.05GB'
'WARNING: The following tables are missing relevant table and/or column statistics.'
'functional_avro.alltypes, functional_parquet.alltypessmall'
====
---- QUERY
# Tests the set operation statement rewrite
explain select year, month from functional.alltypes
intersect
select year, month from functional.alltypes where year=2009;
---- RESULTS: VERIFY_IS_SUBSET
'Per-Host Resources: mem-estimate=13.98MB mem-reservation=5.88MB thread-reservation=1 runtime-filters-memory=2.00MB'
'04:HASH JOIN [LEFT SEMI JOIN, PARTITIONED]'
====
---- QUERY
# Tests for cache key and trace with ENABLE_TUPLE_CACHE=TRUE
set ENABLE_TUPLE_CACHE=TRUE;
explain select count(*) from tpch.region
---- RESULTS: VERIFY_IS_SUBSET
row_regex:.* cache key: [0-9a-f][0-9a-f]*.*
row_regex:.*\[.*TPlanNode\(.*\]
====