During serialization of a row batch header, a tuple_data_ is created
which will hold the compressed tuple data for an outbound row batch.
We would like this tuple data to be trackable, as it is responsible for
a significant portion of the untracked memory from the KRPC data stream
sender. By using MemTrackerAllocator, we can allocate the tuple data and
compression scratch and account for them in the memory tracker of the
KrpcDataStreamSender. This change switches the type of the tuple data
and compression scratch buffers from std::string to TrackedString, an
std::basic_string with MemTrackerAllocator as its custom allocator.
This patch adds memory estimation in DataStreamSink.java to account
for OutboundRowBatch memory allocation. This patch also removes the
thrift-based serialization because the thrift RPC has been removed
in the prior commit.
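For illustration, here is a minimal sketch of the allocator approach.
This is not the code in this patch: the MemTracker below is a
simplified, hypothetical stand-in for Impala's real MemTracker
(be/src/runtime/mem-tracker.h), keeping only the Consume()/Release()
calls the sketch needs.

  #include <cstddef>
  #include <cstdint>
  #include <string>

  // Simplified stand-in for Impala's MemTracker (hypothetical here).
  class MemTracker {
   public:
    void Consume(int64_t bytes) { consumed_ += bytes; }
    void Release(int64_t bytes) { consumed_ -= bytes; }
    int64_t consumption() const { return consumed_; }
   private:
    int64_t consumed_ = 0;
  };

  // Allocator that charges every allocation to a MemTracker, so bytes
  // held by the container appear in the tracker's consumption.
  template <typename T>
  class MemTrackerAllocator {
   public:
    using value_type = T;

    explicit MemTrackerAllocator(MemTracker* tracker) : tracker_(tracker) {}

    template <typename U>
    MemTrackerAllocator(const MemTrackerAllocator<U>& other)
        : tracker_(other.tracker()) {}

    T* allocate(std::size_t n) {
      tracker_->Consume(static_cast<int64_t>(n * sizeof(T)));
      return static_cast<T*>(::operator new(n * sizeof(T)));
    }

    void deallocate(T* p, std::size_t n) {
      tracker_->Release(static_cast<int64_t>(n * sizeof(T)));
      ::operator delete(p);
    }

    MemTracker* tracker() const { return tracker_; }

   private:
    MemTracker* tracker_;
  };

  template <typename T, typename U>
  bool operator==(const MemTrackerAllocator<T>& a,
                  const MemTrackerAllocator<U>& b) {
    return a.tracker() == b.tracker();
  }
  template <typename T, typename U>
  bool operator!=(const MemTrackerAllocator<T>& a,
                  const MemTrackerAllocator<U>& b) {
    return !(a == b);
  }

  // A string whose heap buffer is charged to the sender's MemTracker.
  using TrackedString =
      std::basic_string<char, std::char_traits<char>,
                        MemTrackerAllocator<char>>;

  // Usage: the compressed tuple data now shows up in the tracker.
  //   MemTracker tracker;
  //   TrackedString tuple_data{MemTrackerAllocator<char>(&tracker)};
  //   tuple_data.resize(1 << 20);  // ~1 MiB now counted by the tracker.

Note that short strings may live in the string's inline (SSO) buffer and
never reach the allocator; only heap-backed buffers are charged, which
is the case for large serialized row batches.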
Testing:
- Passed core tests.
- Ran a single-node benchmark, which showed no regression.
- Updated row-batch-serialize-test and row-batch-serialize-benchmark
  to test the row-batch serialization used by KRPC.
- Manually collected query profiles, heap growth, and memory usage logs
  showing that untracked memory decreased by half.
- Added test_datastream_sender.py to verify the peak memory of the
  EXCHANGE SENDER node.
- Raised mem_limit in two of the test_spilling_large_rows test cases.
- Printed the test line number in PlannerTestBase.java.
New row-batch serialization benchmark:
Machine Info: Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
serialize:               10%     50%     90%      10%      50%      90%
                                                 (rel)    (rel)    (rel)
------------------------------------------------------------------------
ser_no_dups_base        18.6    18.8    18.9       1X       1X       1X
ser_no_dups             18.5    18.5    18.8   0.998X   0.988X   0.991X
ser_no_dups_full        14.7    14.8    14.8   0.793X    0.79X   0.783X
ser_adj_dups_base       28.2    28.4    28.8       1X       1X       1X
ser_adj_dups            68.9    69.1    69.8    2.44X    2.43X    2.43X
ser_adj_dups_full       56.2    56.7    57.1    1.99X       2X    1.99X
ser_dups_base           20.7    20.9    20.9       1X       1X       1X
ser_dups                20.6    20.8    20.9   0.994X   0.995X       1X
ser_dups_full           39.8      40    40.5    1.93X    1.92X    1.94X

deserialize:             10%     50%     90%      10%      50%      90%
                                                 (rel)    (rel)    (rel)
------------------------------------------------------------------------
deser_no_dups_base      75.9    76.6      77       1X       1X       1X
deser_no_dups           74.9    75.6      76   0.987X   0.987X   0.987X
deser_adj_dups_base      127     128     129       1X       1X       1X
deser_adj_dups           179     193     195    1.41X    1.51X    1.51X
deser_dups_base          128     128     129       1X       1X       1X
deser_dups               165     190     193    1.29X    1.48X    1.49X
Change-Id: I2ba2b907ce4f275a7a1fb8cf75453c7003eb4b82
Reviewed-on: http://gerrit.cloudera.org:8080/18798
Reviewed-by: Impala Public Jenkins <impala-public-jenkins@cloudera.com>
Tested-by: Impala Public Jenkins <impala-public-jenkins@cloudera.com>
====
---- QUERY
# Explain a simple hash join query.
explain
select *
from tpch.lineitem join tpch.orders on l_orderkey = o_orderkey;
---- RESULTS: VERIFY_IS_EQUAL
row_regex:.*Max Per-Host Resource Reservation: Memory=[0-9.]*MB Threads=[0-9]*.*
row_regex:.*Per-Host Resource Estimates: Memory=[0-9.]*MB.*
'Analyzed query: SELECT * FROM tpch.lineitem INNER JOIN tpch.orders ON l_orderkey'
'= o_orderkey'
''
'F02:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1'
row_regex:.*Per-Host Resources: mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1
'PLAN-ROOT SINK'
'| output exprs: tpch.lineitem.l_orderkey, tpch.lineitem.l_partkey, tpch.lineitem.l_suppkey, tpch.lineitem.l_linenumber, tpch.lineitem.l_quantity, tpch.lineitem.l_extendedprice, tpch.lineitem.l_discount, tpch.lineitem.l_tax, tpch.lineitem.l_returnflag, tpch.lineitem.l_linestatus, tpch.lineitem.l_shipdate, tpch.lineitem.l_commitdate, tpch.lineitem.l_receiptdate, tpch.lineitem.l_shipinstruct, tpch.lineitem.l_shipmode, tpch.lineitem.l_comment, tpch.orders.o_orderkey, tpch.orders.o_custkey, tpch.orders.o_orderstatus, tpch.orders.o_totalprice, tpch.orders.o_orderdate, tpch.orders.o_orderpriority, tpch.orders.o_clerk, tpch.orders.o_shippriority, tpch.orders.o_comment'
row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B spill-buffer=[0-9.]*MB thread-reservation=0
'|'
'04:EXCHANGE [UNPARTITIONED]'
row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
'| tuple-ids=0,1 row-size=402B cardinality=5.76M'
'| in pipelines: 00(GETNEXT)'
'|'
'F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3'
row_regex:.*Per-Host Resources: mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=.*
'02:HASH JOIN [INNER JOIN, BROADCAST]'
'| hash predicates: l_orderkey = o_orderkey'
'| fk/pk conjuncts: l_orderkey = o_orderkey'
'| runtime filters: RF000[bloom] <- o_orderkey'
row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B spill-buffer=[0-9.]*MB thread-reservation=0.*
'| tuple-ids=0,1 row-size=402B cardinality=5.76M'
'| in pipelines: 00(GETNEXT), 01(OPEN)'
'|'
'|--03:EXCHANGE [BROADCAST]'
row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
'| | tuple-ids=1 row-size=171B cardinality=1.50M'
'| | in pipelines: 01(GETNEXT)'
'| |'
'| F01:PLAN FRAGMENT [RANDOM] hosts=2 instances=2'
row_regex:.*Per-Host Resources: mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=.*
'| 01:SCAN HDFS [tpch.orders, RANDOM]'
row_regex:.*partitions=1/1 files=1 size=.*
'| stored statistics:'
row_regex:.*table: rows=[0-9.]*[A-Z]* size=.*
'| columns: all'
row_regex:.*extrapolated-rows=disabled max-scan-range-rows=[0-9]*.*
row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1.*
'| tuple-ids=1 row-size=171B cardinality=1.50M'
'| in pipelines: 01(GETNEXT)'
'|'
'00:SCAN HDFS [tpch.lineitem, RANDOM]'
row_regex:.*partitions=1/1 files=1 size=.*
' runtime filters: RF000[bloom] -> l_orderkey'
' stored statistics:'
row_regex:.*table: rows=[0-9.]*[A-Z]* size=.*
' columns: all'
row_regex:.*extrapolated-rows=disabled max-scan-range-rows=[0-9]*.*
row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1.*
' tuple-ids=0 row-size=231B cardinality=6.00M'
' in pipelines: 00(GETNEXT)'
====
---- QUERY
# Tests the warning about missing table stats in the explain header.
explain select count(t1.int_col), avg(t2.float_col), sum(t3.bigint_col)
from functional_avro.alltypes t1
inner join functional_parquet.alltypessmall t2 on (t1.id = t2.id)
left outer join functional_avro.alltypes t3 on (t2.id = t3.id)
where t1.month = 1 and t2.year = 2009 and t3.bool_col = false
---- RESULTS: VERIFY_IS_SUBSET
'Per-Host Resource Estimates: Memory=55MB'
'WARNING: The following tables are missing relevant table and/or column statistics.'
'functional_avro.alltypes, functional_parquet.alltypessmall'
====
---- QUERY
# Tests the warning about missing table stats in the explain header.
# Disable the estimation of cardinality for an hdfs table without stats.
set DISABLE_HDFS_NUM_ROWS_ESTIMATE=1;
explain select count(t1.int_col), avg(t2.float_col), sum(t3.bigint_col)
from functional_avro.alltypes t1
inner join functional_parquet.alltypessmall t2 on (t1.id = t2.id)
left outer join functional_avro.alltypes t3 on (t2.id = t3.id)
where t1.month = 1 and t2.year = 2009 and t3.bool_col = false
---- RESULTS: VERIFY_IS_SUBSET
'Per-Host Resource Estimates: Memory=4.05GB'
'WARNING: The following tables are missing relevant table and/or column statistics.'
'functional_avro.alltypes, functional_parquet.alltypessmall'
====
---- QUERY
# Tests the set operation statement rewrite
explain select year, month from functional.alltypes
intersect
select year, month from functional.alltypes where year=2009;
---- RESULTS: VERIFY_IS_SUBSET
'Per-Host Resources: mem-estimate=13.98MB mem-reservation=5.88MB thread-reservation=1 runtime-filters-memory=2.00MB'
'04:HASH JOIN [LEFT SEMI JOIN, PARTITIONED]'
====