# insert into an unpartitioned table
insert into table functional.alltypesnopart
select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col,
float_col, double_col, date_string_col, string_col, timestamp_col
from functional.alltypes
where year=2009 and month=05
---- PLAN
WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
|  partitions=1
|
00:SCAN HDFS [functional.alltypes]
   partitions=1/24 files=1 size=20.36KB
---- SCANRANGELOCATIONS
NODE 0:
  HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=5/090501.txt 0:20853
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
|  partitions=1
|
00:SCAN HDFS [functional.alltypes]
   partitions=1/24 files=1 size=20.36KB
====
# insert into a static partition
insert into table functional.alltypessmall
partition (year=2009, month=04)
select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col,
float_col, double_col, date_string_col, string_col, timestamp_col
from functional.alltypes
where year=2009 and month=05
---- PLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2009,4)]
|  partitions=1
|
00:SCAN HDFS [functional.alltypes]
   partitions=1/24 files=1 size=20.36KB
---- SCANRANGELOCATIONS
NODE 0:
  HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=5/090501.txt 0:20853
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2009,4)]
|  partitions=1
|
00:SCAN HDFS [functional.alltypes]
   partitions=1/24 files=1 size=20.36KB
====
# overwrite a static partition
insert overwrite table functional.alltypessmall
partition (year=2009, month=04)
select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col,
float_col, double_col, date_string_col, string_col, timestamp_col
from functional.alltypes
where year=2009 and month=05
---- PLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=true, PARTITION-KEYS=(2009,4)]
|  partitions=1
|
00:SCAN HDFS [functional.alltypes]
   partitions=1/24 files=1 size=20.36KB
---- SCANRANGELOCATIONS
NODE 0:
  HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=5/090501.txt 0:20853
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=true, PARTITION-KEYS=(2009,4)]
|  partitions=1
|
00:SCAN HDFS [functional.alltypes]
   partitions=1/24 files=1 size=20.36KB
====
# insert into fully dynamic partitions
insert into table functional.alltypessmall
partition (year, month)
select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col,
float_col, double_col, date_string_col, string_col, timestamp_col, year, month
from functional.alltypes
where year=2009 and month>10
---- PLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,month)]
|  partitions=24
|
00:SCAN HDFS [functional.alltypes]
   partitions=2/24 files=2 size=40.07KB
---- SCANRANGELOCATIONS
NODE 0:
  HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=11/091101.txt 0:20179
  HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=12/091201.txt 0:20853
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,month)]
|  partitions=24
|
01:EXCHANGE [HASH(year,month)]
|
00:SCAN HDFS [functional.alltypes]
   partitions=2/24 files=2 size=40.07KB
====
# insert into fully dynamic partitions. The source table has no stats and the insert
# statement has a partition clause, so hash partition before the sink.
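# (without stats the planner cannot estimate the partition count, hence
# partitions=unavailable in the sinks below)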
insert into table functional.alltypessmall
partition (year, month)
select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col,
float_col, double_col, date_string_col, string_col, timestamp_col, int_col, int_col
from functional_seq_snap.alltypes
where year=2009 and month>10
---- PLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(int_col,int_col)]
|  partitions=unavailable
|
00:SCAN HDFS [functional_seq_snap.alltypes]
   partitions=2/24 files=2 size=11.34KB
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(int_col,int_col)]
|  partitions=unavailable
|
01:EXCHANGE [HASH(int_col,int_col)]
|
00:SCAN HDFS [functional_seq_snap.alltypes]
   partitions=2/24 files=2 size=11.34KB
====
# insert into fully dynamic partitions;
# partitioned output doesn't require repartitioning
insert into table functional.alltypessmall
partition (year, month)
select min(id), min(bool_col), min(tinyint_col), min(smallint_col), min(int_col),
min(bigint_col), min(float_col), min(double_col), min(date_string_col), min(string_col),
min(timestamp_col), year, month
from functional.alltypes
where year=2009 and month>10
group by year, month
---- PLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,month)]
|  partitions=24
|
01:AGGREGATE [FINALIZE]
|  output: min(id), min(bool_col), min(tinyint_col), min(smallint_col), min(int_col), min(bigint_col), min(float_col), min(double_col), min(date_string_col), min(string_col), min(timestamp_col)
|  group by: year, month
|
00:SCAN HDFS [functional.alltypes]
   partitions=2/24 files=2 size=40.07KB
---- SCANRANGELOCATIONS
NODE 0:
  HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=11/091101.txt 0:20179
  HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=12/091201.txt 0:20853
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,month)]
|  partitions=24
|
03:AGGREGATE [FINALIZE]
|  output: min:merge(id), min:merge(bool_col), min:merge(tinyint_col), min:merge(smallint_col), min:merge(int_col), min:merge(bigint_col), min:merge(float_col), min:merge(double_col), min:merge(date_string_col), min:merge(string_col), min:merge(timestamp_col)
|  group by: year, month
|
02:EXCHANGE [HASH(year,month)]
|
01:AGGREGATE [STREAMING]
|  output: min(id), min(bool_col), min(tinyint_col), min(smallint_col), min(int_col), min(bigint_col), min(float_col), min(double_col), min(date_string_col), min(string_col), min(timestamp_col)
|  group by: year, month
|
00:SCAN HDFS [functional.alltypes]
   partitions=2/24 files=2 size=40.07KB
====
# insert into a partially dynamic partition
insert into table functional.alltypessmall
partition (year=2009, month)
select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col,
float_col, double_col, date_string_col, string_col, timestamp_col, month
from functional.alltypes
where year=2009 and month>10
---- PLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2009,month)]
|  partitions=12
|
00:SCAN HDFS [functional.alltypes]
   partitions=2/24 files=2 size=40.07KB
---- SCANRANGELOCATIONS
NODE 0:
  HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=11/091101.txt 0:20179
  HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=12/091201.txt 0:20853
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2009,month)]
|  partitions=12
|
01:EXCHANGE [HASH(month)]
|
00:SCAN HDFS [functional.alltypes]
   partitions=2/24 files=2 size=40.07KB
====
# insert into a partially dynamic partition
# partitioned output doesn't require repartitioning
insert into table functional.alltypessmall
partition (year=2009, month)
select min(id), min(bool_col), min(tinyint_col), min(smallint_col), min(int_col),
min(bigint_col), min(float_col), min(double_col), min(date_string_col), min(string_col),
min(timestamp_col), month
from functional.alltypes
where year=2009 and month>10
group by month
---- PLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2009,month)]
|  partitions=12
|
01:AGGREGATE [FINALIZE]
|  output: min(id), min(bool_col), min(tinyint_col), min(smallint_col), min(int_col), min(bigint_col), min(float_col), min(double_col), min(date_string_col), min(string_col), min(timestamp_col)
|  group by: month
|
00:SCAN HDFS [functional.alltypes]
   partitions=2/24 files=2 size=40.07KB
---- SCANRANGELOCATIONS
NODE 0:
  HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=11/091101.txt 0:20179
  HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=12/091201.txt 0:20853
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2009,month)]
|  partitions=12
|
03:AGGREGATE [FINALIZE]
|  output: min:merge(id), min:merge(bool_col), min:merge(tinyint_col), min:merge(smallint_col), min:merge(int_col), min:merge(bigint_col), min:merge(float_col), min:merge(double_col), min:merge(date_string_col), min:merge(string_col), min:merge(timestamp_col)
|  group by: month
|
02:EXCHANGE [HASH(month)]
|
01:AGGREGATE [STREAMING]
|  output: min(id), min(bool_col), min(tinyint_col), min(smallint_col), min(int_col), min(bigint_col), min(float_col), min(double_col), min(date_string_col), min(string_col), min(timestamp_col)
|  group by: month
|
00:SCAN HDFS [functional.alltypes]
   partitions=2/24 files=2 size=40.07KB
====
# insert into a partially dynamic partition
insert into table functional.alltypessmall
partition (year, month=4)
select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col,
float_col, double_col, date_string_col, string_col, timestamp_col, year
from functional.alltypes
where year>2009 and month=4
---- PLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,4)]
|  partitions=2
|
00:SCAN HDFS [functional.alltypes]
   partitions=1/24 files=1 size=19.71KB
---- SCANRANGELOCATIONS
NODE 0:
  HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2010/month=4/100401.txt 0:20179
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,4)]
|  partitions=2
|
00:SCAN HDFS [functional.alltypes]
   partitions=1/24 files=1 size=19.71KB
====
# insert with limit from partitioned table.
insert into table functional.alltypesnopart
select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col,
float_col, double_col, date_string_col, string_col, timestamp_col
from functional.alltypes where year=2009 and month=1 limit 10
---- PLAN
WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
|  partitions=1
|
00:SCAN HDFS [functional.alltypes]
   partitions=1/24 files=1 size=19.95KB
   limit: 10
---- SCANRANGELOCATIONS
NODE 0:
  HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=1/090101.txt 0:20433
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
|  partitions=1
|
01:EXCHANGE [UNPARTITIONED]
|  limit: 10
|
00:SCAN HDFS [functional.alltypes]
   partitions=1/24 files=1 size=19.95KB
   limit: 10
====
# static partition insert from a constant select
insert into table functional.alltypessmall
partition (year=2010, month=4)
select 100, false, 1, 1, 1, 10,
10.0, 10.0, "02/01/09", "1", cast("2009-02-01 00:01:00" as timestamp)
---- PLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2010,4)]
|  partitions=1
|
00:UNION
   constant-operands=1
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2010,4)]
|  partitions=1
|
00:UNION
   constant-operands=1
====
# dynamic partition insert from a constant select
insert into table functional.alltypessmall
partition (year, month)
select 100, false, 1, 1, 1, 10,
10.0, 10.0, "02/01/09", "1", cast("2009-02-01 00:01:00" as timestamp), 2010, 4
---- PLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2010,4)]
|  partitions=1
|
00:UNION
   constant-operands=1
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2010,4)]
|  partitions=1
|
00:UNION
   constant-operands=1
====
# static partition insert from values statement
insert into table functional.alltypessmall
partition (year=2010, month=4) values
(100, false, 1, 1, 1, 10, 10.0, 10.0, "02/01/09", "1", cast("2009-02-01 00:01:00" as timestamp)),
(200, true, 2, 2, 2, 20, 20.0, 20.0, "02/02/09", "2", cast("2009-02-02 00:01:00" as timestamp)),
(300, false, 3, 3, 3, 30, 30.0, 30.0, "02/03/09", "3", cast("2009-02-03 00:01:00" as timestamp))
---- PLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2010,4)]
|  partitions=1
|
00:UNION
   constant-operands=3
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2010,4)]
|  partitions=1
|
00:UNION
   constant-operands=3
====
# dynamic partition insert from values statement
insert into table functional.alltypessmall
partition (year, month) values
(100, false, 1, 1, 1, 10, 10.0, 10.0, "02/01/09", "1", cast("2009-02-01 00:01:00" as timestamp), 2010, 4),
(200, true, 2, 2, 2, 20, 20.0, 20.0, "02/02/09", "2", cast("2009-02-02 00:01:00" as timestamp), 2010, 5),
(300, false, 3, 3, 3, 30, 30.0, 30.0, "02/03/09", "3", cast("2009-02-03 00:01:00" as timestamp), 2010, 6)
---- PLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2010,4)]
|  partitions=9
|
00:UNION
   constant-operands=3
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2010,4)]
|  partitions=9
|
00:UNION
   constant-operands=3
====
# test static partition insert from a query with grouped aggregation
# we expect the insert fragment to be partitioned by the grouping exprs of the query stmt
# and not by the partition exprs of the insert stmt
insert into functional.alltypes(bigint_col, string_col) partition (year=2010, month=10)
select count(int_col), string_col from functional.alltypes
group by string_col
---- PLAN
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(2010,10)]
|  partitions=1
|
01:AGGREGATE [FINALIZE]
|  output: count(int_col)
|  group by: string_col
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(2010,10)]
|  partitions=1
|
03:AGGREGATE [FINALIZE]
|  output: count:merge(int_col)
|  group by: string_col
|
02:EXCHANGE [HASH(string_col)]
|
01:AGGREGATE [STREAMING]
|  output: count(int_col)
|  group by: string_col
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
====
# test static partition insert from a query with distinct grouped aggregation
# we expect the insert fragment to be partitioned by the grouping exprs of the query stmt
# and not by the partition exprs of the insert stmt
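# (count(distinct) is rewritten into a two-phase aggregation, which is where the extra
# HASH(string_col,int_col) exchange below comes from)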
insert into functional.alltypes(bigint_col, string_col) partition (year=2010, month=10)
select count(distinct int_col), string_col from functional.alltypes
group by string_col
---- PLAN
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(2010,10)]
|  partitions=1
|
02:AGGREGATE [FINALIZE]
|  output: count(int_col)
|  group by: string_col
|
01:AGGREGATE
|  group by: string_col, int_col
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(2010,10)]
|  partitions=1
|
06:AGGREGATE [FINALIZE]
|  output: count:merge(int_col)
|  group by: string_col
|
05:EXCHANGE [HASH(string_col)]
|
02:AGGREGATE [STREAMING]
|  output: count(int_col)
|  group by: string_col
|
04:AGGREGATE
|  group by: string_col, int_col
|
03:EXCHANGE [HASH(string_col,int_col)]
|
01:AGGREGATE [STREAMING]
|  group by: string_col, int_col
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
====
# test that the planner chooses to repartition before the table sink
# alltypes has column stats and based on the product of the NDVs of year and month
# the planner should choose to repartition before the table sink
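# (alltypes spans 24 year/month partitions, as the partitions=24/24 scans show, so the
# NDV product predicts 24 output partitions)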
insert into table functional.alltypes partition(year, month)
select * from functional.alltypes
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(functional.alltypes.year,functional.alltypes.month)]
|  partitions=24
|
01:EXCHANGE [HASH(functional.alltypes.year,functional.alltypes.month)]
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
====
# test noshuffle hint to prevent repartitioning (same query as above with hint)
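# (with the exchange gone, the scan fragments feed the sink directly; each sender may
# then write to every output partition)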
insert into table functional.alltypes partition(year, month) [noshuffle]
select * from functional.alltypes
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(functional.alltypes.year,functional.alltypes.month)]
|  partitions=24
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
====
# same as above but with traditional commented hint at default hint location
insert into table functional.alltypes partition(year, month) /* +noshuffle */
select * from functional.alltypes
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(functional.alltypes.year,functional.alltypes.month)]
|  partitions=24
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
====
# same as above but with traditional commented hint at Oracle hint location
insert /* +noshuffle */ into table functional.alltypes partition(year, month)
select * from functional.alltypes
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(functional.alltypes.year,functional.alltypes.month)]
|  partitions=24
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
====
# same as above but with end-of-line commented hint
insert into table functional.alltypes partition(year, month)
-- +noshuffle
select * from functional.alltypes
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(functional.alltypes.year,functional.alltypes.month)]
|  partitions=24
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
====
# test that the planner does not repartition before the table sink
# alltypes has column stats and since year only has 2 distinct values the planner
# should choose not to repartition before the table sink
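# (a HASH(year) exchange would route all rows to at most two writers, reducing write
# parallelism; compare the forced-shuffle variant below)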
insert into table functional.alltypes partition(year, month=1)
select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col,
float_col, double_col, date_string_col, string_col, timestamp_col, year
from functional.alltypes
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,1)]
|  partitions=2
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
====
# test shuffle hint to force repartitioning (same query as above with hint)
insert into table functional.alltypes partition(year, month=1) [shuffle]
select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col,
float_col, double_col, date_string_col, string_col, timestamp_col, year
from functional.alltypes
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,1)]
|  partitions=2
|
01:EXCHANGE [HASH(year)]
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
====
# test insert/select stmt that contains an analytic function (IMPALA-1400)
insert into table functional.alltypestiny partition(year=2009, month=1)
select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col,
float_col, double_col, date_string_col, string_col,
lag(timestamp_col, 1) over (partition by id order by id) as timestamp_col
from functional.alltypestiny
---- PLAN
WRITE TO HDFS [functional.alltypestiny, OVERWRITE=false, PARTITION-KEYS=(2009,1)]
|  partitions=1
|
02:ANALYTIC
|  functions: lag(timestamp_col, 1, NULL)
|  partition by: id
|  order by: id ASC
|  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING
|
01:SORT
|  order by: id ASC NULLS FIRST, id ASC
|
00:SCAN HDFS [functional.alltypestiny]
   partitions=4/4 files=4 size=460B
====
# IMPALA-3930: Test insert with shuffle hint on constant partition exprs. The table sink
# is executed at the coordinator.
insert into table functional.alltypes partition(year=2009, month=1) /* +shuffle */
select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col,
float_col, double_col, date_string_col, string_col, timestamp_col
from functional.alltypes
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(2009,1)]
|  partitions=1
|
01:EXCHANGE [UNPARTITIONED]
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
====
# IMPALA-3930: Same as above but with a dynamic partition insert.
insert into table functional.alltypes partition(year, month) /* +shuffle */
select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col,
float_col, double_col, date_string_col, string_col, timestamp_col, 2009, 1
from functional.alltypes
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(2009,1)]
|  partitions=1
|
01:EXCHANGE [UNPARTITIONED]
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
====
# IMPALA-3930: Same as above but with a mix of static/dynamic partition exprs, and
# with more complex constant exprs.
insert into table functional.alltypes partition(year, month=cast(10/2 as int)) /* +shuffle */
select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col,
float_col, double_col, date_string_col, string_col, timestamp_col, cast(concat("2", "010") as smallint) - 1
from functional.alltypes
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(2009,5)]
|  partitions=1
|
01:EXCHANGE [UNPARTITIONED]
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
====
# Test insert into an unpartitioned table with shuffle hint.
insert into table functional.alltypesnopart /* +shuffle */
select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col,
float_col, double_col, date_string_col, string_col, timestamp_col
from functional.alltypes
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
|  partitions=1
|
01:EXCHANGE [UNPARTITIONED]
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
====
# IMPALA-2521: clustered insert into partitioned table adds sort node.
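# (sorting on the partition exprs delivers each partition's rows contiguously, so the
# sink should only need one open partition writer at a time)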
insert into table functional.alltypes partition(year, month) /*+ clustered */
select * from functional.alltypes
---- PLAN
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)]
|  partitions=24
|
01:SORT
|  order by: year ASC NULLS LAST, month ASC NULLS LAST
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)]
|  partitions=24
|
02:SORT
|  order by: year ASC NULLS LAST, month ASC NULLS LAST
|
01:EXCHANGE [HASH(functional.alltypes.year,functional.alltypes.month)]
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
====
# IMPALA-2521: clustered insert into partitioned table adds sort node.
insert into table functional.alltypes partition(year, month) /*+ clustered,noshuffle */
select * from functional.alltypes
---- PLAN
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)]
|  partitions=24
|
01:SORT
|  order by: year ASC NULLS LAST, month ASC NULLS LAST
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)]
|  partitions=24
|
01:SORT
|  order by: year ASC NULLS LAST, month ASC NULLS LAST
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
====
# IMPALA-2521: clustered insert into partitioned table adds sort node. Subquery in
# WHERE-clause exercises the reset() + analyze() path during rewrite.
insert into table functional.alltypes partition(year, month) /*+ clustered */
select * from functional.alltypes
where int_col = (select max(int_col) from functional.alltypes)
---- PLAN
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)]
|  partitions=24
|
04:SORT
|  order by: year ASC NULLS LAST, month ASC NULLS LAST
|
03:HASH JOIN [LEFT SEMI JOIN]
|  hash predicates: int_col = max(int_col)
|  runtime filters: RF000 <- max(int_col)
|
|--02:AGGREGATE [FINALIZE]
|  |  output: max(int_col)
|  |
|  01:SCAN HDFS [functional.alltypes]
|     partitions=24/24 files=24 size=478.45KB
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
   runtime filters: RF000 -> int_col
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)]
|  partitions=24
|
08:SORT
|  order by: year ASC NULLS LAST, month ASC NULLS LAST
|
07:EXCHANGE [HASH(functional.alltypes.year,functional.alltypes.month)]
|
03:HASH JOIN [LEFT SEMI JOIN, BROADCAST]
|  hash predicates: int_col = max(int_col)
|  runtime filters: RF000 <- max(int_col)
|
|--06:EXCHANGE [BROADCAST]
|  |
|  05:AGGREGATE [FINALIZE]
|  |  output: max:merge(int_col)
|  |
|  04:EXCHANGE [UNPARTITIONED]
|  |
|  02:AGGREGATE
|  |  output: max(int_col)
|  |
|  01:SCAN HDFS [functional.alltypes]
|     partitions=24/24 files=24 size=478.45KB
|
00:SCAN HDFS [functional.alltypes]
   partitions=24/24 files=24 size=478.45KB
   runtime filters: RF000 -> int_col
====
# IMPALA-2521: clustered insert into non-partitioned table does not add sort node.
insert into table functional.alltypesnopart /*+ clustered */
select * from functional.alltypesnopart
---- PLAN
WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
|  partitions=1
|
00:SCAN HDFS [functional.alltypesnopart]
   partitions=1/1 files=0 size=0B
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
|  partitions=1
|
00:SCAN HDFS [functional.alltypesnopart]
   partitions=1/1 files=0 size=0B
====
# IMPALA-2521: clustered insert into non-partitioned table does not add sort node.
insert into table functional.alltypesnopart /*+ clustered,shuffle */
select * from functional.alltypesnopart
---- PLAN
WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
|  partitions=1
|
00:SCAN HDFS [functional.alltypesnopart]
   partitions=1/1 files=0 size=0B
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
|  partitions=1
|
01:EXCHANGE [UNPARTITIONED]
|
00:SCAN HDFS [functional.alltypesnopart]
   partitions=1/1 files=0 size=0B
====