mirror of https://github.com/apache/impala.git (synced 2026-01-07 00:02:28 -05:00)
IMPALA-5903: Inconsistent specification of result set and result set metadata
Before this commit it was quite random which DDL operations returned a result set and which didn't. With this commit, every DDL operation returns a summary of its execution. The operations declare their result set schema in Frontend.java and provide the summary in CatalogOpExecutor.java. Updated the tests according to the new behavior.
Change-Id: Ic542fb8e49e850052416ac663ee329ee3974e3b9
Reviewed-on: http://gerrit.cloudera.org:8080/9090
Reviewed-by: Alex Behm <alex.behm@cloudera.com>
Tested-by: Impala Public Jenkins <impala-public-jenkins@cloudera.com>
committed by Impala Public Jenkins
parent 27c028f057
commit 2ee914d5b3
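Every hunk below follows the same pattern in Impala's query-test files: a ---- RESULTS section (and, where one was missing, a ---- TYPES section) is added after the DDL statement so the test now asserts the one-row string summary the operation returns. As a sketch of that convention (the table name here is invented for illustration), a typical test case looks like:

---- QUERY
create table example_tbl (i int)
---- RESULTS
'Table has been created.'
---- TYPES
string
====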
@@ -4,6 +4,7 @@
create external table t1 (i int)
location '$FILESYSTEM_PREFIX/test-warehouse/$DATABASE.db/t1_tmp1'
---- RESULTS
'Table has been created.'
====
---- QUERY
describe t1
@@ -16,6 +17,9 @@ string,string,string
# Add some columns
alter table t1 add columns (t tinyint, s string comment 'Str Col')
---- RESULTS
'New column(s) have been added to the table.'
---- TYPES
string
====
---- QUERY
describe t1
@@ -29,6 +33,7 @@ string,string,string
---- QUERY
alter table t1 rename to t2
---- RESULTS
'Renaming was successful.'
====
---- QUERY
show tables
@@ -42,6 +47,7 @@ string
# Move the table to a different database
alter table t2 rename to $DATABASE2.t1_inotherdb
---- RESULTS
'Renaming was successful.'
====
---- QUERY
# No longer appears in this database
@@ -63,6 +69,7 @@ string
# Move the table back to this database
alter table $DATABASE2.t1_inotherdb rename to t2
---- RESULTS
'Renaming was successful.'
====
---- QUERY
# make sure the new table shows the same columns as the old table
@@ -77,6 +84,7 @@ string,string,string
---- QUERY
alter table t2 drop column t
---- RESULTS
'Column has been dropped.'
====
---- QUERY
# The dropped column no longer shows up
@@ -91,6 +99,7 @@ string,string,string
# Replace the columns with new values
alter table t2 replace columns (c1 bigint comment 'id col', c2 string, c3 int)
---- RESULTS
'Table columns have been replaced.'
====
---- QUERY
describe t2
@@ -118,14 +127,17 @@ bigint,string,int
---- QUERY
alter table t2 change column c2 int_col int comment 'changed to int col'
---- RESULTS
'Column has been altered.'
====
---- QUERY
alter table t2 change column c1 id_col bigint
---- RESULTS
'Column has been altered.'
====
---- QUERY
alter table t2 change column c3 c3 int comment 'added a comment'
---- RESULTS
'Column has been altered.'
====
---- QUERY
describe t2
@@ -151,6 +163,7 @@ x array<int>,
y map<string,float> comment 'Map Col',
z struct<f1:boolean,f2:bigint>)
---- RESULTS
'New column(s) have been added to the table.'
====
---- QUERY
describe t2
@@ -171,6 +184,7 @@ a int comment 'Int Col',
b struct<f1:array<int>,f2:map<string,struct<f1:bigint>>>,
c double)
---- RESULTS
'Table columns have been replaced.'
====
---- QUERY
describe t2
@@ -217,6 +231,7 @@ string,string,string
---- QUERY
create external table jointbl_test like functional.jointbl
---- RESULTS
'Table has been created.'
====
---- QUERY
# Expect new table to be empty
@@ -282,6 +297,7 @@ alter table t_part add if not exists partition (j=1, s='2012');
alter table t_part add if not exists partition (j=1, s='2012/withslash');
alter table t_part add partition (j=1, s=substring('foo2013bar', 4, 8));
---- RESULTS
'New partition has been added to the table.'
====
---- QUERY
# Add another partition that points to the same location as another partition.
@@ -289,6 +305,7 @@ alter table t_part add partition (j=1, s=substring('foo2013bar', 4, 8));
alter table t_part add partition (j=100, s='same_location')
location '$FILESYSTEM_PREFIX/test-warehouse/$DATABASE.db/t_part_tmp/j=1/s=2012'
---- RESULTS
'New partition has been added to the table.'
====
---- QUERY
# Add another partition that points to an existing data location that does not
@@ -296,6 +313,7 @@ location '$FILESYSTEM_PREFIX/test-warehouse/$DATABASE.db/t_part_tmp/j=1/s=2012'
alter table t_part add partition (j=101, s='different_part_dir')
location '$FILESYSTEM_PREFIX/test-warehouse/$DATABASE.db/part_data/'
---- RESULTS
'New partition has been added to the table.'
====
---- QUERY
insert overwrite table t_part partition(j=1, s='2012') select 2 from functional.alltypes limit 2
@@ -337,10 +355,12 @@ int,int,string
---- QUERY
alter table t_part add partition (j=NULL, s='2013')
---- RESULTS
'New partition has been added to the table.'
====
---- QUERY
alter table t_part add partition (j=NULL, s=NULL)
---- RESULTS
'New partition has been added to the table.'
====
---- QUERY
# Drop the partition that points to a duplication location. The data will no longer
@@ -527,6 +547,7 @@ int,int,string
# rename a partitioned table
alter table t_part rename to t_part2
---- RESULTS
'Renaming was successful.'
====
---- QUERY
# only the new table shows up
@@ -558,6 +579,7 @@ int
alter table alltypes_test partition(month=4, year=2009)
set location '$FILESYSTEM_PREFIX/test-warehouse/alltypes_seq_snap/year=2009/month=4'
---- RESULTS
'New location has been set for the specified partition.'
====
---- QUERY
alter table alltypes_test partition(month=4, year=2009)
@@ -590,6 +612,7 @@ int,bigint
alter table alltypes_test partition(month=cast(1+4 as int), year=cast(100*20+9 as int))
set location '$FILESYSTEM_PREFIX/test-warehouse/alltypes_rc/year=2009/month=5'
---- RESULTS
'New location has been set for the specified partition.'
====
---- QUERY
alter table alltypes_test partition(month=cast(2+3 as int), year=2009)
@@ -661,6 +684,7 @@ STRING, STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
# IMPALA-1016: Testing scanning newly added columns
CREATE TABLE imp1016 (string1 string)
---- RESULTS
'Table has been created.'
====
---- QUERY
INSERT INTO imp1016 VALUES ('test')
@@ -670,6 +694,7 @@ INSERT INTO imp1016 VALUES ('test')
---- QUERY
ALTER TABLE imp1016 ADD COLUMNS (string2 string)
---- RESULTS
'New column(s) have been added to the table.'
====
---- QUERY
DESCRIBE imp1016
@@ -718,6 +743,7 @@ bigint
# Create a larger table to test scanning newly added columns
CREATE TABLE imp1016Large (string1 string)
---- RESULTS
'Table has been created.'
====
---- QUERY
# There is a codepath that operates on chunks of 1024 tuples, inserting
@@ -729,6 +755,7 @@ INSERT INTO imp1016Large SELECT 'test' FROM functional.alltypes LIMIT 2000
---- QUERY
ALTER TABLE imp1016Large ADD COLUMNS (string2 string)
---- RESULTS
'New column(s) have been added to the table.'
====
---- QUERY
DESCRIBE imp1016Large
@@ -801,6 +828,7 @@ STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE
---- QUERY
drop table $DATABASE2.mv2
---- RESULTS
'Table has been dropped.'
====
---- QUERY
show tables in $DATABASE2 like '*mv*'
@@ -855,6 +883,7 @@ STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE
---- QUERY
drop table $DATABASE2.mv2
---- RESULTS
'Table has been dropped.'
====
---- QUERY
show tables in $DATABASE2 like '*mv*'
@@ -2,6 +2,7 @@
---- QUERY
create table test_char_tmp (c char(5))
---- RESULTS
'Table has been created.'
====
---- QUERY
insert into test_char_tmp select cast("hello" as char(5))
@@ -64,6 +65,7 @@ string
---- QUERY
create table test_varchar_tmp (vc varchar(5))
---- RESULTS
'Table has been created.'
====
---- QUERY
insert into test_varchar_tmp values (cast("hello" as varchar(5)))
@@ -101,6 +103,7 @@ string
create table allchars
(cshort char(5), clong char(140), vc varchar(5))
---- RESULTS
'Table has been created.'
====
---- QUERY
insert into allchars values (cast("123456" as char(5)), cast("123456" as char(140)),
@@ -117,6 +120,7 @@ char,char,string
create table allchars_par
(cshort char(5), clong char(140), vc varchar(5)) stored as parquet
---- RESULTS
'Table has been created.'
====
---- QUERY
insert into allchars_par values (cast("123456" as char(5)), cast("123456" as char(140)),
@@ -193,6 +197,7 @@ test_char_nulls ( c20 char(20),
c120 char(120),
c140 char(140))
---- RESULTS
'Table has been created.'
====
---- QUERY
insert into test_char_nulls
@@ -609,6 +609,7 @@ STRING
# The table was not changed. Validate that the next compute incremental stats is a no-op.
compute incremental stats complextypestbl_part;
---- RESULTS
'No partitions selected for incremental stats update.'
---- ERRORS
No partitions selected for incremental stats update
====
@@ -70,6 +70,7 @@ STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE
# Adding a column shouldn't cause the stats to be dropped.
alter table alltypes add columns (new_col int)
---- RESULTS
'New column(s) have been added to the table.'
====
---- QUERY
show column stats alltypes
@@ -99,6 +100,7 @@ STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE
# HIVE-15075 is resolved.
alter table alltypes change new_col new_col2 int
---- RESULTS
'Column has been altered.'
====
---- QUERY
show column stats alltypes
@@ -126,6 +128,7 @@ STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE
# Removing a column shouldn't cause the stats to be dropped.
alter table alltypes drop column new_col2
---- RESULTS
'Column has been dropped.'
====
---- QUERY
show column stats alltypes
@@ -345,6 +348,7 @@ STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE
# Add partitions with NULL values and check for stats.
alter table alltypes add partition (year=NULL, month=NULL)
---- RESULTS
'New partition has been added to the table.'
====
---- QUERY
show column stats alltypes
@@ -370,6 +374,7 @@ STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE
---- QUERY
alter table alltypes add partition (year=2011, month=NULL)
---- RESULTS
'New partition has been added to the table.'
====
---- QUERY
show column stats alltypes
@@ -1025,9 +1030,13 @@ STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
insert into table empty_partitioned partition (j=2) select 1;
====
---- QUERY
drop stats empty_partitioned
---- RESULTS
'Stats have been dropped.'
====
---- QUERY
# Verify partition stats work with empty and non-empty partition.
drop stats empty_partitioned;
compute stats empty_partitioned;
compute stats empty_partitioned
---- RESULTS
'Updated 2 partition(s) and 1 column(s).'
---- TYPES
@@ -2,6 +2,7 @@
---- QUERY
create database $DATABASE_2 comment "For testing"
---- RESULTS
'Database has been created.'
====
---- QUERY
show databases like "$DATABASE_2"
@@ -15,11 +16,13 @@ STRING, STRING
# IF NOT EXISTS is specified.
create database if not exists $DATABASE_2
---- RESULTS
'Database already exists.'
====
---- QUERY
# Test dropping the database.
drop database $DATABASE_2
---- RESULTS
'Database has been dropped.'
====
---- QUERY
show databases like "$DATABASE_2"
@@ -31,6 +34,7 @@ STRING, STRING
# Dropping a non-existent databases is ok with IF EXISTS
drop database if exists $DATABASE_2
---- RESULTS
'Database has been dropped.'
====
---- QUERY
# Test DROP DATABASE ... CASCADE
@@ -50,6 +54,7 @@ create function if not exists $DATABASE_cascade.f1() returns string
create aggregate function if not exists $DATABASE_cascade.f2(int, string) RETURNS int
location '$FILESYSTEM_PREFIX/test-warehouse/libTestUdas.so' UPDATE_FN='TwoArgUpdate'
---- RESULTS
'Function has been created.'
====
---- QUERY
show tables in $DATABASE_cascade
@@ -80,6 +85,7 @@ STRING, STRING, STRING, STRING
# as the database itself.
drop database $DATABASE_cascade cascade
---- RESULTS
'Database has been dropped.'
====
---- QUERY
show databases like '$DATABASE_cascade'
@@ -99,6 +105,7 @@ STRING,STRING
---- QUERY
drop database $DATABASE_restrict restrict
---- RESULTS
'Database has been dropped.'
====
---- QUERY
show databases like '$DATABASE_restrict'
@@ -3,6 +3,7 @@
create table $DATABASE.temp_decimal_table like parquet
'$FILESYSTEM_PREFIX/test-warehouse/schemas/decimal.parquet'
---- RESULTS
'Table has been created.'
====
---- QUERY
describe $DATABASE.temp_decimal_table
@@ -17,6 +18,7 @@ STRING, STRING, STRING
create table $DATABASE.like_zipcodes_file like parquet
'$FILESYSTEM_PREFIX/test-warehouse/schemas/zipcode_incomes.parquet'
---- RESULTS
'Table has been created.'
====
---- QUERY
describe $DATABASE.like_zipcodes_file
@@ -33,6 +35,7 @@ STRING, STRING, STRING
create table $DATABASE.like_alltypestiny_file like parquet
'$FILESYSTEM_PREFIX/test-warehouse/schemas/alltypestiny.parquet'
---- RESULTS
'Table has been created.'
====
---- QUERY
describe $DATABASE.like_alltypestiny_file
@@ -59,6 +62,7 @@ create external table $DATABASE.like_enumtype_file like parquet
STORED AS PARQUET
LOCATION '$FILESYSTEM_PREFIX/test-warehouse/schemas/enum'
---- RESULTS
'Table has been created.'
====
---- QUERY
describe $DATABASE.like_enumtype_file
@@ -81,6 +85,7 @@ STRING, STRING
create table if not exists $DATABASE.like_alltypestiny_file like parquet
'$FILESYSTEM_PREFIX/test-warehouse/schemas/zipcode_incomes.parquet'
---- RESULTS
'Table already exists.'
====
---- QUERY
# Should not have changed since last statement was IF NOT EXISTS.
@@ -103,11 +108,13 @@ STRING, STRING, STRING
---- QUERY
drop table if exists allcomplextypes_clone
---- RESULTS
'Table does not exist.'
====
---- QUERY
create table allcomplextypes_clone like functional.allcomplextypes
stored as parquet
---- RESULTS
'Table has been created.'
====
---- QUERY
describe allcomplextypes_clone
@@ -133,15 +140,18 @@ STRING, STRING, STRING
---- QUERY
drop table allcomplextypes_clone
---- RESULTS
'Table has been dropped.'
====
---- QUERY
drop table if exists $DATABASE.temp_legacy_table
---- RESULTS
'Table does not exist.'
====
---- QUERY
create table $DATABASE.temp_legacy_table like parquet
'$FILESYSTEM_PREFIX/test-warehouse/schemas/legacy_nested.parquet'
---- RESULTS
'Table has been created.'
====
---- QUERY
describe $DATABASE.temp_legacy_table
@@ -162,15 +172,18 @@ STRING, STRING, STRING
---- QUERY
drop table if exists $DATABASE.temp_legacy_table
---- RESULTS
'Table has been dropped.'
====
---- QUERY
drop table if exists $DATABASE.temp_modern_table
---- RESULTS
'Table does not exist.'
====
---- QUERY
create table $DATABASE.temp_modern_table like parquet
'$FILESYSTEM_PREFIX/test-warehouse/schemas/modern_nested.parquet'
---- RESULTS
'Table has been created.'
====
---- QUERY
describe $DATABASE.temp_modern_table
@@ -191,6 +204,7 @@ STRING, STRING, STRING
---- QUERY
drop table if exists $DATABASE.temp_modern_table
---- RESULTS
'Table has been dropped.'
====
---- QUERY
# Test adding sort.columns when creating a table like a parquet file.
@@ -4,6 +4,7 @@
create table alltypes_test like functional_seq_snap.alltypes
stored as parquet
---- RESULTS
'Table has been created.'
====
---- QUERY
# Make sure no data exists for this table
@@ -35,6 +36,7 @@ BIGINT
# CREATE TABLE LIKE on a view
create table like_view like functional.view_view
---- RESULTS
'Table has been created.'
====
---- QUERY
describe like_view
@@ -67,6 +69,7 @@ BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
---- QUERY
create table like_view_parquet like functional.view_view stored as parquet
---- RESULTS
'Table has been created.'
====
---- QUERY
show table stats like_view_parquet
@@ -82,6 +85,7 @@ BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
create external table jointbl_rc_like like functional_rc_gzip.jointbl
location '$FILESYSTEM_PREFIX/test-warehouse/jointbl_rc_gzip'
---- RESULTS
'Table has been created.'
====
---- QUERY
# should get some results back
@@ -97,6 +101,7 @@ BIGINT, STRING, INT, INT
# CREATE TABLE LIKE on unpartitioned table.
create table jointbl_like like functional.jointbl
---- RESULTS
'Table has been created.'
====
---- QUERY
# Make sure the new table can be queried and no data exists for this table.
@@ -110,11 +115,13 @@ BIGINT
# No error is thrown when IF NOT EXISTS is specified and the table already exists.
create table if not exists jointbl_like like functional.jointbl
---- RESULTS
'Table already exists.'
====
---- QUERY
# IF NOT EXISTS also applies when the src table is the same as the new table.
create table if not exists jointbl_like like jointbl_like
---- RESULTS
'Table already exists.'
====
---- QUERY
insert overwrite table jointbl_like
@@ -155,6 +162,7 @@ create table no_avro_schema (
partitioned by (year int, month int)
stored as avro
---- RESULTS
'Table has been created.'
====
---- QUERY
describe no_avro_schema
@@ -182,6 +190,7 @@ STRING, STRING, STRING
# Test creating an Avro table without an Avro schema via CREATE TABLE LIKE (IMPALA-1813)
create table like_no_avro_schema like no_avro_schema stored as avro
---- RESULTS
'Table has been created.'
====
---- QUERY
describe like_no_avro_schema
@@ -208,10 +217,12 @@ STRING, STRING, STRING
---- QUERY
drop table like_no_avro_schema
---- RESULTS
'Table has been dropped.'
====
---- QUERY
drop table no_avro_schema
---- RESULTS
'Table has been dropped.'
====
---- QUERY
# Test setting sort.columns when using create table like.
@@ -2,6 +2,7 @@
---- QUERY
create table $DATABASE.testtbl(i int, s string COMMENT 'String col') STORED AS TEXTFILE
---- RESULTS
'Table has been created.'
====
---- QUERY
# Make sure creating a table with the same name doesn't throw an error when
@@ -13,6 +14,7 @@ ESCAPED BY '\\'
LINES TERMINATED BY '\n'
STORED AS TEXTFILE
---- RESULTS
'Table already exists.'
====
---- QUERY
show tables in $DATABASE
@@ -54,6 +56,7 @@ INT, STRING
---- QUERY
create table $DATABASE.testtbl_part(i int, s string) PARTITIONED BY (id int comment 'C')
---- RESULTS
'Table has been created.'
====
---- QUERY
# Partition columns are displayed as part of DESCRIBE <table>
@@ -105,6 +108,7 @@ STRING
# Make sure we create the table in the proper database after a "use"
create table testtbl2(f float, d double) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
---- RESULTS
'Table has been created.'
====
---- QUERY
show tables
@@ -118,6 +122,7 @@ STRING
---- QUERY
drop table testtbl2
---- RESULTS
'Table has been dropped.'
====
---- QUERY
show tables
@@ -134,6 +139,7 @@ with serdeproperties
('avro.schema.url'='$FILESYSTEM_PREFIX/test-warehouse/avro_schemas/functional/alltypes.json')
stored as avro
---- RESULTS
'Table has been created.'
====
---- QUERY
describe avro_alltypes_nopart
@@ -160,6 +166,7 @@ with serdeproperties
('avro.schema.url'='$FILESYSTEM_PREFIX/test-warehouse/avro_schemas/functional/alltypes.json')
stored as avro
---- RESULTS
'Table has been created.'
====
---- QUERY
describe avro_alltypes_part
@@ -183,18 +190,22 @@ STRING, STRING, STRING
---- QUERY
drop table avro_alltypes_part
---- RESULTS
'Table has been dropped.'
====
---- QUERY
drop table avro_alltypes_nopart
---- RESULTS
'Table has been dropped.'
====
---- QUERY
drop table testtbl
---- RESULTS
'Table has been dropped.'
====
---- QUERY
drop table testtbl_part
---- RESULTS
'Table has been dropped.'
====
---- QUERY
show tables
@@ -205,6 +216,7 @@ STRING
---- QUERY
drop table if exists non_existent_db.tbl
---- RESULTS
'Database does not exist: non_existent_db'
====
---- QUERY
# Test table creation with tblproperty and serdeproperty lengths just within limits
@@ -219,17 +231,20 @@ with serdeproperties(
'valuevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevalu
evaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevaluevalue'
)
---- RESULTS
'Table has been created.'
====
---- QUERY
# IMPALA-1740: Test setting the skip.header.line.count tblproperty
create table skip_header_test_a (i1 integer) tblproperties('skip.header.line.count'='2')
---- RESULTS
'Table has been created.'
====
---- QUERY
# IMPALA-1740: Test setting the skip.header.line.count tblproperty on a Parquet table
create table skip_header_test_d (i1 integer) stored as parquet
tblproperties('skip.header.line.count'='2')
---- RESULTS
'Table has been created.'
====
---- QUERY
# IMPALA-1740: Test setting an invalid skip.header.line.count tblproperty
@@ -18,6 +18,7 @@ STRING,STRING,INT,INT
# create new tables like the ones above to test inserting
create table tecn like functional.text_thorn_ecirc_newline;
---- RESULTS
'Table has been created.'
====
---- QUERY
# insert data into tecn table and check results
@@ -35,4 +36,4 @@ select * from tecn
'efg','xyz',3,4
---- TYPES
STRING,STRING,INT,INT
====
====
@@ -34,6 +34,7 @@ STRING,STRING,INT,INT
create table cbn like functional.text_comma_backslash_newline;
create table dhp like functional.text_dollar_hash_pipe;
---- RESULTS
'Table has been created.'
====
---- QUERY
# insert data into cbn table and check results
@@ -70,4 +71,4 @@ select * from dhp
'abc #$#$ abc','xyz $#$# xyz',5,6
---- TYPES
STRING,STRING,INT,INT
====
====
@@ -90,6 +90,7 @@ create table if not exists nested_structs (
map_array_map_struct_col
map<string, array<map<string, struct<f1:string, f2:int>>>>)
---- RESULTS
'Table has been created.'
====
---- QUERY
describe nested_structs
@@ -3,12 +3,21 @@
# Add functions and test function overloading and scoping.
create function $DATABASE.fn() RETURNS int
LOCATION '$FILESYSTEM_PREFIX/test-warehouse/libTestUdfs.so' SYMBOL='Fn'
---- RESULTS
'Function has been created.'
====
---- QUERY
create function $DATABASE.fn(int) RETURNS double
LOCATION '$FILESYSTEM_PREFIX/test-warehouse/libTestUdfs.so' SYMBOL='Fn'
====
---- QUERY
# Test IF NOT EXISTS
create function if not exists $DATABASE.fn(int) RETURNS double
LOCATION '$FILESYSTEM_PREFIX/test-warehouse/libTestUdfs.so' SYMBOL='Fn'
---- RESULTS
'Function already exists.'
====
---- QUERY
create function $DATABASE.fn(int, string) RETURNS int
LOCATION '$FILESYSTEM_PREFIX/test-warehouse/libTestUdfs.so' SYMBOL='Fn'
====
@@ -149,6 +158,8 @@ STRING
====
---- QUERY
drop function $DATABASE.fn2(int, string)
---- RESULTS
'Function has been dropped.'
====
---- QUERY
show functions
@@ -199,6 +210,11 @@ STRING, STRING, STRING, STRING
drop function fn()
====
---- QUERY
drop function if exists fn()
---- RESULTS
'Function does not exist.'
====
---- QUERY
show functions;
---- LABELS
return type, signature, binary type, is persistent
@@ -2,18 +2,22 @@
---- QUERY
create role grant_revoke_test_ALL_SERVER
---- RESULTS
'Role has been created.'
====
---- QUERY
create role grant_revoke_test_ALL_TEST_DB
---- RESULTS
'Role has been created.'
====
---- QUERY
create role grant_revoke_test_SELECT_INSERT_TEST_TBL
---- RESULTS
'Role has been created.'
====
---- QUERY
create role grant_revoke_test_ALL_URI
---- RESULTS
'Role has been created.'
====
---- QUERY
# Shows all roles in the system
@@ -489,10 +493,12 @@ root
REVOKE ROLE grant_revoke_test_ALL_URI FROM GROUP `$GROUP_NAME`;
REVOKE ROLE grant_revoke_test_SELECT_INSERT_TEST_TBL FROM GROUP `$GROUP_NAME`;
---- RESULTS
'Role has been revoked.'
====
---- QUERY
GRANT ROLE grant_revoke_test_ALL_SERVER TO GROUP `$GROUP_NAME`
---- RESULTS
'Role has been granted.'
====
---- QUERY
show current roles
@@ -505,10 +511,12 @@ STRING
# Create a table with multiple columns to test column-level security.
create table grant_rev_db.test_tbl3(a int, b int, c int, d int, e int) partitioned by (x int, y int)
---- RESULTS
'Table has been created.'
====
---- QUERY
GRANT SELECT (a, b, x) ON TABLE grant_rev_db.test_tbl3 TO grant_revoke_test_ALL_SERVER
---- RESULTS
'Privilege(s) have been granted.'
====
---- QUERY
show grant role grant_revoke_test_ALL_SERVER
@@ -525,6 +533,7 @@ STRING, STRING, STRING, STRING, STRING, STRING, BOOLEAN, STRING
---- QUERY
GRANT SELECT (c, d, y) ON TABLE grant_rev_db.test_tbl3 TO grant_revoke_test_ALL_SERVER
---- RESULTS
'Privilege(s) have been granted.'
====
---- QUERY
show grant role grant_revoke_test_ALL_SERVER
@@ -544,6 +553,7 @@ STRING, STRING, STRING, STRING, STRING, STRING, BOOLEAN, STRING
---- QUERY
GRANT SELECT (a, a, e, x) ON TABLE grant_rev_db.test_tbl3 TO grant_revoke_test_ALL_SERVER
---- RESULTS
'Privilege(s) have been granted.'
====
---- QUERY
show grant role grant_revoke_test_ALL_SERVER
@@ -565,6 +575,7 @@ STRING, STRING, STRING, STRING, STRING, STRING, BOOLEAN, STRING
# Revoke SELECT privileges from columns
REVOKE SELECT (a, b, b, y) ON TABLE grant_rev_db.test_tbl3 FROM grant_revoke_test_ALL_SERVER
---- RESULTS
'Privilege(s) have been revoked.'
====
---- QUERY
show grant role grant_revoke_test_ALL_SERVER
@@ -582,6 +593,7 @@ STRING, STRING, STRING, STRING, STRING, STRING, BOOLEAN, STRING
---- QUERY
REVOKE SELECT (a, b, c, x) ON TABLE grant_rev_db.test_tbl3 FROM grant_revoke_test_ALL_SERVER
---- RESULTS
'Privilege(s) have been revoked.'
====
---- QUERY
show grant role grant_revoke_test_ALL_SERVER
@@ -597,6 +609,7 @@ STRING, STRING, STRING, STRING, STRING, STRING, BOOLEAN, STRING
---- QUERY
REVOKE SELECT (a, b, c, d, e) ON TABLE grant_rev_db.test_tbl3 FROM grant_revoke_test_ALL_SERVER;
---- RESULTS
'Privilege(s) have been revoked.'
====
---- QUERY
show grant role grant_revoke_test_ALL_SERVER
@@ -613,6 +626,7 @@ GRANT ROLE grant_revoke_test_ROOT TO GROUP root;
GRANT SELECT ON TABLE grant_rev_db.test_tbl3 TO grant_revoke_test_ROOT;
REVOKE ALL ON DATABASE functional FROM grant_revoke_test_ROOT;
---- RESULTS
'Privilege(s) have been revoked.'
====
---- USER
root
@@ -635,17 +649,20 @@ User 'root' does not have privileges to execute: GRANT_PRIVILEGE
---- QUERY
REVOKE SELECT ON TABLE grant_rev_db.test_tbl3 FROM grant_revoke_test_ROOT
---- RESULTS
'Privilege(s) have been revoked.'
====
---- QUERY
# Grant SELECT on table to 'root' with 'WITH GRANT' option.
GRANT SELECT ON TABLE grant_rev_db.test_tbl3 TO grant_revoke_test_ROOT WITH GRANT OPTION
---- RESULTS
'Privilege(s) have been granted.'
====
---- USER
root
---- QUERY
GRANT SELECT (a) ON TABLE grant_rev_db.test_tbl3 TO grant_revoke_test_ROOT
---- RESULTS
'Privilege(s) have been granted.'
====
---- USER
root
@@ -662,6 +679,7 @@ STRING, STRING, STRING, STRING, STRING, STRING, BOOLEAN, STRING
---- QUERY
GRANT SELECT (a, c, e) ON TABLE grant_rev_db.test_tbl3 TO grant_revoke_test_ALL_SERVER WITH GRANT OPTION
---- RESULTS
'Privilege(s) have been granted.'
====
---- QUERY
show grant role grant_revoke_test_ALL_SERVER
@@ -678,6 +696,7 @@ STRING, STRING, STRING, STRING, STRING, STRING, BOOLEAN, STRING
---- QUERY
REVOKE GRANT OPTION FOR SELECT (a, c) ON TABLE grant_rev_db.test_tbl3 FROM grant_revoke_test_ALL_SERVER
---- RESULTS
'Privilege(s) have been revoked.'
====
---- QUERY
# TODO: Add a test case that exercises the cascading effect of REVOKE ALL.
@@ -699,6 +718,7 @@ revoke role grant_revoke_test_ALL_SERVER from group `$GROUP_NAME`
# Test 'grant all on server' with explicit server name specified.
create role grant_revoke_test_ALL_SERVER1
---- RESULTS
'Role has been created.'
====
---- QUERY
grant all on server server1 to grant_revoke_test_ALL_SERVER1
@@ -745,6 +765,7 @@ STRING, STRING, STRING, STRING, STRING, STRING, BOOLEAN, STRING
# to a table in the database
grant role grant_revoke_test_ALL_SERVER to group `$GROUP_NAME`
---- RESULTS
'Role has been granted.'
====
---- QUERY
create role grant_revoke_test_COLUMN_PRIV
@@ -800,12 +821,15 @@ STRING,STRING
---- QUERY
grant role grant_revoke_test_ALL_SERVER to group `$GROUP_NAME`
---- RESULTS
'Role has been granted.'
====
---- QUERY
drop database if exists grant_rev_db cascade
====
---- QUERY
revoke role grant_revoke_test_ALL_SERVER from group `$GROUP_NAME`
---- RESULTS
'Role has been revoked.'
====
---- QUERY
revoke role grant_revoke_test_COLUMN_PRIV from group `$GROUP_NAME`
@@ -818,4 +842,5 @@ drop role grant_revoke_test_ALL_URI;
drop role grant_revoke_test_ROOT;
drop role grant_revoke_test_COLUMN_PRIV;
---- RESULTS
'Role has been dropped.'
====
@@ -2,10 +2,12 @@
---- QUERY
create role grant_revoke_test_ALL_SERVER
---- RESULTS
'Role has been created.'
====
---- QUERY
create role grant_revoke_test_ALL_TEST_DB
---- RESULTS
'Role has been created.'
====
---- QUERY
show roles
@@ -156,6 +158,7 @@ does not have privileges to access: grant_rev_db.kudu_tbl
---- QUERY
grant select(a) on table grant_rev_db.kudu_tbl to grant_revoke_test_KUDU
---- RESULTS
'Privilege(s) have been granted.'
====
---- QUERY
grant ALL on table grant_rev_db.kudu_tbl to grant_revoke_test_KUDU
@@ -184,4 +187,5 @@ drop role grant_revoke_test_ALL_SERVER;
drop role grant_revoke_test_ALL_TEST_DB;
drop role grant_revoke_test_KUDU;
---- RESULTS
'Role has been dropped.'
====
@@ -2,6 +2,7 @@
---- QUERY
create table alltypessmall_hbase like functional_hbase.alltypessmall
---- RESULTS
'Table has been created.'
====
---- QUERY
compute incremental stats alltypessmall_hbase
@@ -6,6 +6,7 @@ create function if not exists twenty_one_args(int, int, int, int, int, int,
location '$FILESYSTEM_PREFIX/test-warehouse/libTestUdfs.so'
symbol='TwentyOneArgs';
---- RESULTS
'Function has been created.'
====
---- QUERY
# Regression test for IMPALA-6262: failure to initialize the output expressions
@@ -21,5 +22,6 @@ Cannot interpret native UDF 'twenty_one_args': number of arguments is more than
drop function twenty_one_args(int, int, int, int, int, int, int, int,
int, int, int, int, int, int, int, int, int, int, int, int, int);
---- RESULTS
'Function has been dropped.'
====
@@ -3,6 +3,7 @@
create database insert_permutation_test location
'$FILESYSTEM_PREFIX/test-warehouse/insert_permutation_test'
---- RESULTS
'Database has been created.'
====
---- QUERY
use insert_permutation_test
@@ -14,6 +15,7 @@ create table perm_part(int_col1 int, string_col string) partitioned by (p1 int,
create table parquet_part(int_col1 int, string_col string)
partitioned by (p1 int, p2 string) stored as parquet;
---- RESULTS
'Table has been created.'
====
---- QUERY
# Simple non-permutation
@@ -3,6 +3,7 @@
create table simple (id int primary key, name string, valf float, vali bigint)
partition by hash (id) partitions 3 stored as kudu
---- RESULTS
'Table has been created.'
====
---- QUERY
# Hash partitions cannot be enumerated as range partitions
@@ -44,6 +45,7 @@ ImpalaRuntimeException: Kudu table 'impala::$DATABASE.simple' does not exist on
---- QUERY
alter table simple rename to simple_new;
---- RESULTS
'Renaming was successful.'
====
---- QUERY
select count(*) from simple_new;
@@ -57,6 +59,7 @@ BIGINT
create table tbl_to_alter (id int primary key, name string null, vali bigint not null)
partition by range (id) (partition 1 < values <= 10) stored as kudu
---- RESULTS
'Table has been created.'
====
---- QUERY
# Verify partition layout
@@ -68,6 +71,7 @@ show range partitions tbl_to_alter;
# Add a range partition
alter table tbl_to_alter add range partition 10 < values <= 20
---- RESULTS
'Range partition has been added.'
====
---- QUERY
# Verify partition layout
@@ -93,6 +97,7 @@ INT,STRING,BIGINT
# Add a singleton range partition
alter table tbl_to_alter add range partition value = 100
---- RESULTS
'Range partition has been added.'
====
---- QUERY
# Verify partition layout
@@ -120,6 +125,7 @@ INT,STRING,BIGINT
# Add an unbounded range partition
alter table tbl_to_alter add range partition 1000 < values
---- RESULTS
'Range partition has been added.'
====
---- QUERY
# Verify partition layout
@@ -141,11 +147,13 @@ NonRecoverableException: New range partition conflicts with existing range parti
# to hide the error
alter table tbl_to_alter add if not exists range partition 10 < values <= 30
---- RESULTS
'Range partition has been added.'
====
---- QUERY
# Drop one of the recently inserted partitions
alter table tbl_to_alter drop range partition value = 100
---- RESULTS
'Range partition has been dropped.'
====
---- QUERY
# Verify partition layout
@@ -167,6 +175,7 @@ INT,STRING,BIGINT
# Drop an existing range partition
alter table tbl_to_alter drop range partition 11 <= values < 21
---- RESULTS
'Range partition has been dropped.'
====
---- QUERY
# Verify partition layout
@@ -180,6 +189,7 @@ show range partitions tbl_to_alter;
alter table tbl_to_alter drop range partition 1 < values <= 10;
alter table tbl_to_alter drop range partition 1000 < values
---- RESULTS
'Range partition has been dropped.'
====
---- QUERY
# Verify partition layout
@@ -209,6 +219,7 @@ alter table tbl_to_alter add range partition 1 < values <= 20;
alter table tbl_to_alter add columns (new_col1 int not null default 10,
new_col2 bigint not null default 1000)
---- RESULTS
'Column has been added/replaced.'
====
---- QUERY
# Verify partition layout
@@ -261,6 +272,7 @@ INT,STRING,BIGINT,INT,BIGINT
# Add nullable columns: with and without a default
alter table tbl_to_alter add columns (new_col3 string null, new_col4 int null default -1)
---- RESULTS
'Column has been added/replaced.'
====
---- QUERY
# Add a row
@@ -307,6 +319,7 @@ A new non-null column must have a default value
# Drop a column
alter table tbl_to_alter drop column vali
---- RESULTS
'Column has been dropped.'
====
---- QUERY
# Retrieve table rows after column got dropped
@@ -330,6 +343,7 @@ NonRecoverableException: cannot remove a key column
# Rename a column
alter table tbl_to_alter change column new_col3 last_name string
---- RESULTS
'Column has been altered.'
====
---- QUERY
# Ensure the renamed column is accessible
@@ -355,6 +369,7 @@ BIGINT
# Rename the Impala table
alter table tbl_to_alter rename to kudu_tbl_to_alter
---- RESULTS
'Renaming was successful.'
====
---- QUERY
# Ensure the Impala table is accessible after it got renamed
@@ -421,6 +436,7 @@ partition by range (
cast('2009-01-02 00:00:00' as timestamp)
) stored as kudu
---- RESULTS
'Table has been created.'
====
---- QUERY
show range partitions ts_ranges
@@ -434,6 +450,7 @@ alter table ts_ranges add range partition
cast('2009-01-02 00:00:00' as timestamp) <= VALUES <
cast('2009-01-03 00:00:00' as timestamp)
---- RESULTS
'Range partition has been added.'
====
---- QUERY
show range partitions ts_ranges
@@ -448,6 +465,7 @@ alter table ts_ranges drop range partition
cast('2009-01-02 00:00:00' as timestamp) <= VALUES <
cast('2009-01-03 00:00:00' as timestamp)
---- RESULTS
'Range partition has been dropped.'
====
---- QUERY
show range partitions ts_ranges
@@ -84,6 +84,7 @@ create table tab (a int not null primary key)
partition by range (a) (partition value = false)
stored as kudu
---- RESULTS
'Table has been created.'
====
---- QUERY
# Invalid hostname
@@ -107,6 +108,7 @@ Couldn't resolve this master's address bogus.host.name:7051
create table tdata_master_addresses_whitespace (id int primary key) stored as kudu
tblproperties('kudu.master_addresses' = ' localhost ')
---- RESULTS
'Table has been created.'
====
---- QUERY
insert into tdata_master_addresses_whitespace values (0), (1)
@@ -120,6 +122,7 @@ create table ignore_column_case (Id int, NAME string, vAlf float, vali bigint,
primary key (Id, NAME)) PARTITION BY RANGE (PARTITION VALUE = (1, 'Martin'))
STORED AS KUDU
---- RESULTS
'Table has been created.'
====
---- QUERY
insert into ignore_column_case values (1, 'Martin', 1.0, 10);
@@ -145,6 +148,7 @@ create table tbl_with_null_defaults (x int primary key, i1 tinyint default null,
valdec8 decimal(18) default null, valdec16 decimal(38) default null)
partition by hash (x) partitions 3 stored as kudu
---- RESULTS
'Table has been created.'
====
---- QUERY
insert into tbl_with_null_defaults (x) values (1);
@@ -181,6 +185,7 @@ partition by range (
partition '2009-01-03 00:00:00' <= VALUES
) stored as kudu
---- RESULTS
'Table has been created.'
====
---- QUERY
show range partitions ts_ranges
@@ -236,6 +241,7 @@ create table ts_default (i int primary key, ts1 timestamp,
ts2 timestamp default cast('2009-01-01 00:00:00' as timestamp))
partition by hash(i) partitions 3 stored as kudu
---- RESULTS
'Table has been created.'
====
---- QUERY
insert into ts_default (i) values (1);
@@ -267,6 +273,7 @@ INT,TIMESTAMP,TIMESTAMP
create table unpartitioned_kudu_table (col0 bigint primary key, col1 string)
stored as kudu
---- RESULTS
'Table has been created.'
---- ERRORS
Unpartitioned Kudu tables are inefficient for large data sizes.
====
@@ -312,6 +319,7 @@ create table create_decimal
primary key (decimal_4))
stored as kudu;
---- RESULTS
'Table has been created.'
====
---- QUERY
# Create as select table with decimal columns and primary key
@@ -327,4 +335,4 @@ select * from ctas_decimal;
132842,333,12345.6789000000,0.12345678900000000000000000000000000000,0.77889,1
---- TYPES
DECIMAL,DECIMAL,DECIMAL,DECIMAL,DECIMAL,DECIMAL
====
====
@@ -8,6 +8,7 @@ create table tdata
PARTITION BY RANGE (PARTITION VALUES < 100, PARTITION 100 <= VALUES < 1000,
PARTITION 1000 <= VALUES <= 10000) STORED AS KUDU
---- RESULTS
'Table has been created.'
====
---- QUERY
insert into table tdata values
@@ -385,6 +386,7 @@ STRING,BIGINT,TINYINT,SMALLINT,BOOLEAN,INT,DOUBLE,FLOAT
create table impala_3454 (key_1 tinyint, key_2 bigint, PRIMARY KEY (key_1, key_2))
PARTITION BY HASH PARTITIONS 3 STORED AS KUDU
---- RESULTS
'Table has been created.'
====
---- QUERY
insert into impala_3454 values
@@ -8,6 +8,7 @@ create table tdata
PARTITION BY RANGE (PARTITION VALUES < 10, PARTITION 10 <= VALUES < 30,
PARTITION 30 <= VALUES) STORED AS KUDU
---- RESULTS
'Table has been created.'
====
---- QUERY
# VALUES, single row, all target cols, no errors
@@ -295,6 +296,7 @@ create table allkeytypes (i1 tinyint, i2 smallint, i3 int, i4 bigint, name strin
partition value = (2,2,2,2,'2','2009-01-01 00:02:00.100000000'),
partition value = (3,3,3,3,'3','2009-01-01 00:03:00.300000000')) stored as kudu
---- RESULTS
'Table has been created.'
====
---- QUERY
insert into allkeytypes select cast(id as tinyint), smallint_col, int_col,
@@ -325,6 +327,7 @@ create table tbl_with_defaults (a int primary key, b int null default 10,
i decimal(9, 2) default 1111.11) partition by hash (a)
partitions 3 stored as kudu
---- RESULTS
'Table has been created.'
====
---- QUERY
insert into tbl_with_defaults (a, f) values (1, 1), (2, 2), (3, 3), (4, 4)
@@ -360,6 +363,7 @@ INT,INT,INT,INT,INT,INT,STRING,BOOLEAN,DECIMAL
---- QUERY
alter table tbl_with_defaults add columns (j int null, k int not null default 10000)
---- RESULTS
'Column has been added/replaced.'
====
---- QUERY
select * from tbl_with_defaults
@@ -421,6 +425,7 @@ INT,INT,INT,INT,INT,INT,STRING,BOOLEAN,DECIMAL,INT,INT
create table multiple_partition_cols (x bigint, y bigint, z string, primary key(x, y))
partition by hash(x, y) partitions 8 stored as kudu
---- RESULTS
'Table has been created.'
====
---- QUERY
# SELECT with constant
@@ -5,6 +5,7 @@ create table simple_hash (id int, name string, valf float, vali bigint,
primary key (id, name)) partition by hash(id) partitions 4,
hash(name) partitions 2 stored as kudu
---- RESULTS
'Table has been created.'
====
---- QUERY
show table stats simple_hash
@@ -29,6 +30,7 @@ create table range_part_bounds (id int, name string, valf float, vali bigint,
(partition values <= 10, partition 10 < values <= 20, partition 20 < values)
stored as kudu
---- RESULTS
'Table has been created.'
====
---- QUERY
show table stats range_part_bounds
@@ -48,6 +50,7 @@ create table range_part_single (id int, name string, valf float, vali bigint,
(partition value = 1, partition value = 10, partition value = 100)
stored as kudu
---- RESULTS
'Table has been created.'
====
---- QUERY
show table stats range_part_single
@@ -68,6 +71,7 @@ create table range_part_multiple_bounds (id int, name string, valf float,
(partition values <= 10, partition 10 < values <= 20, partition 20 < values <= 30,
partition value = 40, partition value = 50) stored as kudu
---- RESULTS
'Table has been created.'
====
---- QUERY
show table stats range_part_multiple_bounds
@@ -89,6 +93,7 @@ create table range_part_multiple_cols (id int, name string, valf float, vali big
(partition value = (10, 'martin'), partition value = (20, 'dimitris'),
partition value = (30, 'matthew')) stored as kudu
---- RESULTS
'Table has been created.'
====
---- QUERY
-- Test printing of multiple column range partitioning
@@ -116,6 +121,7 @@ create table range_part_single_string_col (id int, name string, valf float,
(partition values <= 'aaa', partition 'aaa' < values <= 'bbb',
partition 'bbb' < values <= 'ccc', partition value = 'ddd') stored as kudu
---- RESULTS
'Table has been created.'
====
---- QUERY
show table stats range_part_single_string_col
@@ -135,6 +141,7 @@ create table simple_hash_range (id int, name string, valf float, vali bigint,
primary key (id, name)) partition by hash(id) partitions 4, range(id, name)
(partition value = (10, 'martin'), partition value = (20, 'alex')) stored as kudu
---- RESULTS
'Table has been created.'
====
---- QUERY
show table stats simple_hash_range
@@ -182,6 +189,7 @@ INT,STRING,STRING,STRING,INT
create table simple_hash_all_columns (id int, name string, valf float, vali bigint,
primary key (id, name)) partition by hash partitions 4 stored as kudu
---- RESULTS
'Table has been created.'
====
---- QUERY
show table stats simple_hash_all_columns
@@ -202,6 +210,7 @@ create table simple_range_all_columns (id int, name string, valf float, vali big
(partition value = (1, 'a'), partition value = (2, 'b'))
stored as kudu
---- RESULTS
'Table has been created.'
====
---- QUERY
show table stats simple_range_all_columns
@@ -220,6 +229,7 @@ create table range_complex_const_boundary_vals (x int, y int, primary key (x))
partition factorial(4) < values < factorial(5), partition value = factorial(6))
stored as kudu
---- RESULTS
'Table has been created.'
====
---- QUERY
show table stats range_complex_const_boundary_vals
@@ -4,6 +4,7 @@ create table simple (id int primary key, name string, valf float, vali bigint)
partition by range (partition values < 10, partition 10 <= values < 30,
partition 30 <= values) stored as kudu tblproperties('kudu.num_tablet_replicas' = '1')
---- RESULTS
'Table has been created.'
====
---- QUERY
show table stats simple
@@ -8,6 +8,7 @@ create table tdata
PARTITION BY RANGE (PARTITION VALUES < 10, PARTITION 10 <= VALUES < 30,
PARTITION 30 <= VALUES <= 10000) STORED AS KUDU
---- RESULTS
'Table has been created.'
====
---- QUERY
insert into tdata values
@@ -7,6 +7,7 @@ create table tdata
PARTITION BY RANGE (PARTITION VALUES < 10, PARTITION 10 <= VALUES < 30,
PARTITION 30 <= VALUES) STORED AS KUDU
---- RESULTS
'Table has been created.'
====
---- QUERY
insert into table tdata values
@@ -6,6 +6,7 @@ location '$FILESYSTEM_PREFIX/test-warehouse/libTestUdfs.so' symbol='NoArgs';
create function no_args2() returns string
location '$FILESYSTEM_PREFIX/test-warehouse/udf_test/libTestUdfs.so' symbol='NoArgs';
---- RESULTS
'Function has been created.'
====
---- QUERY
select no_args();
@@ -3,11 +3,13 @@
alter table functional.test_load add partition
(year=2009, month=1)
---- RESULTS
'New partition has been added to the table.'
====
---- QUERY
alter table functional.test_load add partition
(year=2010, month=1)
---- RESULTS
'New partition has been added to the table.'
====
---- QUERY
# Insert some data into one of the partitions, used to verify we are not clobbering
@@ -5,6 +5,7 @@ create external table tbl
row format delimited fields terminated by ','
location 'file://$IMPALA_HOME/testdata/data/local_tbl'
---- RESULTS
'Table has been created.'
====
---- QUERY
describe tbl
@@ -157,7 +157,7 @@ STRING, STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
# Tests no matching partition.
alter table p1 partition (j=100) set location '$FILESYSTEM_PREFIX/test-warehouse/newtable';
---- RESULTS
# TODO: IMPALA-6775
'New location has been set.'
==== QUERY
# Check nothing was updated.
show partitions p1
@@ -204,4 +204,4 @@ alter table p1 partition (j=100) set row format delimited fields terminated by '
'Updated 0 partition(s).'
---- TYPES
STRING
====
====
@@ -29,6 +29,7 @@ BIGINT, STRING, INT, INT
---- QUERY
alter table jointbl_test add columns(new_col string)
---- RESULTS
'New column(s) have been added to the table.'
====
---- QUERY
select * from jointbl_test
@@ -58,6 +59,7 @@ BIGINT, STRING, INT, INT, STRING
---- QUERY
alter table jointbl_test add columns(new_int_col int)
---- RESULTS
'New column(s) have been added to the table.'
====
---- QUERY
select * from jointbl_test
@@ -87,6 +89,7 @@ BIGINT, STRING, INT, INT, STRING, INT
---- QUERY
alter table jointbl_test drop column new_int_col
---- RESULTS
'Column has been dropped.'
====
---- QUERY
select * from jointbl_test
@@ -116,10 +119,12 @@ BIGINT, STRING, INT, INT, STRING
---- QUERY
alter table jointbl_test drop column new_col
---- RESULTS
'Column has been dropped.'
====
---- QUERY
alter table jointbl_test drop column alltypes_id
---- RESULTS
'Column has been dropped.'
====
---- QUERY
select * from jointbl_test
@@ -166,6 +166,7 @@ STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE
|
||||
# TRUNCATE IF EXISTS does not fail on non existent table
|
||||
truncate table if exists non_existent;
|
||||
---- RESULTS
|
||||
'Table does not exist.'
|
||||
====
|
||||
---- QUERY
|
||||
# Create an unpartitioned table.
|
||||
@@ -182,6 +183,7 @@ BIGINT
|
||||
# TRUNCATE IF EXISTS base scenario
|
||||
truncate table if exists t3;
|
||||
---- RESULTS
|
||||
'Table has been truncated.'
|
||||
====
|
||||
---- QUERY
|
||||
# Verify that truncate was successful
|
||||
|
||||
@@ -4,6 +4,7 @@ create function if not exists hive_pi() returns double
|
||||
location '$FILESYSTEM_PREFIX/test-warehouse/hive-exec.jar'
|
||||
symbol='org.apache.hadoop.hive.ql.udf.UDFPI';
|
||||
---- RESULTS
|
||||
'Function has been created.'
|
||||
====
|
||||
---- QUERY
|
||||
create function if not exists foo() returns double
|
||||
@@ -33,6 +34,7 @@ create function if not exists twenty_args(int, int, int, int, int, int,
|
||||
location '$FILESYSTEM_PREFIX/test-warehouse/libTestUdfs.so'
|
||||
symbol='TwentyArgs';
|
||||
---- RESULTS
|
||||
'Function has been created.'
|
||||
====
|
||||
---- QUERY
|
||||
# Verifies that interpretation can support up to 20 arguments
|
||||
@@ -49,6 +51,7 @@ create function if not exists twenty_one_args(int, int, int, int, int, int,
|
||||
location '$FILESYSTEM_PREFIX/test-warehouse/libTestUdfs.so'
|
||||
symbol='TwentyOneArgs';
|
||||
---- RESULTS
|
||||
'Function has been created.'
|
||||
====
|
||||
---- QUERY
|
||||
# Verifies that interpretation fails with more than 20 arguments.
|
||||
@@ -63,6 +66,7 @@ create function if not exists nine_args_ir(int, int, int, int, int, int,
|
||||
location '$FILESYSTEM_PREFIX/test-warehouse/test-udfs.ll'
|
||||
symbol='NineArgs';
|
||||
---- RESULTS
|
||||
'Function has been created.'
|
||||
====
|
||||
---- QUERY
|
||||
select nine_args_ir(1,2,3,4,5,6,7,8,9);
|
||||
@@ -74,12 +78,14 @@ create function if not exists bad_expr(double) returns boolean
|
||||
location '$FILESYSTEM_PREFIX/test-warehouse/libTestUdfs.so'
|
||||
symbol='BadExpr' prepare_fn='BadExprPrepare' close_fn='BadExprClose';
|
||||
---- RESULTS
|
||||
'Function has been created.'
|
||||
====
|
||||
---- QUERY
|
||||
create function if not exists bad_expr2(double) returns boolean
|
||||
location '$FILESYSTEM_PREFIX/test-warehouse/libTestUdfs.so'
|
||||
symbol='BadExpr' prepare_fn='BadExpr2Prepare' close_fn='BadExprClose';
|
||||
---- RESULTS
|
||||
'Function has been created.'
|
||||
====
|
||||
---- QUERY
|
||||
select count(t1.int_col) from functional.alltypes t1 join functional.alltypes t2
|
||||
@@ -120,4 +126,5 @@ drop function nine_args_ir(int, int, int, int, int, int, int, int, int);
|
||||
drop function bad_expr(double);
|
||||
drop function bad_expr2(double);
|
||||
---- RESULTS
|
||||
'Function has been dropped.'
|
||||
====
|
||||
|
||||
@@ -4,12 +4,14 @@
|
||||
create view $DATABASE.simple_view as
|
||||
select * from functional.alltypes
|
||||
---- RESULTS
|
||||
'View has been created.'
|
||||
====
|
||||
---- QUERY
|
||||
# Test that 'if not exists' swallows the error (view already exists)
|
||||
create view if not exists $DATABASE.simple_view as
|
||||
select * from functional.alltypesagg
|
||||
---- RESULTS
|
||||
'View already exists.'
|
||||
====
|
||||
---- QUERY
|
||||
# Create another simple view with 'if not exists' on a subset of
|
||||
@@ -18,12 +20,14 @@ create view if not exists
|
||||
$DATABASE.simple_view_sub (x, y comment 'hello', z) as
|
||||
select int_col, string_col, timestamp_col from functional.alltypes
|
||||
---- RESULTS
|
||||
'View has been created.'
|
||||
====
|
||||
---- QUERY
|
||||
# Create a view on a parquet table (Hive cannot create/read/write parquet)
|
||||
create view $DATABASE.parquet_view as
|
||||
select * from functional_parquet.alltypes where id < 20
|
||||
---- RESULTS
|
||||
'View has been created.'
|
||||
====
|
||||
---- QUERY
|
||||
# Create a complex view with predicates, joins, aggregates and order by
|
||||
@@ -34,12 +38,14 @@ on a.id = b.id where a.bigint_col < 50
|
||||
group by b.string_col having count(a.bigint_col) > 1
|
||||
order by b.string_col limit 100
|
||||
---- RESULTS
|
||||
'View has been created.'
|
||||
====
|
||||
---- QUERY
|
||||
# Create a view on a view
|
||||
create view $DATABASE.view_view (aaa, bbb) as
|
||||
select * from $DATABASE.complex_view
|
||||
---- RESULTS
|
||||
'View has been created.'
|
||||
====
|
||||
---- QUERY
|
||||
# Test that the views are displayed by 'show tables'
|
||||
@@ -155,6 +161,7 @@ bigint
|
||||
# Test dropping a view
|
||||
drop view $DATABASE.simple_view_sub
|
||||
---- RESULTS
|
||||
'View has been dropped.'
|
||||
====
|
||||
---- QUERY
|
||||
# Test that the view is gone
|
||||
@@ -169,15 +176,18 @@ show tables in $DATABASE
|
||||
# Test 'if exists' for dropping a view (view does not exist)
|
||||
drop view if exists $DATABASE.bad_view
|
||||
---- RESULTS
|
||||
'View does not exist.'
|
||||
====
|
||||
---- QUERY
|
||||
# Test 'if exists' does not drop a table with same name
|
||||
create table $DATABASE.drop_tbl_test(a int)
|
||||
---- RESULTS
|
||||
'Table has been created.'
|
||||
====
|
||||
---- QUERY
|
||||
drop view if exists $DATABASE.drop_tbl_test
|
||||
---- RESULTS
|
||||
'Drop view is not allowed on a table.'
|
||||
====
|
||||
---- QUERY
|
||||
# Test drop table 'if exists' does not drop a view with same name.
|
||||
@@ -185,6 +195,7 @@ drop view if exists $DATABASE.drop_tbl_test
|
||||
# still be listed in the subsequent show tables output (as a view).
|
||||
drop table if exists $DATABASE.complex_view
|
||||
---- RESULTS
|
||||
'Drop table is not allowed on a view.'
|
||||
====
|
||||
---- QUERY
|
||||
# Test that the table is present
|
||||
@@ -200,11 +211,13 @@ show tables in $DATABASE
|
||||
# Test renaming a view
|
||||
alter view $DATABASE.view_view rename to $DATABASE.view_on_view
|
||||
---- RESULTS
|
||||
'Renaming was successful.'
|
||||
====
|
||||
---- QUERY
|
||||
# Test renaming a parquet view
|
||||
alter view $DATABASE.parquet_view rename to $DATABASE.new_parquet_view
|
||||
---- RESULTS
|
||||
'Renaming was successful.'
|
||||
====
|
||||
---- QUERY
|
||||
# Test that the view was renamed
|
||||
@@ -222,6 +235,7 @@ alter view $DATABASE.new_parquet_view as
|
||||
select bigint_col, string_col from functional_parquet.alltypesagg
|
||||
where bigint_col is null limit 10
|
||||
---- RESULTS
|
||||
'View has been altered.'
|
||||
====
|
||||
---- QUERY
|
||||
# Test querying the altered view
|
||||
@@ -236,6 +250,7 @@ bigint,bigint
|
||||
create view $DATABASE.const_view
|
||||
as select 1, 'a', cast(10.0 as float)
|
||||
---- RESULTS
|
||||
'View has been created.'
|
||||
====
|
||||
---- QUERY
|
||||
select * from $DATABASE.const_view
|
||||
@@ -251,6 +266,7 @@ create view $DATABASE.paren_view as
|
||||
select count(*) from functional.alltypessmall
|
||||
where true and (true or false) and false
|
||||
---- RESULTS
|
||||
'View has been created.'
|
||||
====
|
||||
---- QUERY
|
||||
# Test that parentheses are preserved in view creation.
|
||||
@@ -265,6 +281,7 @@ bigint
|
||||
create view $DATABASE.decimal_view as
|
||||
select * from functional.decimal_tbl
|
||||
---- RESULTS
|
||||
'View has been created.'
|
||||
====
|
||||
---- QUERY
|
||||
# Query a view with decimal columns. Regression test for IMPALA-1021.