impala/testdata/workloads/functional-query/queries/QueryTest/iceberg-negative.test
Noemi Pap-Takacs 2d3289027c IMPALA-12406: OPTIMIZE statement as an alias for INSERT OVERWRITE
If an Iceberg table is frequently updated or written to in small batches,
many small files are created, which decreases read performance.
Similarly, frequent row-level deletes contribute to this problem
by creating delete files that have to be merged on read.

So far, INSERT OVERWRITE (rewriting the table with itself) has been used
to compact Iceberg tables.
However, it comes with some restrictions:
- The table should not have multiple partition specs/partition evolution.
- The table should not contain complex types.

The OPTIMIZE statement offers a new, Iceberg-only syntax for compacting
tables to enhance read performance for subsequent operations.
See IMPALA-12293 for details.

Syntax: OPTIMIZE TABLE <table_name>;

This first patch introduces the new syntax, temporarily as an alias
for INSERT OVERWRITE.
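
A minimal sketch of the intended usage (my_ice_tbl is a hypothetical
table name); for now, the statement behaves like rewriting the table
with itself:

  OPTIMIZE TABLE my_ice_tbl;
  -- currently equivalent to:
  INSERT OVERWRITE my_ice_tbl SELECT * FROM my_ice_tbl;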

Note that executing OPTIMIZE TABLE requires ALL privileges.

Testing:
 - negative tests
 - FE planner test
 - Ranger test
 - E2E tests

Change-Id: Ief42537499ffe64fafdefe25c8d175539234c4e7
Reviewed-on: http://gerrit.cloudera.org:8080/20405
Reviewed-by: Impala Public Jenkins <impala-public-jenkins@cloudera.com>
Tested-by: Impala Public Jenkins <impala-public-jenkins@cloudera.com>
2023-09-28 15:31:20 +00:00

====
---- QUERY
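# A managed Iceberg table must declare at least one column.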
CREATE TABLE iceberg_test
STORED AS ICEBERG;
---- CATCH
AnalysisException: Table requires at least 1 column for managed iceberg table.
====
---- QUERY
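# Partition transforms must reference existing source columns; there is no event_time column.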
CREATE TABLE iceberg_test(
level STRING
)
PARTITIONED BY SPEC(level, hour(event_time))
STORED AS ICEBERG;
---- CATCH
AnalysisException: Cannot find source column: event_time
====
---- QUERY
CREATE TABLE non_iceberg_table_with_spec (i INT)
PARTITIONED BY SPEC (i);
---- CATCH
AnalysisException: PARTITIONED BY SPEC is only valid for Iceberg tables.
====
---- QUERY
CREATE TABLE iceberg_table_hadoop_tables(
level STRING
)
STORED AS ICEBERG
TBLPROPERTIES('iceberg.catalog'='hadoop.tables');
====
---- QUERY
# iceberg_non_partitioned is not partitioned
SHOW PARTITIONS functional_parquet.iceberg_non_partitioned
---- CATCH
AnalysisException: Table is not partitioned: functional_parquet.iceberg_non_partitioned
====
---- QUERY
CREATE TABLE iceberg_table_hadoop_tables_cat_loc(i INT)
STORED AS ICEBERG
TBLPROPERTIES('iceberg.catalog'='hadoop.tables',
'iceberg.catalog_location'='$WAREHOUSE_LOCATION_PREFIX/test-warehouse/cat_loc')
---- CATCH
iceberg.catalog_location cannot be set for Iceberg table stored in hadoop.tables
====
---- QUERY
CREATE TABLE iceberg_table_hadoop_catalog(
level STRING
)
STORED AS ICEBERG
LOCATION '$WAREHOUSE_LOCATION_PREFIX/test-warehouse/$DATABASE/hadoop_catalog_test/iceberg_test'
TBLPROPERTIES('iceberg.catalog'='hadoop.catalog');
---- CATCH
AnalysisException: Location cannot be set for Iceberg table with 'hadoop.catalog'.
====
---- QUERY
CREATE TABLE iceberg_table_hadoop_catalog(
level STRING
)
STORED AS ICEBERG
TBLPROPERTIES('iceberg.catalog'='hadoop.catalog');
---- CATCH
AnalysisException: Table property 'iceberg.catalog_location' is necessary for Iceberg table with 'hadoop.catalog'.
====
---- QUERY
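# LOCATION cannot be set with 'hadoop.catalog', not even for EXTERNAL tables.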
CREATE EXTERNAL TABLE iceberg_external_table_hadoop_catalog
STORED AS ICEBERG
LOCATION '$WAREHOUSE_LOCATION_PREFIX/test-warehouse/$DATABASE/hadoop_catalog_test/iceberg_test'
TBLPROPERTIES('iceberg.catalog'='hadoop.catalog',
'iceberg.catalog_location'='$WAREHOUSE_LOCATION_PREFIX/test-warehouse/fake_table',
'iceberg.table_identifier'='fake_db.fake_table');
---- CATCH
AnalysisException: Location cannot be set for Iceberg table with 'hadoop.catalog'.
====
---- QUERY
CREATE EXTERNAL TABLE iceberg_table_hadoop_catalog
STORED AS ICEBERG
TBLPROPERTIES('iceberg.catalog'='hadoop.catalog',
'iceberg.table_identifier'='fake_db.fake_table');
---- CATCH
AnalysisException: Table property 'iceberg.catalog_location' is necessary for Iceberg table with 'hadoop.catalog'.
====
---- QUERY
CREATE EXTERNAL TABLE iceberg_hadoop_tbls_external(
level STRING
)
STORED AS ICEBERG
TBLPROPERTIES('iceberg.catalog'='hadoop.tables');
---- CATCH
Set LOCATION for external Iceberg tables stored in hadoop.tables. For creating a completely new Iceberg table, use 'CREATE TABLE' (no EXTERNAL keyword).
====
---- QUERY
CREATE EXTERNAL TABLE iceberg_hive_tbls_external(
level STRING
)
STORED AS ICEBERG;
---- CATCH
Cannot create EXTERNAL Iceberg table in the Hive Catalog.
====
---- QUERY
CREATE EXTERNAL TABLE iceberg_hive_tbls_external(
level STRING
)
STORED AS ICEBERG
TBLPROPERTIES ('iceberg.table_identifier'='functional_parquet.iceberg_partitioned');
---- CATCH
Cannot create EXTERNAL Iceberg table in the Hive Catalog.
====
---- QUERY
CREATE EXTERNAL TABLE iceberg_hive_tbls_external(
level STRING
)
STORED AS ICEBERG
TBLPROPERTIES ('iceberg.catalog'='hive.catalog',
'iceberg.table_identifier'='functional_parquet.iceberg_partitioned');
---- CATCH
Cannot create EXTERNAL Iceberg table in the Hive Catalog.
====
---- QUERY
CREATE TABLE iceberg_hive_alias(
level STRING
)
STORED AS ICEBERG
TBLPROPERTIES ('iceberg.table_identifier'='functional_parquet.iceberg_mixed_file_format');
---- CATCH
AlreadyExistsException: Table already exists: functional_parquet.iceberg_mixed_file_format
====
---- QUERY
CREATE EXTERNAL TABLE iceberg_hive_tbls_external(
level STRING
)
STORED AS ICEBERG
TBLPROPERTIES ('iceberg.catalog'='ice_hive_catalog',
'iceberg.table_identifier'='functional_parquet.iceberg_partitioned');
---- CATCH
Cannot create EXTERNAL Iceberg table in the Hive Catalog.
====
---- QUERY
CREATE EXTERNAL TABLE fake_iceberg_table_hadoop_catalog
STORED AS ICEBERG
TBLPROPERTIES('iceberg.catalog'='hadoop.catalog',
'iceberg.catalog_location'='$WAREHOUSE_LOCATION_PREFIX/test-warehouse/fake_table',
'iceberg.table_identifier'='fake_db.fake_table');
SHOW CREATE TABLE fake_iceberg_table_hadoop_catalog;
---- CATCH
row_regex:.*CAUSED BY: IcebergTableLoadingException: Table does not exist: fake_db.fake_table.*
====
---- QUERY
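# Overwriting a BUCKET-partitioned table is rejected: which buckets get replaced is not predictable.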
CREATE TABLE iceberg_overwrite_bucket (i int, j int)
PARTITIONED BY SPEC (bucket(3, i))
STORED AS ICEBERG;
INSERT OVERWRITE iceberg_overwrite_bucket values (1, 2);
---- CATCH
AnalysisException: The Iceberg table has BUCKET partitioning. The outcome of static partition overwrite is unforeseeable.
====
---- QUERY
INSERT OVERWRITE iceberg_overwrite_bucket SELECT 1, j FROM iceberg_overwrite_bucket;
---- CATCH
AnalysisException: The Iceberg table has BUCKET partitioning. The outcome of dynamic partition overwrite is unforeseeable with the given select list, only '*' allowed.
====
---- QUERY
INSERT OVERWRITE iceberg_overwrite_bucket SELECT * FROM iceberg_overwrite_bucket WHERE j = 1;
---- CATCH
AnalysisException: The Iceberg table has BUCKET partitioning. The outcome of dynamic partition overwrite is unforeseeable with the given select query with WHERE clause
====
---- QUERY
CREATE TABLE iceberg_overwrite_bucket_other (i int, j int)
PARTITIONED BY SPEC (bucket(3, i))
STORED AS ICEBERG;
INSERT OVERWRITE iceberg_overwrite_bucket SELECT * FROM iceberg_overwrite_bucket_other;
---- CATCH
AnalysisException: The Iceberg table has BUCKET partitioning and the source table does not match the target table.
====
---- QUERY
ALTER TABLE iceberg_overwrite_bucket SET PARTITION SPEC (
bucket(3, i),
truncate(3, j)
);
INSERT OVERWRITE iceberg_overwrite_bucket SELECT * FROM iceberg_overwrite_bucket_other;
---- CATCH
AnalysisException: The Iceberg table has multiple partition specs.
====
---- QUERY
CREATE TABLE iceberg_hive_cat_with_cat_location (i int)
STORED AS ICEBERG
TBLPROPERTIES('iceberg.catalog'='hive.catalog',
'iceberg.catalog_location'='$WAREHOUSE_LOCATION_PREFIX/test-warehouse/catalog_loc')
---- CATCH
iceberg.catalog_location cannot be set for Iceberg table stored in hive.catalog
====
---- QUERY
CREATE TABLE iceberg_hadoop_tables_with_metadata_location (i int)
STORED AS ICEBERG
TBLPROPERTIES('iceberg.catalog'='hadoop.tables',
'metadata_location'='$WAREHOUSE_LOCATION_PREFIX/test-warehouse/catalog/metadata_loc')
---- CATCH
metadata_location cannot be set for Iceberg tables
====
---- QUERY
CREATE TABLE iceberg_hadoop_cat_with_metadata_location (i int)
STORED AS ICEBERG
TBLPROPERTIES('iceberg.catalog'='hadoop.catalog',
'iceberg.catalog_location'='$WAREHOUSE_LOCATION_PREFIX/test-warehouse/catalog',
'metadata_location'='$WAREHOUSE_LOCATION_PREFIX/test-warehouse/catalog/metadata_loc')
---- CATCH
metadata_location cannot be set for Iceberg tables
====
---- QUERY
CREATE TABLE iceberg_hive_cat_with_metadata_location (i int)
STORED AS ICEBERG
TBLPROPERTIES('iceberg.catalog'='hive.catalog',
'metadata_location'='$WAREHOUSE_LOCATION_PREFIX/test-warehouse/catalog/metadata_loc')
---- CATCH
metadata_location cannot be set for Iceberg tables
====
---- QUERY
CREATE TABLE iceberg_partitioned_insert(
level STRING,
event_time TIMESTAMP
)
PARTITIONED BY SPEC(level)
STORED AS ICEBERG
TBLPROPERTIES('iceberg.catalog'='hadoop.tables');
---- RESULTS
'Table has been created.'
====
---- QUERY
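# Static PARTITION clauses are rejected; Iceberg inserts use dynamic partitioning.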
INSERT INTO iceberg_partitioned_insert PARTITION(level='level') VALUES(now());
---- CATCH
Static partitioning is not supported for Iceberg tables.
====
---- QUERY
CREATE TABLE all_cols_needed_for_insert (i int, j int, k int)
PARTITIONED BY SPEC (j, k)
STORED AS ICEBERG;
---- RESULTS
'Table has been created.'
====
---- QUERY
insert into all_cols_needed_for_insert values (1);
---- CATCH
has more columns (3) than the SELECT / VALUES clause returns
====
---- QUERY
ALTER TABLE iceberg_table_hadoop_tables RENAME TO iceberg_table_hadoop_tables_new;
---- CATCH
UnsupportedOperationException: Cannot rename Iceberg tables that use 'hadoop.tables' as catalog.
====
---- QUERY
CREATE TABLE iceberg_table_hive_catalog (level STRING)
STORED AS ICEBERG
TBLPROPERTIES('iceberg.catalog'='hive.catalog')
====
---- QUERY
CREATE TABLE iceberg_table_hadoop_catalog(
level STRING,
event_time TIMESTAMP
)
STORED AS ICEBERG
TBLPROPERTIES('iceberg.catalog'='hadoop.catalog',
'iceberg.catalog_location'='$WAREHOUSE_LOCATION_PREFIX/other/$DATABASE/hadoop_catalog_test');
ALTER TABLE iceberg_table_hadoop_catalog RENAME TO iceberg_table_hadoop_catalog_new;
---- CATCH
UnsupportedOperationException: Cannot rename Iceberg tables that use 'hadoop.catalog' as catalog.
====
---- QUERY
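# Iceberg catalog-related table properties are read-only: SET and UNSET are rejected for each of them below.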
ALTER TABLE iceberg_table_hadoop_catalog set TBLPROPERTIES('iceberg.catalog'='hadoop.tables');
---- CATCH
AnalysisException: Changing the 'iceberg.catalog' table property is not supported for Iceberg table.
====
---- QUERY
ALTER TABLE iceberg_table_hadoop_catalog unset TBLPROPERTIES('iceberg.catalog');
---- CATCH
AnalysisException: Unsetting the 'iceberg.catalog' table property is not supported for Iceberg table.
====
---- QUERY
ALTER TABLE iceberg_table_hadoop_catalog set TBLPROPERTIES('iceberg.catalog_location'='/fake_location');
---- CATCH
AnalysisException: Changing the 'iceberg.catalog_location' table property is not supported for Iceberg table.
====
---- QUERY
ALTER TABLE iceberg_table_hadoop_catalog unset TBLPROPERTIES('iceberg.catalog_location');
---- CATCH
AnalysisException: Unsetting the 'iceberg.catalog_location' table property is not supported for Iceberg table.
====
---- QUERY
ALTER TABLE iceberg_table_hadoop_catalog set TBLPROPERTIES('iceberg.table_identifier'='fake_db.fake_table');
---- CATCH
AnalysisException: Changing the 'iceberg.table_identifier' table property is not supported for Iceberg table.
====
---- QUERY
ALTER TABLE iceberg_table_hadoop_catalog set TBLPROPERTIES('name'='fake_db.fake_table');
---- CATCH
AnalysisException: Changing the 'name' table property is not supported for Iceberg table.
====
---- QUERY
ALTER TABLE iceberg_table_hadoop_catalog set TBLPROPERTIES('iceberg.mr.table.identifier'='fake_db.fake_table');
---- CATCH
AnalysisException: Changing the 'iceberg.mr.table.identifier' table property is not supported for Iceberg table.
====
---- QUERY
ALTER TABLE iceberg_table_hadoop_catalog unset TBLPROPERTIES('iceberg.table_identifier');
---- CATCH
AnalysisException: Unsetting the 'iceberg.table_identifier' table property is not supported for Iceberg table.
====
---- QUERY
ALTER TABLE iceberg_table_hadoop_catalog set TBLPROPERTIES('metadata_location'='$WAREHOUSE_LOCATION_PREFIX/test-warehouse/metadata_loc');
---- CATCH
AnalysisException: Changing the 'metadata_location' table property is not supported for Iceberg table.
====
---- QUERY
ALTER TABLE iceberg_table_hadoop_catalog unset TBLPROPERTIES('metadata_location');
---- CATCH
AnalysisException: Unsetting the 'metadata_location' table property is not supported for Iceberg table.
====
---- QUERY
ALTER TABLE iceberg_table_hive_catalog set TBLPROPERTIES('metadata_location'='$WAREHOUSE_LOCATION_PREFIX/test-warehouse/metadata_loc');
---- CATCH
AnalysisException: Changing the 'metadata_location' table property is not supported for Iceberg table.
====
---- QUERY
ALTER TABLE iceberg_table_hadoop_catalog set FILEFORMAT PARQUET;
---- CATCH
AnalysisException: ALTER TABLE SET FILEFORMAT is not supported on Iceberg tables: $DATABASE.iceberg_table_hadoop_catalog
====
---- QUERY
ALTER TABLE iceberg_table_hadoop_catalog SET ROW FORMAT DELIMITED FIELDS TERMINATED BY ',';
---- CATCH
AnalysisException: ALTER TABLE SET ROWFORMAT is not supported on Iceberg tables: $DATABASE.iceberg_table_hadoop_catalog
====
---- QUERY
ALTER TABLE iceberg_table_hadoop_catalog SET LOCATION '/fake_location';
---- CATCH
AnalysisException: ALTER TABLE SET LOCATION is not supported on Iceberg tables: $DATABASE.iceberg_table_hadoop_catalog
====
---- QUERY
ALTER TABLE iceberg_table_hadoop_catalog ADD PARTITION(fake_col='fake_value');
---- CATCH
AnalysisException: ALTER TABLE ADD PARTITION is not supported for Iceberg tables: $DATABASE.iceberg_table_hadoop_catalog
====
---- QUERY
ALTER TABLE iceberg_table_hadoop_catalog DROP PARTITION(fake_col='fake_value');
---- CATCH
AnalysisException: ALTER TABLE DROP PARTITION is not supported for Iceberg tables: $DATABASE.iceberg_table_hadoop_catalog
====
---- QUERY
ALTER TABLE iceberg_table_hadoop_catalog RECOVER PARTITIONS;
---- CATCH
AnalysisException: ALTER TABLE RECOVER PARTITIONS is not supported on Iceberg tables: $DATABASE.iceberg_table_hadoop_catalog
====
---- QUERY
ALTER TABLE iceberg_table_hadoop_catalog REPLACE COLUMNS(level INT, register_time DATE);
---- CATCH
AnalysisException: ALTER TABLE REPLACE COLUMNS is not supported on Iceberg tables.
====
---- QUERY
CREATE TABLE iceberg_transactional(i int)
STORED AS ICEBERG
tblproperties('iceberg.catalog'='hadoop.tables', 'transactional'='true');
---- CATCH
Iceberg tables cannot have Hive ACID table properties.
====
---- QUERY
CREATE TABLE iceberg_insert_only(i int)
STORED AS ICEBERG
tblproperties('iceberg.catalog'='hadoop.catalog',
'iceberg.catalog_location'='$WAREHOUSE_LOCATION_PREFIX/$EXTERNAL_WAREHOUSE_DIR/specified_location',
'transactional'='true', 'transactional_properties'='insert_only');
---- CATCH
Iceberg tables cannot have Hive ACID table properties.
====
---- QUERY
DESCRIBE HISTORY functional_parquet.emptytable;
---- CATCH
DESCRIBE HISTORY must specify an Iceberg table: functional_parquet.emptytable
====
---- QUERY
# CHAR is not supported.
CREATE TABLE chars_formats (cs CHAR(5))
STORED AS iceberg;
---- CATCH
Type CHAR(5) is not supported in Iceberg
====
---- QUERY
# VARCHAR is not supported.
CREATE TABLE chars_formats (vc VARCHAR(100))
STORED AS iceberg;
---- CATCH
Type VARCHAR(100) is not supported in Iceberg
====
---- QUERY
# Impala cannot write UTC normalized timestamps.
INSERT INTO functional_parquet.iceberg_non_partitioned
VALUES (1, "user", "action", now());
---- CATCH
AnalysisException: The Iceberg table has a TIMESTAMPTZ column that Impala cannot write.
====
---- QUERY
CREATE TABLE iceberg_kudu_option(id int primary key)
STORED AS ICEBERG
---- CATCH
AnalysisException: Unsupported column options for file format 'ICEBERG': 'id INT PRIMARY KEY'
====
---- QUERY
# PARTITIONED BY and PARTITIONED BY SPEC are not allowed in the same statement.
CREATE TABLE iceberg_part_spec_part (i INT)
PARTITIONED BY (p INT)
PARTITIONED BY SPEC (TRUNCATE(10, i))
STORED AS ICEBERG;
---- CATCH
Syntax error in line
====
---- QUERY
# PARTITIONED BY SPEC and PARTITIONED BY are not allowed in the same statement.
CREATE TABLE iceberg_part_part_spec (i INT)
PARTITIONED BY SPEC (TRUNCATE(10, i))
PARTITIONED BY (p INT)
STORED AS ICEBERG;
---- CATCH
Syntax error in line
====
---- QUERY
CREATE TABLE iceberg_wrong_fileformat (i int)
STORED AS ICEBERG
TBLPROPERTIES('write.format.default'='or');
---- CATCH
Invalid fileformat for Iceberg table: or
====
---- QUERY
CREATE TABLE iceberg_set_wrong_fileformat (i int)
STORED AS ICEBERG;
ALTER TABLE iceberg_set_wrong_fileformat SET TBLPROPERTIES ('write.format.default'='parq');
---- CATCH
Invalid fileformat for Iceberg table: parq
====
---- QUERY
CREATE TABLE iceberg_wrong_partition (i int)
PARTITIONED BY SPEC (wrong(i))
STORED AS ICEBERG;
---- CATCH
Unsupported iceberg partition type: WRONG
====
---- QUERY
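# write.parquet.* properties are only accepted when the table's file format is Parquet.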
CREATE TABLE iceberg_wrong_parquet_row_group_size1 ( i int)
STORED AS ICEBERG
TBLPROPERTIES(
'write.format.default'='orc',
'write.parquet.row-group-size-bytes'='134217728');
---- CATCH
write.parquet.row-group-size-bytes should be set only for parquet file format
====
---- QUERY
CREATE TABLE iceberg_wrong_parquet_row_group_size2 ( i int)
STORED AS ICEBERG
TBLPROPERTIES('write.parquet.row-group-size-bytes'='8388607');
---- CATCH
Parquet row group size for Iceberg table should fall in the range of [8388608..2146435072]
====
---- QUERY
CREATE TABLE iceberg_set_wrong_parquet_row_group_size2 ( i int)
STORED AS ICEBERG;
ALTER TABLE iceberg_set_wrong_parquet_row_group_size2 SET
TBLPROPERTIES('write.parquet.row-group-size-bytes'='8388607');
---- CATCH
Parquet row group size for Iceberg table should fall in the range of [8388608..2146435072]
====
---- QUERY
CREATE TABLE iceberg_wrong_parquet_row_group_size3 ( i int)
STORED AS ICEBERG
TBLPROPERTIES('write.parquet.row-group-size-bytes'='134217728a');
---- CATCH
Invalid parquet row group size for Iceberg table: 134217728a
====
---- QUERY
CREATE TABLE iceberg_set_wrong_parquet_row_group_size3 ( i int)
STORED AS ICEBERG;
ALTER TABLE iceberg_set_wrong_parquet_row_group_size3 SET
TBLPROPERTIES('write.parquet.row-group-size-bytes'='134217728a');
---- CATCH
Invalid parquet row group size for Iceberg table: 134217728a
====
---- QUERY
CREATE TABLE iceberg_wrong_parquet_row_group_size4 ( i int)
STORED AS ICEBERG
TBLPROPERTIES('write.parquet.row-group-size-bytes'='2146435073');
---- CATCH
Parquet row group size for Iceberg table should fall in the range of [8388608..2146435072]
====
---- QUERY
CREATE TABLE iceberg_set_wrong_parquet_row_group_size4 ( i int)
STORED AS ICEBERG;
ALTER TABLE iceberg_set_wrong_parquet_row_group_size4 SET
TBLPROPERTIES('write.parquet.row-group-size-bytes'='2146435073');
---- CATCH
Parquet row group size for Iceberg table should fall in the range of [8388608..2146435072]
====
---- QUERY
CREATE TABLE iceberg_wrong_parquet_page_size1 ( i int)
STORED AS ICEBERG
TBLPROPERTIES('write.format.default'='orc', 'write.parquet.page-size-bytes'='65536');
---- CATCH
write.parquet.page-size-bytes should be set only for parquet file format
====
---- QUERY
CREATE TABLE iceberg_wrong_parquet_page_size2 ( i int)
STORED AS ICEBERG
TBLPROPERTIES('write.parquet.page-size-bytes'='65535');
---- CATCH
Parquet page size for Iceberg table should fall in the range of [65536..1073741824]
====
---- QUERY
CREATE TABLE iceberg_set_wrong_parquet_page_size2 ( i int)
STORED AS ICEBERG;
ALTER TABLE iceberg_set_wrong_parquet_page_size2 SET
TBLPROPERTIES('write.parquet.page-size-bytes'='65535');
---- CATCH
Parquet page size for Iceberg table should fall in the range of [65536..1073741824]
====
---- QUERY
CREATE TABLE iceberg_wrong_parquet_page_size3 ( i int)
STORED AS ICEBERG
TBLPROPERTIES('write.parquet.page-size-bytes'='655 36');
---- CATCH
Invalid parquet page size for Iceberg table: 655 36
====
---- QUERY
CREATE TABLE iceberg_set_wrong_parquet_page_size3 ( i int)
STORED AS ICEBERG;
ALTER TABLE iceberg_set_wrong_parquet_page_size3 SET
TBLPROPERTIES('write.parquet.page-size-bytes'='655 36');
---- CATCH
Invalid parquet page size for Iceberg table: 655 36
====
---- QUERY
CREATE TABLE iceberg_wrong_parquet_page_size4 ( i int)
STORED AS ICEBERG
TBLPROPERTIES('write.parquet.page-size-bytes'='1073741825');
---- CATCH
Parquet page size for Iceberg table should fall in the range of [65536..1073741824]
====
---- QUERY
CREATE TABLE iceberg_set_wrong_parquet_page_size4 ( i int)
STORED AS ICEBERG;
ALTER TABLE iceberg_set_wrong_parquet_page_size4 SET
TBLPROPERTIES('write.parquet.page-size-bytes'='1073741825');
---- CATCH
Parquet page size for Iceberg table should fall in the range of [65536..1073741824]
====
---- QUERY
CREATE TABLE iceberg_wrong_parquet_dict_size1 ( i int)
STORED AS ICEBERG
TBLPROPERTIES('write.format.default'='orc', 'write.parquet.dict-size-bytes'='65536');
---- CATCH
write.parquet.dict-size-bytes should be set only for parquet file format
====
---- QUERY
CREATE TABLE iceberg_wrong_parquet_dict_size2 ( i int)
STORED AS ICEBERG
TBLPROPERTIES('write.parquet.dict-size-bytes'='65535');
---- CATCH
Parquet dictionary page size for Iceberg table should fall in the range of [65536..1073741824]
====
---- QUERY
CREATE TABLE iceberg_set_wrong_parquet_dict_size2 ( i int)
STORED AS ICEBERG;
ALTER TABLE iceberg_set_wrong_parquet_dict_size2 SET
TBLPROPERTIES('write.parquet.dict-size-bytes'='65535');
---- CATCH
Parquet dictionary page size for Iceberg table should fall in the range of [65536..1073741824]
====
---- QUERY
CREATE TABLE iceberg_wrong_parquet_dict_size3 ( i int)
STORED AS ICEBERG
TBLPROPERTIES('write.parquet.dict-size-bytes'='655 36');
---- CATCH
Invalid parquet dictionary page size for Iceberg table: 655 36
====
---- QUERY
CREATE TABLE iceberg_set_wrong_parquet_dict_size3 ( i int)
STORED AS ICEBERG;
ALTER TABLE iceberg_set_wrong_parquet_dict_size3 SET
TBLPROPERTIES('write.parquet.dict-size-bytes'='655 36');
---- CATCH
Invalid parquet dictionary page size for Iceberg table: 655 36
====
---- QUERY
CREATE TABLE iceberg_wrong_parquet_dict_size4 ( i int)
STORED AS ICEBERG
TBLPROPERTIES('write.parquet.dict-size-bytes'='1073741825');
---- CATCH
Parquet dictionary page size for Iceberg table should fall in the range of [65536..1073741824]
====
---- QUERY
CREATE TABLE iceberg_set_wrong_parquet_dict_size4 ( i int)
STORED AS ICEBERG;
ALTER TABLE iceberg_set_wrong_parquet_dict_size4 SET
TBLPROPERTIES('write.parquet.dict-size-bytes'='1073741825');
---- CATCH
Parquet dictionary page size for Iceberg table should fall in the range of [65536..1073741824]
====
---- QUERY
CREATE TABLE iceberg_wrong_parquet_comp_codec1 ( i int)
STORED AS ICEBERG
TBLPROPERTIES('write.format.default'='orc',
'write.parquet.compression-codec'='snappy');
---- CATCH
write.parquet.compression-codec should be set only for parquet file format
====
---- QUERY
CREATE TABLE iceberg_wrong_parquet_comp_codec2 ( i int)
STORED AS ICEBERG
TBLPROPERTIES('write.parquet.compression-codec'='snapp');
---- CATCH
Invalid parquet compression codec for Iceberg table: snapp
====
---- QUERY
CREATE TABLE iceberg_set_wrong_parquet_comp_codec2 ( i int)
STORED AS ICEBERG;
ALTER TABLE iceberg_set_wrong_parquet_comp_codec2 SET
TBLPROPERTIES('write.parquet.compression-codec'='snapp');
---- CATCH
Invalid parquet compression codec for Iceberg table: snapp
====
---- QUERY
CREATE TABLE iceberg_wrong_parquet_comp_level1 ( i int)
STORED AS ICEBERG
TBLPROPERTIES('write.format.default'='orc',
'write.parquet.compression-level'='2');
---- CATCH
write.parquet.compression-level should be set only for parquet file format
====
---- QUERY
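# Without an explicit codec the default is SNAPPY, which does not support a compression level.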
CREATE TABLE iceberg_wrong_parquet_comp_level2 ( i int)
STORED AS ICEBERG
TBLPROPERTIES('write.parquet.compression-level'='2');
---- CATCH
Parquet compression level cannot be set for codec SNAPPY. Only ZSTD codec supports compression level table property.
====
---- QUERY
CREATE TABLE iceberg_wrong_parquet_comp_level3 ( i int)
STORED AS ICEBERG
TBLPROPERTIES('write.parquet.compression-codec'='zstd',
'write.parquet.compression-level'='0');
---- CATCH
Parquet compression level for Iceberg table should fall in the range of [1..22]
====
---- QUERY
CREATE TABLE iceberg_set_wrong_parquet_comp_level4 ( i int)
STORED AS ICEBERG;
ALTER TABLE iceberg_set_wrong_parquet_comp_level4 SET
TBLPROPERTIES('write.parquet.compression-level'='0');
---- CATCH
Parquet compression level for Iceberg table should fall in the range of [1..22]
====
---- QUERY
CREATE TABLE non_iceberg_table (i INT);
ALTER TABLE non_iceberg_table SET PARTITION SPEC (i);
---- CATCH
AnalysisException: ALTER TABLE SET PARTITION SPEC is only supported for Iceberg tables: $DATABASE.non_iceberg_table
====
---- QUERY
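# Time travel (FOR SYSTEM_TIME / FOR SYSTEM_VERSION AS OF) is only supported for Iceberg tables.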
SELECT * FROM non_iceberg_table FOR SYSTEM_TIME AS OF now();
---- CATCH
AS OF clause is only supported for Iceberg tables.
====
---- QUERY
SELECT * FROM non_iceberg_table FOR SYSTEM_VERSION AS OF 42;
---- CATCH
AS OF clause is only supported for Iceberg tables.
====
---- QUERY
CREATE TABLE iceberg_alter_part ( i int, d DATE, s STRUCT<f1:BIGINT,f2:BIGINT>)
STORED AS ICEBERG;
ALTER TABLE iceberg_alter_part SET PARTITION SPEC (g);
---- CATCH
AnalysisException: Source column 'g' does not exist in table: $DATABASE.iceberg_alter_part
====
---- QUERY
ALTER TABLE iceberg_alter_part SET PARTITION SPEC (s);
---- CATCH
AnalysisException: Source column 's' in table $DATABASE.iceberg_alter_part must be a primitive type
====
---- QUERY
ALTER TABLE iceberg_alter_part SET PARTITION SPEC (YEAR(i));
---- CATCH
ImpalaRuntimeException: Failed to ALTER table 'iceberg_alter_part': Cannot partition type int by year
====
---- QUERY
ALTER TABLE iceberg_alter_part SET PARTITION SPEC (HOUR(d));
---- CATCH
ImpalaRuntimeException: Failed to ALTER table 'iceberg_alter_part': Cannot partition type date by hour
====
---- QUERY
CREATE TABLE clone_ice LIKE functional_parquet.alltypestiny STORED AS ICEBERG;
---- CATCH
functional_parquet.alltypestiny cannot be cloned into an Iceberg table because it is not an Iceberg table.
====
---- QUERY
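# Time travel before the oldest snapshot fails; the next two cases check that this holds across session timezones.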
select * from functional_parquet.iceberg_alltypes_part for system_time as of '2000-01-01 01:02:03';
---- CATCH
IllegalArgumentException: Cannot find a snapshot older than 2000-01-01 01:02:03
====
---- QUERY
SET TIMEZONE='Europe/Budapest';
select * from functional_parquet.iceberg_alltypes_part for system_time as of '2000-01-01 01:02:03';
---- CATCH
IllegalArgumentException: Cannot find a snapshot older than 2000-01-01 01:02:03
====
---- QUERY
SET TIMEZONE='Asia/Shanghai';
select * from functional_parquet.iceberg_alltypes_part for system_time as of '2000-01-01 01:02:03';
---- CATCH
IllegalArgumentException: Cannot find a snapshot older than 2000-01-01 01:02:03
====
---- QUERY
# Querying a table with equality deletes is not allowed.
# We don't use time-travel, so we plan the query from cached metadata.
select * from functional_parquet.iceberg_v2_delete_equality;
---- CATCH
ImpalaRuntimeException: Iceberg table functional_parquet.iceberg_v2_delete_equality has EQUALITY delete file which is currently not supported by Impala
====
---- QUERY
# Querying a table with equality deletes is not allowed.
# Use time-travel based on snapshot id.
select * from functional_parquet.iceberg_v2_delete_equality for system_version as of 5763349507283783091;
---- CATCH
ImpalaRuntimeException: Iceberg table functional_parquet.iceberg_v2_delete_equality has EQUALITY delete file which is currently not supported by Impala
====
---- QUERY
# Querying a table with equality deletes is not allowed.
# Use time-travel based on timestamp.
select * from functional_parquet.iceberg_v2_delete_equality for system_time as of now();
---- CATCH
ImpalaRuntimeException: Iceberg table functional_parquet.iceberg_v2_delete_equality has EQUALITY delete file which is currently not supported by Impala
====
---- QUERY
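# Create an Iceberg V1 table to exercise the DELETE restrictions below.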
create table ice_delete (i int, s string)
stored by iceberg
tblproperties ('format-version'='1');
====
---- QUERY
# Cannot DELETE from Iceberg V1 table
delete from ice_delete where i = 1;
---- CATCH
AnalysisException: Iceberg V1 table do not support DELETE/UPDATE operations: $DATABASE.ice_delete
====
---- QUERY
# For deleting every row, users must use TRUNCATE.
alter table ice_delete set tblproperties ('format-version'='2');
delete from ice_delete;
---- CATCH
AnalysisException: For deleting every row, please use TRUNCATE.
====
---- QUERY
delete from ice_delete where true;
---- CATCH
AnalysisException: For deleting every row, please use TRUNCATE.
====
---- QUERY
# Cannot delete from an Iceberg table if the write format is not Parquet (and no delete format is set)
alter table ice_delete set tblproperties ('write.format.default'='ORC');
delete from ice_delete where i = 1;
---- CATCH
AnalysisException: Impala can only write delete files in PARQUET, but the given table uses a different file format: $DATABASE.ice_delete
====
---- QUERY
# Cannot delete from an Iceberg table if the delete format is not Parquet
alter table ice_delete set tblproperties ('write.format.default'='PARQUET', 'write.delete.format.default'='ORC');
delete from ice_delete where i = 1;
---- CATCH
AnalysisException: Impala can only write delete files in PARQUET, but the given table uses a different file format: $DATABASE.ice_delete
====
---- QUERY
# Cannot delete from an Iceberg table if the delete write mode is not 'merge-on-read'
alter table ice_delete set tblproperties ('write.delete.format.default'='PARQUET', 'write.delete.mode'='copy-on-write');
delete from ice_delete where i = 1;
---- CATCH
AnalysisException: Unsupported delete mode: 'copy-on-write' for Iceberg table: $DATABASE.ice_delete
====
---- QUERY
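# OPTIMIZE is Iceberg-only and, for now, an alias for INSERT OVERWRITE, so it inherits the same restrictions.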
optimize table non_iceberg_table;
---- CATCH
AnalysisException: OPTIMIZE is only supported for Iceberg tables.
====
---- QUERY
optimize table iceberg_overwrite_bucket;
---- CATCH
AnalysisException: The Iceberg table has multiple partition specs. This means the outcome of dynamic partition overwrite is unforeseeable. Consider using TRUNCATE then INSERT INTO from the previous snapshot to overwrite your table.
====
---- QUERY
CREATE TABLE ice_complex (id BIGINT NULL, int_array ARRAY<INT> NULL) STORED AS ICEBERG;
optimize table ice_complex;
---- CATCH
AnalysisException: Unable to INSERT into target table ($DATABASE.ice_complex) because the column 'int_array' has a complex type 'ARRAY<INT>' and Impala doesn't support inserting into tables containing complex type columns
====