This patch mainly implements querying of Paimon data tables
through a JNI-based scanner.
Features implemented:
- Column pruning support.
Partition pruning and predicate pushdown will be submitted
as the third part of the patch.
We implemented this by treating the Paimon table as a normal
unpartitioned table. When querying a Paimon table:
- PaimonScanNode decides which Paimon splits need to be scanned,
and then transfers the splits to the BE for the JNI-based scan
operation.
- We also collect the required columns that need to be scanned,
and pass them to the scanner for column pruning. This is
implemented by passing the field ids of the columns to the BE
instead of column positions, in order to support schema
evolution (see the first sketch after this list).
- In the original implementation, PaimonJniScanner directly
passed Paimon row objects to the BE and called the corresponding
Paimon row field accessors, which are Java methods that convert
row fields into Impala row-batch tuples. We found this to be
slow due to the overhead of JVM method calls.
To minimize that overhead, we refashioned the implementation:
PaimonJniScanner now converts the Paimon row batches into Arrow
record batches, which store their data in the off-heap region of
the Impala JVM, and passes the off-heap record batch memory
pointer to the BE (see the second sketch after this list).
The BE PaimonJniScanNode reads the data directly from the JVM
off-heap region and converts the Arrow record batch into an
Impala row batch.
Benchmarks show the new implementation is 2.x times faster than
the original one.
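
The field-id mapping can be sketched as follows. This is an
illustrative, hypothetical helper (not the actual Impala code),
assuming only Paimon's public RowType/DataField API:

  import java.util.List;
  import org.apache.paimon.types.DataField;
  import org.apache.paimon.types.RowType;

  // Resolves required column names to Paimon field ids, so the
  // BE can prune by id rather than by position; ids stay stable
  // across schema evolution while positions do not.
  static int[] requiredFieldIds(RowType rowType, List<String> cols) {
    return cols.stream()
        .map(name -> rowType.getFields().stream()
            .filter(f -> f.name().equalsIgnoreCase(name))
            .findFirst()
            .orElseThrow(() ->
                new IllegalArgumentException("unknown column: " + name)))
        .mapToInt(DataField::id)
        .toArray();
  }

The Arrow hand-off can be sketched with Arrow's Java C data
interface (org.apache.arrow:arrow-c-data). Again a minimal
sketch under that assumption, not the actual PaimonJniScanner
code; exportBatch is a hypothetical name:

  import org.apache.arrow.c.ArrowArray;
  import org.apache.arrow.c.ArrowSchema;
  import org.apache.arrow.c.Data;
  import org.apache.arrow.memory.BufferAllocator;
  import org.apache.arrow.vector.VectorSchemaRoot;

  static long[] exportBatch(BufferAllocator allocator, VectorSchemaRoot root) {
    // Allocate the C-ABI structs; the column buffers themselves
    // already live off-heap inside the VectorSchemaRoot.
    ArrowArray arrowArray = ArrowArray.allocateNew(allocator);
    ArrowSchema arrowSchema = ArrowSchema.allocateNew(allocator);
    Data.exportVectorSchemaRoot(allocator, root, null, arrowArray, arrowSchema);
    // Only these two raw pointers cross the JNI boundary; the
    // importer frees the memory through the release callback
    // embedded in the exported structs.
    return new long[] {arrowArray.memoryAddress(), arrowSchema.memoryAddress()};
  }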
The lifecycle of an Arrow record batch is as follows:
the batch is generated in the FE and passed to the BE.
Once the record batch has been imported into the BE
successfully, the BE is in charge of freeing it.
There are two free paths: the normal path and the
exception path. On the normal path, once an Arrow batch
has been fully consumed, the BE calls into the JNI to fetch the
next batch; in that case the previous batch is freed
automatically. The exception path is taken when the query is
cancelled or a memory allocation fails; in these corner cases
the Arrow batch is freed in the close method if it has not been
fully consumed by the BE.
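The release contract is that of the Arrow C data interface: the
importer owns the batch once the import succeeds, and freeing
happens through the release callback carried in the exported
structs. The BE does this in C++; for illustration only, the
equivalent importer-side contract expressed with Arrow's Java C
data interface would look roughly like this (hypothetical
helper, not Impala code):

  import org.apache.arrow.c.ArrowArray;
  import org.apache.arrow.c.ArrowSchema;
  import org.apache.arrow.c.Data;
  import org.apache.arrow.memory.BufferAllocator;
  import org.apache.arrow.memory.RootAllocator;
  import org.apache.arrow.vector.VectorSchemaRoot;

  static void consumeBatch(long arrayAddress, long schemaAddress) {
    try (BufferAllocator allocator = new RootAllocator();
         ArrowArray array = ArrowArray.wrap(arrayAddress);
         ArrowSchema schema = ArrowSchema.wrap(schemaAddress);
         VectorSchemaRoot root =
             Data.importVectorSchemaRoot(allocator, array, schema, null)) {
      // ... consume root ...
      // Closing root on the normal path, or unwinding through
      // this try block on the error path, invokes the producer's
      // release callback, so the off-heap buffers are freed
      // exactly once.
    }
  }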
Currently supported Impala data types for queries include:
- BOOLEAN
- TINYINT
- SMALLINT
- INTEGER
- BIGINT
- FLOAT
- DOUBLE
- STRING
- DECIMAL(P,S)
- TIMESTAMP
- CHAR(N)
- VARCHAR(N)
- BINARY
- DATE
TODO:
- Patches pending submission:
  - tpcds/tpch data loading support
    for Paimon data tables.
  - Virtual column query support for querying
    Paimon data tables.
  - Query support with time travel.
  - Query support for Paimon meta tables.
- WIP:
  - Snapshot incremental read.
  - Complex type query support.
  - Native Paimon table scanner, instead of
    JNI-based.
Testing:
- Created test tables in functional_schema_template.sql.
- Added TestPaimonScannerWithLimit in test_scanners.py.
- Added test_paimon_query in test_paimon.py.
- Already passed the tpcds/tpch tests for Paimon tables. The
  test table data is currently generated by Spark, since Impala
  does not support generating it yet and Hive does not support
  generating dynamically partitioned Paimon tables. We plan to
  submit a separate patch for tpcds/tpch data loading and the
  associated tpcds/tpch query tests.
- JVM off-heap memory leak tests: ran looped tpch tests for
  one day; no obvious off-heap memory increase was observed,
  and off-heap memory usage stayed within 10 MB.
Change-Id: Ie679a89a8cc21d52b583422336b9f747bdf37384
Reviewed-on: http://gerrit.cloudera.org:8080/23613
Tested-by: Impala Public Jenkins <impala-public-jenkins@cloudera.com>
Reviewed-by: Zoltan Borok-Nagy <boroknagyz@cloudera.com>
Reviewed-by: Riza Suminto <riza.suminto@cloudera.com>
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>org.apache.impala</groupId>
  <artifactId>impala-parent</artifactId>
  <version>5.0.0-SNAPSHOT</version>
  <packaging>pom</packaging>
  <name>Apache Impala Parent POM</name>

  <!--
  When adding new properties to control library versions, also add them to
  bin/impala-config.sh as environment variables so other branches can easily override
  them in bin/impala-config-branch.sh for cleaner patches.
  -->
  <properties>
    <surefire.reports.dir>${env.IMPALA_LOGS_DIR}/fe_tests</surefire.reports.dir>
    <jacoco.skip>true</jacoco.skip>
    <jacoco.data.file>${env.IMPALA_FE_TEST_COVERAGE_DIR}/jacoco.exec</jacoco.data.file>
    <jacoco.report.dir>${env.IMPALA_FE_TEST_COVERAGE_DIR}</jacoco.report.dir>
    <hadoop.version>${env.IMPALA_HADOOP_VERSION}</hadoop.version>
    <hive.version>${env.IMPALA_HIVE_VERSION}</hive.version>
    <hive.storage.api.version>${env.IMPALA_HIVE_STORAGE_API_VERSION}</hive.storage.api.version>
    <hive.major.version>${env.IMPALA_HIVE_MAJOR_VERSION}</hive.major.version>
    <hive.dist.type>${env.IMPALA_HIVE_DIST_TYPE}</hive.dist.type>
    <hudi.version>${env.IMPALA_HUDI_VERSION}</hudi.version>
    <paimon.version>${env.IMPALA_PAIMON_VERSION}</paimon.version>
    <arrow.version>${env.IMPALA_ARROW_VERSION}</arrow.version>
    <ranger.version>${env.IMPALA_RANGER_VERSION}</ranger.version>
    <postgres.jdbc.version>${env.IMPALA_POSTGRES_JDBC_DRIVER_VERSION}</postgres.jdbc.version>
    <hbase.version>${env.IMPALA_HBASE_VERSION}</hbase.version>
    <avro.version>${env.IMPALA_AVRO_JAVA_VERSION}</avro.version>
    <orc.version>${env.IMPALA_ORC_JAVA_VERSION}</orc.version>
    <ozone.version>${env.IMPALA_OZONE_VERSION}</ozone.version>
    <parquet.version>${env.IMPALA_PARQUET_VERSION}</parquet.version>
    <kite.version>${env.IMPALA_KITE_VERSION}</kite.version>
    <knox.version>${env.IMPALA_KNOX_VERSION}</knox.version>
    <cos.version>${env.IMPALA_COS_VERSION}</cos.version>
    <obs.version>${env.IMPALA_OBS_VERSION}</obs.version>
    <thrift.version>${env.IMPALA_THRIFT_POM_VERSION}</thrift.version>
    <impala.extdatasrc.api.version>${project.version}</impala.extdatasrc.api.version>
    <impala.query.event.hook.api.version>${project.version}</impala.query.event.hook.api.version>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <kudu.version>${env.IMPALA_KUDU_VERSION}</kudu.version>
    <slf4j.version>${env.IMPALA_SLF4J_VERSION}</slf4j.version>
    <commons-lang3.version>${env.IMPALA_COMMONS_LANG3_VERSION}</commons-lang3.version>
    <reload4j.version>${env.IMPALA_RELOAD4j_VERSION}</reload4j.version>
    <junit.version>${env.IMPALA_JUNIT_VERSION}</junit.version>
    <!-- Beware compatibility requirements with Thrift and
         KMS; see IMPALA-4210. -->
    <httpcomponents.core.version>${env.IMPALA_HTTP_CORE_VERSION}</httpcomponents.core.version>
    <yarn-extras.version>${project.version}</yarn-extras.version>
    <eclipse.output.directory>eclipse-classes</eclipse.output.directory>
    <!-- hive-exec seems to leak this version of guava onto our classpath,
         so it's important to depend on the same one -->
    <guava.version>${env.IMPALA_GUAVA_VERSION}</guava.version>
    <derby.version>${env.IMPALA_DERBY_VERSION}</derby.version>
    <jackson.version>${env.IMPALA_JACKSON_VERSION}</jackson.version>
    <jackson-databind.version>${env.IMPALA_JACKSON_DATABIND_VERSION}</jackson-databind.version>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <iceberg.version>${env.IMPALA_ICEBERG_VERSION}</iceberg.version>
    <pac4j.version>${env.IMPALA_PAC4J_VERSION}</pac4j.version>
    <!-- xmlsec, bcprov-jdk18on, springframework, and velocity-engine-core are not used by
         Impala directly, but they are needed by pac4j. This uses newer versions to
         address CVEs. -->
    <xmlsec.version>${env.IMPALA_XMLSEC_VERSION}</xmlsec.version>
    <bouncy-castle.version>${env.IMPALA_BOUNCY_CASTLE_VERSION}</bouncy-castle.version>
    <springframework.version>${env.IMPALA_SPRINGFRAMEWORK_VERSION}</springframework.version>
    <velocity-engine-core.version>${env.IMPALA_VELOCITY_ENGINE_CORE_VERSION}</velocity-engine-core.version>
    <json-smart.version>${env.IMPALA_JSON_SMART_VERSION}</json-smart.version>
    <commons-dbcp2.version>${env.IMPALA_DBCP2_VERSION}</commons-dbcp2.version>
    <dropwizard-metrics.version>${env.IMPALA_DROPWIZARD_METRICS_VERSION}</dropwizard-metrics.version>
    <aircompressor.version>${env.IMPALA_AIRCOMPRESSOR_VERSION}</aircompressor.version>
    <datasketches.version>${env.IMPALA_DATASKETCHES_VERSION}</datasketches.version>
    <!-- Informs maven-compiler-plugin to build for specified target.
         Source version is fixed at 8, primarily due to sun.misc.Unsafe. -->
    <maven.compiler.source>8</maven.compiler.source>
    <maven.compiler.target>${env.IMPALA_JAVA_TARGET}</maven.compiler.target>
  </properties>

  <repositories>
    <!--
    Repository order matters, especially when using mirrors. This optimizes
    for the common case where the mirror is intended to catch artifacts
    that would otherwise come from central and other physical servers.
    That means putting repositories in this order:
    1. Local / S3 repositories
    2. Regular remote repositories
    3. Banned repositories
    This allows builds to get artifacts from the local repositories
    and s3 buckets without trying the mirror first, while still
    getting other artifacts from the mirror.
    -->
    <!-- Local/S3 repositories -->
    <repository>
      <id>impala.toolchain.kudu.repo</id>
      <url>${env.IMPALA_TOOLCHAIN_KUDU_MAVEN_REPOSITORY}</url>
      <name>Impala Toolchain Kudu Repository</name>
      <releases>
        <enabled>${env.IMPALA_TOOLCHAIN_KUDU_MAVEN_REPOSITORY_ENABLED}</enabled>
      </releases>
      <!--
      This repository now uses explicit versions, so snapshots are no longer required.
      -->
      <snapshots>
        <enabled>false</enabled>
      </snapshots>
    </repository>
    <repository>
      <!--
      The Maven repository for the CDP build identified by CDP_BUILD_NUMBER.
      CDP does not use maven SNAPSHOT versions - every build has a version number.
      -->
      <id>impala.cdp.repo</id>
      <url>${env.CDP_MAVEN_REPOSITORY}</url>
      <name>Impala CDP Repository</name>
      <snapshots>
        <enabled>false</enabled>
      </snapshots>
    </repository>

    <!-- Regular remote repositories -->
    <repository>
      <id>cdh.rcs.releases.repo</id>
      <url>https://repository.cloudera.com/artifactory/cdh-releases-rcs</url>
      <name>CDH Releases Repository</name>
      <snapshots>
        <enabled>false</enabled>
      </snapshots>
    </repository>

    <!-- Banned repositories -->
    <!--
    Ban Apache repositories that tend to cause timeouts - see IMPALA-8516.
    These are pulled in via transitive deps, e.g. Sentry. Other repositories contain
    mirrored versions of these dependencies but don't have the same timeout issues.
    -->
    <repository>
      <id>apache.snapshots</id>
      <name>Apache Development Snapshot Repository</name>
      <url>https://repository.apache.org/content/repositories/snapshots/</url>
      <releases>
        <enabled>false</enabled>
      </releases>
      <snapshots>
        <enabled>false</enabled>
      </snapshots>
    </repository>
    <repository>
      <!--
      Hadoop defines the same repo with this name. We need to include it here to
      effectively blacklist it.
      -->
      <id>apache.snapshots.https</id>
      <name>Apache Development Snapshot Repository</name>
      <url>https://repository.apache.org/content/repositories/snapshots/</url>
      <releases>
        <enabled>false</enabled>
      </releases>
      <snapshots>
        <enabled>false</enabled>
      </snapshots>
    </repository>
    <repository>
      <id>apache</id>
      <name>Apache Repository</name>
      <url>https://repository.apache.org/content/repositories/</url>
      <releases>
        <enabled>false</enabled>
      </releases>
      <snapshots>
        <enabled>false</enabled>
      </snapshots>
    </repository>
    <repository>
      <!--
      The Impala development bootstrapping depends on CDH Maven snapshots
      which transitively pull dependencies from other repositories which
      can cause the build to be non-reproducible, e.g. IMPALA-7316. This
      patch makes the build to be reproducible by banning
      cdh.snapshots.repo so that Maven does not accidentally download the
      latest CDH snapshots when running a build, which can cause
      incompatibility issues.
      -->
      <id>cdh.snapshots.repo</id>
      <url>https://repository.cloudera.com/content/repositories/snapshots</url>
      <name>CDH Snapshots Repository</name>
      <releases>
        <enabled>false</enabled>
      </releases>
      <snapshots>
        <enabled>false</enabled>
      </snapshots>
    </repository>
    <repository>
      <!--
      HWX Nexus is disabled. This is a tombstone to list out why:
      1. Snapshots are disabled because HWX Nexus contains snapshots of artifacts
      that can conflict with the artifacts in any of the other repositories with
      SNAPSHOT versions. We don't want any conflicting sources for SNAPSHOT versions,
      so it is better to keep this disabled. In the past, this was a particular
      problem for CDH Hadoop components that used SNAPSHOT versions.
      2. In a previous change, we depended on the hadoop-cloud-storage artifact from
      the impala.cdp.repo. This had the odd property that it referenced versions of
      artifacts that were not in the impala.cdp.repo. For example, artifact A at
      version 280 may have a dependency on artifact B at version 279, but the maven
      repository may only have artifact B at version 280. Nexus was meant to handle
      these dangling dependencies (see IMPALA-8766). However, HWX Nexus ages out jars
      from old build numbers. When the artifact B at version 279 ages out, it breaks
      the build. Just as seriously, if Impala uses the impala.cdp.repo in a way that
      requires an external maven repository, old commits may become unbuildable if that
      external maven repository removes any of the jars we need.
      So, HWX Nexus is disabled and should face strong scrutiny before being reenabled.
      -->
      <id>hwx.public.repo</id>
      <url>https://nexus-private.hortonworks.com/nexus/content/groups/public</url>
      <name>Hortonworks public repository</name>
      <snapshots>
        <enabled>false</enabled>
      </snapshots>
      <releases>
        <enabled>false</enabled>
      </releases>
    </repository>
  </repositories>
  <pluginRepositories>
    <pluginRepository>
      <id>cdh.rcs.releases.repo</id>
      <url>https://repository.cloudera.com/artifactory/cdh-releases-rcs</url>
      <name>CDH Releases Repository</name>
      <snapshots>
        <enabled>false</enabled>
      </snapshots>
    </pluginRepository>
  </pluginRepositories>

  <dependencyManagement>
    <dependencies>
      <dependency>
        <groupId>com.google.guava</groupId>
        <artifactId>guava</artifactId>
        <version>${guava.version}</version>
      </dependency>

      <dependency>
        <groupId>net.minidev</groupId>
        <artifactId>json-smart</artifactId>
        <version>${json-smart.version}</version>
      </dependency>

      <dependency>
        <groupId>org.apache.avro</groupId>
        <artifactId>avro</artifactId>
        <version>${avro.version}</version>
      </dependency>

      <dependency>
        <groupId>org.apache.commons</groupId>
        <artifactId>commons-dbcp2</artifactId>
        <version>${commons-dbcp2.version}</version>
      </dependency>

      <dependency>
        <groupId>org.apache.commons</groupId>
        <artifactId>commons-lang3</artifactId>
        <version>${commons-lang3.version}</version>
      </dependency>

      <dependency>
        <groupId>org.apache.thrift</groupId>
        <artifactId>libthrift</artifactId>
        <version>${thrift.version}</version>
      </dependency>

      <!-- Override httpcore from libthrift to a later maintenance version. Noticed
           certain SPNEGO auth errors when connecting to kerberized HTTP endpoints
           using v4.4.1 pulled by libthrift. -->
      <dependency>
        <groupId>org.apache.httpcomponents</groupId>
        <artifactId>httpcore</artifactId>
        <version>${httpcomponents.core.version}</version>
      </dependency>

      <dependency>
        <groupId>org.apache.parquet</groupId>
        <artifactId>parquet-avro</artifactId>
        <version>${parquet.version}</version>
      </dependency>

      <dependency>
        <groupId>org.apache.santuario</groupId>
        <artifactId>xmlsec</artifactId>
        <version>${xmlsec.version}</version>
      </dependency>

      <dependency>
        <groupId>org.bouncycastle</groupId>
        <artifactId>bcprov-jdk18on</artifactId>
        <version>${bouncy-castle.version}</version>
      </dependency>
      <dependency>
        <groupId>org.bouncycastle</groupId>
        <artifactId>bcpkix-jdk18on</artifactId>
        <version>${bouncy-castle.version}</version>
      </dependency>

      <dependency>
        <groupId>org.pac4j</groupId>
        <artifactId>pac4j-core</artifactId>
        <version>${pac4j.version}</version>
      </dependency>

      <dependency>
        <groupId>ch.qos.reload4j</groupId>
        <artifactId>reload4j</artifactId>
        <version>${reload4j.version}</version>
      </dependency>
      <dependency>
        <groupId>org.slf4j</groupId>
        <artifactId>jcl-over-slf4j</artifactId>
        <version>${slf4j.version}</version>
      </dependency>
      <dependency>
        <groupId>org.slf4j</groupId>
        <artifactId>slf4j-api</artifactId>
        <version>${slf4j.version}</version>
      </dependency>
      <!-- Later versions of slf4j-log4j12 map to slf4j-reload4j. This is the simplest
           way to ensure any instances of slf4j-log4j12 are mapped to slf4j-reload4j. -->
      <dependency>
        <groupId>org.slf4j</groupId>
        <artifactId>slf4j-log4j12</artifactId>
        <version>${slf4j.version}</version>
      </dependency>
      <dependency>
        <groupId>org.slf4j</groupId>
        <artifactId>slf4j-reload4j</artifactId>
        <version>${slf4j.version}</version>
      </dependency>

      <dependency>
        <groupId>org.springframework</groupId>
        <artifactId>spring-core</artifactId>
        <version>${springframework.version}</version>
      </dependency>

      <dependency>
        <groupId>org.apache.velocity</groupId>
        <artifactId>velocity-engine-core</artifactId>
        <version>${velocity-engine-core.version}</version>
      </dependency>

      <dependency>
        <groupId>com.fasterxml.jackson.core</groupId>
        <artifactId>jackson-databind</artifactId>
        <version>${jackson-databind.version}</version>
      </dependency>
      <dependency>
        <groupId>com.fasterxml.jackson.core</groupId>
        <artifactId>jackson-core</artifactId>
        <version>${jackson.version}</version>
      </dependency>
      <dependency>
        <groupId>com.fasterxml.jackson.core</groupId>
        <artifactId>jackson-annotations</artifactId>
        <version>${jackson.version}</version>
      </dependency>

      <dependency>
        <groupId>org.apache.datasketches</groupId>
        <artifactId>datasketches-java</artifactId>
        <version>${datasketches.version}</version>
      </dependency>

      <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-auth</artifactId>
        <version>${hadoop.version}</version>
      </dependency>
      <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-aws</artifactId>
        <version>${hadoop.version}</version>
      </dependency>
      <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>${hadoop.version}</version>
      </dependency>
      <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>${hadoop.version}</version>
        <exclusions>
          <exclusion>
            <groupId>com.sun.jersey</groupId>
            <artifactId>jersey-server</artifactId>
          </exclusion>
          <exclusion>
            <groupId>com.sun.jersey</groupId>
            <artifactId>jersey-servlet</artifactId>
          </exclusion>
        </exclusions>
      </dependency>
      <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs-client</artifactId>
        <version>${hadoop.version}</version>
      </dependency>

      <dependency>
        <groupId>org.apache.hive</groupId>
        <artifactId>hive-exec</artifactId>
        <version>${hive.version}</version>
        <exclusions>
          <exclusion>
            <groupId>org.apache.hive.shims</groupId>
            <artifactId>hive-shims-0.20</artifactId>
          </exclusion>
          <exclusion>
            <groupId>org.apache.atlas</groupId>
            <artifactId>*</artifactId>
          </exclusion>
          <exclusion>
            <groupId>com.cloudera</groupId>
            <artifactId>*</artifactId>
          </exclusion>
          <exclusion>
            <groupId>org.cloudera.logredactor</groupId>
            <artifactId>*</artifactId>
          </exclusion>
          <exclusion>
            <groupId>org.bouncycastle</groupId>
            <artifactId>*</artifactId>
          </exclusion>
        </exclusions>
      </dependency>

      <dependency>
        <groupId>org.apache.iceberg</groupId>
        <artifactId>iceberg-api</artifactId>
        <version>${iceberg.version}</version>
      </dependency>
      <dependency>
        <groupId>org.apache.iceberg</groupId>
        <artifactId>iceberg-hive-runtime</artifactId>
        <version>${iceberg.version}</version>
      </dependency>

      <dependency>
        <groupId>org.kitesdk</groupId>
        <artifactId>kite-data-core</artifactId>
        <version>${kite.version}</version>
        <exclusions>
          <exclusion>
            <groupId>com.twitter</groupId>
            <artifactId>parquet-avro</artifactId>
          </exclusion>
        </exclusions>
      </dependency>

      <!-- Pin Dropwizard Metrics versions. We directly use metrics-core, and
           metrics-jvm/metrics-json are imported via HMS. -->
      <dependency>
        <groupId>io.dropwizard.metrics</groupId>
        <artifactId>metrics-core</artifactId>
        <version>${dropwizard-metrics.version}</version>
      </dependency>
      <dependency>
        <groupId>io.dropwizard.metrics</groupId>
        <artifactId>metrics-jvm</artifactId>
        <version>${dropwizard-metrics.version}</version>
      </dependency>
      <dependency>
        <groupId>io.dropwizard.metrics</groupId>
        <artifactId>metrics-json</artifactId>
        <version>${dropwizard-metrics.version}</version>
      </dependency>

      <!-- Pin aircompressor version. Orc depends on this, but Orc upgrade will be
           planned separately. Remove once Orc is upgraded to 1.9.4+. -->
      <dependency>
        <groupId>io.airlift</groupId>
        <artifactId>aircompressor</artifactId>
        <version>${aircompressor.version}</version>
      </dependency>
    </dependencies>
  </dependencyManagement>

  <build>
    <pluginManagement>
      <plugins>
        <plugin>
          <groupId>org.apache.maven.plugins</groupId>
          <artifactId>maven-compiler-plugin</artifactId>
          <version>3.14.0</version>
        </plugin>

        <plugin>
          <groupId>org.apache.maven.plugins</groupId>
          <artifactId>maven-dependency-plugin</artifactId>
          <version>3.5.0</version>
        </plugin>

        <plugin>
          <groupId>org.apache.maven.plugins</groupId>
          <artifactId>maven-jar-plugin</artifactId>
          <version>3.3.0</version>
        </plugin>

        <plugin>
          <groupId>org.apache.maven.plugins</groupId>
          <artifactId>maven-shade-plugin</artifactId>
          <version>3.4.1</version>
          <executions>
            <execution>
              <phase>package</phase>
              <goals>
                <goal>shade</goal>
              </goals>
            </execution>
          </executions>
        </plugin>

        <plugin>
          <groupId>org.apache.maven.plugins</groupId>
          <artifactId>maven-source-plugin</artifactId>
          <version>3.2.1</version>
          <executions>
            <execution>
              <id>attach-sources</id>
              <goals>
                <goal>jar</goal>
              </goals>
            </execution>
          </executions>
        </plugin>

        <plugin>
          <groupId>org.apache.maven.plugins</groupId>
          <artifactId>maven-surefire-plugin</artifactId>
          <version>3.0.0</version>
          <configuration>
            <redirectTestOutputToFile>true</redirectTestOutputToFile>
          </configuration>
        </plugin>

        <plugin>
          <groupId>org.codehaus.mojo</groupId>
          <artifactId>build-helper-maven-plugin</artifactId>
          <version>3.3.0</version>
        </plugin>
      </plugins>
    </pluginManagement>
  </build>

  <modules>
    <module>datagenerator</module>
    <module>puffin-data-generator</module>
    <module>iceberg-rest-catalog-test</module>
    <module>executor-deps</module>
    <module>ext-data-source</module>
    <module>../fe</module>
    <module>external-frontend</module>
    <module>calcite-planner</module>
    <module>impala-package</module>
    <module>query-event-hook-api</module>
    <module>shaded-deps/hive-exec</module>
    <module>shaded-deps/s3a-aws-sdk</module>
    <module>TableFlattener</module>
    <module>test-hive-udfs</module>
    <module>test-corrupt-hive-udfs</module>
    <module>yarn-extras</module>
  </modules>

</project>