IMPALA-14536: Fix CONVERT TO ICEBERG to not throw exception on Iceberg tables
Previously, running ALTER TABLE &lt;table&gt; CONVERT TO ICEBERG on a table that is already an Iceberg table produced an error. This patch fixes that: the statement now does nothing when called on an Iceberg table and returns the message 'Table has already been migrated.'

This is achieved by adding a new flag to StatementBase that signals when a statement ends up being a NO_OP. If the flag is set, the new TStmtType::NO_OP is set as the TExecRequest's type and noop_result can be used to set the result from the Frontend side.

Tests:
 * extended fe and e2e tests

Change-Id: I41ecbfd350d38e4e3fd7b813a4fc27211d828f73
Reviewed-on: http://gerrit.cloudera.org:8080/23699
Tested-by: Impala Public Jenkins <impala-public-jenkins@cloudera.com>
Reviewed-by: Peter Rozsa <prozsa@cloudera.com>
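For illustration, the client-visible behavior after this change is roughly the following (the table name is a placeholder; the result messages match the e2e test extended at the end of this diff):

  -- First invocation migrates the legacy HDFS/Parquet table to Iceberg:
  ALTER TABLE my_parquet_tbl CONVERT TO ICEBERG;
  -- returns: 'Table has been migrated.'

  -- A second invocation on the now-Iceberg table is a NO_OP instead of an error:
  ALTER TABLE my_parquet_tbl CONVERT TO ICEBERG;
  -- returns: 'Table has already been migrated.'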
committed by Peter Rozsa
parent d54b75ccf1
commit 9d112dae23
@@ -388,6 +388,12 @@ Status ClientRequestState::Exec() {
       DCHECK(exec_req.__isset.kill_query_request);
       LOG_AND_RETURN_IF_ERROR(ExecKillQueryRequest());
       break;
+    case TStmtType::NO_OP:
+      if (exec_req.__isset.noop_result) {
+        SetResultSet(exec_req.noop_result);
+      }
+      return Status::OK();
+      break;
     default:
       return Status(Substitute("Unknown exec request stmt type: $0", exec_req.stmt_type));
   }
@@ -1464,8 +1470,11 @@ void ClientRequestState::UpdateNonErrorExecState(ExecState new_state) {
   // valid to transition from ERROR to FINISHED, so skip any attempt to do so.
   if (old_state != ExecState::ERROR) {
     // A query can transition from PENDING to FINISHED if it is cancelled by the
-    // client.
-    DCHECK(old_state == ExecState::PENDING || old_state == ExecState::RUNNING)
+    // client. NO_OP statements can also transition from INITIALIZED to FINISHED.
+    bool valid_transition =
+        old_state == ExecState::PENDING || old_state == ExecState::RUNNING
+        || (stmt_type() == TStmtType::NO_OP && old_state == ExecState::INITIALIZED);
+    DCHECK(valid_transition)
         << Substitute(error_msg, ExecStateToString(old_state),
             ExecStateToString(new_state), PrintId(query_id()));
     UpdateExecState(new_state);
@@ -750,6 +750,10 @@ struct TExecRequest {

   // Request for "KILL QUERY" statements.
   26: optional TKillQueryReq kill_query_request
+
+  // Result of statements which end up being NO_OP. Set iff stmt_type is NO_OP. E.g.,
+  // "ALTER TABLE ... CONVERT TO ICEBERG" when the table is already an Iceberg table.
+  27: optional list<string> noop_result
 }

 // Parameters to FeSupport.cacheJar().
@@ -115,6 +115,7 @@ enum TStmtType {
   CONVERT = 8
   UNKNOWN = 9
   KILL = 10
+  NO_OP = 11
 }

 enum TIcebergOperation {
@@ -20,6 +20,8 @@ package org.apache.impala.analysis;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 import com.google.common.collect.Maps;

+import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
@@ -32,6 +34,7 @@ import org.apache.impala.analysis.QueryStringBuilder.Rename;
 import org.apache.impala.analysis.QueryStringBuilder.SetTblProps;
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeIcebergTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.IcebergTable;
 import org.apache.impala.catalog.Table;
@@ -88,6 +91,13 @@ public class ConvertTableToIcebergStmt extends StatementBase implements SingleTa
     // table. Once it's fixed, ALL privileges on the table are enough.
     analyzer.getDb(tableName_.getDb(), Privilege.ALL);
     FeTable table = analyzer.getTable(tableName_, Privilege.ALL);
+
+    // Do nothing if the table is already an Iceberg table.
+    if (table instanceof FeIcebergTable) {
+      setIsNoOp();
+      return;
+    }
+
     if (!(table instanceof FeFsTable)) {
       throw new AnalysisException("CONVERT TO ICEBERG is not supported for " +
           table.getClass().getSimpleName());
@@ -209,6 +219,11 @@ public class ConvertTableToIcebergStmt extends StatementBase implements SingleTa
     return TableName.parse(tmpTableNameStr);
   }

+  @Override
+  public List<String> getNoopSummary() throws AnalysisException {
+    return Collections.singletonList("Table has already been migrated.");
+  }
+
   public TConvertTableRequest toThrift() {
     Preconditions.checkNotNull(tableName_);
     Preconditions.checkNotNull(tmpHdfsTableName_);
@@ -44,6 +44,9 @@ public abstract class StatementBase extends StmtNode {
   // True if this Stmt is the top level of an explain stmt.
   protected boolean isExplain_ = false;

+  // True if the statement is noop.
+  protected boolean isNoOp_ = false;
+
   /////////////////////////////////////////
   // BEGIN: Members that need to be reset()
@@ -61,6 +64,7 @@ public abstract class StatementBase extends StmtNode {
   protected StatementBase(StatementBase other) {
     analyzer_ = other.analyzer_;
     isExplain_ = other.isExplain_;
+    isNoOp_ = other.isNoOp_;
   }

   /**
@@ -167,6 +171,14 @@ public abstract class StatementBase extends StmtNode {
   public void setIsExplain() { isExplain_ = true; }
   public boolean isExplain() { return isExplain_; }

+  public void setIsNoOp() { isNoOp_ = true; }
+  public boolean isNoOp() { return isNoOp_; }
+
+  public List<String> getNoopSummary() throws AnalysisException {
+    throw new IllegalStateException(
+        "getNoopSummary() not implemented for this stmt: " + getClass().getSimpleName());
+  }
+
   /**
    * Returns a deep copy of this node including its analysis state. Some members such as
    * tuple and slot descriptors are generally not deep copied to avoid potential
@@ -76,6 +76,7 @@ import org.apache.impala.analysis.AlterDbStmt;
 import org.apache.impala.analysis.AnalysisContext;
 import org.apache.impala.analysis.AnalysisContext.AnalysisResult;
 import org.apache.impala.analysis.CommentOnStmt;
+import org.apache.impala.analysis.ConvertTableToIcebergStmt;
 import org.apache.impala.analysis.CopyTestCaseStmt;
 import org.apache.impala.analysis.CreateDataSrcStmt;
 import org.apache.impala.analysis.CreateDropRoleStmt;
@@ -3097,11 +3098,17 @@ public class Frontend {
       result.setAdmin_request(analysisResult.getAdminFnStmt().toThrift());
       return result;
     } else if (analysisResult.isConvertTableToIcebergStmt()) {
-      result.stmt_type = TStmtType.CONVERT;
       result.setResult_set_metadata(new TResultSetMetadata(
           Collections.singletonList(new TColumn("summary", Type.STRING.toThrift()))));
-      result.setConvert_table_request(
-          analysisResult.getConvertTableToIcebergStmt().toThrift());
+      ConvertTableToIcebergStmt stmt = analysisResult.getConvertTableToIcebergStmt();
+      if (stmt.isNoOp()) {
+        result.setStmt_type(TStmtType.NO_OP);
+        result.setNoop_result(stmt.getNoopSummary());
+      } else {
+        result.setStmt_type(TStmtType.CONVERT);
+        result.setConvert_table_request(
+            analysisResult.getConvertTableToIcebergStmt().toThrift());
+      }
       return result;
     } else if (analysisResult.isTestCaseStmt()) {
       CopyTestCaseStmt testCaseStmt = ((CopyTestCaseStmt) analysisResult.getStmt());
@@ -5234,5 +5234,6 @@ public class AnalyzeStmtsTest extends AnalyzerTest {
     AnalysisError("alter table functional_parquet.tinytable convert to iceberg"
         + " tblproperties('metadata.generator.threads'='a1')",
         "CONVERT TO ICEBERG doesn't accept 'metadata.generator.threads' as TBLPROPERTY.");
+    AnalyzesOk("alter table functional_parquet.iceberg_alltypes_part convert to iceberg");
   }
 }
@@ -78,6 +78,11 @@ alter table parquet_partitioned convert to iceberg tblproperties('iceberg.catalo
 'Table has been migrated.'
 ====
+---- QUERY
+alter table parquet_partitioned convert to iceberg tblproperties('iceberg.catalog' = 'hadoop.tables');
+---- RESULTS
+'Table has already been migrated.'
+====
 ---- QUERY
 select count(*) from parquet_partitioned;
 ---- RESULTS
 7301