Usage example of org.apache.hadoop.hive.ql.ddl.DDLDesc.DDLDescWithWriteId in the Apache Hive project.
From class DriverTxnHandler, method setWriteIdForAcidDdl:
/**
 * Allocates and assigns a table write id to the plan's ACID DDL descriptor,
 * when the plan carries one that may require a write id.
 *
 * @return {@code true} if an ACID DDL descriptor was present and may need a write id
 * @throws SemanticException if the fully qualified table name cannot be parsed
 * @throws LockException if the transaction manager fails to allocate a write id
 */
private boolean setWriteIdForAcidDdl() throws SemanticException, LockException {
  DDLDescWithWriteId ddlDesc = driverContext.getPlan().getAcidDdlDesc();
  // Nothing to do unless the plan has an ACID DDL descriptor that wants a write id.
  if (ddlDesc == null || !ddlDesc.mayNeedWriteId()) {
    return false;
  }
  TableName parsedName = HiveTableName.of(ddlDesc.getFullTableName());
  long allocatedWriteId =
      driverContext.getTxnManager().getTableWriteId(parsedName.getDb(), parsedName.getTable());
  ddlDesc.setWriteId(allocatedWriteId);
  return true;
}
Another usage example of org.apache.hadoop.hive.ql.ddl.DDLDesc.DDLDescWithWriteId in the Apache Hive project.
From class AlterTableSkewedByAnalyzer, method analyzeCommand:
/**
 * Analyzes an ALTER TABLE ... SKEWED BY command and registers the resulting DDL work.
 *
 * Builds the appropriate descriptor for the three supported forms (NOT SKEWED,
 * SKEWED BY, and NOT STORED AS DIRECTORIES), records read/write entities, and
 * queues a DDL task for execution.
 *
 * @param tableName the table being altered
 * @param partitionSpec partition specification (unused for SKEWED BY alterations)
 * @param command the parsed AST of the command
 * @throws SemanticException if the table fails validation or the command contains
 *         an unexpected token
 */
@Override
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command)
    throws SemanticException {
  Table table = getTable(tableName);
  validateAlterTableType(table, AlterTableType.SKEWED_BY, false);

  inputs.add(new ReadEntity(table));
  outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_EXCLUSIVE));

  DDLDescWithWriteId desc;
  if (command.getChildCount() == 0) {
    // No child tokens: plain "ALTER TABLE ... NOT SKEWED".
    desc = new AlterTableNotSkewedDesc(tableName);
  } else {
    int tokenType = ((ASTNode) command.getChild(0)).getToken().getType();
    switch (tokenType) {
    case HiveParser.TOK_TABLESKEWED:
      desc = handleAlterTableSkewedBy(command, tableName, table);
      break;
    case HiveParser.TOK_STOREDASDIRS:
      desc = handleAlterTableDisableStoredAsDirs(tableName, table);
      break;
    default:
      // Was "assert false": a no-op when JVM assertions are disabled (the default),
      // which let a null desc reach new DDLWork(...) below. Fail fast instead.
      throw new SemanticException("Unexpected token in ALTER TABLE SKEWED BY command: " + tokenType);
    }
  }

  // Every successful branch yields a non-null descriptor; register it once here
  // instead of duplicating the call in each branch.
  setAcidDdlDesc(table, desc);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
Aggregations of further usages are available in the project's code index.