Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The class LoadPartitions, method dropPartitionTask.
private Task<?> dropPartitionTask(Table table, Map<String, String> partSpec) throws SemanticException {
  Task<DDLWork> dropPtnTask = null;
  // Turn the partition spec into drop expressions; skip the task if nothing matches.
  Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecsExpr =
      ReplUtils.genPartSpecs(table, Collections.singletonList(partSpec));
  if (partSpecsExpr.size() > 0) {
    AlterTableDropPartitionDesc dropPtnDesc =
        new AlterTableDropPartitionDesc(HiveTableName.of(table), partSpecsExpr, true, event.replicationSpec());
    dropPtnTask = TaskFactory.get(
        new DDLWork(new HashSet<>(), new HashSet<>(), dropPtnDesc, true,
            (new Path(context.dumpDirectory)).getParent().toString(), this.metricCollector),
        context.hiveConf);
  }
  return dropPtnTask;
}
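A minimal caller sketch (the names loadPtnTask and tasks are hypothetical, not taken from LoadPartitions): since dropPartitionTask returns null when genPartSpecs yields no expressions, the caller should guard before chaining.

Task<?> dropTask = dropPartitionTask(table, partSpec);
if (dropTask != null) {
  // Run the drop before the task that re-creates the partition.
  dropTask.addDependentTask(loadPtnTask);
  tasks.add(dropTask);
} else {
  tasks.add(loadPtnTask);
}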
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The class ReplUtils, method getTableCheckpointTask.
public static Task<?> getTableCheckpointTask(ImportTableDesc tableDesc, HashMap<String, String> partSpec,
    String dumpRoot, ReplicationMetricCollector metricCollector, HiveConf conf) throws SemanticException {
  // Record the dump root as a replication checkpoint property on the table (or partition, if partSpec is set).
  HashMap<String, String> mapProp = new HashMap<>();
  mapProp.put(REPL_CHECKPOINT_KEY, dumpRoot);
  final TableName tName = TableName.fromString(tableDesc.getTableName(), null, tableDesc.getDatabaseName());
  AlterTableSetPropertiesDesc alterTblDesc =
      new AlterTableSetPropertiesDesc(tName, partSpec, null, false, mapProp, false, false, null);
  return TaskFactory.get(
      new DDLWork(new HashSet<>(), new HashSet<>(), alterTblDesc, true,
          (new Path(dumpRoot)).getParent().toString(), metricCollector),
      conf);
}
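A hedged usage sketch, assuming an ImportTableDesc, a ReplicationMetricCollector and a parent load task are already in scope (loadTableTask is an illustrative name). Passing null as partSpec applies the checkpoint property at table level, since the spec is handed straight to AlterTableSetPropertiesDesc.

// Set the checkpoint only after the table data has actually been loaded.
Task<?> ckptTask = ReplUtils.getTableCheckpointTask(tableDesc, null, dumpRoot, metricCollector, conf);
loadTableTask.addDependentTask(ckptTask);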
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The class AcidExportSemanticAnalyzer, method analyzeAcidExport.
/**
 * See {@link #isAcidExport(ASTNode)}.
 * 1. create the temp table T
 * 2. compile 'insert into T select * from acidTable'
 * 3. compile 'export acidTable' (acidTable will be replaced with T during execution)
 * 4. create a task to drop T
 *
 * Using a true temp (session-level) table means the export does not affect replication, and the
 * table is not visible outside the session that created it, which is desirable for security.
 */
private void analyzeAcidExport(ASTNode ast) throws SemanticException {
  assert ast != null && ast.getToken() != null && ast.getToken().getType() == HiveParser.TOK_EXPORT;
  ASTNode tableTree = (ASTNode) ast.getChild(0);
  assert tableTree != null && tableTree.getType() == HiveParser.TOK_TAB;
  ASTNode tokRefOrNameExportTable = (ASTNode) tableTree.getChild(0);
  Table exportTable = getTargetTable(tokRefOrNameExportTable);
  if (exportTable != null && (exportTable.isView() || exportTable.isMaterializedView())) {
    throw new SemanticException("Views and Materialized Views can not be exported.");
  }
  assert AcidUtils.isFullAcidTable(exportTable);
  // Need to create the table "manually" rather than via a task, since it has to exist in order to
  // compile the "insert into T ..." statement. newTableName is db.table.
  final String newTableName = getTmptTableNameForExport(exportTable);
  final TableName newTableNameRef = HiveTableName.of(newTableName);
  Map<String, String> tblProps = new HashMap<>();
  tblProps.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, Boolean.FALSE.toString());
  String location;
  // The location has the same life cycle as the tmp table.
  try {
    // Generate a unique ID for the temp table path; this path is fixed for the life of the temp table.
    Path path = new Path(SessionState.getTempTableSpace(conf), UUID.randomUUID().toString());
    path = Warehouse.getDnsPath(path, conf);
    location = path.toString();
  } catch (MetaException err) {
    throw new SemanticException("Error while generating temp table path:", err);
  }
  CreateTableLikeDesc ctlt = new CreateTableLikeDesc(newTableName, false, true, null, null, location, null, null, tblProps,
      // important so we get an exception on name collision
      true, Warehouse.getQualifiedName(exportTable.getTTable()), false);
  Table newTable;
  try {
    ReadEntity dbForTmpTable = new ReadEntity(db.getDatabase(exportTable.getDbName()));
    // so the plan knows we are 'reading' this db - locks, security...
    inputs.add(dbForTmpTable);
    DDLTask createTableTask = (DDLTask) TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), ctlt), conf);
    // the get() above doesn't set the conf
    createTableTask.setConf(conf);
    Context context = new Context(conf);
    createTableTask.initialize(null, null, new TaskQueue(context), context);
    createTableTask.execute();
    newTable = db.getTable(newTableName);
  } catch (HiveException ex) {
    throw new SemanticException(ex);
  }
  // Now generate the insert statement:
  // insert into newTableName select * from ts <where partition spec>
  StringBuilder rewrittenQueryStr = generateExportQuery(newTable.getPartCols(), tokRefOrNameExportTable, tableTree, newTableName);
  ReparseResult rr = parseRewrittenQuery(rewrittenQueryStr, ctx.getCmd());
  Context rewrittenCtx = rr.rewrittenCtx;
  // it's set in parseRewrittenQuery()
  rewrittenCtx.setIsUpdateDeleteMerge(false);
  ASTNode rewrittenTree = rr.rewrittenTree;
  try {
    useSuper = true;
    // newTable has to exist at this point to compile
    super.analyze(rewrittenTree, rewrittenCtx);
  } finally {
    useSuper = false;
  }
  // Now we have the rootTasks set up for the Insert ... Select.
  removeStatsTasks(rootTasks);
  // Now make an ExportTask from the temp table.
  /* analyzeExport() creates a TableSpec which in turn tries to build
     "public List<Partition> partitions" by looking in the metastore for Partitions matching
     the partition spec in the Export command. These of course don't exist yet, since the
     insert statement has not been run yet. */
  Task<ExportWork> exportTask = ExportSemanticAnalyzer.analyzeExport(ast, newTableName, db, conf, inputs, outputs);
  // Add an alter table task to set transactional props.
  // Do it after populating the temp table so that it is written as a non-transactional table, but
  // update the props before export so that the export archive metadata carries them. This way, when
  // IMPORT is done for this archive and the target table doesn't exist, it will be created as Acid.
  Map<String, String> mapProps = new HashMap<>();
  mapProps.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, Boolean.TRUE.toString());
  AlterTableSetPropertiesDesc alterTblDesc = new AlterTableSetPropertiesDesc(newTableNameRef, null, null, false, mapProps, false, false, null);
  addExportTask(rootTasks, exportTask, TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc)));
  // Now make a task to drop the temp table.
  // See {@link DropTableAnalyzer#analyzeInternal(ASTNode)}.
  ReplicationSpec replicationSpec = new ReplicationSpec();
  DropTableDesc dropTblDesc = new DropTableDesc(newTableName, false, true, replicationSpec);
  Task<DDLWork> dropTask = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), dropTblDesc), conf);
  exportTask.addDependentTask(dropTask);
  markReadEntityForUpdate();
  if (ctx.isExplainPlan()) {
    try {
      // Drop the temp table here so that "explain" doesn't "leak" tmp tables.
      // TODO: catalog
      db.dropTable(newTable.getDbName(), newTable.getTableName(), true, true, true);
    } catch (HiveException ex) {
      LOG.warn("Unable to drop " + newTableName + " due to: " + ex.getMessage(), ex);
    }
  }
}
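The cleanup at the end reuses a general pattern: build a DDL descriptor, wrap it in a DDLWork, and chain the resulting task behind the task it must follow. A stripped-down sketch, assuming parentTask, tmpTableName and conf are in scope (names illustrative only):

DropTableDesc dropDesc = new DropTableDesc(tmpTableName, false, true, new ReplicationSpec());
Task<DDLWork> cleanupTask = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), dropDesc), conf);
// The temp table is removed only after its parent task has finished.
parentTask.addDependentTask(cleanupTask);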
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The class AddNotNullConstraintHandler, method handle.
@Override
public List<Task<?>> handle(Context context) throws SemanticException {
  AddNotNullConstraintMessage msg = deserializer.getAddNotNullConstraintMessage(context.dmd.getPayload());
  List<SQLNotNullConstraint> nns;
  try {
    nns = msg.getNotNullConstraints();
  } catch (Exception e) {
    if (!(e instanceof SemanticException)) {
      throw new SemanticException("Error reading message members", e);
    } else {
      throw (SemanticException) e;
    }
  }
  List<Task<?>> tasks = new ArrayList<Task<?>>();
  if (nns.isEmpty()) {
    return tasks;
  }
  final String actualDbName = context.isDbNameEmpty() ? nns.get(0).getTable_db() : context.dbName;
  final String actualTblName = nns.get(0).getTable_name();
  final TableName tName = TableName.fromString(actualTblName, null, actualDbName);
  // Rewrite the constraints to point at the target (replica) database and table.
  for (SQLNotNullConstraint nn : nns) {
    nn.setTable_db(actualDbName);
    nn.setTable_name(actualTblName);
  }
  Constraints constraints = new Constraints(null, null, nns, null, null, null);
  AlterTableAddConstraintDesc addConstraintsDesc =
      new AlterTableAddConstraintDesc(tName, context.eventOnlyReplicationSpec(), constraints);
  Task<DDLWork> addConstraintsTask = TaskFactory.get(
      new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc, true,
          context.getDumpDirectory(), context.getMetricCollector()),
      context.hiveConf);
  tasks.add(addConstraintsTask);
  context.log.debug("Added add constraints task : {}:{}", addConstraintsTask.getId(), actualTblName);
  updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null);
  return Collections.singletonList(addConstraintsTask);
}
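As an aside on TableName.fromString as used above: it appears to fall back to the supplied default database when the input name is not already qualified. A small illustrative sketch under that assumption (the table and database names are made up):

// Unqualified name: resolved against the default database given as the third argument.
TableName t1 = TableName.fromString("orders", null, "sales_db");
// Already qualified name: the embedded database takes precedence (assumed behaviour).
TableName t2 = TableName.fromString("sales_db.orders", null, "other_db");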
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The class AlterDatabaseHandler, method handle.
@Override
public List<Task<?>> handle(Context context) throws SemanticException {
  AlterDatabaseMessage msg = deserializer.getAlterDatabaseMessage(context.dmd.getPayload());
  String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
  try {
    Database oldDb = msg.getDbObjBefore();
    Database newDb = msg.getDbObjAfter();
    AbstractAlterDatabaseDesc alterDbDesc;
    if ((oldDb.getOwnerType() == newDb.getOwnerType()) && oldDb.getOwnerName().equalsIgnoreCase(newDb.getOwnerName())) {
      // If owner information is unchanged, then DB properties would've changed
      Map<String, String> newDbProps = new HashMap<>();
      Map<String, String> dbProps = newDb.getParameters();
      for (Map.Entry<String, String> entry : dbProps.entrySet()) {
        String key = entry.getKey();
        // Ignore the keys which are local to source warehouse
        if (key.startsWith(Utils.BOOTSTRAP_DUMP_STATE_KEY_PREFIX)
            || key.equals(ReplicationSpec.KEY.CURR_STATE_ID_SOURCE.toString())
            || key.equals(ReplicationSpec.KEY.CURR_STATE_ID_TARGET.toString())
            || key.equals(ReplUtils.REPL_CHECKPOINT_KEY)
            || key.equals(ReplChangeManager.SOURCE_OF_REPLICATION)
            || key.equals(ReplUtils.REPL_FIRST_INC_PENDING_FLAG)
            || key.equals(ReplConst.REPL_FAILOVER_ENDPOINT)) {
          continue;
        }
        newDbProps.put(key, entry.getValue());
      }
      alterDbDesc = new AlterDatabaseSetPropertiesDesc(actualDbName, newDbProps, context.eventOnlyReplicationSpec());
    } else {
      alterDbDesc = new AlterDatabaseSetOwnerDesc(actualDbName,
          new PrincipalDesc(newDb.getOwnerName(), newDb.getOwnerType()), context.eventOnlyReplicationSpec());
    }
    Task<DDLWork> alterDbTask = TaskFactory.get(
        new DDLWork(readEntitySet, writeEntitySet, alterDbDesc, true,
            context.getDumpDirectory(), context.getMetricCollector()),
        context.hiveConf);
    context.log.debug("Added alter database task : {}:{}", alterDbTask.getId(), actualDbName);
    // Only database object is updated
    updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, null, null);
    return Collections.singletonList(alterDbTask);
  } catch (Exception e) {
    throw (e instanceof SemanticException) ? (SemanticException) e : new SemanticException("Error reading message members", e);
  }
}
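The key filter in the loop above could be pulled into a small predicate for readability. This is only a sketch, not code from the handler; it reuses the same constants the handler already references.

private static boolean isSourceLocalKey(String key) {
  return key.startsWith(Utils.BOOTSTRAP_DUMP_STATE_KEY_PREFIX)
      || key.equals(ReplicationSpec.KEY.CURR_STATE_ID_SOURCE.toString())
      || key.equals(ReplicationSpec.KEY.CURR_STATE_ID_TARGET.toString())
      || key.equals(ReplUtils.REPL_CHECKPOINT_KEY)
      || key.equals(ReplChangeManager.SOURCE_OF_REPLICATION)
      || key.equals(ReplUtils.REPL_FIRST_INC_PENDING_FLAG)
      || key.equals(ReplConst.REPL_FAILOVER_ENDPOINT);
}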