Use of org.apache.hadoop.hive.metastore.messaging.CommitTxnMessage in project hive by apache.
The class CommitTxnHandler, method handle.
@Override
public List<Task<?>> handle(Context context) throws SemanticException {
  if (!AcidUtils.isAcidEnabled(context.hiveConf)) {
    context.log.error("Cannot load transaction events as acid is not enabled");
    throw new SemanticException("Cannot load transaction events as acid is not enabled");
  }
  CommitTxnMessage msg = deserializer.getCommitTxnMessage(context.dmd.getPayload());
  int numEntry = (msg.getTables() == null ? 0 : msg.getTables().size());
  List<Task<?>> tasks = new ArrayList<>();
  String dbName = context.dbName;
  String tableNamePrev = null;
  String tblName = null;
  ReplTxnWork work = new ReplTxnWork(HiveUtils.getReplPolicy(context.dbName), context.dbName, null,
      msg.getTxnId(), ReplTxnWork.OperationType.REPL_COMMIT_TXN, context.eventOnlyReplicationSpec(),
      context.getDumpDirectory(), context.getMetricCollector());
  if (numEntry > 0) {
    context.log.debug("Commit txn handler for txnid " + msg.getTxnId() + " databases : " + msg.getDatabases()
        + " tables : " + msg.getTables() + " partitions : " + msg.getPartitions()
        + " files : " + msg.getFilesList() + " write ids : " + msg.getWriteIds());
  }
  for (int idx = 0; idx < numEntry; idx++) {
    String actualTblName = msg.getTables().get(idx);
    String actualDBName = msg.getDatabases().get(idx);
    String completeName = Table.getCompleteName(actualDBName, actualTblName);
    // Write events for the same table are grouped together in the commit txn message,
    // so create one import task per table.
    if (tableNamePrev == null || !(completeName.equals(tableNamePrev))) {
      // The data location is created by the source, so the location should be formed
      // based on the table name in the message.
      Path location = HiveUtils.getDumpPath(new Path(context.location), actualDBName, actualTblName);
      tblName = actualTblName;
      // For a warehouse-level dump, use the db name from the write event.
      dbName = (context.isDbNameEmpty() ? actualDBName : context.dbName);
      Context currentContext = new Context(context, dbName, context.getDumpDirectory(), context.getMetricCollector());
      currentContext.setLocation(location.toUri().toString());
      // Piggybacking in Import logic for now
      TableHandler tableHandler = new TableHandler();
      tasks.addAll((tableHandler.handle(currentContext)));
      readEntitySet.addAll(tableHandler.readEntities());
      writeEntitySet.addAll(tableHandler.writeEntities());
      getUpdatedMetadata().copyUpdatedMetadata(tableHandler.getUpdatedMetadata());
      tableNamePrev = completeName;
    }
    try {
      WriteEventInfo writeEventInfo = new WriteEventInfo(msg.getWriteIds().get(idx), dbName, tblName, msg.getFiles(idx));
      if (msg.getPartitions().get(idx) != null && !msg.getPartitions().get(idx).isEmpty()) {
        writeEventInfo.setPartition(msg.getPartitions().get(idx));
      }
      work.addWriteEventInfo(writeEventInfo);
    } catch (Exception e) {
      throw new SemanticException("Failed to extract write event info from commit txn message : " + e.getMessage());
    }
  }
  Task<ReplTxnWork> commitTxnTask = TaskFactory.get(work, context.hiveConf);
  // For a warehouse-level dump there is no single database whose metadata should be updated.
  // In any case, if this event gets executed again, it is taken care of.
  if (!context.isDbNameEmpty()) {
    updatedMetadata.set(context.dmd.getEventTo().toString(), context.dbName, null, null);
  }
  context.log.debug("Added Commit txn task : {}", commitTxnTask.getId());
  if (tasks.isEmpty()) {
    // No import tasks were created; the commit task alone will be used for setting the last repl id.
    return Collections.singletonList(commitTxnTask);
  }
  DAGTraversal.traverse(tasks, new AddDependencyToLeaves(commitTxnTask));
  return tasks;
}
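The closing DAGTraversal.traverse call hangs the single commit task off every leaf of the per-table import DAG, so the target commits the transaction only after all write events have been imported. Below is a minimal sketch of that leaf-chaining idea; the Node class and addToLeaves helper are hypothetical stand-ins for Hive's Task and AddDependencyToLeaves, not the real API.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

// Hypothetical stand-in for a node of Hive's Task DAG; not the real Hive API.
class Node {
  final String name;
  final List<Node> children = new ArrayList<>();
  Node(String name) { this.name = name; }
}

public class AddDependencyToLeavesSketch {
  // Hang `dependent` off every leaf, mirroring what AddDependencyToLeaves
  // does with the commit txn task after the per-table import tasks.
  static void addToLeaves(List<Node> roots, Node dependent) {
    Deque<Node> stack = new ArrayDeque<>(roots);
    while (!stack.isEmpty()) {
      Node n = stack.pop();
      if (n == dependent) {
        continue; // skip the dependent itself if revisited via a diamond-shaped DAG
      }
      if (n.children.isEmpty()) {
        n.children.add(dependent);
      } else {
        stack.addAll(n.children);
      }
    }
  }

  public static void main(String[] args) {
    Node importT1 = new Node("import t1");
    Node importT2 = new Node("import t2");
    Node commit = new Node("commit txn");
    addToLeaves(List.of(importT1, importT2), commit);
    // Both import tasks now lead into the single commit task.
    System.out.println(importT1.children.get(0).name); // commit txn
    System.out.println(importT2.children.get(0).name); // commit txn
  }
}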
Use of org.apache.hadoop.hive.metastore.messaging.CommitTxnMessage in project hive by apache.
The class DbNotificationListener, method onCommitTxn.
@Override
public void onCommitTxn(CommitTxnEvent commitTxnEvent, Connection dbConn, SQLGenerator sqlGenerator)
    throws MetaException {
  // Read-only transactions produce no writes, so no notification event is logged for them.
  if (commitTxnEvent.getTxnType() == TxnType.READ_ONLY) {
    return;
  }
  CommitTxnMessage msg = MessageBuilder.getInstance().buildCommitTxnMessage(commitTxnEvent.getTxnId());
  NotificationEvent event = new NotificationEvent(0, now(), EventType.COMMIT_TXN.toString(),
      msgEncoder.getSerializer().serialize(msg));
  try {
    addNotificationLog(event, commitTxnEvent, dbConn, sqlGenerator);
  } catch (SQLException e) {
    throw new MetaException("Unable to execute direct SQL " + StringUtils.stringifyException(e));
  }
}
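The serializer used here and the deserializer used in CommitTxnHandler.handle above form a matched pair. Below is a minimal round-trip sketch under stated assumptions: it presumes the hive-standalone-metastore classes are on the classpath and that the JSON encoder is obtained via JSONMessageEncoder.getInstance() as in Hive 3+; the txn id 42L is just an illustrative value.

import org.apache.hadoop.hive.metastore.messaging.CommitTxnMessage;
import org.apache.hadoop.hive.metastore.messaging.MessageBuilder;
import org.apache.hadoop.hive.metastore.messaging.MessageEncoder;
import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageEncoder;

public class CommitTxnRoundTrip {
  public static void main(String[] args) {
    // Assumption: the JSON encoder is the default pairing, as used by DbNotificationListener.
    MessageEncoder encoder = JSONMessageEncoder.getInstance();
    // Build the message the same way DbNotificationListener.onCommitTxn does (42L is illustrative).
    CommitTxnMessage msg = MessageBuilder.getInstance().buildCommitTxnMessage(42L);
    // Serialize, as done for the NotificationEvent payload above...
    String payload = encoder.getSerializer().serialize(msg);
    // ...and deserialize, as CommitTxnHandler.handle does on the replication side.
    CommitTxnMessage roundTripped = encoder.getDeserializer().getCommitTxnMessage(payload);
    System.out.println(roundTripped.getTxnId()); // 42
  }
}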