Use of org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData in project hive by apache.
The class OpenTxnHandler, method handle.
@Override
public void handle(Context withinContext) throws Exception {
  if (!ReplUtils.includeAcidTableInDump(withinContext.hiveConf)) {
    return;
  }
  LOG.info("Processing#{} OPEN_TXN message : {}", fromEventId(), eventMessageAsJSON);
  DumpMetaData dmd = withinContext.createDmd(this);
  dmd.setPayload(eventMessageAsJSON);
  dmd.write();
}
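Every handler on this page ends with the same three-step DumpMetaData flow: create the dump metadata for the event, attach the serialized event message as the payload, and write it out. A minimal sketch of that shared flow, expressed as a hypothetical helper (the helper itself is not part of Hive; createDmd, setPayload, and write are exactly the calls used in the snippets on this page):

// Hypothetical helper illustrating the shared dump pattern; Context and DumpMetaData
// are the same classes used by the handlers on this page.
private void writeEventPayload(Context withinContext, String payloadJson) throws Exception {
  // One DumpMetaData instance describes the directory dumped for this event.
  DumpMetaData dmd = withinContext.createDmd(this);
  // The original event message, serialized as JSON, becomes the payload.
  dmd.setPayload(payloadJson);
  // Persist the metadata so the load side can replay the event.
  dmd.write();
}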
Use of org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData in project hive by apache.
The class UpdateTableColStatHandler, method handle.
@Override
public void handle(Context withinContext) throws Exception {
  LOG.info("Processing#{} UpdateTableColumnStat message : {}", fromEventId(), eventMessageAsJSON);
  Table qlMdTable = new Table(eventMessage.getTableObject());
  if (!Utils.shouldReplicate(withinContext.replicationSpec, qlMdTable, true,
      withinContext.getTablesForBootstrap(), withinContext.oldReplScope, withinContext.hiveConf)) {
    return;
  }
  // Statistics without data don't make sense.
  if (withinContext.replicationSpec.isMetadataOnly()
      || Utils.shouldDumpMetaDataOnlyForExternalTables(qlMdTable, withinContext.hiveConf)) {
    return;
  }
  DumpMetaData dmd = withinContext.createDmd(this);
  dmd.setPayload(eventMessageAsJSON);
  dmd.write();
}
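The two early returns above decide whether column statistics are worth dumping at all. A sketch of that gate as a standalone predicate, assuming the same Context fields and Utils calls shown in the snippet (the extracted method itself is illustrative, not Hive code):

// Hypothetical predicate: column statistics are dumped only when the table is in scope
// and its data (not just metadata) is being replicated.
private boolean shouldDumpColumnStats(Context withinContext, Table qlMdTable) throws Exception {
  if (!Utils.shouldReplicate(withinContext.replicationSpec, qlMdTable, true,
      withinContext.getTablesForBootstrap(), withinContext.oldReplScope, withinContext.hiveConf)) {
    return false; // table is outside the replication policy
  }
  // Statistics without the underlying data are of no use on the target cluster.
  return !(withinContext.replicationSpec.isMetadataOnly()
      || Utils.shouldDumpMetaDataOnlyForExternalTables(qlMdTable, withinContext.hiveConf));
}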
Use of org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData in project hive by apache.
The class AddUniqueConstraintHandler, method handle.
@Override
public void handle(Context withinContext) throws Exception {
  LOG.debug("Processing#{} ADD_UNIQUECONSTRAINT_MESSAGE message : {}", fromEventId(), eventMessageAsJSON);
  if (shouldReplicate(withinContext)) {
    DumpMetaData dmd = withinContext.createDmd(this);
    dmd.setPayload(eventMessageAsJSON);
    dmd.write();
  }
}
Use of org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData in project hive by apache.
The class AlterTableHandler, method handle.
@Override
public void handle(Context withinContext) throws Exception {
  LOG.info("Processing#{} ALTER_TABLE message : {}", fromEventId(), eventMessageAsJSON);
  Table qlMdTableBefore = new Table(before);
  Set<String> bootstrapTableList;
  ReplScope oldReplScope;
  if (Scenario.RENAME == scenario) {
    // Handling for table-level replication is not done in the shouldReplicate method for rename
    // events. It is done in the handleRenameForReplacePolicy and
    // handleRenameForTableLevelReplication methods.
    bootstrapTableList = null;
    oldReplScope = null;
  } else {
    // This check was skipped for the alter table event during event filtering.
    if (!ReplUtils.tableIncludedInReplScope(withinContext.replScope, before.getTableName())) {
      LOG.debug("Table " + before.getTableName() + " does not satisfy the policy");
      return;
    }
    bootstrapTableList = withinContext.getTablesForBootstrap();
    oldReplScope = withinContext.oldReplScope;
  }
  if (!Utils.shouldReplicate(withinContext.replicationSpec, qlMdTableBefore, true,
      bootstrapTableList, oldReplScope, withinContext.hiveConf)) {
    return;
  }
  if (withinContext.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_BOOTSTRAP_ACID_TABLES)) {
    if (!AcidUtils.isTransactionalTable(before) && AcidUtils.isTransactionalTable(after)) {
      LOG.info("The table " + after.getTableName() + " is converted to ACID table."
          + " It will be replicated with bootstrap load as hive.repl.bootstrap.acid.tables is set to true.");
      return;
    }
  }
  if (Scenario.RENAME == scenario) {
    String oldName = before.getTableName();
    String newName = after.getTableName();
    boolean needDump = true;
    if (withinContext.oldReplScope != null && !withinContext.oldReplScope.equals(withinContext.replScope)) {
      needDump = handleRenameForReplacePolicy(withinContext, oldName, newName);
    } else if (!withinContext.replScope.includeAllTables()) {
      needDump = handleRenameForTableLevelReplication(withinContext, oldName, newName);
    }
    if (!needDump) {
      LOG.info("Rename event for table " + oldName + " to " + newName + " is skipped from dumping");
      return;
    }
  }
  if (Scenario.ALTER == scenario) {
    withinContext.replicationSpec.setIsMetadataOnly(true);
    Table qlMdTableAfter = new Table(after);
    Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME);
    // When only metadata is dumped, reset basic statistics as for an empty table.
    if (Utils.shouldDumpMetaDataOnly(withinContext.hiveConf)
        || Utils.shouldDumpMetaDataOnlyForExternalTables(qlMdTableAfter, withinContext.hiveConf)) {
      qlMdTableAfter.setStatsStateLikeNewTable();
    }
    EximUtil.createExportDump(metaDataPath.getFileSystem(withinContext.hiveConf), metaDataPath,
        qlMdTableAfter, null, withinContext.replicationSpec, withinContext.hiveConf);
  }
  DumpMetaData dmd = withinContext.createDmd(this);
  dmd.setPayload(eventMessageAsJSON);
  dmd.write();
}
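The ACID-conversion branch above keys off hive.repl.bootstrap.acid.tables (HiveConf.ConfVars.REPL_BOOTSTRAP_ACID_TABLES, as referenced in the handler and its log message). A minimal sketch of how that flag might be enabled programmatically, assuming a plain HiveConf instance (illustrative only, not taken from the handler):

// With the flag on, a table that was just converted to ACID is skipped by this ALTER_TABLE
// handler and is expected to be picked up by a bootstrap load of ACID tables instead.
HiveConf conf = new HiveConf();
conf.setBoolVar(HiveConf.ConfVars.REPL_BOOTSTRAP_ACID_TABLES, true);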
Use of org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData in project hive by apache.
The class DefaultHandler, method handle.
@Override
public void handle(Context withinContext) throws Exception {
  // We specifically use the message string from the original event since we don't know what
  // message type to convert it to. This handler should not normally be called: with different
  // message formats we need the ability to convert messages to a given message type.
  LOG.info("Dummy processing#{} message : {}", fromEventId(), event.getMessage());
  DumpMetaData dmd = withinContext.createDmd(this);
  dmd.setPayload(event.getMessage());
  dmd.write();
}
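On the load side, the payload written by these handlers is read back from the event's dump directory. A rough sketch of that read path, assuming DumpMetaData's two-argument constructor and getPayload accessor from the load package behave this way (both are assumptions for illustration, not verified against a specific Hive version, and the path is a placeholder):

// Hypothetical read-back of the event payload during REPL LOAD.
HiveConf hiveConf = new HiveConf();
Path dumpDir = new Path("/warehouse/repl/dump/next/<eventId>"); // placeholder event directory
DumpMetaData loadSideDmd = new DumpMetaData(dumpDir, hiveConf);  // assumed constructor shape
String payloadJson = loadSideDmd.getPayload();                   // the JSON set by the dump-side handler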