Use of org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData in project hive by apache:
class ReplDumpTask, method shouldResumePreviousDump.
/**
 * Decides whether the previous (incomplete) dump found at {@code lastDumpPath} can be resumed.
 *
 * @param lastDumpPath root directory of the previous dump attempt.
 * @param isBootStrap  true when the previous attempt was a bootstrap dump,
 *                     false for an incremental dump.
 * @return true if the previous dump should be resumed, false if a fresh dump is needed.
 * @throws IOException on errors reading dump metadata from the filesystem.
 */
private boolean shouldResumePreviousDump(Path lastDumpPath, boolean isBootStrap) throws IOException {
  // A valid (completed) dump needs no resumption.
  if (validDump(lastDumpPath)) {
    return false;
  }
  Path hiveDumpPath = new Path(lastDumpPath, ReplUtils.REPL_HIVE_BASE_DIR);
  DumpMetaData dumpMetaData = new DumpMetaData(hiveDumpPath, conf);
  // If the table filter expression changed since the last attempt, the old dump is unusable.
  if (tableExpressionModified(dumpMetaData)) {
    return false;
  }
  if (isBootStrap) {
    return shouldResumePreviousDump(dumpMetaData);
  }
  // In case of incremental we should resume if _events_dump file is present and is valid
  Path lastEventFile = new Path(hiveDumpPath, ReplAck.EVENTS_DUMP.toString());
  long resumeFrom = 0;
  try {
    resumeFrom = getResumeFrom(lastEventFile);
  } catch (SemanticException ex) {
    // Pass the Throwable itself as the last argument so SLF4J logs the stack trace.
    // The original passed ex.getMessage() with no matching placeholder, so the
    // failure reason was silently dropped from the log line.
    LOG.info("Could not get last repl id from {}, because of:", lastEventFile, ex);
  }
  return resumeFrom > 0L;
}
Use of org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData in project hive by apache:
class AlterPartitionHandler, method handle.
/**
 * Dumps an ALTER_PARTITION event. For a pure ALTER scenario the partition metadata is
 * exported (metadata-only, no data files); in all replicated cases the event payload is
 * written into the dump metadata for replay on the target cluster.
 */
@Override
public void handle(Context withinContext) throws Exception {
LOG.info("Processing#{} ALTER_PARTITION message : {}", fromEventId(), eventMessageAsJSON);
// We do NOT dump partition related events for a metadata-only dump (original comment
// was missing the negation — the code below skips the event in that case).
if (withinContext.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY)) {
return;
}
Table qlMdTable = new Table(tableObject);
// Honour the replication scope / bootstrap-table filters; skip excluded tables.
if (!Utils.shouldReplicate(withinContext.replicationSpec, qlMdTable, true, withinContext.getTablesForBootstrap(), withinContext.oldReplScope, withinContext.hiveConf)) {
return;
}
if (Scenario.ALTER == scenario) {
// Plain ALTER: export only the altered partition's metadata (post-alter state, 'after').
withinContext.replicationSpec.setIsMetadataOnly(true);
List<Partition> partitions = new ArrayList<>();
partitions.add(new Partition(qlMdTable, after));
Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME);
EximUtil.createExportDump(metaDataPath.getFileSystem(withinContext.hiveConf), metaDataPath, qlMdTable, partitions, withinContext.replicationSpec, withinContext.hiveConf);
}
// Record the raw event JSON so the load side can replay this event.
DumpMetaData dmd = withinContext.createDmd(this);
dmd.setPayload(eventMessageAsJSON);
dmd.write();
}
Use of org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData in project hive by apache:
class AddCheckConstraintHandler, method handle.
/**
 * Dumps an ADD_CHECKCONSTRAINT event by persisting its JSON payload into the
 * dump metadata, provided the event passes the replication filter.
 */
@Override
public void handle(Context withinContext) throws Exception {
  LOG.debug("Processing#{} ADD_CHECKCONSTRAINT_MESSAGE message : {}", fromEventId(), eventMessageAsJSON);
  // Guard clause: skip events excluded by the replication policy.
  if (!shouldReplicate(withinContext)) {
    return;
  }
  final DumpMetaData eventDmd = withinContext.createDmd(this);
  eventDmd.setPayload(eventMessageAsJSON);
  eventDmd.write();
}
Use of org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData in project hive by apache:
class AddForeignKeyHandler, method handle.
/**
 * Dumps an ADD_FOREIGNKEY event by persisting its JSON payload into the
 * dump metadata, provided the event passes the replication filter.
 */
@Override
public void handle(Context withinContext) throws Exception {
  LOG.debug("Processing#{} ADD_FOREIGNKEY_MESSAGE message : {}", fromEventId(), eventMessageAsJSON);
  // Guard clause: skip events excluded by the replication policy.
  if (!shouldReplicate(withinContext)) {
    return;
  }
  final DumpMetaData eventDmd = withinContext.createDmd(this);
  eventDmd.setPayload(eventMessageAsJSON);
  eventDmd.write();
}
Use of org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData in project hive by apache:
class AddNotNullConstraintHandler, method handle.
/**
 * Dumps an ADD_NOTNULLCONSTRAINT event by persisting its JSON payload into the
 * dump metadata, provided the event passes the replication filter.
 */
@Override
public void handle(Context withinContext) throws Exception {
  LOG.debug("Processing#{} ADD_NOTNULLCONSTRAINT_MESSAGE message : {}", fromEventId(), eventMessageAsJSON);
  // Guard clause: skip events excluded by the replication policy.
  if (!shouldReplicate(withinContext)) {
    return;
  }
  final DumpMetaData eventDmd = withinContext.createDmd(this);
  eventDmd.setPayload(eventMessageAsJSON);
  eventDmd.write();
}
Aggregations