Use of org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData in project hive by apache.

The class AllocWriteIdHandler, method handle:
@Override
public void handle(Context withinContext) throws Exception {
  LOG.info("Processing#{} ALLOC_WRITE_ID message : {}", fromEventId(), eventMessageAsJSON);

  // If ACID tables are bootstrapped as part of this dump, the allocated write ids are covered by
  // that bootstrap on the target and hence we do not need to dump these events.
  if (withinContext.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_BOOTSTRAP_ACID_TABLES)) {
    return;
  }

  // Skip the event if ACID tables are excluded from this dump altogether.
  if (!ReplUtils.includeAcidTableInDump(withinContext.hiveConf)) {
    return;
  }

  // Note: if an event dump reaches here, the table is included in the new replication policy.
  // Skip the event if the table was not part of the old policy.
  if (!ReplUtils.tableIncludedInReplScope(withinContext.oldReplScope, eventMessage.getTableName())) {
    return;
  }

  DumpMetaData dmd = withinContext.createDmd(this);
  dmd.setPayload(eventMessageAsJSON);
  dmd.write();
}
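Internally, withinContext.createDmd(this) picks the per-event dump directory and derives the dump type from the handler. As a rough illustration only, here is a minimal sketch of the equivalent direct construction, using the same six-argument DumpMetaData constructor that appears in LoadConstraint below; the directory, event id, payload and hiveConf variables are hypothetical placeholders, not values from the Hive source:

  // Minimal sketch (not Hive source): write one event's dump metadata by hand.
  Path eventRoot = new Path("/tmp/repl/dump/hive/12345");   // hypothetical per-event directory
  long eventId = 12345L;                                     // hypothetical event id
  String payloadJson = "{ }";                                // JSON form of the ALLOC_WRITE_ID message
  DumpMetaData dmd = new DumpMetaData(eventRoot, DumpType.EVENT_ALLOC_WRITE_ID,
      eventId, eventId, null, hiveConf);                     // cmRoot is null, as in LoadConstraint below
  dmd.setPayload(payloadJson);
  dmd.write();                                               // persists the metadata file under eventRoot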
The class LoadConstraint, method tasks:
public TaskTracker tasks() throws IOException, SemanticException {
  URI fromURI = EximUtil.getValidatedURI(context.hiveConf, stripQuotes(event.rootDir().toUri().toString()));
  Path fromPath = new Path(fromURI.getScheme(), fromURI.getAuthority(), fromURI.getPath());
  try {
    FileSystem fs = FileSystem.get(fromPath.toUri(), context.hiveConf);
    // The constraints file is a single JSON document with one entry per constraint type.
    JSONObject json = new JSONObject(EximUtil.readAsString(fs, fromPath));
    String pksString = json.getString("pks");
    String fksString = json.getString("fks");
    String uksString = json.getString("uks");
    String nnsString = json.getString("nns");
    String dksString = json.getString("dks");
    String cksString = json.getString("cks");
    List<Task<?>> tasks = new ArrayList<Task<?>>();

    // For each non-empty constraint payload that has not been loaded yet, wrap it in a
    // DumpMetaData with the matching dump type and let the Add*Handler create the load tasks.
    if (StringUtils.isNotEmpty(StringUtils.trim(pksString)) && !isPrimaryKeysAlreadyLoaded(pksString)) {
      AddPrimaryKeyHandler pkHandler = new AddPrimaryKeyHandler();
      DumpMetaData pkDumpMetaData =
          new DumpMetaData(fromPath, DumpType.EVENT_ADD_PRIMARYKEY, Long.MAX_VALUE, Long.MAX_VALUE, null, context.hiveConf);
      pkDumpMetaData.setPayload(pksString);
      tasks.addAll(pkHandler.handle(new MessageHandler.Context(dbNameToLoadIn, fromPath.toString(), null,
          pkDumpMetaData, context.hiveConf, context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector)));
    }
    if (StringUtils.isNotEmpty(StringUtils.trim(uksString)) && !isUniqueConstraintsAlreadyLoaded(uksString)) {
      AddUniqueConstraintHandler ukHandler = new AddUniqueConstraintHandler();
      DumpMetaData ukDumpMetaData =
          new DumpMetaData(fromPath, DumpType.EVENT_ADD_UNIQUECONSTRAINT, Long.MAX_VALUE, Long.MAX_VALUE, null, context.hiveConf);
      ukDumpMetaData.setPayload(uksString);
      tasks.addAll(ukHandler.handle(new MessageHandler.Context(dbNameToLoadIn, fromPath.toString(), null,
          ukDumpMetaData, context.hiveConf, context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector)));
    }
    if (StringUtils.isNotEmpty(StringUtils.trim(nnsString)) && !isNotNullConstraintsAlreadyLoaded(nnsString)) {
      AddNotNullConstraintHandler nnHandler = new AddNotNullConstraintHandler();
      DumpMetaData nnDumpMetaData =
          new DumpMetaData(fromPath, DumpType.EVENT_ADD_NOTNULLCONSTRAINT, Long.MAX_VALUE, Long.MAX_VALUE, null, context.hiveConf);
      nnDumpMetaData.setPayload(nnsString);
      tasks.addAll(nnHandler.handle(new MessageHandler.Context(dbNameToLoadIn, fromPath.toString(), null,
          nnDumpMetaData, context.hiveConf, context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector)));
    }
    if (StringUtils.isNotEmpty(StringUtils.trim(dksString)) && !isDefaultConstraintsAlreadyLoaded(dksString)) {
      AddDefaultConstraintHandler dkHandler = new AddDefaultConstraintHandler();
      DumpMetaData dkDumpMetaData =
          new DumpMetaData(fromPath, DumpType.EVENT_ADD_DEFAULTCONSTRAINT, Long.MAX_VALUE, Long.MAX_VALUE, null, context.hiveConf);
      dkDumpMetaData.setPayload(dksString);
      tasks.addAll(dkHandler.handle(new MessageHandler.Context(dbNameToLoadIn, fromPath.toString(), null,
          dkDumpMetaData, context.hiveConf, context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector)));
    }
    if (StringUtils.isNotEmpty(StringUtils.trim(cksString)) && !isCheckConstraintsAlreadyLoaded(cksString)) {
      AddCheckConstraintHandler ckHandler = new AddCheckConstraintHandler();
      DumpMetaData ckDumpMetaData =
          new DumpMetaData(fromPath, DumpType.EVENT_ADD_CHECKCONSTRAINT, Long.MAX_VALUE, Long.MAX_VALUE, null, context.hiveConf);
      ckDumpMetaData.setPayload(cksString);
      tasks.addAll(ckHandler.handle(new MessageHandler.Context(dbNameToLoadIn, fromPath.toString(), null,
          ckDumpMetaData, context.hiveConf, context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector)));
    }
    if (StringUtils.isNotEmpty(StringUtils.trim(fksString)) && !isForeignKeysAlreadyLoaded(fksString)) {
      AddForeignKeyHandler fkHandler = new AddForeignKeyHandler();
      DumpMetaData fkDumpMetaData =
          new DumpMetaData(fromPath, DumpType.EVENT_ADD_FOREIGNKEY, Long.MAX_VALUE, Long.MAX_VALUE, null, context.hiveConf);
      fkDumpMetaData.setPayload(fksString);
      tasks.addAll(fkHandler.handle(new MessageHandler.Context(dbNameToLoadIn, fromPath.toString(), null,
          fkDumpMetaData, context.hiveConf, context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector)));
    }
    tasks.forEach(tracker::addTask);
    return tracker;
  } catch (Exception e) {
    throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(), e);
  }
}
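All six if-blocks above share one shape: build a DumpMetaData keyed by the constraint-specific DumpType, attach the raw JSON as payload, and let the matching Add*Handler produce the load tasks. A hedged sketch of how that repeated block could be factored into a helper; this helper does not exist in LoadConstraint and its name is illustrative:

  // Hypothetical helper, for illustration only; mirrors one if-block from tasks() above.
  private List<Task<?>> constraintTasks(String payload, DumpType dumpType, MessageHandler handler, Path fromPath)
      throws SemanticException {
    DumpMetaData dmd = new DumpMetaData(fromPath, dumpType, Long.MAX_VALUE, Long.MAX_VALUE, null, context.hiveConf);
    dmd.setPayload(payload);
    return handler.handle(new MessageHandler.Context(dbNameToLoadIn, fromPath.toString(), null,
        dmd, context.hiveConf, context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector));
  }

With such a helper, each branch would reduce to the same emptiness and already-loaded checks followed by a single tasks.addAll(constraintTasks(...)) call.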
The class TestTableLevelReplicationScenarios, method replicateAndVerify:
private String replicateAndVerify(String replPolicy, String oldReplPolicy, String lastReplId,
    List<String> dumpWithClause, List<String> loadWithClause, String[] bootstrappedTables,
    String[] expectedTables, String[] records) throws Throwable {
  if (dumpWithClause == null) {
    dumpWithClause = new ArrayList<>();
  }
  if (loadWithClause == null) {
    loadWithClause = new ArrayList<>();
  }

  // For bootstrap replication, drop the target database before triggering it.
  if (lastReplId == null) {
    replica.run("drop database if exists " + replicatedDbName + " cascade");
  }

  WarehouseInstance.Tuple tuple = primary.dump(replPolicy, dumpWithClause);

  // The repl scope is flagged as modified only when the policy changed between dumps.
  DumpMetaData dumpMetaData = new DumpMetaData(new Path(tuple.dumpLocation, ReplUtils.REPL_HIVE_BASE_DIR), conf);
  Assert.assertEquals(oldReplPolicy != null && !replPolicy.equals(oldReplPolicy), dumpMetaData.isReplScopeModified());

  if (bootstrappedTables != null) {
    verifyBootstrapDirInIncrementalDump(tuple.dumpLocation, bootstrappedTables);
  }

  // A policy containing ".'" is a table-level replication policy; only then verify the dumped table list.
  verifyTableListForPolicy(tuple.dumpLocation, replPolicy.contains(".'") ? expectedTables : null);
  verifyDumpMetadata(replPolicy, new Path(tuple.dumpLocation, ReplUtils.REPL_HIVE_BASE_DIR));

  replica.load(replicatedDbName, replPolicy, loadWithClause)
      .run("use " + replicatedDbName)
      .run("show tables")
      .verifyResults(expectedTables)
      .verifyReplTargetProperty(replicatedDbName);

  if (records == null) {
    records = new String[] { "1" };
  }
  for (String table : expectedTables) {
    replica.run("use " + replicatedDbName).run("select a from " + table).verifyResults(records);
  }
  return tuple.lastReplicationId;
}
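The test reads the metadata back with the two-argument constructor, which loads the persisted dump-metadata file from the given directory when a getter is first called. A minimal read-side sketch under that assumption; only isReplScopeModified() and getReplScope() are exercised by these tests, and the extra getter shown (getDumpType()) is assumed from the same class:

  // Minimal sketch: inspect a finished dump. hiveDumpDir mirrors the path used in the test above.
  Path hiveDumpDir = new Path(tuple.dumpLocation, ReplUtils.REPL_HIVE_BASE_DIR);
  DumpMetaData dmd = new DumpMetaData(hiveDumpDir, conf);
  boolean scopeChanged = dmd.isReplScopeModified();   // true when the repl policy was replaced
  ReplScope scope = dmd.getReplScope();                // db name plus included/excluded table patterns
  DumpType dumpType = dmd.getDumpType();               // assumed getter; kind of dump that was taken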
The class TestTableLevelReplicationScenarios, method verifyDumpMetadata:
private void verifyDumpMetadata(String replPolicy, Path dumpPath) throws SemanticException {
  // The policy has the form <db>, <db>.'<included tables regex>', or
  // <db>.'<included tables regex>'.'<excluded tables regex>'; split on ".'" and strip the quotes.
  String[] parseReplPolicy = replPolicy.split("\\.'");
  assertEquals(parseReplPolicy[0], new DumpMetaData(dumpPath, conf).getReplScope().getDbName());
  if (parseReplPolicy.length > 1) {
    parseReplPolicy[1] = parseReplPolicy[1].replaceAll("'", "");
    assertEquals(parseReplPolicy[1], new DumpMetaData(dumpPath, conf).getReplScope().getIncludedTableNames());
  }
  if (parseReplPolicy.length > 2) {
    parseReplPolicy[2] = parseReplPolicy[2].replaceAll("'", "");
    assertEquals(parseReplPolicy[2], new DumpMetaData(dumpPath, conf).getReplScope().getExcludedTableNames());
  }
}
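As a concrete, hypothetical example of the split above: a table-level policy such as repldb.'(t1|t2)'.'t3' breaks into the database name, the included-tables regex and the excluded-tables regex that the assertions compare against the persisted ReplScope:

  // Hypothetical policy string; the split mirrors verifyDumpMetadata above.
  String replPolicy = "repldb.'(t1|t2)'.'t3'";
  String[] parts = replPolicy.split("\\.'");
  String dbName   = parts[0];                         // "repldb"
  String included = parts[1].replaceAll("'", "");     // "(t1|t2)"
  String excluded = parts[2].replaceAll("'", "");     // "t3"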