Use of org.apache.hadoop.hive.metastore.messaging.EventMessage in project hive by apache.
Class DbNotificationListener, method onAddPartition:
/**
 * @param partitionEvent partition event
 * @throws MetaException if the notification event cannot be processed
 */
@Override
public void onAddPartition(AddPartitionEvent partitionEvent) throws MetaException {
  Table t = partitionEvent.getTable();
  // For external tables no partition-file iterator is built (fileIter stays null),
  // so the resulting message carries no file listings for them.
  PartitionFilesIterator fileIter = MetaStoreUtils.isExternalTable(t)
      ? null
      : new PartitionFilesIterator(partitionEvent.getPartitionIterator(), t);
  EventMessage msg = MessageBuilder.getInstance()
      .buildAddPartitionMessage(t, partitionEvent.getPartitionIterator(), fileIter);
  MessageSerializer serializer = msgEncoder.getSerializer();
  NotificationEvent event = new NotificationEvent(0, now(), EventType.ADD_PARTITION.toString(),
      serializer.serialize(msg));
  event.setCatName(t.isSetCatName() ? t.getCatName() : DEFAULT_CATALOG_NAME);
  event.setDbName(t.getDbName());
  event.setTableName(t.getTableName());
  process(event, partitionEvent);
}
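For context, the payload stored via serializer.serialize(msg) is read back on the consumer side through the matching MessageDeserializer. Below is a minimal sketch of that round trip, assuming the JSON encoder (JSONMessageEncoder) produced the event; the helper method decodeAddPartition is hypothetical and not part of DbNotificationListener:

import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.hadoop.hive.metastore.messaging.AddPartitionMessage;
import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer;
import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageEncoder;

// Hypothetical consumer-side helper: decodes the payload written by
// onAddPartition above. Assumes the event was serialized with the JSON
// encoder; a real consumer would select the encoder based on the
// event's message format rather than hard-coding it.
static AddPartitionMessage decodeAddPartition(NotificationEvent event) {
  MessageDeserializer deserializer = JSONMessageEncoder.getInstance().getDeserializer();
  // getMessage() returns the body produced by serializer.serialize(msg) above.
  return deserializer.getAddPartitionMessage(event.getMessage());
}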
Use of org.apache.hadoop.hive.metastore.messaging.EventMessage in project hive by apache.
Class AddPartitionHandler, method handle:
@Override
public void handle(Context withinContext) throws Exception {
  LOG.info("Processing#{} ADD_PARTITION message : {}", fromEventId(), eventMessageAsJSON);

  // Skip dumping partition-related events for a metadata-only dump.
  if (withinContext.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY)) {
    return;
  }

  AddPartitionMessage apm = (AddPartitionMessage) eventMessage;
  org.apache.hadoop.hive.metastore.api.Table tobj = apm.getTableObj();
  if (tobj == null) {
    LOG.debug("Event#{} was an ADD_PTN_EVENT with no table listed", fromEventId());
    return;
  }

  final Table qlMdTable = new Table(tobj);
  if (!Utils.shouldReplicate(withinContext.replicationSpec, qlMdTable, true,
      withinContext.getTablesForBootstrap(), withinContext.oldReplScope,
      withinContext.hiveConf)) {
    return;
  }

  Iterable<org.apache.hadoop.hive.metastore.api.Partition> ptns = apm.getPartitionObjs();
  if ((ptns == null) || (!ptns.iterator().hasNext())) {
    LOG.debug("Event#{} was an ADD_PTN_EVENT with no partitions", fromEventId());
    return;
  }

  // Wrap each metastore Partition in a ql Partition bound to the table's metadata.
  Iterable<Partition> qlPtns = StreamSupport.stream(ptns.spliterator(), true).map(input -> {
    if (input == null) {
      return null;
    }
    try {
      return new Partition(qlMdTable, input);
    } catch (HiveException e) {
      throw new IllegalArgumentException(e);
    }
  }).collect(Collectors.toList());

  Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME);
  EximUtil.createExportDump(metaDataPath.getFileSystem(withinContext.hiveConf), metaDataPath,
      qlMdTable, qlPtns, withinContext.replicationSpec, withinContext.hiveConf);

  boolean copyAtLoad =
      withinContext.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET);
  Iterator<PartitionFiles> partitionFilesIter = apm.getPartitionFilesIter().iterator();

  // If the event carried no file listings (for example, when no file iterator was
  // built for the source table), the list would be empty. So, it is enough to
  // check hasNext outside the loop.
  if (partitionFilesIter.hasNext()) {
    for (Partition qlPtn : qlPtns) {
      Iterable<String> files = partitionFilesIter.next().getFiles();
      if (files != null) {
        if (copyAtLoad) {
          // encoded filename/checksum of files, write into _files
          Path ptnDataPath = new Path(withinContext.eventRoot,
              EximUtil.DATA_PATH_NAME + File.separator + qlPtn.getName());
          writeEncodedDumpFiles(withinContext, files, ptnDataPath);
        } else {
          for (String file : files) {
            writeFileEntry(qlMdTable, qlPtn, file, withinContext);
          }
        }
      }
    }
  }

  withinContext.createDmd(this).write();
}
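Both branch decisions in handle() are driven by HiveConf flags that appear in the snippet: REPL_DUMP_METADATA_ONLY triggers the early return, and REPL_RUN_DATA_COPY_TASKS_ON_TARGET selects between writing encoded file entries and writing per-file entries. A minimal sketch of setting them programmatically, assuming a freshly created HiveConf (how the conf reaches Context.hiveConf is outside this snippet):

import org.apache.hadoop.hive.conf.HiveConf;

// Sketch: force a metadata-only dump (which makes this handler return early)
// and defer data copy to the target at load time.
HiveConf conf = new HiveConf();
conf.setBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY, true);
conf.setBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET, true);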