Use of org.apache.hadoop.hdfs.inotify.EventBatch in project SSM by Intel-bigdata.
The class TestEventBatchSerializer, method testSerializer.
@Test
public void testSerializer() throws InvalidProtocolBufferException {
  Event close = new Event.CloseEvent("/user1", 1024, 0);
  Event create = new Event.CreateEvent.Builder()
      .iNodeType(Event.CreateEvent.INodeType.FILE)
      .ctime(1).defaultBlockSize(1024).groupName("cg1").overwrite(true)
      .ownerName("user1").path("/file1")
      .perms(new FsPermission("777")).replication(3).build();
  Event meta = new Event.MetadataUpdateEvent.Builder()
      .path("/file2").metadataType(Event.MetadataUpdateEvent.MetadataType.OWNER)
      .mtime(2).atime(3).replication(4)
      .ownerName("user2").groupName("cg2").build();
  Event rename = new Event.RenameEvent.Builder()
      .dstPath("/file4").srcPath("/file3").timestamp(5).build();
  Event append = new Event.AppendEvent.Builder()
      .newBlock(true).path("/file5").build();
  Event unlink = new Event.UnlinkEvent.Builder()
      .path("/file6").timestamp(6).build();
  // Event truncate = new Event.TruncateEvent("/file7", 1024, 16);
  List<Event> events = Arrays.asList(close, create, meta, rename, append, unlink);
  EventBatch batch = new EventBatch(1023, events.toArray(new Event[0]));
  List<String> expected = new ArrayList<>();
  for (Event event : events) {
    expected.add(event.toString());
  }
  byte[] bytes = EventBatchSerializer.serialize(batch);
  EventBatch result = EventBatchSerializer.deserialize(bytes);
  List<String> actual = new ArrayList<>();
  for (Event event : result.getEvents()) {
    actual.add(event.toString());
  }
  Assert.assertEquals(batch.getTxid(), result.getTxid());
  Assert.assertEquals(expected.size(), actual.size());
  // Assert.assertTrue(expected.containsAll(actual));
}
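For context, here is a minimal sketch (not from the SSM sources) of driving the serializer round-trip above from a live inotify stream. The namenode URI, the client wiring, and the main-class scaffolding are assumptions; EventBatchSerializer itself is the SSM class exercised by the test, and its import depends on your SSM version.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.inotify.EventBatch;

public class SerializeLiveBatch {
  public static void main(String[] args) throws Exception {
    // Assumed namenode address; adjust for your cluster.
    DFSClient client = new DFSClient(new URI("hdfs://namenode:8020"), new Configuration());
    DFSInotifyEventInputStream stream = client.getInotifyEventStream();
    EventBatch batch = stream.poll(); // null when no new events are available
    if (batch != null) {
      // Round-trip through the SSM serializer, as in the test above.
      byte[] bytes = EventBatchSerializer.serialize(batch);
      EventBatch restored = EventBatchSerializer.deserialize(bytes);
      System.out.println("txid preserved: " + (restored.getTxid() == batch.getTxid()));
    }
    client.close();
  }
}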
Use of org.apache.hadoop.hdfs.inotify.EventBatch in project SSM by Intel-bigdata.
The class InotifyEventFetcher, method canContinueFromLastTxid.
@VisibleForTesting
static boolean canContinueFromLastTxid(DFSClient client, Long lastId) {
  try {
    // Already caught up: the namenode's current edit-log txid equals lastId.
    if (client.getNamenode().getCurrentEditLogTxid() == lastId) {
      return true;
    }
    // Otherwise we can resume only if the edits from lastId onward are still
    // readable; a single poll verifies they have not been purged.
    DFSInotifyEventInputStream is = client.getInotifyEventStream(lastId);
    EventBatch eventBatch = is.poll();
    return eventBatch != null;
  } catch (Exception e) {
    return false;
  }
}
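A hedged sketch of how a caller might act on this check: resume from a persisted txid when possible, otherwise restart from the namenode's current position. The openEventStream helper is hypothetical, and canContinueFromLastTxid is package-private, so a real caller would live in the same package.
import java.io.IOException;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;

// Hypothetical resume helper built on the check above.
static DFSInotifyEventInputStream openEventStream(DFSClient client, long lastTxid)
    throws IOException {
  if (InotifyEventFetcher.canContinueFromLastTxid(client, lastTxid)) {
    return client.getInotifyEventStream(lastTxid); // resume where we left off
  }
  // Edits after lastTxid are no longer readable; restart from the current
  // edit-log txid (recovering missed changes would need a full namespace scan).
  return client.getInotifyEventStream(client.getNamenode().getCurrentEditLogTxid());
}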
Use of org.apache.hadoop.hdfs.inotify.EventBatch in project alluxio by Alluxio.
The class SupportedHdfsActiveSyncProvider, method pollEvent.
/**
 * Fetch and process events.
 *
 * @param eventStream event stream
 */
public void pollEvent(DFSInotifyEventInputStream eventStream) {
  LOG.debug("Polling thread starting, with timeout {} ms", mActiveUfsPollTimeoutMs);
  long start = System.currentTimeMillis();
  long behind = eventStream.getTxidsBehindEstimate();
  while (!Thread.currentThread().isInterrupted()) {
    try {
      List<Callable<Integer>> process = new LinkedList<>();
      for (int i = 0; i < mBatchSize; i++) {
        EventBatch batch = eventStream.poll(mActiveUfsPollTimeoutMs, TimeUnit.MILLISECONDS);
        if (batch == null) {
          break;
        }
        process.add(() -> {
          for (Event event : batch.getEvents()) {
            processEvent(event, mUfsUriList, batch.getTxid());
          }
          return batch.getEvents().length;
        });
      }
      mProcessTasks.add(mExecutorService.submit(() -> process.stream().map(callable -> {
        try {
          return callable.call();
        } catch (Exception e) {
          LogUtils.warnWithException(LOG, "Failed to process event", e);
          return 0;
        }
      }).reduce(0, Integer::sum)));
      long end = System.currentTimeMillis();
      if (end > (start + mActiveUfsSyncEventRateInterval)) {
        long currentlyBehind = eventStream.getTxidsBehindEstimate();
        long processedEvents = getCountSinceLastLog();
        long hdfsEvents = processedEvents + currentlyBehind - behind;
        long durationMs = end - start;
        if (LOG.isDebugEnabled()) {
          // for debug, print every interval
          LOG.debug("HDFS sync stats. past duration: {} ms. "
              + "HDFS generated events: {} ({} events/s). "
              + "Processed events: {} ({} events/s). TxidsBehindEstimate: {}",
              durationMs, hdfsEvents,
              String.format("%.2f", hdfsEvents * 1000.0f / durationMs),
              processedEvents,
              String.format("%.2f", processedEvents * 1000.0f / durationMs),
              currentlyBehind);
        } else {
          // for info, print with the sampling logger
          SAMPLING_LOG.info("HDFS sync stats. past duration: {} ms. "
              + "HDFS generated events: {} ({} events/s). "
              + "Processed events: {} ({} events/s). TxidsBehindEstimate: {}",
              durationMs, hdfsEvents,
              String.format("%.2f", hdfsEvents * 1000.0f / durationMs),
              processedEvents,
              String.format("%.2f", processedEvents * 1000.0f / durationMs),
              currentlyBehind);
        }
        behind = currentlyBehind;
        start = end;
      }
    } catch (IOException e) {
      LOG.warn("IOException occurred during polling inotify: {}", e.toString());
      if (e.getCause() instanceof InterruptedException) {
        return;
      }
    } catch (MissingEventsException e) {
      LOG.warn("MissingEventsException during polling: {}", e.toString());
      mEventMissed = true;
      // need to sync all syncpoints at this point
    } catch (InterruptedException e) {
      LOG.warn("InterruptedException during polling: {}", e.toString());
      return;
    }
  }
}
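Since pollEvent blocks in its own loop and exits on interruption, a caller runs it on a dedicated thread. A hedged sketch of that wiring follows; the provider instance, the starting txid, and the startPolling helper are assumptions, not Alluxio code.
import java.io.IOException;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;

// Hypothetical wiring: run the poll loop above on its own thread.
static Thread startPolling(SupportedHdfsActiveSyncProvider provider, DFSClient client,
    long lastSyncedTxid) throws IOException {
  DFSInotifyEventInputStream stream = client.getInotifyEventStream(lastSyncedTxid);
  Thread poller = new Thread(() -> provider.pollEvent(stream), "hdfs-active-sync-poller");
  poller.start();
  return poller; // poller.interrupt() stops the loop above
}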
Use of org.apache.hadoop.hdfs.inotify.EventBatch in project hadoop by apache.
The class PBHelperClient, method convertEditsResponse.
public static GetEditsFromTxidResponseProto convertEditsResponse(EventBatchList el) {
  InotifyProtos.EventsListProto.Builder builder = InotifyProtos.EventsListProto.newBuilder();
  for (EventBatch b : el.getBatches()) {
    List<InotifyProtos.EventProto> events = Lists.newArrayList();
    for (Event e : b.getEvents()) {
      switch (e.getEventType()) {
        case CLOSE:
          Event.CloseEvent ce = (Event.CloseEvent) e;
          events.add(InotifyProtos.EventProto.newBuilder()
              .setType(InotifyProtos.EventType.EVENT_CLOSE)
              .setContents(InotifyProtos.CloseEventProto.newBuilder()
                  .setPath(ce.getPath()).setFileSize(ce.getFileSize())
                  .setTimestamp(ce.getTimestamp()).build().toByteString())
              .build());
          break;
        case CREATE:
          Event.CreateEvent ce2 = (Event.CreateEvent) e;
          events.add(InotifyProtos.EventProto.newBuilder()
              .setType(InotifyProtos.EventType.EVENT_CREATE)
              .setContents(InotifyProtos.CreateEventProto.newBuilder()
                  .setType(createTypeConvert(ce2.getiNodeType()))
                  .setPath(ce2.getPath()).setCtime(ce2.getCtime())
                  .setOwnerName(ce2.getOwnerName()).setGroupName(ce2.getGroupName())
                  .setPerms(convert(ce2.getPerms())).setReplication(ce2.getReplication())
                  .setSymlinkTarget(ce2.getSymlinkTarget() == null ? "" : ce2.getSymlinkTarget())
                  .setDefaultBlockSize(ce2.getDefaultBlockSize())
                  .setOverwrite(ce2.getOverwrite()).build().toByteString())
              .build());
          break;
        case METADATA:
          Event.MetadataUpdateEvent me = (Event.MetadataUpdateEvent) e;
          InotifyProtos.MetadataUpdateEventProto.Builder metaB =
              InotifyProtos.MetadataUpdateEventProto.newBuilder()
                  .setPath(me.getPath())
                  .setType(metadataUpdateTypeConvert(me.getMetadataType()))
                  .setMtime(me.getMtime()).setAtime(me.getAtime())
                  .setReplication(me.getReplication())
                  .setOwnerName(me.getOwnerName() == null ? "" : me.getOwnerName())
                  .setGroupName(me.getGroupName() == null ? "" : me.getGroupName())
                  .addAllAcls(me.getAcls() == null
                      ? Lists.<AclEntryProto>newArrayList()
                      : convertAclEntryProto(me.getAcls()))
                  .addAllXAttrs(me.getxAttrs() == null
                      ? Lists.<XAttrProto>newArrayList()
                      : convertXAttrProto(me.getxAttrs()))
                  .setXAttrsRemoved(me.isxAttrsRemoved());
          if (me.getPerms() != null) {
            metaB.setPerms(convert(me.getPerms()));
          }
          events.add(InotifyProtos.EventProto.newBuilder()
              .setType(InotifyProtos.EventType.EVENT_METADATA)
              .setContents(metaB.build().toByteString())
              .build());
          break;
        case RENAME:
          Event.RenameEvent re = (Event.RenameEvent) e;
          events.add(InotifyProtos.EventProto.newBuilder()
              .setType(InotifyProtos.EventType.EVENT_RENAME)
              .setContents(InotifyProtos.RenameEventProto.newBuilder()
                  .setSrcPath(re.getSrcPath()).setDestPath(re.getDstPath())
                  .setTimestamp(re.getTimestamp()).build().toByteString())
              .build());
          break;
        case APPEND:
          Event.AppendEvent re2 = (Event.AppendEvent) e;
          events.add(InotifyProtos.EventProto.newBuilder()
              .setType(InotifyProtos.EventType.EVENT_APPEND)
              .setContents(InotifyProtos.AppendEventProto.newBuilder()
                  .setPath(re2.getPath()).setNewBlock(re2.toNewBlock())
                  .build().toByteString())
              .build());
          break;
        case UNLINK:
          Event.UnlinkEvent ue = (Event.UnlinkEvent) e;
          events.add(InotifyProtos.EventProto.newBuilder()
              .setType(InotifyProtos.EventType.EVENT_UNLINK)
              .setContents(InotifyProtos.UnlinkEventProto.newBuilder()
                  .setPath(ue.getPath()).setTimestamp(ue.getTimestamp())
                  .build().toByteString())
              .build());
          break;
        case TRUNCATE:
          Event.TruncateEvent te = (Event.TruncateEvent) e;
          events.add(InotifyProtos.EventProto.newBuilder()
              .setType(InotifyProtos.EventType.EVENT_TRUNCATE)
              .setContents(InotifyProtos.TruncateEventProto.newBuilder()
                  .setPath(te.getPath()).setFileSize(te.getFileSize())
                  .setTimestamp(te.getTimestamp()).build().toByteString())
              .build());
          break;
        default:
          throw new RuntimeException("Unexpected inotify event: " + e);
      }
    }
    builder.addBatch(InotifyProtos.EventBatchProto.newBuilder()
        .setTxid(b.getTxid())
        .addAllEvents(events));
  }
  builder.setFirstTxid(el.getFirstTxid());
  builder.setLastTxid(el.getLastTxid());
  builder.setSyncTxid(el.getSyncTxid());
  return GetEditsFromTxidResponseProto.newBuilder().setEventsList(builder.build()).build();
}
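The converter above maps each Event subtype to its protobuf form; on the receiving side, a consumer dispatches on the same subtypes. A minimal sketch using only the public inotify API (the handler bodies are illustrative):
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;

// Consumer-side counterpart: switch on the same event types serialized above.
static void handle(EventBatch batch) {
  for (Event e : batch.getEvents()) {
    switch (e.getEventType()) {
      case CLOSE:
        Event.CloseEvent ce = (Event.CloseEvent) e;
        System.out.println("closed " + ce.getPath() + " at " + ce.getFileSize() + " bytes");
        break;
      case RENAME:
        Event.RenameEvent re = (Event.RenameEvent) e;
        System.out.println("renamed " + re.getSrcPath() + " -> " + re.getDstPath());
        break;
      default:
        // CREATE, METADATA, APPEND, UNLINK, TRUNCATE are handled analogously.
        break;
    }
  }
}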