Example use of org.apache.hadoop.hdfs.inotify.EventBatch in the Apache NiFi project.
From the class TestGetHDFSEvents, method onTriggerShouldOnlyProcessEventsWithSpecificPath.
@Test
public void onTriggerShouldOnlyProcessEventsWithSpecificPath() throws Exception {
    // Stub the HDFS admin -> inotify stream chain so the processor receives one
    // fixed batch of events (txid 100) on its first poll.
    EventBatch batch = mock(EventBatch.class);
    when(batch.getEvents()).thenReturn(getEvents());
    when(batch.getTxid()).thenReturn(100L);
    when(inotifyEventInputStream.poll(1000000L, TimeUnit.MICROSECONDS)).thenReturn(batch);
    when(hdfsAdmin.getInotifyEventStream()).thenReturn(inotifyEventInputStream);

    GetHDFSEvents processor = new TestableGetHDFSEvents(kerberosProperties, hdfsAdmin);
    TestRunner runner = TestRunners.newTestRunner(processor);
    runner.setProperty(GetHDFSEvents.HDFS_PATH_TO_WATCH, "/some/path/create(/)?");
    runner.run();

    // Only the event whose path matches the watched pattern should produce a flow file,
    // and the batch's transaction id must be recorded in cluster-scoped state.
    List<MockFlowFile> successfulFlowFiles = runner.getFlowFilesForRelationship(GetHDFSEvents.REL_SUCCESS);
    assertEquals(1, successfulFlowFiles.size());
    verify(batch).getTxid();
    assertEquals("100", runner.getProcessContext().getStateManager().getState(Scope.CLUSTER).get("last.tx.id"));
}
Example use of org.apache.hadoop.hdfs.inotify.EventBatch in the SSM project by Intel-bigdata.
From the class InotifyFetchAndApplyTask, method run.
@Override
public void run() {
    LOG.trace("InotifyFetchAndApplyTask run at " + new Date());
    try {
        // Drain every event batch currently available from the inotify stream.
        EventBatch batch;
        while ((batch = inotifyEventInputStream.poll()) != null) {
            applier.apply(batch.getEvents());
            lastId.getAndSet(batch.getTxid());
            // Persist the last applied transaction id so processing can resume
            // from this point after a restart.
            metaStore.updateAndInsertIfNotExist(
                new SystemInfo(SmartConstants.SMART_HDFS_LAST_INOTIFY_TXID,
                    String.valueOf(lastId.get())));
        }
    } catch (Throwable t) {
        // Intentionally broad: this runs on a scheduler thread, and an escaping
        // error would cancel all future executions of the task.
        LOG.error("Inotify Apply Events error", t);
    }
}
Aggregations