Usage of co.cask.cdap.filetailer.event.FileTailerEvent in project cdap-ingest by caskdata — class FileTailerSink, method uploadEventPack.
/**
 * Uploads all events in the pack; this method blocks until the entire package is uploaded.
 *
 * @param pack the event pack whose events are uploaded
 * @throws InterruptedException if the calling thread is interrupted while waiting for uploads
 * @throws IOException if one or more events could not be uploaded
 */
private void uploadEventPack(EventPack pack) throws InterruptedException, IOException {
  List<FileTailerEvent> events = pack.getEvents();
  UploadLatch upload = new UploadLatch(events.size());
  for (FileTailerEvent event : events) {
    uploadEvent(upload, event);
  }
  // Block until every asynchronous upload has completed (successfully or not).
  upload.await();
  if (!upload.isSuccessful()) {
    List<FileTailerEvent> failedEvents = upload.getFailedEvents();
    // An upload failure is actionable, so log at ERROR rather than DEBUG,
    // and surface the failure count in the exception for the caller.
    LOG.error("Failed to upload {} events ", failedEvents.size());
    throw new IOException("Failed to upload " + failedEvents.size() + " events!");
  }
}
Usage of co.cask.cdap.filetailer.event.FileTailerEvent in project cdap-ingest by caskdata — class FileTailerSinkTest, method basicTestWithCustomPackSizeMultipleWriters.
/**
 * Verifies that a sink configured with a custom pack size and load-balanced
 * concurrent writers eventually processes every queued event.
 */
@Test
public void basicTestWithCustomPackSizeMultipleWriters() throws Exception {
  FileTailerStateProcessor stateProcessor = Mockito.mock(FileTailerStateProcessor.class);
  FileTailerMetricsProcessor metricsProcessor = Mockito.mock(FileTailerMetricsProcessor.class);
  FileTailerQueue queue = new FileTailerQueue(DEFAULT_QUEUE_SIZE);
  final AtomicInteger processedCount = new AtomicInteger(0);
  StreamWriter writers = getDummyConcurrentWriter(processedCount);
  FileTailerSink sink = new FileTailerSink(queue, writers, SinkStrategy.LOADBALANCE,
      stateProcessor, metricsProcessor, null, CUSTOM_PACK_SIZE);
  boolean allEventsProcessed = false;
  try {
    sink.startAsync();
    for (int i = 0; i < TEST_EVENTS_SIZE; i++) {
      queue.put(new FileTailerEvent(new FileTailerState("file", 0L, 42, 0L),
          "test", Charset.defaultCharset()));
    }
    // Poll up to 10 times, one second apart, for the writers to drain the queue.
    for (int attempt = 0; attempt < 10 && !allEventsProcessed; attempt++) {
      if (processedCount.get() == TEST_EVENTS_SIZE) {
        allEventsProcessed = true;
      } else {
        Thread.sleep(1000);
      }
    }
  } finally {
    // Stop the sink even if queueing or polling threw.
    sink.stopAsync();
  }
  org.junit.Assert.assertTrue(allEventsProcessed);
}
Usage of co.cask.cdap.filetailer.event.FileTailerEvent in project cdap-ingest by caskdata — class FileTailerSinkTest, method basicTestWithDefaultPackSize.
/**
 * Verifies that a sink using the default pack size delivers every queued
 * event to the underlying stream writer.
 */
@Test
public void basicTestWithDefaultPackSize() throws Exception {
  FileTailerStateProcessor stateProcessor = Mockito.mock(FileTailerStateProcessor.class);
  FileTailerMetricsProcessor metricsProcessor = Mockito.mock(FileTailerMetricsProcessor.class);
  FileTailerQueue queue = new FileTailerQueue(DEFAULT_QUEUE_SIZE);
  StreamWriter writerMock = getDummyStreamWriter();
  FileTailerSink sink = new FileTailerSink(queue, writerMock, SinkStrategy.LOADBALANCE,
      stateProcessor, metricsProcessor, null);
  try {
    sink.startAsync();
    for (int i = 0; i < TEST_EVENTS_SIZE; i++) {
      queue.put(new FileTailerEvent(new FileTailerState("file", 0L, 42, 0L),
          "test", Charset.defaultCharset()));
    }
    Mockito.verify(writerMock, Mockito.timeout(10000).times(TEST_EVENTS_SIZE))
        .write("test", Charset.defaultCharset());
  } finally {
    // Always stop the sink, even when verification fails — otherwise a failed
    // assertion leaks a running sink (matches the other sink tests' cleanup).
    sink.stopAsync();
  }
}
Usage of co.cask.cdap.filetailer.event.FileTailerEvent in project cdap-ingest by caskdata — class RunFromSaveStateTest, method saveState.
/**
 * Drains the tailer queue until its backing internal queue is empty, recording
 * each event's payload and persisting each event's state.
 *
 * @param internalQueue the queue's private backing LinkedBlockingQueue, used only
 *                      to detect emptiness (obtained reflectively by the caller)
 * @param queue the tailer queue to take events from
 * @param readLogList accumulator for the event payloads read so far
 * @param stateProcessor persists the state of each consumed event
 * @throws InterruptedException if interrupted while taking from the queue
 * @throws FileTailerStateProcessorException if saving an event's state fails
 */
private void saveState(LinkedBlockingQueue<FileTailerEvent> internalQueue, FileTailerQueue queue, List<String> readLogList, FileTailerStateProcessor stateProcessor) throws InterruptedException, FileTailerStateProcessorException {
  // isEmpty() is the idiomatic (and O(1)) emptiness check.
  while (!internalQueue.isEmpty()) {
    FileTailerEvent event = queue.take();
    readLogList.add(event.getEventData());
    stateProcessor.saveState(event.getState());
  }
}
Usage of co.cask.cdap.filetailer.event.FileTailerEvent in project cdap-ingest by caskdata — class RunFromSaveStateTest, method runFromSaveStateTest.
/**
 * Verifies that the log tailer resumes from persisted state: three successive
 * write/tail/drain cycles must together read every written entry exactly once,
 * in write order.
 */
@Test
public void runFromSaveStateTest() throws Exception {
  FileTailerQueue queue = new FileTailerQueue(QUEUE_SIZE);
  PipeConfiguration flowConfig = TailerLogUtils.loadConfig();
  FileTailerStateProcessor stateProcessor =
      new FileTailerStateProcessorImpl(flowConfig.getDaemonDir(), flowConfig.getStateFile());
  FileTailerMetricsProcessor metricsProcessor = new FileTailerMetricsProcessor(
      flowConfig.getDaemonDir(), flowConfig.getStatisticsFile(),
      flowConfig.getStatisticsSleepInterval(), flowConfig.getPipeName(),
      flowConfig.getSourceConfiguration().getFileName());
  String filePath = flowConfig.getSourceConfiguration().getWorkDir().getAbsolutePath()
      + "/" + flowConfig.getSourceConfiguration().getFileName();
  List<String> logList = new ArrayList<String>(ENTRY_WRITE_NUMBER);
  List<String> readLogList = new ArrayList<String>(ENTRY_WRITE_NUMBER);
  Logger logger = TailerLogUtils.getSizeLogger(filePath, LOG_FILE_SIZE);
  // Reach into the queue's private backing queue so saveState can detect emptiness.
  Field queueField = queue.getClass().getDeclaredField("queue");
  queueField.setAccessible(true);
  @SuppressWarnings("unchecked") // the backing field is a LinkedBlockingQueue<FileTailerEvent>
  LinkedBlockingQueue<FileTailerEvent> intQueue =
      (LinkedBlockingQueue<FileTailerEvent>) queueField.get(queue);
  // Three identical cycles (previously copy-pasted): each writes entries, tails
  // them with a fresh tailer — which must resume from the saved state — then
  // drains the queue and persists the resulting state.
  for (int cycle = 0; cycle < 3; cycle++) {
    write_log(ENTRY_WRITE_NUMBER, logger, logList);
    LogTailer tailer = new LogTailer(TailerLogUtils.loadConfig(), queue,
        stateProcessor, metricsProcessor, null);
    tailer.startAsync();
    Thread.sleep(SLEEP_TIME);
    tailer.stopAsync();
    saveState(intQueue, queue, readLogList, stateProcessor);
  }
  // Entries must have been read in write order, with no duplicates or gaps.
  for (int i = 0; i < logList.size(); i++) {
    Assert.assertTrue(readLogList.get(i).contains(logList.get(i)));
  }
}
Aggregations