Use of io.divolte.server.AvroRecordBuffer in project divolte-collector by divolte.
The class FileFlusherTest, method shouldRollFileOnHeartbeatWithNoPendingRecords.
@Test
public void shouldRollFileOnHeartbeatWithNoPendingRecords() throws IOException, InterruptedException {
    final FileStrategyConfiguration fileStrategyConfiguration = setupConfiguration("100 milliseconds", "1 hour", "1");

    // Mocks
    final FileManager manager = mock(FileManager.class);
    final DivolteFile file = mock(DivolteFile.class);
    final InOrder calls = inOrder(manager, file);

    // Expect new file creation on file flusher construction
    when(manager.createFile(anyString())).thenReturn(file);
    final FileFlusher flusher = new FileFlusher(fileStrategyConfiguration, manager, 1L);

    final Item<AvroRecordBuffer> item = itemFromAvroRecordBuffer(newAvroRecordBuffer());
    assertEquals(CONTINUE, flusher.process(item));
    calls.verify(file).append(item.payload);

    Thread.sleep(200);
    assertEquals(CONTINUE, flusher.heartbeat());
    calls.verify(file).closeAndPublish();
    calls.verify(manager).createFile(anyString());

    calls.verifyNoMoreInteractions();
}
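The assertions above lean on Mockito's InOrder verification to prove that the flusher appends the pending record before it rolls the file on the heartbeat. A minimal, stand-alone sketch of that pattern (the Sink interface and its methods are illustrative only, not part of divolte-collector):

import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;

import org.mockito.InOrder;

class InOrderSketch {
    // Illustrative interface; not part of divolte-collector.
    interface Sink {
        void append(String record);
        void closeAndPublish();
    }

    static void demo() {
        final Sink sink = mock(Sink.class);

        // Exercise the mock in the order the code under test would.
        sink.append("event");
        sink.closeAndPublish();

        // Verify that append(...) happened strictly before closeAndPublish().
        final InOrder calls = inOrder(sink);
        calls.verify(sink).append("event");
        calls.verify(sink).closeAndPublish();
        calls.verifyNoMoreInteractions();
    }
}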
Use of io.divolte.server.AvroRecordBuffer in project divolte-collector by divolte.
The class FileFlusherTest, method shouldPostponeFailureOnConstruction.
@Test
public void shouldPostponeFailureOnConstruction() throws IOException, InterruptedException {
    final FileStrategyConfiguration fileStrategyConfiguration = setupConfiguration("1 hour", "1 hour", "200");

    // Mocks
    final FileManager manager = mock(FileManager.class);
    final DivolteFile file = mock(DivolteFile.class);
    final InOrder calls = inOrder(manager, file);
    final Item<AvroRecordBuffer> item = itemFromAvroRecordBuffer(newAvroRecordBuffer());

    when(manager.createFile(anyString()))
            .thenThrow(new IOException("file create")) // First file creation fails
            .thenReturn(file);                         // Second file creation succeeds

    // The failing invocation of manager.createFile(...) happens here
    final FileFlusher flusher = new FileFlusher(fileStrategyConfiguration, manager, 50L);
    calls.verify(manager).createFile(anyString());

    // The postponed failure surfaces here: process(...) signals PAUSE instead of throwing
    assertEquals(PAUSE, flusher.process(item));
    // Important: should not hit file.append(...)

    Thread.sleep(100);
    assertEquals(CONTINUE, flusher.heartbeat());
    calls.verify(manager).createFile(anyString());

    calls.verifyNoMoreInteractions();
}
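The postponement behaviour is driven by Mockito's consecutive stubbing: chaining thenThrow(...).thenReturn(...) makes the first call to the stubbed method throw while later calls return normally. A small stand-alone illustration (FileFactory is a made-up interface used only for this sketch, not the project's FileManager):

import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.IOException;

class ConsecutiveStubbingSketch {
    // Made-up interface for illustration only.
    interface FileFactory {
        String createFile(String name) throws IOException;
    }

    static void demo() throws IOException {
        final FileFactory factory = mock(FileFactory.class);
        when(factory.createFile(anyString()))
                .thenThrow(new IOException("first attempt fails"))
                .thenReturn("ok");

        try {
            factory.createFile("a.avro"); // First call throws IOException.
        } catch (final IOException expected) {
            // A real flusher would remember this failure and report it later.
        }
        final String second = factory.createFile("b.avro"); // Second call returns "ok".
        assert "ok".equals(second);
    }
}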
Use of io.divolte.server.AvroRecordBuffer in project divolte-collector by divolte.
The class KafkaFlusher, method sendBatch.
@Override
protected ImmutableList<ProducerRecord<DivolteIdentifier, AvroRecordBuffer>> sendBatch(
        final List<ProducerRecord<DivolteIdentifier, AvroRecordBuffer>> batch) throws InterruptedException {
    // First start sending the messages.
    // (This will serialize them, determine the partition and then assign them to a per-partition buffer.)
    final int batchSize = batch.size();
    final List<Future<RecordMetadata>> sendResults =
            batch.stream()
                 .map(producer::send)
                 .collect(Collectors.toCollection(() -> new ArrayList<>(batchSize)));

    // Force a flush so we can check the results without blocking unnecessarily due to
    // a user-configured flushing policy.
    producer.flush();

    // When finished, each message can be in one of several states:
    //  - Completed.
    //  - An error occurred, but a retry may succeed.
    //  - A fatal error occurred.
    // (In addition, we can be interrupted due to shutdown.)
    final ImmutableList.Builder<ProducerRecord<DivolteIdentifier, AvroRecordBuffer>> remaining = ImmutableList.builder();
    for (int i = 0; i < batchSize; ++i) {
        final Future<RecordMetadata> result = sendResults.get(i);
        try {
            final RecordMetadata metadata = result.get();
            if (logger.isDebugEnabled()) {
                final ProducerRecord<DivolteIdentifier, AvroRecordBuffer> record = batch.get(i);
                logger.debug("Finished sending event (partyId={}) to Kafka: topic/partition/offset = {}/{}/{}",
                             record.key(), metadata.topic(), metadata.partition(), metadata.offset());
            }
        } catch (final ExecutionException e) {
            final Throwable cause = e.getCause();
            final ProducerRecord<DivolteIdentifier, AvroRecordBuffer> record = batch.get(i);
            if (cause instanceof RetriableException) {
                // A retry may succeed.
                if (logger.isDebugEnabled()) {
                    logger.debug("Transient error sending event (partyId=" + record.key() + ") to Kafka. Will retry.", cause);
                }
                remaining.add(record);
            } else {
                // Fatal error.
                logger.error("Error sending event (partyId=" + record.key() + ") to Kafka; abandoning.", cause);
            }
        }
    }
    return remaining.build();
}
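sendBatch returns only the records that failed with a retriable error, so the caller can resubmit them. A hypothetical driver loop, written as if it lived alongside sendBatch in KafkaFlusher (the method name sendWithRetries, the attempt bound, and the fixed backoff are assumptions for illustration, not the actual divolte-collector processor logic):

// Hypothetical retry driver; not the actual divolte-collector processor logic.
private void sendWithRetries(final List<ProducerRecord<DivolteIdentifier, AvroRecordBuffer>> initialBatch) throws InterruptedException {
    final int maxAttempts = 3; // Assumed bound, not from the source.
    List<ProducerRecord<DivolteIdentifier, AvroRecordBuffer>> pending = initialBatch;
    for (int attempt = 1; attempt <= maxAttempts && !pending.isEmpty(); ++attempt) {
        // sendBatch(...) hands back only the records that failed with a RetriableException.
        pending = sendBatch(pending);
        if (!pending.isEmpty()) {
            Thread.sleep(200L * attempt); // Simple linear backoff; purely illustrative.
        }
    }
    if (!pending.isEmpty()) {
        logger.warn("Dropping {} events after {} attempts.", pending.size(), maxAttempts);
    }
}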