Use of io.divolte.server.DivolteIdentifier in project divolte-collector by divolte.
From the class GoogleCloudPubSubFlusherTest, method generateMessage.
private AvroRecordBuffer generateMessage() {
    // Both identifiers must have been initialized by the test setup.
    final DivolteIdentifier partyId = this.partyId.orElseThrow(IllegalStateException::new);
    final DivolteIdentifier sessionId = this.sessionId.orElseThrow(IllegalStateException::new);
    // Build a minimal Avro record carrying both identifiers and a monotonically increasing counter.
    final GenericRecord record = new GenericRecordBuilder(MINIMAL_SCHEMA)
        .set("partyId", partyId.toString())
        .set("sessionId", sessionId.toString())
        .set("counter", generatedEventCounter++)
        .build();
    return AvroRecordBuffer.fromRecord(partyId, sessionId, record);
}
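The record above sets three fields on MINIMAL_SCHEMA. For context, a compatible schema could be declared with Avro's SchemaBuilder; this is a sketch matching the field names used in the test, not the project's actual MINIMAL_SCHEMA definition, and the counter type is an assumption.

import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;

// Hypothetical stand-in for the test's MINIMAL_SCHEMA: one required field per
// set(...) call in generateMessage(). The counter type (long) is an assumption.
static final Schema MINIMAL_SCHEMA =
    SchemaBuilder.record("MinimalEvent")
                 .fields()
                 .requiredString("partyId")
                 .requiredString("sessionId")
                 .requiredLong("counter")
                 .endRecord();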
Use of io.divolte.server.DivolteIdentifier in project divolte-collector by divolte.
From the class DivolteIdentifierSerializerTest, method serializerShouldPrependIdWithVersion.
@Test
public void serializerShouldPrependIdWithVersion() {
    DivolteIdentifierSerializer serializer = new DivolteIdentifierSerializer();
    DivolteIdentifier cv = DivolteIdentifier.generate(42);
    byte[] asBytes = serializer.serialize("topic", cv);
    // The first byte of the serialized form is the identifier's version marker, '0'.
    assertEquals('0', asBytes[0]);
}
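Because serialize(topic, value) follows Kafka's Serializer contract, the class can be wired into a producer as the key serializer. A minimal sketch, assuming the standard Kafka producer API; the bootstrap address is illustrative, and the value serializer class name is a hypothetical placeholder for whatever the project actually uses:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;

final Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");      // illustrative address
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
          DivolteIdentifierSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
          "io.divolte.server.kafka.AvroRecordBufferSerializer");            // hypothetical name
final KafkaProducer<DivolteIdentifier, AvroRecordBuffer> producer = new KafkaProducer<>(props);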
Use of io.divolte.server.DivolteIdentifier in project divolte-collector by divolte.
From the class KafkaFlusher, method sendBatch.
@Override
protected ImmutableList<ProducerRecord<DivolteIdentifier, AvroRecordBuffer>> sendBatch(
        final List<ProducerRecord<DivolteIdentifier, AvroRecordBuffer>> batch) throws InterruptedException {
    // First start sending the messages.
    // (This will serialize them, determine the partition and then assign them to a per-partition buffer.)
    final int batchSize = batch.size();
    final List<Future<RecordMetadata>> sendResults =
        batch.stream()
             .map(producer::send)
             .collect(Collectors.toCollection(() -> new ArrayList<>(batchSize)));
    // Force a flush so we can check the results without blocking unnecessarily due to
    // a user-configured flushing policy.
    producer.flush();
    // When finished, each message can be in one of several states:
    //  - Completed.
    //  - An error occurred, but a retry may succeed.
    //  - A fatal error occurred.
    // (In addition, we can be interrupted due to shutdown.)
    final ImmutableList.Builder<ProducerRecord<DivolteIdentifier, AvroRecordBuffer>> remaining =
        ImmutableList.builder();
    for (int i = 0; i < batchSize; ++i) {
        final Future<RecordMetadata> result = sendResults.get(i);
        try {
            final RecordMetadata metadata = result.get();
            if (logger.isDebugEnabled()) {
                final ProducerRecord<DivolteIdentifier, AvroRecordBuffer> record = batch.get(i);
                logger.debug("Finished sending event (partyId={}) to Kafka: topic/partition/offset = {}/{}/{}",
                             record.key(), metadata.topic(), metadata.partition(), metadata.offset());
            }
        } catch (final ExecutionException e) {
            final Throwable cause = e.getCause();
            final ProducerRecord<DivolteIdentifier, AvroRecordBuffer> record = batch.get(i);
            if (cause instanceof RetriableException) {
                // A retry may succeed.
                if (logger.isDebugEnabled()) {
                    logger.debug("Transient error sending event (partyId=" + record.key() + ") to Kafka. Will retry.", cause);
                }
                remaining.add(record);
            } else {
                // Fatal error.
                logger.error("Error sending event (partyId=" + record.key() + ") to Kafka; abandoning.", cause);
            }
        }
    }
    return remaining.build();
}
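sendBatch returns only the records whose failure was retriable, so a caller can feed the result back in until the batch drains. A minimal driver sketch; the retry budget and backoff are illustrative, not the project's actual retry policy:

// Hypothetical driver around sendBatch: retry transient failures a bounded
// number of times, backing off between attempts.
List<ProducerRecord<DivolteIdentifier, AvroRecordBuffer>> pending = batch;
final int maxAttempts = 3;            // illustrative retry budget
for (int attempt = 1; attempt <= maxAttempts && !pending.isEmpty(); ++attempt) {
    pending = sendBatch(pending);
    if (!pending.isEmpty()) {
        Thread.sleep(200L * attempt); // illustrative linear backoff
    }
}
if (!pending.isEmpty()) {
    logger.warn("Giving up on {} events after {} attempts.", pending.size(), maxAttempts);
}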