Use of cz.o2.proxima.direct.kafka.LocalKafkaCommitLogDescriptor.LocalKafkaLogReader in project proxima-platform by O2-Czech-Republic.
From class LocalKafkaCommitLogDescriptorTest, method testHandleRebalanceInProgressException.
@Test(timeout = 10000)
public void testHandleRebalanceInProgressException() throws InterruptedException {
  final AtomicInteger invokedCount = new AtomicInteger();
  final int numElements = 2000;
  final LocalKafkaCommitLogDescriptor descriptor =
      new LocalKafkaCommitLogDescriptor() {
        @Override
        public Accessor createAccessor(DirectDataOperator direct, AttributeFamilyDescriptor family) {
          return new Accessor(family.getEntity(), family.getStorageUri(), family.getCfg(), id) {
            @Override
            <K, V> KafkaConsumer<K, V> mockKafkaConsumer(
                String name,
                ConsumerGroup group,
                ElementSerializer<K, V> serializer,
                @Nullable Collection<Partition> assignedPartitions,
                @Nullable ConsumerRebalanceListener listener) {
              final Map<TopicPartition, OffsetAndMetadata> committed = new HashMap<>();
              KafkaConsumer<K, V> mock =
                  super.mockKafkaConsumer(name, group, serializer, assignedPartitions, listener);
              // the second commitSync invocation throws RebalanceInProgressException to
              // simulate a group rebalance; all other invocations record the offsets
              doAnswer(
                      invocationOnMock -> {
                        if (invokedCount.getAndIncrement() == 1) {
                          throw new RebalanceInProgressException();
                        }
                        Map<TopicPartition, OffsetAndMetadata> toCommit = invocationOnMock.getArgument(0);
                        committed.putAll(toCommit);
                        return null;
                      })
                  .when(mock)
                  .commitSync(anyMap());
              // committed(...) answers from the offsets recorded above
              doAnswer(
                      invocationOnMock -> {
                        Set<TopicPartition> parts = invocationOnMock.getArgument(0);
                        return parts.stream()
                            .map(tp -> Pair.of(tp, committed.get(tp)))
                            .filter(p -> p.getSecond() != null)
                            .collect(Collectors.toMap(Pair::getFirst, Pair::getSecond));
                      })
                  .when(mock)
                  .committed(anySet());
              return mock;
            }
          };
        }
      };
  Accessor accessor = descriptor.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(1)));
  LocalKafkaLogReader reader = accessor.newReader(direct.getContext());
  Map<String, StreamElement> observedAfterRepartition = new HashMap<>();
  LocalKafkaWriter<?, ?> writer = accessor.newWriter();
  CountDownLatch latch = new CountDownLatch(1);
  try (ObserveHandle handle =
      reader.observe(
          "dummy",
          new CommitLogObserver() {
            @Override
            public boolean onNext(StreamElement ingest, OnNextContext context) {
              observedAfterRepartition.put(ingest.getKey(), ingest);
              context.confirm();
              if (ingest.getKey().equals("last-key")) {
                latch.countDown();
                return false;
              }
              return true;
            }

            @Override
            public boolean onError(Throwable error) {
              return false;
            }
          })) {
    for (int i = 0; i < numElements; i++) {
      writer.write(
          StreamElement.upsert(entity, attr, UUID.randomUUID().toString(), "key" + i, attr.getName(), System.currentTimeMillis(), new byte[] {}),
          (succ, exc) -> {});
    }
    writer.write(
        StreamElement.upsert(entity, attr, UUID.randomUUID().toString(), "last-key", attr.getName(), System.currentTimeMillis(), new byte[] {}),
        (succ, exc) -> {});
    latch.await();
  }
  // all numElements generated keys plus "last-key" were observed
  assertEquals(numElements + 1, observedAfterRepartition.size());
  // commitSync was invoked again after the simulated rebalance
  assertTrue(invokedCount.get() > 1);
}
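For context, RebalanceInProgressException comes from the plain Kafka consumer API: commitSync throws it when a consumer-group rebalance is underway at commit time, and the usual reaction is to let the rebalance complete (by polling again) and retry the commit. Below is a minimal, illustrative sketch of that pattern against the standard org.apache.kafka.clients.consumer API; the commitWithRetry helper and its retry policy are assumptions for illustration, not part of proxima-platform.

import java.time.Duration;
import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.RebalanceInProgressException;

class OffsetCommitter {
  /** Hypothetical helper: retry a synchronous offset commit across a rebalance. */
  static void commitWithRetry(Consumer<?, ?> consumer, Map<TopicPartition, OffsetAndMetadata> offsets) {
    while (true) {
      try {
        consumer.commitSync(offsets);
        return; // commit accepted
      } catch (RebalanceInProgressException ex) {
        // a group rebalance is in flight; poll so the consumer can rejoin the
        // group, then retry (offsets of revoked partitions may be rejected)
        consumer.poll(Duration.ZERO);
      }
    }
  }
}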
Use of cz.o2.proxima.direct.kafka.LocalKafkaCommitLogDescriptor.LocalKafkaLogReader in project proxima-platform by O2-Czech-Republic.
From class LocalKafkaCommitLogDescriptorTest, method testFailedPollDoesNotDeadlock.
@Test(timeout = 10000)
public void testFailedPollDoesNotDeadlock() throws InterruptedException {
  final LocalKafkaCommitLogDescriptor descriptor =
      new LocalKafkaCommitLogDescriptor() {
        @Override
        public Accessor createAccessor(DirectDataOperator direct, AttributeFamilyDescriptor family) {
          return new Accessor(family.getEntity(), family.getStorageUri(), family.getCfg(), id) {
            @Override
            <K, V> KafkaConsumer<K, V> mockKafkaConsumer(
                String name,
                ConsumerGroup group,
                ElementSerializer<K, V> serializer,
                @Nullable Collection<Partition> assignedPartitions,
                @Nullable ConsumerRebalanceListener listener) {
              KafkaConsumer<K, V> mock =
                  super.mockKafkaConsumer(name, group, serializer, assignedPartitions, listener);
              // every poll fails; the test verifies the handle still becomes
              // ready and can be closed without deadlocking
              doThrow(new RuntimeException("Failed poll")).when(mock).poll(any());
              return mock;
            }
          };
        }
      };
  Accessor accessor = descriptor.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(1)));
  LocalKafkaLogReader reader = accessor.newReader(direct.getContext());
  ObserveHandle handle =
      reader.observe(
          "dummy",
          new CommitLogObserver() {
            @Override
            public boolean onNext(StreamElement ingest, OnNextContext context) {
              return false;
            }

            @Override
            public boolean onError(Throwable error) {
              return false;
            }
          });
  handle.waitUntilReady();
  handle.close();
}
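The property under test is that ObserveHandle#waitUntilReady and ObserveHandle#close return even when every underlying poll fails. A minimal sketch of that lifecycle follows, reusing only the reader and observer types shown in the snippets above; the consumer name "sketch" and the error-handling choices are illustrative assumptions, not the library's prescribed usage.

// assumes a LocalKafkaLogReader obtained as in the tests above
static void observeBriefly(LocalKafkaLogReader reader) throws InterruptedException {
  ObserveHandle handle =
      reader.observe(
          "sketch",
          new CommitLogObserver() {
            @Override
            public boolean onNext(StreamElement element, OnNextContext context) {
              context.confirm(); // acknowledge so offsets can be committed
              return true; // keep consuming
            }

            @Override
            public boolean onError(Throwable error) {
              return false; // stop observing on error instead of retrying
            }
          });
  try {
    handle.waitUntilReady(); // must return even if the underlying poll() keeps failing
  } finally {
    handle.close(); // must not deadlock; releases the underlying consumer
  }
}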
Use of cz.o2.proxima.direct.kafka.LocalKafkaCommitLogDescriptor.LocalKafkaLogReader in project proxima-platform by O2-Czech-Republic.
From class LocalKafkaCommitLogDescriptorTest, method testObserveOnNonExistingTopic.
@Test
public void testObserveOnNonExistingTopic() {
  Accessor accessor = kafka.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
  LocalKafkaLogReader reader = accessor.newReader(context());
  try {
    // getPartitions() is needed to initialize the consumer
    assertNotNull(reader.getPartitions());
    reader.validateTopic(reader.getConsumer(), "non-existing-topic");
    fail("Should throw IllegalArgumentException");
  } catch (IllegalArgumentException ex) {
    assertEquals(
        "Received null or empty partitions for topic [non-existing-topic]. "
            + "Please check that the topic exists and has at least one partition.",
        ex.getMessage());
  }
}
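Judging by the asserted message, the validation amounts to checking that the consumer reports at least one partition for the topic. A hypothetical re-implementation of such a check against the standard KafkaConsumer API is sketched below; validateTopic's actual implementation in proxima-platform may differ.

import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.PartitionInfo;

class TopicValidation {
  /** Hypothetical stand-in for the check the test exercises. */
  static void validateTopic(Consumer<?, ?> consumer, String topic) {
    // partitionsFor may return null or an empty list for an unknown topic
    List<PartitionInfo> partitions = consumer.partitionsFor(topic);
    if (partitions == null || partitions.isEmpty()) {
      throw new IllegalArgumentException(
          "Received null or empty partitions for topic ["
              + topic
              + "]. Please check that the topic exists and has at least one partition.");
    }
  }
}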