Use of cz.o2.proxima.direct.commitlog.CommitLogReader in project proxima-platform by O2-Czech-Republic.
From the class LocalKafkaCommitLogDescriptorTest, method testObserveOffsetsWithLogRoll. The test mocks the underlying KafkaConsumer so that the reported beginning offsets jump forward mid-read (a simulated log roll) and verifies that a bulk observation started from Position.OLDEST still completes.
@Test(timeout = 10000)
public void testObserveOffsetsWithLogRoll() throws InterruptedException {
  String topic = Utils.topic(storageUri);
  // Three partitions, each reporting end offset 2 and beginning offset 0.
  Map<TopicPartition, Long> endOffsets =
      IntStream.range(0, 3)
          .mapToObj(i -> new TopicPartition(topic, i))
          .collect(Collectors.toMap(Function.identity(), e -> 2L));
  Map<TopicPartition, Long> beginningOffsets =
      IntStream.range(0, 3)
          .mapToObj(i -> new TopicPartition(topic, i))
          .collect(Collectors.toMap(Function.identity(), e -> 0L));
  final LocalKafkaCommitLogDescriptor descriptor =
      new LocalKafkaCommitLogDescriptor() {
        @Override
        public Accessor createAccessor(
            DirectDataOperator direct, AttributeFamilyDescriptor family) {
          AtomicInteger invokedCount = new AtomicInteger();
          return new Accessor(
              family.getEntity(), family.getStorageUri(), family.getCfg(), id) {
            @Override
            <K, V> KafkaConsumer<K, V> mockKafkaConsumer(
                String name,
                ConsumerGroup group,
                ElementSerializer<K, V> serializer,
                @Nullable Collection<Partition> assignedPartitions,
                @Nullable ConsumerRebalanceListener listener) {
              KafkaConsumer<K, V> mock =
                  super.mockKafkaConsumer(
                      name, group, serializer, assignedPartitions, listener);
              // Simulate a log roll: after two calls the beginning offsets
              // jump from 0 to 2, as if retention had deleted the oldest data.
              doAnswer(
                      invocationOnMock -> {
                        if (invokedCount.incrementAndGet() > 2) {
                          return endOffsets;
                        }
                        return beginningOffsets;
                      })
                  .when(mock)
                  .beginningOffsets(any());
              doAnswer(invocationOnMock -> endOffsets).when(mock).endOffsets(any());
              return mock;
            }
          };
        }
      };
  final Accessor accessor =
      descriptor.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
  final CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  final CountDownLatch latch = new CountDownLatch(1);
  final CommitLogObserver observer =
      new CommitLogObserver() {
        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
          context.confirm();
          // Returning false terminates the observation after the first element.
          return false;
        }

        @Override
        public boolean onError(Throwable error) {
          throw new RuntimeException(error);
        }

        @Override
        public void onCompleted() {
          latch.countDown();
        }
      };
  try (final ObserveHandle handle =
      reader.observeBulkOffsets(
          reader.fetchOffsets(Position.OLDEST, reader.getPartitions()).values(),
          true,
          observer)) {
    latch.await();
  }
}
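The closing pattern, fetching offsets at Position.OLDEST and handing them to observeBulkOffsets, is the general way to replay a commit log from an explicit position. Below is a minimal standalone sketch of that pattern, assuming only a CommitLogReader named reader obtained as in the test; the boolean argument is taken to stop the observation at the data current at subscription time, mirroring the observeBulk usage later in this class.

  // Minimal sketch, assuming `reader` is a CommitLogReader obtained as in the test.
  // Replays all data from the oldest retained offsets, then stops.
  CountDownLatch done = new CountDownLatch(1);
  try (ObserveHandle handle =
      reader.observeBulkOffsets(
          reader.fetchOffsets(Position.OLDEST, reader.getPartitions()).values(),
          true, // presumably stop-at-current, as with observeBulk
          new CommitLogObserver() {
            @Override
            public boolean onNext(StreamElement element, OnNextContext context) {
              context.confirm(); // commit progress up to this element
              return true; // keep consuming, unlike the test above
            }

            @Override
            public boolean onError(Throwable error) {
              throw new RuntimeException(error);
            }

            @Override
            public void onCompleted() {
              done.countDown();
            }
          })) {
    done.await();
  }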
Use of cz.o2.proxima.direct.commitlog.CommitLogReader in project proxima-platform by O2-Czech-Republic.
From the class LocalKafkaCommitLogDescriptorTest, method testObserveCancelled. The test verifies that closing an ObserveHandle cancels the observation: onCancelled fires and onCompleted never does.
@Test(timeout = 10000)
public void testObserveCancelled() throws InterruptedException {
  Accessor accessor =
      kafka.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
  CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  final CountDownLatch latch = new CountDownLatch(1);
  final ObserveHandle handle =
      reader.observe(
          "test",
          Position.NEWEST,
          new CommitLogObserver() {
            @Override
            public boolean onNext(StreamElement ingest, OnNextContext context) {
              return true;
            }

            @Override
            public void onCompleted() {
              fail("This should not be called");
            }

            @Override
            public boolean onError(Throwable error) {
              throw new RuntimeException(error);
            }

            @Override
            public void onCancelled() {
              latch.countDown();
            }
          });
  // Closing the handle must cancel the observation, not complete it.
  handle.close();
  latch.await();
}
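Outside a test, the same lifecycle reads as start, consume, close. A minimal sketch assuming the reader from the tests; "my-consumer" is a hypothetical name:

  // Minimal sketch: ObserveHandle controls the lifecycle of the observation.
  ObserveHandle handle =
      reader.observe(
          "my-consumer", // hypothetical consumer name
          Position.NEWEST,
          new CommitLogObserver() {
            @Override
            public boolean onNext(StreamElement element, OnNextContext context) {
              context.confirm();
              return true;
            }

            @Override
            public boolean onError(Throwable error) {
              throw new RuntimeException(error);
            }

            @Override
            public void onCancelled() {
              // invoked by handle.close(); onCompleted() is not called
            }
          });
  // ... consume for as long as needed, then shut down:
  handle.close();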
Use of cz.o2.proxima.direct.commitlog.CommitLogReader in project proxima-platform by O2-Czech-Republic.
From the class LocalKafkaCommitLogDescriptorTest, method testBatchObserveWithLogRoll. The same simulated log roll as in testObserveOffsetsWithLogRoll, this time driven through observeBulk with a named consumer instead of explicit offsets.
@Test(timeout = 10000)
public void testBatchObserveWithLogRoll() throws InterruptedException {
  String topic = Utils.topic(storageUri);
  // Three partitions, each reporting end offset 2 and beginning offset 0.
  Map<TopicPartition, Long> endOffsets =
      IntStream.range(0, 3)
          .mapToObj(i -> new TopicPartition(topic, i))
          .collect(Collectors.toMap(Function.identity(), e -> 2L));
  Map<TopicPartition, Long> beginningOffsets =
      IntStream.range(0, 3)
          .mapToObj(i -> new TopicPartition(topic, i))
          .collect(Collectors.toMap(Function.identity(), e -> 0L));
  final LocalKafkaCommitLogDescriptor descriptor =
      new LocalKafkaCommitLogDescriptor() {
        @Override
        public Accessor createAccessor(
            DirectDataOperator direct, AttributeFamilyDescriptor family) {
          AtomicInteger invokedCount = new AtomicInteger();
          return new Accessor(
              family.getEntity(), family.getStorageUri(), family.getCfg(), id) {
            @Override
            <K, V> KafkaConsumer<K, V> mockKafkaConsumer(
                String name,
                ConsumerGroup group,
                ElementSerializer<K, V> serializer,
                @Nullable Collection<Partition> assignedPartitions,
                @Nullable ConsumerRebalanceListener listener) {
              KafkaConsumer<K, V> mock =
                  super.mockKafkaConsumer(
                      name, group, serializer, assignedPartitions, listener);
              // Same log-roll simulation as above: beginning offsets advance
              // from 0 to 2 after the second query.
              doAnswer(
                      invocationOnMock -> {
                        if (invokedCount.incrementAndGet() > 2) {
                          return endOffsets;
                        }
                        return beginningOffsets;
                      })
                  .when(mock)
                  .beginningOffsets(any());
              doAnswer(invocationOnMock -> endOffsets).when(mock).endOffsets(any());
              return mock;
            }
          };
        }
      };
  final Accessor accessor =
      descriptor.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
  final CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  final CountDownLatch latch = new CountDownLatch(1);
  final CommitLogObserver observer =
      new CommitLogObserver() {
        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
          context.confirm();
          return false;
        }

        @Override
        public boolean onError(Throwable error) {
          throw new RuntimeException(error);
        }

        @Override
        public void onCompleted() {
          latch.countDown();
        }
      };
  try (final ObserveHandle handle =
      reader.observeBulk("dummy", Position.OLDEST, true, observer)) {
    latch.await();
  }
}
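Compared with testObserveOffsetsWithLogRoll above, only the entry point differs: observeBulk takes a consumer name and a Position, while observeBulkOffsets takes explicit offsets, which suits resuming from stored positions. A compact sketch of the two side by side, assuming the reader and an observer shaped like those in the tests:

  // Compact sketch, assuming `reader` and an `observer` like those above.
  // Entry point with explicit offsets (e.g. resuming from stored offsets):
  ObserveHandle fromOffsets =
      reader.observeBulkOffsets(
          reader.fetchOffsets(Position.OLDEST, reader.getPartitions()).values(), true, observer);
  // Entry point with a named consumer and a Position:
  ObserveHandle fromPosition = reader.observeBulk("drain", Position.OLDEST, true, observer);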
Use of cz.o2.proxima.direct.commitlog.CommitLogReader in project proxima-platform by O2-Czech-Republic.
From the class LocalKafkaCommitLogDescriptorTest, method testBulkObserveWithExceptionAndRetry. The test wraps a failing observer in CommitLogObservers.withNumRetriedExceptions and checks that consumption is restarted the expected number of times.
@Test(timeout = 10000)
public void testBulkObserveWithExceptionAndRetry() throws InterruptedException {
  Accessor accessor =
      kafka.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
  LocalKafkaWriter writer = accessor.newWriter();
  CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  AtomicInteger restarts = new AtomicInteger();
  StreamElement update =
      StreamElement.upsert(
          entity,
          attr,
          UUID.randomUUID().toString(),
          "key",
          attr.getName(),
          System.currentTimeMillis(),
          new byte[] {1, 2});
  final int numRetries = 3;
  final CountDownLatch latch = new CountDownLatch(numRetries);
  // Decorate the observer with retry logic: each exception thrown from
  // onNext restarts consumption, up to numRetries times.
  final CommitLogObserver observer =
      CommitLogObservers.withNumRetriedExceptions(
          "test",
          numRetries,
          new CommitLogObserver() {
            @Override
            public boolean onError(Throwable error) {
              latch.countDown();
              return true;
            }

            @Override
            public boolean onNext(StreamElement ingest, OnNextContext confirm) {
              restarts.incrementAndGet();
              throw new RuntimeException("FAIL!");
            }
          });
  reader.observe("test", observer);
  // Keep writing elements in the background so the observer always has input.
  Executors.newCachedThreadPool()
      .execute(
          () -> {
            while (true) {
              try {
                TimeUnit.MILLISECONDS.sleep(100);
              } catch (InterruptedException ex) {
                break;
              }
              writer.write(update, (succ, e) -> assertTrue(succ));
            }
          });
  latch.await();
  assertEquals(numRetries, restarts.get());
}
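The decorator used above is the usual way to absorb transient processing failures: CommitLogObservers.withNumRetriedExceptions takes a name, a retry limit, and a delegate observer, restarting consumption when onNext throws and reporting through the delegate's onError, as the test exercises. A minimal sketch of the wiring; process() is a hypothetical application handler that may throw:

  // Minimal sketch: tolerate up to 3 exceptions thrown from onNext.
  CommitLogObserver resilient =
      CommitLogObservers.withNumRetriedExceptions(
          "my-consumer",
          3,
          new CommitLogObserver() {
            @Override
            public boolean onNext(StreamElement element, OnNextContext context) {
              process(element); // hypothetical application logic that may throw
              context.confirm();
              return true;
            }

            @Override
            public boolean onError(Throwable error) {
              // reached when the wrapper hands the error over;
              // returning true asks for a restart, false stops
              return false;
            }
          });
  reader.observe("my-consumer", resilient);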
Use of cz.o2.proxima.direct.commitlog.CommitLogReader in project proxima-platform by O2-Czech-Republic.
From the class LocalKafkaCommitLogDescriptorTest, method testBulkObservePartitionsResetOffsetsSuccess. The test writes one element, rewinds the handle to offset zero via resetOffsets, and verifies the element is replayed and committed.
@Test(timeout = 10000)
public void testBulkObservePartitionsResetOffsetsSuccess() throws InterruptedException {
  Accessor accessor =
      kafka.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
  LocalKafkaWriter writer = accessor.newWriter();
  CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  AtomicInteger restarts = new AtomicInteger();
  AtomicReference<Throwable> exc = new AtomicReference<>();
  AtomicReference<StreamElement> input = new AtomicReference<>();
  AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(2));
  StreamElement update =
      StreamElement.upsert(
          entity,
          attr,
          UUID.randomUUID().toString(),
          "key",
          attr.getName(),
          System.currentTimeMillis(),
          new byte[] {1, 2});
  final ObserveHandle handle =
      reader.observePartitions(
          reader.getPartitions(),
          Position.NEWEST,
          new CommitLogObserver() {
            @Override
            public void onRepartition(OnRepartitionContext context) {
              restarts.incrementAndGet();
            }

            @Override
            public boolean onNext(StreamElement ingest, OnNextContext context) {
              input.set(ingest);
              context.confirm();
              latch.get().countDown();
              return true;
            }

            @Override
            public boolean onError(Throwable error) {
              exc.set(error);
              throw new RuntimeException(error);
            }
          });
  handle.waitUntilReady();
  writer.write(
      update,
      (succ, e) -> {
        assertTrue(succ);
        latch.get().countDown();
      });
  latch.get().await();
  latch.set(new CountDownLatch(1));
  // Rewind all partitions to offset 0; the element written above
  // must be replayed, counting the fresh latch down.
  handle.resetOffsets(
      reader.getPartitions().stream()
          .map(p -> (PartitionWithTopic) p)
          .map(
              p ->
                  new TopicOffset(
                      new PartitionWithTopic(p.getTopic(), p.getId()),
                      0,
                      Watermarks.MIN_WATERMARK))
          .collect(Collectors.toList()));
  latch.get().await();
  // After replaying the single element, the committed offsets sum to 1.
  assertEquals(
      1L,
      handle.getCommittedOffsets().stream().mapToLong(o -> ((TopicOffset) o).getOffset()).sum());
}
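resetOffsets operates on a live handle: the observation keeps running and is simply rewound, so elements between the target offset and the current position are delivered again. A minimal sketch of the rewind in isolation, using the same Kafka-specific TopicOffset and PartitionWithTopic types as the test:

  // Minimal sketch: rewind a running observation to the beginning of
  // every partition; already processed elements will be replayed.
  handle.resetOffsets(
      reader.getPartitions().stream()
          .map(p -> (PartitionWithTopic) p)
          .map(
              p ->
                  new TopicOffset(
                      new PartitionWithTopic(p.getTopic(), p.getId()),
                      0, // target offset within the partition
                      Watermarks.MIN_WATERMARK)) // restart the watermark as well
          .collect(Collectors.toList()));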