Use of cz.o2.proxima.direct.commitlog.CommitLogObserver.OnNextContext in project proxima-platform by O2-Czech-Republic.
From the class LocalKafkaCommitLogDescriptorTest, method testBulkObservePartitionsFromOldestSuccess. The test writes 1000 elements up front, then bulk-observes all partitions from the oldest position, confirming each element through OnNextContext:
@Test(timeout = 10000)
public void testBulkObservePartitionsFromOldestSuccess() throws InterruptedException {
  Accessor accessor =
      kafka.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
  LocalKafkaWriter writer = accessor.newWriter();
  CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  AtomicInteger consumed = new AtomicInteger();
  StreamElement update =
      StreamElement.upsert(
          entity,
          attr,
          UUID.randomUUID().toString(),
          "key",
          attr.getName(),
          System.currentTimeMillis(),
          new byte[] {1, 2});
  // Write all elements before observing, so the observer has to start from the oldest data.
  for (int i = 0; i < 1000; i++) {
    writer.write(update, (succ, e) -> assertTrue(succ));
  }
  CountDownLatch latch = new CountDownLatch(1);
  reader.observeBulkPartitions(
      reader.getPartitions(),
      Position.OLDEST,
      true,
      new CommitLogObserver() {

        @Override
        public void onRepartition(OnRepartitionContext context) {}

        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
          consumed.incrementAndGet();
          // Acknowledge each element through the OnNextContext.
          context.confirm();
          return true;
        }

        @Override
        public void onCompleted() {
          latch.countDown();
        }

        @Override
        public boolean onError(Throwable error) {
          throw new RuntimeException(error);
        }
      });
  latch.await();
  assertEquals(1000, consumed.get());
}
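In the test above the element is confirmed synchronously inside onNext. Because the acknowledgement travels through OnNextContext rather than through the return value, it can also be tied to the outcome of the processing itself. A minimal sketch of that pattern; the helper is illustrative, and the fail(Throwable) call assumes OnNextContext exposes it alongside the confirm() used above:

// Illustrative helper: acknowledge through OnNextContext only after processing
// succeeds, and report failures through the same context. The fail(Throwable)
// method is an assumption; only confirm() appears in the test above.
static CommitLogObserver ackAfterProcessing(java.util.function.Consumer<StreamElement> process) {
  return new CommitLogObserver() {

    @Override
    public boolean onNext(StreamElement ingest, OnNextContext context) {
      try {
        process.accept(ingest);
        context.confirm(); // acknowledge only on success
      } catch (RuntimeException ex) {
        context.fail(ex); // report the failure for this offset
      }
      return true; // keep consuming
    }

    @Override
    public boolean onError(Throwable error) {
      return false; // stop on error in this sketch
    }
  };
}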
Use of cz.o2.proxima.direct.commitlog.CommitLogObserver.OnNextContext in project proxima-platform by O2-Czech-Republic.
From the class LocalKafkaCommitLogDescriptorTest, method testCustomWatermarkEstimator. The test installs a custom watermark estimator factory and verifies that OnNextContext.getWatermark() reports the watermark that factory produces:
@Test(timeout = 10000)
public void testCustomWatermarkEstimator() throws InterruptedException {
  Map<String, Object> cfg = partitionsCfg(3);
  // Install the custom estimator through the accessor configuration.
  cfg.put("watermark.estimator-factory", FixedWatermarkEstimatorFactory.class.getName());
  Accessor accessor = kafka.createAccessor(direct, createTestFamily(entity, storageUri, cfg));
  LocalKafkaWriter writer = accessor.newWriter();
  CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  long now = System.currentTimeMillis();
  final UnaryFunction<Integer, StreamElement> update =
      pos ->
          StreamElement.upsert(
              entity,
              attr,
              UUID.randomUUID().toString(),
              "key" + pos,
              attr.getName(),
              now + pos,
              new byte[] {1, 2});
  AtomicLong watermark = new AtomicLong();
  CountDownLatch latch = new CountDownLatch(100);
  reader
      .observe(
          "test",
          Position.NEWEST,
          new CommitLogObserver() {

            @Override
            public boolean onNext(StreamElement ingest, OnNextContext context) {
              // The watermark exposed by OnNextContext comes from the configured estimator.
              watermark.set(context.getWatermark());
              latch.countDown();
              return true;
            }

            @Override
            public void onCompleted() {
              fail("This should not be called");
            }

            @Override
            public boolean onError(Throwable error) {
              throw new RuntimeException(error);
            }
          })
      .waitUntilReady();
  for (int i = 0; i < 100; i++) {
    writer.write(update.apply(i), (succ, e) -> {});
  }
  latch.await();
  assertEquals(FixedWatermarkEstimatorFactory.FIXED_WATERMARK, watermark.get());
}
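Since context.getWatermark() reflects whatever estimator is configured, a consumer can use it to classify elements as on-time or late. A short sketch; process() is a hypothetical downstream call, and StreamElement.getStamp() is assumed to return the element timestamp passed to upsert above:

// Sketch: use the watermark from OnNextContext to detect late elements.
reader.observe(
    "late-data-example",
    Position.NEWEST,
    new CommitLogObserver() {

      @Override
      public boolean onNext(StreamElement ingest, OnNextContext context) {
        if (ingest.getStamp() < context.getWatermark()) {
          // element arrived behind the watermark; skip it as late data
          context.confirm();
          return true;
        }
        process(ingest); // hypothetical downstream processing
        context.confirm();
        return true;
      }
    });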
Use of cz.o2.proxima.direct.commitlog.CommitLogObserver.OnNextContext in project proxima-platform by O2-Czech-Republic.
From the class KafkaLogReaderIT, method testReadFromOldestVerifyIsAtHead. The test verifies that a reader started at the oldest position is not considered at head until it has consumed all pre-written elements:
@Test(timeout = 30_000L)
public void testReadFromOldestVerifyIsAtHead() throws InterruptedException {
  final EmbeddedKafkaBroker embeddedKafka = rule.getEmbeddedKafka();
  final int numPartitions = 3;
  embeddedKafka.addTopics(new NewTopic("foo", numPartitions, (short) 1));
  final CommitLogReader commitLogReader =
      Optionals.get(operator.getCommitLogReader(fooDescriptor));
  // Write everything up front, so we can be sure that we really seek all the way to beginning.
  final int numElements = 100;
  await(writeElements(numElements));
  AtomicReference<ObserveHandle> handle = new AtomicReference<>();
  CountDownLatch firstLatch = new CountDownLatch(1);
  CountDownLatch lastLatch = new CountDownLatch(1);
  CommitLogObserver observer =
      new CommitLogObserver() {

        int consumed = 0;

        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
          context.confirm();
          // Block on the first element, so the reader cannot reach head prematurely.
          if (consumed == 0) {
            ExceptionUtils.ignoringInterrupted(firstLatch::await);
          }
          if (++consumed == numElements) {
            lastLatch.countDown();
          }
          return true;
        }
      };
  handle.set(commitLogReader.observe("test-reader", Position.OLDEST, observer));
  // With consumption blocked on the first element, the reader must still be behind head.
  assertFalse(ObserveHandleUtils.isAtHead(handle.get(), commitLogReader));
  firstLatch.countDown();
  lastLatch.await();
  assertTrue(ObserveHandleUtils.isAtHead(handle.get(), commitLogReader));
}
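ObserveHandleUtils.isAtHead compares the handle's position with the current end of the log, so it can also be polled to wait until a freshly started consumer catches up. A small sketch using only the calls from the test above; the polling loop itself is not part of the test:

// Sketch: block until the observer behind the handle has caught up with the log head.
static void awaitAtHead(ObserveHandle handle, CommitLogReader reader)
    throws InterruptedException {
  while (!ObserveHandleUtils.isAtHead(handle, reader)) {
    TimeUnit.MILLISECONDS.sleep(100); // re-check periodically
  }
}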
Use of cz.o2.proxima.direct.commitlog.CommitLogObserver.OnNextContext in project proxima-platform by O2-Czech-Republic.
From the class LocalKafkaCommitLogDescriptorTest, method testFetchOffsets. The test starts a bulk observer from explicitly fetched oldest offsets and checks that the committed offsets end up equal to the newest offsets:
@Test(timeout = 10_000)
public void testFetchOffsets() throws InterruptedException {
  Accessor accessor =
      kafka.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
  LocalKafkaWriter writer = accessor.newWriter();
  CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  final CountDownLatch latch = new CountDownLatch(2);
  final StreamElement[] updates =
      new StreamElement[] {
        StreamElement.upsert(
            entity, attr, UUID.randomUUID().toString(), "key", attr.getName(),
            System.currentTimeMillis(), new byte[] {1, 2}),
        StreamElement.upsert(
            entity, attr, UUID.randomUUID().toString(), "key2", attr.getName(),
            System.currentTimeMillis(), new byte[] {1, 2, 3})
      };
  Arrays.stream(updates).forEach(update -> writer.write(update, (succ, exc) -> {}));
  CommitLogObserver observer =
      new CommitLogObserver() {

        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
          context.confirm();
          latch.countDown();
          return true;
        }

        @Override
        public void onCompleted() {
          fail("This should not be called");
        }

        @Override
        public boolean onError(Throwable error) {
          throw new RuntimeException(error);
        }
      };
  // Observe from the oldest offsets fetched explicitly from the reader.
  ObserveHandle handle =
      reader.observeBulkOffsets(
          reader.fetchOffsets(Position.OLDEST, reader.getPartitions()).values(), observer);
  latch.await();
  // After both elements are consumed, the committed offsets must equal the newest offsets.
  List<TopicOffset> committed =
      handle.getCommittedOffsets().stream()
          .map(TopicOffset.class::cast)
          .sorted(Comparator.comparing(tp -> tp.getPartition().getId()))
          .collect(Collectors.toList());
  List<TopicOffset> newest =
      reader.fetchOffsets(Position.NEWEST, reader.getPartitions()).values().stream()
          .map(TopicOffset.class::cast)
          .sorted(Comparator.comparing(tp -> tp.getPartition().getId()))
          .collect(Collectors.toList());
  assertEquals(committed, newest);
}
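Because getCommittedOffsets() and fetchOffsets(...) both yield offsets that observeBulkOffsets accepts, the same mechanism can resume a consumer from previously committed positions. A sketch, assuming ObserveHandle is closeable and that the offsets stay valid between the two calls:

// Sketch: snapshot the committed offsets, stop the consumer, and resume later
// from exactly those positions. Offset validity in between is an assumption.
List<Offset> snapshot = handle.getCommittedOffsets();
handle.close();
ObserveHandle resumed = reader.observeBulkOffsets(snapshot, observer);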