Use of cz.o2.proxima.direct.commitlog.CommitLogReader in project proxima-platform by O2-Czech-Republic.
From the class KafkaLogReaderIT, method testReadFromOldest.
// Verifies that an observer in consumer group "test-reader" starting at Position.OLDEST
// replays the whole topic, and that a second observer reusing the same group name
// resumes from the committed offsets instead of re-reading the data.
@Test(timeout = 30_000L)
public void testReadFromOldest() throws InterruptedException {
  final EmbeddedKafkaBroker embeddedKafka = rule.getEmbeddedKafka();
  final int numPartitions = 3;
  embeddedKafka.addTopics(new NewTopic("foo", numPartitions, (short) 1));
  final CommitLogReader commitLogReader =
      Optionals.get(operator.getCommitLogReader(fooDescriptor));
  // Write everything up front, so we can be sure that we really seek all the way to beginning.
  final int numElements = 100;
  await(writeElements(numElements));
  // One poisoned pill per partition lets the observer complete once each partition is drained.
  await(writePoisonedPills(numPartitions));
  final TestLogObserver firstObserver = new TestLogObserver();
  final ObserveHandle firstHandle =
      commitLogReader.observe("test-reader", Position.OLDEST, firstObserver);
  // First observer should successfully complete and commit offsets.
  await(firstObserver.getCompleted());
  Assert.assertEquals(numElements, firstObserver.getNumReceivedElements());
  Assert.assertEquals(numElements + numPartitions, numCommittedElements(firstHandle));
  firstHandle.close();
  // Second observer shares the same name and should start from committed offsets.
  // Use numPartitions rather than a hard-coded 3 to stay consistent with the topic setup.
  await(writePoisonedPills(numPartitions));
  final TestLogObserver secondObserver = new TestLogObserver();
  final ObserveHandle secondHandle =
      commitLogReader.observe("test-reader", Position.OLDEST, secondObserver);
  await(secondObserver.getCompleted());
  // Only the new poisoned pills were pending, so no data elements are delivered again.
  Assert.assertEquals(0, secondObserver.getNumReceivedElements());
  Assert.assertEquals(numElements + 2 * numPartitions, numCommittedElements(secondHandle));
}
Use of cz.o2.proxima.direct.commitlog.CommitLogReader in project proxima-platform by O2-Czech-Republic.
From the class KafkaLogReaderIT, method testReadFromCurrent.
// Verifies reading from Position.CURRENT: elements written after the observer is ready
// should be delivered, while nothing written earlier is replayed.
// NOTE(review): disabled via @Ignore pending the linked issue (#183).
@Test(timeout = 30_000L)
@Ignore(value = "https://github.com/O2-Czech-Republic/proxima-platform/issues/183")
public void testReadFromCurrent() throws InterruptedException {
final EmbeddedKafkaBroker embeddedKafka = rule.getEmbeddedKafka();
final int numPartitions = 3;
embeddedKafka.addTopics(new NewTopic("foo", numPartitions, (short) 1));
final CommitLogReader commitLogReader = Optionals.get(operator.getCommitLogReader(fooDescriptor));
final TestLogObserver observer = new TestLogObserver();
// Subscribe first and wait until the consumer is ready, so the writes below land
// strictly after the "current" position.
final ObserveHandle handle = commitLogReader.observe("test-reader", Position.CURRENT, observer);
handle.waitUntilReady();
final int numElements = 100;
// NOTE(review): unlike the other tests in this class, these writes are not await()-ed —
// presumably intentional since the observer consumes concurrently, but worth confirming
// given this test is disabled for flakiness.
writeElements(numElements);
writePoisonedPills(numPartitions);
await(observer.getCompleted());
handle.close();
Assert.assertEquals(numElements, observer.getNumReceivedElements());
// NOTE(review): committed offsets are queried after close(); assumes the handle still
// reports its last committed state after closing — confirm.
Assert.assertEquals(numElements + numPartitions, numCommittedElements(handle));
}
Use of cz.o2.proxima.direct.commitlog.CommitLogReader in project proxima-platform by O2-Czech-Republic.
From the class KafkaLogReaderIT, method testLastPartitionReadFromOldest.
// Verifies that observing only the last partition from Position.OLDEST delivers exactly
// the elements hashed into that partition and advances its watermark accordingly.
@Test(timeout = 30_000L)
public void testLastPartitionReadFromOldest() throws InterruptedException {
  final EmbeddedKafkaBroker embeddedKafka = rule.getEmbeddedKafka();
  final int numPartitions = 3;
  embeddedKafka.addTopics(new NewTopic("foo", numPartitions, (short) 1));
  final CommitLogReader commitLogReader =
      Optionals.get(operator.getCommitLogReader(fooDescriptor));
  // Write everything up front, so we can be sure that we really seek all the way to beginning.
  final int numElements = 100;
  await(writeElements(numElements));
  await(writePoisonedPills(numPartitions));
  final TestLogObserver observer = new TestLogObserver();
  // Fetch the partition list once instead of calling getPartitions() twice.
  final java.util.List<Partition> partitions = commitLogReader.getPartitions();
  final Partition lastPartition = partitions.get(partitions.size() - 1);
  final ObserveHandle handle =
      commitLogReader.observePartitions(
          "test-reader", Collections.singletonList(lastPartition), Position.OLDEST, false, observer);
  // Observer should successfully complete once this partition's poisoned pill arrives.
  await(observer.getCompleted());
  // These numbers are determined by hashing all elements into three partitions.
  final int numElementsInLastPartition = 33;
  final int lastPartitionWatermark = 97;
  Assert.assertEquals(numElementsInLastPartition, observer.getNumReceivedElements());
  Assert.assertEquals(lastPartitionWatermark, observer.getWatermark(lastPartition));
  handle.close();
}
Use of cz.o2.proxima.direct.commitlog.CommitLogReader in project proxima-platform by O2-Czech-Republic.
From the class KafkaLogReaderIT, method testReadFromOldestVerifyIsAtHead.
// Verifies ObserveHandleUtils.isAtHead(): false while the consumer is parked behind
// pre-written data, true once every element has been consumed and confirmed.
@Test(timeout = 30_000L)
public void testReadFromOldestVerifyIsAtHead() throws InterruptedException {
  final EmbeddedKafkaBroker embeddedKafka = rule.getEmbeddedKafka();
  final int numPartitions = 3;
  embeddedKafka.addTopics(new NewTopic("foo", numPartitions, (short) 1));
  final CommitLogReader commitLogReader =
      Optionals.get(operator.getCommitLogReader(fooDescriptor));
  // Write everything up front, so we can be sure that we really seek all the way to beginning.
  final int numElements = 100;
  await(writeElements(numElements));
  final CountDownLatch firstLatch = new CountDownLatch(1);
  final CountDownLatch lastLatch = new CountDownLatch(1);
  final CommitLogObserver observer =
      new CommitLogObserver() {
        int consumed = 0;

        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
          context.confirm();
          // Park on the very first element so the isAtHead check below runs while
          // the consumer is still behind the end of the log.
          if (consumed == 0) {
            ExceptionUtils.ignoringInterrupted(firstLatch::await);
          }
          if (++consumed == numElements) {
            lastLatch.countDown();
          }
          return true;
        }
      };
  // A plain final local suffices here; the AtomicReference in the original added no
  // value because the handle is only read from this thread.
  final ObserveHandle handle = commitLogReader.observe("test-reader", Position.OLDEST, observer);
  // Consumer is blocked on the first element, so it cannot be at head yet.
  assertFalse(ObserveHandleUtils.isAtHead(handle, commitLogReader));
  firstLatch.countDown();
  lastLatch.await();
  // All elements consumed and confirmed: the handle must now report head position.
  assertTrue(ObserveHandleUtils.isAtHead(handle, commitLogReader));
}
Use of cz.o2.proxima.direct.commitlog.CommitLogReader in project proxima-platform by O2-Czech-Republic.
From the class LocalKafkaCommitLogDescriptorTest, method testFetchOffsets.
// Verifies fetchOffsets(): bulk-observing from offsets fetched at Position.OLDEST must,
// after all writes are consumed, leave committed offsets equal to those fetched at
// Position.NEWEST.
@Test(timeout = 10_000)
public void testFetchOffsets() throws InterruptedException {
  Accessor accessor =
      kafka.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
  LocalKafkaWriter writer = accessor.newWriter();
  CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  // Two elements are written below; count both down before asserting.
  final CountDownLatch latch = new CountDownLatch(2);
  final StreamElement[] updates =
      new StreamElement[] {
        StreamElement.upsert(
            entity,
            attr,
            UUID.randomUUID().toString(),
            "key",
            attr.getName(),
            System.currentTimeMillis(),
            new byte[] {1, 2}),
        StreamElement.upsert(
            entity,
            attr,
            UUID.randomUUID().toString(),
            "key2",
            attr.getName(),
            System.currentTimeMillis(),
            new byte[] {1, 2, 3})
      };
  for (StreamElement element : updates) {
    // Commit callback intentionally ignores the result; delivery is checked via the latch.
    writer.write(element, (succ, exc) -> {});
  }
  CommitLogObserver countingObserver =
      new CommitLogObserver() {
        @Override
        public boolean onNext(StreamElement element, OnNextContext context) {
          context.confirm();
          latch.countDown();
          return true;
        }

        @Override
        public void onCompleted() {
          fail("This should not be called");
        }

        @Override
        public boolean onError(Throwable error) {
          throw new RuntimeException(error);
        }
      };
  ObserveHandle handle =
      reader.observeBulkOffsets(
          reader.fetchOffsets(Position.OLDEST, reader.getPartitions()).values(), countingObserver);
  latch.await();
  // Sort both offset collections the same way before comparing them.
  final Comparator<TopicOffset> byPartitionId =
      Comparator.comparing(offset -> offset.getPartition().getId());
  assertEquals(
      handle
          .getCommittedOffsets()
          .stream()
          .map(TopicOffset.class::cast)
          .sorted(byPartitionId)
          .collect(Collectors.toList()),
      reader
          .fetchOffsets(Position.NEWEST, reader.getPartitions())
          .values()
          .stream()
          .map(TopicOffset.class::cast)
          .sorted(byPartitionId)
          .collect(Collectors.toList()));
}
Aggregations