Use of cz.o2.proxima.direct.commitlog.CommitLogObserver.OnNextContext in project proxima-platform by O2-Czech-Republic.
From the class LocalKafkaCommitLogDescriptorTest, method testSequentialConsumption:
private long testSequentialConsumption(long maxBytesPerSec) throws InterruptedException {
  final Accessor accessor =
      kafka.createAccessor(
          direct,
          createTestFamily(
              entity,
              storageUri,
              cfg(
                  Pair.of(KafkaAccessor.ASSIGNMENT_TIMEOUT_MS, 1L),
                  Pair.of(KafkaAccessor.MAX_BYTES_PER_SEC, maxBytesPerSec),
                  Pair.of(KafkaAccessor.MAX_POLL_RECORDS, 1))));
  final LocalKafkaWriter writer = accessor.newWriter();
  final CommitLogReader reader =
      accessor
          .getCommitLogReader(context())
          .orElseThrow(() -> new IllegalStateException("Missing log reader"));
  final AtomicLong lastOnNext = new AtomicLong(Long.MIN_VALUE);
  final AtomicLong maxLatency = new AtomicLong(0);
  final int numElements = 2;
  final CountDownLatch latch = new CountDownLatch(numElements);
  final CommitLogObserver observer =
      new CommitLogObserver() {

        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
          // track the maximum latency between two consecutive elements
          long now = System.nanoTime();
          long last = lastOnNext.getAndSet(now);
          if (last > 0) {
            long latency = now - last;
            maxLatency.getAndUpdate(old -> Math.max(old, latency));
          }
          latch.countDown();
          return true;
        }

        @Override
        public boolean onError(Throwable error) {
          throw new RuntimeException(error);
        }
      };
  reader.observe("dummy", Position.OLDEST, observer);
  for (int i = 0; i < numElements; i++) {
    writer.write(
        StreamElement.upsert(
            entity,
            attr,
            UUID.randomUUID().toString(),
            "key1",
            attr.getName(),
            System.currentTimeMillis(),
            emptyValue()),
        (succ, exc) -> {
          assertTrue(succ);
          assertNull(exc);
        });
  }
  latch.await();
  return maxLatency.get();
}
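The helper measures the maximum latency between two consecutive onNext calls, which is what MAX_BYTES_PER_SEC throttling should inflate. A minimal sketch of how the helper might be exercised follows; the test name and the rate values are illustrative assumptions, not taken from the project:

@Test(timeout = 10000)
public void testSequentialConsumptionThrottling() throws InterruptedException {
  // baseline: effectively unlimited throughput
  long unthrottled = testSequentialConsumption(Long.MAX_VALUE);
  // throttled: a tiny byte budget forces a pause between the two elements
  long throttled = testSequentialConsumption(1L);
  // the throttled run must exhibit strictly larger inter-element latency
  assertTrue(throttled > unthrottled);
}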
From the class LocalKafkaCommitLogDescriptorTest, method testObserveSuccess:
@Test(timeout = 10000)
public void testObserveSuccess() throws InterruptedException {
  final Accessor accessor =
      kafka.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
  final LocalKafkaWriter writer = accessor.newWriter();
  final CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  final CountDownLatch latch = new CountDownLatch(2);
  final StreamElement update =
      StreamElement.upsert(
          entity,
          attr,
          UUID.randomUUID().toString(),
          "key",
          attr.getName(),
          System.currentTimeMillis(),
          new byte[] {1, 2});
  final ObserveHandle handle =
      reader.observe(
          "test",
          Position.NEWEST,
          new CommitLogObserver() {

            @Override
            public boolean onNext(StreamElement ingest, OnNextContext context) {
              context.confirm();
              latch.countDown();
              return true;
            }

            @Override
            public void onCompleted() {
              fail("This should not be called");
            }

            @Override
            public boolean onError(Throwable error) {
              throw new RuntimeException(error);
            }
          });
  writer.write(
      update,
      (succ, e) -> {
        assertTrue(succ);
        latch.countDown();
      });
  latch.await();
  // all three partitions report a committed offset
  assertEquals(3, handle.getCommittedOffsets().size());
  long sum =
      handle.getCommittedOffsets().stream()
          .mapToLong(
              o -> {
                TopicOffset tpo = (TopicOffset) o;
                assertTrue(tpo.getOffset() <= 1);
                return tpo.getOffset();
              })
          .sum();
  // a single partition has committed exactly one element
  assertEquals(1, sum);
}
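The test relies on the OnNextContext contract: confirm() commits the offset of the element just delivered, and the boolean returned from onNext decides whether consumption continues. A minimal observer sketch of that contract, assuming a hypothetical process() hook for application logic and OnNextContext#fail for error propagation:

CommitLogObserver minimal =
    new CommitLogObserver() {

      @Override
      public boolean onNext(StreamElement element, OnNextContext context) {
        try {
          process(element); // hypothetical application-level processing
          context.confirm(); // acknowledge the element, committing its offset
          return true; // keep consuming
        } catch (Exception ex) {
          context.fail(ex); // propagate the failure to onError
          return false; // stop consuming
        }
      }

      @Override
      public boolean onError(Throwable error) {
        return false; // do not restart in this sketch
      }
    };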
From the class LocalKafkaCommitLogDescriptorTest, method testCurrentOffsetsReflectSeek:
@Test(timeout = 60000)
public void testCurrentOffsetsReflectSeek() throws InterruptedException {
  final Accessor accessor =
      kafka.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
  final CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  final LocalKafkaWriter writer = accessor.newWriter();
  final CountDownLatch latch = new CountDownLatch(10);
  final StreamElement update =
      StreamElement.upsert(
          entity,
          attr,
          UUID.randomUUID().toString(),
          "key",
          attr.getName(),
          System.currentTimeMillis(),
          new byte[] {1, 2});
  for (int i = 0; i < 10; i++) {
    writer.write(update, (succ, exc) -> latch.countDown());
  }
  latch.await();
  final ObserveHandle handle =
      reader.observe(
          "name",
          Position.OLDEST,
          new CommitLogObserver() {

            @Override
            public boolean onError(Throwable error) {
              return false;
            }

            @Override
            public boolean onNext(StreamElement ingest, OnNextContext context) {
              return false;
            }
          });
  handle.waitUntilReady();
  handle.close();
  assertEquals(3, handle.getCurrentOffsets().size());
  assertEquals(
      0,
      handle.getCurrentOffsets().stream()
          .mapToLong(o -> ((TopicOffset) o).getOffset())
          .filter(o -> o >= 0)
          .sum());
}
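Because onNext returns false immediately, nothing is confirmed, and getCurrentOffsets() still reflects the initial seek to Position.OLDEST in all three partitions. A hedged sketch of how such captured offsets could be used to resume consumption later, assuming CommitLogReader#observeBulkOffsets accepts a previously captured offset collection:

// capture the current position and resume from it with a fresh observer
List<Offset> stored = new ArrayList<>(handle.getCurrentOffsets());
ObserveHandle resumed = reader.observeBulkOffsets(stored, observer);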
From the class LocalKafkaCommitLogDescriptorTest, method testObserveBulkCommitsCorrectly:
@Test(timeout = 10000)
public void testObserveBulkCommitsCorrectly() throws InterruptedException {
  final Accessor accessor =
      kafka.createAccessor(
          direct,
          createTestFamily(
              entity,
              storageUri,
              cfg(
                  Pair.of(KafkaAccessor.ASSIGNMENT_TIMEOUT_MS, 1L),
                  Pair.of(LocalKafkaCommitLogDescriptor.CFG_NUM_PARTITIONS, 3))));
  final LocalKafkaWriter writer = accessor.newWriter();
  final CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  final long now = System.currentTimeMillis();
  for (int i = 0; i < 100; i++) {
    final StreamElement update =
        StreamElement.upsert(
            entity, attr, UUID.randomUUID().toString(), "key-" + i,
            attr.getName(), now + 2000, new byte[] {1, 2});
    // write a single element
    writer.write(update, (succ, e) -> {});
  }
  final CountDownLatch latch = new CountDownLatch(1);
  final ObserveHandle handle =
      reader.observeBulk(
          "test",
          Position.OLDEST,
          true, // stop at current data
          new CommitLogObserver() {

            int processed = 0;

            @Override
            public boolean onNext(StreamElement ingest, OnNextContext context) {
              // confirm only the last element; in bulk mode this commits all 100
              if (++processed == 100) {
                context.confirm();
              }
              return true;
            }

            @Override
            public void onCompleted() {
              latch.countDown();
            }

            @Override
            public boolean onError(Throwable error) {
              throw new RuntimeException(error);
            }
          });
  latch.await();
  final long offsetSum =
      handle.getCommittedOffsets().stream()
          .mapToLong(o -> ((TopicOffset) o).getOffset())
          .sum();
  assertEquals(100, offsetSum);
  final KafkaConsumer<Object, Object> consumer =
      ((LocalKafkaCommitLogDescriptor.LocalKafkaLogReader) reader).getConsumer();
  final String topic = accessor.getTopic();
  // the offsets committed to the underlying consumer must match
  assertEquals(
      100,
      consumer
          .committed(
              handle.getCommittedOffsets().stream()
                  .map(o -> new TopicPartition(topic, o.getPartition().getId()))
                  .collect(Collectors.toSet()))
          .values()
          .stream()
          .mapToLong(OffsetAndMetadata::offset)
          .sum());
}
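In bulk mode, a single confirm() acknowledges every element delivered since the previous commit, which is why confirming only the hundredth element still produces a committed-offset sum of 100 across the three partitions. A sketch of the same pattern with periodic commits; the batch size of 10 is an illustrative assumption:

CommitLogObserver batching =
    new CommitLogObserver() {

      int sinceLastCommit = 0;

      @Override
      public boolean onNext(StreamElement element, OnNextContext context) {
        if (++sinceLastCommit >= 10) { // hypothetical batch size
          context.confirm(); // commits the whole batch seen so far
          sinceLastCommit = 0;
        }
        return true;
      }

      @Override
      public boolean onError(Throwable error) {
        throw new RuntimeException(error);
      }
    };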
From the class LocalKafkaCommitLogDescriptorTest, method testObserveWithException:
@Test(timeout = 10000)
public void testObserveWithException() throws InterruptedException {
  final Accessor accessor =
      kafka.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
  final LocalKafkaWriter writer = accessor.newWriter();
  final CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  final AtomicInteger restarts = new AtomicInteger();
  final AtomicReference<Throwable> exc = new AtomicReference<>();
  final CountDownLatch latch = new CountDownLatch(2);
  final StreamElement update =
      StreamElement.upsert(
          entity, attr, UUID.randomUUID().toString(), "key",
          attr.getName(), System.currentTimeMillis(), new byte[] {1, 2});
  final ObserveHandle handle =
      reader.observe(
          "test",
          Position.NEWEST,
          new CommitLogObserver() {

            @Override
            public boolean onNext(StreamElement ingest, OnNextContext context) {
              // count deliveries; the element must be delivered exactly once
              restarts.incrementAndGet();
              throw new RuntimeException("FAIL!");
            }

            @Override
            public void onCompleted() {
              fail("This should not be called");
            }

            @Override
            public boolean onError(Throwable error) {
              exc.set(error);
              latch.countDown();
              // rethrowing terminates consumption instead of restarting it
              throw new RuntimeException(error);
            }
          });
  writer.write(
      update,
      (succ, e) -> {
        assertTrue(succ);
        latch.countDown();
      });
  latch.await();
  assertEquals("FAIL!", exc.get().getMessage());
  assertEquals(1, restarts.get());
  assertEquals(3, handle.getCommittedOffsets().size());
  final List<Long> startedOffsets =
      handle.getCurrentOffsets().stream()
          .map(o -> ((TopicOffset) o).getOffset())
          .filter(o -> o >= 0)
          .collect(Collectors.toList());
  // only one partition has a non-negative (started) offset
  assertEquals(Collections.singletonList(0L), startedOffsets);
}
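Rethrowing from onError terminates consumption, which is why the element is delivered exactly once here. Returning true from onError instead asks the reader to restart from the last committed offset, which permits a bounded-retry variant like the following sketch; the retry limit is an illustrative assumption:

final AtomicInteger attempts = new AtomicInteger();
CommitLogObserver retrying =
    new CommitLogObserver() {

      @Override
      public boolean onNext(StreamElement element, OnNextContext context) {
        context.confirm();
        return true;
      }

      @Override
      public boolean onError(Throwable error) {
        // restart from the last committed offset at most twice
        return attempts.incrementAndGet() < 3;
      }
    };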