Use of cz.o2.proxima.direct.commitlog.CommitLogReader in project proxima-platform by O2-Czech-Republic.
Class TransactionResourceManager, method transitionToActive.
private void transitionToActive(DirectAttributeFamilyDescriptor desc) {
  boolean isAtHead = false;
  // poll until the view's running observer catches up with the commit-log head
  while (!Thread.currentThread().isInterrupted() && !isAtHead) {
    CachedView view = Objects.requireNonNull(stateViews.get(desc));
    CommitLogReader reader = view.getUnderlyingReader();
    Optional<ObserveHandle> handle = view.getRunningHandle();
    if (handle.isPresent()) {
      isAtHead = ObserveHandleUtils.isAtHead(handle.get(), reader);
    }
    if (!isAtHead) {
      // back off briefly before re-checking
      ExceptionUtils.ignoringInterrupted(() -> TimeUnit.MILLISECONDS.sleep(100));
    }
  }
  if (isAtHead) {
    log.info("Transitioned to ACTIVE state for {}", desc);
    activeForFamily.get(desc.getDesc()).set(true);
  }
}
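The loop above polls the running observer roughly ten times a second until ObserveHandleUtils.isAtHead reports that it has caught up with the commit-log head, and only then flips the attribute family to ACTIVE. The same wait-for-head pattern can live in a standalone helper; a minimal sketch, under the assumption that ObserveHandle and ObserveHandleUtils sit in the same package as CommitLogReader (the helper class itself is hypothetical):

import cz.o2.proxima.direct.commitlog.CommitLogReader;
import cz.o2.proxima.direct.commitlog.ObserveHandle;
import cz.o2.proxima.direct.commitlog.ObserveHandleUtils;
import java.util.concurrent.TimeUnit;

class HeadAwait {

  // Hypothetical helper: blocks until the handle reaches the head of the
  // commit log, or the calling thread is interrupted; true means "at head".
  static boolean awaitAtHead(ObserveHandle handle, CommitLogReader reader) {
    while (!Thread.currentThread().isInterrupted()) {
      if (ObserveHandleUtils.isAtHead(handle, reader)) {
        return true;
      }
      try {
        // re-check roughly ten times per second, as transitionToActive does
        TimeUnit.MILLISECONDS.sleep(100);
      } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
      }
    }
    return false;
  }
}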
Use of cz.o2.proxima.direct.commitlog.CommitLogReader in project proxima-platform by O2-Czech-Republic.
Class ReadMe, method consumeCommitLog.
private void consumeCommitLog() {
  Model model = createModel();
  DirectDataOperator operator = model.getRepo().getOrCreateOperator(DirectDataOperator.class);
  CommitLogReader commitLog =
      operator
          .getCommitLogReader(model.getEvent().getDataDescriptor())
          .orElseThrow(
              () ->
                  new IllegalArgumentException(
                      "Missing commit log for " + model.getEvent().getDataDescriptor()));
  commitLog.observe(
      "MyObservationProcess",
      new CommitLogObserver() {

        @Override
        public boolean onError(Throwable error) {
          throw new RuntimeException(error);
        }

        @Override
        public boolean onNext(StreamElement elem, OnNextContext context) {
          log.info("Consumed element {}", elem);
          // commit processing, so that it is not redelivered
          context.confirm();
          // continue processing
          return true;
        }
      });
}
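The observer is registered under a durable name ("MyObservationProcess"), runs asynchronously, and acknowledges each element with context.confirm() so it is not redelivered; returning true from onNext keeps the subscription alive. When the consumer must be stopped programmatically, the handle returned by observe can be retained and closed, as the tests below do with try-with-resources. A minimal sketch (class name hypothetical; import paths assumed to match the CommitLogReader package):

import cz.o2.proxima.direct.commitlog.CommitLogObserver;
import cz.o2.proxima.direct.commitlog.CommitLogReader;
import cz.o2.proxima.direct.commitlog.ObserveHandle;

class StoppableConsumer {

  private ObserveHandle handle;

  // starts the named observation and remembers the handle
  void start(CommitLogReader commitLog, CommitLogObserver observer) {
    handle = commitLog.observe("MyObservationProcess", observer);
  }

  // stops consumption; ObserveHandle is AutoCloseable (see the tests below)
  void stop() {
    if (handle != null) {
      handle.close();
    }
  }
}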
Use of cz.o2.proxima.direct.commitlog.CommitLogReader in project proxima-platform by O2-Czech-Republic.
Class LocalKafkaCommitLogDescriptorTest, method testBulkObservePartitionsSuccess.
@Test(timeout = 10000)
public void testBulkObservePartitionsSuccess() throws InterruptedException {
  Accessor accessor =
      kafka.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
  LocalKafkaWriter writer = accessor.newWriter();
  CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  AtomicInteger restarts = new AtomicInteger();
  AtomicReference<Throwable> exc = new AtomicReference<>();
  AtomicReference<StreamElement> input = new AtomicReference<>();
  CountDownLatch latch = new CountDownLatch(2);
  StreamElement update =
      StreamElement.upsert(
          entity,
          attr,
          UUID.randomUUID().toString(),
          "key",
          attr.getName(),
          System.currentTimeMillis(),
          new byte[] {1, 2});
  final ObserveHandle handle =
      reader.observeBulkPartitions(
          reader.getPartitions(),
          Position.NEWEST,
          new CommitLogObserver() {

            @Override
            public void onRepartition(OnRepartitionContext context) {
              restarts.incrementAndGet();
            }

            @Override
            public boolean onNext(StreamElement ingest, OnNextContext context) {
              input.set(ingest);
              context.confirm();
              latch.countDown();
              return true;
            }

            @Override
            public void onCompleted() {
              fail("This should not be called");
            }

            @Override
            public boolean onError(Throwable error) {
              exc.set(error);
              throw new RuntimeException(error);
            }
          });
  writer.write(
      update,
      (succ, e) -> {
        assertTrue(succ);
        latch.countDown();
      });
  latch.await();
  assertNull(exc.get());
  assertEquals(1, restarts.get());
  assertArrayEquals(update.getValue(), input.get().getValue());
  assertEquals(3, handle.getCommittedOffsets().size());
  assertEquals(
      1L,
      handle.getCommittedOffsets().stream().mapToLong(o -> ((TopicOffset) o).getOffset()).sum());
}
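The final assertions verify that all three partitions report a committed offset and that exactly one record was committed in total. The summing idiom from the last assertion can be factored into a tiny helper; a minimal sketch, assuming the same TopicOffset cast as the test (class name and import paths are hypothetical):

import cz.o2.proxima.direct.commitlog.ObserveHandle;
import cz.o2.proxima.direct.kafka.TopicOffset;

class Offsets {

  // sums the committed Kafka offsets across all partitions of a handle
  static long committedTotal(ObserveHandle handle) {
    return handle.getCommittedOffsets().stream()
        .mapToLong(o -> ((TopicOffset) o).getOffset())
        .sum();
  }
}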
Use of cz.o2.proxima.direct.commitlog.CommitLogReader in project proxima-platform by O2-Czech-Republic.
Class LocalKafkaCommitLogDescriptorTest, method testBulkObserveOffsets.
@Test(timeout = 10000)
public void testBulkObserveOffsets() throws InterruptedException {
  final Accessor accessor =
      kafka.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
  final LocalKafkaWriter writer = accessor.newWriter();
  final CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  final List<KafkaStreamElement> input = new ArrayList<>();
  final AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(3));
  final StreamElement update =
      StreamElement.upsert(
          entity,
          attr,
          UUID.randomUUID().toString(),
          "key",
          attr.getName(),
          System.currentTimeMillis(),
          new byte[] {1, 2});
  final Map<Integer, Offset> currentOffsets = new HashMap<>();
  final CommitLogObserver observer =
      new CommitLogObserver() {

        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
          input.add((KafkaStreamElement) ingest);
          context.confirm();
          latch.get().countDown();
          // terminate after reading the first record
          return false;
        }

        @Override
        public boolean onError(Throwable error) {
          throw new RuntimeException(error);
        }
      };
  try (final ObserveHandle handle =
      reader.observeBulkPartitions(reader.getPartitions(), Position.NEWEST, observer)) {
    // write two elements
    for (int i = 0; i < 2; i++) {
      writer.write(
          update,
          (succ, e) -> {
            assertTrue(succ);
            latch.get().countDown();
          });
    }
    latch.get().await();
    latch.set(new CountDownLatch(1));
    handle
        .getCommittedOffsets()
        .forEach(o -> currentOffsets.put(o.getPartition().getId(), o));
  }
  // each partition has a committed offset here
  assertEquals(3, currentOffsets.size());
  assertEquals(
      currentOffsets.toString(),
      1L,
      currentOffsets.values().stream().mapToLong(o -> ((TopicOffset) o).getOffset()).sum());
  // restart from the stored offsets
  final ObserveHandle handle2 =
      reader.observeBulkOffsets(Lists.newArrayList(currentOffsets.values()), observer);
  latch.get().await();
  assertEquals(2, input.size());
  assertEquals(0, input.get(0).getOffset());
  assertEquals(1, input.get(1).getOffset());
  // offsets 1 and 2 are committed
  assertEquals(
      2L,
      handle2.getCommittedOffsets().stream().mapToLong(o -> ((TopicOffset) o).getOffset()).sum());
}
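The test captures the committed offsets when the first handle closes and then resumes bulk consumption exactly from them via observeBulkOffsets, which is the building block for checkpoint-and-restart consumers. A minimal sketch of that save-and-resume flow (class name hypothetical; the import paths and the List<Offset> return type are assumptions based on the snippet above):

import cz.o2.proxima.direct.commitlog.CommitLogObserver;
import cz.o2.proxima.direct.commitlog.CommitLogReader;
import cz.o2.proxima.direct.commitlog.ObserveHandle;
import cz.o2.proxima.direct.commitlog.Offset;
import cz.o2.proxima.storage.commitlog.Position;
import java.util.List;

class ResumableBulkRead {

  // reads with the given observer and returns the offsets committed so far
  static List<Offset> readAndCheckpoint(CommitLogReader reader, CommitLogObserver observer) {
    try (ObserveHandle handle =
        reader.observeBulkPartitions(reader.getPartitions(), Position.NEWEST, observer)) {
      // ... wait here until the observer has processed its batch ...
      return handle.getCommittedOffsets();
    }
  }

  // resumes bulk consumption from previously persisted offsets
  static ObserveHandle resume(
      CommitLogReader reader, List<Offset> offsets, CommitLogObserver observer) {
    return reader.observeBulkOffsets(offsets, observer);
  }
}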
Use of cz.o2.proxima.direct.commitlog.CommitLogReader in project proxima-platform by O2-Czech-Republic.
Class LocalKafkaCommitLogDescriptorTest, method testEmptyPollWithNoDataMovesWatermark.
@Test(timeout = 10000)
public void testEmptyPollWithNoDataMovesWatermark() throws InterruptedException {
  Accessor accessor =
      kafka.createAccessor(
          direct,
          createTestFamily(
              entity,
              storageUri,
              and(partitionsCfg(3), cfg(Pair.of(KafkaAccessor.EMPTY_POLL_TIME, "1000")))));
  CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  final long now = System.currentTimeMillis();
  AtomicLong watermark = new AtomicLong();
  CountDownLatch latch = new CountDownLatch(30);
  reader
      .observe(
          "test",
          Position.NEWEST,
          new CommitLogObserver() {

            @Override
            public boolean onNext(StreamElement ingest, OnNextContext context) {
              return true;
            }

            @Override
            public void onCompleted() {
              fail("This should not be called");
            }

            @Override
            public boolean onError(Throwable error) {
              throw new RuntimeException(error);
            }

            @Override
            public void onIdle(OnIdleContext context) {
              watermark.set(context.getWatermark());
              latch.countDown();
            }
          })
      .waitUntilReady();
  // no data arrives for two seconds, so polls come back empty
  TimeUnit.SECONDS.sleep(2);
  latch.await();
  // the watermark should have moved despite the absence of data
  assertTrue(watermark.get() > 0);
  assertTrue(watermark.get() < now * 10);
}
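With EMPTY_POLL_TIME set to 1000 ms, every empty poll triggers onIdle, and the watermark keeps advancing even though no element ever arrives; the final assertions only check that it moved past zero and stayed a sane wall-clock value. This is what keeps time-based downstream logic (windows, timers) from stalling on idle partitions. A minimal sketch of an observer that tracks the idle-advanced watermark (class name hypothetical; the StreamElement import path is an assumption):

import cz.o2.proxima.direct.commitlog.CommitLogObserver;
import cz.o2.proxima.storage.StreamElement;

class WatermarkTracker implements CommitLogObserver {

  private volatile long lastWatermark;  // most recent watermark observed

  @Override
  public boolean onNext(StreamElement elem, OnNextContext context) {
    context.confirm();
    return true;
  }

  @Override
  public boolean onError(Throwable error) {
    throw new RuntimeException(error);
  }

  @Override
  public void onIdle(OnIdleContext context) {
    // called on empty polls; the watermark advances even without data
    lastWatermark = Math.max(lastWatermark, context.getWatermark());
  }

  long currentWatermark() {
    return lastWatermark;
  }
}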