Use of cz.o2.proxima.direct.commitlog.CommitLogObserver.OnNextContext in project proxima-platform by O2-Czech-Republic.
Example from class LocalKafkaCommitLogDescriptorTest, method testCustomIdlePolicy:
@Test(timeout = 10000)
public void testCustomIdlePolicy() throws InterruptedException {
  Map<String, Object> cfg = and(partitionsCfg(3), cfg(Pair.of(KafkaAccessor.EMPTY_POLL_TIME, "1000")));
  cfg.put("watermark.idle-policy-factory", FixedWatermarkIdlePolicyFactory.class.getName());
  Accessor accessor = kafka.createAccessor(direct, createTestFamily(entity, storageUri, cfg));
  LocalKafkaWriter writer = accessor.newWriter();
  CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  long now = System.currentTimeMillis();
  final StreamElement update = StreamElement.upsert(
      entity, attr, UUID.randomUUID().toString(), "key", attr.getName(), now + 2000, new byte[] {1, 2});
  AtomicLong watermark = new AtomicLong();
  CountDownLatch latch = new CountDownLatch(2);
  reader
      .observe("test", Position.NEWEST, new CommitLogObserver() {

        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
          watermark.set(context.getWatermark());
          latch.countDown();
          return true;
        }

        @Override
        public void onCompleted() {
          fail("This should not be called");
        }

        @Override
        public boolean onError(Throwable error) {
          throw new RuntimeException(error);
        }
      })
      .waitUntilReady();
  // then we write single element
  writer.write(update, (succ, e) -> {});
  // for two seconds we have empty data
  TimeUnit.SECONDS.sleep(2);
  // finally, last update to save watermark
  writer.write(update, (succ, e) -> {});
  latch.await();
  assertEquals(FixedWatermarkIdlePolicyFactory.FIXED_IDLE_WATERMARK, watermark.get());
}
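The test configures the idle-policy factory purely by class name via the watermark.idle-policy-factory setting, so the factory class itself is not shown above. Below is a minimal sketch of what such a fixed-watermark factory might look like; the WatermarkIdlePolicyFactory and WatermarkIdlePolicy method names and signatures here are assumptions for illustration only, and the constant value is an arbitrary placeholder, not the value used in the test suite.

// Hypothetical sketch only: the interfaces and their methods are assumed,
// not copied from the verified proxima-platform API.
public static class FixedWatermarkIdlePolicyFactory implements WatermarkIdlePolicyFactory {

  // placeholder constant; the real test defines its own fixed watermark value
  public static final long FIXED_IDLE_WATERMARK = 1_500_000_000_000L;

  @Override
  public WatermarkIdlePolicy create() {
    return new WatermarkIdlePolicy() {
      @Override
      public long getIdleWatermark() {
        // always report the same watermark while the source produces no data
        return FIXED_IDLE_WATERMARK;
      }
    };
  }
}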
Use of cz.o2.proxima.direct.commitlog.CommitLogObserver.OnNextContext in project proxima-platform by O2-Czech-Republic.
Example from class LocalKafkaCommitLogDescriptorTest, method testBulkObserveSuccess:
@Test(timeout = 10000)
public void testBulkObserveSuccess() throws InterruptedException {
  Accessor accessor = kafka.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
  LocalKafkaWriter writer = accessor.newWriter();
  CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  final AtomicInteger restarts = new AtomicInteger();
  final AtomicReference<Throwable> exc = new AtomicReference<>();
  final AtomicReference<StreamElement> input = new AtomicReference<>();
  final CountDownLatch latch = new CountDownLatch(2);
  final StreamElement update = StreamElement.upsert(
      entity, attr, UUID.randomUUID().toString(), "key", attr.getName(), System.currentTimeMillis(), new byte[] {1, 2});
  final ObserveHandle handle = reader.observeBulk("test", Position.NEWEST, new CommitLogObserver() {

    @Override
    public void onRepartition(OnRepartitionContext context) {
      restarts.incrementAndGet();
    }

    @Override
    public boolean onNext(StreamElement ingest, OnNextContext context) {
      input.set(ingest);
      context.confirm();
      latch.countDown();
      return true;
    }

    @Override
    public void onCompleted() {
      fail("This should not be called");
    }

    @Override
    public boolean onError(Throwable error) {
      exc.set(error);
      throw new RuntimeException(error);
    }
  });
  writer.write(update, (succ, e) -> {
    assertTrue(succ);
    latch.countDown();
  });
  latch.await();
  assertNull(exc.get());
  assertTrue(restarts.get() > 0);
  assertArrayEquals(update.getValue(), input.get().getValue());
  assertEquals(3, handle.getCommittedOffsets().size());
  assertEquals(
      handle.getCommittedOffsets().toString(),
      1L,
      (long) (Long) handle.getCommittedOffsets().stream().mapToLong(o -> ((TopicOffset) o).getOffset()).sum());
}
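The final assertion relies on bulk-commit semantics: with observeBulk, calling context.confirm() is expected to acknowledge everything consumed since the previous confirm, so the single confirmed write leaves the three partition offsets summing to 1. The sketch below shows how a bulk observer might confirm once per batch rather than per element; the reader variable and observeBulk call mirror the example above, while BATCH_SIZE and the process helper are illustrative assumptions.

// Illustrative sketch: acknowledge once per batch when observing in bulk mode.
final int BATCH_SIZE = 100; // assumed batch size
final AtomicInteger seen = new AtomicInteger();
ObserveHandle batching = reader.observeBulk("batching-consumer", Position.NEWEST, new CommitLogObserver() {

  @Override
  public boolean onNext(StreamElement element, OnNextContext context) {
    process(element); // user-defined processing, assumed helper
    if (seen.incrementAndGet() % BATCH_SIZE == 0) {
      // in bulk mode this confirm is expected to cover the whole batch read so far
      context.confirm();
    }
    return true;
  }

  @Override
  public boolean onError(Throwable error) {
    return false;
  }
});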
Use of cz.o2.proxima.direct.commitlog.CommitLogObserver.OnNextContext in project proxima-platform by O2-Czech-Republic.
Example from class LocalKafkaCommitLogDescriptorTest, method testBulkObserveOffsets2:
@Test(timeout = 10000)
public void testBulkObserveOffsets2() throws InterruptedException {
  final Accessor accessor = kafka.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
  final LocalKafkaWriter writer = accessor.newWriter();
  final CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  final List<KafkaStreamElement> input = new ArrayList<>();
  final AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(3));
  final StreamElement update = StreamElement.upsert(
      entity, attr, UUID.randomUUID().toString(), "key", attr.getName(), System.currentTimeMillis(), new byte[] {1, 2});
  final CommitLogObserver observer = new CommitLogObserver() {

    @Override
    public boolean onNext(StreamElement ingest, OnNextContext context) {
      input.add((KafkaStreamElement) ingest);
      latch.get().countDown();
      // terminate after reading first record
      return false;
    }

    @Override
    public boolean onError(Throwable error) {
      throw new RuntimeException(error);
    }
  };
  final List<Offset> offsets;
  try (final ObserveHandle handle =
      reader.observeBulkPartitions(reader.getPartitions(), Position.NEWEST, observer)) {
    // write two elements
    for (int i = 0; i < 2; i++) {
      writer.write(update, (succ, e) -> {
        assertTrue(succ);
        latch.get().countDown();
      });
    }
    latch.get().await();
    latch.set(new CountDownLatch(1));
    offsets = handle.getCurrentOffsets();
  }
  // restart from old offset
  reader.observeBulkOffsets(Lists.newArrayList(offsets), observer);
  latch.get().await();
  assertEquals(2, input.size());
  assertEquals(0, input.get(0).getOffset());
  assertEquals(0, input.get(1).getOffset());
}
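The same pattern works as a checkpoint-and-resume mechanism outside of tests: capture getCurrentOffsets() from the handle before closing it, and later continue from those offsets with observeBulkOffsets. A condensed sketch using only the calls shown above follows; how the offsets are persisted between runs is left out on purpose.

// Sketch of checkpoint-and-resume, reusing only calls from the example above.
final List<Offset> checkpoint;
try (ObserveHandle handle =
    reader.observeBulkPartitions(reader.getPartitions(), Position.NEWEST, observer)) {
  // ... consume for a while ...
  checkpoint = handle.getCurrentOffsets();
}
// later, possibly after a restart, continue where we left off
reader.observeBulkOffsets(Lists.newArrayList(checkpoint), observer);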
Use of cz.o2.proxima.direct.commitlog.CommitLogObserver.OnNextContext in project proxima-platform by O2-Czech-Republic.
Example from class LocalKafkaCommitLogDescriptorTest, method testHandleRebalanceInProgressException:
@Test(timeout = 10000)
public void testHandleRebalanceInProgressException() throws InterruptedException {
  final AtomicInteger invokedCount = new AtomicInteger();
  final int numElements = 2000;
  final LocalKafkaCommitLogDescriptor descriptor = new LocalKafkaCommitLogDescriptor() {

    @Override
    public Accessor createAccessor(DirectDataOperator direct, AttributeFamilyDescriptor family) {
      return new Accessor(family.getEntity(), family.getStorageUri(), family.getCfg(), id) {

        @Override
        <K, V> KafkaConsumer<K, V> mockKafkaConsumer(
            String name,
            ConsumerGroup group,
            ElementSerializer<K, V> serializer,
            @Nullable Collection<Partition> assignedPartitions,
            @Nullable ConsumerRebalanceListener listener) {

          final Map<TopicPartition, OffsetAndMetadata> committed = new HashMap<>();
          KafkaConsumer<K, V> mock =
              super.mockKafkaConsumer(name, group, serializer, assignedPartitions, listener);
          doAnswer(invocationOnMock -> {
            if (invokedCount.getAndIncrement() == 1) {
              throw new RebalanceInProgressException();
            }
            Map<TopicPartition, OffsetAndMetadata> toCommit = invocationOnMock.getArgument(0);
            committed.putAll(toCommit);
            return null;
          }).when(mock).commitSync(anyMap());
          doAnswer(invocationOnMock -> {
            Set<TopicPartition> parts = invocationOnMock.getArgument(0);
            return parts.stream()
                .map(tp -> Pair.of(tp, committed.get(tp)))
                .filter(p -> p.getSecond() != null)
                .collect(Collectors.toMap(Pair::getFirst, Pair::getSecond));
          }).when(mock).committed(anySet());
          return mock;
        }
      };
    }
  };
  Accessor accessor = descriptor.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(1)));
  LocalKafkaLogReader reader = accessor.newReader(direct.getContext());
  Map<String, StreamElement> observedAfterRepartition = new HashMap<>();
  LocalKafkaWriter<?, ?> writer = accessor.newWriter();
  CountDownLatch latch = new CountDownLatch(1);
  try (ObserveHandle handle = reader.observe("dummy", new CommitLogObserver() {

    @Override
    public boolean onNext(StreamElement ingest, OnNextContext context) {
      observedAfterRepartition.put(ingest.getKey(), ingest);
      context.confirm();
      if (ingest.getKey().equals("last-key")) {
        latch.countDown();
        return false;
      }
      return true;
    }

    @Override
    public boolean onError(Throwable error) {
      return false;
    }
  })) {
    for (int i = 0; i < numElements; i++) {
      writer.write(
          StreamElement.upsert(
              entity, attr, UUID.randomUUID().toString(), "key" + i, attr.getName(),
              System.currentTimeMillis(), new byte[] {}),
          (succ, exc) -> {});
    }
    writer.write(
        StreamElement.upsert(
            entity, attr, UUID.randomUUID().toString(), "last-key", attr.getName(),
            System.currentTimeMillis(), new byte[] {}),
        (succ, exc) -> {});
    latch.await();
  }
  assertEquals(numElements + 1, observedAfterRepartition.size());
  assertTrue(invokedCount.get() > 1);
}
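The mocked consumer throws RebalanceInProgressException from the second commitSync call, and the test asserts that every element is still observed, i.e. the reader treats the failed commit as retryable rather than fatal. The sketch below shows the plain Kafka-client shape of that handling; it is a generic illustration of the pattern, not proxima's internal implementation.

// Generic Kafka-client pattern: a commit rejected with RebalanceInProgressException
// is not fatal; offsets stay uncommitted and the commit is retried later.
void commitWithRetry(KafkaConsumer<?, ?> consumer, Map<TopicPartition, OffsetAndMetadata> offsets) {
  try {
    consumer.commitSync(offsets);
  } catch (RebalanceInProgressException ex) {
    // a rebalance is underway; attempt the commit again after the next poll
  }
}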
Use of cz.o2.proxima.direct.commitlog.CommitLogObserver.OnNextContext in project proxima-platform by O2-Czech-Republic.
Example from class LocalKafkaCommitLogDescriptorTest, method testObserveMovesWatermark:
@Test(timeout = 10000)
public void testObserveMovesWatermark() throws InterruptedException {
  Accessor accessor = kafka.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
  LocalKafkaWriter writer = accessor.newWriter();
  CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  long now = System.currentTimeMillis();
  final UnaryFunction<Integer, StreamElement> update = pos -> StreamElement.upsert(
      entity, attr, UUID.randomUUID().toString(), "key" + pos, attr.getName(), now + pos, new byte[] {1, 2});
  AtomicLong watermark = new AtomicLong();
  CountDownLatch latch = new CountDownLatch(100);
  reader
      .observe("test", Position.NEWEST, new CommitLogObserver() {

        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
          watermark.set(context.getWatermark());
          latch.countDown();
          return true;
        }

        @Override
        public void onCompleted() {
          fail("This should not be called");
        }

        @Override
        public boolean onError(Throwable error) {
          throw new RuntimeException(error);
        }
      })
      .waitUntilReady();
  for (int i = 0; i < 100; i++) {
    writer.write(update.apply(i), (succ, e) -> {});
  }
  latch.await();
  assertTrue(watermark.get() > 0);
  assertTrue(watermark.get() < now * 10);
}
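The watermark exposed through OnNextContext.getWatermark() is what downstream logic typically uses to decide when all data up to some event time has (approximately) arrived. A minimal sketch follows, using only the observe and getWatermark calls shown above; the boundary value and the flush helper are assumptions for illustration.

// Sketch: buffer elements and flush once the watermark passes a chosen event-time boundary.
final long boundary = System.currentTimeMillis(); // assumed cut-off for illustration
final List<StreamElement> buffer = new ArrayList<>();
reader.observe("windowing", Position.NEWEST, new CommitLogObserver() {

  @Override
  public boolean onNext(StreamElement element, OnNextContext context) {
    buffer.add(element);
    if (context.getWatermark() >= boundary) {
      // data with event time before the boundary is considered complete
      flush(buffer); // user-defined flush, assumed helper
      buffer.clear();
      return false; // stop observing once the boundary has been reached
    }
    return true;
  }

  @Override
  public boolean onError(Throwable error) {
    return false;
  }
});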