Use of cz.o2.proxima.direct.core.DirectDataOperator in project proxima-platform by O2-Czech-Republic.
From the class LocalKafkaCommitLogDescriptorTest, method testHandleRebalanceInProgressException. The test verifies that the commit-log reader recovers when KafkaConsumer.commitSync throws RebalanceInProgressException: every written element is still observed and the failed commit is retried.
@Test(timeout = 10000)
public void testHandleRebalanceInProgressException() throws InterruptedException {
  final AtomicInteger invokedCount = new AtomicInteger();
  final int numElements = 2000;
  final LocalKafkaCommitLogDescriptor descriptor =
      new LocalKafkaCommitLogDescriptor() {
        @Override
        public Accessor createAccessor(DirectDataOperator direct, AttributeFamilyDescriptor family) {
          return new Accessor(family.getEntity(), family.getStorageUri(), family.getCfg(), id) {
            @Override
            <K, V> KafkaConsumer<K, V> mockKafkaConsumer(
                String name,
                ConsumerGroup group,
                ElementSerializer<K, V> serializer,
                @Nullable Collection<Partition> assignedPartitions,
                @Nullable ConsumerRebalanceListener listener) {

              final Map<TopicPartition, OffsetAndMetadata> committed = new HashMap<>();
              KafkaConsumer<K, V> mock =
                  super.mockKafkaConsumer(name, group, serializer, assignedPartitions, listener);
              // Throw RebalanceInProgressException on the second commit only;
              // record every successful commit into the local map.
              doAnswer(
                      invocationOnMock -> {
                        if (invokedCount.getAndIncrement() == 1) {
                          throw new RebalanceInProgressException();
                        }
                        Map<TopicPartition, OffsetAndMetadata> toCommit =
                            invocationOnMock.getArgument(0);
                        committed.putAll(toCommit);
                        return null;
                      })
                  .when(mock)
                  .commitSync(anyMap());
              // Answer queries for committed offsets from the same map.
              doAnswer(
                      invocationOnMock -> {
                        Set<TopicPartition> parts = invocationOnMock.getArgument(0);
                        return parts.stream()
                            .map(tp -> Pair.of(tp, committed.get(tp)))
                            .filter(p -> p.getSecond() != null)
                            .collect(Collectors.toMap(Pair::getFirst, Pair::getSecond));
                      })
                  .when(mock)
                  .committed(anySet());
              return mock;
            }
          };
        }
      };
  Accessor accessor =
      descriptor.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(1)));
  LocalKafkaLogReader reader = accessor.newReader(direct.getContext());
  Map<String, StreamElement> observedAfterRepartition = new HashMap<>();
  LocalKafkaWriter<?, ?> writer = accessor.newWriter();
  CountDownLatch latch = new CountDownLatch(1);
  try (ObserveHandle handle =
      reader.observe(
          "dummy",
          new CommitLogObserver() {
            @Override
            public boolean onNext(StreamElement ingest, OnNextContext context) {
              observedAfterRepartition.put(ingest.getKey(), ingest);
              context.confirm();
              if (ingest.getKey().equals("last-key")) {
                latch.countDown();
                return false;
              }
              return true;
            }

            @Override
            public boolean onError(Throwable error) {
              return false;
            }
          })) {

    for (int i = 0; i < numElements; i++) {
      writer.write(
          StreamElement.upsert(entity, attr, UUID.randomUUID().toString(), "key" + i,
              attr.getName(), System.currentTimeMillis(), new byte[] {}),
          (succ, exc) -> {});
    }
    // Terminal element; the observer counts down the latch when it arrives.
    writer.write(
        StreamElement.upsert(entity, attr, UUID.randomUUID().toString(), "last-key",
            attr.getName(), System.currentTimeMillis(), new byte[] {}),
        (succ, exc) -> {});
    latch.await();
  }
  assertEquals(numElements + 1, observedAfterRepartition.size());
  assertTrue(invokedCount.get() > 1);
}
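The key pattern in the mock above is consecutive-call stubbing: the Mockito answer succeeds on every call except one designated call. A minimal standalone sketch of the same idea, written against the plain Kafka Consumer interface (the class and method names here are illustrative, not part of proxima-platform):

import static org.mockito.ArgumentMatchers.anyMap;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

import java.util.concurrent.atomic.AtomicInteger;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.errors.RebalanceInProgressException;

class CommitRetrySketch {

  /** Returns a mocked consumer whose first commitSync fails; all later ones succeed. */
  @SuppressWarnings("unchecked")
  static Consumer<byte[], byte[]> failingOnceConsumer() {
    Consumer<byte[], byte[]> consumer = mock(Consumer.class);
    AtomicInteger calls = new AtomicInteger();
    doAnswer(
            invocation -> {
              if (calls.getAndIncrement() == 0) {
                // Simulate a rebalance hitting exactly one commit.
                throw new RebalanceInProgressException();
              }
              return null;
            })
        .when(consumer)
        .commitSync(anyMap());
    return consumer;
  }
}

A consumer loop that catches RebalanceInProgressException and retries the commit then succeeds on the next attempt, which is what the assertion invokedCount.get() > 1 confirms for the real reader in the test above.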
Use of cz.o2.proxima.direct.core.DirectDataOperator in project proxima-platform by O2-Czech-Republic.
From the class LocalKafkaCommitLogDescriptorTest, method testObserveOffsetsWithLogRoll. The test simulates a Kafka log roll by letting the mocked beginning offsets jump to the end offsets after two calls, and verifies that a bulk-offset observer still terminates through onCompleted.
@Test(timeout = 10000)
public void testObserveOffsetsWithLogRoll() throws InterruptedException {
  String topic = Utils.topic(storageUri);
  Map<TopicPartition, Long> endOffsets =
      IntStream.range(0, 3)
          .mapToObj(i -> new TopicPartition(topic, i))
          .collect(Collectors.toMap(Function.identity(), e -> 2L));
  Map<TopicPartition, Long> beginningOffsets =
      IntStream.range(0, 3)
          .mapToObj(i -> new TopicPartition(topic, i))
          .collect(Collectors.toMap(Function.identity(), e -> 0L));
  final LocalKafkaCommitLogDescriptor descriptor =
      new LocalKafkaCommitLogDescriptor() {
        @Override
        public Accessor createAccessor(DirectDataOperator direct, AttributeFamilyDescriptor family) {
          AtomicInteger invokedCount = new AtomicInteger();
          return new Accessor(family.getEntity(), family.getStorageUri(), family.getCfg(), id) {
            @Override
            <K, V> KafkaConsumer<K, V> mockKafkaConsumer(
                String name,
                ConsumerGroup group,
                ElementSerializer<K, V> serializer,
                @Nullable Collection<Partition> assignedPartitions,
                @Nullable ConsumerRebalanceListener listener) {

              KafkaConsumer<K, V> mock =
                  super.mockKafkaConsumer(name, group, serializer, assignedPartitions, listener);
              // Simulate a log roll: after two calls the beginning offsets
              // jump to the end offsets, as if retention deleted old data.
              doAnswer(
                      invocationOnMock -> {
                        if (invokedCount.incrementAndGet() > 2) {
                          return endOffsets;
                        }
                        return beginningOffsets;
                      })
                  .when(mock)
                  .beginningOffsets(any());
              doAnswer(invocationOnMock -> endOffsets).when(mock).endOffsets(any());
              return mock;
            }
          };
        }
      };
  final Accessor accessor =
      descriptor.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
  final CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  final CountDownLatch latch = new CountDownLatch(1);
  final CommitLogObserver observer =
      new CommitLogObserver() {
        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
          context.confirm();
          return false;
        }

        @Override
        public boolean onError(Throwable error) {
          throw new RuntimeException(error);
        }

        @Override
        public void onCompleted() {
          latch.countDown();
        }
      };
  try (final ObserveHandle handle =
      reader.observeBulkOffsets(
          reader.fetchOffsets(Position.OLDEST, reader.getPartitions()).values(), true, observer)) {
    latch.await();
  }
}
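The log roll is driven entirely by the mock: the first two calls to beginningOffsets report offset 0, every later call reports the end offsets, as if retention had already deleted everything the observer was about to read. A standalone sketch of that stubbing idea (class and method names are illustrative):

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;

class LogRollSketch {

  /** Mocked consumer whose beginning offsets jump to the end offsets after two calls. */
  @SuppressWarnings("unchecked")
  static Consumer<byte[], byte[]> logRollingConsumer(
      Map<TopicPartition, Long> beginning, Map<TopicPartition, Long> end) {
    Consumer<byte[], byte[]> consumer = mock(Consumer.class);
    AtomicInteger calls = new AtomicInteger();
    doAnswer(invocation -> calls.incrementAndGet() > 2 ? end : beginning)
        .when(consumer)
        .beginningOffsets(any());
    doAnswer(invocation -> end).when(consumer).endOffsets(any());
    return consumer;
  }
}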
Use of cz.o2.proxima.direct.core.DirectDataOperator in project proxima-platform by O2-Czech-Republic.
From the class LocalKafkaCommitLogDescriptorTest, method testBatchObserveWithLogRoll. Same log-roll simulation as above, but the observer subscribes through observeBulk instead of observeBulkOffsets.
@Test(timeout = 10000)
public void testBatchObserveWithLogRoll() throws InterruptedException {
  String topic = Utils.topic(storageUri);
  Map<TopicPartition, Long> endOffsets =
      IntStream.range(0, 3)
          .mapToObj(i -> new TopicPartition(topic, i))
          .collect(Collectors.toMap(Function.identity(), e -> 2L));
  Map<TopicPartition, Long> beginningOffsets =
      IntStream.range(0, 3)
          .mapToObj(i -> new TopicPartition(topic, i))
          .collect(Collectors.toMap(Function.identity(), e -> 0L));
  final LocalKafkaCommitLogDescriptor descriptor =
      new LocalKafkaCommitLogDescriptor() {
        @Override
        public Accessor createAccessor(DirectDataOperator direct, AttributeFamilyDescriptor family) {
          AtomicInteger invokedCount = new AtomicInteger();
          return new Accessor(family.getEntity(), family.getStorageUri(), family.getCfg(), id) {
            @Override
            <K, V> KafkaConsumer<K, V> mockKafkaConsumer(
                String name,
                ConsumerGroup group,
                ElementSerializer<K, V> serializer,
                @Nullable Collection<Partition> assignedPartitions,
                @Nullable ConsumerRebalanceListener listener) {

              KafkaConsumer<K, V> mock =
                  super.mockKafkaConsumer(name, group, serializer, assignedPartitions, listener);
              doAnswer(
                      invocationOnMock -> {
                        if (invokedCount.incrementAndGet() > 2) {
                          return endOffsets;
                        }
                        return beginningOffsets;
                      })
                  .when(mock)
                  .beginningOffsets(any());
              doAnswer(invocationOnMock -> endOffsets).when(mock).endOffsets(any());
              return mock;
            }
          };
        }
      };
  final Accessor accessor =
      descriptor.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
  final CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
  final CountDownLatch latch = new CountDownLatch(1);
  final CommitLogObserver observer =
      new CommitLogObserver() {
        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
          context.confirm();
          return false;
        }

        @Override
        public boolean onError(Throwable error) {
          throw new RuntimeException(error);
        }

        @Override
        public void onCompleted() {
          latch.countDown();
        }
      };
  try (final ObserveHandle handle = reader.observeBulk("dummy", Position.OLDEST, true, observer)) {
    latch.await();
  }
}
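This test differs from testObserveOffsetsWithLogRoll above only in the subscription entry point: observeBulk("dummy", Position.OLDEST, true, observer) subscribes by consumer name, whereas the previous test passed explicit offsets to observeBulkOffsets. The simulated log roll and the completion path through onCompleted are identical.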
Use of cz.o2.proxima.direct.core.DirectDataOperator in project proxima-platform by O2-Czech-Republic.
From the class BeamDataOperatorTest, method testStreamFromOldestWithKafkaTest. The test writes 10,000 elements through the direct operator into a kafka-test storage and asserts that a Beam pipeline reading from Position.OLDEST sees exactly that many elements.
@Test
public void testStreamFromOldestWithKafkaTest() {
  Config config =
      ConfigFactory.parseMap(
              Collections.singletonMap(
                  "attributeFamilies.event-storage-stream.storage", "kafka-test://dummy/events"))
          .withFallback(ConfigFactory.load("test-reference.conf"));
  Repository repo = Repository.ofTest(config);
  EntityDescriptor event = repo.getEntity("event");
  AttributeDescriptor<?> data = event.getAttribute("data");
  int numElements = 10000;
  long now = System.currentTimeMillis();
  try (DirectDataOperator direct = repo.getOrCreateOperator(DirectDataOperator.class);
      BeamDataOperator operator = repo.getOrCreateOperator(BeamDataOperator.class)) {
    // Write the test elements through the direct operator ...
    for (int i = 0; i < numElements; i++) {
      direct
          .getWriter(data)
          .orElseThrow(() -> new IllegalStateException("Missing writer for data"))
          .write(
              StreamElement.upsert(event, data, UUID.randomUUID().toString(),
                  UUID.randomUUID().toString(), data.getName(), now + i, new byte[] {}),
              (succ, exc) -> {});
    }
    // ... and read them back as a Beam PCollection from the oldest position.
    Pipeline p = Pipeline.create();
    PCollection<StreamElement> input = operator.getStream(p, Position.OLDEST, true, true, data);
    PCollection<Long> count = input.apply(Count.globally());
    PAssert.that(count).containsInAnyOrder(Collections.singletonList((long) numElements));
    assertNotNull(p.run());
  }
}
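The assertion uses a common Beam testing pattern: count a bounded PCollection and compare the result with PAssert. A minimal self-contained sketch of that pattern on in-memory data (the class name and values are hypothetical):

import java.util.Arrays;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.testing.PAssert;
import org.apache.beam.sdk.transforms.Count;
import org.apache.beam.sdk.transforms.Create;
import org.apache.beam.sdk.values.PCollection;

class CountAssertSketch {

  static void run() {
    Pipeline p = Pipeline.create();
    PCollection<String> input = p.apply(Create.of(Arrays.asList("a", "b", "c")));
    // Count.globally() collapses the PCollection into a single element count.
    PAssert.that(input.apply(Count.globally())).containsInAnyOrder(3L);
    p.run().waitUntilFinish();
  }
}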
Use of cz.o2.proxima.direct.core.DirectDataOperator in project proxima-platform by O2-Czech-Republic.
From the class BatchLogSourceFunctionTest, method testRunAndClose. The test opens a Flink source function backed by a batch-log reader, cancels it, and checks that the run thread exits cleanly.
@Test
void testRunAndClose() throws Exception {
  final Repository repository = Repository.ofTest(ConfigFactory.parseString(MODEL));
  final AttributeDescriptor<?> attribute = repository.getEntity("test").getAttribute("data");
  final BatchLogSourceFunction<StreamElement> sourceFunction =
      new BatchLogSourceFunction<StreamElement>(
          repository.asFactory(), Collections.singletonList(attribute), ResultExtractor.identity()) {
        @Override
        BatchLogReader createLogReader(List<AttributeDescriptor<?>> attributeDescriptors) {
          final DirectDataOperator direct = repository.getOrCreateOperator(DirectDataOperator.class);
          final ListBatchReader reader = ListBatchReader.ofPartitioned(direct.getContext());
          return OffsetTrackingBatchLogReader.of(reader);
        }
      };
  final AbstractStreamOperatorTestHarness<StreamElement> testHarness =
      createTestHarness(sourceFunction, 1, 0);
  testHarness.initializeEmptyState();
  testHarness.open();
  final CheckedThread runThread =
      new CheckedThread("run") {
        @Override
        public void go() throws Exception {
          sourceFunction.run(
              new TestSourceContext<StreamElement>() {
                @Override
                public void collect(StreamElement element) {
                  // No-op; the test only exercises the run/cancel lifecycle.
                }
              });
        }
      };
  runThread.start();
  sourceFunction.awaitRunning();
  sourceFunction.cancel();
  testHarness.close();
  // Make sure the run thread finishes normally; sync() rethrows
  // any exception thrown inside go().
  runThread.sync();
}
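CheckedThread here is Flink's test utility (org.apache.flink.core.testutils.CheckedThread): an exception thrown inside go() is captured and rethrown by sync(), so a failure inside sourceFunction.run(...) surfaces in the test thread at runThread.sync(). A minimal sketch of that behavior in isolation (the thrown exception is just an example):

import org.apache.flink.core.testutils.CheckedThread;

class CheckedThreadSketch {

  static void demo() throws Exception {
    CheckedThread thread =
        new CheckedThread("demo") {
          @Override
          public void go() throws Exception {
            // Any exception thrown here is stored by CheckedThread ...
            throw new IllegalStateException("boom");
          }
        };
    thread.start();
    try {
      // ... and rethrown to the caller by sync().
      thread.sync();
    } catch (IllegalStateException expected) {
      // Expected in this sketch; the test above expects sync() to return normally.
    }
  }
}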