Usage of cz.o2.proxima.direct.view.CachedView in the project proxima-platform by O2-Czech-Republic: class LocalKafkaCommitLogDescriptorTest, method testCachedViewWrite.
/**
 * Verifies that elements written through a {@link CachedView} are immediately readable
 * via {@code get}, both before and after the view is assigned all partitions.
 */
@Test(timeout = 10000)
public void testCachedViewWrite() throws InterruptedException {
  Accessor accessor =
      kafka.createAccessor(
          direct,
          createTestFamily(entity, storageUri, partitionsCfg(3, FirstBytePartitioner.class)));
  CachedView view = Optionals.get(accessor.getCachedView(context()));
  List<StreamElement> updates =
      Arrays.asList(
          StreamElement.upsert(
              entity, attr, UUID.randomUUID().toString(), "key1", attr.getName(),
              System.currentTimeMillis(), new byte[] {1, 2}),
          StreamElement.upsert(
              entity, attr, UUID.randomUUID().toString(), "key2", attr.getName(),
              System.currentTimeMillis(), new byte[] {2, 3}));
  // Derive the latch count from the list so the two cannot drift apart if more
  // elements are added later (matches the style of testCachedViewWritePreUpdate).
  CountDownLatch latch = new CountDownLatch(updates.size());
  updates.forEach(
      update ->
          view.write(
              update,
              (succ, exc) -> {
                assertTrue("Exception: " + exc, succ);
                latch.countDown();
              }));
  latch.await();
  // Writes made through the view must be visible even before assign().
  assertTrue(view.get("key2", attr).isPresent());
  assertTrue(view.get("key1", attr).isPresent());
  view.assign(IntStream.range(0, 3).mapToObj(this::getPartition).collect(Collectors.toList()));
  // The cached data must survive (re)assignment of all partitions.
  assertTrue(view.get("key2", attr).isPresent());
  assertTrue(view.get("key1", attr).isPresent());
}
Usage of cz.o2.proxima.direct.view.CachedView in the project proxima-platform by O2-Czech-Republic: class LocalKafkaCommitLogDescriptorTest, method testCachedViewWriteAndDelete.
/**
 * Verifies that a delete written through a {@link CachedView} with a newer timestamp
 * shadows an earlier upsert of the same key, both before and after partition assignment.
 */
@Test(timeout = 10000)
public void testCachedViewWriteAndDelete() throws InterruptedException {
  Accessor accessor =
      kafka.createAccessor(
          direct,
          createTestFamily(entity, storageUri, partitionsCfg(3, FirstBytePartitioner.class)));
  CachedView view = Optionals.get(accessor.getCachedView(context()));
  long now = System.currentTimeMillis();
  // Upsert at (now - 1000), then delete at (now): the delete is newer and must win.
  List<StreamElement> updates =
      Arrays.asList(
          StreamElement.upsert(
              entity, attr, UUID.randomUUID().toString(), "key1", attr.getName(),
              now - 1000, new byte[] {1, 2}),
          StreamElement.delete(
              entity, attr, UUID.randomUUID().toString(), "key1", attr.getName(), now));
  // Derive the latch count from the list so the two cannot drift apart if more
  // elements are added later (matches the style of testCachedViewWritePreUpdate).
  CountDownLatch latch = new CountDownLatch(updates.size());
  updates.forEach(
      update ->
          view.write(
              update,
              (succ, exc) -> {
                assertTrue("Exception: " + exc, succ);
                latch.countDown();
              }));
  latch.await();
  // The delete must hide the value before assign()...
  assertFalse(view.get("key1", attr).isPresent());
  view.assign(IntStream.range(0, 3).mapToObj(this::getPartition).collect(Collectors.toList()));
  // ...and still hide it after all partitions are assigned.
  assertFalse(view.get("key1", attr).isPresent());
}
Usage of cz.o2.proxima.direct.view.CachedView in the project proxima-platform by O2-Czech-Republic: class LocalKafkaCommitLogDescriptorTest, method testCachedViewWritePreUpdate.
/**
 * Verifies that elements written through the view before {@code assign()} are replayed
 * to the supplied update consumer exactly once per element during assignment.
 */
@Test(timeout = 10000)
public void testCachedViewWritePreUpdate() throws InterruptedException {
  Accessor accessor =
      kafka.createAccessor(
          direct,
          createTestFamily(entity, storageUri, partitionsCfg(3, FirstBytePartitioner.class)));
  CachedView view = Optionals.get(accessor.getCachedView(context()));
  List<StreamElement> elements =
      Arrays.asList(
          StreamElement.upsert(
              entity, attr, UUID.randomUUID().toString(), "key1", attr.getName(),
              System.currentTimeMillis(), new byte[] {1, 2}),
          StreamElement.upsert(
              entity, attr, UUID.randomUUID().toString(), "key2", attr.getName(),
              System.currentTimeMillis(), new byte[] {2, 3}));
  CountDownLatch written = new CountDownLatch(elements.size());
  for (StreamElement element : elements) {
    view.write(
        element,
        (succ, exc) -> {
          assertTrue("Exception: " + exc, succ);
          written.countDown();
        });
  }
  written.await();
  // Count how many times the update consumer fires while assigning all partitions.
  AtomicInteger updateCount = new AtomicInteger();
  view.assign(
      IntStream.range(0, 3).mapToObj(this::getPartition).collect(Collectors.toList()),
      (element, cached) -> updateCount.incrementAndGet());
  // One callback per previously written element.
  assertEquals(2, updateCount.get());
}
Usage of cz.o2.proxima.direct.view.CachedView in the project proxima-platform by O2-Czech-Republic: class LocalKafkaCommitLogDescriptorTest, method testCachedView.
/**
 * Verifies that a view assigned to all partitions serves data written through a plain
 * writer, including an overwrite of the same key that arrives after assignment.
 */
@Test(timeout = 10000)
public void testCachedView() throws InterruptedException {
  final Accessor accessor =
      kafka.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
  final LocalKafkaWriter writer = accessor.newWriter();
  final CachedView view = Optionals.get(accessor.getCachedView(context()));
  // First write goes in before the view is assigned; a dedicated latch per write
  // replaces the original AtomicReference<CountDownLatch> juggling.
  final CountDownLatch firstWrite = new CountDownLatch(1);
  StreamElement element =
      StreamElement.upsert(
          entity, attr, UUID.randomUUID().toString(), "key", attr.getName(),
          System.currentTimeMillis(), new byte[] {1, 2});
  writer.write(
      element,
      (succ, exc) -> {
        assertTrue(succ);
        firstWrite.countDown();
      });
  firstWrite.await();
  view.assign(IntStream.range(0, 3).mapToObj(this::getPartition).collect(Collectors.toList()));
  // The pre-assignment write must be served from the cache.
  assertArrayEquals(new byte[] {1, 2}, Optionals.get(view.get("key", attr)).getValue());
  // Overwrite the same key after assignment.
  element =
      StreamElement.upsert(
          entity, attr, UUID.randomUUID().toString(), "key", attr.getName(),
          System.currentTimeMillis(), new byte[] {1, 2, 3});
  final CountDownLatch secondWrite = new CountDownLatch(1);
  writer.write(
      element,
      (succ, exc) -> {
        assertTrue(succ);
        secondWrite.countDown();
      });
  secondWrite.await();
  // Give the view's background consumption a moment to apply the update.
  TimeUnit.SECONDS.sleep(1);
  assertArrayEquals(new byte[] {1, 2, 3}, Optionals.get(view.get("key", attr)).getValue());
}
Usage of cz.o2.proxima.direct.view.CachedView in the project proxima-platform by O2-Czech-Republic: class TransactionResourceManager, method runObservations.
/**
 * Observe all transactional families with given observer.
 *
 * @param name name of the observer (will be appended with name of the family)
 * @param updateConsumer consumer applied to updates of the state families' cached views
 *     (passed to {@code CachedView#assign})
 * @param requestObserver the observer (need not be synchronized)
 */
@Override
public void runObservations(String name, BiConsumer<StreamElement, Pair<Long, Object>> updateConsumer, CommitLogObserver requestObserver) {
final CommitLogObserver effectiveObserver;
// Wrap the observer in a thread pool only when it is declared thread-safe;
// otherwise it must be used as-is.
if (isNotThreadSafe(requestObserver)) {
effectiveObserver = requestObserver;
} else {
effectiveObserver = new ThreadPooledObserver(direct.getContext().getExecutorService(), requestObserver, getDeclaredParallelism(requestObserver).orElse(Runtime.getRuntime().availableProcessors()));
}
// Collect the distinct sets of transactional-manager families over all
// transactional attributes of all entities.
List<Set<String>> families = direct.getRepository().getAllEntities().filter(EntityDescriptor::isTransactional).flatMap(e -> e.getAllAttributes().stream()).filter(a -> a.getTransactionMode() != TransactionMode.NONE).map(AttributeDescriptor::getTransactionalManagerFamilies).map(Sets::newHashSet).distinct().collect(Collectors.toList());
// Block at the end of the method until every family's observation is started.
CountDownLatch initializedLatch = new CountDownLatch(families.size());
families.stream().map(this::toRequestStatePair).forEach(p -> {
DirectAttributeFamilyDescriptor requestFamily = p.getFirst();
DirectAttributeFamilyDescriptor stateFamily = p.getSecond();
String consumerName = name + "-" + requestFamily.getDesc().getName();
log.info("Starting to observe family {} with URI {} and associated state family {} as {}", requestFamily, requestFamily.getDesc().getStorageUri(), stateFamily, consumerName);
CommitLogReader reader = Optionals.get(requestFamily.getCommitLogReader());
// Lazily create and cache one assigned view per state family; subsequent
// request families sharing the same state family reuse it.
CachedView view = stateViews.get(stateFamily);
if (view == null) {
view = Optionals.get(stateFamily.getCachedView());
// TTL after which cached state entries are cleaned up.
Duration ttl = Duration.ofMillis(cleanupIntervalMs);
stateViews.put(stateFamily, view);
view.assign(view.getPartitions(), updateConsumer, ttl);
}
initializedLatch.countDown();
serverObservedFamilies.put(requestFamily, reader.observe(consumerName, repartitionHookForBeingActive(stateFamily, reader.getPartitions().size(), effectiveObserver)));
});
// Do not return before all observations have been initialized.
ExceptionUtils.unchecked(initializedLatch::await);
}
Aggregations