
Example 21 with ObserveHandle

Use of cz.o2.proxima.direct.batch.ObserveHandle in project proxima-platform by O2-Czech-Republic.

The class OffsetTrackingBatchLogReader, method observe.

@Override
public ObserveHandle observe(List<Partition> partitions, List<AttributeDescriptor<?>> attributes, BatchLogObserver observer) {
    final OffsetTrackingBatchLogObserver wrappedObserver = new OffsetTrackingBatchLogObserver(observer);
    final ObserveHandle handle = delegate.observe(partitions, attributes, wrappedObserver);
    return new OffsetTrackingObserveHandle() {

        @Override
        public List<Offset> getCurrentOffsets() {
            final Map<Partition, Offset> result = new HashMap<>();
            partitions.forEach(p -> result.put(p, Offset.of(p, -1, false)));
            wrappedObserver.getConsumedOffsets().forEach((p, o) -> result.merge(p, o, OffsetTrackingBatchLogReader::mergeOffsets));
            return new ArrayList<>(result.values());
        }

        @Override
        public void close() {
            handle.close();
        }
    };
}
Also used : Partition(cz.o2.proxima.storage.Partition) ObserveHandle(cz.o2.proxima.direct.batch.ObserveHandle) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Offset(cz.o2.proxima.direct.batch.Offset)
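
The handle returned above is an OffsetTrackingObserveHandle, so a caller can inspect how far the batch scan has progressed while it is still running. A minimal sketch of such a caller, assuming the surrounding OffsetTrackingBatchLogReader is in scope as reader, that OffsetTrackingObserveHandle is visible to the caller, and that checkpointOffsets is a hypothetical persistence callback; only observe and getCurrentOffsets come from the code above.

ObserveHandle handle = reader.observe(partitions, attributes, observer);
if (handle instanceof OffsetTrackingObserveHandle) {
    // Partitions that have not produced any element yet are reported as Offset.of(partition, -1, false).
    List<Offset> progress = ((OffsetTrackingObserveHandle) handle).getCurrentOffsets();
    checkpointOffsets(progress);
}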

Example 22 with ObserveHandle

Use of cz.o2.proxima.direct.batch.ObserveHandle in project proxima-platform by O2-Czech-Republic.

The class BlobLogReaderTest, method testObservePartitionsCancelled.

@Test
public void testObservePartitionsCancelled() throws InterruptedException {
    List<Pair<Long, Long>> stamps = Lists.newArrayList(Pair.of(1234566000000L, 1234566000000L + 3_600_000L), Pair.of(1234566000000L + 3_600_000L, (1234566000000L + 2 * 3_600_000L)));
    writePartitions(stamps.stream().map(p -> (p.getSecond() + p.getFirst()) / 2).collect(Collectors.toList()));
    BlobReader reader = accessor.new BlobReader(context);
    CountDownLatch latch = new CountDownLatch(1);
    AtomicReference<ObserveHandle> handle = new AtomicReference<>();
    handle.set(reader.observe(reader.getPartitions(), Collections.singletonList(status), new BatchLogObserver() {

        @Override
        public boolean onNext(StreamElement element) {
            handle.get().close();
            return true;
        }

        @Override
        public void onCancelled() {
            latch.countDown();
        }

        @Override
        public void onCompleted() {
            fail("onCompleted should not have been called");
        }
    }));
    latch.await();
}
Also used : ObserveHandle(cz.o2.proxima.direct.batch.ObserveHandle) StreamElement(cz.o2.proxima.storage.StreamElement) AtomicReference(java.util.concurrent.atomic.AtomicReference) CountDownLatch(java.util.concurrent.CountDownLatch) BatchLogObserver(cz.o2.proxima.direct.batch.BatchLogObserver) BlobReader(cz.o2.proxima.direct.blob.TestBlobStorageAccessor.BlobReader) Pair(cz.o2.proxima.util.Pair) Test(org.junit.Test)
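
The test above documents the contract that closing the handle from inside the observer terminates the observation through onCancelled rather than onCompleted. A minimal sketch of reusing that contract to read just the first element of a batch partition; the reader and attribute variables are assumptions, and the callbacks mirror the API shown above.

AtomicReference<StreamElement> first = new AtomicReference<>();
CountDownLatch done = new CountDownLatch(1);
AtomicReference<ObserveHandle> handle = new AtomicReference<>();
handle.set(reader.observe(reader.getPartitions(), Collections.singletonList(attribute), new BatchLogObserver() {

    @Override
    public boolean onNext(StreamElement element) {
        first.compareAndSet(null, element);
        // Closing the handle stops the scan; in the test above this path ends in onCancelled().
        handle.get().close();
        return true;
    }

    @Override
    public void onCancelled() {
        done.countDown();
    }

    @Override
    public void onCompleted() {
        // Counted down here as well so the caller never blocks, whichever terminal callback fires.
        done.countDown();
    }
}));
done.await();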

Example 23 with ObserveHandle

Use of cz.o2.proxima.direct.batch.ObserveHandle in project proxima-platform by O2-Czech-Republic.

The class CassandraDBAccessorTest, method testBatchReader.

@Test(timeout = 10000)
public void testBatchReader() throws InterruptedException {
    TestDBAccessor accessor = new TestDBAccessor(entity, URI.create("cassandra://host:9042/table/?primary=data"), getCfg(TestCqlFactory.class, 2));
    CassandraLogReader reader = accessor.newBatchReader(direct.getContext());
    int numElements = 100;
    ResultSet result = mockResultSet(numElements);
    accessor.setRes(result);
    AtomicInteger numConsumed = new AtomicInteger();
    CountDownLatch latch = new CountDownLatch(1);
    try (ObserveHandle handle = reader.observe(reader.getPartitions(), Collections.singletonList(attr), new BatchLogObserver() {

        @Override
        public boolean onNext(StreamElement element) {
            numConsumed.incrementAndGet();
            return true;
        }

        @Override
        public boolean onError(Throwable error) {
            while (latch.getCount() > 0) {
                latch.countDown();
            }
            throw new RuntimeException(error);
        }

        @Override
        public void onCompleted() {
            latch.countDown();
        }
    })) {
        latch.await();
        assertEquals(numElements, numConsumed.get());
        List<Statement> executed = accessor.getExecuted();
        assertEquals(2, executed.size());
    }
    assertTrue("Expected empty CLUSTER_MAP, got " + CassandraDBAccessor.getCLUSTER_MAP(), CassandraDBAccessor.getCLUSTER_MAP().isEmpty());
}
Also used : ObserveHandle(cz.o2.proxima.direct.batch.ObserveHandle) Statement(com.datastax.driver.core.Statement) PreparedStatement(com.datastax.driver.core.PreparedStatement) BoundStatement(com.datastax.driver.core.BoundStatement) StreamElement(cz.o2.proxima.storage.StreamElement) CountDownLatch(java.util.concurrent.CountDownLatch) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ResultSet(com.datastax.driver.core.ResultSet) BatchLogObserver(cz.o2.proxima.direct.batch.BatchLogObserver) Test(org.junit.Test)
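
Because the batch ObserveHandle is AutoCloseable, the try-with-resources block above guarantees the observation is released even if an assertion throws. A minimal sketch of the same pattern packaged as a reusable counting helper; the BatchLogReader parameter type and the method wrapper itself are assumptions, while the observer callbacks follow the API used in this test.

long countElements(BatchLogReader reader, AttributeDescriptor<?> attribute) throws InterruptedException {
    AtomicLong consumed = new AtomicLong();
    CountDownLatch finished = new CountDownLatch(1);
    try (ObserveHandle ignored = reader.observe(reader.getPartitions(), Collections.singletonList(attribute), new BatchLogObserver() {

        @Override
        public boolean onNext(StreamElement element) {
            consumed.incrementAndGet();
            return true;
        }

        @Override
        public boolean onError(Throwable error) {
            // Unblock the caller before propagating the failure.
            finished.countDown();
            throw new RuntimeException(error);
        }

        @Override
        public void onCompleted() {
            finished.countDown();
        }
    })) {
        finished.await();
    }
    return consumed.get();
}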

Example 24 with ObserveHandle

Use of cz.o2.proxima.direct.commitlog.ObserveHandle in project proxima-platform by O2-Czech-Republic.

The class InMemStorageTest, method testObservePartitions.

@Test(timeout = 10000)
public void testObservePartitions() throws InterruptedException {
    InMemStorage storage = new InMemStorage();
    DataAccessor accessor = storage.createAccessor(direct, createFamilyDescriptor(URI.create("inmem:///inmemstoragetest")));
    CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(direct.getContext()));
    AttributeWriterBase writer = Optionals.get(accessor.getWriter(direct.getContext()));
    AtomicReference<CountDownLatch> latch = new AtomicReference<>();
    ObserveHandle handle = reader.observePartitions(reader.getPartitions(), new CommitLogObserver() {

        @Override
        public void onRepartition(OnRepartitionContext context) {
            assertEquals(1, context.partitions().size());
            latch.set(new CountDownLatch(1));
        }

        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
            assertEquals(0, context.getPartition().getId());
            assertEquals("key", ingest.getKey());
            context.confirm();
            latch.get().countDown();
            return false;
        }

        @Override
        public boolean onError(Throwable error) {
            throw new RuntimeException(error);
        }
    });
    assertTrue(ObserveHandleUtils.isAtHead(handle, reader));
    writer.online().write(StreamElement.upsert(entity, data, UUID.randomUUID().toString(), "key", data.getName(), System.currentTimeMillis(), new byte[] { 1, 2, 3 }), (succ, exc) -> {
    });
    latch.get().await();
}
Also used : ObserveHandle(cz.o2.proxima.direct.commitlog.ObserveHandle) DataAccessor(cz.o2.proxima.direct.core.DataAccessor) CommitLogReader(cz.o2.proxima.direct.commitlog.CommitLogReader) AttributeWriterBase(cz.o2.proxima.direct.core.AttributeWriterBase) StreamElement(cz.o2.proxima.storage.StreamElement) AtomicReference(java.util.concurrent.atomic.AtomicReference) CountDownLatch(java.util.concurrent.CountDownLatch) CommitLogObserver(cz.o2.proxima.direct.commitlog.CommitLogObserver) Test(org.junit.Test)
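
Once latch.get().await() returns, the single written element has been confirmed and the observation can be shut down. A minimal sketch of that shutdown, using only getCurrentOffsets() and close() from the commit-log ObserveHandle as exercised in these examples; the log statement is a placeholder.

// Inspect how far the consumer got before releasing it.
List<Offset> lastConsumed = handle.getCurrentOffsets();
System.out.println("Stopping observation at offsets: " + lastConsumed);
// Closing the handle stops delivery of any further elements to the observer.
handle.close();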

Example 25 with ObserveHandle

Use of cz.o2.proxima.direct.commitlog.ObserveHandle in project proxima-platform by O2-Czech-Republic.

The class InMemStorageTest, method testObserveSinglePartitionOutOfMultiplePartitions.

@Test
public void testObserveSinglePartitionOutOfMultiplePartitions() throws InterruptedException {
    final int numPartitions = 3;
    final InMemStorage storage = new InMemStorage();
    final DataAccessor accessor = storage.createAccessor(direct, createFamilyDescriptor(URI.create("inmem:///test"), numPartitions));
    final CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(direct.getContext()));
    final AttributeWriterBase writer = Optionals.get(accessor.getWriter(direct.getContext()));
    final int numElements = 999;
    final ConcurrentMap<Partition, Long> partitionHistogram = new ConcurrentHashMap<>();
    // Elements are uniformly distributed between partitions.
    final CountDownLatch elementsReceived = new CountDownLatch(numElements / numPartitions);
    // Start observer.
    final List<Partition> consumedPartitions = reader.getPartitions().subList(0, 1);
    final ObserveHandle observeHandle = reader.observePartitions(reader.getPartitions().subList(0, 1), new CommitLogObserver() {

        @Override
        public void onRepartition(OnRepartitionContext context) {
            assertEquals(numPartitions, context.partitions().size());
        }

        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
            partitionHistogram.merge(context.getPartition(), 1L, Long::sum);
            context.confirm();
            elementsReceived.countDown();
            return elementsReceived.getCount() > 0;
        }

        @Override
        public boolean onError(Throwable error) {
            throw new RuntimeException(error);
        }
    });
    // Write data.
    final Partitioner partitioner = new KeyAttributePartitioner();
    final Map<Partition, Long> expectedPartitionHistogram = new HashMap<>();
    for (int i = 0; i < numElements; i++) {
        final StreamElement element = StreamElement.upsert(entity, data, UUID.randomUUID().toString(), "key_" + i, data.getName(), System.currentTimeMillis(), new byte[] { 1, 2, 3 });
        expectedPartitionHistogram.merge(Partition.of(Partitioners.getTruncatedPartitionId(partitioner, element, numPartitions)), 1L, Long::sum);
        writer.online().write(element, CommitCallback.noop());
    }
    assertEquals(3, expectedPartitionHistogram.size());
    // Wait for all elements to be received.
    elementsReceived.await();
    assertEquals(1, partitionHistogram.size());
    assertEquals(1, observeHandle.getCurrentOffsets().size());
    assertEquals(expectedPartitionHistogram.get(Iterables.getOnlyElement(consumedPartitions)), partitionHistogram.get(Iterables.getOnlyElement(consumedPartitions)));
}
Also used : Partition(cz.o2.proxima.storage.Partition) ObserveHandle(cz.o2.proxima.direct.commitlog.ObserveHandle) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) DataAccessor(cz.o2.proxima.direct.core.DataAccessor) CommitLogReader(cz.o2.proxima.direct.commitlog.CommitLogReader) AttributeWriterBase(cz.o2.proxima.direct.core.AttributeWriterBase) StreamElement(cz.o2.proxima.storage.StreamElement) KeyAttributePartitioner(cz.o2.proxima.storage.commitlog.KeyAttributePartitioner) CountDownLatch(java.util.concurrent.CountDownLatch) CommitLogObserver(cz.o2.proxima.direct.commitlog.CommitLogObserver) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) KeyAttributePartitioner(cz.o2.proxima.storage.commitlog.KeyAttributePartitioner) Partitioner(cz.o2.proxima.storage.commitlog.Partitioner) Test(org.junit.Test)
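
The expected histogram above is computed with the same partitioner that the in-memory storage applies to incoming writes, so the test can predict which partition each key lands in. A minimal sketch of that computation for a single element; the entity and data descriptors are assumed to be in scope, as in the test.

Partitioner partitioner = new KeyAttributePartitioner();
StreamElement element = StreamElement.upsert(entity, data, UUID.randomUUID().toString(), "my-key", data.getName(), System.currentTimeMillis(), new byte[] { 1 });
// Hash the element with the partitioner and truncate the result into [0, numPartitions).
int partitionId = Partitioners.getTruncatedPartitionId(partitioner, element, 3);
Partition target = Partition.of(partitionId);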

Aggregations

StreamElement (cz.o2.proxima.storage.StreamElement): 58 usages
ObserveHandle (cz.o2.proxima.direct.commitlog.ObserveHandle): 56 usages
Test (org.junit.Test): 52 usages
CommitLogObserver (cz.o2.proxima.direct.commitlog.CommitLogObserver): 47 usages
CommitLogReader (cz.o2.proxima.direct.commitlog.CommitLogReader): 45 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 43 usages
ArrayList (java.util.ArrayList): 38 usages
Offset (cz.o2.proxima.direct.commitlog.Offset): 32 usages
AtomicReference (java.util.concurrent.atomic.AtomicReference): 30 usages
List (java.util.List): 29 usages
Partition (cz.o2.proxima.storage.Partition): 26 usages
HashMap (java.util.HashMap): 26 usages
DirectDataOperator (cz.o2.proxima.direct.core.DirectDataOperator): 25 usages
AttributeDescriptor (cz.o2.proxima.repository.AttributeDescriptor): 24 usages
UUID (java.util.UUID): 24 usages
WatermarkEstimator (cz.o2.proxima.time.WatermarkEstimator): 23 usages
Collections (java.util.Collections): 23 usages
Collectors (java.util.stream.Collectors): 23 usages
Repository (cz.o2.proxima.repository.Repository): 22 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 22 usages