
Example 16 with OnNextContext

Use of cz.o2.proxima.direct.commitlog.CommitLogObserver.OnNextContext in project proxima-platform by O2-Czech-Republic.

From the class LocalKafkaCommitLogDescriptorTest, method testBulkObservePartitionsResetOffsetsSuccess.

@Test(timeout = 10000)
public void testBulkObservePartitionsResetOffsetsSuccess() throws InterruptedException {
    Accessor accessor = kafka.createAccessor(direct, createTestFamily(entity, storageUri, partitionsCfg(3)));
    LocalKafkaWriter writer = accessor.newWriter();
    CommitLogReader reader = Optionals.get(accessor.getCommitLogReader(context()));
    AtomicInteger restarts = new AtomicInteger();
    AtomicReference<Throwable> exc = new AtomicReference<>();
    AtomicReference<StreamElement> input = new AtomicReference<>();
    AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(2));
    StreamElement update = StreamElement.upsert(entity, attr, UUID.randomUUID().toString(), "key", attr.getName(), System.currentTimeMillis(), new byte[] { 1, 2 });
    final ObserveHandle handle = reader.observePartitions(reader.getPartitions(), Position.NEWEST, new CommitLogObserver() {

        @Override
        public void onRepartition(OnRepartitionContext context) {
            restarts.incrementAndGet();
        }

        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
            input.set(ingest);
            context.confirm();
            latch.get().countDown();
            return true;
        }

        @Override
        public boolean onError(Throwable error) {
            exc.set(error);
            throw new RuntimeException(error);
        }
    });
    handle.waitUntilReady();
    writer.write(update, (succ, e) -> {
        assertTrue(succ);
        latch.get().countDown();
    });
    latch.get().await();
    latch.set(new CountDownLatch(1));
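    // rewind every partition back to offset 0, so the element written above is replayed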
    handle.resetOffsets(
        reader.getPartitions().stream()
            .map(p -> (PartitionWithTopic) p)
            .map(p -> new TopicOffset(
                new PartitionWithTopic(p.getTopic(), p.getId()), 0, Watermarks.MIN_WATERMARK))
            .collect(Collectors.toList()));
    latch.get().await();
    assertEquals(
        1L,
        handle.getCommittedOffsets().stream().mapToLong(o -> ((TopicOffset) o).getOffset()).sum());
}
Also used : Arrays(java.util.Arrays) LocalKafkaLogReader(cz.o2.proxima.direct.kafka.LocalKafkaCommitLogDescriptor.LocalKafkaLogReader) LocalKafkaWriter(cz.o2.proxima.direct.kafka.LocalKafkaCommitLogDescriptor.LocalKafkaWriter) Partition(cz.o2.proxima.storage.Partition) EntityDescriptor(cz.o2.proxima.repository.EntityDescriptor) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) CachedView(cz.o2.proxima.direct.view.CachedView) StreamElement(cz.o2.proxima.storage.StreamElement) WatermarkEstimator(cz.o2.proxima.time.WatermarkEstimator) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Pair(cz.o2.proxima.util.Pair) Serde(org.apache.kafka.common.serialization.Serde) UnaryFunction(cz.o2.proxima.functional.UnaryFunction) Duration(java.time.Duration) Map(java.util.Map) Serdes(org.apache.kafka.common.serialization.Serdes) URI(java.net.URI) WatermarkEstimatorFactory(cz.o2.proxima.time.WatermarkEstimatorFactory) Optionals(cz.o2.proxima.util.Optionals) WatermarkIdlePolicyFactory(cz.o2.proxima.time.WatermarkIdlePolicyFactory) TopicPartition(org.apache.kafka.common.TopicPartition) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) ObserveHandle(cz.o2.proxima.direct.commitlog.ObserveHandle) UUID(java.util.UUID) Accessor(cz.o2.proxima.direct.kafka.LocalKafkaCommitLogDescriptor.Accessor) Collectors(java.util.stream.Collectors) RebalanceInProgressException(org.apache.kafka.common.errors.RebalanceInProgressException) Executors(java.util.concurrent.Executors) Serializable(java.io.Serializable) CommitLogObservers(cz.o2.proxima.direct.commitlog.CommitLogObservers) Objects(java.util.Objects) CountDownLatch(java.util.concurrent.CountDownLatch) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) List(java.util.List) Slf4j(lombok.extern.slf4j.Slf4j) ConfigRepository(cz.o2.proxima.repository.ConfigRepository) Stream(java.util.stream.Stream) KeyValue(cz.o2.proxima.direct.randomaccess.KeyValue) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) OnNextContext(cz.o2.proxima.direct.commitlog.CommitLogObserver.OnNextContext) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) DirectDataOperator(cz.o2.proxima.direct.core.DirectDataOperator) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) ArgumentMatchers.any(org.mockito.ArgumentMatchers.any) Context(cz.o2.proxima.direct.core.Context) IntStream(java.util.stream.IntStream) TestUtils.createTestFamily(cz.o2.proxima.util.TestUtils.createTestFamily) KeyPartitioner(cz.o2.proxima.storage.commitlog.KeyPartitioner) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) ArgumentMatchers.anyMap(org.mockito.ArgumentMatchers.anyMap) HashMap(java.util.HashMap) OnlineAttributeWriter(cz.o2.proxima.direct.core.OnlineAttributeWriter) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) Iterators(com.google.common.collect.Iterators) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Lists(com.google.common.collect.Lists) ConfigFactory(com.typesafe.config.ConfigFactory) Watermarks(cz.o2.proxima.time.Watermarks) Factory(cz.o2.proxima.functional.Factory) CommitLogReader(cz.o2.proxima.direct.commitlog.CommitLogReader) ExecutorService(java.util.concurrent.ExecutorService) Nullable(javax.annotation.Nullable) WatermarkIdlePolicy(cz.o2.proxima.time.WatermarkIdlePolicy) Before(org.junit.Before) 
Properties(java.util.Properties) Repository(cz.o2.proxima.repository.Repository) Iterator(java.util.Iterator) AttributeDescriptor(cz.o2.proxima.repository.AttributeDescriptor) MoreObjects(com.google.common.base.MoreObjects) CommitLogObserver(cz.o2.proxima.direct.commitlog.CommitLogObserver) Test(org.junit.Test) AttributeFamilyDescriptor(cz.o2.proxima.repository.AttributeFamilyDescriptor) Offset(cz.o2.proxima.direct.commitlog.Offset) TimeUnit(java.util.concurrent.TimeUnit) Mockito(org.mockito.Mockito) AtomicLong(java.util.concurrent.atomic.AtomicLong) AttributeDescriptorBase(cz.o2.proxima.repository.AttributeDescriptorBase) Partitioner(cz.o2.proxima.storage.commitlog.Partitioner) Assert(org.junit.Assert) Comparator(java.util.Comparator) Collections(java.util.Collections) Position(cz.o2.proxima.storage.commitlog.Position)
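
The test above writes one element, waits for it to be observed, then rewinds every partition to offset zero via resetOffsets and waits for the element to be replayed, so the committed offsets sum to 1 again. The snippet confirms each element synchronously inside onNext; the OnNextContext can just as well be captured and confirmed later. A minimal sketch of deferred confirmation, assuming the same reader plus an executor are available, and with process() as a hypothetical side effect:

ObserveHandle asyncHandle = reader.observe("deferred-confirm", new CommitLogObserver() {

    @Override
    public boolean onNext(StreamElement element, OnNextContext context) {
        // hand the element to a worker thread and confirm the offset only
        // after the side effect has completed
        executor.submit(() -> {
            process(element);
            context.confirm();
        });
        return true;
    }

    @Override
    public boolean onError(Throwable error) {
        return false;
    }
});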

Example 17 with OnNextContext

Use of cz.o2.proxima.direct.commitlog.CommitLogObserver.OnNextContext in project proxima-platform by O2-Czech-Republic.

From the class TransactionLogObserver, method processTransactionRequest.

private void processTransactionRequest(String transactionId, String requestId, Request request, OnNextContext context) {
    log.debug("Processing request to {} with {} for transaction {}", requestId, request, transactionId);
    State currentState = manager.getCurrentState(transactionId);
    @Nullable State newState = transitionState(transactionId, currentState, request);
    if (newState != null) {
        // we have successfully computed new state, produce response
        Response response = getResponseForNewState(request, currentState, newState);
        manager.ensureTransactionOpen(transactionId, newState);
        manager.writeResponseAndUpdateState(transactionId, newState, requestId, response, context::commit);
    } else if (request.getFlags() == Request.Flags.OPEN && (currentState.getFlags() == State.Flags.OPEN || currentState.getFlags() == State.Flags.COMMITTED)) {
        manager.writeResponseAndUpdateState(transactionId, currentState, requestId, Response.forRequest(request).duplicate(currentState.getSequentialId()), context::commit);
    } else {
        log.warn("Unexpected {} request for transaction {} seqId {} when the state is {}. " + "Refusing to respond, because the correct response is unknown.", request.getFlags(), transactionId, currentState.getSequentialId(), currentState.getFlags());
        context.confirm();
    }
}
Also used : Response(cz.o2.proxima.transaction.Response) State(cz.o2.proxima.transaction.State) Nullable(javax.annotation.Nullable)
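
Note that on the success path the observer never calls confirm() itself: writeResponseAndUpdateState receives context::commit as its completion callback, so the input offset is committed, or failed, only once the response has actually been written. A minimal sketch of the same pattern, assuming a hypothetical writeAsync helper whose callback takes the (success, error) pair expected by OnNextContext.commit:

@Override
public boolean onNext(StreamElement element, OnNextContext context) {
    // commit the offset only after the downstream write completes;
    // a failed write propagates the error so the element can be redelivered
    writeAsync(element, (succ, err) -> context.commit(succ, err));
    return true;
}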

Example 18 with OnNextContext

Use of cz.o2.proxima.direct.commitlog.CommitLogObserver.OnNextContext in project proxima-platform by O2-Czech-Republic.

From the class PubSubReaderTest, method testObserve.

@Test(timeout = 10000)
public void testObserve() throws InterruptedException {
    long now = System.currentTimeMillis();
    Deque<PubsubMessage> inputs =
        new LinkedList<>(
            Arrays.asList(
                update("key1", "attr", new byte[] { 1, 2 }, now),
                delete("key2", "attr", now + 1000),
                deleteWildcard("key3", wildcard, now)));
    reader.setSupplier(() -> {
        if (inputs.isEmpty()) {
            LockSupport.park();
        }
        return inputs.pop();
    });
    List<StreamElement> elems = new ArrayList<>();
    AtomicBoolean cancelled = new AtomicBoolean();
    CountDownLatch latch = new CountDownLatch(3);
    CommitLogObserver observer = new CommitLogObserver() {

        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
            elems.add(ingest);
            context.confirm();
            latch.countDown();
            return true;
        }

        @Override
        public void onCancelled() {
            cancelled.set(true);
        }

        @Override
        public boolean onError(Throwable error) {
            throw new RuntimeException(error);
        }
    };
    try (ObserveHandle handle = reader.observe("dummy", observer)) {
        latch.await();
    }
    assertEquals(3, elems.size());
    StreamElement elem = elems.get(0);
    assertEquals("key1", elem.getKey());
    assertEquals("attr", elem.getAttribute());
    assertFalse(elem.isDelete());
    assertFalse(elem.isDeleteWildcard());
    assertArrayEquals(new byte[] { 1, 2 }, elem.getValue());
    assertEquals(now, elem.getStamp());
    elem = elems.get(1);
    assertEquals("key2", elem.getKey());
    assertEquals("attr", elem.getAttribute());
    assertTrue(elem.isDelete());
    assertFalse(elem.isDeleteWildcard());
    assertEquals(now + 1000L, elem.getStamp());
    elem = elems.get(2);
    assertEquals("key3", elem.getKey());
    assertEquals(wildcard.toAttributePrefix() + "*", elem.getAttribute());
    assertTrue(elem.isDelete());
    assertTrue(elem.isDeleteWildcard());
    assertEquals(now, elem.getStamp());
    assertTrue(cancelled.get());
    assertEquals(Sets.newHashSet(0, 1, 2), reader.acked);
}
Also used : ObserveHandle(cz.o2.proxima.direct.commitlog.ObserveHandle) ArrayList(java.util.ArrayList) StreamElement(cz.o2.proxima.storage.StreamElement) CountDownLatch(java.util.concurrent.CountDownLatch) PubsubMessage(com.google.pubsub.v1.PubsubMessage) LinkedList(java.util.LinkedList) CommitLogObserver(cz.o2.proxima.direct.commitlog.CommitLogObserver) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Test(org.junit.Test)
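
All observers in these tests either confirm or rethrow from onError. When processing of a single element can fail, the offset should not be confirmed; a minimal sketch, assuming OnNextContext also exposes fail(Throwable) as the counterpart of confirm(), and with process() as a hypothetical processing step:

@Override
public boolean onNext(StreamElement element, OnNextContext context) {
    try {
        process(element);
        context.confirm();
        return true;
    } catch (Exception ex) {
        // do not confirm the offset; report the failure and stop processing
        context.fail(ex);
        return false;
    }
}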

Example 19 with OnNextContext

Use of cz.o2.proxima.direct.commitlog.CommitLogObserver.OnNextContext in project proxima-platform by O2-Czech-Republic.

From the class PubSubReaderTest, method testPartitionsSplit.

@Test
public void testPartitionsSplit() throws InterruptedException {
    List<Partition> partitions = reader.getPartitions();
    assertEquals(1, partitions.size());
    partitions = new ArrayList<>(partitions.get(0).split(3));
    assertEquals(3, partitions.size());
    reader.setSupplier(() -> {
        LockSupport.park();
        return null;
    });
    CommitLogObserver observer = new CommitLogObserver() {

        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
            context.confirm();
            return false;
        }

        @Override
        public void onCancelled() {
        }

        @Override
        public boolean onError(Throwable error) {
            throw new RuntimeException(error);
        }
    };
    try (ObserveHandle handle = reader.observeBulkPartitions(partitions, Position.NEWEST, observer)) {
        handle.waitUntilReady();
    }
}
Also used : CommitLogObserver(cz.o2.proxima.direct.commitlog.CommitLogObserver) Partition(cz.o2.proxima.storage.Partition) ObserveHandle(cz.o2.proxima.direct.commitlog.ObserveHandle) StreamElement(cz.o2.proxima.storage.StreamElement) Test(org.junit.Test)
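
observeBulkPartitions consumes the split partitions in bulk mode, where confirming a context is generally understood to acknowledge everything consumed since the last commit rather than the single element alone. A rough sketch of batching commits under that assumption, with buffer() and clearBuffer() as hypothetical helpers:

CommitLogObserver batching = new CommitLogObserver() {

    int uncommitted = 0;

    @Override
    public boolean onNext(StreamElement element, OnNextContext context) {
        buffer(element);
        if (++uncommitted >= 100) {
            // in bulk mode this acknowledges the whole batch read so far
            context.confirm();
            uncommitted = 0;
        }
        return true;
    }

    @Override
    public void onRepartition(OnRepartitionContext context) {
        // partitions were reassigned; drop locally buffered, uncommitted work
        clearBuffer();
        uncommitted = 0;
    }

    @Override
    public boolean onError(Throwable error) {
        return false;
    }
};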

Example 20 with OnNextContext

Use of cz.o2.proxima.direct.commitlog.CommitLogObserver.OnNextContext in project proxima-platform by O2-Czech-Republic.

From the class PubSubReaderTest, method testObserveCommittedOffset.

@Test
public void testObserveCommittedOffset() throws InterruptedException {
    long now = System.currentTimeMillis();
    Deque<PubsubMessage> inputs =
        new LinkedList<>(
            Arrays.asList(
                update("key1", "attr", new byte[] { 1, 2 }, now),
                delete("key2", "attr", now + 1000),
                deleteWildcard("key3", wildcard, now)));
    reader.setSupplier(() -> {
        if (inputs.isEmpty()) {
            LockSupport.park();
        }
        return inputs.pop();
    });
    CountDownLatch latch = new CountDownLatch(1);
    ObserveHandle handle = reader.observe("dummy", new CommitLogObserver() {

        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
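            // advance the reader's (test-supplied) time source; the assertion below expects a positive watermark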
            timestampSupplier.addAndGet(1000);
            context.confirm();
            latch.countDown();
            return false;
        }

        @Override
        public void onCancelled() {
        }

        @Override
        public boolean onError(Throwable error) {
            throw new RuntimeException(error);
        }
    });
    latch.await();
    assertEquals(1, handle.getCommittedOffsets().size());
    assertTrue(handle.getCommittedOffsets().get(0).getWatermark() > 0);
}
Also used : CommitLogObserver(cz.o2.proxima.direct.commitlog.CommitLogObserver) ObserveHandle(cz.o2.proxima.direct.commitlog.ObserveHandle) StreamElement(cz.o2.proxima.storage.StreamElement) CountDownLatch(java.util.concurrent.CountDownLatch) PubsubMessage(com.google.pubsub.v1.PubsubMessage) LinkedList(java.util.LinkedList) Test(org.junit.Test)
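
The watermark asserted here is read from the committed offset; it can also be observed per element while processing. A minimal sketch, assuming OnNextContext exposes getWatermark() and that a Slf4j log field is available as in the other classes shown:

@Override
public boolean onNext(StreamElement element, OnNextContext context) {
    long watermark = context.getWatermark();
    if (element.getStamp() < watermark) {
        // the element arrived behind the current watermark; downstream logic may treat it as late
        log.warn("Element {} is behind watermark {}", element, watermark);
    }
    context.confirm();
    return true;
}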

Aggregations

StreamElement (cz.o2.proxima.storage.StreamElement): 83
Test (org.junit.Test): 73
CountDownLatch (java.util.concurrent.CountDownLatch): 67
CommitLogObserver (cz.o2.proxima.direct.commitlog.CommitLogObserver): 64
CommitLogReader (cz.o2.proxima.direct.commitlog.CommitLogReader): 50
OnNextContext (cz.o2.proxima.direct.commitlog.CommitLogObserver.OnNextContext): 40
ArrayList (java.util.ArrayList): 39
ObserveHandle (cz.o2.proxima.direct.commitlog.ObserveHandle): 35
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 30
AtomicReference (java.util.concurrent.atomic.AtomicReference): 29
EntityDescriptor (cz.o2.proxima.repository.EntityDescriptor): 28
Accessor (cz.o2.proxima.direct.kafka.LocalKafkaCommitLogDescriptor.Accessor): 26
List (java.util.List): 26
UUID (java.util.UUID): 25
AtomicLong (java.util.concurrent.atomic.AtomicLong): 24
ConfigFactory (com.typesafe.config.ConfigFactory): 23
AttributeDescriptor (cz.o2.proxima.repository.AttributeDescriptor): 23
Collections (java.util.Collections): 23
Offset (cz.o2.proxima.direct.commitlog.Offset): 22
DirectDataOperator (cz.o2.proxima.direct.core.DirectDataOperator): 22