
Example 1 with BiConsumer

Use of cz.o2.proxima.functional.BiConsumer in project proxima-platform by O2-Czech-Republic.

From class TransactionResourceManager, method runObservations:

/**
 * Observe all transactional families with given observer.
 *
 * @param name name of the observer (the family name will be appended to it)
 * @param updateConsumer consumer of updates applied to the cached transaction state views
 * @param requestObserver the observer (need not be synchronized)
 */
@Override
public void runObservations(
    String name,
    BiConsumer<StreamElement, Pair<Long, Object>> updateConsumer,
    CommitLogObserver requestObserver) {
    final CommitLogObserver effectiveObserver;
    if (isNotThreadSafe(requestObserver)) {
        effectiveObserver = requestObserver;
    } else {
        effectiveObserver =
            new ThreadPooledObserver(
                direct.getContext().getExecutorService(),
                requestObserver,
                getDeclaredParallelism(requestObserver)
                    .orElse(Runtime.getRuntime().availableProcessors()));
    }
    List<Set<String>> families =
        direct
            .getRepository()
            .getAllEntities()
            .filter(EntityDescriptor::isTransactional)
            .flatMap(e -> e.getAllAttributes().stream())
            .filter(a -> a.getTransactionMode() != TransactionMode.NONE)
            .map(AttributeDescriptor::getTransactionalManagerFamilies)
            .map(Sets::newHashSet)
            .distinct()
            .collect(Collectors.toList());
    CountDownLatch initializedLatch = new CountDownLatch(families.size());
    families.stream().map(this::toRequestStatePair).forEach(p -> {
        DirectAttributeFamilyDescriptor requestFamily = p.getFirst();
        DirectAttributeFamilyDescriptor stateFamily = p.getSecond();
        String consumerName = name + "-" + requestFamily.getDesc().getName();
        log.info("Starting to observe family {} with URI {} and associated state family {} as {}", requestFamily, requestFamily.getDesc().getStorageUri(), stateFamily, consumerName);
        CommitLogReader reader = Optionals.get(requestFamily.getCommitLogReader());
        CachedView view = stateViews.get(stateFamily);
        if (view == null) {
            view = Optionals.get(stateFamily.getCachedView());
            Duration ttl = Duration.ofMillis(cleanupIntervalMs);
            stateViews.put(stateFamily, view);
            view.assign(view.getPartitions(), updateConsumer, ttl);
        }
        initializedLatch.countDown();
        serverObservedFamilies.put(
            requestFamily,
            reader.observe(
                consumerName,
                repartitionHookForBeingActive(
                    stateFamily, reader.getPartitions().size(), effectiveObserver)));
    });
    ExceptionUtils.unchecked(initializedLatch::await);
}
Also used : Arrays(java.util.Arrays) Partition(cz.o2.proxima.storage.Partition) EntityDescriptor(cz.o2.proxima.repository.EntityDescriptor) Flags(cz.o2.proxima.transaction.Request.Flags) Wildcard(cz.o2.proxima.repository.EntityAwareAttributeDescriptor.Wildcard) InetAddress(java.net.InetAddress) ExceptionUtils(cz.o2.proxima.util.ExceptionUtils) CachedView(cz.o2.proxima.direct.view.CachedView) StreamElement(cz.o2.proxima.storage.StreamElement) ForwardingObserver(cz.o2.proxima.direct.commitlog.CommitLogObservers.ForwardingObserver) Pair(cz.o2.proxima.util.Pair) Duration(java.time.Duration) Map(java.util.Map) Optionals(cz.o2.proxima.util.Optionals) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) TransactionMode(cz.o2.proxima.repository.TransactionMode) ThreadSafe(javax.annotation.concurrent.ThreadSafe) ObserveHandle(cz.o2.proxima.direct.commitlog.ObserveHandle) CommitCallback(cz.o2.proxima.direct.core.CommitCallback) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) BiConsumer(cz.o2.proxima.functional.BiConsumer) Objects(java.util.Objects) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) Slf4j(lombok.extern.slf4j.Slf4j) Internal(cz.o2.proxima.annotations.Internal) KeyValue(cz.o2.proxima.direct.randomaccess.KeyValue) Response(cz.o2.proxima.transaction.Response) State(cz.o2.proxima.transaction.State) Optional(java.util.Optional) DirectDataOperator(cz.o2.proxima.direct.core.DirectDataOperator) Getter(lombok.Getter) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) OnlineAttributeWriter(cz.o2.proxima.direct.core.OnlineAttributeWriter) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Lists(com.google.common.collect.Lists) AccessLevel(lombok.AccessLevel) Regular(cz.o2.proxima.repository.EntityAwareAttributeDescriptor.Regular) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) ObserveHandleUtils(cz.o2.proxima.direct.commitlog.ObserveHandleUtils) Commit(cz.o2.proxima.transaction.Commit) CommitLogReader(cz.o2.proxima.direct.commitlog.CommitLogReader) Nullable(javax.annotation.Nullable) DeclaredThreadSafe(cz.o2.proxima.annotations.DeclaredThreadSafe) Classpath(cz.o2.proxima.util.Classpath) Request(cz.o2.proxima.transaction.Request) AttributeDescriptor(cz.o2.proxima.repository.AttributeDescriptor) CommitLogObserver(cz.o2.proxima.direct.commitlog.CommitLogObserver) KeyAttribute(cz.o2.proxima.transaction.KeyAttribute) AttributeFamilyDescriptor(cz.o2.proxima.repository.AttributeFamilyDescriptor) UnknownHostException(java.net.UnknownHostException) TimeUnit(java.util.concurrent.TimeUnit) DirectAttributeFamilyDescriptor(cz.o2.proxima.direct.core.DirectAttributeFamilyDescriptor) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Comparator(java.util.Comparator) Collections(java.util.Collections)
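
The updateConsumer parameter is the BiConsumer this page documents: for every element cached into a transaction state family it receives the StreamElement together with a Pair holding a sequential id and the stored value. A minimal sketch of a caller; `manager` and `requestObserver` are hypothetical names introduced here for illustration:

BiConsumer<StreamElement, Pair<Long, Object>> updateConsumer =
    (element, cached) -> {
      // element: the update written to the state family
      // cached: pair of sequential id and deserialized value (assumed semantics)
      log.debug("Cached {} as {}", element, cached);
    };
// `manager` is assumed to be a TransactionResourceManager instance
manager.runObservations("transaction-observer", updateConsumer, requestObserver);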

Example 2 with BiConsumer

Use of cz.o2.proxima.functional.BiConsumer in project proxima-platform by O2-Czech-Republic.

From class LocalCachedPartitionedView, method assign:

@Override
public void assign(
    Collection<Partition> partitions,
    BiConsumer<StreamElement, Pair<Long, Object>> updateCallback,
    @Nullable Duration ttl) {
    close();
    this.updateCallback = Objects.requireNonNull(updateCallback);
    BlockingQueue<Optional<Throwable>> errorDuringPrefetch = new SynchronousQueue<>();
    AtomicLong prefetchedCount = new AtomicLong();
    final long prefetchStartTime = getCurrentTimeMillis();
    final long ttlMs = ttl == null ? Long.MAX_VALUE : ttl.toMillis();
    CommitLogObserver prefetchObserver = new CommitLogObserver() {

        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
            log.debug("Prefetched element {} with ttlMs {}", ingest, ttlMs);
            final long prefetched = prefetchedCount.incrementAndGet();
            if (ttl == null || getCurrentTimeMillis() - ingest.getStamp() < ttlMs) {
                if (prefetched % 10000 == 0) {
                    log.info("Prefetched so far {} elements in {} millis", prefetched, getCurrentTimeMillis() - prefetchStartTime);
                }
                onCache(ingest, false);
            }
            context.confirm();
            return true;
        }

        @Override
        public boolean onError(Throwable error) {
            log.error("Failed to prefetch data", error);
            ExceptionUtils.unchecked(() -> errorDuringPrefetch.put(Optional.of(error)));
            return false;
        }

        @Override
        public void onCompleted() {
            ExceptionUtils.unchecked(() -> errorDuringPrefetch.put(Optional.empty()));
        }
    };
    CommitLogObserver observer = new CommitLogObserver() {

        private long lastCleanup = 0;

        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
            onCache(ingest, false);
            context.confirm();
            if (ttl != null) {
                lastCleanup = maybeDoCleanup(lastCleanup, ttlMs);
            }
            return true;
        }

        @Override
        public boolean onError(Throwable error) {
            log.error("Error in caching data. Restarting consumption", error);
            assign(partitions);
            return false;
        }
    };
    synchronized (this) {
        try {
            // prefetch the data
            log.info("Starting prefetching old topic data for partitions {} with preUpdate {}", partitions.stream().map(p -> String.format("%s[%d]", getUri(), p.getId())).collect(Collectors.toList()), updateCallback);
            ObserveHandle h = reader.observeBulkPartitions(partitions, Position.OLDEST, true, prefetchObserver);
            errorDuringPrefetch.take().ifPresent(ExceptionUtils::rethrowAsIllegalStateException);
            log.info("Finished prefetching after {} records in {} millis. Starting consumption of updates.", prefetchedCount.get(), getCurrentTimeMillis() - prefetchStartTime);
            List<Offset> offsets = h.getCommittedOffsets();
            // continue the processing
            handle.set(reader.observeBulkOffsets(offsets, observer));
            handle.get().waitUntilReady();
        } catch (InterruptedException ex) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(ex);
        }
    }
}
Also used : ObserveHandle(cz.o2.proxima.direct.commitlog.ObserveHandle) Optional(java.util.Optional) StreamElement(cz.o2.proxima.storage.StreamElement) ExceptionUtils(cz.o2.proxima.util.ExceptionUtils) RawOffset(cz.o2.proxima.direct.randomaccess.RawOffset) Offset(cz.o2.proxima.direct.commitlog.Offset) RandomOffset(cz.o2.proxima.direct.randomaccess.RandomOffset) CommitLogObserver(cz.o2.proxima.direct.commitlog.CommitLogObserver) AtomicLong(java.util.concurrent.atomic.AtomicLong) SynchronousQueue(java.util.concurrent.SynchronousQueue)
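
On the caller side this is exactly the call Example 1 makes against the state families: obtain the CachedView, then assign all its partitions with an update callback and a TTL. A condensed sketch modeled on that call; `stateFamily` is an assumed DirectAttributeFamilyDescriptor whose storage supports caching:

CachedView view = Optionals.get(stateFamily.getCachedView());
view.assign(
    view.getPartitions(),
    (element, cached) -> log.debug("State update {} -> {}", element.getKey(), cached),
    Duration.ofMinutes(1)); // prefetch skips elements older than the TTL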

Example 3 with BiConsumer

Use of cz.o2.proxima.functional.BiConsumer in project proxima-platform by O2-Czech-Republic.

From class KafkaLogReader, method processConsumer:

/**
 * Process given consumer in online fashion.
 *
 * @param name name of the consumer
 * @param offsets assigned offsets
 * @param position where to read from
 * @param stopAtCurrent when true, terminate once the current end of each partition is reached
 * @param commitToKafka whether to commit processed offsets back to Kafka
 * @param observer the observer
 * @param executor executor to use for async processing
 * @return observe handle
 */
@VisibleForTesting
ObserveHandle processConsumer(
    @Nullable String name,
    @Nullable Collection<Offset> offsets,
    Position position,
    boolean stopAtCurrent,
    boolean commitToKafka,
    CommitLogObserver observer,
    ExecutorService executor)
    throws InterruptedException {
    // offsets that should be committed back to Kafka
    final Map<TopicPartition, OffsetAndMetadata> kafkaCommitMap =
        Collections.synchronizedMap(new HashMap<>());
    final OffsetCommitter<TopicPartition> offsetCommitter = createOffsetCommitter();
    final BiConsumer<TopicPartition, ConsumerRecord<Object, Object>> preWrite = (tp, r) -> {
        final long offset = r.offset();
        offsetCommitter.register(tp, offset, 1, () -> {
            OffsetAndMetadata mtd = new OffsetAndMetadata(offset + 1);
            if (commitToKafka) {
                kafkaCommitMap.put(tp, mtd);
            }
        });
    };
    OnlineConsumer<Object, Object> onlineConsumer =
        new OnlineConsumer<>(
            observer,
            offsetCommitter,
            () -> {
              synchronized (kafkaCommitMap) {
                Map<TopicPartition, OffsetAndMetadata> clone = new HashMap<>(kafkaCommitMap);
                kafkaCommitMap.clear();
                return clone;
              }
            });
    AtomicReference<ObserveHandle> handle = new AtomicReference<>();
    submitConsumerWithObserver(name, offsets, position, stopAtCurrent, preWrite, onlineConsumer, executor, handle);
    return dynamicHandle(handle);
}
Also used : Partition(cz.o2.proxima.storage.Partition) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) ExceptionUtils(cz.o2.proxima.util.ExceptionUtils) Collectors.toMap(java.util.stream.Collectors.toMap) StreamElement(cz.o2.proxima.storage.StreamElement) WatermarkEstimator(cz.o2.proxima.time.WatermarkEstimator) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) PartitionedWatermarkEstimator(cz.o2.proxima.time.PartitionedWatermarkEstimator) OnlineConsumer(cz.o2.proxima.direct.kafka.ElementConsumers.OnlineConsumer) Duration(java.time.Duration) Map(java.util.Map) WatermarkEstimatorFactory(cz.o2.proxima.time.WatermarkEstimatorFactory) WatermarkIdlePolicyFactory(cz.o2.proxima.time.WatermarkIdlePolicyFactory) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetExternalizer(cz.o2.proxima.direct.commitlog.OffsetExternalizer) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) ObserveHandle(cz.o2.proxima.direct.commitlog.ObserveHandle) UUID(java.util.UUID) PartitionInfo(org.apache.kafka.common.PartitionInfo) Collectors(java.util.stream.Collectors) RebalanceInProgressException(org.apache.kafka.common.errors.RebalanceInProgressException) BiConsumer(cz.o2.proxima.functional.BiConsumer) Objects(java.util.Objects) BulkConsumer(cz.o2.proxima.direct.kafka.ElementConsumers.BulkConsumer) CountDownLatch(java.util.concurrent.CountDownLatch) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) List(java.util.List) Slf4j(lombok.extern.slf4j.Slf4j) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) Optional(java.util.Optional) MinimalPartitionWatermarkEstimator(cz.o2.proxima.direct.time.MinimalPartitionWatermarkEstimator) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) Context(cz.o2.proxima.direct.core.Context) Getter(lombok.Getter) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Watermarks(cz.o2.proxima.time.Watermarks) CommitLogReader(cz.o2.proxima.direct.commitlog.CommitLogReader) ExecutorService(java.util.concurrent.ExecutorService) Nullable(javax.annotation.Nullable) AbstractStorage(cz.o2.proxima.storage.AbstractStorage) CommitLogObserver(cz.o2.proxima.direct.commitlog.CommitLogObserver) Offset(cz.o2.proxima.direct.commitlog.Offset) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Collections(java.util.Collections) Position(cz.o2.proxima.storage.commitlog.Position)
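
Since processConsumer is annotated @VisibleForTesting, a test can drive the poll loop directly. A hedged sketch of such a call; `reader` (a KafkaLogReader) and `observer` are assumed to exist:

ObserveHandle handle =
    reader.processConsumer(
        "test-consumer",
        null, // no explicit offsets, read from `position` instead
        Position.NEWEST,
        false, // stopAtCurrent
        true, // commitToKafka
        observer,
        Executors.newCachedThreadPool()); // java.util.concurrent.Executors
handle.waitUntilReady(); // blocks until the consumer has its first assignment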

Example 4 with BiConsumer

Use of cz.o2.proxima.functional.BiConsumer in project proxima-platform by O2-Czech-Republic.

From class KafkaLogReader, method submitConsumerWithObserver:

private void submitConsumerWithObserver(
    @Nullable final String name,
    @Nullable final Collection<Offset> offsets,
    final Position position,
    boolean stopAtCurrent,
    final BiConsumer<TopicPartition, ConsumerRecord<Object, Object>> preWrite,
    final ElementConsumer<Object, Object> consumer,
    final ExecutorService executor,
    final AtomicReference<ObserveHandle> handle)
    throws InterruptedException {
    final CountDownLatch readyLatch = new CountDownLatch(1);
    final CountDownLatch completedLatch = new CountDownLatch(1);
    final AtomicBoolean completed = new AtomicBoolean();
    final AtomicBoolean shutdown = new AtomicBoolean();
    List<TopicOffset> seekOffsets = Collections.synchronizedList(new ArrayList<>());
    Preconditions.checkArgument(
        !accessor.isTopicRegex() || !stopAtCurrent, "Cannot use stopAtCurrent with regex URI");
    executor.submit(() -> {
        final AtomicReference<KafkaConsumer<Object, Object>> consumerRef = new AtomicReference<>();
        final AtomicReference<PartitionedWatermarkEstimator> watermarkEstimator = new AtomicReference<>(null);
        final Map<TopicPartition, Integer> emptyPollCount = new ConcurrentHashMap<>();
        final Map<TopicPartition, Integer> topicPartitionToId = new HashMap<>();
        final Duration pollDuration = Duration.ofMillis(consumerPollInterval);
        final KafkaThroughputLimiter throughputLimiter = new KafkaThroughputLimiter(maxBytesPerSec);
        handle.set(createObserveHandle(shutdown, seekOffsets, consumer, readyLatch, completedLatch));
        consumer.onStart();
        ConsumerRebalanceListener listener =
            listener(name, consumerRef, consumer, emptyPollCount, topicPartitionToId, watermarkEstimator);
        try (KafkaConsumer<Object, Object> kafka =
            createConsumer(name, offsets, name != null ? listener : null, position)) {
            consumerRef.set(kafka);
            // we need to poll first to initialize kafka assignments and rebalance listener
            ConsumerRecords<Object, Object> poll;
            Map<TopicPartition, Long> endOffsets;
            do {
                poll = kafka.poll(pollDuration);
                endOffsets = stopAtCurrent ? findNonEmptyEndOffsets(kafka) : null;
                if (log.isDebugEnabled()) {
                    log.debug("End offsets of current assignment {}: {}", kafka.assignment(), endOffsets);
                }
            } while (poll.isEmpty()
                && accessor.isTopicRegex()
                && kafka.assignment().isEmpty()
                && !shutdown.get()
                && !Thread.currentThread().isInterrupted());
            Set<TopicPartition> assignment = kafka.assignment();
            if (!assignment.isEmpty()) {
                listener.onPartitionsRevoked(assignment);
                listener.onPartitionsAssigned(assignment);
            }
            readyLatch.countDown();
            AtomicReference<Throwable> error = new AtomicReference<>();
            long pollTimeMs = 0L;
            do {
                if (poll.isEmpty()) {
                    Optional.ofNullable(watermarkEstimator.get()).ifPresent(consumer::onIdle);
                }
                logConsumerWatermark(name, offsets, watermarkEstimator, poll.count());
                poll = seekToNewOffsetsIfNeeded(seekOffsets, consumer, watermarkEstimator, kafka, poll);
                long bytesPolled = 0L;
                // increase all partition's empty poll counter by 1
                emptyPollCount.replaceAll((k, v) -> v + 1);
                for (ConsumerRecord<Object, Object> r : poll) {
                    bytesPolled += r.serializedKeySize() + r.serializedValueSize();
                    TopicPartition tp = new TopicPartition(r.topic(), r.partition());
                    emptyPollCount.put(tp, 0);
                    preWrite.accept(tp, r);
                    StreamElement ingest = serializer.read(r, getEntityDescriptor());
                    if (ingest != null) {
                        watermarkEstimator.get().update(Objects.requireNonNull(topicPartitionToId.get(tp)), ingest);
                    }
                    boolean cont =
                        consumer.consumeWithConfirm(
                            ingest, tp, r.offset(), watermarkEstimator.get(), error::set);
                    if (!cont) {
                        log.info("Terminating consumption by request");
                        completed.set(true);
                        shutdown.set(true);
                        break;
                    }
                    if (stopAtCurrent) {
                        Long end = endOffsets.get(tp);
                        if (end != null && end - 1 <= r.offset()) {
                            log.debug("Reached end of partition {} at offset {}", tp, r.offset());
                            endOffsets.remove(tp);
                        }
                    }
                }
                increaseWatermarkOnEmptyPolls(emptyPollCount, topicPartitionToId, watermarkEstimator);
                if (!flushCommits(kafka, consumer)) {
                    handleRebalanceInOffsetCommit(kafka, listener);
                }
                rethrowErrorIfPresent(name, error);
                terminateIfConsumed(stopAtCurrent, kafka, endOffsets, emptyPollCount, completed);
                throughputLimiter.sleepToLimitThroughput(bytesPolled, pollTimeMs);
                long startTime = System.currentTimeMillis();
                poll = kafka.poll(pollDuration);
                pollTimeMs = System.currentTimeMillis() - startTime;
            } while (!shutdown.get() && !completed.get() && !Thread.currentThread().isInterrupted());
            if (log.isDebugEnabled()) {
                log.debug("Terminating poll loop for assignment {}: shutdown: {}, completed: {}, interrupted: {}", kafka.assignment(), shutdown.get(), completed.get(), Thread.currentThread().isInterrupted());
            }
            if (!Thread.currentThread().isInterrupted() && !shutdown.get()) {
                consumer.onCompleted();
            } else {
                consumer.onCancelled();
            }
            completedLatch.countDown();
        } catch (InterruptedException ex) {
            log.info("Interrupted while polling kafka. Terminating consumption.", ex);
            Thread.currentThread().interrupt();
            consumer.onCancelled();
            completedLatch.countDown();
        } catch (Throwable err) {
            completedLatch.countDown();
            log.error("Error processing consumer {}", name, err);
            if (consumer.onError(err)) {
                try {
                    submitConsumerWithObserver(name, offsets, position, stopAtCurrent, preWrite, consumer, executor, handle);
                } catch (InterruptedException ex) {
                    log.warn("Interrupted while restarting observer");
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(ex);
                }
            }
        } finally {
            readyLatch.countDown();
        }
    });
    readyLatch.await();
}
Also used : ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) StreamElement(cz.o2.proxima.storage.StreamElement) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) AtomicReference(java.util.concurrent.atomic.AtomicReference) Duration(java.time.Duration) CountDownLatch(java.util.concurrent.CountDownLatch) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) TopicPartition(org.apache.kafka.common.TopicPartition) PartitionedWatermarkEstimator(cz.o2.proxima.time.PartitionedWatermarkEstimator)
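
Everything above is the private engine behind the public commit-log API; application code normally reaches this loop through CommitLogReader.observe, the same entry point Example 1 uses. A minimal sketch of that path, assuming `reader` targets a Kafka-backed commit log:

ObserveHandle handle =
    reader.observe(
        "my-consumer",
        new CommitLogObserver() {

          @Override
          public boolean onNext(StreamElement element, OnNextContext context) {
            // process the element, then confirm so its offset may be committed
            context.confirm();
            return true; // keep consuming
          }

          @Override
          public boolean onError(Throwable error) {
            return false; // do not restart consumption
          }
        });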

Aggregations

StreamElement (cz.o2.proxima.storage.StreamElement): 4
CommitLogObserver (cz.o2.proxima.direct.commitlog.CommitLogObserver): 3
ObserveHandle (cz.o2.proxima.direct.commitlog.ObserveHandle): 3
ExceptionUtils (cz.o2.proxima.util.ExceptionUtils): 3
Duration (java.time.Duration): 3
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 2
Preconditions (com.google.common.base.Preconditions): 2
CommitLogReader (cz.o2.proxima.direct.commitlog.CommitLogReader): 2
Offset (cz.o2.proxima.direct.commitlog.Offset): 2
BiConsumer (cz.o2.proxima.functional.BiConsumer): 2
Partition (cz.o2.proxima.storage.Partition): 2
PartitionedWatermarkEstimator (cz.o2.proxima.time.PartitionedWatermarkEstimator): 2
ArrayList (java.util.ArrayList): 2
HashMap (java.util.HashMap): 2
Optional (java.util.Optional): 2
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 2
CountDownLatch (java.util.concurrent.CountDownLatch): 2
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 2
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2
AtomicReference (java.util.concurrent.atomic.AtomicReference): 2