Use of cz.o2.proxima.functional.BiConsumer in project proxima-platform by O2-Czech-Republic.
The class TransactionResourceManager, method runObservations.
/**
* Observe all transactional families with the given observer.
*
* @param name name of the observer (will be appended with the name of the family)
* @param updateConsumer callback invoked when the cached transaction state is updated
* @param requestObserver the observer (need not be synchronized)
*/
@Override
public void runObservations(
    String name,
    BiConsumer<StreamElement, Pair<Long, Object>> updateConsumer,
    CommitLogObserver requestObserver) {

  final CommitLogObserver effectiveObserver;
  if (isNotThreadSafe(requestObserver)) {
    effectiveObserver = requestObserver;
  } else {
    effectiveObserver =
        new ThreadPooledObserver(
            direct.getContext().getExecutorService(),
            requestObserver,
            getDeclaredParallelism(requestObserver)
                .orElse(Runtime.getRuntime().availableProcessors()));
  }

  List<Set<String>> families =
      direct
          .getRepository()
          .getAllEntities()
          .filter(EntityDescriptor::isTransactional)
          .flatMap(e -> e.getAllAttributes().stream())
          .filter(a -> a.getTransactionMode() != TransactionMode.NONE)
          .map(AttributeDescriptor::getTransactionalManagerFamilies)
          .map(Sets::newHashSet)
          .distinct()
          .collect(Collectors.toList());

  CountDownLatch initializedLatch = new CountDownLatch(families.size());
  families
      .stream()
      .map(this::toRequestStatePair)
      .forEach(
          p -> {
            DirectAttributeFamilyDescriptor requestFamily = p.getFirst();
            DirectAttributeFamilyDescriptor stateFamily = p.getSecond();
            String consumerName = name + "-" + requestFamily.getDesc().getName();
            log.info(
                "Starting to observe family {} with URI {} and associated state family {} as {}",
                requestFamily,
                requestFamily.getDesc().getStorageUri(),
                stateFamily,
                consumerName);
            CommitLogReader reader = Optionals.get(requestFamily.getCommitLogReader());

            CachedView view = stateViews.get(stateFamily);
            if (view == null) {
              view = Optionals.get(stateFamily.getCachedView());
              Duration ttl = Duration.ofMillis(cleanupIntervalMs);
              stateViews.put(stateFamily, view);
              view.assign(view.getPartitions(), updateConsumer, ttl);
            }
            initializedLatch.countDown();

            serverObservedFamilies.put(
                requestFamily,
                reader.observe(
                    consumerName,
                    repartitionHookForBeingActive(
                        stateFamily, reader.getPartitions().size(), effectiveObserver)));
          });
  ExceptionUtils.unchecked(initializedLatch::await);
}
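For reference, the updateConsumer passed above ends up as the update callback of the cached state view (see LocalCachedPartitionedView.assign below). A minimal caller sketch follows; the variable names manager and requestObserver, as well as the exact import paths, are illustrative assumptions and may differ between proxima-platform versions.

import cz.o2.proxima.functional.BiConsumer;
import cz.o2.proxima.storage.StreamElement;
import cz.o2.proxima.util.Pair;

// Callback invoked for every update applied to the cached transaction state.
// The meaning of the Pair components is defined by the caching view.
BiConsumer<StreamElement, Pair<Long, Object>> updateConsumer =
    (element, metadata) ->
        System.out.printf(
            "cached %s (%s, %s)%n", element, metadata.getFirst(), metadata.getSecond());

// 'manager' is an existing TransactionResourceManager, 'requestObserver' an
// existing CommitLogObserver handling transaction requests (both hypothetical here).
manager.runObservations("transaction-manager", updateConsumer, requestObserver);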
Use of cz.o2.proxima.functional.BiConsumer in project proxima-platform by O2-Czech-Republic.
The class LocalCachedPartitionedView, method assign.
@Override
public void assign(
    Collection<Partition> partitions,
    BiConsumer<StreamElement, Pair<Long, Object>> updateCallback,
    @Nullable Duration ttl) {

  close();
  this.updateCallback = Objects.requireNonNull(updateCallback);
  BlockingQueue<Optional<Throwable>> errorDuringPrefetch = new SynchronousQueue<>();
  AtomicLong prefetchedCount = new AtomicLong();
  final long prefetchStartTime = getCurrentTimeMillis();
  final long ttlMs = ttl == null ? Long.MAX_VALUE : ttl.toMillis();

  CommitLogObserver prefetchObserver =
      new CommitLogObserver() {

        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
          log.debug("Prefetched element {} with ttlMs {}", ingest, ttlMs);
          final long prefetched = prefetchedCount.incrementAndGet();
          if (ttl == null || getCurrentTimeMillis() - ingest.getStamp() < ttlMs) {
            if (prefetched % 10000 == 0) {
              log.info(
                  "Prefetched so far {} elements in {} millis",
                  prefetched,
                  getCurrentTimeMillis() - prefetchStartTime);
            }
            onCache(ingest, false);
          }
          context.confirm();
          return true;
        }

        @Override
        public boolean onError(Throwable error) {
          log.error("Failed to prefetch data", error);
          ExceptionUtils.unchecked(() -> errorDuringPrefetch.put(Optional.of(error)));
          return false;
        }

        @Override
        public void onCompleted() {
          ExceptionUtils.unchecked(() -> errorDuringPrefetch.put(Optional.empty()));
        }
      };

  CommitLogObserver observer =
      new CommitLogObserver() {

        private long lastCleanup = 0;

        @Override
        public boolean onNext(StreamElement ingest, OnNextContext context) {
          onCache(ingest, false);
          context.confirm();
          if (ttl != null) {
            lastCleanup = maybeDoCleanup(lastCleanup, ttlMs);
          }
          return true;
        }

        @Override
        public boolean onError(Throwable error) {
          log.error("Error in caching data. Restarting consumption", error);
          assign(partitions);
          return false;
        }
      };

  synchronized (this) {
    try {
      // prefetch the data
      log.info(
          "Starting prefetching old topic data for partitions {} with preUpdate {}",
          partitions
              .stream()
              .map(p -> String.format("%s[%d]", getUri(), p.getId()))
              .collect(Collectors.toList()),
          updateCallback);
      ObserveHandle h =
          reader.observeBulkPartitions(partitions, Position.OLDEST, true, prefetchObserver);
      errorDuringPrefetch.take().ifPresent(ExceptionUtils::rethrowAsIllegalStateException);
      log.info(
          "Finished prefetching after {} records in {} millis. Starting consumption of updates.",
          prefetchedCount.get(),
          getCurrentTimeMillis() - prefetchStartTime);
      List<Offset> offsets = h.getCommittedOffsets();
      // continue the processing
      handle.set(reader.observeBulkOffsets(offsets, observer));
      handle.get().waitUntilReady();
    } catch (InterruptedException ex) {
      Thread.currentThread().interrupt();
      throw new RuntimeException(ex);
    }
  }
}
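The same BiConsumer shape serves as the update callback of the cached view. Below is a hedged sketch of assigning a view with a TTL, mirroring the wiring in runObservations above; stateFamily is assumed to be an existing DirectAttributeFamilyDescriptor.

// Obtain the cached view of the state family and assign all its partitions.
CachedView view = Optionals.get(stateFamily.getCachedView());

// Callback notified whenever the cache is updated.
BiConsumer<StreamElement, Pair<Long, Object>> updateCallback =
    (element, metadata) -> System.out.println("cache updated with " + element);

// Elements older than one hour are skipped during prefetch and removed by cleanup;
// passing null instead of a Duration keeps cached data indefinitely.
view.assign(view.getPartitions(), updateCallback, Duration.ofHours(1));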
Use of cz.o2.proxima.functional.BiConsumer in project proxima-platform by O2-Czech-Republic.
The class KafkaLogReader, method processConsumer.
/**
* Process given consumer in online fashion.
*
* @param name name of the consumer
* @param offsets assigned offsets
* @param position where to read from
* @param stopAtCurrent {@code true} to stop reading once the current end offsets are reached
* @param commitToKafka whether to commit processed offsets back to Kafka
* @param observer the observer
* @param executor executor to use for async processing
* @return observe handle
* @throws InterruptedException if interrupted while starting the consumption
*/
@VisibleForTesting
ObserveHandle processConsumer(
    @Nullable String name,
    @Nullable Collection<Offset> offsets,
    Position position,
    boolean stopAtCurrent,
    boolean commitToKafka,
    CommitLogObserver observer,
    ExecutorService executor)
    throws InterruptedException {

  // offsets that should be committed to kafka
  Map<TopicPartition, OffsetAndMetadata> kafkaCommitMap;
  kafkaCommitMap = Collections.synchronizedMap(new HashMap<>());

  final OffsetCommitter<TopicPartition> offsetCommitter = createOffsetCommitter();

  final BiConsumer<TopicPartition, ConsumerRecord<Object, Object>> preWrite =
      (tp, r) -> {
        final long offset = r.offset();
        offsetCommitter.register(
            tp,
            offset,
            1,
            () -> {
              OffsetAndMetadata mtd = new OffsetAndMetadata(offset + 1);
              if (commitToKafka) {
                kafkaCommitMap.put(tp, mtd);
              }
            });
      };

  OnlineConsumer<Object, Object> onlineConsumer =
      new OnlineConsumer<>(
          observer,
          offsetCommitter,
          () -> {
            synchronized (kafkaCommitMap) {
              Map<TopicPartition, OffsetAndMetadata> clone = new HashMap<>(kafkaCommitMap);
              kafkaCommitMap.clear();
              return clone;
            }
          });

  AtomicReference<ObserveHandle> handle = new AtomicReference<>();
  submitConsumerWithObserver(
      name, offsets, position, stopAtCurrent, preWrite, onlineConsumer, executor, handle);
  return dynamicHandle(handle);
}
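The preWrite BiConsumer above registers every polled record with the OffsetCommitter before the element reaches the observer; the registered callback adds the next offset to kafkaCommitMap when it fires, and the supplier passed to OnlineConsumer later snapshots and clears that map atomically. A simplified, self-contained sketch of that clone-and-clear idiom follows; the class and method names are made up for illustration.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

class CommitBuffer {

  // Pending offsets, keyed by partition name for simplicity.
  private final Map<String, Long> commits = Collections.synchronizedMap(new HashMap<>());

  void add(String partition, long offset) {
    commits.put(partition, offset);
  }

  // Copying a synchronizedMap iterates over it, so the copy and the clear must
  // happen atomically under the map's monitor, as the kafkaCommitMap supplier does,
  // to avoid losing offsets added concurrently by preWrite callbacks.
  Map<String, Long> takeSnapshot() {
    synchronized (commits) {
      Map<String, Long> snapshot = new HashMap<>(commits);
      commits.clear();
      return snapshot;
    }
  }
}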
Use of cz.o2.proxima.functional.BiConsumer in project proxima-platform by O2-Czech-Republic.
The class KafkaLogReader, method submitConsumerWithObserver.
private void submitConsumerWithObserver(
    @Nullable final String name,
    @Nullable final Collection<Offset> offsets,
    final Position position,
    boolean stopAtCurrent,
    final BiConsumer<TopicPartition, ConsumerRecord<Object, Object>> preWrite,
    final ElementConsumer<Object, Object> consumer,
    final ExecutorService executor,
    final AtomicReference<ObserveHandle> handle)
    throws InterruptedException {

  final CountDownLatch readyLatch = new CountDownLatch(1);
  final CountDownLatch completedLatch = new CountDownLatch(1);
  final AtomicBoolean completed = new AtomicBoolean();
  final AtomicBoolean shutdown = new AtomicBoolean();
  List<TopicOffset> seekOffsets = Collections.synchronizedList(new ArrayList<>());

  Preconditions.checkArgument(
      !accessor.isTopicRegex() || !stopAtCurrent, "Cannot use stopAtCurrent with regex URI");

  executor.submit(
      () -> {
        final AtomicReference<KafkaConsumer<Object, Object>> consumerRef =
            new AtomicReference<>();
        final AtomicReference<PartitionedWatermarkEstimator> watermarkEstimator =
            new AtomicReference<>(null);
        final Map<TopicPartition, Integer> emptyPollCount = new ConcurrentHashMap<>();
        final Map<TopicPartition, Integer> topicPartitionToId = new HashMap<>();
        final Duration pollDuration = Duration.ofMillis(consumerPollInterval);
        final KafkaThroughputLimiter throughputLimiter =
            new KafkaThroughputLimiter(maxBytesPerSec);

        handle.set(
            createObserveHandle(shutdown, seekOffsets, consumer, readyLatch, completedLatch));
        consumer.onStart();
        ConsumerRebalanceListener listener =
            listener(
                name, consumerRef, consumer, emptyPollCount, topicPartitionToId, watermarkEstimator);

        try (KafkaConsumer<Object, Object> kafka =
            createConsumer(name, offsets, name != null ? listener : null, position)) {

          consumerRef.set(kafka);

          // we need to poll first to initialize kafka assignments and rebalance listener
          ConsumerRecords<Object, Object> poll;
          Map<TopicPartition, Long> endOffsets;
          do {
            poll = kafka.poll(pollDuration);
            endOffsets = stopAtCurrent ? findNonEmptyEndOffsets(kafka) : null;
            if (log.isDebugEnabled()) {
              log.debug(
                  "End offsets of current assignment {}: {}", kafka.assignment(), endOffsets);
            }
          } while (poll.isEmpty()
              && accessor.isTopicRegex()
              && kafka.assignment().isEmpty()
              && !shutdown.get()
              && !Thread.currentThread().isInterrupted());

          Set<TopicPartition> assignment = kafka.assignment();
          if (!assignment.isEmpty()) {
            listener.onPartitionsRevoked(assignment);
            listener.onPartitionsAssigned(assignment);
          }

          readyLatch.countDown();

          AtomicReference<Throwable> error = new AtomicReference<>();
          long pollTimeMs = 0L;
          do {
            if (poll.isEmpty()) {
              Optional.ofNullable(watermarkEstimator.get()).ifPresent(consumer::onIdle);
            }
            logConsumerWatermark(name, offsets, watermarkEstimator, poll.count());
            poll = seekToNewOffsetsIfNeeded(seekOffsets, consumer, watermarkEstimator, kafka, poll);
            long bytesPolled = 0L;

            // increase all partition's empty poll counter by 1
            emptyPollCount.replaceAll((k, v) -> v + 1);

            for (ConsumerRecord<Object, Object> r : poll) {
              bytesPolled += r.serializedKeySize() + r.serializedValueSize();
              TopicPartition tp = new TopicPartition(r.topic(), r.partition());
              emptyPollCount.put(tp, 0);
              preWrite.accept(tp, r);
              StreamElement ingest = serializer.read(r, getEntityDescriptor());
              if (ingest != null) {
                watermarkEstimator
                    .get()
                    .update(Objects.requireNonNull(topicPartitionToId.get(tp)), ingest);
              }
              boolean cont =
                  consumer.consumeWithConfirm(
                      ingest, tp, r.offset(), watermarkEstimator.get(), error::set);
              if (!cont) {
                log.info("Terminating consumption by request");
                completed.set(true);
                shutdown.set(true);
                break;
              }
              if (stopAtCurrent) {
                Long end = endOffsets.get(tp);
                if (end != null && end - 1 <= r.offset()) {
                  log.debug("Reached end of partition {} at offset {}", tp, r.offset());
                  endOffsets.remove(tp);
                }
              }
            }

            increaseWatermarkOnEmptyPolls(emptyPollCount, topicPartitionToId, watermarkEstimator);
            if (!flushCommits(kafka, consumer)) {
              handleRebalanceInOffsetCommit(kafka, listener);
            }
            rethrowErrorIfPresent(name, error);
            terminateIfConsumed(stopAtCurrent, kafka, endOffsets, emptyPollCount, completed);
            throughputLimiter.sleepToLimitThroughput(bytesPolled, pollTimeMs);
            long startTime = System.currentTimeMillis();
            poll = kafka.poll(pollDuration);
            pollTimeMs = System.currentTimeMillis() - startTime;
          } while (!shutdown.get() && !completed.get() && !Thread.currentThread().isInterrupted());

          if (log.isDebugEnabled()) {
            log.debug(
                "Terminating poll loop for assignment {}: shutdown: {}, completed: {}, interrupted: {}",
                kafka.assignment(),
                shutdown.get(),
                completed.get(),
                Thread.currentThread().isInterrupted());
          }
          if (!Thread.currentThread().isInterrupted() && !shutdown.get()) {
            consumer.onCompleted();
          } else {
            consumer.onCancelled();
          }
          completedLatch.countDown();
        } catch (InterruptedException ex) {
          log.info("Interrupted while polling kafka. Terminating consumption.", ex);
          Thread.currentThread().interrupt();
          consumer.onCancelled();
          completedLatch.countDown();
        } catch (Throwable err) {
          completedLatch.countDown();
          log.error("Error processing consumer {}", name, err);
          if (consumer.onError(err)) {
            try {
              submitConsumerWithObserver(
                  name, offsets, position, stopAtCurrent, preWrite, consumer, executor, handle);
            } catch (InterruptedException ex) {
              log.warn("Interrupted while restarting observer");
              Thread.currentThread().interrupt();
              throw new RuntimeException(ex);
            }
          }
        } finally {
          readyLatch.countDown();
        }
      });
  readyLatch.await();
}
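All of the usages above share the same functional shape: a two-argument callback with an accept method, defined in the cz.o2.proxima.functional package so that it can be captured and serialized by the pipelines that use it. The following is only a sketch of such an interface; the actual declaration in proxima-platform may differ in type-parameter names and annotations.

import java.io.Serializable;

// Sketch only: a serializable two-argument callback in the spirit of
// cz.o2.proxima.functional.BiConsumer; the real declaration may differ.
@FunctionalInterface
public interface BiConsumer<A, B> extends Serializable {

  /** Consume the two given values. */
  void accept(A first, B second);
}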