Example usage of cz.o2.proxima.util.ExceptionUtils in the O2-Czech-Republic/proxima-platform project:
the assign method of class LocalCachedPartitionedView.
/**
 * Assigns the given partitions to this view: first prefetches all historical data
 * from the oldest position into the local cache, then switches to continuous
 * consumption of updates from the offsets where the prefetch finished.
 *
 * <p>Blocks until the prefetch phase has completed (or failed) and the follow-up
 * observation is ready.
 *
 * @param partitions partitions to cache locally
 * @param updateCallback callback invoked on cache updates (stored; must be non-null)
 * @param ttl if non-null, prefetched elements older than this duration are skipped
 *     and periodic cache cleanup is performed during consumption
 */
@Override
public void assign(Collection<Partition> partitions, BiConsumer<StreamElement, Pair<Long, Object>> updateCallback, @Nullable Duration ttl) {
// Tear down any previous observation before (re)assigning.
close();
this.updateCallback = Objects.requireNonNull(updateCallback);
// Rendezvous channel between the prefetch observer's terminal callbacks and this
// thread: holds Optional.of(error) on failure, Optional.empty() on clean completion.
// SynchronousQueue has no capacity, so put() blocks until take() below consumes it.
BlockingQueue<Optional<Throwable>> errorDuringPrefetch = new SynchronousQueue<>();
AtomicLong prefetchedCount = new AtomicLong();
final long prefetchStartTime = getCurrentTimeMillis();
// ttl == null means "never expire"; represented as Long.MAX_VALUE millis.
final long ttlMs = ttl == null ? Long.MAX_VALUE : ttl.toMillis();
// Observer for the one-shot prefetch pass over historical data.
CommitLogObserver prefetchObserver = new CommitLogObserver() {
@Override
public boolean onNext(StreamElement ingest, OnNextContext context) {
log.debug("Prefetched element {} with ttlMs {}", ingest, ttlMs);
final long prefetched = prefetchedCount.incrementAndGet();
// Cache only elements within the TTL window; expired ones are counted but dropped.
if (ttl == null || getCurrentTimeMillis() - ingest.getStamp() < ttlMs) {
// Progress log; note it only fires for elements that pass the TTL check.
if (prefetched % 10000 == 0) {
log.info("Prefetched so far {} elements in {} millis", prefetched, getCurrentTimeMillis() - prefetchStartTime);
}
onCache(ingest, false);
}
// Confirm every element, cached or not, so the prefetch can advance.
context.confirm();
return true;
}
@Override
public boolean onError(Throwable error) {
log.error("Failed to prefetch data", error);
// Hand the error to the thread blocked in take(); put() may throw
// InterruptedException, hence the unchecked wrapper.
ExceptionUtils.unchecked(() -> errorDuringPrefetch.put(Optional.of(error)));
// Returning false stops the prefetch observation.
return false;
}
@Override
public void onCompleted() {
// Signal clean completion to the thread blocked in take().
ExceptionUtils.unchecked(() -> errorDuringPrefetch.put(Optional.empty()));
}
};
// Observer for the long-running update consumption that follows the prefetch.
CommitLogObserver observer = new CommitLogObserver() {
// Timestamp of the last TTL cleanup; updated by maybeDoCleanup.
private long lastCleanup = 0;
@Override
public boolean onNext(StreamElement ingest, OnNextContext context) {
onCache(ingest, false);
context.confirm();
if (ttl != null) {
// Periodically evict expired entries; helper decides whether it is time.
lastCleanup = maybeDoCleanup(lastCleanup, ttlMs);
}
return true;
}
@Override
public boolean onError(Throwable error) {
// Recover by restarting the whole assignment from scratch.
// NOTE(review): assumes an assign(Collection) overload exists that supplies
// defaults for callback/ttl — confirm against the rest of the class.
log.error("Error in caching data. Restarting consumption", error);
assign(partitions);
return false;
}
};
// Serialize concurrent assign() calls on this view instance.
synchronized (this) {
try {
// prefetch the data
log.info("Starting prefetching old topic data for partitions {} with preUpdate {}", partitions.stream().map(p -> String.format("%s[%d]", getUri(), p.getId())).collect(Collectors.toList()), updateCallback);
ObserveHandle h = reader.observeBulkPartitions(partitions, Position.OLDEST, true, prefetchObserver);
// Block until the prefetch observer signals completion; rethrow any error
// as IllegalStateException.
errorDuringPrefetch.take().ifPresent(ExceptionUtils::rethrowAsIllegalStateException);
log.info("Finished prefetching after {} records in {} millis. Starting consumption of updates.", prefetchedCount.get(), getCurrentTimeMillis() - prefetchStartTime);
// Resume the continuous observation exactly where the prefetch left off.
List<Offset> offsets = h.getCommittedOffsets();
// continue the processing
handle.set(reader.observeBulkOffsets(offsets, observer));
handle.get().waitUntilReady();
} catch (InterruptedException ex) {
// Restore the interrupt flag before propagating.
Thread.currentThread().interrupt();
throw new RuntimeException(ex);
}
}
}
Aggregations