Use of org.infinispan.commons.util.AbstractIterator in project infinispan by infinispan.
From class DistributedCacheStream, method iterator().
// The next ones are key tracking terminal operators
@Override
public Iterator<R> iterator() {
   log.tracef("Distributed iterator invoked with rehash: %s", rehashAware);
   Function usedTransformer;
   if (intermediateOperations.isEmpty()) {
      usedTransformer = MarshallableFunctions.identity();
   } else {
      usedTransformer = new CacheIntermediatePublisher(intermediateOperations);
   }
   // Rehash-aware iteration needs exactly-once delivery; otherwise at-most-once is enough
   DeliveryGuarantee deliveryGuarantee = rehashAware ? DeliveryGuarantee.EXACTLY_ONCE : DeliveryGuarantee.AT_MOST_ONCE;
   Publisher<R> publisherToSubscribeTo;
   SegmentPublisherSupplier<R> publisher;
   if (toKeyFunction == null) {
      publisher = cpm.keyPublisher(segmentsToFilter, keysToFilter, invocationContext, explicitFlags,
            deliveryGuarantee, distributedBatchSize, usedTransformer);
   } else {
      publisher = cpm.entryPublisher(segmentsToFilter, keysToFilter, invocationContext, explicitFlags,
            deliveryGuarantee, distributedBatchSize, usedTransformer);
   }

   CompletionSegmentTracker<R> segmentTracker;
   if (segmentCompletionListener != null) {
      // Tracker relies on ordering that a segment completion occurs
      segmentTracker = new CompletionSegmentTracker<>(segmentCompletionListener);
      publisherToSubscribeTo = Flowable.fromPublisher(publisher.publisherWithSegments()).mapOptional(segmentTracker);
   } else {
      segmentTracker = null;
      publisherToSubscribeTo = publisher.publisherWithoutSegments();
   }

   CloseableIterator<R> realIterator = Closeables.iterator(Flowable.fromPublisher(publisherToSubscribeTo)
         .onErrorResumeNext(RxJavaInterop.cacheExceptionWrapper()), distributedBatchSize);
   onClose(realIterator::close);

   if (segmentTracker != null) {
      // Wrap the real iterator so the tracker sees every returned value and is notified when iteration completes
      return new AbstractIterator<R>() {
         @Override
         protected R getNext() {
            if (realIterator.hasNext()) {
               R value = realIterator.next();
               segmentTracker.returningObject(value);
               return value;
            } else {
               segmentTracker.onComplete();
            }
            // Returning null signals the end of iteration to AbstractIterator
            return null;
         }
      };
   }
   return realIterator;
}
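In both snippets on this page, the only method a subclass supplies is getNext(): return the next element, or null once nothing is left, and AbstractIterator derives hasNext()/next() from that. A minimal, self-contained sketch of the same contract (illustrative only, not taken from the Infinispan code base; RangeIterator is a made-up name):

import org.infinispan.commons.util.AbstractIterator;

// Iterates the ints in [start, end); getNext() returns null once the range is exhausted,
// the same termination convention used by the anonymous AbstractIterator above.
public class RangeIterator extends AbstractIterator<Integer> {
   private final int end;
   private int current;

   public RangeIterator(int start, int end) {
      this.current = start;
      this.end = end;
   }

   @Override
   protected Integer getNext() {
      if (current >= end) {
         return null; // null tells AbstractIterator there are no more elements
      }
      return current++;
   }
}

For example, new RangeIterator(0, 3) yields 0, 1, 2 through the inherited hasNext()/next() methods.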
Use of org.infinispan.commons.util.AbstractIterator in project infinispan by infinispan.
From class RocksDBStore, method actualPurgeExpired().
private Flowable<MarshallableEntry<K, V>> actualPurgeExpired(long now) {
   // The following flowable is responsible for emitting entries that have expired from expiredDb and removing the
   // given entries
   Flowable<byte[]> expiredFlowable = Flowable.using(() -> {
      ReadOptions readOptions = new ReadOptions().setFillCache(false);
      return new AbstractMap.SimpleImmutableEntry<>(readOptions, expiredDb.newIterator(readOptions));
   }, entry -> {
      if (entry.getValue() == null) {
         return Flowable.empty();
      }
      RocksIterator iterator = entry.getValue();
      iterator.seekToFirst();
      return Flowable.fromIterable(() -> new AbstractIterator<byte[]>() {
         @Override
         protected byte[] getNext() {
            if (!iterator.isValid()) {
               return null;
            }
            byte[] keyBytes = iterator.key();
            // Keys in expiredDb are marshalled expiration timestamps; stop at the first one later than 'now'
            Long time = unmarshall(keyBytes);
            if (time > now) {
               return null;
            }
            try {
               expiredDb.delete(keyBytes);
            } catch (RocksDBException e) {
               throw new PersistenceException(e);
            }
            byte[] value = iterator.value();
            iterator.next();
            return value;
         }
      });
   }, entry -> {
      entry.getKey().close();
      RocksIterator rocksIterator = entry.getValue();
      if (rocksIterator != null) {
         rocksIterator.close();
      }
   });

   // Each expired value is either an ExpiryBucket holding several marshalled keys or a single key
   Flowable<MarshallableEntry<K, V>> expiredEntryFlowable = expiredFlowable.flatMap(expiredBytes -> {
      Object bucketKey = unmarshall(expiredBytes);
      if (bucketKey instanceof ExpiryBucket) {
         return Flowable.fromIterable(((ExpiryBucket) bucketKey).entries).flatMapMaybe(marshalledKey -> {
            ColumnFamilyHandle columnFamilyHandle = handler.getHandleForMarshalledKey(marshalledKey);
            MarshalledValue mv = handlePossiblyExpiredKey(columnFamilyHandle, marshalledKey, now);
            return mv == null ? Maybe.empty() : Maybe.just(entryFactory.create(unmarshall(marshalledKey), mv));
         });
      } else {
         // The bucketKey is an actual key
         ColumnFamilyHandle columnFamilyHandle = handler.getHandle(bucketKey);
         MarshalledValue mv = handlePossiblyExpiredKey(columnFamilyHandle, marshall(bucketKey), now);
         return mv == null ? Flowable.empty() : Flowable.just(entryFactory.create(bucketKey, mv));
      }
   });

   if (log.isTraceEnabled()) {
      // Note this tracing only works properly for one subscriber
      FlowableProcessor<MarshallableEntry<K, V>> mirrorEntries = UnicastProcessor.create();
      expiredEntryFlowable = expiredEntryFlowable.doOnEach(mirrorEntries)
            .doOnSubscribe(subscription -> log.tracef("Purging entries from RocksDBStore"));
      mirrorEntries.count().subscribe(count -> log.tracef("Purged %d entries from RocksDBStore", count));
   }
   return expiredEntryFlowable;
}
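The pattern worth noting above is Flowable.using wrapped around a native cursor: the first argument opens the RocksDB iterator, the second turns it into a lazy Iterable by way of an AbstractIterator, and the third closes everything whether the stream completes, fails, or is cancelled. A minimal sketch of the same shape over a BufferedReader instead of a RocksIterator (illustrative only, assuming RxJava 3; LineFlowable and lines() are made-up names):

import java.io.BufferedReader;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.infinispan.commons.util.AbstractIterator;

import io.reactivex.rxjava3.core.Flowable;

public class LineFlowable {
   public static Flowable<String> lines(String path) {
      return Flowable.using(
            // Resource supplier: the reader is opened only when a subscriber arrives
            () -> Files.newBufferedReader(Paths.get(path)),
            // Source: an AbstractIterator whose getNext() returns null at end of stream
            reader -> Flowable.fromIterable(() -> new AbstractIterator<String>() {
               @Override
               protected String getNext() {
                  try {
                     // readLine() returning null doubles as AbstractIterator's end-of-data signal
                     return reader.readLine();
                  } catch (IOException e) {
                     throw new UncheckedIOException(e);
                  }
               }
            }),
            // Disposer: runs on complete, error, or cancellation
            BufferedReader::close);
   }
}

Passing an Iterable lambda to Flowable.fromIterable, as the RocksDB snippet also does, means the iterator is only created when the Flowable is actually subscribed to, keeping the whole pipeline lazy.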