Usage example of de.invesdwin.util.collections.iterable.FlatteningIterable in the project invesdwin-context-persistence by subes: class ATimeSeriesUpdater, method doUpdate.
// Incrementally updates the time series storage: resumes from the timestamp of
// the previous update, filters out already-persisted elements, groups the
// remaining elements into batches, and writes them through an asynchronous
// producer/consumer pipeline so that disk IO overlaps with batch filling.
private void doUpdate() {
// first = timestamp to resume from (null on a fresh update),
// second = values from the previous update that need to be written again
final Pair<FDate, List<V>> pair = lookupTable.prepareForUpdate();
final FDate updateFrom = pair.getFirst();
final List<V> lastValues = pair.getSecond();
Assertions.checkNotNull(lastValues);
ICloseableIterable<? extends V> source = getSource(updateFrom);
if (updateFrom != null) {
// ensure we add no duplicate values
source = new ASkippingIterable<V>(source) {
@Override
protected boolean skip(final V element) {
// drop everything strictly before the resume timestamp; elements at or
// after updateFrom are (re)written
return extractTime(element).isBefore(updateFrom);
}
};
}
// re-emit the previously persisted lastValues first, then the fresh source
final FlatteningIterable<? extends V> flatteningSources = new FlatteningIterable<>(lastValues, source);
try (ICloseableIterator<? extends V> elements = flatteningSources.iterator()) {
// lazily groups elements into UpdateProgress batches; each next() call fills
// one batch until the progress signals it is full (onElement returns true)
// or the input is exhausted
final ICloseableIterator<UpdateProgress> batchWriterProducer = new ICloseableIterator<UpdateProgress>() {
@Override
public boolean hasNext() {
return elements.hasNext();
}
@Override
public UpdateProgress next() {
final UpdateProgress progress = new UpdateProgress();
while (elements.hasNext()) {
final V element = elements.next();
if (progress.onElement(element)) {
// batch is full
return progress;
}
}
// input exhausted: return the final, possibly partial batch
return progress;
}
@Override
public void close() {
elements.close();
}
};
// do IO in a different thread than batch filling
try (ACloseableIterator<UpdateProgress> batchProducer = new AProducerQueueIterator<UpdateProgress>(getClass().getSimpleName() + "_batchProducer_" + table.hashKeyToString(key), BATCH_QUEUE_SIZE) {
@Override
protected ICloseableIterator<ATimeSeriesUpdater<K, V>.UpdateProgress> newProducer() {
return batchWriterProducer;
}
}) {
// monotonically increasing counter so each written batch gets a unique flush index
final AtomicInteger flushIndex = new AtomicInteger();
// write batches concurrently with BATCH_WRITER_THREADS worker threads
try (ACloseableIterator<UpdateProgress> parallelConsumer = new AParallelChunkConsumerIterator<UpdateProgress, UpdateProgress>(getClass().getSimpleName() + "_batchConsumer_" + table.hashKeyToString(key), batchProducer, BATCH_WRITER_THREADS) {
@Override
protected UpdateProgress doWork(final UpdateProgress request) {
request.write(flushIndex.incrementAndGet());
return request;
}
}) {
// drain the pipeline and aggregate statistics over the written batches
while (parallelConsumer.hasNext()) {
final UpdateProgress progress = parallelConsumer.next();
count += progress.getCount();
if (minTime == null) {
// only the first consumed batch contributes the minimum time
minTime = progress.getMinTime();
}
// NOTE(review): assumes batches are consumed in source order so that the
// last batch carries the overall maximum time -- confirm the ordering
// guarantee of AParallelChunkConsumerIterator
maxTime = progress.getMaxTime();
}
}
}
}
}
Another usage example of de.invesdwin.util.collections.iterable.FlatteningIterable in the project invesdwin-context-persistence by subes: a later revision of class ATimeSeriesUpdater, method doUpdate.
// Incrementally updates the time series storage (newer variant): resumes from
// the previous update, reuses a single mutable UpdateProgress as the batch
// carrier, and delegates the actual writing to flush().
private void doUpdate() {
final PrepareForUpdateResult<V> prepareForUpdateResult = lookupTable.prepareForUpdate(shouldRedoLastFile());
// timestamp to resume from; null on a fresh update
final FDate updateFrom = prepareForUpdateResult.getUpdateFrom();
// values from the previous update that need to be written again
final List<V> lastValues = prepareForUpdateResult.getLastValues();
// address offset at which writing continues
final long initialAddressOffset = prepareForUpdateResult.getAddressOffset();
final ICloseableIterable<? extends V> source = getSource(updateFrom);
// re-emit the previously persisted lastValues first, then the fresh source
final FlatteningIterable<? extends V> flatteningSources = new FlatteningIterable<>(lastValues, source);
try (ICloseableIterator<UpdateProgress> batchWriterProducer = new ICloseableIterator<UpdateProgress>() {
// single reusable batch instance; reset() is invoked at the start of each next()
private final UpdateProgress progress = new UpdateProgress(initialAddressOffset);
private final ICloseableIterator<? extends V> elements = flatteningSources.iterator();
@Override
public boolean hasNext() {
return elements.hasNext();
}
@Override
public UpdateProgress next() {
progress.reset();
try {
while (true) {
// relies on NoSuchElementException from elements.next() to detect the
// end of input instead of calling hasNext() on every iteration
final V element = elements.next();
final FDate endTime = extractEndTime(element);
if (updateFrom != null) {
if (endTime.isBeforeNotNullSafe(updateFrom)) {
// ensure we add no duplicate values
continue;
}
}
if (progress.onElement(element, endTime)) {
// batch is full
return progress;
}
}
} catch (NoSuchElementException e) {
// end reached
if (progress.firstElement == null) {
// nothing was collected in this round: rethrow so the caller sees a
// proper iterator end instead of an empty batch
throw e;
}
}
// return the final, partially filled batch
return progress;
}
@Override
public void close() {
elements.close();
progress.close();
}
}) {
// hand the batch stream to the write pipeline
flush(batchWriterProducer);
}
}
Usage example of de.invesdwin.util.collections.iterable.FlatteningIterable in the project invesdwin-context-persistence by subes: class ASegmentedTimeSeriesStorageCache, method readRangeValues.
/**
 * Reads all values within the requested time range in ascending order by lazily
 * querying every storage segment that overlaps the range and flattening the
 * per-segment results into a single iterable.
 *
 * @param from             lower bound of the requested range
 * @param to               upper bound of the requested range
 * @param readLock         external lock to combine with each segment's table lock
 * @param skipFileFunction callback that may skip whole files during the scan
 * @return a lazy iterable over the matching values; empty when no data is stored
 */
public ICloseableIterable<V> readRangeValues(final FDate from, final FDate to, final Lock readLock, final ISkipFileFunction skipFileFunction) {
    // abort early when no data is stored at all
    final FDate firstAvailableSegmentFrom = getFirstAvailableSegmentFrom(key);
    if (firstAvailableSegmentFrom == null) {
        return EmptyCloseableIterable.getInstance();
    }
    final FDate lastAvailableSegmentTo = getLastAvailableSegmentTo(key, to);
    if (lastAvailableSegmentTo == null) {
        return EmptyCloseableIterable.getInstance();
    }
    // clamp the requested range to the available data to prevent unnecessary segment calculations
    final FDate clampedFrom = FDates.max(from, firstAvailableSegmentFrom);
    final FDate clampedTo = FDates.min(to, lastAvailableSegmentTo);
    final ICloseableIterable<TimeRange> segments = getSegments(clampedFrom, clampedTo, lastAvailableSegmentTo);
    // each segment becomes a lazy sub-query; initialization and locking happen
    // only once the sub-iterator is actually requested
    return new FlatteningIterable<V>(new ATransformingIterable<TimeRange, ICloseableIterable<V>>(segments) {
        @Override
        protected ICloseableIterable<V> transform(final TimeRange segment) {
            return new ICloseableIterable<V>() {
                @Override
                public ICloseableIterator<V> iterator() {
                    final SegmentedKey<K> segmentedKey = new SegmentedKey<K>(key, segment);
                    maybeInitSegment(segmentedKey);
                    // restrict the clamped range further to this segment's own bounds
                    final FDate queryFrom = FDates.max(clampedFrom, segment.getFrom());
                    final FDate queryTo = FDates.min(clampedTo, segment.getTo());
                    final Lock compositeReadLock = Locks.newCompositeLock(readLock, segmentedTable.getTableLock(segmentedKey).readLock());
                    return segmentedTable.getLookupTableCache(segmentedKey).readRangeValues(queryFrom, queryTo, compositeReadLock, skipFileFunction);
                }
            };
        }
    });
}
Usage example of de.invesdwin.util.collections.iterable.FlatteningIterable in the project invesdwin-context-persistence by subes: class ASegmentedTimeSeriesStorageCache, method readRangeValuesReverse.
/**
 * Reads all values within the requested time range in descending order by lazily
 * querying each overlapping storage segment in reverse and flattening the
 * per-segment results into a single iterable. In reverse mode {@code from} is
 * the newer timestamp and {@code to} the older one.
 *
 * @param from             newer bound of the requested range
 * @param to               older bound of the requested range
 * @param readLock         external lock to combine with each segment's table lock
 * @param skipFileFunction callback that may skip whole files during the scan
 * @return a lazy iterable over the matching values in reverse order
 */
public ICloseableIterable<V> readRangeValuesReverse(final FDate from, final FDate to, final Lock readLock, final ISkipFileFunction skipFileFunction) {
    final FDate firstAvailableSegmentFrom = getFirstAvailableSegmentFrom(key);
    final FDate lastAvailableSegmentTo = getLastAvailableSegmentTo(key, to);
    // clamp the requested range to the available data to prevent unnecessary segment calculations
    final FDate clampedFrom = FDates.min(from, lastAvailableSegmentTo);
    final FDate clampedTo = FDates.max(to, firstAvailableSegmentFrom);
    final ICloseableIterable<TimeRange> reverseSegments = getSegmentsReverse(clampedFrom, clampedTo, lastAvailableSegmentTo);
    // each segment becomes a lazy sub-query; initialization and locking happen
    // only once the sub-iterator is actually requested
    return new FlatteningIterable<V>(new ATransformingIterable<TimeRange, ICloseableIterable<V>>(reverseSegments) {
        @Override
        protected ICloseableIterable<V> transform(final TimeRange segment) {
            return new ICloseableIterable<V>() {
                @Override
                public ICloseableIterator<V> iterator() {
                    final SegmentedKey<K> segmentedKey = new SegmentedKey<K>(key, segment);
                    maybeInitSegment(segmentedKey);
                    // restrict the clamped range further to this segment's own bounds
                    final FDate queryFrom = FDates.min(clampedFrom, segment.getTo());
                    final FDate queryTo = FDates.max(clampedTo, segment.getFrom());
                    final Lock compositeReadLock = Locks.newCompositeLock(readLock, segmentedTable.getTableLock(segmentedKey).readLock());
                    return segmentedTable.getLookupTableCache(segmentedKey).readRangeValuesReverse(queryFrom, queryTo, compositeReadLock, skipFileFunction);
                }
            };
        }
    });
}
Aggregations