Use of de.invesdwin.util.collections.iterable.concurrent.AProducerQueueIterator in the project invesdwin-context-persistence by subes.
The example is taken from the class ATimeSeriesUpdater, method doUpdate:
private void doUpdate() {
    final Pair<FDate, List<V>> pair = lookupTable.prepareForUpdate();
    final FDate updateFrom = pair.getFirst();
    final List<V> lastValues = pair.getSecond();
    Assertions.checkNotNull(lastValues);
    ICloseableIterable<? extends V> source = getSource(updateFrom);
    if (updateFrom != null) {
        // ensure we add no duplicate values
        source = new ASkippingIterable<V>(source) {
            @Override
            protected boolean skip(final V element) {
                return extractTime(element).isBefore(updateFrom);
            }
        };
    }
    final FlatteningIterable<? extends V> flatteningSources = new FlatteningIterable<>(lastValues, source);
    try (ICloseableIterator<? extends V> elements = flatteningSources.iterator()) {
        final ICloseableIterator<UpdateProgress> batchWriterProducer = new ICloseableIterator<UpdateProgress>() {

            @Override
            public boolean hasNext() {
                return elements.hasNext();
            }

            @Override
            public UpdateProgress next() {
                // collect elements into one batch until onElement signals that the batch is complete
                final UpdateProgress progress = new UpdateProgress();
                while (elements.hasNext()) {
                    final V element = elements.next();
                    if (progress.onElement(element)) {
                        return progress;
                    }
                }
                return progress;
            }

            @Override
            public void close() {
                elements.close();
            }
        };

        // do IO in a different thread than batch filling
        try (ACloseableIterator<UpdateProgress> batchProducer = new AProducerQueueIterator<UpdateProgress>(
                getClass().getSimpleName() + "_batchProducer_" + table.hashKeyToString(key), BATCH_QUEUE_SIZE) {
            @Override
            protected ICloseableIterator<ATimeSeriesUpdater<K, V>.UpdateProgress> newProducer() {
                return batchWriterProducer;
            }
        }) {
            final AtomicInteger flushIndex = new AtomicInteger();
            try (ACloseableIterator<UpdateProgress> parallelConsumer = new AParallelChunkConsumerIterator<UpdateProgress, UpdateProgress>(
                    getClass().getSimpleName() + "_batchConsumer_" + table.hashKeyToString(key), batchProducer,
                    BATCH_WRITER_THREADS) {
                @Override
                protected UpdateProgress doWork(final UpdateProgress request) {
                    // each batch is written with the next flush index
                    request.write(flushIndex.incrementAndGet());
                    return request;
                }
            }) {
                // aggregate counts and the min/max time over all finished batches
                while (parallelConsumer.hasNext()) {
                    final UpdateProgress progress = parallelConsumer.next();
                    count += progress.getCount();
                    if (minTime == null) {
                        minTime = progress.getMinTime();
                    }
                    maxTime = progress.getMaxTime();
                }
            }
        }
    }
}
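For context, here is a minimal, self-contained sketch of the same producer/consumer pairing outside of ATimeSeriesUpdater. It relies only on the constructor and method signatures visible in the listing above; the package locations of ICloseableIterator, ACloseableIterator and AParallelChunkConsumerIterator, as well as the iterator names, queue size and thread count, are illustrative assumptions rather than values taken from the project:

import de.invesdwin.util.collections.iterable.ACloseableIterator;
import de.invesdwin.util.collections.iterable.ICloseableIterator;
import de.invesdwin.util.collections.iterable.concurrent.AParallelChunkConsumerIterator;
import de.invesdwin.util.collections.iterable.concurrent.AProducerQueueIterator;

public final class ProducerQueueSketch {

    public static void main(final String[] args) {
        // a trivial source iterator that yields the numbers 0..99
        final ICloseableIterator<Integer> numbers = new ICloseableIterator<Integer>() {
            private int next = 0;

            @Override
            public boolean hasNext() {
                return next < 100;
            }

            @Override
            public Integer next() {
                return next++;
            }

            @Override
            public void close() {
                // nothing to release in this sketch
            }
        };
        // produce the numbers on a background thread into a bounded queue (size 10 is illustrative)
        try (ACloseableIterator<Integer> producer = new AProducerQueueIterator<Integer>("sketch_producer", 10) {
            @Override
            protected ICloseableIterator<Integer> newProducer() {
                return numbers;
            }
        }) {
            // consume the queue with 4 worker threads, squaring each number
            try (ACloseableIterator<Integer> consumer = new AParallelChunkConsumerIterator<Integer, Integer>(
                    "sketch_consumer", producer, 4) {
                @Override
                protected Integer doWork(final Integer request) {
                    return request * request;
                }
            }) {
                long sum = 0;
                while (consumer.hasNext()) {
                    sum += consumer.next();
                }
                System.out.println("sum of squares: " + sum);
            }
        }
    }
}

As in doUpdate above, the point of the pairing is that the producer fills a bounded queue on its own thread while doWork runs on a pool of worker threads, so slow per-batch work never blocks batch production beyond the queue capacity.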