Use of de.invesdwin.context.integration.retry.RetryLaterRuntimeException in project invesdwin-context-persistence by subes.
The class ASegmentedTimeSeriesStorageCache, method maybeInitSegment:
public boolean maybeInitSegment(final SegmentedKey<K> segmentedKey,
        final Function<SegmentedKey<K>, ICloseableIterable<? extends V>> source) {
    if (!assertValidSegment(segmentedKey)) {
        return false;
    }
    // 1. check the segment status in series storage
    final IReadWriteLock segmentTableLock = segmentedTable.getTableLock(segmentedKey);
    /*
     * We need this synchronized block so that concurrent initializers do not collide and fail to acquire the
     * write lock within 1 minute. The ReadWriteLock object should be safe to lock via the synchronized keyword
     * since no internal synchronization occurs on that object itself.
     */
    synchronized (segmentTableLock) {
        final SegmentStatus status = getSegmentStatusWithReadLock(segmentedKey, segmentTableLock);
        // 2. if the status is missing or still INITIALIZING, run the segment update and mark the segment as
        // completed afterwards
        if (status == null || status == SegmentStatus.INITIALIZING) {
            final ILock segmentWriteLock = segmentTableLock.writeLock();
            try {
                if (!segmentWriteLock.tryLock(1, TimeUnit.MINUTES)) {
                    /*
                     * This should not happen here because the segment should not yet exist. Though if it does
                     * happen, we would rather get an exception than a deadlock!
                     */
                    throw Locks.getLockTrace()
                            .handleLockException(segmentWriteLock.getName(),
                                    new RetryLaterRuntimeException("Write lock could not be acquired for table ["
                                            + segmentedTable.getName() + "] and key [" + segmentedKey
                                            + "]. Please ensure all iterators are closed!"));
                }
            } catch (final InterruptedException e1) {
                throw new RuntimeException(e1);
            }
            try {
                // no double-checked locking required between the read and write lock here because of the outer
                // synchronized block
                if (status == SegmentStatus.INITIALIZING) {
                    // initialization got aborted, retry from a fresh state
                    segmentedTable.deleteRange(segmentedKey);
                    storage.getSegmentStatusTable().delete(hashKey, segmentedKey.getSegment());
                }
                initSegmentWithStatusHandling(segmentedKey, source);
                onSegmentCompleted(segmentedKey, readRangeValues(segmentedKey.getSegment().getFrom(),
                        segmentedKey.getSegment().getTo(), DisabledLock.INSTANCE, null));
                return true;
            } finally {
                segmentWriteLock.unlock();
            }
        }
    }
    // 3. if the segment is already completed, do nothing
    return false;
}
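The locking idiom above is worth isolating: the outer synchronized block serializes write-lock acquisition so that two initializers cannot both time out on tryLock, and a timeout is converted into a RetryLaterRuntimeException instead of a deadlock. The following is a minimal, self-contained sketch of that idiom using plain JDK locks; the class and method names are hypothetical, and only RetryLaterRuntimeException comes from the snippet above.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import de.invesdwin.context.integration.retry.RetryLaterRuntimeException;

public final class RetryableWriteLockSketch {

    private final ReadWriteLock tableLock = new ReentrantReadWriteLock();

    public void runExclusively(final Runnable initTask) {
        // serialize write-lock acquisition so two initializers cannot both time out on tryLock
        synchronized (tableLock) {
            final Lock writeLock = tableLock.writeLock();
            try {
                if (!writeLock.tryLock(1, TimeUnit.MINUTES)) {
                    // a reader holding the lock this long likely means a leaked iterator;
                    // fail fast with a retryable exception instead of deadlocking
                    throw new RetryLaterRuntimeException(
                            "write lock could not be acquired, ensure all iterators are closed");
                }
            } catch (final InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            }
            try {
                initTask.run();
            } finally {
                writeLock.unlock();
            }
        }
    }
}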
Use of de.invesdwin.context.integration.retry.RetryLaterRuntimeException in project invesdwin-context-persistence by subes.
The class ASegmentedTimeSeriesStorageCache, method initSegment:
private void initSegment(final SegmentedKey<K> segmentedKey,
        final Function<SegmentedKey<K>, ICloseableIterable<? extends V>> source) {
    try {
        final ITimeSeriesUpdater<SegmentedKey<K>, V> updater = newSegmentUpdater(segmentedKey, source);
        final Callable<Void> task = new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                // write lock is reentrant
                updater.update();
                return null;
            }
        };
        final String taskName = "Loading " + getElementsName() + " for " + hashKey;
        final Callable<Percent> progress = new Callable<Percent>() {
            @Override
            public Percent call() throws Exception {
                return updater.getProgress();
            }
        };
        TaskInfoCallable.of(taskName, task, progress).call();
        final FDate minTime = updater.getMinTime();
        if (minTime != null) {
            final FDate segmentFrom = segmentedKey.getSegment().getFrom();
            final TimeRange prevSegment = getSegmentFinder(segmentedKey.getKey()).query()
                    .getValue(segmentFrom.addMilliseconds(-1));
            if (prevSegment.getTo().equalsNotNullSafe(segmentFrom) && minTime.isBeforeOrEqualTo(segmentFrom)) {
                throw new IllegalStateException(segmentedKey + ": minTime [" + minTime
                        + "] should not be before or equal to segmentFrom [" + segmentFrom
                        + "] when overlapping segments are used");
            } else if (minTime.isBefore(segmentFrom)) {
                throw new IllegalStateException(segmentedKey + ": minTime [" + minTime
                        + "] should not be before segmentFrom [" + segmentFrom
                        + "] when non overlapping segments are used");
            }
            final FDate maxTime = updater.getMaxTime();
            final FDate segmentTo = segmentedKey.getSegment().getTo();
            if (maxTime.isAfter(segmentTo)) {
                throw new IllegalStateException(segmentedKey + ": maxTime [" + maxTime
                        + "] should not be after segmentTo [" + segmentTo + "]");
            }
        }
    } catch (final Throwable t) {
        if (Throwables.isCausedByType(t, IncompleteUpdateFoundException.class)) {
            segmentedTable.deleteRange(new SegmentedKey<K>(segmentedKey.getKey(), segmentedKey.getSegment()));
            throw new RetryLaterRuntimeException(t);
        } else {
            throw Throwables.propagate(t);
        }
    }
}
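By contract, the RetryLaterRuntimeException thrown here signals that the incomplete update was already cleaned up via deleteRange, so the initialization is safe to attempt again. In the invesdwin framework this exception is normally picked up by its retry handling; the bounded loop below is only a hypothetical sketch (hypothetical class name and constant) of what a manual caller would do with that contract.

import de.invesdwin.context.integration.retry.RetryLaterRuntimeException;

public final class SegmentInitRetrySketch {

    private static final int MAX_TRIES = 3;

    public static void initWithRetry(final Runnable initSegment) {
        for (int tries = 1;; tries++) {
            try {
                initSegment.run();
                return;
            } catch (final RetryLaterRuntimeException e) {
                if (tries >= MAX_TRIES) {
                    throw e;
                }
                // the aborted segment data was already deleted, so the retry starts from a fresh state
            }
        }
    }
}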
Use of de.invesdwin.context.integration.retry.RetryLaterRuntimeException in project invesdwin-context-persistence by subes.
The class TimeSeriesStorageCache, method newResult:
private SerializingCollection<V> newResult(final String method, final MemoryFileSummary summary,
        final Lock readLock) {
    final TextDescription name = new TextDescription("%s[%s]: %s(%s)", ATimeSeriesUpdater.class.getSimpleName(),
            hashKey, method, summary);
    final File memoryFile = new File(summary.getMemoryResourceUri());
    return new SerializingCollection<V>(name, memoryFile, true) {

        @Override
        protected ISerde<V> newSerde() {
            return new ISerde<V>() {
                @Override
                public V fromBytes(final byte[] bytes) {
                    return valueSerde.fromBytes(bytes);
                }

                @Override
                public V fromBuffer(final IByteBuffer buffer, final int length) {
                    return valueSerde.fromBuffer(buffer, length);
                }

                @Override
                public int toBuffer(final IByteBuffer buffer, final V obj) {
                    throw new UnsupportedOperationException();
                }

                @Override
                public byte[] toBytes(final V obj) {
                    throw new UnsupportedOperationException();
                }
            };
        }

        @Override
        protected InputStream newFileInputStream(final File file) throws IOException {
            if (TimeseriesProperties.FILE_BUFFER_CACHE_MMAP_ENABLED) {
                readLock.lock();
                final MemoryMappedFile mmapFile = FileBufferCache.getFile(hashKey, summary.getMemoryResourceUri());
                if (mmapFile.incrementRefCount()) {
                    return new MmapInputStream(readLock, summary.newBuffer(mmapFile).asInputStream(), mmapFile);
                } else {
                    readLock.unlock();
                }
            }
            if (TimeseriesProperties.FILE_BUFFER_CACHE_SEGMENTS_ENABLED) {
                readLock.lock();
                // the file buffer cache will close the file quickly
                final PreLockedBufferedFileDataInputStream in = new PreLockedBufferedFileDataInputStream(readLock,
                        memoryFile);
                in.position(summary.getMemoryOffset());
                in.limit(summary.getMemoryOffset() + summary.getMemoryLength());
                return in;
            } else {
                // keep the file input stream open as short as possible to prevent a too many open files error
                readLock.lock();
                try (BufferedFileDataInputStream in = new BufferedFileDataInputStream(memoryFile)) {
                    in.position(summary.getMemoryOffset());
                    in.limit(summary.getMemoryOffset() + summary.getMemoryLength());
                    final PooledFastByteArrayOutputStream bos = PooledFastByteArrayOutputStream.newInstance();
                    IOUtils.copy(in, bos.asNonClosing());
                    return bos.asInputStream();
                } catch (final FileNotFoundException e) {
                    // maybe retry because of this in the outer iterator?
                    throw new RetryLaterRuntimeException(
                            "File might have been deleted in the mean time between read locks: "
                                    + file.getAbsolutePath(),
                            e);
                } finally {
                    readLock.unlock();
                }
            }
        }

        @Override
        protected Integer getFixedLength() {
            return fixedLength;
        }

        @Override
        protected OutputStream newCompressor(final OutputStream out) {
            return storage.getCompressionFactory().newCompressor(out, ATimeSeriesUpdater.LARGE_COMPRESSOR);
        }

        @Override
        protected InputStream newDecompressor(final InputStream inputStream) {
            return storage.getCompressionFactory().newDecompressor(inputStream);
        }
    };
}
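The branches of newFileInputStream all follow the same discipline: the read lock is acquired before a stream over the shared file is handed out, and ownership of the lock travels with the stream (MmapInputStream, PreLockedBufferedFileDataInputStream), so unlock happens in close() rather than on method exit. A simplified sketch of such a pre-locked stream, with a hypothetical name and plain JDK types, could look like this:

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.locks.Lock;

public class PreLockedInputStreamSketch extends FilterInputStream {

    private final Lock readLock;
    private boolean closed;

    // the caller must have acquired readLock before constructing this stream
    public PreLockedInputStreamSketch(final Lock preAcquiredReadLock, final InputStream delegate) {
        super(delegate);
        this.readLock = preAcquiredReadLock;
    }

    @Override
    public void close() throws IOException {
        if (!closed) {
            closed = true;
            try {
                super.close();
            } finally {
                // pairs with the lock() call that happened before construction
                readLock.unlock();
            }
        }
    }
}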
Use of de.invesdwin.context.integration.retry.RetryLaterRuntimeException in project invesdwin-context-persistence by subes.
The class FileLiveSegment, method getFlushedValues:
private SerializingCollection<V> getFlushedValues() {
    synchronized (this) {
        if (needsFlush) {
            values.flush();
            needsFlush = false;
        }
    }
    final TextDescription name = new TextDescription("%s[%s]: getFlushedValues()",
            FileLiveSegment.class.getSimpleName(), segmentedKey);
    return new SerializingCollection<V>(name, values.getFile(), true) {

        @Override
        protected ISerde<V> newSerde() {
            return historicalSegmentTable.newValueSerde();
        }

        @Override
        protected Integer getFixedLength() {
            return historicalSegmentTable.newValueFixedLength();
        }

        @Override
        protected OutputStream newCompressor(final OutputStream out) {
            return compressionFactory.newCompressor(out, LARGE_COMPRESSOR);
        }

        @Override
        protected InputStream newDecompressor(final InputStream inputStream) {
            return compressionFactory.newDecompressor(inputStream);
        }

        @Override
        protected InputStream newFileInputStream(final File file) throws IOException {
            // keep the file input stream open as short as possible to prevent a too many open files error
            try (InputStream fis = super.newFileInputStream(file)) {
                final PooledFastByteArrayOutputStream bos = PooledFastByteArrayOutputStream.newInstance();
                IOUtils.copy(fis, bos.asNonClosing());
                return bos.asInputStream();
            } catch (final FileNotFoundException e) {
                // maybe retry because of this in the outer iterator?
                throw new RetryLaterRuntimeException(
                        "File might have been deleted in the mean time between read locks: "
                                + file.getAbsolutePath(),
                        e);
            }
        }
    };
}
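The copy-then-close idiom in newFileInputStream generalizes beyond this class: read the requested bytes fully into memory inside the try-with-resources so the file descriptor is released immediately, and map a FileNotFoundException to RetryLaterRuntimeException because the file may legitimately disappear between lock windows. A minimal JDK-only sketch (hypothetical class and method names, without the pooled invesdwin streams) might be:

import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;

import de.invesdwin.context.integration.retry.RetryLaterRuntimeException;

public final class CopyThenCloseSketch {

    public static InputStream newMemoryBackedInputStream(final File file) throws IOException {
        // read everything while the stream is open, then let try-with-resources release the file handle
        try (InputStream in = new FileInputStream(file)) {
            return new ByteArrayInputStream(in.readAllBytes());
        } catch (final FileNotFoundException e) {
            // the file may have been deleted in the meantime; ask the caller to retry instead of failing hard
            throw new RetryLaterRuntimeException("File might have been deleted: " + file.getAbsolutePath(), e);
        }
    }
}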