Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
Class: TableBucketReader, method: find().
/**
 * Attempts to locate something in a TableBucket that matches a particular key.
 *
 * @param soughtKey    A {@link BufferView} instance representing the Key we are looking for.
 * @param bucketOffset The current segment offset of the Table Bucket we are looking into.
 * @param timer        A {@link TimeoutTimer} for the operation.
 * @return A CompletableFuture that, when completed, will contain the desired result, or null if no such result
 * was found.
 */
CompletableFuture<ResultT> find(BufferView soughtKey, long bucketOffset, TimeoutTimer timer) {
    int maxReadLength = getMaxReadLength();

    // Read the Key at the current offset and check it against the sought one.
    AtomicLong offset = new AtomicLong(bucketOffset);
    CompletableFuture<ResultT> result = new CompletableFuture<>();
    Futures.loop(() -> !result.isDone(), () -> {
        // Read the Key from the Segment. Copy it out of the Segment to avoid losing it or getting corrupted
        // values back in case of a cache eviction. See {@link ReadResult#setCopyOnRead(boolean)}.
        ReadResult readResult = this.segment.read(offset.get(), maxReadLength, timer.getRemaining());
        val reader = getReader(soughtKey, offset.get(), timer);
        AsyncReadResultProcessor.process(readResult, reader, this.executor);
        return reader.getResult().thenComposeAsync(r -> {
            SearchContinuation sc = processResult(r, soughtKey);
            if (sc == SearchContinuation.ResultFound || sc == SearchContinuation.NoResult) {
                // We either definitely found the result or definitely did not. Even when nothing was found, we
                // may still have a partial result to return to the caller (i.e., a TableEntry with no value but
                // with a version, which indicates a deleted entry, as opposed to a nonexistent one).
                result.complete(r);
            } else {
                return this.getBackpointer.apply(this.segment, offset.get(), timer.getRemaining()).thenAccept(newOffset -> {
                    offset.set(newOffset);
                    if (newOffset < 0) {
                        // Could not find anything.
                        result.complete(null);
                    }
                });
            }

            return CompletableFuture.completedFuture(null);
        }, this.executor);
    }, this.executor).exceptionally(ex -> {
        result.completeExceptionally(ex);
        return null;
    });
    return result;
}
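The pattern above, a Futures.loop that keeps chasing backpointers until an external result future completes, with every intermediate call drawing its deadline from the same TimeoutTimer, can be shown in isolation. The sketch below is a minimal, hypothetical version: BackpointerLoopSketch, getBackpointer and findLastOffset are stand-ins and not part of Pravega; only TimeoutTimer, Futures.loop and getRemaining() come from the snippet above.

import io.pravega.common.TimeoutTimer;
import io.pravega.common.concurrent.Futures;

import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.atomic.AtomicLong;

public class BackpointerLoopSketch {
    // Hypothetical async lookup standing in for getBackpointer.apply(...): returns the next offset
    // in the chain, or a negative value when there is nothing left to follow.
    private CompletableFuture<Long> getBackpointer(long offset, Duration timeout) {
        return CompletableFuture.completedFuture(-1L);
    }

    // Loops until 'result' completes; each iteration asks the shared TimeoutTimer how much of the
    // original budget is left and passes only that remainder to the next async call.
    public CompletableFuture<Long> findLastOffset(long startOffset, Duration timeout, ScheduledExecutorService executor) {
        TimeoutTimer timer = new TimeoutTimer(timeout);
        AtomicLong offset = new AtomicLong(startOffset);
        CompletableFuture<Long> result = new CompletableFuture<>();
        Futures.loop(() -> !result.isDone(), () -> getBackpointer(offset.get(), timer.getRemaining())
                .thenAccept(next -> {
                    if (next < 0) {
                        // End of the chain: complete with the last offset we visited.
                        result.complete(offset.get());
                    } else {
                        offset.set(next);
                    }
                }), executor).exceptionally(ex -> {
            result.completeExceptionally(ex);
            return null;
        });
        return result;
    }
}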
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
Class: TableEntryDeltaIterator, method: toEntries().
private CompletableFuture<List<Map.Entry<DeltaIteratorState, TableEntry>>> toEntries(long startOffset) {
    TimeoutTimer timer = new TimeoutTimer(this.fetchTimeout);
    int length = Math.min(maxBytesToRead, MAX_READ_SIZE);
    if (endOfSegment()) {
        return CompletableFuture.completedFuture(Collections.emptyList());
    }

    ReadResult result = this.segment.read(startOffset, length, timer.getRemaining());
    return AsyncReadResultProcessor.processAll(result, this.executor, timer.getRemaining())
            .thenApply(data -> parseEntries(data, startOffset, length));
}
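toEntries shows the basic TimeoutTimer idiom in these snippets: wrap a single Duration budget once, then hand timer.getRemaining() to each successive stage, so later stages only get whatever time is left. The following is a minimal, self-contained demonstration of that shrinking budget; the class and its printouts are illustrative, not Pravega code.

import io.pravega.common.TimeoutTimer;

import java.time.Duration;

public class TimeoutTimerBasics {
    public static void main(String[] args) throws InterruptedException {
        // One budget for a multi-step operation, as toEntries does with this.fetchTimeout.
        TimeoutTimer timer = new TimeoutTimer(Duration.ofSeconds(5));
        System.out.println("Budget before the read stage: " + timer.getRemaining());

        Thread.sleep(150); // Stand-in for the time spent reading from the segment.

        // The processing stage sees a smaller remaining budget; the original Duration is never handed out twice.
        System.out.println("Budget left for processing:   " + timer.getRemaining());
    }
}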
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
Class: TableCompactor, method: compact().
/**
 * Performs a compaction of the Table Segment. Refer to this class' Javadoc for a description of the compaction process.
 *
 * @param timer Timer for the operation.
 * @return A CompletableFuture that, when completed, indicates that the compaction has completed. When this future
 * completes, some of the Segment's Table Attributes may change to reflect the modifications to the Segment and/or
 * compaction progress. Notable exceptions:
 * <ul>
 * <li>{@link BadAttributeUpdateException} If the {@link TableAttributes#COMPACTION_OFFSET} changed while this method
 * was executing. In this case, no changes are made and the situation can be resolved with a retry.</li>
 * </ul>
 */
CompletableFuture<Void> compact(TimeoutTimer timer) {
    long startOffset = getCompactionStartOffset();
    int maxLength = (int) Math.min(this.config.getMaxCompactionSize(), getLastIndexedOffset() - startOffset);
    if (startOffset < 0 || maxLength < 0) {
        // The Segment's Compaction offset must be a value between 0 and the current LastIndexedOffset.
        return Futures.failedFuture(new DataCorruptionException(String.format(
                "%s: '%s' has CompactionStartOffset=%s and CompactionLength=%s.",
                this.traceLogId, this.metadata.getName(), startOffset, maxLength)));
    } else if (maxLength == 0) {
        // Nothing to do.
        log.debug("{}: Up to date.", this.traceLogId);
        return CompletableFuture.completedFuture(null);
    }

    // Read the Table Entries beginning at the specified offset, without exceeding the given maximum length.
    return getRetryPolicy().runAsync(
            () -> readCandidates(startOffset, maxLength, timer)
                    .thenComposeAsync(candidates -> excludeObsolete(candidates, timer)
                            .thenComposeAsync(v -> copyCandidates(candidates, timer), this.executor), this.executor),
            this.executor);
}
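compact validates its inputs before spending any of the timer's budget, returning Futures.failedFuture for corrupt state and an already-completed future when there is nothing to do. Below is a hedged sketch of that same shape; CompactionGuardSketch, compactRange, readWork and the exception message are illustrative stand-ins, while Futures.failedFuture and TimeoutTimer are the real Pravega types used above.

import io.pravega.common.TimeoutTimer;
import io.pravega.common.concurrent.Futures;

import java.util.concurrent.CompletableFuture;

public class CompactionGuardSketch {
    // Hypothetical stage standing in for readCandidates/excludeObsolete/copyCandidates.
    private CompletableFuture<Void> readWork(long startOffset, int maxLength, TimeoutTimer timer) {
        return CompletableFuture.completedFuture(null);
    }

    // Validate cheaply first; only a valid, non-empty range consumes the timer's budget.
    public CompletableFuture<Void> compactRange(long startOffset, int maxLength, TimeoutTimer timer) {
        if (startOffset < 0 || maxLength < 0) {
            // Invalid state: fail the future instead of throwing, so callers can compose it like any other stage.
            return Futures.failedFuture(new IllegalStateException(
                    String.format("Invalid compaction range: offset=%s, length=%s.", startOffset, maxLength)));
        } else if (maxLength == 0) {
            // Nothing to do; do not touch the timer at all.
            return CompletableFuture.completedFuture(null);
        }
        return readWork(startOffset, maxLength, timer);
    }
}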
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
Class: WriterTableProcessor, method: flush().
@Override
public CompletableFuture<WriterFlushResult> flush(boolean force, Duration timeout) {
    Exceptions.checkNotClosed(this.closed.get(), this);
    if (!force && !mustFlush()) {
        return CompletableFuture.completedFuture(new WriterFlushResult());
    }

    TimeoutTimer timer = new TimeoutTimer(timeout);
    return this.connector.getSegment(timer.getRemaining())
            .thenComposeAsync(segment -> flushWithSingleRetry(segment, timer)
                    .thenComposeAsync(flushResult -> {
                        flushComplete(flushResult);
                        return compactIfNeeded(segment, flushResult.highestCopiedOffset, timer)
                                .thenApply(v -> flushResult);
                    }, this.executor), this.executor);
}
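flush is the API-boundary version of the idiom: the public method accepts a Duration, wraps it in a TimeoutTimer exactly once, and every composed stage afterwards calls timer.getRemaining() rather than reusing the full timeout. A minimal sketch of that boundary, assuming hypothetical stand-ins (FlushBoundarySketch, fetchSegment, writePending) in place of the real connector and flush helpers:

import io.pravega.common.TimeoutTimer;

import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;

public class FlushBoundarySketch {
    // Hypothetical stand-ins for connector.getSegment(...) and flushWithSingleRetry(...).
    private CompletableFuture<String> fetchSegment(Duration timeout) {
        return CompletableFuture.completedFuture("segment");
    }

    private CompletableFuture<Integer> writePending(String segment, Duration timeout) {
        return CompletableFuture.completedFuture(0);
    }

    // Public entry point: the caller supplies one Duration; internally it becomes a single TimeoutTimer
    // whose remaining budget is threaded through every async stage.
    public CompletableFuture<Integer> flush(Duration timeout, Executor executor) {
        TimeoutTimer timer = new TimeoutTimer(timeout);
        return fetchSegment(timer.getRemaining())
                .thenComposeAsync(segment -> writePending(segment, timer.getRemaining()), executor);
    }
}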
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
Class: BTreeIndex, method: initialize().
/**
* Initializes the BTreeIndex by fetching metadata from the external data source. This method must be invoked (and
* completed) prior to executing any other operation on this instance.
*
* @param timeout Timeout for the operation.
* @return A CompletableFuture that, when completed, will indicate that the operation completed.
*/
public CompletableFuture<Void> initialize(Duration timeout) {
    if (isInitialized()) {
        log.warn("{}: Reinitializing.", this.traceObjectId);
    }

    TimeoutTimer timer = new TimeoutTimer(timeout);
    return this.getLength.apply(timer.getRemaining()).thenCompose(indexInfo -> {
        if (indexInfo.getIndexLength() <= FOOTER_LENGTH) {
            // Empty index.
            setState(indexInfo.getIndexLength(), PagePointer.NO_OFFSET, 0);
            this.statistics = this.maintainStatistics ? Statistics.EMPTY : null;
            return CompletableFuture.completedFuture(null);
        }

        long footerOffset = indexInfo.getRootPointer() >= 0
                ? indexInfo.getRootPointer()
                : getFooterOffset(indexInfo.getIndexLength());
        return this.read.apply(footerOffset, FOOTER_LENGTH, false, timer.getRemaining())
                .thenAcceptAsync(footer -> initialize(footer, footerOffset, indexInfo.getIndexLength()), this.executor)
                .thenCompose(v -> loadStatistics(timer.getRemaining()))
                .thenRun(() -> log.info("{}: Initialized. State = {}, Stats = {}.",
                        this.traceObjectId, this.state, this.statistics));
    });
}
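initialize applies the same discipline to a one-time setup path: the metadata reads share one TimeoutTimer, and the cheap empty-index branch short-circuits without spending any more of the budget. The sketch below is illustrative only; IndexBootstrapSketch, fetchLength and readFooter are assumptions, not Pravega APIs.

import io.pravega.common.TimeoutTimer;

import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;

public class IndexBootstrapSketch {
    private final AtomicBoolean initialized = new AtomicBoolean(false);

    // Hypothetical metadata fetches standing in for getLength.apply(...) and read.apply(...).
    private CompletableFuture<Long> fetchLength(Duration timeout) {
        return CompletableFuture.completedFuture(0L);
    }

    private CompletableFuture<byte[]> readFooter(long offset, Duration timeout) {
        return CompletableFuture.completedFuture(new byte[0]);
    }

    // One TimeoutTimer covers both metadata reads; an empty index completes immediately
    // without consuming any more of the budget.
    public CompletableFuture<Void> initialize(Duration timeout) {
        TimeoutTimer timer = new TimeoutTimer(timeout);
        return fetchLength(timer.getRemaining()).thenCompose(length -> {
            if (length <= 0) {
                initialized.set(true);
                return CompletableFuture.completedFuture(null);
            }
            return readFooter(length - 1, timer.getRemaining())
                    .thenRun(() -> initialized.set(true));
        });
    }
}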