use of io.pravega.common.TimeoutTimer in project pravega by pravega.
the class SegmentAggregator method beginReconciliation.
// endregion
// region Reconciliation
/**
* Initiates the Storage reconciliation procedure. Gets the current state of the Segment from Storage, and based on that,
* does one of the following:
* * Nothing, if the Storage agrees with the Metadata.
* * Throws a show-stopping DataCorruptionException (wrapped in a CompletionException) if the situation is unrecoverable.
* * Initiates the Reconciliation Procedure, which allows the reconcile() method to execute.
*
* @param timer Timer for the operation.
* @return A CompletableFuture that indicates when the operation completed.
*/
private CompletableFuture<Void> beginReconciliation(TimeoutTimer timer) {
    assert this.state.get() == AggregatorState.ReconciliationNeeded : "beginReconciliation cannot be called if state == " + this.state;
    return this.storage.getStreamSegmentInfo(this.metadata.getName(), timer.getRemaining()).thenAcceptAsync(sp -> {
        if (sp.getLength() > this.metadata.getLength()) {
            // The Segment in Storage is longer than the Metadata indicates, most likely due to an external
            // actor altering the Segment. We cannot recover automatically from this situation.
            throw new CompletionException(new ReconciliationFailureException("Actual Segment length in Storage is larger than the Metadata Length.", this.metadata, sp));
        } else if (sp.getLength() < this.metadata.getStorageLength()) {
            // The Segment in Storage is shorter than what the Metadata records as already written (StorageLength).
            // We cannot recover automatically from this situation.
            throw new CompletionException(new ReconciliationFailureException("Actual Segment length in Storage is smaller than the Metadata StorageLength.", this.metadata, sp));
        } else if (sp.getLength() == this.metadata.getStorageLength() && sp.isSealed() == this.metadata.isSealedInStorage()) {
            // Nothing to do. Exit reconciliation and re-enter normal writing mode.
            setState(AggregatorState.Writing);
            return;
        }

        // If we get here, it means we have work to do. Set the state accordingly and move on.
        this.reconciliationState.set(new ReconciliationState(this.metadata, sp));
        setState(AggregatorState.Reconciling);
    }, this.executor).exceptionally(ex -> {
        ex = Exceptions.unwrap(ex);
        if (ex instanceof StreamSegmentNotExistsException) {
            // Segment does not exist in Storage. There are three possible situations:
            if (this.metadata.isMerged() || this.metadata.isDeleted()) {
                // Segment has actually been deleted. This is either due to un-acknowledged Merge/Delete operations
                // or because a concurrent instance of the same container (with a lower epoch) is still
                // running and was in the middle of executing the Merge/Delete operation while we were initializing.
                updateMetadataPostDeletion(this.metadata);
                log.info("{}: Segment '{}' does not exist in Storage (reconciliation). Ignoring all further operations on it.", this.traceObjectId, this.metadata.getName());
                this.reconciliationState.set(null);
                setState(AggregatorState.Reconciling);
            } else if (this.metadata.getStorageLength() > 0) {
                // The Metadata indicates data was already written to Storage, yet the Segment is missing from
                // Storage. We cannot recover automatically from this situation.
                throw new CompletionException(new ReconciliationFailureException("Segment does not exist in Storage, but Metadata StorageLength is non-zero.", this.metadata, StreamSegmentInformation.builder().name(this.metadata.getName()).deleted(true).build()));
            } else {
                // Segment does not exist in Storage, but the Metadata indicates it should be empty. This is
                // a valid situation since we may not have had a chance to create it yet.
                this.reconciliationState.set(new ReconciliationState(this.metadata, StreamSegmentInformation.builder().name(this.metadata.getName()).build()));
                setState(AggregatorState.Reconciling);
            }
        } else {
            // Other kind of error - re-throw.
            throw new CompletionException(ex);
        }

        return null;
    });
}
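The pattern shared by these call sites is that a single TimeoutTimer is created for the whole logical operation and timer.getRemaining() is passed to each Storage or metadata call, so chained stages draw from one shared timeout budget rather than each getting the full timeout. A minimal sketch of that pattern, assuming two hypothetical asynchronous stages (lookupStep and updateStep are illustrative stand-ins, not Pravega APIs):

import io.pravega.common.TimeoutTimer;

import java.time.Duration;
import java.util.concurrent.CompletableFuture;

public class SharedTimeoutSketch {
    // Hypothetical async stages; each accepts whatever time is left in the overall budget.
    private CompletableFuture<Void> lookupStep(String name, Duration timeout) {
        return CompletableFuture.completedFuture(null);
    }

    private CompletableFuture<Void> updateStep(String name, Duration timeout) {
        return CompletableFuture.completedFuture(null);
    }

    public CompletableFuture<Void> doBoth(String name, Duration timeout) {
        // One timer covers the whole operation; each stage consumes from the same budget,
        // so the remaining time shrinks as earlier stages complete.
        TimeoutTimer timer = new TimeoutTimer(timeout);
        return lookupStep(name, timer.getRemaining())
                .thenCompose(v -> updateStep(name, timer.getRemaining()));
    }
}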
use of io.pravega.common.TimeoutTimer in project pravega by pravega.
the class WriterTableProcessor method flushOnce.
/**
* Performs a single flush attempt.
*
* @param segment A {@link DirectSegmentAccess} representing the Segment to flush on.
* @param timer Timer for the operation.
* @return A CompletableFuture that, when completed, will indicate the flush has completed successfully. If the
* operation failed, it will be failed with the appropriate exception. Notable exceptions:
* <ul>
* <li>{@link BadAttributeUpdateException} If a conditional update on the {@link TableAttributes#INDEX_OFFSET} attribute failed.
* </ul>
*/
private CompletableFuture<TableWriterFlushResult> flushOnce(DirectSegmentAccess segment, TimeoutTimer timer) {
    // Index all the keys in the segment range pointed to by the aggregator.
    long lastOffset = this.aggregator.getLastIndexToProcessAtOnce(this.connector.getMaxFlushSize());
    assert lastOffset - this.aggregator.getFirstOffset() <= this.connector.getMaxFlushSize();
    if (lastOffset < this.aggregator.getLastOffset()) {
        log.info("{}: Partial flush initiated up to offset {}. State: {}.", this.traceObjectId, lastOffset, this.aggregator);
    }

    KeyUpdateCollection keyUpdates = readKeysFromSegment(segment, this.aggregator.getFirstOffset(), lastOffset, timer);
    log.debug("{}: Flush.ReadFromSegment KeyCount={}, UpdateCount={}, HighestCopiedOffset={}, LastIndexedOffset={}.", this.traceObjectId,
            keyUpdates.getUpdates().size(), keyUpdates.getTotalUpdateCount(), keyUpdates.getHighestCopiedOffset(), keyUpdates.getLastIndexedOffset());

    // Group the keys by their assigned TableBucket, then fetch all existing keys
    // for each such bucket and finally (reindex) update the bucket.
    return this.indexWriter.groupByBucket(segment, keyUpdates.getUpdates(), timer)
            .thenComposeAsync(builders -> fetchExistingKeys(builders, segment, timer).thenComposeAsync(v -> {
                val bucketUpdates = builders.stream().map(BucketUpdate.Builder::build).collect(Collectors.toList());
                logBucketUpdates(bucketUpdates);
                return this.indexWriter.updateBuckets(segment, bucketUpdates, this.aggregator.getLastIndexedOffset(),
                        keyUpdates.getLastIndexedOffset(), keyUpdates.getTotalUpdateCount(), timer.getRemaining());
            }, this.executor), this.executor)
            .thenApply(updateCount -> new TableWriterFlushResult(keyUpdates, updateCount));
}
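As the javadoc notes, a failed flush surfaces its cause (for example a BadAttributeUpdateException) through the returned future. Dependent CompletableFuture stages see such failures wrapped in a CompletionException, which is why code like beginReconciliation above calls io.pravega.common.Exceptions.unwrap before inspecting the type. A minimal sketch of that handling, using an already-failed future as a stand-in for the flush result:

import io.pravega.common.Exceptions;

import java.util.concurrent.CompletableFuture;

public class UnwrapFailureSketch {
    public static void main(String[] args) {
        // Stand-in for a failed flush; the real cause could be e.g. a BadAttributeUpdateException.
        CompletableFuture<Void> flush = CompletableFuture
                .<Void>failedFuture(new IllegalStateException("conditional update failed"))
                .thenRun(() -> { }); // a dependent stage, so the failure arrives wrapped in CompletionException

        flush.exceptionally(ex -> {
            // Strip the CompletionException wrapper to get at the real cause before deciding how to react.
            Throwable cause = Exceptions.unwrap(ex);
            System.out.println("Flush failed with: " + cause);
            return null;
        }).join();
    }
}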
use of io.pravega.common.TimeoutTimer in project pravega by pravega.
the class TableBucketReader method findAllExisting.
// endregion
// region Searching
/**
* Locates all {@link ResultT} instances in a TableBucket.
*
* @param bucketOffset The current segment offset of the Table Bucket we are looking into.
* @param timer A {@link TimeoutTimer} for the operation.
* @return A CompletableFuture that, when completed, will contain a List with the desired result items. This list
* will exclude all {@link ResultT} items that are marked as deleted.
*/
CompletableFuture<List<ResultT>> findAllExisting(long bucketOffset, TimeoutTimer timer) {
    val result = new HashMap<BufferView, ResultT>();

    // This handler ensures that items are only added once (per key) and only if they are not deleted. Since the items
    // are processed in descending version order, the first time we encounter its key is its latest value.
    BiConsumer<ResultT, Long> handler = (item, offset) -> {
        TableKey key = getKey(item);
        if (!result.containsKey(key.getKey())) {
            result.put(key.getKey(), key.getVersion() == TableKey.NOT_EXISTS ? null : item);
        }
    };
    return findAll(bucketOffset, handler, timer)
            .thenApply(v -> result.values().stream().filter(Objects::nonNull).collect(Collectors.toList()));
}
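The handler relies on items arriving newest-first: the first value recorded for a key is its latest one, and a deletion is recorded as a null placeholder (filtered out at the end) so older live versions of that key are not resurrected. Note the containsKey check rather than putIfAbsent: a deleted key maps to null, which putIfAbsent would later overwrite. A minimal, Pravega-free sketch of the same pattern, with an illustrative Entry type standing in for ResultT:

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;

public class LatestPerKeySketch {
    // Illustrative stand-in for ResultT: a key with a version and a deletion flag.
    static final class Entry {
        final String key;
        final long version;
        final boolean deleted;

        Entry(String key, long version, boolean deleted) {
            this.key = key;
            this.version = version;
            this.deleted = deleted;
        }
    }

    // Entries must be supplied newest-first, mirroring the descending version order above.
    static List<Entry> latestExisting(List<Entry> newestFirst) {
        Map<String, Entry> result = new HashMap<>();
        for (Entry e : newestFirst) {
            // containsKey (not putIfAbsent) so that a deletion, recorded as null, blocks older versions of the key.
            if (!result.containsKey(e.key)) {
                result.put(e.key, e.deleted ? null : e);
            }
        }
        return result.values().stream().filter(Objects::nonNull).collect(Collectors.toList());
    }
}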
use of io.pravega.common.TimeoutTimer in project pravega by pravega.
the class StreamSegmentContainer method append.
@Override
public CompletableFuture<Long> append(String streamSegmentName, long offset, BufferView data, AttributeUpdateCollection attributeUpdates, Duration timeout) {
    ensureRunning();
    TimeoutTimer timer = new TimeoutTimer(timeout);
    logRequest("appendWithOffset", streamSegmentName, data.getLength());
    this.metrics.appendWithOffset();
    return this.metadataStore.getOrAssignSegmentId(streamSegmentName, timer.getRemaining(),
            streamSegmentId -> {
                val operation = new StreamSegmentAppendOperation(streamSegmentId, offset, data, attributeUpdates);
                return processAppend(operation, timer).thenApply(v -> operation.getLastStreamSegmentOffset());
            });
}
use of io.pravega.common.TimeoutTimer in project pravega by pravega.
the class StreamSegmentContainer method sealStreamSegment.
@Override
public CompletableFuture<Long> sealStreamSegment(String streamSegmentName, Duration timeout) {
    ensureRunning();
    logRequest("seal", streamSegmentName);
    this.metrics.seal();
    TimeoutTimer timer = new TimeoutTimer(timeout);
    return this.metadataStore.getOrAssignSegmentId(streamSegmentName, timer.getRemaining(),
            streamSegmentId -> {
                StreamSegmentSealOperation operation = new StreamSegmentSealOperation(streamSegmentId);
                return addOperation(operation, timeout).thenApply(seqNo -> operation.getStreamSegmentOffset());
            });
}
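Both container methods follow the same shape: validate state, record metrics, create one TimeoutTimer from the caller-supplied Duration, and hand timer.getRemaining() to the segment-id lookup so the inner operation runs within whatever time is left. A caller-side sketch, assuming a started store exposing the StreamSegmentStore contract (the import path and the 30-second budget are assumptions):

import io.pravega.segmentstore.contracts.StreamSegmentStore;

import java.time.Duration;
import java.util.concurrent.CompletableFuture;

public class SealCallerSketch {
    // Seals the given segment with a single overall time budget and returns its final length.
    public static CompletableFuture<Long> sealWithBudget(StreamSegmentStore store, String segmentName) {
        return store.sealStreamSegment(segmentName, Duration.ofSeconds(30));
    }
}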